repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
DDOD | DDOD-main/mmdet/datasets/crowdhuman.py | import itertools
import logging
import os.path as osp
import tempfile
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .api_wrappers import COCO, COCOeval
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class CrowdhumanDataset(CustomDataset):
CLASSES = ('person', )
def load_annotations(self, ann_file):
    """Load annotation from COCO style annotation file.

    Args:
        ann_file (str): Path of annotation file.

    Returns:
        list[dict]: Annotation info from COCO api, one dict per image,
            each with a ``filename`` key added.
    """
    self.coco = COCO(ann_file)
    # The order of returned `cat_ids` will not change with the order
    # of the CLASSES.
    self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
    self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
    self.img_ids = self.coco.get_img_ids()

    data_infos = []
    total_ann_ids = []
    for img_id in self.img_ids:
        info = self.coco.load_imgs([img_id])[0]
        info['filename'] = info['file_name']
        data_infos.append(info)
        total_ann_ids.extend(self.coco.get_ann_ids(img_ids=[img_id]))
    # Duplicate annotation ids would silently corrupt evaluation.
    assert len(set(total_ann_ids)) == len(
        total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
    return data_infos
def get_ann_info(self, idx):
    """Get COCO annotation by index.

    Args:
        idx (int): Index of data.

    Returns:
        dict: Annotation info of specified index.
    """
    img_info = self.data_infos[idx]
    ann_ids = self.coco.get_ann_ids(img_ids=[img_info['id']])
    return self._parse_ann_info(img_info, self.coco.load_anns(ann_ids))
def get_cat_ids(self, idx):
    """Get COCO category ids by index.

    Args:
        idx (int): Index of data.

    Returns:
        list[int]: All categories in the image of specified index.
    """
    img_id = self.data_infos[idx]['id']
    anns = self.coco.load_anns(self.coco.get_ann_ids(img_ids=[img_id]))
    return [ann['category_id'] for ann in anns]
def _filter_imgs(self, min_size=32):
    """Filter images too small or without ground truths.

    Args:
        min_size (int): Minimum allowed width/height of a kept image.

    Returns:
        list[int]: Indices into ``self.data_infos`` of kept images.
            Also rewrites ``self.img_ids`` to the matching ids so the
            two stay aligned for later json conversion.
    """
    valid_inds = []
    # obtain images that contain annotation
    ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
    # obtain images that contain annotations of the required categories
    ids_in_cat = set()
    for i, class_id in enumerate(self.cat_ids):
        ids_in_cat |= set(self.coco.cat_img_map[class_id])
    # merge the image id sets of the two conditions and use the merged set
    # to filter out images if self.filter_empty_gt=True
    ids_in_cat &= ids_with_ann
    valid_img_ids = []
    # NOTE: relies on self.img_ids[i] corresponding to self.data_infos[i],
    # which load_annotations establishes by building both in the same loop.
    for i, img_info in enumerate(self.data_infos):
        img_id = self.img_ids[i]
        if self.filter_empty_gt and img_id not in ids_in_cat:
            continue
        if min(img_info['width'], img_info['height']) >= min_size:
            valid_inds.append(i)
            valid_img_ids.append(img_id)
    self.img_ids = valid_img_ids
    return valid_inds
def _parse_ann_info(self, img_info, ann_info):
    """Parse bbox and mask annotation.

    Args:
        img_info (dict): Image info of an image; must contain the keys
            ``width``, ``height`` and ``filename``.
        ann_info (list[dict]): Annotation info of an image.

    Returns:
        dict: A dict containing the following keys: bboxes, bboxes_ignore,\
            labels, masks, seg_map. "masks" are raw annotations and not \
            decoded into binary masks.
    """
    gt_bboxes = []
    gt_labels = []
    gt_bboxes_ignore = []
    gt_masks_ann = []
    for ann in ann_info:
        if ann.get('ignore', False):
            continue
        x1, y1, w, h = ann['bbox']
        # Skip boxes whose intersection with the image canvas is empty.
        inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
        inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
        if inter_w * inter_h == 0:
            continue
        if ann['area'] <= 0 or w < 1 or h < 1:
            continue
        if ann['category_id'] not in self.cat_ids:
            continue
        bbox = [x1, y1, x1 + w, y1 + h]
        if ann.get('iscrowd', False):
            # Crowd regions are kept separately and ignored by assigners.
            gt_bboxes_ignore.append(bbox)
        else:
            gt_bboxes.append(bbox)
            gt_labels.append(self.cat2label[ann['category_id']])
            gt_masks_ann.append(ann.get('segmentation', None))
    if gt_bboxes:
        gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
        gt_labels = np.array(gt_labels, dtype=np.int64)
    else:
        gt_bboxes = np.zeros((0, 4), dtype=np.float32)
        gt_labels = np.array([], dtype=np.int64)
    if gt_bboxes_ignore:
        gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
    else:
        gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
    # Fix: replace only the file extension. The previous
    # filename.replace('jpg', 'png') rewrote the first 'jpg' substring
    # anywhere in the path (e.g. a 'jpgs/' directory component).
    seg_map = osp.splitext(img_info['filename'])[0] + '.png'
    ann = dict(
        bboxes=gt_bboxes,
        labels=gt_labels,
        bboxes_ignore=gt_bboxes_ignore,
        masks=gt_masks_ann,
        seg_map=seg_map)
    return ann
def xyxy2xywh(self, bbox):
    """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
    evaluation.

    Args:
        bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
            ``xyxy`` order.

    Returns:
        list[float]: The converted bounding boxes, in ``xywh`` order.
    """
    x1, y1, x2, y2 = bbox.tolist()[:4]
    return [x1, y1, x2 - x1, y2 - y1]
def _proposal2json(self, results):
    """Convert proposal results to COCO json style."""
    json_results = []
    for idx in range(len(self)):
        img_id = self.img_ids[idx]
        for bbox in results[idx]:
            json_results.append(
                dict(
                    image_id=img_id,
                    bbox=self.xyxy2xywh(bbox),
                    score=float(bbox[4]),
                    # Proposals are class-agnostic; COCO still needs a
                    # category id, so a constant 1 is used.
                    category_id=1))
    return json_results
def _det2json(self, results):
    """Convert detection results to COCO json style."""
    json_results = []
    for idx in range(len(self)):
        img_id = self.img_ids[idx]
        # results[idx] is a per-class list of (n, 5) arrays.
        for label, bboxes in enumerate(results[idx]):
            cat_id = self.cat_ids[label]
            for bbox in bboxes:
                json_results.append(
                    dict(
                        image_id=img_id,
                        bbox=self.xyxy2xywh(bbox),
                        score=float(bbox[4]),
                        category_id=cat_id))
    return json_results
def _segm2json(self, results):
    """Convert instance segmentation results to COCO json style.

    Args:
        results (list[tuple]): Per-image ``(det, seg)`` pairs, where
            ``det`` is a per-class list of (n, 5) bbox arrays and ``seg``
            is either a per-class list of RLE masks or a
            ``(masks, mask_scores)`` tuple.

    Returns:
        tuple[list[dict], list[dict]]: COCO-style bbox results and
            segm results.
    """
    bbox_json_results = []
    segm_json_results = []
    for idx in range(len(self)):
        img_id = self.img_ids[idx]
        det, seg = results[idx]
        for label in range(len(det)):
            # bbox results
            bboxes = det[label]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = self.xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                data['category_id'] = self.cat_ids[label]
                bbox_json_results.append(data)
            # segm results
            # some detectors use different scores for bbox and mask
            if isinstance(seg, tuple):
                segms = seg[0][label]
                mask_score = seg[1][label]
            else:
                segms = seg[label]
                # fall back to the bbox confidence for the mask score
                mask_score = [bbox[4] for bbox in bboxes]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = self.xyxy2xywh(bboxes[i])
                data['score'] = float(mask_score[i])
                data['category_id'] = self.cat_ids[label]
                # json cannot serialize bytes; decode RLE counts in place
                # (mutates the input results).
                if isinstance(segms[i]['counts'], bytes):
                    segms[i]['counts'] = segms[i]['counts'].decode()
                data['segmentation'] = segms[i]
                segm_json_results.append(data)
    return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
    """Dump the detection results to a COCO style json file.

    There are 3 types of results: proposals, bbox predictions, mask
    predictions, and they have different data types. This method will
    automatically recognize the type, and dump them to json files.

    Args:
        results (list[list | tuple | ndarray]): Testing results of the
            dataset.
        outfile_prefix (str): The filename prefix of the json files. If the
            prefix is "somepath/xxx", the json files will be named
            "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
            "somepath/xxx.proposal.json".

    Returns:
        dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
            values are corresponding filenames.
    """
    result_files = dict()
    sample = results[0]
    bbox_file = f'{outfile_prefix}.bbox.json'
    if isinstance(sample, list):
        # plain detection results
        json_results = self._det2json(results)
        result_files['bbox'] = bbox_file
        result_files['proposal'] = bbox_file
        mmcv.dump(json_results, bbox_file)
    elif isinstance(sample, tuple):
        # detection + instance segmentation results
        bbox_json, segm_json = self._segm2json(results)
        segm_file = f'{outfile_prefix}.segm.json'
        result_files['bbox'] = bbox_file
        result_files['proposal'] = bbox_file
        result_files['segm'] = segm_file
        mmcv.dump(bbox_json, bbox_file)
        mmcv.dump(segm_json, segm_file)
    elif isinstance(sample, np.ndarray):
        # class-agnostic proposals
        json_results = self._proposal2json(results)
        proposal_file = f'{outfile_prefix}.proposal.json'
        result_files['proposal'] = proposal_file
        mmcv.dump(json_results, proposal_file)
    else:
        raise TypeError('invalid type of results')
    return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
    """Evaluate proposal recall without going through the COCO json dump.

    Args:
        results (list[ndarray]): Per-image proposal arrays.
        proposal_nums (Sequence[int]): Proposal numbers to evaluate at.
        iou_thrs (Sequence[float]): IoU thresholds.
        logger (logging.Logger | str | None): Logger for printing.

    Returns:
        ndarray: Average recall per proposal number.
    """
    gt_bboxes = []
    for img_id in self.img_ids:
        ann_info = self.coco.load_anns(
            self.coco.get_ann_ids(img_ids=img_id))
        if len(ann_info) == 0:
            gt_bboxes.append(np.zeros((0, 4)))
            continue
        # xywh -> xyxy, skipping ignore/crowd regions
        boxes = [[
            ann['bbox'][0], ann['bbox'][1],
            ann['bbox'][0] + ann['bbox'][2],
            ann['bbox'][1] + ann['bbox'][3]
        ] for ann in ann_info
                 if not (ann.get('ignore', False) or ann['iscrowd'])]
        boxes = np.array(boxes, dtype=np.float32)
        if boxes.shape[0] == 0:
            boxes = np.zeros((0, 4))
        gt_bboxes.append(boxes)
    recalls = eval_recalls(
        gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
    return recalls.mean(axis=1)
def format_results(self, results, jsonfile_prefix=None, **kwargs):
    """Format the results to json (standard format for COCO evaluation).

    Args:
        results (list[tuple | numpy.ndarray]): Testing results of the
            dataset.
        jsonfile_prefix (str | None): The prefix of json files. It includes
            the file path and the prefix of filename, e.g., "a/b/prefix".
            If not specified, a temp file will be created. Default: None.

    Returns:
        tuple: (result_files, tmp_dir), result_files is a dict containing \
            the json filepaths, tmp_dir is the temporal directory created \
            for saving json files when jsonfile_prefix is not specified.
    """
    assert isinstance(results, list), 'results must be a list'
    assert len(results) == len(self), (
        'The length of results is not equal to the dataset len: {} != {}'.
        format(len(results), len(self)))
    tmp_dir = None
    if jsonfile_prefix is None:
        # Caller is responsible for cleaning up tmp_dir.
        tmp_dir = tempfile.TemporaryDirectory()
        jsonfile_prefix = osp.join(tmp_dir.name, 'results')
    result_files = self.results2json(results, jsonfile_prefix)
    return result_files, tmp_dir
def evaluate(self,
             results,
             metric='bbox',
             logger=None,
             jsonfile_prefix=None,
             classwise=False,
             proposal_nums=(100, 300, 1000),
             iou_thrs=None,
             metric_items=None):
    """Evaluation in COCO protocol.

    Args:
        results (list[list | tuple]): Testing results of the dataset.
        metric (str | list[str]): Metrics to be evaluated. Options are
            'bbox', 'segm', 'proposal', 'proposal_fast'.
        logger (logging.Logger | str | None): Logger used for printing
            related information during evaluation. Default: None.
        jsonfile_prefix (str | None): The prefix of json files. It includes
            the file path and the prefix of filename, e.g., "a/b/prefix".
            If not specified, a temp file will be created. Default: None.
        classwise (bool): Whether to evaluating the AP for each class.
        proposal_nums (Sequence[int]): Proposal number used for evaluating
            recalls, such as recall@100, recall@1000.
            Default: (100, 300, 1000).
        iou_thrs (Sequence[float], optional): IoU threshold used for
            evaluating recalls/mAPs. If set to a list, the average of all
            IoUs will also be computed. If not specified, [0.50, 0.55,
            0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
            Default: None.
        metric_items (list[str] | str, optional): Metric items that will
            be returned. If not specified, ``['AR@100', 'AR@300',
            'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
            used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
            'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
            ``metric=='bbox' or metric=='segm'``.

    Returns:
        dict[str, float]: COCO style evaluation metric.

    Raises:
        KeyError: If a requested metric or metric item is unsupported, or
            a metric is missing from the formatted result files.
    """
    metrics = metric if isinstance(metric, list) else [metric]
    allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
    for metric in metrics:
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
    if iou_thrs is None:
        # Standard COCO thresholds 0.50:0.05:0.95 (10 values).
        iou_thrs = np.linspace(
            .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
    if metric_items is not None:
        if not isinstance(metric_items, list):
            metric_items = [metric_items]
    # Dump results to json; tmp_dir is non-None only when no prefix given.
    result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
    eval_results = OrderedDict()
    cocoGt = self.coco
    for metric in metrics:
        msg = f'Evaluating {metric}...'
        if logger is None:
            msg = '\n' + msg
        print_log(msg, logger=logger)
        if metric == 'proposal_fast':
            # Shortcut path that skips pycocotools entirely.
            ar = self.fast_eval_recall(
                results, proposal_nums, iou_thrs, logger='silent')
            log_msg = []
            for i, num in enumerate(proposal_nums):
                eval_results[f'AR@{num}'] = ar[i]
                log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
            log_msg = ''.join(log_msg)
            print_log(log_msg, logger=logger)
            continue
        # 'proposal' is evaluated as class-agnostic bbox.
        iou_type = 'bbox' if metric == 'proposal' else metric
        if metric not in result_files:
            raise KeyError(f'{metric} is not in results')
        try:
            predictions = mmcv.load(result_files[metric])
            if iou_type == 'segm':
                # Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331 # noqa
                # When evaluating mask AP, if the results contain bbox,
                # cocoapi will use the box area instead of the mask area
                # for calculating the instance area. Though the overall AP
                # is not affected, this leads to different
                # small/medium/large mask AP results.
                for x in predictions:
                    x.pop('bbox')
                warnings.simplefilter('once')
                warnings.warn(
                    'The key "bbox" is deleted for more accurate mask AP '
                    'of small/medium/large instances since v2.12.0. This '
                    'does not change the overall mAP calculation.',
                    UserWarning)
            cocoDt = cocoGt.loadRes(predictions)
        except IndexError:
            # loadRes raises IndexError on an empty prediction list.
            print_log(
                'The testing results of the whole dataset is empty.',
                logger=logger,
                level=logging.ERROR)
            break
        cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
        cocoEval.params.catIds = self.cat_ids
        cocoEval.params.imgIds = self.img_ids
        cocoEval.params.maxDets = list(proposal_nums)
        cocoEval.params.iouThrs = iou_thrs
        # mapping of cocoEval.stats
        coco_metric_names = {
            'mAP': 0,
            'mAP_50': 1,
            'mAP_75': 2,
            'mAP_s': 3,
            'mAP_m': 4,
            'mAP_l': 5,
            'AR@100': 6,
            'AR@300': 7,
            'AR@1000': 8,
            'AR_s@1000': 9,
            'AR_m@1000': 10,
            'AR_l@1000': 11
        }
        if metric_items is not None:
            for metric_item in metric_items:
                if metric_item not in coco_metric_names:
                    raise KeyError(
                        f'metric item {metric_item} is not supported')
        if metric == 'proposal':
            # Class-agnostic evaluation: only recall stats are meaningful.
            cocoEval.params.useCats = 0
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            if metric_items is None:
                metric_items = [
                    'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
                    'AR_m@1000', 'AR_l@1000'
                ]
            for item in metric_items:
                val = float(
                    f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
                eval_results[item] = val
        else:
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            if classwise:  # Compute per-category AP
                # Compute per-category AP
                # from https://github.com/facebookresearch/detectron2/
                precisions = cocoEval.eval['precision']
                # precision: (iou, recall, cls, area range, max dets)
                assert len(self.cat_ids) == precisions.shape[2]
                results_per_category = []
                for idx, catId in enumerate(self.cat_ids):
                    # area range index 0: all area ranges
                    # max dets index -1: typically 100 per image
                    nm = self.coco.loadCats(catId)[0]
                    precision = precisions[:, :, idx, 0, -1]
                    # -1 marks absent entries; exclude from the mean.
                    precision = precision[precision > -1]
                    if precision.size:
                        ap = np.mean(precision)
                    else:
                        ap = float('nan')
                    results_per_category.append(
                        (f'{nm["name"]}', f'{float(ap):0.3f}'))
                # Lay out (category, AP) pairs in up to 3 column pairs.
                num_columns = min(6, len(results_per_category) * 2)
                results_flatten = list(
                    itertools.chain(*results_per_category))
                headers = ['category', 'AP'] * (num_columns // 2)
                results_2d = itertools.zip_longest(*[
                    results_flatten[i::num_columns]
                    for i in range(num_columns)
                ])
                table_data = [headers]
                table_data += [result for result in results_2d]
                table = AsciiTable(table_data)
                print_log('\n' + table.table, logger=logger)
            if metric_items is None:
                metric_items = [
                    'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
                ]
            for metric_item in metric_items:
                key = f'{metric}_{metric_item}'
                val = float(
                    f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
                )
                eval_results[key] = val
            # Space-separated summary string convenient for copy/paste.
            ap = cocoEval.stats[:6]
            eval_results[f'{metric}_mAP_copypaste'] = (
                f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                f'{ap[4]:.3f} {ap[5]:.3f}')
    if tmp_dir is not None:
        tmp_dir.cleanup()
    return eval_results
| 22,425 | 40.07326 | 124 | py |
DDOD | DDOD-main/mmdet/datasets/cityscapes.py | # Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa
# and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa
import glob
import os
import os.path as osp
import tempfile
from collections import OrderedDict
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
from mmcv.utils import print_log
from .builder import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class CityscapesDataset(CocoDataset):
CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
'bicycle')
def _filter_imgs(self, min_size=32):
    """Filter images too small or without ground truths.

    Unlike the base COCO filtering, this also drops images whose
    annotations are ALL crowd regions.

    Args:
        min_size (int): Minimum allowed width/height of a kept image.

    Returns:
        list[int]: Indices into ``self.data_infos`` of kept images.
            Also rewrites ``self.img_ids`` to the matching ids.
    """
    valid_inds = []
    # obtain images that contain annotation
    ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
    # obtain images that contain annotations of the required categories
    ids_in_cat = set()
    for i, class_id in enumerate(self.cat_ids):
        ids_in_cat |= set(self.coco.cat_img_map[class_id])
    # merge the image id sets of the two conditions and use the merged set
    # to filter out images if self.filter_empty_gt=True
    ids_in_cat &= ids_with_ann
    valid_img_ids = []
    for i, img_info in enumerate(self.data_infos):
        img_id = img_info['id']
        ann_ids = self.coco.getAnnIds(imgIds=[img_id])
        ann_info = self.coco.loadAnns(ann_ids)
        # True when every annotation on this image is a crowd region.
        all_iscrowd = all([_['iscrowd'] for _ in ann_info])
        if self.filter_empty_gt and (self.img_ids[i] not in ids_in_cat
                                     or all_iscrowd):
            continue
        if min(img_info['width'], img_info['height']) >= min_size:
            valid_inds.append(i)
            valid_img_ids.append(img_id)
    self.img_ids = valid_img_ids
    return valid_inds
def _parse_ann_info(self, img_info, ann_info):
    """Parse bbox and mask annotation.

    Args:
        img_info (dict): Image info of an image.
        ann_info (list[dict]): Annotation info of an image.

    Returns:
        dict: A dict containing the following keys: bboxes, \
            bboxes_ignore, labels, masks, seg_map. \
            "masks" are already decoded into binary masks.
    """
    gt_bboxes = []
    gt_labels = []
    gt_bboxes_ignore = []
    gt_masks_ann = []
    for ann in ann_info:
        if ann.get('ignore', False):
            continue
        x1, y1, box_w, box_h = ann['bbox']
        if ann['area'] <= 0 or box_w < 1 or box_h < 1:
            continue
        if ann['category_id'] not in self.cat_ids:
            continue
        bbox = [x1, y1, x1 + box_w, y1 + box_h]
        if ann.get('iscrowd', False):
            # Crowd regions are kept separately and ignored by assigners.
            gt_bboxes_ignore.append(bbox)
        else:
            gt_bboxes.append(bbox)
            gt_labels.append(self.cat2label[ann['category_id']])
            gt_masks_ann.append(ann['segmentation'])
    if gt_bboxes:
        bboxes_arr = np.array(gt_bboxes, dtype=np.float32)
        labels_arr = np.array(gt_labels, dtype=np.int64)
    else:
        bboxes_arr = np.zeros((0, 4), dtype=np.float32)
        labels_arr = np.array([], dtype=np.int64)
    if gt_bboxes_ignore:
        ignore_arr = np.array(gt_bboxes_ignore, dtype=np.float32)
    else:
        ignore_arr = np.zeros((0, 4), dtype=np.float32)
    return dict(
        bboxes=bboxes_arr,
        labels=labels_arr,
        bboxes_ignore=ignore_arr,
        masks=gt_masks_ann,
        seg_map=img_info['segm_file'])
def results2txt(self, results, outfile_prefix):
    """Dump the detection results to a txt file.

    Args:
        results (list[list | tuple]): Testing results of the
            dataset.
        outfile_prefix (str): The filename prefix of the json files.
            If the prefix is "somepath/xxx",
            the txt files will be named "somepath/xxx.txt".

    Returns:
        list[str]: Result txt files which contains corresponding \
            instance segmentation images.
    """
    try:
        import cityscapesscripts.helpers.labels as CSLabels
    except ImportError:
        raise ImportError('Please run "pip install citscapesscripts" to '
                          'install cityscapesscripts first.')
    result_files = []
    os.makedirs(outfile_prefix, exist_ok=True)
    prog_bar = mmcv.ProgressBar(len(self))
    for idx in range(len(self)):
        result = results[idx]
        filename = self.data_infos[idx]['filename']
        basename = osp.splitext(osp.basename(filename))[0]
        pred_txt = osp.join(outfile_prefix, basename + '_pred.txt')
        bbox_result, segm_result = result
        bboxes = np.vstack(bbox_result)
        # segm results
        if isinstance(segm_result, tuple):
            # Some detectors use different scores for bbox and mask,
            # like Mask Scoring R-CNN. Score of segm will be used instead
            # of bbox score.
            segms = mmcv.concat_list(segm_result[0])
            mask_score = segm_result[1]
        else:
            # use bbox score for mask score
            segms = mmcv.concat_list(segm_result)
            mask_score = [bbox[-1] for bbox in bboxes]
        # Per-instance label indices, flattened in the same order as the
        # stacked bboxes above.
        labels = [
            np.full(bbox.shape[0], i, dtype=np.int32)
            for i, bbox in enumerate(bbox_result)
        ]
        labels = np.concatenate(labels)
        assert len(bboxes) == len(segms) == len(labels)
        num_instances = len(bboxes)
        prog_bar.update()
        # Cityscapes format: one txt per image listing, for each instance,
        # its mask png filename, class id and confidence.
        with open(pred_txt, 'w') as fout:
            for i in range(num_instances):
                pred_class = labels[i]
                classes = self.CLASSES[pred_class]
                class_id = CSLabels.name2label[classes].id
                score = mask_score[i]
                mask = maskUtils.decode(segms[i]).astype(np.uint8)
                png_filename = osp.join(outfile_prefix,
                                        basename + f'_{i}_{classes}.png')
                mmcv.imwrite(mask, png_filename)
                fout.write(f'{osp.basename(png_filename)} {class_id} '
                           f'{score}\n')
        result_files.append(pred_txt)
    return result_files
def format_results(self, results, txtfile_prefix=None):
    """Format the results to txt (standard format for Cityscapes
    evaluation).

    Args:
        results (list): Testing results of the dataset.
        txtfile_prefix (str | None): The prefix of txt files. It includes
            the file path and the prefix of filename, e.g., "a/b/prefix".
            If not specified, a temp file will be created. Default: None.

    Returns:
        tuple: (result_files, tmp_dir), result_files is a dict containing \
            the json filepaths, tmp_dir is the temporal directory created \
            for saving txt/png files when txtfile_prefix is not specified.
    """
    # Fix: this assert block was previously duplicated verbatim.
    assert isinstance(results, list), 'results must be a list'
    assert len(results) == len(self), (
        'The length of results is not equal to the dataset len: {} != {}'.
        format(len(results), len(self)))
    if txtfile_prefix is None:
        # Caller is responsible for cleaning up tmp_dir.
        tmp_dir = tempfile.TemporaryDirectory()
        txtfile_prefix = osp.join(tmp_dir.name, 'results')
    else:
        tmp_dir = None
    result_files = self.results2txt(results, txtfile_prefix)
    return result_files, tmp_dir
def evaluate(self,
             results,
             metric='bbox',
             logger=None,
             outfile_prefix=None,
             classwise=False,
             proposal_nums=(100, 300, 1000),
             iou_thrs=None):
    """Evaluation in Cityscapes/COCO protocol.

    Args:
        results (list[list | tuple]): Testing results of the dataset.
        metric (str | list[str]): Metrics to be evaluated. Options are
            'bbox', 'segm', 'proposal', 'proposal_fast'.
        logger (logging.Logger | str | None): Logger used for printing
            related information during evaluation. Default: None.
        outfile_prefix (str | None): The prefix of output file. It includes
            the file path and the prefix of filename, e.g., "a/b/prefix".
            If results are evaluated with COCO protocol, it would be the
            prefix of output json file. For example, the metric is 'bbox'
            and 'segm', then json files would be "a/b/prefix.bbox.json" and
            "a/b/prefix.segm.json".
            If results are evaluated with cityscapes protocol, it would be
            the prefix of output txt/png files. The output files would be
            png images under folder "a/b/prefix/xxx/" and the file name of
            images would be written into a txt file
            "a/b/prefix/xxx_pred.txt", where "xxx" is the video name of
            cityscapes. If not specified, a temp file will be created.
            Default: None.
        classwise (bool): Whether to evaluating the AP for each class.
        proposal_nums (Sequence[int]): Proposal number used for evaluating
            recalls, such as recall@100, recall@1000.
            Default: (100, 300, 1000).
        iou_thrs (Sequence[float] | None): IoU threshold used for
            evaluating recalls. If set to a list, the average recall of
            all IoUs will also be computed. If None,
            ``np.arange(0.5, 0.96, 0.05)`` is used. Default: None.

    Returns:
        dict[str, float]: COCO style evaluation metric or cityscapes mAP \
            and AP@50.
    """
    # Fix: the default used to be the mutable ndarray
    # `np.arange(0.5, 0.96, 0.05)` evaluated once at import time; a None
    # sentinel with the same values avoids the shared mutable default.
    if iou_thrs is None:
        iou_thrs = np.arange(0.5, 0.96, 0.05)
    eval_results = dict()
    # Copy so removing 'cityscapes' below does not mutate the caller's list.
    metrics = metric.copy() if isinstance(metric, list) else [metric]
    if 'cityscapes' in metrics:
        eval_results.update(
            self._evaluate_cityscapes(results, outfile_prefix, logger))
        metrics.remove('cityscapes')
    # left metrics are all coco metric
    if len(metrics) > 0:
        # create CocoDataset with CityscapesDataset annotation
        self_coco = CocoDataset(self.ann_file, self.pipeline.transforms,
                                None, self.data_root, self.img_prefix,
                                self.seg_prefix, self.proposal_file,
                                self.test_mode, self.filter_empty_gt)
        # TODO: remove this in the future
        # reload annotations of correct class
        self_coco.CLASSES = self.CLASSES
        self_coco.data_infos = self_coco.load_annotations(self.ann_file)
        eval_results.update(
            self_coco.evaluate(results, metrics, logger, outfile_prefix,
                               classwise, proposal_nums, iou_thrs))
    return eval_results
def _evaluate_cityscapes(self, results, txtfile_prefix, logger):
    """Evaluation in Cityscapes protocol.

    Args:
        results (list): Testing results of the dataset.
        txtfile_prefix (str | None): The prefix of output txt file
        logger (logging.Logger | str | None): Logger used for printing
            related information during evaluation. Default: None.

    Returns:
        dict[str: float]: Cityscapes evaluation results, contains 'mAP' \
            and 'AP@50'.
    """
    try:
        import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval  # noqa
    except ImportError:
        raise ImportError('Please run "pip install citscapesscripts" to '
                          'install cityscapesscripts first.')
    msg = 'Evaluating in Cityscapes style'
    if logger is None:
        msg = '\n' + msg
    print_log(msg, logger=logger)
    result_files, tmp_dir = self.format_results(results, txtfile_prefix)
    if tmp_dir is None:
        result_dir = osp.join(txtfile_prefix, 'results')
    else:
        result_dir = osp.join(tmp_dir.name, 'results')
    eval_results = OrderedDict()
    print_log(f'Evaluating results under {result_dir} ...', logger=logger)
    # set global states in cityscapes evaluation API
    # NOTE(review): CSEval.args is module-level state shared by every
    # caller of cityscapesscripts in this process.
    CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')
    CSEval.args.predictionPath = os.path.abspath(result_dir)
    CSEval.args.predictionWalk = None
    CSEval.args.JSONOutput = False
    CSEval.args.colorized = False
    CSEval.args.gtInstancesFile = os.path.join(result_dir,
                                               'gtInstances.json')
    # Assumes the standard Cityscapes layout where ground-truth lives in a
    # 'gtFine' tree parallel to 'leftImg8bit'.
    CSEval.args.groundTruthSearch = os.path.join(
        self.img_prefix.replace('leftImg8bit', 'gtFine'),
        '*/*_gtFine_instanceIds.png')
    groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)
    assert len(groundTruthImgList), 'Cannot find ground truth images' \
        f' in {CSEval.args.groundTruthSearch}.'
    predictionImgList = []
    for gt in groundTruthImgList:
        predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))
    CSEval_results = CSEval.evaluateImgLists(predictionImgList,
                                             groundTruthImgList,
                                             CSEval.args)['averages']
    eval_results['mAP'] = CSEval_results['allAp']
    eval_results['AP@50'] = CSEval_results['allAp50%']
    if tmp_dir is not None:
        tmp_dir.cleanup()
    return eval_results
| 14,288 | 41.653731 | 135 | py |
DDOD | DDOD-main/mmdet/datasets/utils.py | import copy
import warnings
from mmcv.cnn import VGG
from mmcv.runner.hooks import HOOKS, Hook
from mmdet.datasets.builder import PIPELINES
from mmdet.datasets.pipelines import LoadAnnotations, LoadImageFromFile
from mmdet.models.dense_heads import GARPNHead, RPNHead
from mmdet.models.roi_heads.mask_heads import FusedSemanticHead
def replace_ImageToTensor(pipelines):
    """Replace the ImageToTensor transform in a data pipeline to
    DefaultFormatBundle, which is normally useful in batch inference.

    Args:
        pipelines (list[dict]): Data pipeline configs.

    Returns:
        list: The new pipeline list with all ImageToTensor replaced by
            DefaultFormatBundle. The input list is left untouched.

    Examples:
        >>> pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(
        ...        type='MultiScaleFlipAug',
        ...        img_scale=(1333, 800),
        ...        flip=False,
        ...        transforms=[
        ...            dict(type='Resize', keep_ratio=True),
        ...            dict(type='RandomFlip'),
        ...            dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
        ...            dict(type='Pad', size_divisor=32),
        ...            dict(type='ImageToTensor', keys=['img']),
        ...            dict(type='Collect', keys=['img']),
        ...        ])
        ...    ]
        >>> expected_pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(
        ...        type='MultiScaleFlipAug',
        ...        img_scale=(1333, 800),
        ...        flip=False,
        ...        transforms=[
        ...            dict(type='Resize', keep_ratio=True),
        ...            dict(type='RandomFlip'),
        ...            dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
        ...            dict(type='Pad', size_divisor=32),
        ...            dict(type='DefaultFormatBundle'),
        ...            dict(type='Collect', keys=['img']),
        ...        ])
        ...    ]
        >>> assert expected_pipelines == replace_ImageToTensor(pipelines)
    """
    new_pipelines = []
    # Deep-copy first so neither the top-level dicts nor nested
    # 'transforms' lists of the caller are modified.
    for cfg in copy.deepcopy(pipelines):
        if cfg['type'] == 'MultiScaleFlipAug':
            assert 'transforms' in cfg
            # Recurse into the wrapped test-time transforms.
            cfg['transforms'] = replace_ImageToTensor(cfg['transforms'])
            new_pipelines.append(cfg)
        elif cfg['type'] == 'ImageToTensor':
            warnings.warn(
                '"ImageToTensor" pipeline is replaced by '
                '"DefaultFormatBundle" for batch inference. It is '
                'recommended to manually replace it in the test '
                'data pipeline in your config file.', UserWarning)
            new_pipelines.append({'type': 'DefaultFormatBundle'})
        else:
            new_pipelines.append(cfg)
    return new_pipelines
def get_loading_pipeline(pipeline):
    """Only keep loading image and annotations related configuration.

    Args:
        pipeline (list[dict]): Data pipeline configs.

    Returns:
        list[dict]: The new pipeline list with only keep
            loading image and annotations related configuration.

    Examples:
        >>> pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(type='LoadAnnotations', with_bbox=True),
        ...    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
        ...    dict(type='RandomFlip', flip_ratio=0.5),
        ...    dict(type='Normalize', **img_norm_cfg),
        ...    dict(type='Pad', size_divisor=32),
        ...    dict(type='DefaultFormatBundle'),
        ...    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
        ...    ]
        >>> expected_pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(type='LoadAnnotations', with_bbox=True)
        ...    ]
        >>> assert expected_pipelines ==\
        ...        get_loading_pipeline(pipelines)
    """
    # TODO:use more elegant way to distinguish loading modules
    loading_pipeline_cfg = [
        cfg for cfg in pipeline
        if PIPELINES.get(cfg['type']) in (LoadImageFromFile, LoadAnnotations)
    ]
    assert len(loading_pipeline_cfg) == 2, \
        'The data pipeline in your config file must include ' \
        'loading image and annotations related pipeline.'
    return loading_pipeline_cfg
@HOOKS.register_module()
class NumClassCheckHook(Hook):
    """Hook that verifies each head's ``num_classes`` matches the length of
    ``CLASSES`` in the dataset before every train/val epoch."""

    def _check_head(self, runner):
        """Check whether the `num_classes` in head matches the length of
        `CLASSES` in `dataset`.

        Args:
            runner (obj:`EpochBasedRunner`): Epoch based Runner.
        """
        model = runner.model
        dataset = runner.data_loader.dataset
        if dataset.CLASSES is None:
            # Fix: the concatenated message fragments lacked separating
            # spaces ('...andcheck...').
            runner.logger.warning(
                f'Please set `CLASSES` '
                f'in the {dataset.__class__.__name__} and '
                f'check if it is consistent with the `num_classes` '
                f'of head')
        else:
            assert type(dataset.CLASSES) is not str, \
                (f'`CLASSES` in {dataset.__class__.__name__} '
                 f'should be a tuple of str. '
                 f'Add comma if number of classes is 1 as '
                 f'CLASSES = ({dataset.CLASSES},)')
            for name, module in model.named_modules():
                # RPN-style and semantic heads have their own class counts
                # unrelated to the dataset's CLASSES, so skip them.
                if hasattr(module, 'num_classes') and not isinstance(
                        module, (RPNHead, VGG, FusedSemanticHead, GARPNHead)):
                    # Fix: 'does not matches' grammar and the unbalanced
                    # parenthesis around len(dataset.CLASSES).
                    assert module.num_classes == len(dataset.CLASSES), \
                        (f'The `num_classes` ({module.num_classes}) in '
                         f'{module.__class__.__name__} of '
                         f'{model.__class__.__name__} does not match '
                         f'the length of `CLASSES` '
                         f'({len(dataset.CLASSES)}) in '
                         f'{dataset.__class__.__name__}')

    def before_train_epoch(self, runner):
        """Check whether the training dataset is compatible with head.

        Args:
            runner (obj:`EpochBasedRunner`): Epoch based Runner.
        """
        self._check_head(runner)

    def before_val_epoch(self, runner):
        """Check whether the dataset in val epoch is compatible with head.

        Args:
            runner (obj:`EpochBasedRunner`): Epoch based Runner.
        """
        self._check_head(runner)
| 6,486 | 38.554878 | 78 | py |
DDOD | DDOD-main/mmdet/datasets/dataset_wrappers.py | import bisect
import math
from collections import defaultdict
import numpy as np
from mmcv.utils import print_log
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
from .builder import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
    concat the group flag for image aspect ratio.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
        separate_eval (bool): Whether to evaluate the results
            separately if it is used as validation dataset.
            Defaults to True.
    """

    def __init__(self, datasets, separate_eval=True):
        super(ConcatDataset, self).__init__(datasets)
        self.CLASSES = datasets[0].CLASSES
        self.separate_eval = separate_eval
        if not separate_eval:
            # Whole-dataset evaluation only works for homogeneous,
            # non-COCO datasets; fail early otherwise.
            if any([isinstance(ds, CocoDataset) for ds in datasets]):
                raise NotImplementedError(
                    'Evaluating concatenated CocoDataset as a whole is not'
                    ' supported! Please set "separate_eval=True"')
            elif len(set([type(ds) for ds in datasets])) != 1:
                raise NotImplementedError(
                    'All the datasets should have same types')

        if hasattr(datasets[0], 'flag'):
            # Concatenate aspect-ratio group flags so GroupSampler still
            # works on the combined dataset.
            flags = []
            for i in range(0, len(datasets)):
                flags.append(datasets[i].flag)
            self.flag = np.concatenate(flags)

    def get_cat_ids(self, idx):
        """Get category ids of concatenated dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        if idx < 0:
            if -idx > len(self):
                raise ValueError(
                    'absolute value of index should not exceed dataset length')
            idx = len(self) + idx
        # Map the global index to (sub-dataset, local index) via the
        # cumulative sizes maintained by torch's ConcatDataset.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx].get_cat_ids(sample_idx)

    def evaluate(self, results, logger=None, **kwargs):
        """Evaluate the results.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str: float]: AP results of the total dataset or each separate
            dataset if `self.separate_eval=True`.
        """
        assert len(results) == self.cumulative_sizes[-1], \
            ('Dataset and results have different sizes: '
             f'{self.cumulative_sizes[-1]} v.s. {len(results)}')

        # Check whether all the datasets support evaluation
        for dataset in self.datasets:
            assert hasattr(dataset, 'evaluate'), \
                f'{type(dataset)} does not implement evaluate function'

        if self.separate_eval:
            dataset_idx = -1
            total_eval_results = dict()
            for size, dataset in zip(self.cumulative_sizes, self.datasets):
                # Slice out the results belonging to this sub-dataset.
                start_idx = 0 if dataset_idx == -1 else \
                    self.cumulative_sizes[dataset_idx]
                end_idx = self.cumulative_sizes[dataset_idx + 1]
                results_per_dataset = results[start_idx:end_idx]
                # Fixed typo in log message: "Evaluateing" -> "Evaluating".
                print_log(
                    f'\nEvaluating {dataset.ann_file} with '
                    f'{len(results_per_dataset)} images now',
                    logger=logger)

                eval_results_per_dataset = dataset.evaluate(
                    results_per_dataset, logger=logger, **kwargs)
                dataset_idx += 1
                # Prefix metrics with the sub-dataset index to avoid key
                # collisions between datasets.
                for k, v in eval_results_per_dataset.items():
                    total_eval_results.update({f'{dataset_idx}_{k}': v})

            return total_eval_results
        elif any([isinstance(ds, CocoDataset) for ds in self.datasets]):
            raise NotImplementedError(
                'Evaluating concatenated CocoDataset as a whole is not'
                ' supported! Please set "separate_eval=True"')
        elif len(set([type(ds) for ds in self.datasets])) != 1:
            raise NotImplementedError(
                'All the datasets should have same types')
        else:
            # Evaluate as one dataset: temporarily graft all data_infos onto
            # the first dataset, then restore them afterwards.
            original_data_infos = self.datasets[0].data_infos
            self.datasets[0].data_infos = sum(
                [dataset.data_infos for dataset in self.datasets], [])
            eval_results = self.datasets[0].evaluate(
                results, logger=logger, **kwargs)
            self.datasets[0].data_infos = original_data_infos
            return eval_results
@DATASETS.register_module()
class RepeatDataset:
    """Virtually repeat a dataset ``times`` times.

    The wrapper reports a length of ``times * len(dataset)`` and maps every
    index back onto the wrapped dataset with a modulo, so no data is copied.
    Useful for small datasets where per-epoch loading overhead dominates.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        self.CLASSES = dataset.CLASSES
        # Tile the aspect-ratio group flags, if present, to match the
        # repeated length.
        if hasattr(dataset, 'flag'):
            self.flag = np.tile(dataset.flag, times)

        self._ori_len = len(dataset)

    def __getitem__(self, idx):
        # Wrap the index back into the original dataset's range.
        wrapped = idx % self._ori_len
        return self.dataset[wrapped]

    def get_cat_ids(self, idx):
        """Get category ids of repeat dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        wrapped = idx % self._ori_len
        return self.dataset.get_cat_ids(wrapped)

    def __len__(self):
        """Length after repetition."""
        return self._ori_len * self.times
# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa
@DATASETS.register_module()
class ClassBalancedDataset:
    """Oversample images containing rare categories.

    Implements the repeat-factor sampling of the LVIS paper
    (`<https://arxiv.org/abs/1908.03195>`_): an image appears
    ``ceil(r(I))`` times per epoch, where

    1. ``f(c)`` is the fraction of images containing category ``c``;
    2. ``r(c) = max(1, sqrt(t / f(c)))`` is the category repeat factor;
    3. ``r(I) = max_{c in I} r(c)`` is the image repeat factor.

    The wrapped dataset must implement :func:`get_cat_ids`.

    Args:
        dataset (:obj:`CustomDataset`): The dataset to be repeated.
        oversample_thr (float): frequency threshold below which data is
            repeated. Categories with ``f_c >= oversample_thr`` are not
            oversampled; rarer ones are, following the square-root inverse
            frequency heuristic above.
        filter_empty_gt (bool, optional): If set true, images without bounding
            boxes are not oversampled. Otherwise, they are treated as a pure
            background class and take part in the oversampling.
            Default: True.
    """

    def __init__(self, dataset, oversample_thr, filter_empty_gt=True):
        self.dataset = dataset
        self.oversample_thr = oversample_thr
        self.filter_empty_gt = filter_empty_gt
        self.CLASSES = dataset.CLASSES

        factors = self._get_repeat_factors(dataset, oversample_thr)
        indices = []
        for src_idx, factor in enumerate(factors):
            indices += [src_idx] * math.ceil(factor)
        self.repeat_indices = indices

        # Duplicate the aspect-ratio group flags in lockstep with the
        # repeated indices.
        flags = []
        if hasattr(self.dataset, 'flag'):
            for src_flag, factor in zip(self.dataset.flag, factors):
                flags += [src_flag] * int(math.ceil(factor))
            assert len(flags) == len(indices)
        self.flag = np.asarray(flags, dtype=np.uint8)

    def _get_repeat_factors(self, dataset, repeat_thr):
        """Compute the repeat factor of every image in the dataset.

        Args:
            dataset (:obj:`CustomDataset`): The dataset
            repeat_thr (float): The threshold of frequency. If an image
                contains the categories whose frequency below the threshold,
                it would be repeated.

        Returns:
            list[float]: The repeat factors for each images in the dataset.
        """
        # Pass 1: per-category frequency f(c) over the whole dataset.
        freq = defaultdict(int)
        num_images = len(dataset)
        for img_idx in range(num_images):
            cat_ids = set(self.dataset.get_cat_ids(img_idx))
            if not cat_ids and not self.filter_empty_gt:
                # Empty images count as a synthetic "background" category.
                cat_ids = {len(self.CLASSES)}
            for cat_id in cat_ids:
                freq[cat_id] += 1
        for cat_id in freq:
            freq[cat_id] /= num_images

        # Category-level repeat factor: r(c) = max(1, sqrt(t / f(c))).
        cat_repeat = {
            cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
            for cat_id, cat_freq in freq.items()
        }

        # Pass 2: image-level factor r(I) = max over categories in I.
        factors = []
        for img_idx in range(num_images):
            cat_ids = set(self.dataset.get_cat_ids(img_idx))
            if not cat_ids and not self.filter_empty_gt:
                cat_ids = {len(self.CLASSES)}
            factor = 1
            if cat_ids:
                factor = max(cat_repeat[cat_id] for cat_id in cat_ids)
            factors.append(factor)
        return factors

    def __getitem__(self, idx):
        return self.dataset[self.repeat_indices[idx]]

    def __len__(self):
        """Length after repetition."""
        return len(self.repeat_indices)
| 11,072 | 38.127208 | 167 | py |
DDOD | DDOD-main/mmdet/datasets/xml_style.py | import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
import numpy as np
from PIL import Image
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class XMLDataset(CustomDataset):
    """XML dataset for detection.

    Loads Pascal-VOC style annotations: one XML file per image under
    ``Annotations/`` and images under ``JPEGImages/``.

    Args:
        min_size (int | float, optional): The minimum size of bounding
            boxes in the images. If the size of a bounding box is less than
            ``min_size``, it would be add to ignored field.
    """

    def __init__(self, min_size=None, **kwargs):
        # CLASSES must be available (set on a subclass or passed via the
        # `classes` kwarg) before building the category-to-label mapping.
        assert self.CLASSES or kwargs.get(
            'classes', None), 'CLASSES in `XMLDataset` can not be None.'
        super(XMLDataset, self).__init__(**kwargs)
        self.cat2label = {cat: i for i, cat in enumerate(self.CLASSES)}
        self.min_size = min_size

    def load_annotations(self, ann_file):
        """Load annotation from XML style ann_file.

        Args:
            ann_file (str): Path of XML file.

        Returns:
            list[dict]: Annotation info from XML file.
        """
        data_infos = []
        # `ann_file` is a plain text file with one image id per line.
        img_ids = mmcv.list_from_file(ann_file)
        for img_id in img_ids:
            filename = f'JPEGImages/{img_id}.jpg'
            xml_path = osp.join(self.img_prefix, 'Annotations',
                                f'{img_id}.xml')
            tree = ET.parse(xml_path)
            root = tree.getroot()
            size = root.find('size')
            if size is not None:
                width = int(size.find('width').text)
                height = int(size.find('height').text)
            else:
                # Fall back to reading the image header when the XML has no
                # <size> element.
                img_path = osp.join(self.img_prefix, 'JPEGImages',
                                    '{}.jpg'.format(img_id))
                img = Image.open(img_path)
                width, height = img.size
            data_infos.append(
                dict(id=img_id, filename=filename, width=width, height=height))

        return data_infos

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without annotation."""
        valid_inds = []
        for i, img_info in enumerate(self.data_infos):
            if min(img_info['width'], img_info['height']) < min_size:
                continue
            if self.filter_empty_gt:
                # Keep the image only if it contains at least one object of
                # a known class.
                img_id = img_info['id']
                xml_path = osp.join(self.img_prefix, 'Annotations',
                                    f'{img_id}.xml')
                tree = ET.parse(xml_path)
                root = tree.getroot()
                for obj in root.findall('object'):
                    name = obj.find('name').text
                    if name in self.CLASSES:
                        valid_inds.append(i)
                        break
            else:
                valid_inds.append(i)
        return valid_inds

    def get_ann_info(self, idx):
        """Get annotation from XML file by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        img_id = self.data_infos[idx]['id']
        xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
        tree = ET.parse(xml_path)
        root = tree.getroot()
        bboxes = []
        labels = []
        bboxes_ignore = []
        labels_ignore = []
        for obj in root.findall('object'):
            name = obj.find('name').text
            # Objects of unknown classes are silently dropped.
            if name not in self.CLASSES:
                continue
            label = self.cat2label[name]
            # <difficult> is optional; treat a missing element as 0.
            difficult = obj.find('difficult')
            difficult = 0 if difficult is None else int(difficult.text)
            bnd_box = obj.find('bndbox')
            # TODO: check whether it is necessary to use int
            # Coordinates may be float type
            bbox = [
                int(float(bnd_box.find('xmin').text)),
                int(float(bnd_box.find('ymin').text)),
                int(float(bnd_box.find('xmax').text)),
                int(float(bnd_box.find('ymax').text))
            ]
            ignore = False
            if self.min_size:
                # Size-based ignoring is only meant for training data.
                assert not self.test_mode
                w = bbox[2] - bbox[0]
                h = bbox[3] - bbox[1]
                if w < self.min_size or h < self.min_size:
                    ignore = True
            # Difficult or undersized boxes go into the *_ignore arrays.
            if difficult or ignore:
                bboxes_ignore.append(bbox)
                labels_ignore.append(label)
            else:
                bboxes.append(bbox)
                labels.append(label)
        if not bboxes:
            bboxes = np.zeros((0, 4))
            labels = np.zeros((0, ))
        else:
            # `- 1` converts 1-based VOC coordinates to 0-based;
            # ndmin=2 guarantees shape (N, 4) even for a single box.
            bboxes = np.array(bboxes, ndmin=2) - 1
            labels = np.array(labels)
        if not bboxes_ignore:
            bboxes_ignore = np.zeros((0, 4))
            labels_ignore = np.zeros((0, ))
        else:
            bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
            labels_ignore = np.array(labels_ignore)
        ann = dict(
            bboxes=bboxes.astype(np.float32),
            labels=labels.astype(np.int64),
            bboxes_ignore=bboxes_ignore.astype(np.float32),
            labels_ignore=labels_ignore.astype(np.int64))
        return ann

    def get_cat_ids(self, idx):
        """Get category ids in XML file by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        cat_ids = []
        img_id = self.data_infos[idx]['id']
        xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
        tree = ET.parse(xml_path)
        root = tree.getroot()
        for obj in root.findall('object'):
            name = obj.find('name').text
            if name not in self.CLASSES:
                continue
            label = self.cat2label[name]
            cat_ids.append(label)

        return cat_ids
| 5,886 | 33.426901 | 79 | py |
DDOD | DDOD-main/mmdet/datasets/__init__.py | from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .custom import CustomDataset
from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
RepeatDataset)
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
from .utils import (NumClassCheckHook, get_loading_pipeline,
replace_ImageToTensor)
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .crowdhuman import CrowdhumanDataset
# Public API of the `mmdet.datasets` package; controls what
# `from mmdet.datasets import *` exports.
__all__ = [
    'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset',
    'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset',
    'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler',
    'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
    'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES',
    'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline',
    'NumClassCheckHook', 'CrowdhumanDataset'
]
| 1,219 | 45.923077 | 79 | py |
DDOD | DDOD-main/mmdet/datasets/lvis.py | import itertools
import logging
import os.path as osp
import tempfile
import warnings
from collections import OrderedDict
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from .builder import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class LVISV05Dataset(CocoDataset):
CLASSES = (
'acorn', 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock',
'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet',
'antenna', 'apple', 'apple_juice', 'applesauce', 'apricot', 'apron',
'aquarium', 'armband', 'armchair', 'armoire', 'armor', 'artichoke',
'trash_can', 'ashtray', 'asparagus', 'atomizer', 'avocado', 'award',
'awning', 'ax', 'baby_buggy', 'basketball_backboard', 'backpack',
'handbag', 'suitcase', 'bagel', 'bagpipe', 'baguet', 'bait', 'ball',
'ballet_skirt', 'balloon', 'bamboo', 'banana', 'Band_Aid', 'bandage',
'bandanna', 'banjo', 'banner', 'barbell', 'barge', 'barrel',
'barrette', 'barrow', 'baseball_base', 'baseball', 'baseball_bat',
'baseball_cap', 'baseball_glove', 'basket', 'basketball_hoop',
'basketball', 'bass_horn', 'bat_(animal)', 'bath_mat', 'bath_towel',
'bathrobe', 'bathtub', 'batter_(food)', 'battery', 'beachball', 'bead',
'beaker', 'bean_curd', 'beanbag', 'beanie', 'bear', 'bed',
'bedspread', 'cow', 'beef_(food)', 'beeper', 'beer_bottle', 'beer_can',
'beetle', 'bell', 'bell_pepper', 'belt', 'belt_buckle', 'bench',
'beret', 'bib', 'Bible', 'bicycle', 'visor', 'binder', 'binoculars',
'bird', 'birdfeeder', 'birdbath', 'birdcage', 'birdhouse',
'birthday_cake', 'birthday_card', 'biscuit_(bread)', 'pirate_flag',
'black_sheep', 'blackboard', 'blanket', 'blazer', 'blender', 'blimp',
'blinker', 'blueberry', 'boar', 'gameboard', 'boat', 'bobbin',
'bobby_pin', 'boiled_egg', 'bolo_tie', 'deadbolt', 'bolt', 'bonnet',
'book', 'book_bag', 'bookcase', 'booklet', 'bookmark',
'boom_microphone', 'boot', 'bottle', 'bottle_opener', 'bouquet',
'bow_(weapon)', 'bow_(decorative_ribbons)', 'bow-tie', 'bowl',
'pipe_bowl', 'bowler_hat', 'bowling_ball', 'bowling_pin',
'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere',
'bread-bin', 'breechcloth', 'bridal_gown', 'briefcase',
'bristle_brush', 'broccoli', 'broach', 'broom', 'brownie',
'brussels_sprouts', 'bubble_gum', 'bucket', 'horse_buggy', 'bull',
'bulldog', 'bulldozer', 'bullet_train', 'bulletin_board',
'bulletproof_vest', 'bullhorn', 'corned_beef', 'bun', 'bunk_bed',
'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butcher_knife',
'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car',
'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf',
'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)',
'can', 'can_opener', 'candelabrum', 'candle', 'candle_holder',
'candy_bar', 'candy_cane', 'walking_cane', 'canister', 'cannon',
'canoe', 'cantaloup', 'canteen', 'cap_(headwear)', 'bottle_cap',
'cape', 'cappuccino', 'car_(automobile)', 'railcar_(part_of_a_train)',
'elevator_car', 'car_battery', 'identity_card', 'card', 'cardigan',
'cargo_ship', 'carnation', 'horse_carriage', 'carrot', 'tote_bag',
'cart', 'carton', 'cash_register', 'casserole', 'cassette', 'cast',
'cat', 'cauliflower', 'caviar', 'cayenne_(spice)', 'CD_player',
'celery', 'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue',
'champagne', 'chandelier', 'chap', 'checkbook', 'checkerboard',
'cherry', 'chessboard', 'chest_of_drawers_(furniture)',
'chicken_(animal)', 'chicken_wire', 'chickpea', 'Chihuahua',
'chili_(vegetable)', 'chime', 'chinaware', 'crisp_(potato_chip)',
'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk',
'chocolate_mousse', 'choker', 'chopping_board', 'chopstick',
'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette',
'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent',
'clementine', 'clip', 'clipboard', 'clock', 'clock_tower',
'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', 'coat',
'coat_hanger', 'coatrack', 'cock', 'coconut', 'coffee_filter',
'coffee_maker', 'coffee_table', 'coffeepot', 'coil', 'coin',
'colander', 'coleslaw', 'coloring_material', 'combination_lock',
'pacifier', 'comic_book', 'computer_keyboard', 'concrete_mixer',
'cone', 'control', 'convertible_(automobile)', 'sofa_bed', 'cookie',
'cookie_jar', 'cooking_utensil', 'cooler_(for_food)',
'cork_(bottle_plug)', 'corkboard', 'corkscrew', 'edible_corn',
'cornbread', 'cornet', 'cornice', 'cornmeal', 'corset',
'romaine_lettuce', 'costume', 'cougar', 'coverall', 'cowbell',
'cowboy_hat', 'crab_(animal)', 'cracker', 'crape', 'crate', 'crayon',
'cream_pitcher', 'credit_card', 'crescent_roll', 'crib', 'crock_pot',
'crossbar', 'crouton', 'crow', 'crown', 'crucifix', 'cruise_ship',
'police_cruiser', 'crumb', 'crutch', 'cub_(animal)', 'cube',
'cucumber', 'cufflink', 'cup', 'trophy_cup', 'cupcake', 'hair_curler',
'curling_iron', 'curtain', 'cushion', 'custard', 'cutting_tool',
'cylinder', 'cymbal', 'dachshund', 'dagger', 'dartboard',
'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',
'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux',
'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',
'dishwasher_detergent', 'diskette', 'dispenser', 'Dixie_cup', 'dog',
'dog_collar', 'doll', 'dollar', 'dolphin', 'domestic_ass', 'eye_mask',
'doorbell', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly',
'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit',
'dresser', 'drill', 'drinking_fountain', 'drone', 'dropper',
'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling',
'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan',
'Dutch_oven', 'eagle', 'earphone', 'earplug', 'earring', 'easel',
'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater',
'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk',
'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan',
'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)',
'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm',
'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace',
'fireplug', 'fish', 'fish_(food)', 'fishbowl', 'fishing_boat',
'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flash',
'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)',
'flower_arrangement', 'flute_glass', 'foal', 'folding_chair',
'food_processor', 'football_(American)', 'football_helmet',
'footstool', 'fork', 'forklift', 'freight_car', 'French_toast',
'freshener', 'frisbee', 'frog', 'fruit_juice', 'fruit_salad',
'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage',
'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic',
'gasmask', 'gazelle', 'gelatin', 'gemstone', 'giant_panda',
'gift_wrap', 'ginger', 'giraffe', 'cincture',
'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles',
'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose',
'gorilla', 'gourd', 'surgical_gown', 'grape', 'grasshopper', 'grater',
'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle',
'grillroom', 'grinder_(tool)', 'grits', 'grizzly', 'grocery_bag',
'guacamole', 'guitar', 'gull', 'gun', 'hair_spray', 'hairbrush',
'hairnet', 'hairpin', 'ham', 'hamburger', 'hammer', 'hammock',
'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel',
'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw',
'hardback_book', 'harmonium', 'hat', 'hatbox', 'hatch', 'veil',
'headband', 'headboard', 'headlight', 'headscarf', 'headset',
'headstall_(for_horses)', 'hearing_aid', 'heart', 'heater',
'helicopter', 'helmet', 'heron', 'highchair', 'hinge', 'hippopotamus',
'hockey_stick', 'hog', 'home_plate_(baseball)', 'honey', 'fume_hood',
'hook', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',
'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',
'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',
'ice_tea', 'igniter', 'incense', 'inhaler', 'iPod',
'iron_(for_clothing)', 'ironing_board', 'jacket', 'jam', 'jean',
'jeep', 'jelly_bean', 'jersey', 'jet_plane', 'jewelry', 'joystick',
'jumpsuit', 'kayak', 'keg', 'kennel', 'kettle', 'key', 'keycard',
'kilt', 'kimono', 'kitchen_sink', 'kitchen_table', 'kite', 'kitten',
'kiwi_fruit', 'knee_pad', 'knife', 'knight_(chess_piece)',
'knitting_needle', 'knob', 'knocker_(on_a_door)', 'koala', 'lab_coat',
'ladder', 'ladle', 'ladybug', 'lamb_(animal)', 'lamb-chop', 'lamp',
'lamppost', 'lampshade', 'lantern', 'lanyard', 'laptop_computer',
'lasagna', 'latch', 'lawn_mower', 'leather', 'legging_(clothing)',
'Lego', 'lemon', 'lemonade', 'lettuce', 'license_plate', 'life_buoy',
'life_jacket', 'lightbulb', 'lightning_rod', 'lime', 'limousine',
'linen_paper', 'lion', 'lip_balm', 'lipstick', 'liquor', 'lizard',
'Loafer_(type_of_shoe)', 'log', 'lollipop', 'lotion',
'speaker_(stereo_equipment)', 'loveseat', 'machine_gun', 'magazine',
'magnet', 'mail_slot', 'mailbox_(at_home)', 'mallet', 'mammoth',
'mandarin_orange', 'manger', 'manhole', 'map', 'marker', 'martini',
'mascot', 'mashed_potato', 'masher', 'mask', 'mast',
'mat_(gym_equipment)', 'matchbox', 'mattress', 'measuring_cup',
'measuring_stick', 'meatball', 'medicine', 'melon', 'microphone',
'microscope', 'microwave_oven', 'milestone', 'milk', 'minivan',
'mint_candy', 'mirror', 'mitten', 'mixer_(kitchen_tool)', 'money',
'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor',
'motor_scooter', 'motor_vehicle', 'motorboat', 'motorcycle',
'mound_(baseball)', 'mouse_(animal_rodent)',
'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom',
'music_stool', 'musical_instrument', 'nailfile', 'nameplate', 'napkin',
'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newsstand',
'nightshirt', 'nosebag_(for_animals)', 'noseband_(for_animals)',
'notebook', 'notepad', 'nut', 'nutcracker', 'oar', 'octopus_(food)',
'octopus_(animal)', 'oil_lamp', 'olive_oil', 'omelet', 'onion',
'orange_(fruit)', 'orange_juice', 'oregano', 'ostrich', 'ottoman',
'overalls_(clothing)', 'owl', 'packet', 'inkpad', 'pad', 'paddle',
'padlock', 'paintbox', 'paintbrush', 'painting', 'pajamas', 'palette',
'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', 'pantyhose',
'papaya', 'paperclip', 'paper_plate', 'paper_towel', 'paperback_book',
'paperweight', 'parachute', 'parakeet', 'parasail_(sports)',
'parchment', 'parka', 'parking_meter', 'parrot',
'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',
'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',
'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'pegboard',
'pelican', 'pen', 'pencil', 'pencil_box', 'pencil_sharpener',
'pendulum', 'penguin', 'pennant', 'penny_(coin)', 'pepper',
'pepper_mill', 'perfume', 'persimmon', 'baby', 'pet', 'petfood',
'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',
'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',
'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',
'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',
'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',
'plate', 'platter', 'playing_card', 'playpen', 'pliers',
'plow_(farm_equipment)', 'pocket_watch', 'pocketknife',
'poker_(fire_stirring_tool)', 'pole', 'police_van', 'polo_shirt',
'poncho', 'pony', 'pool_table', 'pop_(soda)', 'portrait',
'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato',
'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'printer',
'projectile_(weapon)', 'projector', 'propeller', 'prune', 'pudding',
'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher', 'puppet',
'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit', 'race_car',
'racket', 'radar', 'radiator', 'radio_receiver', 'radish', 'raft',
'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat',
'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',
'recliner', 'record_player', 'red_cabbage', 'reflector',
'remote_control', 'rhinoceros', 'rib_(food)', 'rifle', 'ring',
'river_boat', 'road_map', 'robe', 'rocking_chair', 'roller_skate',
'Rollerblade', 'rolling_pin', 'root_beer',
'router_(computer_equipment)', 'rubber_band', 'runner_(carpet)',
'plastic_bag', 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag',
'safety_pin', 'sail', 'salad', 'salad_plate', 'salami',
'salmon_(fish)', 'salmon_(food)', 'salsa', 'saltshaker',
'sandal_(type_of_shoe)', 'sandwich', 'satchel', 'saucepan', 'saucer',
'sausage', 'sawhorse', 'saxophone', 'scale_(measuring_instrument)',
'scarecrow', 'scarf', 'school_bus', 'scissors', 'scoreboard',
'scrambled_eggs', 'scraper', 'scratcher', 'screwdriver',
'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',
'seashell', 'seedling', 'serving_dish', 'sewing_machine', 'shaker',
'shampoo', 'shark', 'sharpener', 'Sharpie', 'shaver_(electric)',
'shaving_cream', 'shawl', 'shears', 'sheep', 'shepherd_dog',
'sherbert', 'shield', 'shirt', 'shoe', 'shopping_bag', 'shopping_cart',
'short_pants', 'shot_glass', 'shoulder_bag', 'shovel', 'shower_head',
'shower_curtain', 'shredder_(for_paper)', 'sieve', 'signboard', 'silo',
'sink', 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka',
'ski_pole', 'skirt', 'sled', 'sleeping_bag', 'sling_(bandage)',
'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',
'snowmobile', 'soap', 'soccer_ball', 'sock', 'soda_fountain',
'carbonated_water', 'sofa', 'softball', 'solar_array', 'sombrero',
'soup', 'soup_bowl', 'soupspoon', 'sour_cream', 'soya_milk',
'space_shuttle', 'sparkler_(fireworks)', 'spatula', 'spear',
'spectacles', 'spice_rack', 'spider', 'sponge', 'spoon', 'sportswear',
'spotlight', 'squirrel', 'stapler_(stapling_machine)', 'starfish',
'statue_(sculpture)', 'steak_(food)', 'steak_knife',
'steamer_(kitchen_appliance)', 'steering_wheel', 'stencil',
'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer',
'stirrup', 'stockings_(leg_wear)', 'stool', 'stop_sign', 'brake_light',
'stove', 'strainer', 'strap', 'straw_(for_drinking)', 'strawberry',
'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer',
'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower',
'sunglasses', 'sunhat', 'sunscreen', 'surfboard', 'sushi', 'mop',
'sweat_pants', 'sweatband', 'sweater', 'sweatshirt', 'sweet_potato',
'swimsuit', 'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table',
'table', 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag',
'taillight', 'tambourine', 'army_tank', 'tank_(storage_vessel)',
'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',
'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',
'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',
'telephone_pole', 'telephoto_lens', 'television_camera',
'television_set', 'tennis_ball', 'tennis_racket', 'tequila',
'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',
'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil',
'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven',
'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush',
'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel',
'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light',
'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline',
'tray', 'tree_house', 'trench_coat', 'triangle_(musical_instrument)',
'tricycle', 'tripod', 'trousers', 'truck', 'truffle_(chocolate)',
'trunk', 'vat', 'turban', 'turkey_(bird)', 'turkey_(food)', 'turnip',
'turtle', 'turtleneck_(clothing)', 'typewriter', 'umbrella',
'underwear', 'unicycle', 'urinal', 'urn', 'vacuum_cleaner', 'valve',
'vase', 'vending_machine', 'vent', 'videotape', 'vinegar', 'violin',
'vodka', 'volleyball', 'vulture', 'waffle', 'waffle_iron', 'wagon',
'wagon_wheel', 'walking_stick', 'wall_clock', 'wall_socket', 'wallet',
'walrus', 'wardrobe', 'wasabi', 'automatic_washer', 'watch',
'water_bottle', 'water_cooler', 'water_faucet', 'water_filter',
'water_heater', 'water_jug', 'water_gun', 'water_scooter', 'water_ski',
'water_tower', 'watering_can', 'watermelon', 'weathervane', 'webcam',
'wedding_cake', 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair',
'whipped_cream', 'whiskey', 'whistle', 'wick', 'wig', 'wind_chime',
'windmill', 'window_box_(for_plants)', 'windshield_wiper', 'windsock',
'wine_bottle', 'wine_bucket', 'wineglass', 'wing_chair',
'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon', 'wreath',
'wrench', 'wristband', 'wristlet', 'yacht', 'yak', 'yogurt',
'yoke_(animal_equipment)', 'zebra', 'zucchini')
    def load_annotations(self, ann_file):
        """Load annotation from lvis style annotation file.

        Args:
            ann_file (str): Path of annotation file.

        Returns:
            list[dict]: Annotation info from LVIS api.
        """
        try:
            import lvis
            # NOTE(review): this is a *lexicographic* string comparison, not
            # a real version comparison. It appears intended to detect the
            # deprecated mmlvis fork (versioned '10.5.3') as opposed to the
            # official lvis-api — confirm it cannot misfire on other
            # version strings (e.g. '2.0.0' >= '10.5.3' is True).
            if getattr(lvis, '__version__', '0') >= '10.5.3':
                warnings.warn(
                    'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"',  # noqa: E501
                    UserWarning)
            from lvis import LVIS
        except ImportError:
            raise ImportError(
                'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".'  # noqa: E501
            )
        self.coco = LVIS(ann_file)
        self.cat_ids = self.coco.get_cat_ids()
        # Map dataset category ids to contiguous 0-based labels.
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            if info['file_name'].startswith('COCO'):
                # Convert form the COCO 2014 file naming convention of
                # COCO_[train/val/test]2014_000000000000.jpg to the 2017
                # naming convention of 000000000000.jpg
                # (LVIS v1 will fix this naming issue)
                info['filename'] = info['file_name'][-16:]
            else:
                info['filename'] = info['file_name']
            data_infos.append(info)
        return data_infos
    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluation in LVIS protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): Prefix (including path) of the
                output json files. If None, a temporary directory is used
                and cleaned up at the end. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU thresholds used for the
                evaluation. Default: np.arange(0.5, 0.96, 0.05), i.e. the
                standard 0.5:0.95 sweep.

        Returns:
            dict[str, float]: LVIS style metrics.
        """
        try:
            import lvis
            # mmlvis re-uses the module name `lvis` with version >= 10.5.3;
            # steer users to the official lvis-api instead.
            # NOTE(review): plain string comparison, not a semantic-version
            # compare — adequate only for the known version values.
            if getattr(lvis, '__version__', '0') >= '10.5.3':
                warnings.warn(
                    'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"',  # noqa: E501
                    UserWarning)
            from lvis import LVISResults, LVISEval
        except ImportError:
            raise ImportError(
                'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".'  # noqa: E501
            )
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))
        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError('metric {} is not supported'.format(metric))
        # Dump detections to json first; use a temp dir when no explicit
        # prefix was requested so the files are cleaned up before returning.
        if jsonfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2json(results, jsonfile_prefix)
        eval_results = OrderedDict()
        # get original api
        lvis_gt = self.coco
        for metric in metrics:
            msg = 'Evaluating {}...'.format(metric)
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)
            # 'proposal_fast' computes recall directly from the raw boxes
            # without going through the LVIS API.
            if metric == 'proposal_fast':
                ar = self.fast_eval_recall(
                    results, proposal_nums, iou_thrs, logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results['AR@{}'.format(num)] = ar[i]
                    log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i]))
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue
            if metric not in result_files:
                raise KeyError('{} is not in results'.format(metric))
            try:
                lvis_dt = LVISResults(lvis_gt, result_files[metric])
            except IndexError:
                # LVISResults raises IndexError on an empty result file.
                print_log(
                    'The testing results of the whole dataset is empty.',
                    logger=logger,
                    level=logging.ERROR)
                break
            iou_type = 'bbox' if metric == 'proposal' else metric
            lvis_eval = LVISEval(lvis_gt, lvis_dt, iou_type)
            lvis_eval.params.imgIds = self.img_ids
            if metric == 'proposal':
                # Class-agnostic proposal evaluation: only AR is reported.
                lvis_eval.params.useCats = 0
                lvis_eval.params.maxDets = list(proposal_nums)
                lvis_eval.evaluate()
                lvis_eval.accumulate()
                lvis_eval.summarize()
                for k, v in lvis_eval.get_results().items():
                    if k.startswith('AR'):
                        val = float('{:.3f}'.format(float(v)))
                        eval_results[k] = val
            else:
                lvis_eval.evaluate()
                lvis_eval.accumulate()
                lvis_eval.summarize()
                lvis_results = lvis_eval.get_results()
                if classwise:  # Compute per-category AP
                    # Compute per-category AP
                    # from https://github.com/facebookresearch/detectron2/
                    precisions = lvis_eval.eval['precision']
                    # precision: (iou, recall, cls, area range, max dets)
                    assert len(self.cat_ids) == precisions.shape[2]
                    results_per_category = []
                    for idx, catId in enumerate(self.cat_ids):
                        # area range index 0: all area ranges
                        # max dets index -1: typically 100 per image
                        nm = self.coco.load_cats(catId)[0]
                        precision = precisions[:, :, idx, 0, -1]
                        # -1 marks "no prediction"; exclude from the mean.
                        precision = precision[precision > -1]
                        if precision.size:
                            ap = np.mean(precision)
                        else:
                            ap = float('nan')
                        results_per_category.append(
                            (f'{nm["name"]}', f'{float(ap):0.3f}'))
                    # Lay the per-category APs out as a table of up to
                    # 3 (category, AP) column pairs.
                    num_columns = min(6, len(results_per_category) * 2)
                    results_flatten = list(
                        itertools.chain(*results_per_category))
                    headers = ['category', 'AP'] * (num_columns // 2)
                    results_2d = itertools.zip_longest(*[
                        results_flatten[i::num_columns]
                        for i in range(num_columns)
                    ])
                    table_data = [headers]
                    table_data += [result for result in results_2d]
                    table = AsciiTable(table_data)
                    print_log('\n' + table.table, logger=logger)
                for k, v in lvis_results.items():
                    if k.startswith('AP'):
                        key = '{}_{}'.format(metric, k)
                        val = float('{:.3f}'.format(float(v)))
                        eval_results[key] = val
                # One-line summary convenient for copy-pasting into reports.
                ap_summary = ' '.join([
                    '{}:{:.3f}'.format(k, float(v))
                    for k, v in lvis_results.items() if k.startswith('AP')
                ])
                eval_results['{}_mAP_copypaste'.format(metric)] = ap_summary
            lvis_eval.print_results()
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
# Keep the generic name `LVISDataset` as an alias of the v0.5 dataset and
# register it, so configs using either name resolve to the same class.
LVISDataset = LVISV05Dataset
DATASETS.register_module(name='LVISDataset', module=LVISDataset)
@DATASETS.register_module()
class LVISV1Dataset(LVISDataset):
CLASSES = (
'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol',
'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', 'antenna',
'apple', 'applesauce', 'apricot', 'apron', 'aquarium',
'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor',
'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer',
'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy',
'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel',
'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon',
'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo',
'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow',
'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap',
'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)',
'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)',
'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie',
'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper',
'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt',
'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor',
'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath',
'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card',
'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket',
'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry',
'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg',
'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase',
'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle',
'bottle_opener', 'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)',
'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'box',
'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere',
'bread-bin', 'bread', 'breechcloth', 'bridal_gown', 'briefcase',
'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts',
'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog', 'bulldozer',
'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn',
'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)', 'business_card',
'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car',
'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf',
'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)',
'can', 'can_opener', 'candle', 'candle_holder', 'candy_bar',
'candy_cane', 'walking_cane', 'canister', 'canoe', 'cantaloup',
'canteen', 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino',
'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car',
'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship',
'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton',
'cash_register', 'casserole', 'cassette', 'cast', 'cat', 'cauliflower',
'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone',
'chain_mail', 'chair', 'chaise_longue', 'chalice', 'chandelier',
'chap', 'checkbook', 'checkerboard', 'cherry', 'chessboard',
'chicken_(animal)', 'chickpea', 'chili_(vegetable)', 'chime',
'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar',
'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker',
'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider',
'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet',
'clasp', 'cleansing_agent', 'cleat_(for_securing_rope)', 'clementine',
'clip', 'clipboard', 'clippers_(for_plants)', 'cloak', 'clock',
'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster',
'coat', 'coat_hanger', 'coatrack', 'cock', 'cockroach',
'cocoa_(beverage)', 'coconut', 'coffee_maker', 'coffee_table',
'coffeepot', 'coil', 'coin', 'colander', 'coleslaw',
'coloring_material', 'combination_lock', 'pacifier', 'comic_book',
'compass', 'computer_keyboard', 'condiment', 'cone', 'control',
'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie',
'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)',
'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet',
'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall',
'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker',
'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib',
'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown',
'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch',
'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup',
'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain',
'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 'dartboard',
'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',
'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux',
'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',
'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup',
'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin',
'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly',
'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit',
'dresser', 'drill', 'drone', 'dropper', 'drum_(musical_instrument)',
'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell',
'dumpster', 'dustpan', 'eagle', 'earphone', 'earplug', 'earring',
'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater',
'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk',
'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan',
'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)',
'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm',
'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace',
'fireplug', 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl',
'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flap',
'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)',
'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal',
'folding_chair', 'food_processor', 'football_(American)',
'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car',
'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice',
'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage',
'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic',
'gasmask', 'gazelle', 'gelatin', 'gemstone', 'generator',
'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture',
'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles',
'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose',
'gorilla', 'gourd', 'grape', 'grater', 'gravestone', 'gravy_boat',
'green_bean', 'green_onion', 'griddle', 'grill', 'grits', 'grizzly',
'grocery_bag', 'guitar', 'gull', 'gun', 'hairbrush', 'hairnet',
'hairpin', 'halter_top', 'ham', 'hamburger', 'hammer', 'hammock',
'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel',
'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw',
'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil', 'headband',
'headboard', 'headlight', 'headscarf', 'headset',
'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet',
'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog',
'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah',
'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',
'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',
'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',
'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board',
'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey',
'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak',
'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono',
'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit',
'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)',
'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)',
'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard',
'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather',
'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade', 'lettuce',
'license_plate', 'life_buoy', 'life_jacket', 'lightbulb',
'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor',
'lizard', 'log', 'lollipop', 'speaker_(stereo_equipment)', 'loveseat',
'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)',
'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange', 'manger',
'manhole', 'map', 'marker', 'martini', 'mascot', 'mashed_potato',
'masher', 'mask', 'mast', 'mat_(gym_equipment)', 'matchbox',
'mattress', 'measuring_cup', 'measuring_stick', 'meatball', 'medicine',
'melon', 'microphone', 'microscope', 'microwave_oven', 'milestone',
'milk', 'milk_can', 'milkshake', 'minivan', 'mint_candy', 'mirror',
'mitten', 'mixer_(kitchen_tool)', 'money',
'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor',
'motor_scooter', 'motor_vehicle', 'motorcycle', 'mound_(baseball)',
'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom',
'music_stool', 'musical_instrument', 'nailfile', 'napkin',
'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newspaper',
'newsstand', 'nightshirt', 'nosebag_(for_animals)',
'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker',
'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil',
'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'ostrich',
'ottoman', 'oven', 'overalls_(clothing)', 'owl', 'packet', 'inkpad',
'pad', 'paddle', 'padlock', 'paintbrush', 'painting', 'pajamas',
'palette', 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake',
'pantyhose', 'papaya', 'paper_plate', 'paper_towel', 'paperback_book',
'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', 'parasol',
'parchment', 'parka', 'parking_meter', 'parrot',
'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',
'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',
'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg',
'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box',
'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)',
'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet',
'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',
'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',
'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',
'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',
'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',
'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)',
'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)',
'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)',
'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato',
'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'pretzel',
'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune',
'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher',
'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit',
'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish',
'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat',
'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',
'recliner', 'record_player', 'reflector', 'remote_control',
'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map',
'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade',
'rolling_pin', 'root_beer', 'router_(computer_equipment)',
'rubber_band', 'runner_(carpet)', 'plastic_bag',
'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin',
'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)',
'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)',
'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse',
'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf',
'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver',
'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',
'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark',
'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl',
'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt',
'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass',
'shoulder_bag', 'shovel', 'shower_head', 'shower_cap',
'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink',
'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole',
'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)',
'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',
'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball',
'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon',
'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)',
'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish',
'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)',
'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish',
'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel',
'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer',
'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove', 'strainer',
'strap', 'straw_(for_drinking)', 'strawberry', 'street_sign',
'streetlight', 'string_cheese', 'stylus', 'subwoofer', 'sugar_bowl',
'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', 'sunglasses',
'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants', 'sweatband',
'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit', 'sword',
'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table',
'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight',
'tambourine', 'army_tank', 'tank_(storage_vessel)',
'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',
'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',
'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',
'telephone_pole', 'telephoto_lens', 'television_camera',
'television_set', 'tennis_ball', 'tennis_racket', 'tequila',
'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',
'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil',
'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven',
'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush',
'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel',
'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light',
'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline',
'tray', 'trench_coat', 'triangle_(musical_instrument)', 'tricycle',
'tripod', 'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat',
'turban', 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)',
'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn',
'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest',
'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture',
'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick',
'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe',
'washbasin', 'automatic_washer', 'watch', 'water_bottle',
'water_cooler', 'water_faucet', 'water_heater', 'water_jug',
'water_gun', 'water_scooter', 'water_ski', 'water_tower',
'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake',
'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream',
'whistle', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)',
'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket',
'wineglass', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon',
'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt',
'yoke_(animal_equipment)', 'zebra', 'zucchini')
def load_annotations(self, ann_file):
try:
import lvis
if getattr(lvis, '__version__', '0') >= '10.5.3':
warnings.warn(
'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501
UserWarning)
from lvis import LVIS
except ImportError:
raise ImportError(
'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".' # noqa: E501
)
self.coco = LVIS(ann_file)
self.cat_ids = self.coco.get_cat_ids()
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
# coco_url is used in LVISv1 instead of file_name
# e.g. http://images.cocodataset.org/train2017/000000391895.jpg
# train/val split in specified in url
info['filename'] = info['coco_url'].replace(
'http://images.cocodataset.org/', '')
data_infos.append(info)
return data_infos
| 46,136 | 61.51626 | 157 | py |
DDOD | DDOD-main/mmdet/datasets/builder.py | import copy
import platform
import random
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from torch.utils.data import DataLoader
from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    # Raise the soft limit on open file descriptors (dataloader workers can
    # hit the default); capped at 4096 and at the current hard limit.
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    hard_limit = rlimit[1]
    soft_limit = min(4096, hard_limit)
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
# Registries used to look up dataset and pipeline classes by the string
# `type` field of a config dict (see `build_dataset` / `build_from_cfg`).
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
def _concat_dataset(cfg, default_args=None):
    """Build a ``ConcatDataset`` from a config whose ``ann_file`` is a list.

    Each entry of ``ann_file`` (and, when given as lists, of ``img_prefix``,
    ``seg_prefix`` and ``proposal_file``) produces one sub-dataset.
    """
    from .dataset_wrappers import ConcatDataset
    ann_files = cfg['ann_file']
    img_prefixes = cfg.get('img_prefix', None)
    seg_prefixes = cfg.get('seg_prefix', None)
    proposal_files = cfg.get('proposal_file', None)
    separate_eval = cfg.get('separate_eval', True)
    datasets = []
    for idx, ann_file in enumerate(ann_files):
        data_cfg = copy.deepcopy(cfg)
        # 'separate_eval' belongs to the wrapper, not to the wrapped
        # datasets, so drop it from the per-dataset config.
        data_cfg.pop('separate_eval', None)
        data_cfg['ann_file'] = ann_file
        # Per-dataset values are given as lists/tuples; anything else is
        # shared by all sub-datasets and kept from the deep copy.
        for key, values in (('img_prefix', img_prefixes),
                            ('seg_prefix', seg_prefixes),
                            ('proposal_file', proposal_files)):
            if isinstance(values, (list, tuple)):
                data_cfg[key] = values[idx]
        datasets.append(build_dataset(data_cfg, default_args))
    return ConcatDataset(datasets, separate_eval)
def build_dataset(cfg, default_args=None):
    """Construct a (possibly wrapped) dataset from a config dict or list.

    A list/tuple config becomes a ``ConcatDataset``; the wrapper types
    'ConcatDataset', 'RepeatDataset' and 'ClassBalancedDataset' recurse into
    their inner configs; a plain config with a list ``ann_file`` is expanded
    by ``_concat_dataset``; everything else is built from the registry.
    """
    from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
                                   RepeatDataset)
    if isinstance(cfg, (list, tuple)):
        return ConcatDataset([build_dataset(c, default_args) for c in cfg])
    if cfg['type'] == 'ConcatDataset':
        return ConcatDataset(
            [build_dataset(c, default_args) for c in cfg['datasets']],
            cfg.get('separate_eval', True))
    if cfg['type'] == 'RepeatDataset':
        return RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    if cfg['type'] == 'ClassBalancedDataset':
        return ClassBalancedDataset(
            build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
    if isinstance(cfg.get('ann_file'), (list, tuple)):
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)
def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     seed=None,
                     **kwargs):
    """Build PyTorch DataLoader.

    In distributed training, each GPU/process has a dataloader.
    In non-distributed training, there is only one dataloader for all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        samples_per_gpu (int): Number of training samples on each GPU, i.e.
            the batch size of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data loading
            on each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        seed (int | None): Base seed used for the samplers and for seeding
            worker processes. Default: None.
        kwargs: Any keyword argument used to initialize the DataLoader.

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()
    if dist:
        # Each process loads `samples_per_gpu` samples. When shuffling, the
        # grouped sampler keeps images of the same group on the same GPU.
        if shuffle:
            sampler = DistributedGroupSampler(
                dataset, samples_per_gpu, world_size, rank, seed=seed)
        else:
            sampler = DistributedSampler(
                dataset, world_size, rank, shuffle=False, seed=seed)
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        # Single-process loading: one dataloader feeds all GPUs.
        sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu
    if seed is not None:
        init_fn = partial(
            worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
    else:
        init_fn = None
    return DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=False,
        worker_init_fn=init_fn,
        **kwargs)
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Seed ``random`` and ``numpy`` inside a dataloader worker process.

    The worker seed equals ``num_workers * rank + worker_id + seed``, so
    every worker of every rank draws a distinct, reproducible stream.
    """
    worker_seed = seed + num_workers * rank + worker_id
    random.seed(worker_seed)
    np.random.seed(worker_seed)
| 5,284 | 35.701389 | 79 | py |
DDOD | DDOD-main/mmdet/datasets/coco.py | import itertools
import logging
import os.path as osp
import tempfile
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .api_wrappers import COCO, COCOeval
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class CocoDataset(CustomDataset):
CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
self.coco = COCO(ann_file)
# The order of returned `cat_ids` will not
# change with the order of the CLASSES
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
ann_ids = self.coco.get_ann_ids(img_ids=[i])
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
# obtain images that contain annotation
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
ann_info (list[dict]): Annotation info of an image.
with_mask (bool): Whether to parse mask annotations.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
"""Convert instance segmentation results to COCO json style."""
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[tuple | numpy.ndarray]): Testing results of the
dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing \
the json filepaths, tmp_dir is the temporal directory created \
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=None,
                 metric_items=None):
        """Evaluation in COCO protocol.
        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluating the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float], optional): IoU threshold used for
                evaluating recalls/mAPs. If set to a list, the average of all
                IoUs will also be computed. If not specified, [0.50, 0.55,
                0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
                Default: None.
            metric_items (list[str] | str, optional): Metric items that will
                be returned. If not specified, ``['AR@100', 'AR@300',
                'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
                used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
                'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
                ``metric=='bbox' or metric=='segm'``.
        Returns:
            dict[str, float]: COCO style evaluation metric.
        """
        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        if iou_thrs is None:
            # COCO default: 10 IoU thresholds from 0.50 to 0.95, step 0.05
            iou_thrs = np.linspace(
                .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
        if metric_items is not None:
            if not isinstance(metric_items, list):
                metric_items = [metric_items]
        # dump predictions to COCO-style json (tmp_dir is set when no
        # jsonfile_prefix was given and must be cleaned up at the end)
        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
        eval_results = OrderedDict()
        cocoGt = self.coco
        for metric in metrics:
            msg = f'Evaluating {metric}...'
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)
            if metric == 'proposal_fast':
                # recall-only fast path that bypasses the COCO api entirely
                ar = self.fast_eval_recall(
                    results, proposal_nums, iou_thrs, logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
                    log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue
            # 'proposal' is evaluated against the bbox-style json results
            iou_type = 'bbox' if metric == 'proposal' else metric
            if metric not in result_files:
                raise KeyError(f'{metric} is not in results')
            try:
                predictions = mmcv.load(result_files[metric])
                if iou_type == 'segm':
                    # Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331 # noqa
                    # When evaluating mask AP, if the results contain bbox,
                    # cocoapi will use the box area instead of the mask area
                    # for calculating the instance area. Though the overall AP
                    # is not affected, this leads to different
                    # small/medium/large mask AP results.
                    for x in predictions:
                        x.pop('bbox')
                    warnings.simplefilter('once')
                    warnings.warn(
                        'The key "bbox" is deleted for more accurate mask AP '
                        'of small/medium/large instances since v2.12.0. This '
                        'does not change the overall mAP calculation.',
                        UserWarning)
                cocoDt = cocoGt.loadRes(predictions)
            except IndexError:
                # loadRes raises IndexError when the json has no detections
                print_log(
                    'The testing results of the whole dataset is empty.',
                    logger=logger,
                    level=logging.ERROR)
                break
            cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
            cocoEval.params.catIds = self.cat_ids
            cocoEval.params.imgIds = self.img_ids
            cocoEval.params.maxDets = list(proposal_nums)
            cocoEval.params.iouThrs = iou_thrs
            # mapping of cocoEval.stats
            coco_metric_names = {
                'mAP': 0,
                'mAP_50': 1,
                'mAP_75': 2,
                'mAP_s': 3,
                'mAP_m': 4,
                'mAP_l': 5,
                'AR@100': 6,
                'AR@300': 7,
                'AR@1000': 8,
                'AR_s@1000': 9,
                'AR_m@1000': 10,
                'AR_l@1000': 11
            }
            if metric_items is not None:
                for metric_item in metric_items:
                    if metric_item not in coco_metric_names:
                        raise KeyError(
                            f'metric item {metric_item} is not supported')
            if metric == 'proposal':
                # class-agnostic evaluation: ignore category labels
                cocoEval.params.useCats = 0
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if metric_items is None:
                    metric_items = [
                        'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
                        'AR_m@1000', 'AR_l@1000'
                    ]
                for item in metric_items:
                    # round to 3 decimals via formatting, matching summarize()
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
                    eval_results[item] = val
            else:
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if classwise:  # Compute per-category AP
                    # Compute per-category AP
                    # from https://github.com/facebookresearch/detectron2/
                    precisions = cocoEval.eval['precision']
                    # precision: (iou, recall, cls, area range, max dets)
                    assert len(self.cat_ids) == precisions.shape[2]
                    results_per_category = []
                    for idx, catId in enumerate(self.cat_ids):
                        # area range index 0: all area ranges
                        # max dets index -1: typically 100 per image
                        nm = self.coco.loadCats(catId)[0]
                        precision = precisions[:, :, idx, 0, -1]
                        # -1 marks absent entries; exclude them from the mean
                        precision = precision[precision > -1]
                        if precision.size:
                            ap = np.mean(precision)
                        else:
                            ap = float('nan')
                        results_per_category.append(
                            (f'{nm["name"]}', f'{float(ap):0.3f}'))
                    # lay the (category, AP) pairs out in up to 3 column
                    # pairs for a compact ascii table
                    num_columns = min(6, len(results_per_category) * 2)
                    results_flatten = list(
                        itertools.chain(*results_per_category))
                    headers = ['category', 'AP'] * (num_columns // 2)
                    results_2d = itertools.zip_longest(*[
                        results_flatten[i::num_columns]
                        for i in range(num_columns)
                    ])
                    table_data = [headers]
                    table_data += [result for result in results_2d]
                    table = AsciiTable(table_data)
                    print_log('\n' + table.table, logger=logger)
                if metric_items is None:
                    metric_items = [
                        'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
                    ]
                for metric_item in metric_items:
                    key = f'{metric}_{metric_item}'
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
                    )
                    eval_results[key] = val
                # single-line AP summary in the common copy-paste format
                ap = cocoEval.stats[:6]
                eval_results[f'{metric}_mAP_copypaste'] = (
                    f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                    f'{ap[4]:.3f} {ap[5]:.3f}')
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
| 23,463 | 40.974955 | 124 | py |
DDOD | DDOD-main/mmdet/datasets/wider_face.py | import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
    """Reader for the WIDER Face dataset in PASCAL VOC format.

    Conversion scripts can be found in
    https://github.com/sovrasov/wider-face-pascal-voc-annotations
    """
    CLASSES = ('face', )

    def __init__(self, **kwargs):
        super(WIDERFaceDataset, self).__init__(**kwargs)

    def load_annotations(self, ann_file):
        """Load annotation from WIDERFace XML style annotation file.

        Args:
            ann_file (str): Path of XML file.

        Returns:
            list[dict]: Annotation info from XML file.
        """
        data_infos = []
        for img_id in mmcv.list_from_file(ann_file):
            xml_path = osp.join(self.img_prefix, 'Annotations',
                                f'{img_id}.xml')
            root = ET.parse(xml_path).getroot()
            size = root.find('size')
            # images are grouped into per-event folders; the folder name is
            # recorded in each annotation file
            folder = root.find('folder').text
            data_infos.append(
                dict(
                    id=img_id,
                    filename=osp.join(folder, f'{img_id}.jpg'),
                    width=int(size.find('width').text),
                    height=int(size.find('height').text)))
        return data_infos
| 1,501 | 27.884615 | 68 | py |
DDOD | DDOD-main/mmdet/datasets/api_wrappers/coco_api.py | # This file add snake case alias for coco api
import warnings
import pycocotools
from pycocotools.coco import COCO as _COCO
from pycocotools.cocoeval import COCOeval as _COCOeval
class COCO(_COCO):
    """This class is almost the same as official pycocotools package.

    It implements some snake case function aliases. So that the COCO class
    has the same interface as LVIS class.
    """

    def __init__(self, annotation_file=None):
        version = getattr(pycocotools, '__version__', '0')
        # Compare version components numerically. A plain string comparison
        # is lexicographic and would wrongly treat official pycocotools
        # releases (e.g. '2.0.4') as >= '12.0.2', emitting a spurious
        # deprecation warning. mmpycocotools uses the 12.x series.
        try:
            version_tuple = tuple(
                int(part) for part in version.split('.')[:3])
        except ValueError:
            # non-numeric version string; assume it is not mmpycocotools
            version_tuple = (0, )
        if version_tuple >= (12, 0, 2):
            warnings.warn(
                'mmpycocotools is deprecated. Please install official pycocotools by "pip install pycocotools"',  # noqa: E501
                UserWarning)
        super().__init__(annotation_file=annotation_file)
        # snake_case aliases of the camelCase lookup tables
        self.img_ann_map = self.imgToAnns
        self.cat_img_map = self.catToImgs

    def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
        """Snake-case alias of ``getAnnIds``."""
        return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)

    def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
        """Snake-case alias of ``getCatIds``."""
        return self.getCatIds(cat_names, sup_names, cat_ids)

    def get_img_ids(self, img_ids=[], cat_ids=[]):
        """Snake-case alias of ``getImgIds``."""
        return self.getImgIds(img_ids, cat_ids)

    def load_anns(self, ids):
        """Snake-case alias of ``loadAnns``."""
        return self.loadAnns(ids)

    def load_cats(self, ids):
        """Snake-case alias of ``loadCats``."""
        return self.loadCats(ids)

    def load_imgs(self, ids):
        """Snake-case alias of ``loadImgs``."""
        return self.loadImgs(ids)
# just for the ease of import
# Re-export so callers can ``from .coco_api import COCOeval`` alongside COCO.
COCOeval = _COCOeval
| 1,458 | 30.042553 | 126 | py |
DDOD | DDOD-main/mmdet/datasets/api_wrappers/__init__.py | from .coco_api import COCO, COCOeval
__all__ = ['COCO', 'COCOeval']
| 69 | 16.5 | 36 | py |
DDOD | DDOD-main/mmdet/datasets/samplers/group_sampler.py | from __future__ import division
import math
import numpy as np
import torch
from mmcv.runner import get_dist_info
from torch.utils.data import Sampler
class GroupSampler(Sampler):
    """Sampler that yields batches drawn from a single group at a time.

    ``dataset.flag`` assigns every sample to a group (mmdet uses this for
    aspect-ratio grouping). Each group is shuffled and padded with repeated
    random indices until its size is a multiple of ``samples_per_gpu``, so
    every consecutive run of ``samples_per_gpu`` indices stays within one
    group. The batch order itself is then shuffled.
    """

    def __init__(self, dataset, samples_per_gpu=1):
        assert hasattr(dataset, 'flag')
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.flag = dataset.flag.astype(np.int64)
        self.group_sizes = np.bincount(self.flag)
        # total number of indices after padding every group up to a
        # multiple of samples_per_gpu
        self.num_samples = 0
        for size in self.group_sizes:
            self.num_samples += int(np.ceil(
                size / self.samples_per_gpu)) * self.samples_per_gpu

    def __iter__(self):
        chunks = []
        for group, size in enumerate(self.group_sizes):
            if size == 0:
                continue
            members = np.where(self.flag == group)[0]
            assert len(members) == size
            np.random.shuffle(members)
            # pad with random repeats so the group divides evenly into
            # per-GPU batches
            pad = int(np.ceil(size / self.samples_per_gpu)
                      ) * self.samples_per_gpu - len(members)
            members = np.concatenate(
                [members, np.random.choice(members, pad)])
            chunks.append(members)
        all_indices = np.concatenate(chunks)
        # shuffle at batch granularity so each batch keeps group purity
        batch = self.samples_per_gpu
        all_indices = np.concatenate([
            all_indices[i * batch:(i + 1) * batch]
            for i in np.random.permutation(
                range(len(all_indices) // batch))
        ])
        all_indices = all_indices.astype(np.int64).tolist()
        assert len(all_indices) == self.num_samples
        return iter(all_indices)

    def __len__(self):
        return self.num_samples
class DistributedGroupSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.
    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedSampler instance as a DataLoader sampler,
    and load a subset of the original dataset that is exclusive to it.
    Batches of ``samples_per_gpu`` indices are always drawn from a single
    ``dataset.flag`` group (mmdet's aspect-ratio grouping).
    .. note::
        Dataset is assumed to be of constant size.
    Arguments:
        dataset: Dataset used for sampling.
        samples_per_gpu (int, optional): Number of samples per batch on
            each replica; every batch stays within one group. Default: 1.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
        seed (int, optional): random seed used to shuffle the sampler if
            ``shuffle=True``. This number should be identical across all
            processes in the distributed group. Default: 0.
    """
    def __init__(self,
                 dataset,
                 samples_per_gpu=1,
                 num_replicas=None,
                 rank=None,
                 seed=0):
        # fall back to the launcher-provided world size / rank when the
        # caller does not pass them explicitly
        _rank, _num_replicas = get_dist_info()
        if num_replicas is None:
            num_replicas = _num_replicas
        if rank is None:
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.seed = seed if seed is not None else 0
        assert hasattr(self.dataset, 'flag')
        # ``flag`` assigns every sample to a group; group sizes drive the
        # padding computed below
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)
        self.num_samples = 0
        for i, j in enumerate(self.group_sizes):
            # pad each group so it divides evenly into per-GPU batches
            # across all replicas
            self.num_samples += int(
                math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
                          self.num_replicas)) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas
    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch + self.seed)
        indices = []
        for i, size in enumerate(self.group_sizes):
            if size > 0:
                indice = np.where(self.flag == i)[0]
                assert len(indice) == size
                # add .numpy() to avoid bug when selecting indice in parrots.
                # TODO: check whether torch.randperm() can be replaced by
                # numpy.random.permutation().
                indice = indice[list(
                    torch.randperm(int(size), generator=g).numpy())].tolist()
                # number of repeats needed so this group splits evenly into
                # samples_per_gpu * num_replicas sized chunks
                extra = int(
                    math.ceil(
                        size * 1.0 / self.samples_per_gpu / self.num_replicas)
                ) * self.samples_per_gpu * self.num_replicas - len(indice)
                # pad indice by cycling through the shuffled group
                tmp = indice.copy()
                for _ in range(extra // size):
                    indice.extend(tmp)
                indice.extend(tmp[:extra % size])
                indices.extend(indice)
        assert len(indices) == self.total_size
        # shuffle whole batches (samples_per_gpu indices each) so batch
        # order is random while each batch keeps group purity
        indices = [
            indices[j] for i in list(
                torch.randperm(
                    len(indices) // self.samples_per_gpu, generator=g))
            for j in range(i * self.samples_per_gpu, (i + 1) *
                           self.samples_per_gpu)
        ]
        # subsample: each rank takes its own contiguous shard
        offset = self.num_samples * self.rank
        indices = indices[offset:offset + self.num_samples]
        assert len(indices) == self.num_samples
        return iter(indices)
    def __len__(self):
        # number of indices yielded on this rank per epoch
        return self.num_samples
    def set_epoch(self, epoch):
        # called by the runner each epoch so shuffling differs across
        # epochs but stays consistent across ranks
        self.epoch = epoch
| 5,368 | 35.033557 | 78 | py |
DDOD | DDOD-main/mmdet/datasets/samplers/distributed_sampler.py | import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
    """Distributed sampler with a seedable, epoch-deterministic shuffle.

    The shuffle mixes ``seed`` into the per-epoch generator seed so every
    process draws the same permutation (compatibility shim for
    PyTorch 1.3+, whose base class did not accept a seed).
    """

    def __init__(self,
                 dataset,
                 num_replicas=None,
                 rank=None,
                 shuffle=True,
                 seed=0):
        super().__init__(
            dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
        # for the compatibility from PyTorch 1.3+
        self.seed = seed if seed is not None else 0

    def __iter__(self):
        if self.shuffle:
            # deterministically shuffle based on epoch and seed
            generator = torch.Generator()
            generator.manual_seed(self.epoch + self.seed)
            indices = torch.randperm(
                len(self.dataset), generator=generator).tolist()
        else:
            indices = list(range(len(self.dataset)))
        # repeat until the list covers total_size, then truncate; this also
        # handles the case where indices is shorter than half of total_size
        repeats = math.ceil(self.total_size / len(indices))
        indices = (indices * repeats)[:self.total_size]
        assert len(indices) == self.total_size
        # each replica takes an interleaved slice
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples
        return iter(indices)
| 1,310 | 31.775 | 79 | py |
DDOD | DDOD-main/mmdet/datasets/samplers/__init__.py | from .distributed_sampler import DistributedSampler
from .group_sampler import DistributedGroupSampler, GroupSampler
__all__ = ['DistributedSampler', 'DistributedGroupSampler', 'GroupSampler']
| 194 | 38 | 75 | py |
DDOD | DDOD-main/mmdet/datasets/pipelines/loading.py | import os.path as osp
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
from mmdet.core import BitmapMasks, PolygonMasks
from ..builder import PIPELINES
@PIPELINES.register_module()
class LoadImageFromFile:
    """Load an image from file.

    Required keys are "img_prefix" and "img_info" (a dict that must contain
    the key "filename"). Added or updated keys are "filename", "img",
    "img_shape", "ori_shape" (same as `img_shape`), "pad_shape" (same as
    `img_shape`), "scale_factor" (1.0) and "img_norm_cfg" (means=0 and
    stds=1).

    Args:
        to_float32 (bool): Whether to convert the loaded image to a float32
            numpy array. If set to False, the loaded image is an uint8 array.
            Defaults to False.
        color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
            Defaults to 'color'.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details.
            Defaults to ``dict(backend='disk')``.
    """

    def __init__(self,
                 to_float32=False,
                 color_type='color',
                 file_client_args=dict(backend='disk')):
        self.to_float32 = to_float32
        self.color_type = color_type
        # copy so the shared default dict is never mutated
        self.file_client_args = file_client_args.copy()
        # created lazily on first __call__
        self.file_client = None

    def __call__(self, results):
        """Load the image and record its meta information.

        Args:
            results (dict): Result dict from :obj:`mmdet.CustomDataset`.

        Returns:
            dict: The dict contains loaded image and meta information.
        """
        if self.file_client is None:
            self.file_client = mmcv.FileClient(**self.file_client_args)

        rel_name = results['img_info']['filename']
        prefix = results['img_prefix']
        filename = rel_name if prefix is None else osp.join(prefix, rel_name)

        img = mmcv.imfrombytes(
            self.file_client.get(filename), flag=self.color_type)
        if self.to_float32:
            img = img.astype(np.float32)

        results['filename'] = filename
        results['ori_filename'] = rel_name
        results['img'] = img
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        results['img_fields'] = ['img']
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}('
                f'to_float32={self.to_float32}, '
                f"color_type='{self.color_type}', "
                f'file_client_args={self.file_client_args})')
@PIPELINES.register_module()
class LoadImageFromWebcam(LoadImageFromFile):
    """Load an image from webcam.

    Similar with :obj:`LoadImageFromFile`, but the image read from webcam is
    in ``results['img']``.
    """

    def __call__(self, results):
        """Fill in image meta information for an already-loaded image.

        Args:
            results (dict): Result dict with webcam-read image in
                ``results['img']``.

        Returns:
            dict: The dict contains loaded image and meta information.
        """
        img = results['img']
        if self.to_float32:
            img = img.astype(np.float32)
        # webcam frames have no backing file, so filename keys are None
        results.update(
            filename=None,
            ori_filename=None,
            img=img,
            img_shape=img.shape,
            ori_shape=img.shape,
            img_fields=['img'])
        return results
@PIPELINES.register_module()
class LoadMultiChannelImageFromFiles:
    """Load multi-channel images from a list of separate channel files.

    Required keys are "img_prefix" and "img_info" (a dict that must contain
    the key "filename", which is expected to be a list of filenames).
    Added or updated keys are "filename", "img", "img_shape",
    "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
    "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).

    Args:
        to_float32 (bool): Whether to convert the loaded image to a float32
            numpy array. If set to False, the loaded image is an uint8 array.
            Defaults to False.
        color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
            Defaults to 'color'.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details.
            Defaults to ``dict(backend='disk')``.
    """

    def __init__(self,
                 to_float32=False,
                 color_type='unchanged',
                 file_client_args=dict(backend='disk')):
        self.to_float32 = to_float32
        self.color_type = color_type
        # copy so the shared default dict is never mutated
        self.file_client_args = file_client_args.copy()
        # created lazily on first __call__
        self.file_client = None

    def __call__(self, results):
        """Load every channel file and stack them into one array.

        Args:
            results (dict): Result dict from :obj:`mmdet.CustomDataset`.

        Returns:
            dict: The dict contains loaded images and meta information.
        """
        if self.file_client is None:
            self.file_client = mmcv.FileClient(**self.file_client_args)

        names = results['img_info']['filename']
        prefix = results['img_prefix']
        if prefix is not None:
            filename = [osp.join(prefix, fname) for fname in names]
        else:
            filename = names

        # one decoded image per channel file, stacked along a new last axis
        img = np.stack([
            mmcv.imfrombytes(self.file_client.get(name), flag=self.color_type)
            for name in filename
        ],
                       axis=-1)
        if self.to_float32:
            img = img.astype(np.float32)

        results['filename'] = filename
        results['ori_filename'] = results['img_info']['filename']
        results['img'] = img
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        # Set initial values for default meta_keys
        results['pad_shape'] = img.shape
        results['scale_factor'] = 1.0
        num_channels = 1 if len(img.shape) < 3 else img.shape[2]
        results['img_norm_cfg'] = dict(
            mean=np.zeros(num_channels, dtype=np.float32),
            std=np.ones(num_channels, dtype=np.float32),
            to_rgb=False)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}('
                f'to_float32={self.to_float32}, '
                f"color_type='{self.color_type}', "
                f'file_client_args={self.file_client_args})')
@PIPELINES.register_module()
class LoadAnnotations:
    """Load multiple types of annotations.

    Args:
        with_bbox (bool): Whether to parse and load the bbox annotation.
            Default: True.
        with_label (bool): Whether to parse and load the label annotation.
            Default: True.
        with_mask (bool): Whether to parse and load the mask annotation.
            Default: False.
        with_seg (bool): Whether to parse and load the semantic segmentation
            annotation. Default: False.
        poly2mask (bool): Whether to convert the instance masks from polygons
            to bitmaps. Default: True.
        file_client_args (dict): Arguments to instantiate a FileClient.
            See :class:`mmcv.fileio.FileClient` for details.
            Defaults to ``dict(backend='disk')``.
    """

    def __init__(self,
                 with_bbox=True,
                 with_label=True,
                 with_mask=False,
                 with_seg=False,
                 poly2mask=True,
                 file_client_args=dict(backend='disk')):
        self.with_bbox = with_bbox
        self.with_label = with_label
        self.with_mask = with_mask
        self.with_seg = with_seg
        self.poly2mask = poly2mask
        # copy so the shared default dict is never mutated
        self.file_client_args = file_client_args.copy()
        # created lazily on first semantic-seg load
        self.file_client = None

    def _load_bboxes(self, results):
        """Private function to load bounding box annotations.

        Args:
            results (dict): Result dict from :obj:`mmdet.CustomDataset`.

        Returns:
            dict: The dict contains loaded bounding box annotations.
        """
        ann_info = results['ann_info']
        results['gt_bboxes'] = ann_info['bboxes'].copy()
        gt_bboxes_ignore = ann_info.get('bboxes_ignore', None)
        if gt_bboxes_ignore is not None:
            results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy()
            results['bbox_fields'].append('gt_bboxes_ignore')
        results['bbox_fields'].append('gt_bboxes')
        return results

    def _load_labels(self, results):
        """Private function to load label annotations.

        Args:
            results (dict): Result dict from :obj:`mmdet.CustomDataset`.

        Returns:
            dict: The dict contains loaded label annotations.
        """
        results['gt_labels'] = results['ann_info']['labels'].copy()
        return results

    def _poly2mask(self, mask_ann, img_h, img_w):
        """Private function to convert masks represented with polygon to
        bitmaps.

        Args:
            mask_ann (list | dict): Polygon mask annotation input.
            img_h (int): The height of output mask.
            img_w (int): The width of output mask.

        Returns:
            numpy.ndarray: The decode bitmap mask of shape (img_h, img_w).
        """
        if isinstance(mask_ann, list):
            # polygon -- a single object might consist of multiple parts
            # we merge all parts into one mask rle code
            rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)
            rle = maskUtils.merge(rles)
        elif isinstance(mask_ann['counts'], list):
            # uncompressed RLE
            rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)
        else:
            # rle
            rle = mask_ann
        mask = maskUtils.decode(rle)
        return mask

    def process_polygons(self, polygons):
        """Convert polygons to list of ndarray and filter invalid polygons.

        Args:
            polygons (list[list]): Polygons of one instance.

        Returns:
            list[numpy.ndarray]: Processed polygons.
        """
        polygons = [np.array(p) for p in polygons]
        valid_polygons = []
        for polygon in polygons:
            # a valid polygon needs at least 3 (x, y) vertex pairs
            if len(polygon) % 2 == 0 and len(polygon) >= 6:
                valid_polygons.append(polygon)
        return valid_polygons

    def _load_masks(self, results):
        """Private function to load mask annotations.

        Args:
            results (dict): Result dict from :obj:`mmdet.CustomDataset`.

        Returns:
            dict: The dict contains loaded mask annotations.
                If ``self.poly2mask`` is set ``True``, `gt_mask` will contain
                :obj:`PolygonMasks`. Otherwise, :obj:`BitmapMasks` is used.
        """
        h, w = results['img_info']['height'], results['img_info']['width']
        gt_masks = results['ann_info']['masks']
        if self.poly2mask:
            gt_masks = BitmapMasks(
                [self._poly2mask(mask, h, w) for mask in gt_masks], h, w)
        else:
            gt_masks = PolygonMasks(
                [self.process_polygons(polygons) for polygons in gt_masks], h,
                w)
        results['gt_masks'] = gt_masks
        results['mask_fields'].append('gt_masks')
        return results

    def _load_semantic_seg(self, results):
        """Private function to load semantic segmentation annotations.

        Args:
            results (dict): Result dict from :obj:`dataset`.

        Returns:
            dict: The dict contains loaded semantic segmentation annotations.
        """
        if self.file_client is None:
            self.file_client = mmcv.FileClient(**self.file_client_args)
        filename = osp.join(results['seg_prefix'],
                            results['ann_info']['seg_map'])
        img_bytes = self.file_client.get(filename)
        results['gt_semantic_seg'] = mmcv.imfrombytes(
            img_bytes, flag='unchanged').squeeze()
        results['seg_fields'].append('gt_semantic_seg')
        return results

    def __call__(self, results):
        """Call function to load multiple types annotations.

        Args:
            results (dict): Result dict from :obj:`mmdet.CustomDataset`.

        Returns:
            dict: The dict contains loaded bounding box, label, mask and
                semantic segmentation annotations.
        """
        if self.with_bbox:
            results = self._load_bboxes(results)
            if results is None:
                return None
        if self.with_label:
            results = self._load_labels(results)
        if self.with_mask:
            results = self._load_masks(results)
        if self.with_seg:
            results = self._load_semantic_seg(results)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(with_bbox={self.with_bbox}, '
        repr_str += f'with_label={self.with_label}, '
        repr_str += f'with_mask={self.with_mask}, '
        repr_str += f'with_seg={self.with_seg}, '
        repr_str += f'poly2mask={self.poly2mask}, '
        # fixed: this value was previously mislabeled as 'poly2mask='
        repr_str += f'file_client_args={self.file_client_args})'
        return repr_str
@PIPELINES.register_module()
class LoadProposals:
    """Load proposal pipeline.

    Required key is "proposals". Updated keys are "proposals", "bbox_fields".

    Args:
        num_max_proposals (int, optional): Maximum number of proposals to
            load. If not specified, all proposals will be loaded.
    """

    def __init__(self, num_max_proposals=None):
        self.num_max_proposals = num_max_proposals

    def __call__(self, results):
        """Load proposals and register them as a bbox field.

        Args:
            results (dict): Result dict from :obj:`mmdet.CustomDataset`.

        Returns:
            dict: The dict contains loaded proposal annotations.
        """
        proposals = results['proposals']
        if proposals.shape[1] not in (4, 5):
            raise AssertionError(
                'proposals should have shapes (n, 4) or (n, 5), '
                f'but found {proposals.shape}')
        # drop the score column (if any) and cap the number of proposals;
        # slicing with stop=None keeps everything
        proposals = proposals[:self.num_max_proposals, :4]
        if len(proposals) == 0:
            # keep downstream code happy with a single dummy box
            proposals = np.array([[0, 0, 0, 0]], dtype=np.float32)
        results['proposals'] = proposals
        results['bbox_fields'].append('proposals')
        return results

    def __repr__(self):
        return (self.__class__.__name__ +
                f'(num_max_proposals={self.num_max_proposals})')
@PIPELINES.register_module()
class FilterAnnotations:
    """Filter invalid annotations.

    Args:
        min_gt_bbox_wh (tuple[int]): Minimum width and height of ground
            truth boxes.
    """

    def __init__(self, min_gt_bbox_wh):
        # TODO: add more filter options
        self.min_gt_bbox_wh = min_gt_bbox_wh

    def __call__(self, results):
        """Drop ground-truth boxes smaller than the configured minimum.

        Returns ``None`` (dropping the whole sample) when no box survives.
        """
        assert 'gt_bboxes' in results
        bboxes = results['gt_bboxes']
        min_w, min_h = self.min_gt_bbox_wh
        widths = bboxes[:, 2] - bboxes[:, 0]
        heights = bboxes[:, 3] - bboxes[:, 1]
        keep = (widths > min_w) & (heights > min_h)
        if not keep.any():
            return None
        # apply the same instance mask to every per-instance annotation
        for key in ('gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg'):
            if key in results:
                results[key] = results[key][keep]
        return results
| 15,860 | 33.555556 | 79 | py |
DDOD | DDOD-main/mmdet/datasets/pipelines/instaboost.py | import numpy as np
from ..builder import PIPELINES
@PIPELINES.register_module()
class InstaBoost:
    r"""Data augmentation method in `InstaBoost: Boosting Instance
    Segmentation Via Probability Map Guided Copy-Pasting
    <https://arxiv.org/abs/1908.07801>`_.
    Refer to https://github.com/GothicAi/Instaboost for implementation details.
    """
    def __init__(self,
                 action_candidate=('normal', 'horizontal', 'skip'),
                 action_prob=(1, 0, 0),
                 scale=(0.8, 1.2),
                 dx=15,
                 dy=15,
                 theta=(-1, 1),
                 color_prob=0.5,
                 hflag=False,
                 aug_ratio=0.5):
        # instaboostfast is an optional dependency; fail early with an
        # actionable message instead of an opaque error at call time.
        try:
            import instaboostfast as instaboost
        except ImportError:
            raise ImportError(
                'Please run "pip install instaboostfast" '
                'to install instaboostfast first for instaboost augmentation.')
        # All tuning knobs are forwarded verbatim to the library config.
        self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob,
                                               scale, dx, dy, theta,
                                               color_prob, hflag)
        # Fraction of samples the augmentation is applied to.
        self.aug_ratio = aug_ratio
    def _load_anns(self, results):
        """Convert ``results['ann_info']`` parallel arrays into the list of
        COCO-style annotation dicts that instaboost consumes."""
        labels = results['ann_info']['labels']
        masks = results['ann_info']['masks']
        bboxes = results['ann_info']['bboxes']
        n = len(labels)
        anns = []
        for i in range(n):
            label = labels[i]
            bbox = bboxes[i]
            mask = masks[i]
            x1, y1, x2, y2 = bbox
            # assert (x2 - x1) >= 1 and (y2 - y1) >= 1
            # Convert (x1, y1, x2, y2) to COCO's (x, y, w, h) format.
            bbox = [x1, y1, x2 - x1, y2 - y1]
            anns.append({
                'category_id': label,
                'segmentation': mask,
                'bbox': bbox
            })
        return anns
    def _parse_anns(self, results, anns, img):
        """Write the instaboost-returned annotations and image back into
        ``results``, converting bboxes back to (x1, y1, x2, y2)."""
        gt_bboxes = []
        gt_labels = []
        gt_masks_ann = []
        for ann in anns:
            x1, y1, w, h = ann['bbox']
            # TODO: more essential bug need to be fixed in instaboost
            if w <= 0 or h <= 0:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            gt_bboxes.append(bbox)
            gt_labels.append(ann['category_id'])
            gt_masks_ann.append(ann['segmentation'])
        gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
        gt_labels = np.array(gt_labels, dtype=np.int64)
        results['ann_info']['labels'] = gt_labels
        results['ann_info']['bboxes'] = gt_bboxes
        results['ann_info']['masks'] = gt_masks_ann
        results['img'] = img
        return results
    def __call__(self, results):
        img = results['img']
        # Remember the incoming dtype: instaboost works on uint8, so the
        # result is cast back afterwards.
        orig_type = img.dtype
        anns = self._load_anns(results)
        # Apply augmentation with probability ``aug_ratio``.
        if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]):
            try:
                import instaboostfast as instaboost
            except ImportError:
                raise ImportError('Please run "pip install instaboostfast" '
                                  'to install instaboostfast first.')
            anns, img = instaboost.get_new_data(
                anns, img.astype(np.uint8), self.cfg, background=None)
        results = self._parse_anns(results, anns, img.astype(orig_type))
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
        return repr_str
| 3,486 | 34.222222 | 79 | py |
DDOD | DDOD-main/mmdet/datasets/pipelines/compose.py | import collections
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose:
    """Compose multiple transforms sequentially.

    Args:
        transforms (Sequence[dict | callable]): Sequence of transform object
            or config dict to be composed.
    """

    def __init__(self, transforms):
        assert isinstance(transforms, collections.abc.Sequence)
        self.transforms = []
        for cfg in transforms:
            if isinstance(cfg, dict):
                # Config dicts are instantiated through the registry.
                self.transforms.append(build_from_cfg(cfg, PIPELINES))
            elif callable(cfg):
                self.transforms.append(cfg)
            else:
                raise TypeError('transform must be callable or a dict')

    def __call__(self, data):
        """Apply every transform in order, aborting on the first ``None``.

        Args:
            data (dict): A result dict contains the data to transform.

        Returns:
            dict: Transformed data.
        """
        for transform in self.transforms:
            data = transform(data)
            if data is None:
                # A transform may veto the sample (e.g. all gt filtered out).
                return None
        return data

    def __repr__(self):
        body = [f'    {t}' for t in self.transforms]
        return '\n'.join([self.__class__.__name__ + '('] + body + [')'])
| 1,456 | 27.019231 | 79 | py |
DDOD | DDOD-main/mmdet/datasets/pipelines/auto_augment.py | import copy
import cv2
import mmcv
import numpy as np
from ..builder import PIPELINES
from .compose import Compose
_MAX_LEVEL = 10


def level_to_value(level, max_value):
    """Map from level to values based on max_value."""
    # Keep the original evaluation order for bit-identical float results.
    return (level / _MAX_LEVEL) * max_value


def enhance_level_to_value(level, a=1.8, b=0.1):
    """Map from level to values."""
    # Linear map onto [b, a + b]; with the defaults that is [0.1, 1.9].
    return (level / _MAX_LEVEL) * a + b


def random_negative(value, random_negative_prob):
    """Randomly negate value based on random_negative_prob."""
    if np.random.rand() < random_negative_prob:
        return -value
    return value
def bbox2fields():
    """The key correspondence from bboxes to labels, masks and
    segmentations."""
    # Each gt_bboxes* field has parallel label/mask fields that must be
    # filtered or transformed together with the boxes.
    bbox2label = {
        'gt_bboxes': 'gt_labels',
        'gt_bboxes_ignore': 'gt_labels_ignore',
    }
    bbox2mask = {
        'gt_bboxes': 'gt_masks',
        'gt_bboxes_ignore': 'gt_masks_ignore',
    }
    # Only the non-ignored boxes have a semantic segmentation counterpart.
    bbox2seg = {'gt_bboxes': 'gt_semantic_seg'}
    return bbox2label, bbox2mask, bbox2seg
@PIPELINES.register_module()
class AutoAugment:
    """Auto augmentation.

    This data augmentation is proposed in `Learning Data Augmentation
    Strategies for Object Detection <https://arxiv.org/pdf/1906.11172>`_.

    TODO: Implement 'Shear', 'Sharpness' and 'Rotate' transforms

    Args:
        policies (list[list[dict]]): The policies of auto augmentation. Each
            policy in ``policies`` is a specific augmentation policy, and is
            composed by several augmentations (dict). When AutoAugment is
            called, a random policy in ``policies`` will be selected to
            augment images.
    """

    def __init__(self, policies):
        assert isinstance(policies, list) and len(policies) > 0, \
            'Policies must be a non-empty list.'
        for sub_policy in policies:
            assert isinstance(sub_policy, list) and len(sub_policy) > 0, \
                'Each policy in policies must be a non-empty list.'
            for aug_cfg in sub_policy:
                assert isinstance(aug_cfg, dict) and 'type' in aug_cfg, \
                    'Each specific augmentation must be a dict with key' \
                    ' "type".'
        # Deep-copy so later mutation of the caller's config cannot leak in.
        self.policies = copy.deepcopy(policies)
        # One Compose per sub-policy; a random one is picked per sample.
        self.transforms = [Compose(p) for p in self.policies]

    def __call__(self, results):
        # Select one sub-policy uniformly at random and apply it.
        chosen = np.random.choice(self.transforms)
        return chosen(results)

    def __repr__(self):
        return f'{self.__class__.__name__}(policies={self.policies})'
@PIPELINES.register_module()
class Shear:
    """Apply Shear Transformation to image (and its corresponding bbox, mask,
    segmentation).
    Args:
        level (int | float): The level should be in range [0,_MAX_LEVEL].
        img_fill_val (int | float | tuple): The filled values for image border.
            If float, the same fill value will be used for all the three
            channels of image. If tuple, the should be 3 elements.
        seg_ignore_label (int): The fill value used for segmentation map.
            Note this value must equals ``ignore_label`` in ``semantic_head``
            of the corresponding config. Default 255.
        prob (float): The probability for performing Shear and should be in
            range [0, 1].
        direction (str): The direction for shear, either "horizontal"
            or "vertical".
        max_shear_magnitude (float): The maximum magnitude for Shear
            transformation.
        random_negative_prob (float): The probability that turns the
            offset negative. Should be in range [0,1]
        interpolation (str): Same as in :func:`mmcv.imshear`.
    """
    def __init__(self,
                 level,
                 img_fill_val=128,
                 seg_ignore_label=255,
                 prob=0.5,
                 direction='horizontal',
                 max_shear_magnitude=0.3,
                 random_negative_prob=0.5,
                 interpolation='bilinear'):
        assert isinstance(level, (int, float)), 'The level must be type ' \
            f'int or float, got {type(level)}.'
        assert 0 <= level <= _MAX_LEVEL, 'The level should be in range ' \
            f'[0,{_MAX_LEVEL}], got {level}.'
        # Normalize img_fill_val to a 3-tuple of floats (one per channel).
        if isinstance(img_fill_val, (float, int)):
            img_fill_val = tuple([float(img_fill_val)] * 3)
        elif isinstance(img_fill_val, tuple):
            assert len(img_fill_val) == 3, 'img_fill_val as tuple must ' \
                f'have 3 elements. got {len(img_fill_val)}.'
            img_fill_val = tuple([float(val) for val in img_fill_val])
        else:
            raise ValueError(
                'img_fill_val must be float or tuple with 3 elements.')
        assert np.all([0 <= val <= 255 for val in img_fill_val]), 'all ' \
            'elements of img_fill_val should between range [0,255].' \
            f'got {img_fill_val}.'
        assert 0 <= prob <= 1.0, 'The probability of shear should be in ' \
            f'range [0,1]. got {prob}.'
        assert direction in ('horizontal', 'vertical'), 'direction must ' \
            f'in be either "horizontal" or "vertical". got {direction}.'
        assert isinstance(max_shear_magnitude, float), 'max_shear_magnitude ' \
            f'should be type float. got {type(max_shear_magnitude)}.'
        assert 0. <= max_shear_magnitude <= 1., 'Defaultly ' \
            'max_shear_magnitude should be in range [0,1]. ' \
            f'got {max_shear_magnitude}.'
        self.level = level
        # Map the discrete level onto a concrete shear magnitude.
        self.magnitude = level_to_value(level, max_shear_magnitude)
        self.img_fill_val = img_fill_val
        self.seg_ignore_label = seg_ignore_label
        self.prob = prob
        self.direction = direction
        self.max_shear_magnitude = max_shear_magnitude
        self.random_negative_prob = random_negative_prob
        self.interpolation = interpolation
    def _shear_img(self,
                   results,
                   magnitude,
                   direction='horizontal',
                   interpolation='bilinear'):
        """Shear the image.
        Args:
            results (dict): Result dict from loading pipeline.
            magnitude (int | float): The magnitude used for shear.
            direction (str): The direction for shear, either "horizontal"
                or "vertical".
            interpolation (str): Same as in :func:`mmcv.imshear`.
        """
        for key in results.get('img_fields', ['img']):
            img = results[key]
            img_sheared = mmcv.imshear(
                img,
                magnitude,
                direction,
                border_value=self.img_fill_val,
                interpolation=interpolation)
            # Preserve the incoming dtype (imshear may promote it).
            results[key] = img_sheared.astype(img.dtype)
    def _shear_bboxes(self, results, magnitude):
        """Shear the bboxes."""
        h, w, c = results['img_shape']
        # 2x2 affine matrix of the shear applied to pixel coordinates.
        if self.direction == 'horizontal':
            shear_matrix = np.stack([[1, magnitude],
                                     [0, 1]]).astype(np.float32)  # [2, 2]
        else:
            shear_matrix = np.stack([[1, 0], [magnitude,
                                              1]]).astype(np.float32)
        for key in results.get('bbox_fields', []):
            min_x, min_y, max_x, max_y = np.split(
                results[key], results[key].shape[-1], axis=-1)
            # The four corners of every box, sheared as points; the new box
            # is the axis-aligned envelope of the transformed corners.
            coordinates = np.stack([[min_x, min_y], [max_x, min_y],
                                    [min_x, max_y],
                                    [max_x, max_y]])  # [4, 2, nb_box, 1]
            coordinates = coordinates[..., 0].transpose(
                (2, 1, 0)).astype(np.float32)  # [nb_box, 2, 4]
            new_coords = np.matmul(shear_matrix[None, :, :],
                                   coordinates)  # [nb_box, 2, 4]
            min_x = np.min(new_coords[:, 0, :], axis=-1)
            min_y = np.min(new_coords[:, 1, :], axis=-1)
            max_x = np.max(new_coords[:, 0, :], axis=-1)
            max_y = np.max(new_coords[:, 1, :], axis=-1)
            # Clip to the image; max is clipped against min so boxes can
            # degenerate to zero size but never invert.
            min_x = np.clip(min_x, a_min=0, a_max=w)
            min_y = np.clip(min_y, a_min=0, a_max=h)
            max_x = np.clip(max_x, a_min=min_x, a_max=w)
            max_y = np.clip(max_y, a_min=min_y, a_max=h)
            results[key] = np.stack([min_x, min_y, max_x, max_y],
                                    axis=-1).astype(results[key].dtype)
    def _shear_masks(self,
                     results,
                     magnitude,
                     direction='horizontal',
                     fill_val=0,
                     interpolation='bilinear'):
        """Shear the masks."""
        h, w, c = results['img_shape']
        for key in results.get('mask_fields', []):
            masks = results[key]
            results[key] = masks.shear((h, w),
                                       magnitude,
                                       direction,
                                       border_value=fill_val,
                                       interpolation=interpolation)
    def _shear_seg(self,
                   results,
                   magnitude,
                   direction='horizontal',
                   fill_val=255,
                   interpolation='bilinear'):
        """Shear the segmentation maps."""
        for key in results.get('seg_fields', []):
            seg = results[key]
            results[key] = mmcv.imshear(
                seg,
                magnitude,
                direction,
                border_value=fill_val,
                interpolation=interpolation).astype(seg.dtype)
    def _filter_invalid(self, results, min_bbox_size=0):
        """Filter bboxes and corresponding masks too small after shear
        augmentation."""
        bbox2label, bbox2mask, _ = bbox2fields()
        for key in results.get('bbox_fields', []):
            bbox_w = results[key][:, 2] - results[key][:, 0]
            bbox_h = results[key][:, 3] - results[key][:, 1]
            valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size)
            valid_inds = np.nonzero(valid_inds)[0]
            results[key] = results[key][valid_inds]
            # label fields. e.g. gt_labels and gt_labels_ignore
            label_key = bbox2label.get(key)
            if label_key in results:
                results[label_key] = results[label_key][valid_inds]
            # mask fields, e.g. gt_masks and gt_masks_ignore
            mask_key = bbox2mask.get(key)
            if mask_key in results:
                results[mask_key] = results[mask_key][valid_inds]
    def __call__(self, results):
        """Call function to shear images, bounding boxes, masks and semantic
        segmentation maps.
        Args:
            results (dict): Result dict from loading pipeline.
        Returns:
            dict: Sheared results.
        """
        # Apply with probability ``prob``; otherwise pass through unchanged.
        if np.random.rand() > self.prob:
            return results
        # The sign of the magnitude is randomized so both shear directions
        # are covered.
        magnitude = random_negative(self.magnitude, self.random_negative_prob)
        self._shear_img(results, magnitude, self.direction, self.interpolation)
        self._shear_bboxes(results, magnitude)
        # fill_val set to 0 for background of mask.
        self._shear_masks(
            results,
            magnitude,
            self.direction,
            fill_val=0,
            interpolation=self.interpolation)
        self._shear_seg(
            results,
            magnitude,
            self.direction,
            fill_val=self.seg_ignore_label,
            interpolation=self.interpolation)
        self._filter_invalid(results)
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(level={self.level}, '
        repr_str += f'img_fill_val={self.img_fill_val}, '
        repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
        repr_str += f'prob={self.prob}, '
        repr_str += f'direction={self.direction}, '
        repr_str += f'max_shear_magnitude={self.max_shear_magnitude}, '
        repr_str += f'random_negative_prob={self.random_negative_prob}, '
        repr_str += f'interpolation={self.interpolation})'
        return repr_str
@PIPELINES.register_module()
class Rotate:
    """Apply Rotate Transformation to image (and its corresponding bbox, mask,
    segmentation).
    Args:
        level (int | float): The level should be in range [0,_MAX_LEVEL].
        scale (int | float): Isotropic scale factor. Same in
            ``mmcv.imrotate``.
        center (int | float | tuple[float]): Center point (w, h) of the
            rotation in the source image. If None, the center of the
            image will be used. Same in ``mmcv.imrotate``.
        img_fill_val (int | float | tuple): The fill value for image border.
            If float, the same value will be used for all the three
            channels of image. If tuple, the should be 3 elements (e.g.
            equals the number of channels for image).
        seg_ignore_label (int): The fill value used for segmentation map.
            Note this value must equals ``ignore_label`` in ``semantic_head``
            of the corresponding config. Default 255.
        prob (float): The probability for perform transformation and
            should be in range 0 to 1.
        max_rotate_angle (int | float): The maximum angles for rotate
            transformation.
        random_negative_prob (float): The probability that turns the
            offset negative.
    """
    def __init__(self,
                 level,
                 scale=1,
                 center=None,
                 img_fill_val=128,
                 seg_ignore_label=255,
                 prob=0.5,
                 max_rotate_angle=30,
                 random_negative_prob=0.5):
        assert isinstance(level, (int, float)), \
            f'The level must be type int or float. got {type(level)}.'
        # Fix: the message previously claimed range (0,_MAX_LEVEL] while the
        # check itself allows 0.
        assert 0 <= level <= _MAX_LEVEL, \
            f'The level should be in range [0,{_MAX_LEVEL}]. got {level}.'
        assert isinstance(scale, (int, float)), \
            f'The scale must be type int or float. got type {type(scale)}.'
        # Normalize center to a (w, h) tuple or None (image center).
        if isinstance(center, (int, float)):
            center = (center, center)
        elif isinstance(center, tuple):
            assert len(center) == 2, 'center with type tuple must have '\
                f'2 elements. got {len(center)} elements.'
        else:
            assert center is None, 'center must be None or type int, '\
                f'float or tuple, got type {type(center)}.'
        # Normalize img_fill_val to a 3-tuple of floats (one per channel).
        if isinstance(img_fill_val, (float, int)):
            img_fill_val = tuple([float(img_fill_val)] * 3)
        elif isinstance(img_fill_val, tuple):
            assert len(img_fill_val) == 3, 'img_fill_val as tuple must '\
                f'have 3 elements. got {len(img_fill_val)}.'
            img_fill_val = tuple([float(val) for val in img_fill_val])
        else:
            raise ValueError(
                'img_fill_val must be float or tuple with 3 elements.')
        assert np.all([0 <= val <= 255 for val in img_fill_val]), \
            'all elements of img_fill_val should between range [0,255]. '\
            f'got {img_fill_val}.'
        # Fix: the second literal was missing the f-prefix, so the message
        # printed the literal text '{prob}' instead of the value.
        assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. '\
            f'got {prob}.'
        assert isinstance(max_rotate_angle, (int, float)), 'max_rotate_angle '\
            f'should be type int or float. got type {type(max_rotate_angle)}.'
        self.level = level
        self.scale = scale
        # Rotation angle in degrees. Positive values mean
        # clockwise rotation.
        self.angle = level_to_value(level, max_rotate_angle)
        self.center = center
        self.img_fill_val = img_fill_val
        self.seg_ignore_label = seg_ignore_label
        self.prob = prob
        self.max_rotate_angle = max_rotate_angle
        self.random_negative_prob = random_negative_prob
    def _rotate_img(self, results, angle, center=None, scale=1.0):
        """Rotate the image.
        Args:
            results (dict): Result dict from loading pipeline.
            angle (float): Rotation angle in degrees, positive values
                mean clockwise rotation. Same in ``mmcv.imrotate``.
            center (tuple[float], optional): Center point (w, h) of the
                rotation. Same in ``mmcv.imrotate``.
            scale (int | float): Isotropic scale factor. Same in
                ``mmcv.imrotate``.
        """
        for key in results.get('img_fields', ['img']):
            img = results[key].copy()
            img_rotated = mmcv.imrotate(
                img, angle, center, scale, border_value=self.img_fill_val)
            # Preserve the incoming dtype (imrotate may promote it).
            results[key] = img_rotated.astype(img.dtype)
    def _rotate_bboxes(self, results, rotate_matrix):
        """Rotate the bboxes using the 2x3 affine ``rotate_matrix`` and take
        the axis-aligned envelope of the rotated corners."""
        h, w, c = results['img_shape']
        for key in results.get('bbox_fields', []):
            min_x, min_y, max_x, max_y = np.split(
                results[key], results[key].shape[-1], axis=-1)
            coordinates = np.stack([[min_x, min_y], [max_x, min_y],
                                    [min_x, max_y],
                                    [max_x, max_y]])  # [4, 2, nb_bbox, 1]
            # pad 1 to convert from format [x, y] to homogeneous
            # coordinates format [x, y, 1]
            coordinates = np.concatenate(
                (coordinates,
                 np.ones((4, 1, coordinates.shape[2], 1), coordinates.dtype)),
                axis=1)  # [4, 3, nb_bbox, 1]
            coordinates = coordinates.transpose(
                (2, 0, 1, 3))  # [nb_bbox, 4, 3, 1]
            rotated_coords = np.matmul(rotate_matrix,
                                       coordinates)  # [nb_bbox, 4, 2, 1]
            rotated_coords = rotated_coords[..., 0]  # [nb_bbox, 4, 2]
            min_x, min_y = np.min(
                rotated_coords[:, :, 0], axis=1), np.min(
                    rotated_coords[:, :, 1], axis=1)
            max_x, max_y = np.max(
                rotated_coords[:, :, 0], axis=1), np.max(
                    rotated_coords[:, :, 1], axis=1)
            # Clip to the image; max is clipped against min so boxes can
            # degenerate to zero size but never invert.
            min_x, min_y = np.clip(
                min_x, a_min=0, a_max=w), np.clip(
                    min_y, a_min=0, a_max=h)
            max_x, max_y = np.clip(
                max_x, a_min=min_x, a_max=w), np.clip(
                    max_y, a_min=min_y, a_max=h)
            results[key] = np.stack([min_x, min_y, max_x, max_y],
                                    axis=-1).astype(results[key].dtype)
    def _rotate_masks(self,
                      results,
                      angle,
                      center=None,
                      scale=1.0,
                      fill_val=0):
        """Rotate the masks."""
        h, w, c = results['img_shape']
        for key in results.get('mask_fields', []):
            masks = results[key]
            results[key] = masks.rotate((h, w), angle, center, scale, fill_val)
    def _rotate_seg(self,
                    results,
                    angle,
                    center=None,
                    scale=1.0,
                    fill_val=255):
        """Rotate the segmentation map."""
        for key in results.get('seg_fields', []):
            seg = results[key].copy()
            results[key] = mmcv.imrotate(
                seg, angle, center, scale,
                border_value=fill_val).astype(seg.dtype)
    def _filter_invalid(self, results, min_bbox_size=0):
        """Filter bboxes and corresponding masks too small after rotate
        augmentation."""
        bbox2label, bbox2mask, _ = bbox2fields()
        for key in results.get('bbox_fields', []):
            bbox_w = results[key][:, 2] - results[key][:, 0]
            bbox_h = results[key][:, 3] - results[key][:, 1]
            valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size)
            valid_inds = np.nonzero(valid_inds)[0]
            results[key] = results[key][valid_inds]
            # label fields. e.g. gt_labels and gt_labels_ignore
            label_key = bbox2label.get(key)
            if label_key in results:
                results[label_key] = results[label_key][valid_inds]
            # mask fields, e.g. gt_masks and gt_masks_ignore
            mask_key = bbox2mask.get(key)
            if mask_key in results:
                results[mask_key] = results[mask_key][valid_inds]
    def __call__(self, results):
        """Call function to rotate images, bounding boxes, masks and semantic
        segmentation maps.
        Args:
            results (dict): Result dict from loading pipeline.
        Returns:
            dict: Rotated results.
        """
        if np.random.rand() > self.prob:
            return results
        h, w = results['img'].shape[:2]
        center = self.center
        if center is None:
            center = ((w - 1) * 0.5, (h - 1) * 0.5)
        angle = random_negative(self.angle, self.random_negative_prob)
        self._rotate_img(results, angle, center, self.scale)
        # cv2 treats positive angles as counter-clockwise, this transform
        # treats them as clockwise, hence the -angle.
        rotate_matrix = cv2.getRotationMatrix2D(center, -angle, self.scale)
        self._rotate_bboxes(results, rotate_matrix)
        self._rotate_masks(results, angle, center, self.scale, fill_val=0)
        self._rotate_seg(
            results, angle, center, self.scale, fill_val=self.seg_ignore_label)
        self._filter_invalid(results)
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(level={self.level}, '
        repr_str += f'scale={self.scale}, '
        repr_str += f'center={self.center}, '
        repr_str += f'img_fill_val={self.img_fill_val}, '
        repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
        repr_str += f'prob={self.prob}, '
        repr_str += f'max_rotate_angle={self.max_rotate_angle}, '
        repr_str += f'random_negative_prob={self.random_negative_prob})'
        return repr_str
@PIPELINES.register_module()
class Translate:
    """Translate the images, bboxes, masks and segmentation maps horizontally
    or vertically.
    Args:
        level (int | float): The level for Translate and should be in
            range [0,_MAX_LEVEL].
        prob (float): The probability for performing translation and
            should be in range [0, 1].
        img_fill_val (int | float | tuple): The filled value for image
            border. If float, the same fill value will be used for all
            the three channels of image. If tuple, the should be 3
            elements (e.g. equals the number of channels for image).
        seg_ignore_label (int): The fill value used for segmentation map.
            Note this value must equals ``ignore_label`` in ``semantic_head``
            of the corresponding config. Default 255.
        direction (str): The translate direction, either "horizontal"
            or "vertical".
        max_translate_offset (int | float): The maximum pixel's offset for
            Translate.
        random_negative_prob (float): The probability that turns the
            offset negative.
        min_size (int | float): The minimum pixel for filtering
            invalid bboxes after the translation.
    """
    def __init__(self,
                 level,
                 prob=0.5,
                 img_fill_val=128,
                 seg_ignore_label=255,
                 direction='horizontal',
                 max_translate_offset=250.,
                 random_negative_prob=0.5,
                 min_size=0):
        assert isinstance(level, (int, float)), \
            'The level must be type int or float.'
        assert 0 <= level <= _MAX_LEVEL, \
            'The level used for calculating Translate\'s offset should be ' \
            'in range [0,_MAX_LEVEL]'
        assert 0 <= prob <= 1.0, \
            'The probability of translation should be in range [0, 1].'
        # Normalize img_fill_val to a 3-tuple of floats (one per channel).
        if isinstance(img_fill_val, (float, int)):
            img_fill_val = tuple([float(img_fill_val)] * 3)
        elif isinstance(img_fill_val, tuple):
            assert len(img_fill_val) == 3, \
                'img_fill_val as tuple must have 3 elements.'
            img_fill_val = tuple([float(val) for val in img_fill_val])
        else:
            raise ValueError('img_fill_val must be type float or tuple.')
        assert np.all([0 <= val <= 255 for val in img_fill_val]), \
            'all elements of img_fill_val should between range [0,255].'
        assert direction in ('horizontal', 'vertical'), \
            'direction should be "horizontal" or "vertical".'
        assert isinstance(max_translate_offset, (int, float)), \
            'The max_translate_offset must be type int or float.'
        # the offset used for translation
        self.offset = int(level_to_value(level, max_translate_offset))
        self.level = level
        self.prob = prob
        self.img_fill_val = img_fill_val
        self.seg_ignore_label = seg_ignore_label
        self.direction = direction
        self.max_translate_offset = max_translate_offset
        self.random_negative_prob = random_negative_prob
        self.min_size = min_size
    def _translate_img(self, results, offset, direction='horizontal'):
        """Translate the image.
        Args:
            results (dict): Result dict from loading pipeline.
            offset (int | float): The offset for translate.
            direction (str): The translate direction, either "horizontal"
                or "vertical".
        """
        for key in results.get('img_fields', ['img']):
            img = results[key].copy()
            results[key] = mmcv.imtranslate(
                img, offset, direction, self.img_fill_val).astype(img.dtype)
    def _translate_bboxes(self, results, offset):
        """Shift bboxes horizontally or vertically, according to offset."""
        h, w, c = results['img_shape']
        for key in results.get('bbox_fields', []):
            min_x, min_y, max_x, max_y = np.split(
                results[key], results[key].shape[-1], axis=-1)
            if self.direction == 'horizontal':
                min_x = np.maximum(0, min_x + offset)
                max_x = np.minimum(w, max_x + offset)
            elif self.direction == 'vertical':
                min_y = np.maximum(0, min_y + offset)
                max_y = np.minimum(h, max_y + offset)
            # the boxes translated outside of image will be filtered along with
            # the corresponding masks, by invoking ``_filter_invalid``.
            results[key] = np.concatenate([min_x, min_y, max_x, max_y],
                                          axis=-1)
    def _translate_masks(self,
                         results,
                         offset,
                         direction='horizontal',
                         fill_val=0):
        """Translate masks horizontally or vertically."""
        h, w, c = results['img_shape']
        for key in results.get('mask_fields', []):
            masks = results[key]
            results[key] = masks.translate((h, w), offset, direction, fill_val)
    def _translate_seg(self,
                       results,
                       offset,
                       direction='horizontal',
                       fill_val=255):
        """Translate segmentation maps horizontally or vertically."""
        for key in results.get('seg_fields', []):
            seg = results[key].copy()
            results[key] = mmcv.imtranslate(seg, offset, direction,
                                            fill_val).astype(seg.dtype)
    def _filter_invalid(self, results, min_size=0):
        """Filter bboxes and masks too small or translated out of image."""
        bbox2label, bbox2mask, _ = bbox2fields()
        for key in results.get('bbox_fields', []):
            bbox_w = results[key][:, 2] - results[key][:, 0]
            bbox_h = results[key][:, 3] - results[key][:, 1]
            valid_inds = (bbox_w > min_size) & (bbox_h > min_size)
            valid_inds = np.nonzero(valid_inds)[0]
            results[key] = results[key][valid_inds]
            # label fields. e.g. gt_labels and gt_labels_ignore
            label_key = bbox2label.get(key)
            if label_key in results:
                results[label_key] = results[label_key][valid_inds]
            # mask fields, e.g. gt_masks and gt_masks_ignore
            mask_key = bbox2mask.get(key)
            if mask_key in results:
                results[mask_key] = results[mask_key][valid_inds]
        return results
    def __call__(self, results):
        """Call function to translate images, bounding boxes, masks and
        semantic segmentation maps.
        Args:
            results (dict): Result dict from loading pipeline.
        Returns:
            dict: Translated results.
        """
        if np.random.rand() > self.prob:
            return results
        offset = random_negative(self.offset, self.random_negative_prob)
        self._translate_img(results, offset, self.direction)
        self._translate_bboxes(results, offset)
        # fill_val defaultly 0 for BitmapMasks and None for PolygonMasks.
        self._translate_masks(results, offset, self.direction)
        # fill_val set to ``seg_ignore_label`` for the ignored value
        # of segmentation map.
        self._translate_seg(
            results, offset, self.direction, fill_val=self.seg_ignore_label)
        self._filter_invalid(results, min_size=self.min_size)
        return results
    def __repr__(self):
        # Added: every sibling transform exposes a config-style __repr__;
        # Translate was the only one missing it.
        repr_str = self.__class__.__name__
        repr_str += f'(level={self.level}, '
        repr_str += f'prob={self.prob}, '
        repr_str += f'img_fill_val={self.img_fill_val}, '
        repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
        repr_str += f'direction={self.direction}, '
        repr_str += f'max_translate_offset={self.max_translate_offset}, '
        repr_str += f'random_negative_prob={self.random_negative_prob}, '
        repr_str += f'min_size={self.min_size})'
        return repr_str
@PIPELINES.register_module()
class ColorTransform:
    """Apply Color transformation to image. The bboxes, masks, and
    segmentations are not modified.

    Args:
        level (int | float): Should be in range [0,_MAX_LEVEL].
        prob (float): The probability for performing Color transformation.
    """

    def __init__(self, level, prob=0.5):
        assert isinstance(level, (int, float)), \
            'The level must be type int or float.'
        assert 0 <= level <= _MAX_LEVEL, \
            'The level should be in range [0,_MAX_LEVEL].'
        assert 0 <= prob <= 1.0, \
            'The probability should be in range [0,1].'
        self.level = level
        self.prob = prob
        # The enhancement factor is fixed per level; compute it once.
        self.factor = enhance_level_to_value(level)

    def _adjust_color_img(self, results, factor=1.0):
        """Apply Color transformation to image."""
        # NOTE defaultly the image should be BGR format
        for key in results.get('img_fields', ['img']):
            src = results[key]
            results[key] = mmcv.adjust_color(src, factor).astype(src.dtype)

    def __call__(self, results):
        """Call function for Color transformation.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Colored results.
        """
        if np.random.rand() <= self.prob:
            self._adjust_color_img(results, self.factor)
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}' \
               f'(level={self.level}, prob={self.prob})'
@PIPELINES.register_module()
class EqualizeTransform:
"""Apply Equalize transformation to image. The bboxes, masks and
segmentations are not modified.
Args:
prob (float): The probability for performing Equalize transformation.
"""
def __init__(self, prob=0.5):
assert 0 <= prob <= 1.0, \
'The probability should be in range [0,1].'
self.prob = prob
def _imequalize(self, results):
"""Equalizes the histogram of one image."""
for key in results.get('img_fields', ['img']):
img = results[key]
results[key] = mmcv.imequalize(img).astype(img.dtype)
def __call__(self, results):
"""Call function for Equalize transformation.
Args:
results (dict): Results dict from loading pipeline.
Returns:
dict: Results after the transformation.
"""
if np.random.rand() > self.prob:
return results
self._imequalize(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(prob={self.prob})'
@PIPELINES.register_module()
class BrightnessTransform:
    """Apply Brightness transformation to image. The bboxes, masks and
    segmentations are not modified.

    Args:
        level (int | float): Should be in range [0,_MAX_LEVEL].
        prob (float): The probability for performing Brightness transformation.
    """

    def __init__(self, level, prob=0.5):
        assert isinstance(level, (int, float)), \
            'The level must be type int or float.'
        assert 0 <= level <= _MAX_LEVEL, \
            'The level should be in range [0,_MAX_LEVEL].'
        assert 0 <= prob <= 1.0, \
            'The probability should be in range [0,1].'
        self.level = level
        self.prob = prob
        # The enhancement factor is fixed per level; compute it once.
        self.factor = enhance_level_to_value(level)

    def _adjust_brightness_img(self, results, factor=1.0):
        """Adjust the brightness of image."""
        for key in results.get('img_fields', ['img']):
            src = results[key]
            results[key] = mmcv.adjust_brightness(src,
                                                  factor).astype(src.dtype)

    def __call__(self, results):
        """Call function for Brightness transformation.

        Args:
            results (dict): Results dict from loading pipeline.

        Returns:
            dict: Results after the transformation.
        """
        if np.random.rand() <= self.prob:
            self._adjust_brightness_img(results, self.factor)
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}' \
               f'(level={self.level}, prob={self.prob})'
@PIPELINES.register_module()
class ContrastTransform:
    """Apply Contrast transformation to image. The bboxes, masks and
    segmentations are not modified.

    Args:
        level (int | float): Should be in range [0,_MAX_LEVEL].
        prob (float): The probability for performing Contrast transformation.
    """

    def __init__(self, level, prob=0.5):
        assert isinstance(level, (int, float)), \
            'The level must be type int or float.'
        assert 0 <= level <= _MAX_LEVEL, \
            'The level should be in range [0,_MAX_LEVEL].'
        assert 0 <= prob <= 1.0, \
            'The probability should be in range [0,1].'
        self.level = level
        self.prob = prob
        # The enhancement factor is fixed per level; compute it once.
        self.factor = enhance_level_to_value(level)

    def _adjust_contrast_img(self, results, factor=1.0):
        """Adjust the image contrast."""
        for key in results.get('img_fields', ['img']):
            src = results[key]
            results[key] = mmcv.adjust_contrast(src, factor).astype(src.dtype)

    def __call__(self, results):
        """Call function for Contrast transformation.

        Args:
            results (dict): Results dict from loading pipeline.

        Returns:
            dict: Results after the transformation.
        """
        if np.random.rand() <= self.prob:
            self._adjust_contrast_img(results, self.factor)
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}' \
               f'(level={self.level}, prob={self.prob})'
| 36,327 | 39.772166 | 79 | py |
DDOD | DDOD-main/mmdet/datasets/pipelines/formating.py | from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..builder import PIPELINES
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Args:
        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
            be converted.

    Raises:
        TypeError: If ``data`` is of an unsupported type.
    """
    # Early-return chain ordered from most to least specific type.
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    # Strings are Sequences too, but cannot become tensors.
    is_real_sequence = isinstance(data, Sequence) and not mmcv.is_str(data)
    if is_real_sequence:
        return torch.tensor(data)
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    raise TypeError(f'type {type(data)} cannot be converted to tensor.')
@PIPELINES.register_module()
class ToTensor:
    """Convert some results to :obj:`torch.Tensor` by given keys.

    Args:
        keys (Sequence[str]): Keys that need to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert the configured entries of ``results`` to tensors.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            dict: The result dict with every key in ``self.keys`` converted
                to :obj:`torch.Tensor`.
        """
        for name in self.keys:
            results[name] = to_tensor(results[name])
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
@PIPELINES.register_module()
class ImageToTensor:
    """Convert image to :obj:`torch.Tensor` by given keys.

    The dimension order of input image is (H, W, C). The pipeline will convert
    it to (C, H, W). If only 2 dimension (H, W) is given, the output would be
    (1, H, W).

    Args:
        keys (Sequence[str]): Key of images to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert each configured image to a channel-first tensor.

        Args:
            results (dict): Result dict containing the image data to convert.

        Returns:
            dict: The result dict with images converted to
                :obj:`torch.Tensor` and transposed to (C, H, W) order.
        """
        for name in self.keys:
            img = results[name]
            if len(img.shape) < 3:
                # Grayscale (H, W) images get a trailing channel axis first.
                img = np.expand_dims(img, -1)
            results[name] = to_tensor(img.transpose(2, 0, 1))
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
@PIPELINES.register_module()
class Transpose:
    """Transpose some results by given keys.

    Args:
        keys (Sequence[str]): Keys of results to be transposed.
        order (Sequence[int]): Order of transpose.
    """

    def __init__(self, keys, order):
        self.keys = keys
        self.order = order

    def __call__(self, results):
        """Transpose the configured entries of ``results``.

        Args:
            results (dict): Result dict containing the data to transpose.

        Returns:
            dict: The result dict with the entries transposed to
                ``self.order``.
        """
        for name in self.keys:
            results[name] = results[name].transpose(self.order)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(keys={self.keys}, order={self.order})')
@PIPELINES.register_module()
class ToDataContainer:
    """Convert results to :obj:`mmcv.DataContainer` by given fields.

    Args:
        fields (Sequence[dict]): Each field is a dict like
            ``dict(key='xxx', **kwargs)``. The ``key`` in result will
            be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
            Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'),
            dict(key='gt_labels'))``.
    """

    def __init__(self,
                 fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),
                         dict(key='gt_labels'))):
        self.fields = fields

    def __call__(self, results):
        """Wrap the configured entries of ``results`` in DataContainers.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            dict: The result dict with the configured entries wrapped in
                :obj:`mmcv.DataContainer`.
        """
        for field in self.fields:
            # Copy first so popping 'key' never mutates the stored config.
            cfg = dict(field)
            name = cfg.pop('key')
            results[name] = DC(results[name], **cfg)
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(fields={self.fields})'
@PIPELINES.register_module()
class DefaultFormatBundle:
    """Default formatting bundle.

    It simplifies the pipeline of formatting common fields, including "img",
    "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg".
    These fields are formatted as follows.

    - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
    - proposals: (1)to tensor, (2)to DataContainer
    - gt_bboxes: (1)to tensor, (2)to DataContainer
    - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
    - gt_labels: (1)to tensor, (2)to DataContainer
    - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)
    - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \
        (3)to DataContainer (stack=True)
    """

    def __call__(self, results):
        """Transform and format common fields in ``results``.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            dict: The result dict with the common fields formatted with the
                default bundle.
        """
        if 'img' in results:
            img = results['img']
            # Ensure meta keys exist even without Resize/Normalize/Pad.
            results = self._add_default_meta_keys(results)
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            # (H, W, C) -> contiguous (C, H, W) for tensor conversion.
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), stack=True)
        for name in ('proposals', 'gt_bboxes', 'gt_bboxes_ignore',
                     'gt_labels'):
            if name in results:
                results[name] = DC(to_tensor(results[name]))
        if 'gt_masks' in results:
            results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)
        if 'gt_semantic_seg' in results:
            seg = results['gt_semantic_seg'][None, ...]
            results['gt_semantic_seg'] = DC(to_tensor(seg), stack=True)
        return results

    def _add_default_meta_keys(self, results):
        """Add default meta keys.

        We set default meta keys including `pad_shape`, `scale_factor` and
        `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and
        `Pad` are implemented during the whole pipeline.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            results (dict): Updated result dict with the defaults filled in.
        """
        img = results['img']
        results.setdefault('pad_shape', img.shape)
        results.setdefault('scale_factor', 1.0)
        num_channels = 1 if len(img.shape) < 3 else img.shape[2]
        # Identity normalization config when Normalize was never applied.
        default_norm = dict(
            mean=np.zeros(num_channels, dtype=np.float32),
            std=np.ones(num_channels, dtype=np.float32),
            to_rgb=False)
        results.setdefault('img_norm_cfg', default_norm)
        return results

    def __repr__(self):
        return self.__class__.__name__
@PIPELINES.register_module()
class Collect:
    """Collect data from the loader relevant to the specific task.

    This is usually the last stage of the data loader pipeline. Typically keys
    is set to some subset of "img", "proposals", "gt_bboxes",
    "gt_bboxes_ignore", "gt_labels", and/or "gt_masks".

    The "img_meta" item is always populated. The contents of the "img_meta"
    dictionary depends on "meta_keys". By default this includes:

    - "img_shape": shape of the image input to the network as a tuple \
        (h, w, c). Note that images may be zero padded on the \
        bottom/right if the batch tensor is larger than this shape.
    - "scale_factor": a float indicating the preprocessing scale
    - "flip": a boolean indicating if image flip transform was used
    - "filename": path to the image file
    - "ori_shape": original shape of the image as a tuple (h, w, c)
    - "pad_shape": image shape after padding
    - "img_norm_cfg": a dict of normalization information:

        - mean - per channel mean subtraction
        - std - per channel std divisor
        - to_rgb - bool indicating if bgr was converted to rgb

    Args:
        keys (Sequence[str]): Keys of results to be collected in ``data``.
        meta_keys (Sequence[str], optional): Meta keys to be converted to
            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
            Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
            'pad_shape', 'scale_factor', 'flip', 'flip_direction',
            'img_norm_cfg')``
    """

    def __init__(self,
                 keys,
                 meta_keys=('filename', 'ori_filename', 'ori_shape',
                            'img_shape', 'pad_shape', 'scale_factor', 'flip',
                            'flip_direction', 'img_norm_cfg')):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, results):
        """Collect ``self.keys`` plus an ``img_metas`` DataContainer.

        Args:
            results (dict): Result dict containing the data to collect.

        Returns:
            dict: A new dict with the keys in ``self.keys`` and
                ``img_metas`` (a cpu-only :obj:`mmcv.DataContainer`).
        """
        img_meta = {name: results[name] for name in self.meta_keys}
        data = {'img_metas': DC(img_meta, cpu_only=True)}
        for name in self.keys:
            data[name] = results[name]
        return data

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(keys={self.keys}, meta_keys={self.meta_keys})')
@PIPELINES.register_module()
class WrapFieldsToLists:
"""Wrap fields of the data dictionary into lists for evaluation.
This class can be used as a last step of a test or validation
pipeline for single image evaluation or inference.
Example:
>>> test_pipeline = [
>>> dict(type='LoadImageFromFile'),
>>> dict(type='Normalize',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True),
>>> dict(type='Pad', size_divisor=32),
>>> dict(type='ImageToTensor', keys=['img']),
>>> dict(type='Collect', keys=['img']),
>>> dict(type='WrapFieldsToLists')
>>> ]
"""
def __call__(self, results):
"""Call function to wrap fields into lists.
Args:
results (dict): Result dict contains the data to wrap.
Returns:
dict: The result dict where value of ``self.keys`` are wrapped \
into list.
"""
# Wrap dict fields into lists
for key, val in results.items():
results[key] = [val]
return results
def __repr__(self):
return f'{self.__class__.__name__}()'
| 11,981 | 31.827397 | 79 | py |
DDOD | DDOD-main/mmdet/datasets/pipelines/__init__.py | from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formating import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, Normalize,
Pad, PhotoMetricDistortion, RandomCenterCropPad,
RandomCrop, RandomFlip, RandomShift, Resize,
SegRescale)
# Public API of the pipelines package; keep in sync with the imports above.
__all__ = [
    'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
    'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
    'LoadImageFromFile', 'LoadImageFromWebcam',
    'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
    'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
    'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
    'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
    'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
    'ContrastTransform', 'Translate', 'RandomShift'
]
| 1,482 | 53.925926 | 79 | py |
DDOD | DDOD-main/mmdet/datasets/pipelines/transforms.py | import copy
import inspect
import mmcv
import numpy as np
from numpy import random
from mmdet.core import PolygonMasks
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from ..builder import PIPELINES
# Optional dependency: fall back to None so the missing package only
# matters if a transform that needs it is actually used.
try:
    from imagecorruptions import corrupt
except ImportError:
    corrupt = None
# Optional dependency for the Albu transform (albumentations-based).
try:
    import albumentations
    from albumentations import Compose
except ImportError:
    albumentations = None
    Compose = None
@PIPELINES.register_module()
class Resize:
    """Resize images & bbox & mask.

    This transform resizes the input image to some scale. Bboxes and masks are
    then resized with the same scale factor. If the input dict contains the key
    "scale", then the scale in the input dict is used, otherwise the specified
    scale in the init method is used. If the input dict contains the key
    "scale_factor" (if MultiScaleFlipAug does not give img_scale but
    scale_factor), the actual scale will be computed by image shape and
    scale_factor.

    `img_scale` can either be a tuple (single-scale) or a list of tuple
    (multi-scale). There are 3 multiscale modes:

    - ``ratio_range is not None``: randomly sample a ratio from the ratio \
      range and multiply it with the image scale.
    - ``ratio_range is None`` and ``multiscale_mode == "range"``: randomly \
      sample a scale from the multiscale range.
    - ``ratio_range is None`` and ``multiscale_mode == "value"``: randomly \
      sample a scale from multiple scales.

    Args:
        img_scale (tuple or list[tuple]): Images scales for resizing.
        multiscale_mode (str): Either "range" or "value".
        ratio_range (tuple[float]): (min_ratio, max_ratio)
        keep_ratio (bool): Whether to keep the aspect ratio when resizing the
            image.
        bbox_clip_border (bool, optional): Whether clip the objects outside
            the border of the image. Defaults to True.
        backend (str): Image resize backend, choices are 'cv2' and 'pillow'.
            These two backends generates slightly different results. Defaults
            to 'cv2'.
        override (bool, optional): Whether to override `scale` and
            `scale_factor` so as to call resize twice. Default False. If True,
            after the first resizing, the existed `scale` and `scale_factor`
            will be ignored so the second resizing can be allowed.
            This option is a work-around for multiple times of resize in DETR.
            Defaults to False.
    """

    def __init__(self,
                 img_scale=None,
                 multiscale_mode='range',
                 ratio_range=None,
                 keep_ratio=True,
                 bbox_clip_border=True,
                 backend='cv2',
                 override=False):
        if img_scale is None:
            self.img_scale = None
        else:
            if isinstance(img_scale, list):
                self.img_scale = img_scale
            else:
                self.img_scale = [img_scale]
            assert mmcv.is_list_of(self.img_scale, tuple)
        if ratio_range is not None:
            # mode 1: given a scale and a range of image ratio
            assert len(self.img_scale) == 1
        else:
            # mode 2: given multiple scales or a range of scales
            assert multiscale_mode in ['value', 'range']
        self.backend = backend
        self.multiscale_mode = multiscale_mode
        self.ratio_range = ratio_range
        self.keep_ratio = keep_ratio
        # TODO: refactor the override option in Resize
        self.override = override
        self.bbox_clip_border = bbox_clip_border

    @staticmethod
    def random_select(img_scales):
        """Randomly select an img_scale from given candidates.

        Args:
            img_scales (list[tuple]): Images scales for selection.

        Returns:
            (tuple, int): Returns a tuple ``(img_scale, scale_dix)``, \
                where ``img_scale`` is the selected image scale and \
                ``scale_idx`` is the selected index in the given candidates.
        """
        assert mmcv.is_list_of(img_scales, tuple)
        scale_idx = np.random.randint(len(img_scales))
        img_scale = img_scales[scale_idx]
        return img_scale, scale_idx

    @staticmethod
    def random_sample(img_scales):
        """Randomly sample an img_scale when ``multiscale_mode=='range'``.

        Args:
            img_scales (list[tuple]): Images scale range for sampling.
                There must be two tuples in img_scales, which specify the lower
                and upper bound of image scales.

        Returns:
            (tuple, None): Returns a tuple ``(img_scale, None)``, where \
                ``img_scale`` is sampled scale and None is just a placeholder \
                to be consistent with :func:`random_select`.
        """
        assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
        img_scale_long = [max(s) for s in img_scales]
        img_scale_short = [min(s) for s in img_scales]
        # Sample long and short edges independently within their bounds.
        long_edge = np.random.randint(
            min(img_scale_long),
            max(img_scale_long) + 1)
        short_edge = np.random.randint(
            min(img_scale_short),
            max(img_scale_short) + 1)
        img_scale = (long_edge, short_edge)
        return img_scale, None

    @staticmethod
    def random_sample_ratio(img_scale, ratio_range):
        """Randomly sample an img_scale when ``ratio_range`` is specified.

        A ratio will be randomly sampled from the range specified by
        ``ratio_range``. Then it would be multiplied with ``img_scale`` to
        generate sampled scale.

        Args:
            img_scale (tuple): Images scale base to multiply with ratio.
            ratio_range (tuple[float]): The minimum and maximum ratio to scale
                the ``img_scale``.

        Returns:
            (tuple, None): Returns a tuple ``(scale, None)``, where \
                ``scale`` is sampled ratio multiplied with ``img_scale`` and \
                None is just a placeholder to be consistent with \
                :func:`random_select`.
        """
        assert isinstance(img_scale, tuple) and len(img_scale) == 2
        min_ratio, max_ratio = ratio_range
        assert min_ratio <= max_ratio
        ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
        scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
        return scale, None

    def _random_scale(self, results):
        """Randomly sample an img_scale according to ``ratio_range`` and
        ``multiscale_mode``.

        If ``ratio_range`` is specified, a ratio will be sampled and be
        multiplied with ``img_scale``.
        If multiple scales are specified by ``img_scale``, a scale will be
        sampled according to ``multiscale_mode``.
        Otherwise, single scale will be used.

        Args:
            results (dict): Result dict from :obj:`dataset`.

        Returns:
            dict: Two new keys 'scale` and 'scale_idx` are added into \
                ``results``, which would be used by subsequent pipelines.
        """
        if self.ratio_range is not None:
            scale, scale_idx = self.random_sample_ratio(
                self.img_scale[0], self.ratio_range)
        elif len(self.img_scale) == 1:
            scale, scale_idx = self.img_scale[0], 0
        elif self.multiscale_mode == 'range':
            scale, scale_idx = self.random_sample(self.img_scale)
        elif self.multiscale_mode == 'value':
            scale, scale_idx = self.random_select(self.img_scale)
        else:
            raise NotImplementedError
        results['scale'] = scale
        results['scale_idx'] = scale_idx

    def _resize_img(self, results):
        """Resize images with ``results['scale']``."""
        for key in results.get('img_fields', ['img']):
            if self.keep_ratio:
                img, scale_factor = mmcv.imrescale(
                    results[key],
                    results['scale'],
                    return_scale=True,
                    backend=self.backend)
                # the w_scale and h_scale has minor difference
                # a real fix should be done in the mmcv.imrescale in the future
                new_h, new_w = img.shape[:2]
                h, w = results[key].shape[:2]
                w_scale = new_w / w
                h_scale = new_h / h
            else:
                img, w_scale, h_scale = mmcv.imresize(
                    results[key],
                    results['scale'],
                    return_scale=True,
                    backend=self.backend)
            results[key] = img

            scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
                                    dtype=np.float32)
            results['img_shape'] = img.shape
            # in case that there is no padding
            results['pad_shape'] = img.shape
            results['scale_factor'] = scale_factor
            results['keep_ratio'] = self.keep_ratio

    def _resize_bboxes(self, results):
        """Resize bounding boxes with ``results['scale_factor']``."""
        for key in results.get('bbox_fields', []):
            bboxes = results[key] * results['scale_factor']
            if self.bbox_clip_border:
                img_shape = results['img_shape']
                bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
                bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
            results[key] = bboxes

    def _resize_masks(self, results):
        """Resize masks with ``results['scale']``"""
        for key in results.get('mask_fields', []):
            if results[key] is None:
                continue
            if self.keep_ratio:
                results[key] = results[key].rescale(results['scale'])
            else:
                results[key] = results[key].resize(results['img_shape'][:2])

    def _resize_seg(self, results):
        """Resize semantic segmentation map with ``results['scale']``."""
        for key in results.get('seg_fields', []):
            if self.keep_ratio:
                gt_seg = mmcv.imrescale(
                    results[key],
                    results['scale'],
                    interpolation='nearest',
                    backend=self.backend)
            else:
                gt_seg = mmcv.imresize(
                    results[key],
                    results['scale'],
                    interpolation='nearest',
                    backend=self.backend)
            # Bug fix: write back under the iterated key instead of the
            # hard-coded 'gt_semantic_seg', so every entry of 'seg_fields'
            # is actually resized.
            results[key] = gt_seg

    def __call__(self, results):
        """Call function to resize images, bounding boxes, masks, semantic
        segmentation map.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \
                'keep_ratio' keys are added into result dict.
        """
        if 'scale' not in results:
            if 'scale_factor' in results:
                img_shape = results['img'].shape[:2]
                scale_factor = results['scale_factor']
                assert isinstance(scale_factor, float)
                results['scale'] = tuple(
                    [int(x * scale_factor) for x in img_shape][::-1])
            else:
                self._random_scale(results)
        else:
            if not self.override:
                assert 'scale_factor' not in results, (
                    'scale and scale_factor cannot be both set.')
            else:
                results.pop('scale')
                if 'scale_factor' in results:
                    results.pop('scale_factor')
                self._random_scale(results)

        self._resize_img(results)
        self._resize_bboxes(results)
        self._resize_masks(results)
        self._resize_seg(results)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(img_scale={self.img_scale}, '
        repr_str += f'multiscale_mode={self.multiscale_mode}, '
        repr_str += f'ratio_range={self.ratio_range}, '
        repr_str += f'keep_ratio={self.keep_ratio}, '
        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
        return repr_str
@PIPELINES.register_module()
class RandomFlip:
    """Flip the image & bbox & mask.

    If the input dict contains the key "flip", then the flag will be used,
    otherwise it will be randomly decided by a ratio specified in the init
    method.

    When random flip is enabled, ``flip_ratio``/``direction`` can either be a
    float/string or tuple of float/string. There are 3 flip modes:

    - ``flip_ratio`` is float, ``direction`` is string: the image will be
        ``direction``ly flipped with probability of ``flip_ratio`` .
        E.g., ``flip_ratio=0.5``, ``direction='horizontal'``,
        then image will be horizontally flipped with probability of 0.5.
    - ``flip_ratio`` is float, ``direction`` is list of string: the image will
        be ``direction[i]``ly flipped with probability of
        ``flip_ratio/len(direction)``.
        E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``,
        then image will be horizontally flipped with probability of 0.25,
        vertically with probability of 0.25.
    - ``flip_ratio`` is list of float, ``direction`` is list of string:
        given ``len(flip_ratio) == len(direction)``, the image will
        be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``.
        E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal',
        'vertical']``, then image will be horizontally flipped with probability
        of 0.3, vertically with probability of 0.5

    Args:
        flip_ratio (float | list[float], optional): The flipping probability.
            Default: None. NOTE(review): if left as None and 'flip' is not
            already in ``results``, ``__call__`` raises a TypeError on
            ``1 - self.flip_ratio`` — callers appear expected to always
            configure a ratio; confirm against usage.
        direction(str | list[str], optional): The flipping direction. Options
            are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'.
            If input is a list, the length must equal ``flip_ratio``. Each
            element in ``flip_ratio`` indicates the flip probability of
            corresponding direction.
    """

    def __init__(self, flip_ratio=None, direction='horizontal'):
        # Validate flip_ratio: a list must sum to <= 1, a float must be a
        # probability; None defers the decision to the results dict.
        if isinstance(flip_ratio, list):
            assert mmcv.is_list_of(flip_ratio, float)
            assert 0 <= sum(flip_ratio) <= 1
        elif isinstance(flip_ratio, float):
            assert 0 <= flip_ratio <= 1
        elif flip_ratio is None:
            pass
        else:
            raise ValueError('flip_ratios must be None, float, '
                             'or list of float')
        self.flip_ratio = flip_ratio

        valid_directions = ['horizontal', 'vertical', 'diagonal']
        if isinstance(direction, str):
            assert direction in valid_directions
        elif isinstance(direction, list):
            assert mmcv.is_list_of(direction, str)
            assert set(direction).issubset(set(valid_directions))
        else:
            raise ValueError('direction must be either str or list of str')
        self.direction = direction

        # List ratios pair element-wise with list directions.
        if isinstance(flip_ratio, list):
            assert len(self.flip_ratio) == len(self.direction)

    def bbox_flip(self, bboxes, img_shape, direction):
        """Flip bboxes horizontally, vertically or diagonally.

        Args:
            bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k)
            img_shape (tuple[int]): Image shape (height, width)
            direction (str): Flip direction. Options are 'horizontal',
                'vertical', 'diagonal'.

        Returns:
            numpy.ndarray: Flipped bounding boxes.
        """
        assert bboxes.shape[-1] % 4 == 0
        flipped = bboxes.copy()
        # A flip mirrors coordinates and swaps the min/max corners, hence
        # x1' = w - x2 and x2' = w - x1 (likewise for y).
        if direction == 'horizontal':
            w = img_shape[1]
            flipped[..., 0::4] = w - bboxes[..., 2::4]
            flipped[..., 2::4] = w - bboxes[..., 0::4]
        elif direction == 'vertical':
            h = img_shape[0]
            flipped[..., 1::4] = h - bboxes[..., 3::4]
            flipped[..., 3::4] = h - bboxes[..., 1::4]
        elif direction == 'diagonal':
            # Diagonal = horizontal + vertical flip combined.
            w = img_shape[1]
            h = img_shape[0]
            flipped[..., 0::4] = w - bboxes[..., 2::4]
            flipped[..., 1::4] = h - bboxes[..., 3::4]
            flipped[..., 2::4] = w - bboxes[..., 0::4]
            flipped[..., 3::4] = h - bboxes[..., 1::4]
        else:
            raise ValueError(f"Invalid flipping direction '{direction}'")
        return flipped

    def __call__(self, results):
        """Call function to flip bounding boxes, masks, semantic segmentation
        maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Flipped results, 'flip', 'flip_direction' keys are added \
                into result dict.
        """
        if 'flip' not in results:
            if isinstance(self.direction, list):
                # None means non-flip
                direction_list = self.direction + [None]
            else:
                # None means non-flip
                direction_list = [self.direction, None]

            if isinstance(self.flip_ratio, list):
                non_flip_ratio = 1 - sum(self.flip_ratio)
                flip_ratio_list = self.flip_ratio + [non_flip_ratio]
            else:
                non_flip_ratio = 1 - self.flip_ratio
                # exclude non-flip
                # A scalar ratio is split evenly across all flip directions.
                single_ratio = self.flip_ratio / (len(direction_list) - 1)
                flip_ratio_list = [single_ratio] * (len(direction_list) -
                                                    1) + [non_flip_ratio]

            # One draw decides both whether and how to flip.
            cur_dir = np.random.choice(direction_list, p=flip_ratio_list)

            results['flip'] = cur_dir is not None
        if 'flip_direction' not in results:
            results['flip_direction'] = cur_dir
        if results['flip']:
            # flip image
            for key in results.get('img_fields', ['img']):
                results[key] = mmcv.imflip(
                    results[key], direction=results['flip_direction'])
            # flip bboxes
            for key in results.get('bbox_fields', []):
                results[key] = self.bbox_flip(results[key],
                                              results['img_shape'],
                                              results['flip_direction'])
            # flip masks
            for key in results.get('mask_fields', []):
                results[key] = results[key].flip(results['flip_direction'])

            # flip segs
            for key in results.get('seg_fields', []):
                results[key] = mmcv.imflip(
                    results[key], direction=results['flip_direction'])
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})'
@PIPELINES.register_module()
class RandomShift:
    """Shift the image and box given shift pixels and probability.

    Args:
        shift_ratio (float): Probability of shifts. Default 0.5.
        max_shift_px (int): The max pixels for shifting. Default 32.
        filter_thr_px (int): The width and height threshold for filtering.
            The bbox and the rest of the targets below the width and
            height threshold will be filtered. Default 1.
    """

    def __init__(self, shift_ratio=0.5, max_shift_px=32, filter_thr_px=1):
        assert 0 <= shift_ratio <= 1
        assert max_shift_px >= 0
        self.shift_ratio = shift_ratio
        self.max_shift_px = max_shift_px
        self.filter_thr_px = int(filter_thr_px)
        # The key correspondence from bboxes to labels.
        self.bbox2label = {
            'gt_bboxes': 'gt_labels',
            'gt_bboxes_ignore': 'gt_labels_ignore'
        }

    def __call__(self, results):
        """Call function to random shift images, bounding boxes.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Shift results.
        """
        if random.random() < self.shift_ratio:
            img_shape = results['img'].shape[:2]

            # NOTE(review): numpy's randint has an exclusive upper bound, so
            # the sampled shift lies in [-max_shift_px, max_shift_px - 1].
            random_shift_x = random.randint(-self.max_shift_px,
                                            self.max_shift_px)
            random_shift_y = random.randint(-self.max_shift_px,
                                            self.max_shift_px)
            new_x = max(0, random_shift_x)
            orig_x = max(0, -random_shift_x)
            new_y = max(0, random_shift_y)
            orig_y = max(0, -random_shift_y)

            # TODO: support mask and semantic segmentation maps.
            for key in results.get('bbox_fields', []):
                bboxes = results[key].copy()
                bboxes[..., 0::2] += random_shift_x
                bboxes[..., 1::2] += random_shift_y

                # clip border
                bboxes[..., 0::2] = np.clip(bboxes[..., 0::2], 0, img_shape[1])
                bboxes[..., 1::2] = np.clip(bboxes[..., 1::2], 0, img_shape[0])

                # remove invalid bboxes
                bbox_w = bboxes[..., 2] - bboxes[..., 0]
                bbox_h = bboxes[..., 3] - bboxes[..., 1]
                valid_inds = (bbox_w > self.filter_thr_px) & (
                    bbox_h > self.filter_thr_px)
                # If the shift does not contain any gt-bbox area, skip this
                # image.
                if key == 'gt_bboxes' and not valid_inds.any():
                    return results
                bboxes = bboxes[valid_inds]
                results[key] = bboxes

                # label fields. e.g. gt_labels and gt_labels_ignore
                label_key = self.bbox2label.get(key)
                if label_key in results:
                    results[label_key] = results[label_key][valid_inds]

            for key in results.get('img_fields', ['img']):
                img = results[key]
                new_img = np.zeros_like(img)
                img_h, img_w = img.shape[:2]
                new_h = img_h - np.abs(random_shift_y)
                new_w = img_w - np.abs(random_shift_x)
                new_img[new_y:new_y + new_h, new_x:new_x + new_w] \
                    = img[orig_y:orig_y + new_h, orig_x:orig_x + new_w]
                results[key] = new_img

        return results

    def __repr__(self):
        # Bug fix: the previous repr emitted only 'max_shift_px' followed by
        # a dangling comma and never closed the parenthesis.
        repr_str = self.__class__.__name__
        repr_str += f'(shift_ratio={self.shift_ratio}, '
        repr_str += f'max_shift_px={self.max_shift_px}, '
        repr_str += f'filter_thr_px={self.filter_thr_px})'
        return repr_str
@PIPELINES.register_module()
class Pad:
    """Pad the image & mask.

    There are two padding modes: (1) pad to a fixed size and (2) pad to the
    minimum size that is divisible by some number.
    Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor".

    Args:
        size (tuple, optional): Fixed padding size.
        size_divisor (int, optional): The divisor of padded size.
        pad_val (float, optional): Padding value, 0 by default.
    """

    def __init__(self, size=None, size_divisor=None, pad_val=0):
        self.size = size
        self.size_divisor = size_divisor
        self.pad_val = pad_val
        # Exactly one of ``size`` and ``size_divisor`` must be provided.
        assert size is not None or size_divisor is not None
        assert size is None or size_divisor is None

    def _pad_img(self, results):
        """Pad every image field to ``self.size`` or the next multiple of
        ``self.size_divisor``."""
        for name in results.get('img_fields', ['img']):
            if self.size is not None:
                padded_img = mmcv.impad(
                    results[name], shape=self.size, pad_val=self.pad_val)
            elif self.size_divisor is not None:
                padded_img = mmcv.impad_to_multiple(
                    results[name], self.size_divisor, pad_val=self.pad_val)
            results[name] = padded_img
        results['pad_shape'] = padded_img.shape
        results['pad_fixed_size'] = self.size
        results['pad_size_divisor'] = self.size_divisor

    def _pad_masks(self, results):
        """Pad every mask field to ``results['pad_shape']``."""
        target_shape = results['pad_shape'][:2]
        for name in results.get('mask_fields', []):
            results[name] = results[name].pad(
                target_shape, pad_val=self.pad_val)

    def _pad_seg(self, results):
        """Pad every semantic segmentation map to ``results['pad_shape']``."""
        for name in results.get('seg_fields', []):
            results[name] = mmcv.impad(
                results[name], shape=results['pad_shape'][:2])

    def __call__(self, results):
        """Pad images, masks and semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Updated result dict.
        """
        self._pad_img(results)
        self._pad_masks(results)
        self._pad_seg(results)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(size={self.size}, '
                f'size_divisor={self.size_divisor}, '
                f'pad_val={self.pad_val})')
@PIPELINES.register_module()
class Normalize:
    """Normalize the image.

    Added key is "img_norm_cfg".

    Args:
        mean (sequence): Mean values of 3 channels.
        std (sequence): Std values of 3 channels.
        to_rgb (bool): Whether to convert the image from BGR to RGB,
            default is true.
    """

    def __init__(self, mean, std, to_rgb=True):
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)
        self.to_rgb = to_rgb

    def __call__(self, results):
        """Normalize every image field and record the normalization config.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Normalized results; the 'img_norm_cfg' key is added into
                the result dict.
        """
        for name in results.get('img_fields', ['img']):
            results[name] = mmcv.imnormalize(
                results[name], self.mean, self.std, self.to_rgb)
        results['img_norm_cfg'] = dict(
            mean=self.mean, std=self.std, to_rgb=self.to_rgb)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(mean={self.mean}, '
                f'std={self.std}, to_rgb={self.to_rgb})')
@PIPELINES.register_module()
class RandomCrop:
    """Random crop the image & bboxes & masks.

    The absolute `crop_size` is sampled based on `crop_type` and `image_size`,
    then the cropped results are generated.

    Args:
        crop_size (tuple): The relative ratio or absolute pixels of
            height and width.
        crop_type (str, optional): one of "relative_range", "relative",
            "absolute", "absolute_range". "relative" randomly crops
            (h * crop_size[0], w * crop_size[1]) part from an input of size
            (h, w). "relative_range" uniformly samples relative crop size from
            range [crop_size[0], 1] and [crop_size[1], 1] for height and width
            respectively. "absolute" crops from an input with absolute size
            (crop_size[0], crop_size[1]). "absolute_range" uniformly samples
            crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w
            in range [crop_size[0], min(w, crop_size[1])]. Default "absolute".
        allow_negative_crop (bool, optional): Whether to allow a crop that does
            not contain any bbox area. Default False.
        bbox_clip_border (bool, optional): Whether clip the objects outside
            the border of the image. Defaults to True.

    Note:
        - If the image is smaller than the absolute crop size, return the
            original image.
        - The keys for bboxes, labels and masks must be aligned. That is,
          `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and
          `gt_bboxes_ignore` corresponds to `gt_labels_ignore` and
          `gt_masks_ignore`.
        - If the crop does not contain any gt-bbox region and
          `allow_negative_crop` is set to False, skip this image.
    """
    def __init__(self,
                 crop_size,
                 crop_type='absolute',
                 allow_negative_crop=False,
                 bbox_clip_border=True):
        # Validate eagerly so a bad config fails at pipeline build time,
        # not mid-training.
        if crop_type not in [
                'relative_range', 'relative', 'absolute', 'absolute_range'
        ]:
            raise ValueError(f'Invalid crop_type {crop_type}.')
        if crop_type in ['absolute', 'absolute_range']:
            assert crop_size[0] > 0 and crop_size[1] > 0
            assert isinstance(crop_size[0], int) and isinstance(
                crop_size[1], int)
        else:
            assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1
        self.crop_size = crop_size
        self.crop_type = crop_type
        self.allow_negative_crop = allow_negative_crop
        self.bbox_clip_border = bbox_clip_border
        # The key correspondence from bboxes to labels and masks, used to
        # keep gt_labels/gt_masks filtered in sync with gt_bboxes.
        self.bbox2label = {
            'gt_bboxes': 'gt_labels',
            'gt_bboxes_ignore': 'gt_labels_ignore'
        }
        self.bbox2mask = {
            'gt_bboxes': 'gt_masks',
            'gt_bboxes_ignore': 'gt_masks_ignore'
        }
    def _crop_data(self, results, crop_size, allow_negative_crop):
        """Function to randomly crop images, bounding boxes, masks, semantic
        segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.
            crop_size (tuple): Expected absolute size after cropping, (h, w).
            allow_negative_crop (bool): Whether to allow a crop that does not
                contain any bbox area. Default to False.

        Returns:
            dict | None: Randomly cropped results ('img_shape' updated), or
                ``None`` when the crop keeps no gt bbox and
                ``allow_negative_crop`` is False.
        """
        assert crop_size[0] > 0 and crop_size[1] > 0
        # NOTE(review): a fresh random offset is drawn for EACH entry of
        # img_fields, and only the last offset is used to shift bboxes/masks
        # below — safe for the usual single-'img' case; confirm before using
        # multiple image fields.
        for key in results.get('img_fields', ['img']):
            img = results[key]
            margin_h = max(img.shape[0] - crop_size[0], 0)
            margin_w = max(img.shape[1] - crop_size[1], 0)
            offset_h = np.random.randint(0, margin_h + 1)
            offset_w = np.random.randint(0, margin_w + 1)
            crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]
            crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]
            # crop the image
            img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
            img_shape = img.shape
            results[key] = img
        results['img_shape'] = img_shape
        # crop bboxes accordingly and clip to the image boundary
        for key in results.get('bbox_fields', []):
            # e.g. gt_bboxes and gt_bboxes_ignore
            bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],
                                   dtype=np.float32)
            bboxes = results[key] - bbox_offset
            if self.bbox_clip_border:
                bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
                bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
            # Keep only boxes with positive width AND height after clipping.
            valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (
                bboxes[:, 3] > bboxes[:, 1])
            # If the crop does not contain any gt-bbox area and
            # allow_negative_crop is False, skip this image.
            if (key == 'gt_bboxes' and not valid_inds.any()
                    and not allow_negative_crop):
                return None
            results[key] = bboxes[valid_inds, :]
            # label fields. e.g. gt_labels and gt_labels_ignore
            label_key = self.bbox2label.get(key)
            if label_key in results:
                results[label_key] = results[label_key][valid_inds]
            # mask fields, e.g. gt_masks and gt_masks_ignore
            mask_key = self.bbox2mask.get(key)
            if mask_key in results:
                results[mask_key] = results[mask_key][
                    valid_inds.nonzero()[0]].crop(
                        np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))
        # crop semantic seg
        for key in results.get('seg_fields', []):
            results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]
        return results
    def _get_crop_size(self, image_size):
        """Randomly generates the absolute crop size based on `crop_type` and
        `image_size`.

        Args:
            image_size (tuple): (h, w).

        Returns:
            crop_size (tuple): (crop_h, crop_w) in absolute pixels.
        """
        h, w = image_size
        if self.crop_type == 'absolute':
            # Clamp to the image so small images are returned unchanged.
            return (min(self.crop_size[0], h), min(self.crop_size[1], w))
        elif self.crop_type == 'absolute_range':
            assert self.crop_size[0] <= self.crop_size[1]
            crop_h = np.random.randint(
                min(h, self.crop_size[0]),
                min(h, self.crop_size[1]) + 1)
            crop_w = np.random.randint(
                min(w, self.crop_size[0]),
                min(w, self.crop_size[1]) + 1)
            return crop_h, crop_w
        elif self.crop_type == 'relative':
            crop_h, crop_w = self.crop_size
            # + 0.5 rounds to the nearest pixel.
            return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
        elif self.crop_type == 'relative_range':
            # Sample a relative size uniformly in [crop_size, 1] per axis.
            crop_size = np.asarray(self.crop_size, dtype=np.float32)
            crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)
            return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
    def __call__(self, results):
        """Call function to randomly crop images, bounding boxes, masks,
        semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict | None: Randomly cropped results ('img_shape' updated), or
                ``None`` when the crop is rejected (no gt bbox kept and
                negative crops are not allowed).
        """
        image_size = results['img'].shape[:2]
        crop_size = self._get_crop_size(image_size)
        results = self._crop_data(results, crop_size, self.allow_negative_crop)
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(crop_size={self.crop_size}, '
        repr_str += f'crop_type={self.crop_type}, '
        repr_str += f'allow_negative_crop={self.allow_negative_crop}, '
        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
        return repr_str
@PIPELINES.register_module()
class SegRescale:
    """Rescale semantic segmentation maps.

    Args:
        scale_factor (float): The scale factor of the final output.
        backend (str): Image rescale backend, choices are 'cv2' and 'pillow'.
            These two backends generates slightly different results. Defaults
            to 'cv2'.
    """
    def __init__(self, scale_factor=1, backend='cv2'):
        self.scale_factor = scale_factor
        self.backend = backend
    def __call__(self, results):
        """Call function to scale the semantic segmentation map.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with semantic segmentation map scaled.
        """
        for key in results.get('seg_fields', []):
            if self.scale_factor != 1:
                # Nearest-neighbour keeps label ids intact (no interpolation
                # blending class values at region boundaries).
                results[key] = mmcv.imrescale(
                    results[key],
                    self.scale_factor,
                    interpolation='nearest',
                    backend=self.backend)
        return results
    def __repr__(self):
        # Fix: also report `backend`, so repr reflects the full config like
        # the other transforms in this file.
        return (self.__class__.__name__ +
                f'(scale_factor={self.scale_factor}, '
                f'backend={self.backend})')
@PIPELINES.register_module()
class PhotoMetricDistortion:
    """Apply photometric distortion to image sequentially, every transformation
    is applied with a probability of 0.5. The position of random contrast is in
    second or second to last.

    1. random brightness
    2. random contrast (mode 0)
    3. convert color from BGR to HSV
    4. random saturation
    5. random hue
    6. convert color from HSV to BGR
    7. random contrast (mode 1)
    8. randomly swap channels

    Args:
        brightness_delta (int): delta of brightness.
        contrast_range (tuple): range of contrast.
        saturation_range (tuple): range of saturation.
        hue_delta (int): delta of hue.
    """
    def __init__(self,
                 brightness_delta=32,
                 contrast_range=(0.5, 1.5),
                 saturation_range=(0.5, 1.5),
                 hue_delta=18):
        self.brightness_delta = brightness_delta
        self.contrast_lower, self.contrast_upper = contrast_range
        self.saturation_lower, self.saturation_upper = saturation_range
        self.hue_delta = hue_delta
    def __call__(self, results):
        """Call function to perform photometric distortion on images.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with images distorted.
        """
        if 'img_fields' in results:
            assert results['img_fields'] == ['img'], \
                'Only single img_fields is allowed'
        img = results['img']
        # float32 is required because brightness/contrast math below would
        # overflow or truncate on uint8 images.
        assert img.dtype == np.float32, \
            'PhotoMetricDistortion needs the input image of dtype np.float32,'\
            ' please set "to_float32=True" in "LoadImageFromFile" pipeline'
        # random brightness
        # `random.randint(2)` draws from {0, 1}, i.e. a 50% coin flip
        # (numpy-style randint with exclusive upper bound — the single-arg
        # call only exists in numpy's API).
        if random.randint(2):
            delta = random.uniform(-self.brightness_delta,
                                   self.brightness_delta)
            img += delta
        # mode == 0 --> do random contrast first
        # mode == 1 --> do random contrast last
        mode = random.randint(2)
        if mode == 1:
            if random.randint(2):
                alpha = random.uniform(self.contrast_lower,
                                       self.contrast_upper)
                img *= alpha
        # convert color from BGR to HSV
        img = mmcv.bgr2hsv(img)
        # random saturation
        if random.randint(2):
            img[..., 1] *= random.uniform(self.saturation_lower,
                                          self.saturation_upper)
        # random hue
        if random.randint(2):
            img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
            # Hue is an angle in degrees; wrap it back into [0, 360).
            img[..., 0][img[..., 0] > 360] -= 360
            img[..., 0][img[..., 0] < 0] += 360
        # convert color from HSV to BGR
        img = mmcv.hsv2bgr(img)
        # random contrast
        if mode == 0:
            if random.randint(2):
                alpha = random.uniform(self.contrast_lower,
                                       self.contrast_upper)
                img *= alpha
        # randomly swap channels
        if random.randint(2):
            img = img[..., random.permutation(3)]
        results['img'] = img
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(\nbrightness_delta={self.brightness_delta},\n'
        repr_str += 'contrast_range='
        repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n'
        repr_str += 'saturation_range='
        repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n'
        repr_str += f'hue_delta={self.hue_delta})'
        return repr_str
@PIPELINES.register_module()
class Expand:
    """Random expand the image & bboxes.

    Randomly place the original image on a canvas of 'ratio' x original image
    size filled with mean values. The ratio is in the range of ratio_range.

    Args:
        mean (tuple): mean value of dataset.
        to_rgb (bool): if need to convert the order of mean to align with RGB.
        ratio_range (tuple): range of expand ratio.
        seg_ignore_label (int | None): fill value for the expanded area of
            semantic segmentation maps. Default None.
        prob (float): probability of applying this transformation
    """
    def __init__(self,
                 mean=(0, 0, 0),
                 to_rgb=True,
                 ratio_range=(1, 4),
                 seg_ignore_label=None,
                 prob=0.5):
        self.to_rgb = to_rgb
        self.ratio_range = ratio_range
        if to_rgb:
            # `mean` is given in BGR order; reverse to match RGB images.
            self.mean = mean[::-1]
        else:
            self.mean = mean
        self.min_ratio, self.max_ratio = ratio_range
        self.seg_ignore_label = seg_ignore_label
        self.prob = prob
    def __call__(self, results):
        """Call function to expand images, bounding boxes.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with images, bounding boxes expanded
        """
        if random.uniform(0, 1) > self.prob:
            return results
        if 'img_fields' in results:
            assert results['img_fields'] == ['img'], \
                'Only single img_fields is allowed'
        img = results['img']
        h, w, c = img.shape
        ratio = random.uniform(self.min_ratio, self.max_ratio)
        # speedup expand when meets large image
        if np.all(self.mean == self.mean[0]):
            # All channels share one value: empty + fill is cheaper than
            # broadcasting the per-channel mean with np.full.
            expand_img = np.empty((int(h * ratio), int(w * ratio), c),
                                  img.dtype)
            expand_img.fill(self.mean[0])
        else:
            expand_img = np.full((int(h * ratio), int(w * ratio), c),
                                 self.mean,
                                 dtype=img.dtype)
        # Random placement of the original image inside the canvas.
        left = int(random.uniform(0, w * ratio - w))
        top = int(random.uniform(0, h * ratio - h))
        expand_img[top:top + h, left:left + w] = img
        results['img'] = expand_img
        # expand bboxes: shift both corners by the paste offset
        for key in results.get('bbox_fields', []):
            results[key] = results[key] + np.tile(
                (left, top), 2).astype(results[key].dtype)
        # expand masks
        for key in results.get('mask_fields', []):
            results[key] = results[key].expand(
                int(h * ratio), int(w * ratio), top, left)
        # expand segs: padded area is filled with seg_ignore_label
        for key in results.get('seg_fields', []):
            gt_seg = results[key]
            expand_gt_seg = np.full((int(h * ratio), int(w * ratio)),
                                    self.seg_ignore_label,
                                    dtype=gt_seg.dtype)
            expand_gt_seg[top:top + h, left:left + w] = gt_seg
            results[key] = expand_gt_seg
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, '
        repr_str += f'ratio_range={self.ratio_range}, '
        repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
        # Fix: include `prob` so the repr reflects the full configuration.
        repr_str += f'prob={self.prob})'
        return repr_str
@PIPELINES.register_module()
class MinIoURandomCrop:
    """Random crop the image & bboxes, the cropped patches have minimum IoU
    requirement with original image & bboxes, the IoU threshold is randomly
    selected from min_ious.

    Args:
        min_ious (tuple): minimum IoU threshold for all intersections with
            bounding boxes
        min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,
            where a >= min_crop_size).
        bbox_clip_border (bool, optional): Whether clip the objects outside
            the border of the image. Defaults to True.

    Note:
        The keys for bboxes, labels and masks should be paired. That is, \
        `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \
        `gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`.
    """
    def __init__(self,
                 min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
                 min_crop_size=0.3,
                 bbox_clip_border=True):
        # 1: return ori img
        # Sample modes: 1 means "keep the original image", the min_ious
        # entries are IoU thresholds, 0 means "no IoU constraint".
        self.min_ious = min_ious
        self.sample_mode = (1, *min_ious, 0)
        self.min_crop_size = min_crop_size
        self.bbox_clip_border = bbox_clip_border
        # Key correspondence used to keep labels/masks in sync with bboxes.
        self.bbox2label = {
            'gt_bboxes': 'gt_labels',
            'gt_bboxes_ignore': 'gt_labels_ignore'
        }
        self.bbox2mask = {
            'gt_bboxes': 'gt_masks',
            'gt_bboxes_ignore': 'gt_masks_ignore'
        }
    def __call__(self, results):
        """Call function to crop images and bounding boxes with minimum IoU
        constraint.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with images and bounding boxes cropped, \
                'img_shape' key is updated.
        """
        if 'img_fields' in results:
            assert results['img_fields'] == ['img'], \
                'Only single img_fields is allowed'
        img = results['img']
        assert 'bbox_fields' in results
        boxes = [results[key] for key in results['bbox_fields']]
        boxes = np.concatenate(boxes, 0)
        h, w, c = img.shape
        # Retry until a mode succeeds; mode==1 always succeeds, so the
        # loop terminates with probability 1.
        while True:
            mode = random.choice(self.sample_mode)
            self.mode = mode
            if mode == 1:
                return results
            min_iou = mode
            # Up to 50 patch proposals per sampled mode.
            for i in range(50):
                new_w = random.uniform(self.min_crop_size * w, w)
                new_h = random.uniform(self.min_crop_size * h, h)
                # h / w in [0.5, 2]
                if new_h / new_w < 0.5 or new_h / new_w > 2:
                    continue
                left = random.uniform(w - new_w)
                top = random.uniform(h - new_h)
                patch = np.array(
                    (int(left), int(top), int(left + new_w), int(top + new_h)))
                # Line or point crop is not allowed
                if patch[2] == patch[0] or patch[3] == patch[1]:
                    continue
                # Reject the patch if ANY gt box overlaps it less than min_iou.
                overlaps = bbox_overlaps(
                    patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
                if len(overlaps) > 0 and overlaps.min() < min_iou:
                    continue
                # center of boxes should inside the crop img
                # only adjust boxes and instance masks when the gt is not empty
                if len(overlaps) > 0:
                    # adjust boxes
                    def is_center_of_bboxes_in_patch(boxes, patch):
                        center = (boxes[:, :2] + boxes[:, 2:]) / 2
                        mask = ((center[:, 0] > patch[0]) *
                                (center[:, 1] > patch[1]) *
                                (center[:, 0] < patch[2]) *
                                (center[:, 1] < patch[3]))
                        return mask
                    mask = is_center_of_bboxes_in_patch(boxes, patch)
                    if not mask.any():
                        continue
                    # NOTE: `boxes` is rebound per field below; the
                    # concatenated copy above is only used for the IoU and
                    # first center checks.
                    for key in results.get('bbox_fields', []):
                        boxes = results[key].copy()
                        mask = is_center_of_bboxes_in_patch(boxes, patch)
                        boxes = boxes[mask]
                        if self.bbox_clip_border:
                            boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
                            boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
                        # Translate into the patch's coordinate frame.
                        boxes -= np.tile(patch[:2], 2)
                        results[key] = boxes
                        # labels
                        label_key = self.bbox2label.get(key)
                        if label_key in results:
                            results[label_key] = results[label_key][mask]
                        # mask fields
                        mask_key = self.bbox2mask.get(key)
                        if mask_key in results:
                            results[mask_key] = results[mask_key][
                                mask.nonzero()[0]].crop(patch)
                # adjust the img no matter whether the gt is empty before crop
                img = img[patch[1]:patch[3], patch[0]:patch[2]]
                results['img'] = img
                results['img_shape'] = img.shape
                # seg fields
                for key in results.get('seg_fields', []):
                    results[key] = results[key][patch[1]:patch[3],
                                                patch[0]:patch[2]]
                return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(min_ious={self.min_ious}, '
        repr_str += f'min_crop_size={self.min_crop_size}, '
        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
        return repr_str
@PIPELINES.register_module()
class Corrupt:
    """Corruption augmentation.

    Corruption transforms implemented based on
    `imagecorruptions <https://github.com/bethgelab/imagecorruptions>`_.

    Args:
        corruption (str): Corruption name.
        severity (int, optional): The severity of corruption. Default: 1.
    """

    def __init__(self, corruption, severity=1):
        self.corruption = corruption
        self.severity = severity

    def __call__(self, results):
        """Corrupt the image in ``results``.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with images corrupted.
        """
        # `corrupt` is an optional dependency resolved at import time;
        # it is None when imagecorruptions is missing.
        if corrupt is None:
            raise RuntimeError('imagecorruptions is not installed')
        if 'img_fields' in results:
            assert results['img_fields'] == ['img'], \
                'Only single img_fields is allowed'
        corrupted = corrupt(
            results['img'].astype(np.uint8),
            corruption_name=self.corruption,
            severity=self.severity)
        results['img'] = corrupted
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(corruption={self.corruption}, '
                f'severity={self.severity})')
@PIPELINES.register_module()
class Albu:
    """Albumentation augmentation.

    Adds custom transformations from Albumentations library.
    Please, visit `https://albumentations.readthedocs.io`
    to get more information.

    An example of ``transforms`` is as followed:

    .. code-block::
        [
            dict(
                type='ShiftScaleRotate',
                shift_limit=0.0625,
                scale_limit=0.0,
                rotate_limit=0,
                interpolation=1,
                p=0.5),
            dict(
                type='RandomBrightnessContrast',
                brightness_limit=[0.1, 0.3],
                contrast_limit=[0.1, 0.3],
                p=0.2),
            dict(type='ChannelShuffle', p=0.1),
            dict(
                type='OneOf',
                transforms=[
                    dict(type='Blur', blur_limit=3, p=1.0),
                    dict(type='MedianBlur', blur_limit=3, p=1.0)
                ],
                p=0.1),
        ]

    Args:
        transforms (list[dict]): A list of albu transformations
        bbox_params (dict): Bbox_params for albumentation `Compose`
        keymap (dict): Contains {'input key':'albumentation-style key'}
        update_pad_shape (bool): Whether to overwrite 'pad_shape' with the
            augmented image shape. Default False.
        skip_img_without_anno (bool): Whether to skip the image if no ann left
            after aug
    """
    def __init__(self,
                 transforms,
                 bbox_params=None,
                 keymap=None,
                 update_pad_shape=False,
                 skip_img_without_anno=False):
        if Compose is None:
            raise RuntimeError('albumentations is not installed')
        # Args will be modified later, copying it will be safer
        transforms = copy.deepcopy(transforms)
        if bbox_params is not None:
            bbox_params = copy.deepcopy(bbox_params)
        if keymap is not None:
            keymap = copy.deepcopy(keymap)
        self.transforms = transforms
        self.filter_lost_elements = False
        self.update_pad_shape = update_pad_shape
        self.skip_img_without_anno = skip_img_without_anno
        # A simple workaround to remove masks without boxes:
        # track each box's original index through albu via a pseudo label
        # field ('idx_mapper'), then filter labels/masks by surviving indices.
        if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params
                and 'filter_lost_elements' in bbox_params):
            self.filter_lost_elements = True
            self.origin_label_fields = bbox_params['label_fields']
            bbox_params['label_fields'] = ['idx_mapper']
            del bbox_params['filter_lost_elements']
        self.bbox_params = (
            self.albu_builder(bbox_params) if bbox_params else None)
        self.aug = Compose([self.albu_builder(t) for t in self.transforms],
                           bbox_params=self.bbox_params)
        if not keymap:
            self.keymap_to_albu = {
                'img': 'image',
                'gt_masks': 'masks',
                'gt_bboxes': 'bboxes'
            }
        else:
            self.keymap_to_albu = keymap
        # Inverse map used to restore mmdet-style keys after augmentation.
        self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}
    def albu_builder(self, cfg):
        """Import a module from albumentations.

        It inherits some of :func:`build_from_cfg` logic.

        Args:
            cfg (dict): Config dict. It should at least contain the key "type".

        Returns:
            obj: The constructed object.
        """
        assert isinstance(cfg, dict) and 'type' in cfg
        args = cfg.copy()
        obj_type = args.pop('type')
        if mmcv.is_str(obj_type):
            if albumentations is None:
                raise RuntimeError('albumentations is not installed')
            obj_cls = getattr(albumentations, obj_type)
        elif inspect.isclass(obj_type):
            obj_cls = obj_type
        else:
            raise TypeError(
                f'type must be a str or valid type, but got {type(obj_type)}')
        # Recursively build nested transforms (e.g. OneOf).
        if 'transforms' in args:
            args['transforms'] = [
                self.albu_builder(transform)
                for transform in args['transforms']
            ]
        return obj_cls(**args)
    @staticmethod
    def mapper(d, keymap):
        """Dictionary mapper. Renames keys according to keymap provided.

        Args:
            d (dict): old dict
            keymap (dict): {'old_key':'new_key'}

        Returns:
            dict: new dict.
        """
        updated_dict = {}
        for k, v in zip(d.keys(), d.values()):
            new_k = keymap.get(k, k)
            updated_dict[new_k] = d[k]
        return updated_dict
    def __call__(self, results):
        # dict to albumentations format
        results = self.mapper(results, self.keymap_to_albu)
        # TODO: add bbox_fields
        if 'bboxes' in results:
            # to list of boxes
            if isinstance(results['bboxes'], np.ndarray):
                results['bboxes'] = [x for x in results['bboxes']]
            # add pseudo-field for filtration
            if self.filter_lost_elements:
                results['idx_mapper'] = np.arange(len(results['bboxes']))
        # TODO: Support mask structure in albu
        if 'masks' in results:
            if isinstance(results['masks'], PolygonMasks):
                raise NotImplementedError(
                    'Albu only supports BitMap masks now')
            # Keep the original mask container so its class can be used to
            # rebuild the structure after augmentation.
            ori_masks = results['masks']
            if albumentations.__version__ < '0.5':
                results['masks'] = results['masks'].masks
            else:
                results['masks'] = [mask for mask in results['masks'].masks]
        results = self.aug(**results)
        if 'bboxes' in results:
            if isinstance(results['bboxes'], list):
                results['bboxes'] = np.array(
                    results['bboxes'], dtype=np.float32)
            results['bboxes'] = results['bboxes'].reshape(-1, 4)
            # filter label_fields
            if self.filter_lost_elements:
                # 'idx_mapper' now holds only the surviving box indices.
                for label in self.origin_label_fields:
                    results[label] = np.array(
                        [results[label][i] for i in results['idx_mapper']])
                if 'masks' in results:
                    results['masks'] = np.array(
                        [results['masks'][i] for i in results['idx_mapper']])
                    results['masks'] = ori_masks.__class__(
                        results['masks'], results['image'].shape[0],
                        results['image'].shape[1])
                if (not len(results['idx_mapper'])
                        and self.skip_img_without_anno):
                    # All annotations were lost: signal the caller to drop
                    # this sample by returning None.
                    return None
        if 'gt_labels' in results:
            if isinstance(results['gt_labels'], list):
                results['gt_labels'] = np.array(results['gt_labels'])
            results['gt_labels'] = results['gt_labels'].astype(np.int64)
        # back to the original format
        results = self.mapper(results, self.keymap_back)
        # update final shape
        if self.update_pad_shape:
            results['pad_shape'] = results['img'].shape
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__ + f'(transforms={self.transforms})'
        return repr_str
@PIPELINES.register_module()
class RandomCenterCropPad:
    """Random center crop and random around padding for CornerNet.

    This operation generates randomly cropped image from the original image and
    pads it simultaneously. Different from :class:`RandomCrop`, the output
    shape may not equal to ``crop_size`` strictly. We choose a random value
    from ``ratios`` and the output shape could be larger or smaller than
    ``crop_size``. The padding operation is also different from :class:`Pad`,
    here we use around padding instead of right-bottom padding.

    The relation between output image (padding image) and original image:

    .. code:: text

                        output image
               +----------------------------+
               |          padded area       |
        +------|----------------------------|----------+
        |      |         cropped area       |          |
        |      |         +---------------+  |          |
        |      |         |    .   center |  |          | original image
        |      |         |        range  |  |          |
        |      |         +---------------+  |          |
        +------|----------------------------|----------+
               |          padded area       |
               +----------------------------+

    There are 5 main areas in the figure:

    - output image: output image of this operation, also called padding
      image in following instruction.
    - original image: input image of this operation.
    - padded area: non-intersect area of output image and original image.
    - cropped area: the overlap of output image and original image.
    - center range: a smaller area where random center chosen from.
      center range is computed by ``border`` and original image's shape
      to avoid our random center is too close to original image's border.

    Also this operation act differently in train and test mode, the summary
    pipeline is listed below.

    Train pipeline:

    1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image
       will be ``random_ratio * crop_size``.
    2. Choose a ``random_center`` in center range.
    3. Generate padding image with center matches the ``random_center``.
    4. Initialize the padding image with pixel value equals to ``mean``.
    5. Copy the cropped area to padding image.
    6. Refine annotations.

    Test pipeline:

    1. Compute output shape according to ``test_pad_mode``.
    2. Generate padding image with center matches the original image
       center.
    3. Initialize the padding image with pixel value equals to ``mean``.
    4. Copy the ``cropped area`` to padding image.

    Args:
        crop_size (tuple | None): expected size after crop, final size will
            computed according to ratio. Requires (h, w) in train mode, and
            None in test mode.
        ratios (tuple): random select a ratio from tuple and crop image to
            (crop_size[0] * ratio) * (crop_size[1] * ratio).
            Only available in train mode.
        border (int): max distance from center select area to image border.
            Only available in train mode.
        mean (sequence): Mean values of 3 channels.
        std (sequence): Std values of 3 channels.
        to_rgb (bool): Whether to convert the image from BGR to RGB.
        test_mode (bool): whether involve random variables in transform.
            In train mode, crop_size is fixed, center coords and ratio is
            random selected from predefined lists. In test mode, crop_size
            is image's original shape, center coords and ratio is fixed.
        test_pad_mode (tuple): padding method and padding shape value, only
            available in test mode. Default is using 'logical_or' with
            127 as padding shape value.

            - 'logical_or': final_shape = input_shape | padding_shape_value
            - 'size_divisor': final_shape = int(
              ceil(input_shape / padding_shape_value) * padding_shape_value)
        test_pad_add_pix (int): Extra padding pixel in test mode. Default 0.
        bbox_clip_border (bool, optional): Whether clip the objects outside
            the border of the image. Defaults to True.
    """
    def __init__(self,
                 crop_size=None,
                 ratios=(0.9, 1.0, 1.1),
                 border=128,
                 mean=None,
                 std=None,
                 to_rgb=None,
                 test_mode=False,
                 test_pad_mode=('logical_or', 127),
                 test_pad_add_pix=0,
                 bbox_clip_border=True):
        # The two modes use mutually exclusive argument sets; assert that
        # the config matches the selected mode up front.
        if test_mode:
            assert crop_size is None, 'crop_size must be None in test mode'
            assert ratios is None, 'ratios must be None in test mode'
            assert border is None, 'border must be None in test mode'
            assert isinstance(test_pad_mode, (list, tuple))
            assert test_pad_mode[0] in ['logical_or', 'size_divisor']
        else:
            assert isinstance(crop_size, (list, tuple))
            assert crop_size[0] > 0 and crop_size[1] > 0, (
                'crop_size must > 0 in train mode')
            assert isinstance(ratios, (list, tuple))
            assert test_pad_mode is None, (
                'test_pad_mode must be None in train mode')
        self.crop_size = crop_size
        self.ratios = ratios
        self.border = border
        # We do not set default value to mean, std and to_rgb because these
        # hyper-parameters are easy to forget but could affect the performance.
        # Please use the same setting as Normalize for performance assurance.
        assert mean is not None and std is not None and to_rgb is not None
        self.to_rgb = to_rgb
        # Keep the raw (input-order) values for __repr__.
        self.input_mean = mean
        self.input_std = std
        if to_rgb:
            self.mean = mean[::-1]
            self.std = std[::-1]
        else:
            self.mean = mean
            self.std = std
        self.test_mode = test_mode
        self.test_pad_mode = test_pad_mode
        self.test_pad_add_pix = test_pad_add_pix
        self.bbox_clip_border = bbox_clip_border
    def _get_border(self, border, size):
        """Get final border for the target size.

        This function generates a ``final_border`` according to image's shape.
        The area between ``final_border`` and ``size - final_border`` is the
        ``center range``. We randomly choose center from the ``center range``
        to avoid our random center is too close to original image's border.
        Also ``center range`` should be larger than 0.

        Args:
            border (int): The initial border, default is 128.
            size (int): The width or height of original image.

        Returns:
            int: The final border.
        """
        # Shrink the border by powers of two until 2 * final_border < size,
        # guaranteeing a non-empty center range for small images.
        k = 2 * border / size
        i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))
        return border // i
    def _filter_boxes(self, patch, boxes):
        """Check whether the center of each box is in the patch.

        Args:
            patch (list[int]): The cropped area, [left, top, right, bottom].
            boxes (numpy array, (N x 4)): Ground truth boxes.

        Returns:
            mask (numpy array, (N,)): Each box is inside or outside the patch.
        """
        center = (boxes[:, :2] + boxes[:, 2:]) / 2
        # Elementwise `*` on booleans acts as logical AND.
        mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (
            center[:, 0] < patch[2]) * (
                center[:, 1] < patch[3])
        return mask
    def _crop_image_and_paste(self, image, center, size):
        """Crop image with a given center and size, then paste the cropped
        image to a blank image with two centers align.

        This function is equivalent to generating a blank image with ``size``
        as its shape. Then cover it on the original image with two centers (
        the center of blank image and the random center of original image)
        aligned. The overlap area is paste from the original image and the
        outside area is filled with ``mean pixel``.

        Args:
            image (np array, H x W x C): Original image.
            center (list[int]): Target crop center coord.
            size (list[int]): Target crop size. [target_h, target_w]

        Returns:
            cropped_img (np array, target_h x target_w x C): Cropped image.
            border (np array, 4): The distance of four border of
                ``cropped_img`` to the original image area, [top, bottom,
                left, right]
            patch (list[int]): The cropped area, [left, top, right, bottom].
        """
        center_y, center_x = center
        target_h, target_w = size
        img_h, img_w, img_c = image.shape
        # Clamp the crop window to the original image bounds.
        x0 = max(0, center_x - target_w // 2)
        x1 = min(center_x + target_w // 2, img_w)
        y0 = max(0, center_y - target_h // 2)
        y1 = min(center_y + target_h // 2, img_h)
        patch = np.array((int(x0), int(y0), int(x1), int(y1)))
        left, right = center_x - x0, x1 - center_x
        top, bottom = center_y - y0, y1 - center_y
        cropped_center_y, cropped_center_x = target_h // 2, target_w // 2
        # Canvas initialised to the (possibly RGB-reordered) mean pixel.
        cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)
        for i in range(img_c):
            cropped_img[:, :, i] += self.mean[i]
        y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)
        x_slice = slice(cropped_center_x - left, cropped_center_x + right)
        cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]
        border = np.array([
            cropped_center_y - top, cropped_center_y + bottom,
            cropped_center_x - left, cropped_center_x + right
        ],
                          dtype=np.float32)
        return cropped_img, border, patch
    def _train_aug(self, results):
        """Random crop and around padding the original image.

        Args:
            results (dict): Image infomations in the augment pipeline.

        Returns:
            results (dict): The updated dict.
        """
        img = results['img']
        h, w, c = img.shape
        boxes = results['gt_bboxes']
        # Retry sampling until a crop keeps at least one gt box (or the
        # image has no boxes at all); up to 50 centers per sampled ratio.
        while True:
            scale = random.choice(self.ratios)
            new_h = int(self.crop_size[0] * scale)
            new_w = int(self.crop_size[1] * scale)
            h_border = self._get_border(self.border, h)
            w_border = self._get_border(self.border, w)
            for i in range(50):
                center_x = random.randint(low=w_border, high=w - w_border)
                center_y = random.randint(low=h_border, high=h - h_border)
                cropped_img, border, patch = self._crop_image_and_paste(
                    img, [center_y, center_x], [new_h, new_w])
                mask = self._filter_boxes(patch, boxes)
                # if image do not have valid bbox, any crop patch is valid.
                if not mask.any() and len(boxes) > 0:
                    continue
                results['img'] = cropped_img
                results['img_shape'] = cropped_img.shape
                results['pad_shape'] = cropped_img.shape
                x0, y0, x1, y1 = patch
                left_w, top_h = center_x - x0, center_y - y0
                cropped_center_x, cropped_center_y = new_w // 2, new_h // 2
                # crop bboxes accordingly and clip to the image boundary
                for key in results.get('bbox_fields', []):
                    mask = self._filter_boxes(patch, results[key])
                    bboxes = results[key][mask]
                    # Shift boxes into the padded-canvas coordinate frame.
                    bboxes[:, 0:4:2] += cropped_center_x - left_w - x0
                    bboxes[:, 1:4:2] += cropped_center_y - top_h - y0
                    if self.bbox_clip_border:
                        bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)
                        bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)
                    keep = (bboxes[:, 2] > bboxes[:, 0]) & (
                        bboxes[:, 3] > bboxes[:, 1])
                    bboxes = bboxes[keep]
                    results[key] = bboxes
                    if key in ['gt_bboxes']:
                        if 'gt_labels' in results:
                            # Apply both filters (center-in-patch, then
                            # positive area) in the same order as bboxes.
                            labels = results['gt_labels'][mask]
                            labels = labels[keep]
                            results['gt_labels'] = labels
                    if 'gt_masks' in results:
                        raise NotImplementedError(
                            'RandomCenterCropPad only supports bbox.')
                # crop semantic seg
                for key in results.get('seg_fields', []):
                    raise NotImplementedError(
                        'RandomCenterCropPad only supports bbox.')
                return results
    def _test_aug(self, results):
        """Around padding the original image without cropping.

        The padding mode and value are from ``test_pad_mode``.

        Args:
            results (dict): Image infomations in the augment pipeline.

        Returns:
            results (dict): The updated dict.
        """
        img = results['img']
        h, w, c = img.shape
        results['img_shape'] = img.shape
        if self.test_pad_mode[0] in ['logical_or']:
            # self.test_pad_add_pix is only used for centernet
            target_h = (h | self.test_pad_mode[1]) + self.test_pad_add_pix
            target_w = (w | self.test_pad_mode[1]) + self.test_pad_add_pix
        elif self.test_pad_mode[0] in ['size_divisor']:
            divisor = self.test_pad_mode[1]
            target_h = int(np.ceil(h / divisor)) * divisor
            target_w = int(np.ceil(w / divisor)) * divisor
        else:
            raise NotImplementedError(
                'RandomCenterCropPad only support two testing pad mode:'
                'logical-or and size_divisor.')
        # Center the original image on the padded canvas.
        cropped_img, border, _ = self._crop_image_and_paste(
            img, [h // 2, w // 2], [target_h, target_w])
        results['img'] = cropped_img
        results['pad_shape'] = cropped_img.shape
        results['border'] = border
        return results
    def __call__(self, results):
        img = results['img']
        assert img.dtype == np.float32, (
            'RandomCenterCropPad needs the input image of dtype np.float32,'
            ' please set "to_float32=True" in "LoadImageFromFile" pipeline')
        h, w, c = img.shape
        assert c == len(self.mean)
        if self.test_mode:
            return self._test_aug(results)
        else:
            return self._train_aug(results)
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(crop_size={self.crop_size}, '
        repr_str += f'ratios={self.ratios}, '
        repr_str += f'border={self.border}, '
        repr_str += f'mean={self.input_mean}, '
        repr_str += f'std={self.input_std}, '
        repr_str += f'to_rgb={self.to_rgb}, '
        repr_str += f'test_mode={self.test_mode}, '
        repr_str += f'test_pad_mode={self.test_pad_mode}, '
        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
        return repr_str
@PIPELINES.register_module()
class CutOut:
    """CutOut operation.

    Randomly drop some regions of image used in
    `Cutout <https://arxiv.org/abs/1708.04552>`_.

    Args:
        n_holes (int | tuple[int, int]): Number of regions to be dropped.
            A tuple is interpreted as the closed interval
            [`n_holes[0]`, `n_holes[1]`] to sample the count from.
        cutout_shape (tuple[int, int] | list[tuple[int, int]]): Candidate
            pixel shapes of the dropped regions; a single tuple means a
            fixed shape, a list means a random choice per hole.
        cutout_ratio (tuple[float, float] | list[tuple[float, float]]):
            Candidate (w, h) ratios of the dropped regions relative to the
            image size. Exactly one of ``cutout_shape`` / ``cutout_ratio``
            must be given.
        fill_in (tuple[float, float, float] | tuple[int, int, int]): Pixel
            value written into the dropped regions. Default: (0, 0, 0).
    """

    def __init__(self,
                 n_holes,
                 cutout_shape=None,
                 cutout_ratio=None,
                 fill_in=(0, 0, 0)):
        assert (cutout_shape is None) ^ (cutout_ratio is None), \
            'Either cutout_shape or cutout_ratio should be specified.'
        assert (isinstance(cutout_shape, (list, tuple))
                or isinstance(cutout_ratio, (list, tuple)))
        if isinstance(n_holes, tuple):
            assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]
        else:
            # Normalize a scalar count to a degenerate interval.
            n_holes = (n_holes, n_holes)
        self.n_holes = n_holes
        self.fill_in = fill_in
        self.with_ratio = cutout_ratio is not None
        candidates = cutout_ratio if self.with_ratio else cutout_shape
        # Always store candidates as a list so indexing is uniform.
        self.candidates = (
            candidates if isinstance(candidates, list) else [candidates])

    def __call__(self, results):
        """Call function to drop some regions of image."""
        h, w, c = results['img'].shape
        n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)
        for _ in range(n_holes):
            # Top-left corner of the hole; the hole may extend past the
            # image border and is clipped below.
            x1 = np.random.randint(0, w)
            y1 = np.random.randint(0, h)
            index = np.random.randint(0, len(self.candidates))
            if self.with_ratio:
                ratio_w, ratio_h = self.candidates[index]
                cutout_w = int(ratio_w * w)
                cutout_h = int(ratio_h * h)
            else:
                cutout_w, cutout_h = self.candidates[index]
            x2 = np.clip(x1 + cutout_w, 0, w)
            y2 = np.clip(y1 + cutout_h, 0, h)
            results['img'][y1:y2, x1:x2, :] = self.fill_in
        return results

    def __repr__(self):
        """Return a readable summary of the transform's configuration."""
        cand_key = 'cutout_ratio' if self.with_ratio else 'cutout_shape'
        return (f'{self.__class__.__name__}(n_holes={self.n_holes}, '
                f'{cand_key}={self.candidates}, '
                f'fill_in={self.fill_in})')
| 75,130 | 38.418153 | 79 | py |
DDOD | DDOD-main/mmdet/datasets/pipelines/test_time_aug.py | import warnings
import mmcv
from ..builder import PIPELINES
from .compose import Compose
@PIPELINES.register_module()
class MultiScaleFlipAug:
    """Test-time augmentation with multiple scales and flipping.

    Each input image is expanded into ``len(img_scale) * len(flip variants)``
    augmented copies; the per-copy outputs of ``transforms`` are then
    collated into a dict of lists (one list entry per augmented copy).

    Args:
        transforms (list[dict]): Transforms to apply in each augmentation.
        img_scale (tuple | list[tuple] | None): Images scales for resizing.
        scale_factor (float | list[float] | None): Scale factors for
            resizing. Exactly one of ``img_scale`` / ``scale_factor`` must
            be given.
        flip (bool): Whether apply flip augmentation. Default: False.
        flip_direction (str | list[str]): Flip augmentation directions,
            options are "horizontal", "vertical" and "diagonal". If
            flip_direction is a list, multiple flip augmentations will be
            applied. It has no effect when flip == False. Default:
            "horizontal".
    """

    def __init__(self,
                 transforms,
                 img_scale=None,
                 scale_factor=None,
                 flip=False,
                 flip_direction='horizontal'):
        self.transforms = Compose(transforms)
        assert (img_scale is None) ^ (scale_factor is None), (
            'Must have but only one variable can be setted')
        if img_scale is not None:
            self.img_scale = (
                img_scale if isinstance(img_scale, list) else [img_scale])
            self.scale_key = 'scale'
            assert mmcv.is_list_of(self.img_scale, tuple)
        else:
            # Scale factors are stored in the same attribute but written to
            # the results under a different key.
            self.img_scale = (
                scale_factor if isinstance(scale_factor, list)
                else [scale_factor])
            self.scale_key = 'scale_factor'
        self.flip = flip
        self.flip_direction = (
            flip_direction if isinstance(flip_direction, list)
            else [flip_direction])
        assert mmcv.is_list_of(self.flip_direction, str)
        if not self.flip and self.flip_direction != ['horizontal']:
            warnings.warn(
                'flip_direction has no effect when flip is set to False')
        if self.flip and not any(
                t['type'] == 'RandomFlip' for t in transforms):
            warnings.warn(
                'flip has no effect when RandomFlip is not in transforms')

    def __call__(self, results):
        """Call function to apply test time augment transforms on results.

        Args:
            results (dict): Result dict contains the data to transform.

        Returns:
            dict[str: list]: The augmented data, where each value is wrapped
                into a list.
        """
        # The un-flipped variant always comes first.
        flip_args = [(False, None)]
        if self.flip:
            flip_args += [(True, direction)
                          for direction in self.flip_direction]
        aug_data = []
        for scale in self.img_scale:
            for do_flip, direction in flip_args:
                variant = results.copy()
                variant[self.scale_key] = scale
                variant['flip'] = do_flip
                variant['flip_direction'] = direction
                aug_data.append(self.transforms(variant))
        # list of dict to dict of list
        collated = {key: [] for key in aug_data[0]}
        for data in aug_data:
            for key, val in data.items():
                collated[key].append(val)
        return collated

    def __repr__(self):
        """Return a readable summary of the TTA configuration."""
        return (f'{self.__class__.__name__}'
                f'(transforms={self.transforms}, '
                f'img_scale={self.img_scale}, flip={self.flip}, '
                f'flip_direction={self.flip_direction})')
| 4,421 | 35.545455 | 78 | py |
DDOD | DDOD-main/mmdet/utils/contextmanagers.py | import asyncio
import contextlib
import logging
import os
import time
from typing import List
import torch
# Module-level logger used by the stream context managers below.
logger = logging.getLogger(__name__)

# NOTE(review): bool() of a non-empty string is True, so ANY value for this
# env var (even '0' or 'false') enables the extra CUDA timing instrumentation.
DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))
@contextlib.asynccontextmanager
async def completed(trace_name='',
                    name='',
                    sleep_interval=0.05,
                    streams: List[torch.cuda.Stream] = None):
    """Async context manager that waits for work to complete on given CUDA
    streams.

    After the wrapped block runs, an event is recorded on every stream and
    the coroutine polls those events (sleeping ``sleep_interval`` seconds
    between polls) until all of them report completion. When CUDA is not
    available the manager is a no-op. With ``DEBUG_COMPLETED_TIME`` set, CPU
    and per-stream GPU elapsed times are logged.

    Args:
        trace_name (str): Label prefix used in log messages.
        name (str): Secondary label used in log messages.
        sleep_interval (float): Seconds to sleep between event polls.
        streams (List[torch.cuda.Stream] | None): Streams to wait on; falsy
            entries (and a falsy list) default to the current stream.
    """
    if not torch.cuda.is_available():
        yield
        return
    stream_before_context_switch = torch.cuda.current_stream()
    if not streams:
        streams = [stream_before_context_switch]
    else:
        # Replace falsy entries with the current stream.
        streams = [s if s else stream_before_context_switch for s in streams]
    # One completion event per stream; timing only enabled in debug mode.
    end_events = [
        torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
    ]
    if DEBUG_COMPLETED_TIME:
        start = torch.cuda.Event(enable_timing=True)
        stream_before_context_switch.record_event(start)
        cpu_start = time.monotonic()
    logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
    grad_enabled_before = torch.is_grad_enabled()
    try:
        yield
    finally:
        current_stream = torch.cuda.current_stream()
        # The wrapped block must not leave a different stream active.
        assert current_stream == stream_before_context_switch
        if DEBUG_COMPLETED_TIME:
            cpu_end = time.monotonic()
        for i, stream in enumerate(streams):
            event = end_events[i]
            stream.record_event(event)
        grad_enabled_after = torch.is_grad_enabled()
        # observed change of torch.is_grad_enabled() during concurrent run of
        # async_test_bboxes code
        assert (grad_enabled_before == grad_enabled_after
                ), 'Unexpected is_grad_enabled() value change'
        are_done = [e.query() for e in end_events]
        logger.debug('%s %s completed: %s streams: %s', trace_name, name,
                     are_done, streams)
        # Poll (without blocking the event loop) until every stream's end
        # event has been reached.
        with torch.cuda.stream(stream_before_context_switch):
            while not all(are_done):
                await asyncio.sleep(sleep_interval)
                are_done = [e.query() for e in end_events]
                logger.debug(
                    '%s %s completed: %s streams: %s',
                    trace_name,
                    name,
                    are_done,
                    streams,
                )
        current_stream = torch.cuda.current_stream()
        assert current_stream == stream_before_context_switch
        if DEBUG_COMPLETED_TIME:
            cpu_time = (cpu_end - cpu_start) * 1000
            stream_times_ms = ''
            for i, stream in enumerate(streams):
                elapsed_time = start.elapsed_time(end_events[i])
                stream_times_ms += f' {stream} {elapsed_time:.2f} ms'
            logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
                        stream_times_ms)
@contextlib.asynccontextmanager
async def concurrent(streamqueue: asyncio.Queue,
                     trace_name='concurrent',
                     name='stream'):
    """Run code concurrently in different streams.

    :param streamqueue: asyncio.Queue instance.

    Queue tasks define the pool of streams used for concurrent execution.

    A stream is borrowed from the queue, made current for the wrapped block,
    and returned to the queue afterwards (even on error). When CUDA is not
    available the manager is a no-op.
    """
    if not torch.cuda.is_available():
        yield
        return
    initial_stream = torch.cuda.current_stream()
    with torch.cuda.stream(initial_stream):
        # Borrow a stream from the pool; blocks until one is available.
        stream = await streamqueue.get()
        assert isinstance(stream, torch.cuda.Stream)
        try:
            with torch.cuda.stream(stream):
                logger.debug('%s %s is starting, stream: %s', trace_name, name,
                             stream)
                yield
                current = torch.cuda.current_stream()
                # The wrapped block must not leave a different stream active.
                assert current == stream
                logger.debug('%s %s has finished, stream: %s', trace_name,
                             name, stream)
        finally:
            # Return the stream to the pool so other tasks can reuse it.
            streamqueue.task_done()
            streamqueue.put_nowait(stream)
| 4,077 | 32.42623 | 79 | py |
DDOD | DDOD-main/mmdet/utils/util_mixins.py | """This module defines the :class:`NiceRepr` mixin class, which defines a
``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__``
method, which you must define. This means you only have to overload one
function instead of two. Furthermore, if the object defines a ``__len__``
method, then the ``__nice__`` method defaults to something sensible, otherwise
it is treated as abstract and raises ``NotImplementedError``.
To use simply have your object inherit from :class:`NiceRepr`
(multi-inheritance should be ok).
This code was copied from the ubelt library: https://github.com/Erotemic/ubelt
Example:
>>> # Objects that define __nice__ have a default __str__ and __repr__
>>> class Student(NiceRepr):
... def __init__(self, name):
... self.name = name
... def __nice__(self):
... return self.name
>>> s1 = Student('Alice')
>>> s2 = Student('Bob')
>>> print(f's1 = {s1}')
>>> print(f's2 = {s2}')
s1 = <Student(Alice)>
s2 = <Student(Bob)>
Example:
>>> # Objects that define __len__ have a default __nice__
>>> class Group(NiceRepr):
... def __init__(self, data):
... self.data = data
... def __len__(self):
... return len(self.data)
>>> g = Group([1, 2, 3])
>>> print(f'g = {g}')
g = <Group(3)>
"""
import warnings
class NiceRepr:
    """Inherit from this class and define ``__nice__`` to "nicely" print your
    objects.

    Defines ``__str__`` and ``__repr__`` in terms of a single ``__nice__``
    method, so subclasses only overload one function instead of two. If a
    subclass defines ``__len__``, the default ``__nice__`` reports its
    length; otherwise ``__nice__`` must be overridden (falling back to
    ``object.__repr__`` with a ``RuntimeWarning`` if it is not).

    Example:
        >>> class Foo(NiceRepr):
        ...    def __nice__(self):
        ...        return 'info'
        >>> foo = Foo()
        >>> assert str(foo) == '<Foo(info)>'
        >>> assert repr(foo).startswith('<Foo(info) at ')

    Example:
        >>> class Baz(NiceRepr):
        ...    def __len__(self):
        ...        return 5
        >>> baz = Baz()
        >>> assert str(baz) == '<Baz(5)>'
    """

    def __nice__(self):
        """str: a "nice" summary string describing this module"""
        if not hasattr(self, '__len__'):
            # No sensible default exists; subclasses must overload __nice__.
            raise NotImplementedError(
                f'Define the __nice__ method for {self.__class__!r}')
        # Sized objects default to summarizing themselves by their length.
        return str(len(self))

    def __repr__(self):
        """str: the string of the module"""
        try:
            summary = self.__nice__()
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)
        return f'<{type(self).__name__}({summary}) at {hex(id(self))}>'

    def __str__(self):
        """str: the string of the module"""
        try:
            summary = self.__nice__()
        except NotImplementedError as ex:
            warnings.warn(str(ex), category=RuntimeWarning)
            return object.__repr__(self)
        return f'<{type(self).__name__}({summary})>'
| 3,664 | 33.904762 | 78 | py |
DDOD | DDOD-main/mmdet/utils/profiling.py | import contextlib
import sys
import time
import torch
if sys.version_info >= (3, 7):

    @contextlib.contextmanager
    def profile_time(trace_name,
                     name,
                     enabled=True,
                     stream=None,
                     end_stream=None):
        """Print time spent by CPU and GPU.

        Useful as a temporary context manager to find sweet spots of code
        suitable for async implementation. A no-op when disabled or when
        CUDA is unavailable.
        """
        if not enabled or not torch.cuda.is_available():
            yield
            return
        # Default both streams to the current stream.
        stream = stream if stream else torch.cuda.current_stream()
        end_stream = end_stream if end_stream else stream
        start_event = torch.cuda.Event(enable_timing=True)
        end_event = torch.cuda.Event(enable_timing=True)
        stream.record_event(start_event)
        try:
            cpu_start = time.monotonic()
            yield
        finally:
            # Record and wait for the end event so elapsed_time is valid.
            cpu_end = time.monotonic()
            end_stream.record_event(end_event)
            end_event.synchronize()
            cpu_time = (cpu_end - cpu_start) * 1000
            gpu_time = start_event.elapsed_time(end_event)
            msg = (f'{trace_name} {name} cpu_time {cpu_time:.2f} ms '
                   f'gpu_time {gpu_time:.2f} ms stream {stream}')
            print(msg, end_stream)
| 1,288 | 31.225 | 73 | py |
DDOD | DDOD-main/mmdet/utils/util_random.py | """Helpers for random number generators."""
import numpy as np
def ensure_rng(rng=None):
    """Coerces input into a random number generator.

    If the input is None, then a global random state is returned.

    If the input is a numeric value, then that is used as a seed to construct
    a random state. Otherwise the input is returned as-is.

    Adapted from [1]_.

    Args:
        rng (int | numpy.integer | numpy.random.RandomState | None):
            if None, then defaults to the global rng. Otherwise this can be
            an integer (plain or numpy) seed or a RandomState instance.

    Returns:
        (numpy.random.RandomState) : rng -
            a numpy random number generator

    References:
        .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270  # noqa: E501
    """
    if rng is None:
        # Reuse numpy's global random state (a shared, mutable singleton).
        rng = np.random.mtrand._rand
    elif isinstance(rng, (int, np.integer)):
        # Accept plain and numpy integer seeds (e.g. from np.random.randint),
        # which previously fell through unhandled.
        rng = np.random.RandomState(int(rng))
    # Anything else (e.g. an existing RandomState) is passed through as-is.
    return rng
| 977 | 27.764706 | 119 | py |
DDOD | DDOD-main/mmdet/utils/logger.py | import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Get root logger.

    Thin wrapper around mmcv's ``get_logger`` that pins the logger name
    to ``'mmdet'``.

    Args:
        log_file (str, optional): File path of log. Defaults to None.
        log_level (int, optional): The level of logger.
            Defaults to logging.INFO.

    Returns:
        :obj:`logging.Logger`: The obtained logger
    """
    return get_logger(name='mmdet', log_file=log_file, log_level=log_level)
| 481 | 23.1 | 77 | py |
DDOD | DDOD-main/mmdet/utils/collect_env.py | from mmcv.utils import collect_env as collect_base_env
from mmcv.utils import get_git_hash
import mmdet
def collect_env():
    """Collect the information of the running environments.

    Extends mmcv's base environment report with the MMDetection version
    plus the short git hash of the current checkout.
    """
    env_info = collect_base_env()
    version = f'{mmdet.__version__}+{get_git_hash()[:7]}'
    env_info['MMDetection'] = version
    return env_info
if __name__ == '__main__':
    # Print one "name: value" line per collected environment entry.
    for env_name, env_val in collect_env().items():
        print(f'{env_name}: {env_val}')
| 423 | 23.941176 | 74 | py |
DDOD | DDOD-main/mmdet/utils/__init__.py | from .collect_env import collect_env
from .logger import get_root_logger
__all__ = ['get_root_logger', 'collect_env']
| 119 | 23 | 44 | py |
temporal-feedback-crnn | temporal-feedback-crnn-main/tfcrnn/utils.py | import os
def mkpath(*paths):
    """Join path components (coerced to str) into one resolved real path."""
    joined = os.path.join(*(str(part) for part in paths))
    return os.path.realpath(joined)
| 151 | 15.888889 | 53 | py |
temporal-feedback-crnn | temporal-feedback-crnn-main/tfcrnn/dataset.py | from __future__ import annotations
import torch
import torch.nn.functional as F
from glob import glob
from typing import Literal, List
from pedalboard.io import ReadableAudioFile
from torch.utils.data import Dataset
from tfcrnn.utils import mkpath
from tfcrnn.config import Config
# The 35 keyword class names (audio folder names) recognized by the dataset,
# in alphabetical order.
CLASSES = ['backward', 'bed', 'bird', 'cat', 'dog', 'down', 'eight', 'five', 'follow', 'forward', 'four', 'go', 'happy',
           'house', 'learn', 'left', 'marvin', 'nine', 'no', 'off', 'on', 'one', 'right', 'seven', 'sheila', 'six',
           'stop', 'three', 'tree', 'two', 'up', 'visual', 'wow', 'yes', 'zero']
# Maps each class name to its integer index in ``CLASSES``.
NAME2IDX = {name: i for i, name in enumerate(CLASSES)}
class SpeechCommandsDataset(Dataset):
    """Waveform dataset over the Speech Commands directory layout.

    Each item is a ``(waveform, label)`` pair where the waveform is a
    float tensor of shape ``(1, config.input_size)`` and the label is the
    class index derived from the file's parent directory name.
    """

    def __init__(
        self,
        split: Literal['train', 'valid', 'test'],
        config: Config,
    ):
        self.split = split
        self.config = config
        # File paths are resolved once up front from the split lists.
        self.paths = load_audio_paths(config.dataset_dir, split)

    def __getitem__(self, i):
        path = self.paths[i]
        # The parent directory name is the class name.
        label_name = path.split('/')[-2]
        # NOTE(review): the fallback is the *string* '_silence_' while known
        # classes map to ints — confirm downstream code handles mixed types.
        label = NAME2IDX.get(label_name, '_silence_')
        with ReadableAudioFile(path) as af:
            assert af.samplerate == self.config.sample_rate, (
                f'The configured sampling rate is {self.config.sample_rate}, '
                f'but got {af.samplerate} from: {path}'
            )
            x = af.read(self.config.input_size)
        x = x.squeeze()
        x = torch.from_numpy(x)
        # Normalize by a fixed constant (0.0860) — presumably a precomputed
        # dataset-wide scale; TODO confirm its origin.
        x = x / 0.0860
        if len(x) < self.config.input_size:
            # Pad with zeros if audio is shorter than 16000 (1 sec).
            pad_size = self.config.input_size - len(x)
            x = F.pad(x, (pad_size // 2, pad_size // 2 + pad_size % 2), mode='constant', value=0)
        # Apply random cropping for the training set.
        if self.split == 'train' and self.config.crop_size > 0:
            crop_size = self.config.crop_size
            # Random offset, then re-pad symmetrically back to input_size.
            i = torch.randint(0, crop_size, (1,))
            x = x[i:i + x.shape[-1] - crop_size]
            x = F.pad(x, (crop_size // 2, crop_size // 2), mode='constant', value=0)
        # Add a dimension for channels.
        x = x.unsqueeze(0)
        assert x.shape == (1, self.config.input_size), (
            f'The processed waveform should have a shape of {(1, self.config.input_size)}, '
            f'but got {x.shape}'
        )
        return x, label

    def __len__(self):
        return len(self.paths)
def load_audio_paths(
    dataset_dir: str,
    split: Literal['train', 'valid', 'test'],
) -> List[str]:
    """Return the sorted absolute audio paths belonging to ``split``.

    The validation and test memberships come from the official
    ``validation_list.txt`` / ``testing_list.txt`` files; the training set
    is every ``*.wav`` under the dataset root minus those two splits and
    the background-noise clips.
    """

    def _read_split_list(list_name):
        # Split files store paths relative to the dataset root, one per line.
        with open(mkpath(dataset_dir, list_name)) as f:
            rel_paths = f.read().splitlines()
        return sorted(mkpath(dataset_dir, p) for p in rel_paths)

    if split != 'test':
        valid_paths = _read_split_list('validation_list.txt')
        if split == 'valid':
            return valid_paths
    test_paths = _read_split_list('testing_list.txt')
    if split == 'test':
        return test_paths
    all_paths = glob(mkpath(dataset_dir, '*/*.wav'))
    noise_paths = glob(mkpath(dataset_dir, '_background_noise_/*.wav'))
    # Remove validation, test set, and noises from the training set.
    train_paths = sorted(
        set(all_paths) - set(valid_paths) - set(test_paths) - set(noise_paths))
    return train_paths
if __name__ == '__main__':
    # Smoke test: iterate every split once to verify that all audio files
    # load, decode, and batch without errors.
    from tqdm import tqdm
    from torch.utils.data import DataLoader
    print('=> Start sanity check for the dataset')
    config = Config()
    config.parse_cli()
    config.init_wandb()
    config.print()
    splits = ['train', 'valid', 'test']
    datasets = [SpeechCommandsDataset(split, config) for split in splits]
    for dataset in datasets:
        # num_workers=0 keeps failures synchronous and easy to trace.
        loader = DataLoader(dataset, config.batch_size, shuffle=False, drop_last=False, num_workers=0)
        for x, y in tqdm(loader, desc=dataset.split):
            pass
| 3,763 | 30.630252 | 120 | py |
temporal-feedback-crnn | temporal-feedback-crnn-main/tfcrnn/config.py | import os
import argparse
import wandb
from dataclasses import dataclass, asdict
from typing import Literal
from tfcrnn.utils import mkpath
@dataclass
class Config:
    """All experiment hyper-parameters, overridable via CLI flags or wandb.

    Every field becomes a ``--<name>`` CLI argument (typed from its
    default) and is mirrored into the active wandb run when one exists.
    """

    # Path configurations.
    dataset_dir: str = mkpath(os.path.dirname(__file__), '../dataset')
    # Data configurations.
    input_seconds: float = 1.0
    sample_rate: int = 16000
    num_classes: int = 35
    crop_milliseconds: int = 10
    # Model configurations.
    hidden_size: int = 256
    window: int = 3200  # 200ms, timestep_size=100ms
    skeleton: Literal['tfcrnn', 'cnn', 'crnn'] = 'tfcrnn'
    block: Literal['basic', 'se', 'resse'] = 'basic'
    init_features: int = 128
    se_amp: float = 2 ** -3
    # Training configurations.
    batch_size: int = 64
    initial_lr: float = 0.025
    momentum: float = 0.9
    lr_decay: float = 0.2
    loss: Literal['many2many', 'many2one'] = 'many2many'
    num_max_epochs: int = 200  # TODO num_max_epochs
    num_decay: int = 3
    dropout: float = 0.5
    dropout_resse: float = 0.2
    weight_decay: float = 0.0
    patience: int = 2
    gpu: int = 0
    wandb_log_stepsize: int = 100

    @property
    def input_size(self):
        """Number of waveform samples per example."""
        return int(self.sample_rate * self.input_seconds)

    @property
    def crop_size(self):
        """Random-crop length in samples (from ``crop_milliseconds``)."""
        return int(self.sample_rate * self.crop_milliseconds / 1000)

    def parse_cli(self):
        """Override fields from CLI flags; mirror them into wandb if active.

        Each flag's type is taken from the field's current value.
        """
        parser = argparse.ArgumentParser()
        for k, v in asdict(self).items():
            parser.add_argument(f'--{k}', type=type(v), default=v)
        args = parser.parse_args()
        for k, v in vars(args).items():
            setattr(self, k, v)
        if wandb.run is not None:
            # wandb is initialized.
            wandb.config.update(asdict(self), allow_val_change=True)

    def print(self):
        """Pretty-print every field and its value to stdout."""
        print('-' * 32 + ' Configurations ' + '-' * 32)
        for k, v in asdict(self).items():
            print(f'{k:20}: {v}')
        print('-' * 80)

    def init_wandb(self):
        """Start an (anonymous-allowed) wandb run and sync the config.

        When wandb already carries config keys (i.e. a sweep), those
        values overwrite this object's fields; otherwise this object's
        fields are pushed to wandb.
        """
        wandb.init(anonymous='allow')
        if len(wandb.config.keys()) > 0:
            # It's a sweep of W & B.
            for k, v in wandb.config.items():
                setattr(self, k, v)
        wandb.config.update(asdict(self))

    def as_dict(self):
        """Return the configuration as a plain dict."""
        return asdict(self)
| 2,099 | 24.925926 | 68 | py |
temporal-feedback-crnn | temporal-feedback-crnn-main/tfcrnn/__init__.py | 0 | 0 | 0 | py | |
temporal-feedback-crnn | temporal-feedback-crnn-main/tfcrnn/train.py | import wandb
from tfcrnn.runners import SpeechCommandsRunner
from tfcrnn.config import Config
def main():
    """Train on Speech Commands with staged early stopping.

    Runs ``config.num_decay`` stages; within each stage, epochs continue
    from the previous stage's counter and stop early on validation-loss
    plateau, after which the best checkpoint is reloaded. Final test
    scores are read from the last best checkpoint.
    """
    config = Config()
    config.init_wandb()
    config.parse_cli()
    config.print()
    runner = SpeechCommandsRunner(config)
    print(runner.model)
    print(f'\n=> Num params: {sum([p.numel() for p in runner.model.parameters()]):,}')
    epoch = 0
    for stage in range(config.num_decay):
        print('-' * 80)
        print(f'Stage {stage}')
        print('-' * 80)
        # Epoch numbering carries over across stages (resumes at epoch + 1).
        for epoch in range(epoch + 1, config.num_max_epochs + 1):
            print(f'Epoch {epoch:2}')
            loss_train, scores_train = runner.train()
            loss_valid, scores_valid = runner.validate()
            # Test metrics are computed every epoch for logging only;
            # model selection below uses validation loss.
            loss_test, scores_test = runner.test()
            # Log the learning rate to watch lr decay.
            log = {
                'lr': runner.lr,
                'epoch': epoch,
                'loss_train': loss_train,
                'loss_valid': loss_valid,
                'loss_test': loss_test,
                'num_trained_samples': runner.total_trained_samples,
                **{f'{k}_train': v for k, v in scores_train.items()},
                **{f'{k}_valid': v for k, v in scores_valid.items()},
                **{f'{k}_test': v for k, v in scores_test.items()},
            }
            wandb.log(log, step=runner.total_trained_steps)
            is_best = runner.is_best(loss_valid)
            should_stop = runner.early_stop(loss_valid)
            if is_best:
                # The full metric dict is stored inside the checkpoint so the
                # final scores can be recovered from it after training.
                runner.save_checkpoint(**log)
                print(f'=> Checkpoint saved.')
            if should_stop:
                break
        # End of stage
        checkpoint = runner.load_checkpoint()  # back to the best weights
        print(f'=> The end of stage {stage}. Checkpoint loaded.')
    # Report the *_test entries of the best checkpoint as the final scores.
    final_scores = {
        f'final_{k.replace("_test", "")}': checkpoint[k]
        for k in checkpoint.keys() if k.endswith('_test')
    }
    wandb.log(final_scores)
    print()
    for k, v in final_scores.items():
        print(f'=> {k:12}: {v:.4f}')
    print('=> Done.')
# Script entry point.
if __name__ == '__main__':
    main()
| 1,916 | 27.191176 | 84 | py |
temporal-feedback-crnn | temporal-feedback-crnn-main/tfcrnn/models/__init__.py | from .blocks import *
from .skeletons import *
| 47 | 15 | 24 | py |
temporal-feedback-crnn | temporal-feedback-crnn-main/tfcrnn/models/blocks/tf_blocks.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from .plain_blocks import BasicBlock
class TFBasicBlock(nn.Module):
    """``BasicBlock`` whose output channels are gated by a hidden state.

    The hidden vector is projected through a small bottleneck MLP to a
    per-channel sigmoid scale (temporal-feedback excitation) that is
    broadcast over the time axis.
    """

    def __init__(self, in_channels, out_channels, hidden_size, amp_rate):
        super().__init__()
        self.base_block = BasicBlock(in_channels, out_channels)
        bottleneck = int(out_channels * amp_rate)
        self.excitation = nn.Sequential(OrderedDict([
            ('linear0', nn.Linear(hidden_size, bottleneck)),
            ('relu', nn.ReLU(inplace=True)),
            ('linear1', nn.Linear(bottleneck, out_channels)),
            ('scale', nn.Sigmoid()),
        ]))

    def forward(self, x, hidden):
        features = self.base_block(x)
        # Per-channel gate from the hidden state, broadcast over time.
        gate = self.excitation(hidden).unsqueeze(-1)
        return features * gate
class TFSEBlock(nn.Module):
    """Squeeze-and-excitation block conditioned on the hidden state.

    The excitation MLP sees the channel-wise average of the features
    concatenated with the recurrent hidden vector.
    """

    def __init__(self, in_channels, out_channels, hidden_size, amp_rate):
        super().__init__()
        self.base_block = BasicBlock(in_channels, out_channels)
        bottleneck = int(out_channels * amp_rate)
        self.excitation = nn.Sequential(OrderedDict([
            ('linear0', nn.Linear(out_channels + hidden_size, bottleneck)),
            ('relu', nn.ReLU(inplace=True)),
            ('linear1', nn.Linear(bottleneck, out_channels)),
            ('scale', nn.Sigmoid()),
        ]))

    def forward(self, x, hidden):
        features = self.base_block(x)
        # Squeeze: global average over time, then concat the hidden state.
        squeezed = F.adaptive_avg_pool1d(features, 1).squeeze(-1)
        gate = self.excitation(torch.cat([squeezed, hidden], dim=1))
        return features * gate.unsqueeze(-1)
class TFResSEBlock(nn.Module):
    """Residual SE block gated by channel stats plus the hidden state.

    Structure: shortcut (1x1 conv+BN only when channels change) + a
    conv-BN-ReLU-[dropout]-conv-BN residual branch, followed by ReLU,
    MaxPool(3), and a sigmoid channel gate computed from the pooled
    channel averages concatenated with the recurrent hidden vector.
    """

    def __init__(self, in_channels, out_channels, hidden_size, amp_rate, dropout):
        super().__init__()
        # Shortcut: identity unless the channel count changes.
        identity_layers = OrderedDict()
        if in_channels != out_channels:
            identity_layers['conv'] = nn.Conv1d(in_channels, out_channels, 1)
            identity_layers['norm'] = nn.BatchNorm1d(out_channels)
        self.identity = nn.Sequential(identity_layers)
        residual_layers = OrderedDict([
            ('conv0', nn.Conv1d(in_channels, out_channels, 3, 1, 1)),
            ('norm0', nn.BatchNorm1d(out_channels)),
            ('relu0', nn.ReLU(inplace=True)),
        ])
        if dropout > 0.0:
            residual_layers['drop'] = nn.Dropout(p=dropout)
        residual_layers['conv1'] = nn.Conv1d(out_channels, out_channels, 3, 1, 1)
        residual_layers['norm1'] = nn.BatchNorm1d(out_channels)
        self.residual = nn.Sequential(residual_layers)
        self.pool = nn.Sequential(OrderedDict([
            ('relu', nn.ReLU(inplace=True)),
            ('pool', nn.MaxPool1d(3)),
        ]))
        bottleneck = int(out_channels * amp_rate)
        self.excitation = nn.Sequential(OrderedDict([
            ('linear0', nn.Linear(out_channels + hidden_size, bottleneck)),
            ('relu', nn.ReLU(inplace=True)),
            ('linear1', nn.Linear(bottleneck, out_channels)),
            ('scale', nn.Sigmoid()),
        ]))

    def forward(self, x, hidden):
        shortcut = self.identity(x)
        out = self.pool(self.residual(x) + shortcut)
        # Squeeze channel statistics and concat the hidden state for gating.
        squeezed = F.adaptive_avg_pool1d(out, 1).squeeze(-1)
        gate = self.excitation(torch.cat([squeezed, hidden], dim=1))
        return out * gate.unsqueeze(-1)
| 3,213 | 36.372093 | 87 | py |
temporal-feedback-crnn | temporal-feedback-crnn-main/tfcrnn/models/blocks/plain_blocks.py | import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
class BasicBlock(nn.Sequential):
    """Conv1d(k=3) -> BatchNorm -> ReLU -> MaxPool(3) feature block."""

    def __init__(self, in_channels, out_channels):
        # Submodule names are kept stable ('conv'/'norm'/'relu'/'pool') so
        # state_dict keys stay compatible.
        super().__init__(OrderedDict([
            ('conv', nn.Conv1d(in_channels, out_channels, 3, 1, 1)),
            ('norm', nn.BatchNorm1d(out_channels)),
            ('relu', nn.ReLU(inplace=True)),
            ('pool', nn.MaxPool1d(3)),
        ]))
class SEBlock(nn.Module):
    """``BasicBlock`` followed by squeeze-and-excitation channel gating."""

    def __init__(self, in_channels, out_channels, amp_rate):
        super().__init__()
        self.base_block = BasicBlock(in_channels, out_channels)
        bottleneck = int(out_channels * amp_rate)
        self.se = nn.Sequential(OrderedDict([
            ('linear0', nn.Linear(out_channels, bottleneck)),
            ('relu', nn.ReLU(inplace=True)),
            ('linear1', nn.Linear(bottleneck, out_channels)),
            ('excitation', nn.Sigmoid()),
        ]))

    def forward(self, x):
        features = self.base_block(x)
        # Squeeze: channel-wise global average over the time axis.
        squeezed = F.adaptive_avg_pool1d(features, 1).squeeze(-1)
        gate = self.se(squeezed).unsqueeze(-1)
        return features * gate
class ResSEBlock(nn.Module):
    """Residual block with squeeze-and-excitation channel gating.

    Structure: shortcut (1x1 conv+BN only when channels change) + a
    conv-BN-ReLU-[dropout]-conv-BN residual branch, followed by ReLU,
    MaxPool(3), and a sigmoid channel gate from the pooled channel means.
    """

    def __init__(self, in_channels, out_channels, amp_rate, dropout=0.0):
        super().__init__()
        # Shortcut: identity unless the channel count changes.
        identity_layers = OrderedDict()
        if in_channels != out_channels:
            identity_layers['conv'] = nn.Conv1d(in_channels, out_channels, 1)
            identity_layers['norm'] = nn.BatchNorm1d(out_channels)
        self.identity = nn.Sequential(identity_layers)
        residual_layers = OrderedDict([
            ('conv0', nn.Conv1d(in_channels, out_channels, 3, 1, 1)),
            ('norm0', nn.BatchNorm1d(out_channels)),
            ('relu0', nn.ReLU(inplace=True)),
        ])
        if dropout > 0.0:
            residual_layers['drop'] = nn.Dropout(p=dropout)
        residual_layers['conv1'] = nn.Conv1d(out_channels, out_channels, 3, 1, 1)
        residual_layers['norm1'] = nn.BatchNorm1d(out_channels)
        self.residual = nn.Sequential(residual_layers)
        self.pool = nn.Sequential(OrderedDict([
            ('relu', nn.ReLU(inplace=True)),
            ('pool', nn.MaxPool1d(3)),
        ]))
        bottleneck = int(out_channels * amp_rate)
        self.se = nn.Sequential(OrderedDict([
            ('linear0', nn.Linear(out_channels, bottleneck)),
            ('relu', nn.ReLU(inplace=True)),
            ('linear1', nn.Linear(bottleneck, out_channels)),
            ('excitation', nn.Sigmoid()),
        ]))

    def forward(self, x):
        shortcut = self.identity(x)
        out = self.pool(self.residual(x) + shortcut)
        squeezed = F.adaptive_avg_pool1d(out, 1).squeeze(-1)
        gate = self.se(squeezed).unsqueeze(-1)
        return out * gate
| 2,546 | 35.385714 | 85 | py |
temporal-feedback-crnn | temporal-feedback-crnn-main/tfcrnn/models/blocks/__init__.py | from .plain_blocks import *
from .tf_blocks import *
| 53 | 17 | 27 | py |
temporal-feedback-crnn | temporal-feedback-crnn-main/tfcrnn/models/skeletons/crnn.py | from __future__ import annotations
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from tfcrnn.config import Config
from ..blocks import BasicBlock, SEBlock, ResSEBlock
class CRNN(nn.Module):
    """Convolutional-recurrent keyword classifier over raw waveforms.

    Each 50%-overlapping window of the waveform is embedded by a conv
    stack, fed to a GRU cell, and classified from the hidden state.
    """

    def __init__(self, config: Config):
        super(CRNN, self).__init__()
        self.config = config
        self.window = config.window
        # 50% overlap between consecutive windows.
        self.hop = config.window // 2
        self.hidden_size = config.hidden_size
        num_features = config.init_features
        # Stem: stride-3 conv immediately downsamples the waveform by 3x.
        self.init_block = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv1d(1, num_features, 3, 3, 1)),
            ('norm0', nn.BatchNorm1d(num_features)),
            ('relu0', nn.ReLU(inplace=True)),
        ]))
        # Each block pools by 3, so the depth is tied to log3(window).
        # NOTE(review): float log may be off by one at exact powers of 3 —
        # consider an integer log if window is guaranteed to be 3**k-like.
        num_blocks = int(math.log(config.window, 3)) - 1
        self.blocks = nn.Sequential()
        for i in range(num_blocks):
            # Double the channels at block 2 and at the last block.
            out_channels = num_features * 2 if i in [2, num_blocks - 1] else num_features
            if num_blocks == 3:
                # Special-case widths for the shallow 3-block variant.
                out_channels = [128, 256, 512][i]
            if config.block == 'basic':
                block = BasicBlock(num_features, out_channels)
            elif config.block == 'se':
                block = SEBlock(num_features, out_channels, config.se_amp)
            elif config.block == 'resse':
                block = ResSEBlock(num_features, out_channels, config.se_amp, dropout=config.dropout_resse)
            else:
                raise Exception(f'Unknown block for RNN: {config.block}')
            self.blocks.add_module(f'block{i}', block)
            num_features = out_channels
        self.cell = nn.GRUCell(num_features, self.hidden_size)
        self.classifier = nn.Linear(self.hidden_size, config.num_classes)

    def forward(self, x):
        """Run all segments of a fixed-length batch.

        Returns ``(logits, hidden)`` where logits is stacked per segment
        (shape: num_segments x batch x num_classes).
        """
        logits = []
        hidden = torch.zeros(x.shape[0], self.hidden_size, dtype=x.dtype, device=x.device)
        # Half-overlapping windows: 2 * (len // window) - 1 segments.
        num_segments = 2 * (x.shape[-1] // self.window) - 1
        for i in range(num_segments):
            s = x[..., i * self.hop:i * self.hop + self.window]
            s = self.init_block(s)
            s = self.blocks(s)
            # Collapse time with global max pooling -> one vector per segment.
            s = F.adaptive_max_pool1d(s, 1).squeeze(-1)
            if self.config.dropout > 0:
                s = F.dropout(s, p=self.config.dropout, training=self.training)
            hidden = self.cell(s, hidden)
            logit = self.classifier(hidden)
            logits.append(logit)
        return torch.stack(logits), hidden

    def forward_variable_length(self, x_batch):
        """This method is faster and better performing than temporal avg.

        Processes a batch of waveforms of differing lengths by shrinking
        the active batch as shorter items run out of segments; only the
        last step's logits are returned.
        """
        sequence_lengths = [(x.shape[-1] // self.window) * 2 - 1 for x in x_batch]
        # Ensure x_batch sequence length is in descending order
        assert all([len1 >= len2 for len1, len2 in zip(sequence_lengths, sequence_lengths[1:])])
        max_sequence_lenth = sequence_lengths[0]
        hidden = torch.zeros(len(x_batch), self.hidden_size, dtype=x_batch[0].dtype, device=x_batch[0].device)
        for i in range(max_sequence_lenth):
            # Items still having a segment at step i form a batch prefix
            # (valid because lengths are sorted descending).
            i_compute = [i < l for l in sequence_lengths]
            batch_size = sum(i_compute)
            x_this_batch = x_batch[:batch_size]  # get batch run this time
            s = [x[..., i * self.hop:i * self.hop + self.window] for x in x_this_batch]
            s = torch.stack(s)
            s = self.init_block(s)
            s = self.blocks(s)
            s = F.adaptive_max_pool1d(s, 1).squeeze(-1)
            if self.config.dropout > 0:
                s = F.dropout(s, p=self.config.dropout, training=self.training)
            # Update only the active prefix of the hidden state.
            hidden[:batch_size] = self.cell(s, hidden[:batch_size])
            logit = self.classifier(hidden)
        return logit, hidden
| 3,438 | 35.2 | 106 | py |
temporal-feedback-crnn | temporal-feedback-crnn-main/tfcrnn/models/skeletons/cnn.py | from __future__ import annotations
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from tfcrnn.config import Config
from ..blocks import BasicBlock, SEBlock, ResSEBlock
class CNN(nn.Module):
    """1-D CNN classifier: conv stem, stacked downsampling blocks, global max-pool head."""

    def __init__(self, config: Config):
        super(CNN, self).__init__()
        self.config = config
        num_features = config.init_features
        # Stem: stride-3 conv reduces the input length by 3x.
        self.init_block = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv1d(1, num_features, 3, 3, 1)),
            ('norm0', nn.BatchNorm1d(num_features)),
            ('relu0', nn.ReLU(inplace=True)),
        ]))
        self.blocks = nn.Sequential()
        # round() before int() guards against float error in math.log
        # (e.g. log(3**k, 3) can evaluate to k - 1e-16).
        num_blocks = int(round(math.log(config.input_size, 3), 5)) - 1
        for i in range(num_blocks):
            # Double the channel width at block 2 and at the last block.
            out_channels = num_features * 2 if i in [2, num_blocks - 1] else num_features
            if config.block == 'basic':
                self.blocks.add_module(f'block{i}', BasicBlock(num_features, out_channels))
            elif config.block == 'se':
                self.blocks.add_module(f'block{i}', SEBlock(num_features, out_channels, config.se_amp))
            elif config.block == 'resse':
                self.blocks.add_module(f'block{i}',
                                       ResSEBlock(num_features, out_channels, config.se_amp, dropout=config.dropout_resse))
            else:
                raise Exception(f'Unknown block: {config.block}')
            num_features = out_channels
        self.aggregation = nn.AdaptiveMaxPool1d(1)
        self.classifier = nn.Linear(num_features, config.num_classes)

    def forward(self, x):
        # Embed, optionally apply dropout, then classify.
        x = self.embed(x)
        if self.config.dropout > 0.:
            x = F.dropout(x, p=self.config.dropout, training=self.training)
        x = self.classifier(x)
        return x

    def embed(self, x):
        """Return a fixed-size embedding: conv features max-pooled over time."""
        x = self.init_block(x)
        x = self.blocks(x)
        x = self.aggregation(x).squeeze(-1)
        return x

    def forward_variable_length(self, x_batch, F_prob=torch.sigmoid):
        """Aggregate features temporally"""
        # One entry per clip: number of 50%-overlapping input windows that fit.
        segment_counts = [2 * (x.shape[-1] // self.config.input_size) - 1 for x in x_batch]
        # NOTE(review): assumes x_batch is sorted by length, longest first — confirm at call site.
        max_num_segments = segment_counts[0]
        input_hop = self.config.input_size // 2
        embed_size = self.classifier.in_features
        feature_sum = torch.zeros(len(x_batch), embed_size, dtype=x_batch[0].dtype, device=x_batch[0].device)
        prob_sum = torch.zeros(len(x_batch), self.config.num_classes, dtype=x_batch[0].dtype, device=x_batch[0].device)
        for i in range(max_num_segments):
            i_compute = [i < n for n in segment_counts]
            # Length-sorted input means the still-active clips form a prefix of the batch.
            batch_size = sum(i_compute)
            x_this_batch = x_batch[:batch_size]  # get batch run this time
            segment_batch = [x[:, i * input_hop:i * input_hop + self.config.input_size] for x in x_this_batch]
            segment_batch = torch.stack(segment_batch)
            embed = self.embed(segment_batch)
            logit = self.classifier(embed)
            feature_sum[:batch_size] += embed
            prob_sum[:batch_size] += F_prob(logit)
        # Average embeddings/probabilities over each clip's own segment count.
        feature_avg = feature_sum / torch.Tensor(segment_counts).unsqueeze(-1).to(feature_sum.device)
        prob_avg = prob_sum / torch.Tensor(segment_counts).unsqueeze(-1).to(prob_sum.device)
        logit = self.classifier(feature_avg)
        return logit, feature_avg, prob_avg
| 3,175 | 36.364706 | 115 | py |
temporal-feedback-crnn | temporal-feedback-crnn-main/tfcrnn/models/skeletons/tf_crnn.py | from __future__ import annotations
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from tfcrnn.config import Config
from ..blocks import TFBasicBlock, TFSEBlock, TFResSEBlock
class TFCRNN(nn.Module):
    """Temporal-feedback CRNN: conv blocks are conditioned on the previous GRU hidden state."""

    def __init__(self, config: Config):
        super(TFCRNN, self).__init__()
        self.config = config
        self.window = config.window
        self.hop = config.window // 2  # 50% overlap between consecutive windows
        self.hidden_size = config.hidden_size
        num_features = config.init_features
        # Stem: stride-3 conv reduces the window length by 3x.
        self.init_block = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv1d(1, num_features, 3, 3, 1)),
            ('norm0', nn.BatchNorm1d(num_features)),
            ('relu0', nn.ReLU(inplace=True)),
        ]))
        # Bug fix: round before truncating — math.log(3**k, 3) may evaluate to
        # k - 1e-16, so a bare int() would silently drop one block.
        # (Same guard as used in the CNN skeleton.)
        num_blocks = int(round(math.log(config.window, 3), 5)) - 1
        self.blocks = nn.Sequential()
        for i in range(num_blocks):
            # Double the channel width at block 2 and at the last block.
            out_channels = num_features * 2 if i in [2, num_blocks - 1] else num_features
            if num_blocks == 3:
                out_channels = [128, 256, 512][i]
            if config.block == 'basic':
                block = TFBasicBlock(num_features, out_channels, self.hidden_size, config.se_amp)
            elif config.block == 'se':
                block = TFSEBlock(num_features, out_channels, self.hidden_size, config.se_amp)
            elif config.block == 'resse':
                block = TFResSEBlock(num_features, out_channels, self.hidden_size, config.se_amp, dropout=config.dropout_resse)
            else:
                raise Exception(f'Unknown block for RNN: {config.block}')
            self.blocks.add_module(f'block{i}', block)
            num_features = out_channels
        self.cell = nn.GRUCell(num_features, self.hidden_size)
        self.classifier = nn.Linear(self.hidden_size, config.num_classes)

    def forward(self, x):
        """Encode each window conditioned on the running hidden state; returns (per-step logits, final hidden)."""
        logits = []
        hidden = torch.zeros(x.shape[0], self.hidden_size, dtype=x.dtype, device=x.device)
        num_segments = 2 * (x.shape[-1] // self.window) - 1
        for i in range(num_segments):
            s = x[..., i * self.hop:i * self.hop + self.window]
            s = self.init_block(s)
            # Unlike CRNN, each block also receives the previous hidden state (temporal feedback).
            for block in self.blocks:
                s = block(s, hidden)
            s = F.adaptive_max_pool1d(s, 1).squeeze(-1)
            if self.config.dropout > 0:
                s = F.dropout(s, p=self.config.dropout, training=self.training)
            hidden = self.cell(s, hidden)
            logit = self.classifier(hidden)
            logits.append(logit)
        return torch.stack(logits), hidden

    def forward_variable_length(self, x_batch):
        """This method is faster and better performing than temporal avg."""
        sequence_lengths = [(x.shape[-1] // self.window) * 2 - 1 for x in x_batch]
        # Ensure x_batch sequence length is in descending order
        assert all([len1 >= len2 for len1, len2 in zip(sequence_lengths, sequence_lengths[1:])])
        max_sequence_length = sequence_lengths[0]
        hidden = torch.zeros(len(x_batch), self.hidden_size, dtype=x_batch[0].dtype, device=x_batch[0].device)
        for i in range(max_sequence_length):
            i_compute = [i < l for l in sequence_lengths]
            # Clips are length-sorted, so active clips form a prefix of the batch.
            batch_size = sum(i_compute)
            x_this_batch = x_batch[:batch_size]  # get batch run this time
            s = [x[..., i * self.hop:i * self.hop + self.window] for x in x_this_batch]
            s = torch.stack(s)
            s = self.init_block(s)
            for block in self.blocks:
                s = block(s, hidden[:batch_size])
            s = F.adaptive_max_pool1d(s, 1).squeeze(-1)
            if self.config.dropout > 0:
                s = F.dropout(s, p=self.config.dropout, training=self.training)
            hidden[:batch_size] = self.cell(s, hidden[:batch_size])
        logit = self.classifier(hidden)
        return logit, hidden
| 3,636 | 35.009901 | 119 | py |
temporal-feedback-crnn | temporal-feedback-crnn-main/tfcrnn/models/skeletons/__init__.py | from .cnn import CNN
from .crnn import CRNN
from .tf_crnn import TFCRNN
| 72 | 17.25 | 27 | py |
temporal-feedback-crnn | temporal-feedback-crnn-main/tfcrnn/runners/speech_commands_runner.py | from __future__ import annotations
import os
import wandb
import torch
import torch.nn.functional as F
from collections import OrderedDict
from tqdm import tqdm
from torch.utils.data import DataLoader
from tfcrnn.config import Config
from tfcrnn.dataset import SpeechCommandsDataset
from .base_runner import BaseRunner
class SpeechCommandsRunner(BaseRunner):
    """Runner for Speech Commands: builds the three loaders and implements train/eval epochs."""

    def __init__(
            self,
            config: Config,
            checkpoint_path: str = None,
    ):
        super().__init__(config, checkpoint_path)
        self.dataset_train = SpeechCommandsDataset('train', config)
        self.dataset_valid = SpeechCommandsDataset('valid', config)
        self.dataset_test = SpeechCommandsDataset('test', config)
        num_workers = min(32, os.cpu_count())
        # Offline (dryrun) wandb sessions load data in-process for easier debugging.
        if os.environ.get('WANDB_MODE') == 'dryrun':
            num_workers = 0
        self.loader_train = DataLoader(
            self.dataset_train, config.batch_size,
            num_workers=num_workers, shuffle=True, drop_last=True
        )
        self.loader_valid = DataLoader(
            self.dataset_valid, config.batch_size,
            num_workers=num_workers, shuffle=False, drop_last=False
        )
        self.loader_test = DataLoader(
            self.dataset_test, config.batch_size,
            num_workers=num_workers, shuffle=False, drop_last=False
        )

    def train(self):
        """Run one training epoch; returns (epoch_loss, {'score': epoch_accuracy})."""
        self.model.train()
        sum_loss, sum_acc, n = 0., 0., 0.
        pbar = tqdm(self.loader_train)
        for x, y in pbar:
            x = x.to(self.device)
            y = y.to(self.device)
            if self.config.skeleton == 'cnn':
                loss, logit = self.compute_loss_cnn(x, y)
                acc = self.accuracy(logit, y)
            else:
                # Recurrent skeletons: score with the last time step's logits.
                loss, logits = self.compute_loss_crnn(x, y)
                acc = self.accuracy(logits[-1], y)
            # Optimize.
            loss.backward()
            self.optimizer.step()
            self.optimizer.zero_grad()
            # Log metrics.
            batch_size = y.shape[0]
            sum_loss += batch_size * loss.item()
            sum_acc += batch_size * acc
            n += batch_size
            self.total_trained_samples += batch_size
            self.total_trained_steps += 1
            pbar.set_postfix(OrderedDict([
                (f'loss_train', f'{sum_loss / n:.4f}'),
                (f'acc_train', f'{sum_acc / n:.4f}'),
            ]))
            if self.total_trained_steps % self.config.wandb_log_stepsize == 0:
                wandb.log({'loss_batch': loss.item(),
                           'accuracy': acc,
                           'num_trained_samples': self.total_trained_samples},
                          step=self.total_trained_steps)
        # Bug fix: drop_last=True skips the final partial batch, so normalize by
        # the number of samples actually processed (n), not len(dataset) — this
        # also keeps the returned metrics consistent with the progress bar.
        epoch_loss = sum_loss / n
        epoch_acc = sum_acc / n
        return epoch_loss, {'score': epoch_acc}

    def eval(self, loader):
        """Evaluate on `loader`; returns (loss, {'score': accuracy})."""
        self.model.eval()
        sum_loss, sum_acc, n = 0., 0., 0.
        pbar = tqdm(loader)
        with torch.no_grad():
            for x, y in pbar:
                x = x.to(self.device)
                y = y.to(self.device)
                if self.config.skeleton == 'cnn':
                    loss, logit = self.compute_loss_cnn(x, y)
                    acc = self.accuracy(logit, y)
                else:
                    logits, _ = self.model(x)
                    loss = F.cross_entropy(logits[-1], y)
                    acc = self.accuracy(logits[-1], y)
                # Log metrics.
                batch_size = y.shape[0]
                sum_loss += batch_size * loss.item()
                sum_acc += batch_size * acc
                n += batch_size
                pbar.set_postfix(OrderedDict([
                    (f'loss_{loader.dataset.split}', f'{sum_loss / n:.4f}'),
                    (f'acc_{loader.dataset.split}', f'{sum_acc / n:.4f}'),
                ]))
        # Eval loaders keep the last partial batch (drop_last=False), so the
        # dataset size equals the number of processed samples.
        epoch_loss = sum_loss / len(loader.dataset)
        epoch_acc = sum_acc / len(loader.dataset)
        return epoch_loss, {'score': epoch_acc}
| 3,649 | 28.918033 | 72 | py |
temporal-feedback-crnn | temporal-feedback-crnn-main/tfcrnn/runners/__init__.py | from .base_runner import BaseRunner
from .speech_commands_runner import SpeechCommandsRunner
| 93 | 30.333333 | 56 | py |
temporal-feedback-crnn | temporal-feedback-crnn-main/tfcrnn/runners/base_runner.py | from __future__ import annotations
import numpy as np
import os
import abc
import wandb
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
from tfcrnn.models import CNN, CRNN, TFCRNN
from tfcrnn.config import Config
from tfcrnn.utils import mkpath
class BaseRunner(abc.ABC):
    """Shared training scaffolding: model/optimizer setup, loss helpers, checkpointing.

    Subclasses provide datasets/loaders and implement `train` and `eval`.
    """

    def __init__(
            self,
            config: Config,
            checkpoint_path: str = None,
    ):
        self.config = config
        # Default checkpoint location lives inside the active wandb run directory.
        self.checkpoint_path = checkpoint_path or mkpath(wandb.run.dir, 'checkpoint.pth')
        self.device = torch.device(f'cuda:{config.gpu}') if torch.cuda.is_available() else torch.device('cpu')
        if config.skeleton == 'cnn':
            self.model = CNN(config).to(self.device)
        elif config.skeleton == 'crnn':
            self.model = CRNN(config).to(self.device)
        elif config.skeleton == 'tfcrnn':
            self.model = TFCRNN(config).to(self.device)
        else:
            raise ValueError(f'Unknown skeleton: {config.skeleton}')
        self.lr = config.initial_lr
        # Nesterov momentum is only valid with non-zero momentum, hence the toggle.
        self.optimizer = optim.SGD(self.model.parameters(), lr=self.lr, nesterov=False if config.momentum == 0. else True,
                                   momentum=config.momentum, weight_decay=config.weight_decay)
        self.scheduler = ReduceLROnPlateau(self.optimizer, factor=config.lr_decay, patience=config.patience,
                                           threshold=1e-5, verbose=True)
        self.loss_func = F.cross_entropy
        self.best_loss = np.inf
        self.total_trained_samples = 0
        self.total_trained_steps = 0
        # Child runners should implement.
        self.dataset_train = None
        self.dataset_valid = None
        self.dataset_test = None
        self.loader_train = None
        self.loader_valid = None
        self.loader_test = None

    @abc.abstractmethod
    def train(self):
        """Run one training epoch; implemented by subclasses."""
        ...

    @abc.abstractmethod
    def eval(self, loader):
        """Evaluate the model on `loader`; implemented by subclasses."""
        ...

    def validate(self):
        """Evaluate on the validation loader."""
        return self.eval(self.loader_valid)

    def test(self):
        """Evaluate on the test loader."""
        return self.eval(self.loader_test)

    def compute_loss_cnn(self, x, y, mask=None):
        """Forward the CNN and compute the (optionally masked) loss; returns (loss, logit)."""
        logit = self.model(x)
        if mask is None:
            loss = self.loss_func(logit, y)
        else:
            loss = self.loss_func(logit, y, reduction='none')
            # Give feedback to the model only using annotated label.
            # The masked loss is averaged only for annotated label.
            loss = (mask * loss).sum(dim=1) / mask.sum(dim=1)  # average for each sample
            loss = loss.mean()
        return loss, logit

    def compute_loss_crnn(self, x, y, mask=None):
        """Forward the recurrent model and compute the loss; returns (loss, per-step logits).

        'many2one' uses only the final step; 'many2many' averages uniformly over steps.
        """
        logits, _ = self.model(x)
        if self.config.loss == 'many2one':
            # Use logit from the last time step.
            if mask is None:
                loss = self.loss_func(logits[-1], y)
            else:
                loss = self.loss_func(logits[-1], y, reduction='none')
                # Give feedback to the model only using annotated label.
                # The masked loss is averaged only for annotated label.
                loss = (mask * loss).sum(dim=1) / mask.sum(dim=1)  # average for each sample
                loss = loss.mean()
        elif self.config.loss == 'many2many':
            # Uniform weight per time step.
            weights = torch.ones(len(logits), device=self.device)
            weights /= weights.sum()
            loss = 0.
            for logit, weight in zip(logits, weights):
                if mask is None:
                    loss += torch.mean(self.loss_func(logit, y, reduction='none') * weight)
                else:
                    loss_step = self.loss_func(logit, y, reduction='none')
                    # Give feedback to the model only using annotated label.
                    # The masked loss is averaged only for annotated label.
                    loss_step = (mask * loss_step).sum(dim=1) / mask.sum(dim=1)  # average for each sample
                    loss_step = loss_step.mean()
                    loss += loss_step * weight
        else:
            raise ValueError(f'Unknown loss: {self.config.loss}')
        return loss, logits

    def accuracy(self, input, target):
        """Fraction of samples whose argmax prediction matches the target."""
        input = input.max(1)[1].long().cpu()
        target = target.cpu()
        correct = (input == target).sum().item()
        return correct / float(input.shape[0])

    # Early stopping function for given validation loss
    def early_stop(self, validation_loss):
        # Step the plateau scheduler; report True the moment it decays the LR.
        self.scheduler.step(validation_loss)
        if self.lr > self.optimizer.param_groups[0]['lr']:
            self.lr = self.optimizer.param_groups[0]['lr']
            return True
        else:
            return False

    def is_best(self, validation_loss):
        """True (and record it) if the loss improved beyond the scheduler threshold."""
        if validation_loss < (self.best_loss - self.scheduler.threshold):
            self.best_loss = validation_loss
            return True
        else:
            return False

    def save_checkpoint(self, **kwargs):
        """Save config, model and optimizer state (plus any extras) to checkpoint_path."""
        torch.save({
            'config': self.config.as_dict(),
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            **kwargs
        }, self.checkpoint_path)

    def load_checkpoint(self):
        """Restore model/optimizer state; keeps the runner's current LR in force."""
        checkpoint = torch.load(self.checkpoint_path)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.optimizer.param_groups[0]['lr'] = self.lr
        return checkpoint

    def rename_checkpoint(self, score_test):
        """Append the test score to the checkpoint filename; returns the new path."""
        new_path = self.checkpoint_path.replace('.pth', f'_{score_test:.4f}.pth')
        os.rename(self.checkpoint_path, new_path)
        return new_path
| 5,233 | 32.33758 | 118 | py |
switchprompt | switchprompt-main/databuilding_script.py | """ Utility classes and functions related to SwitchPrompt (EACL 2023).
Copyright (c) 2022 Robert Bosch GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import os
import re
import json
import shutil
import numpy as np
np.random.seed(123)  # fixed seed so the few-shot splits are reproducible
# Set this to the place where you extracted the MNLI files
base_path = '/path/to/mnli/data/'
train_file = 'multinli_1.0_train.txt'
dev_matched = 'multinli_1.0_dev_matched.txt'
dev_mismatched = 'multinli_1.0_dev_mismatched.txt'
def create_dir(path, clean=True):
    """Create *path*, optionally wiping any existing content first.

    Args:
        path: Directory path to (re)create.
        clean: If True, delete the directory and its contents before creating.

    Returns:
        The same *path*, for call chaining.
    """
    if clean:
        shutil.rmtree(path, ignore_errors=True)  # Delete directory and its content
    # Robustness fix: with clean=False a pre-existing directory used to raise
    # FileExistsError; exist_ok=not clean makes the call idempotent in that case.
    os.makedirs(path, exist_ok=not clean)
    return path
def read_mnli_file(filename):
    """Parse an MNLI tab-separated split file, grouping instances by genre.

    The first line (header) and blank lines are skipped; each remaining row
    must have exactly 15 tab-separated columns.
    """
    by_genre = {}
    with open(filename, 'r', encoding='utf-8') as fin:
        rows = fin.read().splitlines()
    # rows[0] is the header; keep only non-empty data rows.
    for row in rows[1:]:
        if not row.strip():
            continue
        gold_label, _, _, _, _, sentence1, sentence2, _, _, genre, _, _, _, _, _ = row.split('\t')
        by_genre.setdefault(genre, []).append({
            "lang": "en",
            "sentence1": sentence1,
            "sentence2": sentence2,
            "gold": gold_label,
            "genre": genre,
        })
    return by_genre
def get_examples(instances, label, k, offset=0):
    """Collect up to *k* instances whose 'gold' field equals *label*, scanning from *offset*.

    Returns (examples, next_offset): next_offset is where a subsequent scan
    should resume, or len(instances) when the list was exhausted.
    """
    picked = []
    pos = offset
    while pos < len(instances):
        # Stop as soon as the quota is met; pos is then the resume point.
        if len(picked) == k:
            return picked, pos
        if instances[pos]['gold'] == label:
            picked.append(instances[pos])
        pos += 1
    return picked, len(instances)
create_dir('data/')
n_shots = [64, 16, 4, 2]
labels = ['neutral', 'entailment', 'contradiction']
# Build few-shot JSONL splits per (split, genre, label, shot-count).
for split, splitfile in [('mnli_train', train_file),
                         # We dont need mismatched, as we dont study cross-domain transfer
                         #('mnli_dev_mismatched', dev_mismatched),
                         ('mnli_dev', dev_matched)]:
    instances_per_genre = read_mnli_file(base_path + splitfile)
    all_examples = {n: [] for n in n_shots}
    for genre, instances in instances_per_genre.items():
        np.random.shuffle(instances)
        for label in labels:
            max_offset = 0
            for shots in n_shots:
                # Create two different splits as new test file is given
                # Use train_1 for training, train_2 for development, dev_X for testing
                if split == 'mnli_train':
                    with open(f'data/{split}_1_{genre}_{shots}_shots.jsonl', 'a', encoding='utf-8') as fout:
                        examples_1, offset = get_examples(instances, label, shots)
                        max_offset = max(max_offset, offset) # is only triggered at 64 shots
                        for element in examples_1:
                            fout.write(json.dumps(element, ensure_ascii=False)+'\n')
                    # The _2 split starts past every example used by any _1 split,
                    # so the two sets never overlap.
                    with open(f'data/{split}_2_{genre}_{shots}_shots.jsonl', 'a', encoding='utf-8') as fout:
                        examples_2, _ = get_examples(instances, label, shots, max_offset)
                        for element in examples_2:
                            fout.write(json.dumps(element, ensure_ascii=False)+'\n')
                else:
                    with open(f'data/{split}_{genre}_{shots}_shots.jsonl', 'a', encoding='utf-8') as fout:
                        examples, _ = get_examples(instances, label, shots)
                        for element in examples:
fout.write(json.dumps(element, ensure_ascii=False)+'\n') | 4,322 | 41.80198 | 108 | py |
switchprompt | switchprompt-main/arguments.py | """ Utility classes and functions related to SwitchPrompt (EACL 2023).
Copyright (c) 2022 Robert Bosch GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
from enum import Enum
import argparse
import dataclasses
from dataclasses import dataclass, field
from typing import Optional
from transformers import HfArgumentParser, TrainingArguments
from tasks.utils import *
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """

    # Required selectors, validated against the constants in tasks.utils.
    task_name: str = field(
        metadata={
            "help": "The name of the task to train on: " + ", ".join(TASKS),
            "choices": TASKS
        },
    )
    dataset_name: str = field(
        metadata={
            "help": "The name of the dataset to use: " + ", ".join(DATASETS),
            "choices": DATASETS
        }
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    # Tokenization / padding behaviour.
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch."
        },
    )
    # Optional truncation of split sizes (debugging / quick runs).
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
            "value if set."
        },
    )
    # Explicit data files (override dataset_name-based loading).
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={"help": "A csv or a json file containing the test data."}
    )
    template_id: Optional[int] = field(
        default=0,
        metadata={
            "help": "The specific prompt string to use"
        }
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    # NOTE(review): the help strings of the next three fields look copy-pasted from
    # model_name_or_path — verify their intended meaning before relying on --help.
    generic_dataset: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    specific_dataset: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    dataset_selection: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )
    # Prompt-tuning variants: P-tuning v2 (prefix) vs. plain prompt tuning.
    prefix: bool = field(
        default=False,
        metadata={
            "help": "Will use P-tuning v2 during training"
        }
    )
    prompt: bool = field(
        default=False,
        metadata={
            "help": "Will use prompt tuning during training"
        }
    )
    pre_seq_len: int = field(
        default=4,
        metadata={
            "help": "The length of prompt"
        }
    )
    # NOTE(review): help texts below repeat "The length of prompt" — presumably these
    # are the static/dynamic keyword counts of SwitchPrompt; confirm and reword.
    num_static_keyword: int = field(
        default=3,
        metadata={
            "help": "The length of prompt"
        }
    )
    num_dynamic_keyword: int = field(
        default=0,
        metadata={
            "help": "The length of prompt"
        }
    )
    prefix_projection: bool = field(
        default=False,
        metadata={
            "help": "Apply a two-layer MLP head over the prefix embeddings"
        }
    )
    prefix_hidden_size: int = field(
        default=512,
        metadata={
            "help": "The hidden size of the MLP projection head in Prefix Encoder if prefix projection is used"
        }
    )
    hidden_dropout_prob: float = field(
        default=0.1,
        metadata={
            "help": "The dropout probability used in the models"
        }
    )
@dataclass
class QuestionAnwseringArguments:
    """Arguments controlling question-answering n-best decoding and answer extraction."""

    # NOTE(review): the class name misspells "Answering", but it is part of the
    # public interface (referenced by get_args) — renaming would break callers.
    n_best_size: int = field(
        default=20,
        metadata={"help": "The total number of n-best predictions to generate when looking for an answer."},
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": "The maximum length of an answer that can be generated. This is needed because the start "
            "and end predictions are not conditioned on one another."
        },
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, some of the examples do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0,
        metadata={
            "help": "The threshold used to select the null answer: if the best answer has a score that is less than "
            "the score of the null answer minus this threshold, the null answer is selected for this example. "
            "Only useful when `version_2_with_negative=True`."
        },
    )
def get_args():
    """Parse all the args.

    Returns the four dataclass instances (model, data, training, QA) parsed
    from the command line by HfArgumentParser.
    """
    argument_classes = (ModelArguments, DataTrainingArguments, TrainingArguments, QuestionAnwseringArguments)
    return HfArgumentParser(argument_classes).parse_args_into_dataclasses()
switchprompt | switchprompt-main/app.py | """ Utility classes and functions related to SwitchPrompt (EACL 2023).
Copyright (c) 2022 Robert Bosch GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import sys
import evaluate
from evaluate.utils import launch_gradio_widget
# Temporarily drop the app directory so `evaluate.load` resolves the installed
# package instead of local files shadowing it.
sys.path = [p for p in sys.path if p != "/home/user/app"]
module = evaluate.load("seqeval")
sys.path = ["/home/user/app"] + sys.path  # restore the app dir afterwards
launch_gradio_widget(module)
| 986 | 36.961538 | 72 | py |
switchprompt | switchprompt-main/run.py | """ Utility classes and functions related to SwitchPrompt (EACL 2023).
Copyright (c) 2022 Robert Bosch GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import logging
import os
import sys
import numpy as np
from typing import Dict
import datasets
import transformers
from transformers import set_seed, Trainer
from transformers.trainer_utils import get_last_checkpoint
from arguments import get_args
from tasks.utils import *
os.environ["WANDB_DISABLED"] = "true"
logger = logging.getLogger(__name__)
def train(trainer, resume_from_checkpoint=None, last_checkpoint=None):
    """Run training, preferring an explicit resume path over a detected checkpoint."""
    checkpoint = resume_from_checkpoint if resume_from_checkpoint is not None else last_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
    # trainer.save_model()
    metrics = train_result.metrics
    trainer.log_metrics("train", metrics)
    trainer.save_metrics("train", metrics)
    trainer.save_state()
    trainer.log_best_metrics()
def evaluate(trainer):
    """Evaluate on the trainer's eval dataset and persist the resulting metrics."""
    logger.info("*** Evaluate ***")
    eval_metrics = trainer.evaluate()
    trainer.log_metrics("eval", eval_metrics)
    trainer.save_metrics("eval", eval_metrics)
def _predict_and_log(trainer, dataset):
    # Shared prediction + metric logging for a single dataset.
    predictions, labels, metrics = trainer.predict(dataset, metric_key_prefix="predict")
    # Token-level class ids; currently unused but kept for parity with the original flow.
    predictions = np.argmax(predictions, axis=2)
    trainer.log_metrics("predict", metrics)
    trainer.save_metrics("predict", metrics)

def predict(trainer, predict_dataset=None):
    """Predict on a single dataset or a dict of named datasets, logging metrics for each.

    Args:
        trainer: A HuggingFace-style trainer exposing predict/log_metrics/save_metrics.
        predict_dataset: None, one dataset, or a {name: dataset} mapping.
    """
    if predict_dataset is None:
        logger.info("No dataset is available for testing")
    elif isinstance(predict_dataset, dict):
        for dataset_name, d in predict_dataset.items():
            logger.info("*** Predict: %s ***" % dataset_name)
            _predict_and_log(trainer, d)
    else:
        logger.info("*** Predict ***")
        _predict_and_log(trainer, predict_dataset)
if __name__ == '__main__':
    args = get_args()
    # Only data/training args are needed here; the full tuple goes to get_trainer.
    _, data_args, training_args, _ = args
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    # Propagate the HF-derived log level to our logger and both libraries.
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    if not os.path.isdir("checkpoints") or not os.path.exists("checkpoints"):
        os.mkdir("checkpoints")
    # Task dispatch: import the task-specific trainer factory lazily.
    if data_args.task_name.lower() == "clinic":
        assert data_args.dataset_name.lower() in CLINIC_DATASETS
        from tasks.clinic.get_trainer import get_trainer
    else:
        raise NotImplementedError('Task {} is not implemented. Please choose a task from: {}'.format(data_args.task_name, ", ".join(TASKS)))
    set_seed(training_args.seed)
    trainer, predict_dataset = get_trainer(args)
    # Resume from an existing checkpoint directory unless explicitly overridden.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    if training_args.do_train:
        train(trainer, training_args.resume_from_checkpoint, last_checkpoint)
    # if training_args.do_eval:
    #     evaluate(trainer)
    # if training_args.do_predict:
    #     predict(trainer, predict_dataset)
| 5,263 | 34.809524 | 140 | py |
switchprompt | switchprompt-main/training/trainer_base.py | """ Utility classes and functions related to SwitchPrompt (EACL 2023).
Copyright (c) 2022 Robert Bosch GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import logging
import os
from typing import Dict, OrderedDict
from transformers import Trainer
logger = logging.getLogger(__name__)
_default_log_level = logging.INFO
logger.setLevel(_default_log_level)
class BaseTrainer(Trainer):
    """Trainer that tracks the best evaluation score seen across epochs and,
    on every improvement, optionally re-runs prediction on held-out test data.

    Extra constructor keywords (all other args/kwargs go to transformers.Trainer):
        predict_dataset: dataset, or dict mapping name -> dataset, to run
            ``self.predict`` on whenever the eval metric improves; skipped when None.
        test_key: metric name (without the "eval_"/"test_" prefix) used to rank
            results, e.g. "accuracy".
    """
    def __init__(self, *args, predict_dataset = None, test_key = "accuracy", **kwargs):
        super().__init__(*args, **kwargs)
        self.predict_dataset = predict_dataset
        self.test_key = test_key
        # Best results observed so far; per-test-set entries are added lazily below.
        self.best_metrics = OrderedDict({
            "best_epoch": 0,
            f"best_eval_{self.test_key}": 0,
        })
    def log_best_metrics(self):
        """Log and persist (to disk, uncombined) the best metrics collected so far."""
        self.log_metrics("best", self.best_metrics)
        self.save_metrics("best", self.best_metrics, combined=False)
    def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval):
        # Mirrors transformers.Trainer._maybe_log_save_evaluate, with extra
        # best-metric bookkeeping after each evaluation. `tr_loss` is mutated
        # in place (reset to zero) exactly as in the upstream implementation.
        if self.control.should_log:
            logs: Dict[str, float] = {}
            tr_loss_scalar = self._nested_gather(tr_loss).mean().item()
            # reset tr_loss to zero
            tr_loss -= tr_loss
            logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
            logs["learning_rate"] = self._get_learning_rate()
            self._total_loss_scalar += tr_loss_scalar
            self._globalstep_last_logged = self.state.global_step
            self.store_flos()
            self.log(logs)
        eval_metrics = None
        if self.control.should_evaluate:
            eval_metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
            self._report_to_hp_search(trial, epoch, eval_metrics)
            # New best eval score: record it and (optionally) score the test set(s).
            if eval_metrics["eval_"+self.test_key] > self.best_metrics["best_eval_"+self.test_key]:
                self.best_metrics["best_epoch"] = epoch
                self.best_metrics["best_eval_"+self.test_key] = eval_metrics["eval_"+self.test_key]
                if self.predict_dataset is not None:
                    if isinstance(self.predict_dataset, dict):
                        for dataset_name, dataset in self.predict_dataset.items():
                            _, _, test_metrics = self.predict(dataset, metric_key_prefix="test")
                            self.best_metrics[f"best_test_{dataset_name}_{self.test_key}"] = test_metrics["test_"+self.test_key]
                    else:
                        _, _, test_metrics = self.predict(self.predict_dataset, metric_key_prefix="test")
                        self.best_metrics["best_test_"+self.test_key] = test_metrics["test_"+self.test_key]
            logger.info(f"***** Epoch {epoch}: Best results *****")
            for key, value in self.best_metrics.items():
                logger.info(f"{key} = {value}")
            self.log(self.best_metrics)
        if self.control.should_save:
            self._save_checkpoint(model, trial, metrics=eval_metrics)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)
| 3,691 | 41.436782 | 128 | py |
switchprompt | switchprompt-main/training/trainer_exp.py | """ Utility classes and functions related to SwitchPrompt (EACL 2023).
Copyright (c) 2022 Robert Bosch GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import logging
import os
import random
import sys
from typing import Any, Dict, List, Optional, OrderedDict, Tuple, Union
import math
import random
import time
import warnings
import collections
from transformers.debug_utils import DebugOption, DebugUnderflowOverflow
from transformers.trainer_callback import TrainerState
from transformers.trainer_pt_utils import IterableDatasetShard
from transformers.trainer_utils import (
HPSearchBackend,
ShardedDDPOption,
TrainOutput,
get_last_checkpoint,
set_seed,
speed_metrics,
)
from transformers.file_utils import (
CONFIG_NAME,
WEIGHTS_NAME,
is_torch_tpu_available,
)
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from training.trainer_base import BaseTrainer, logger
class ExponentialTrainer(BaseTrainer):
    """BaseTrainer variant whose learning rate decays exponentially (gamma=0.95),
    with the scheduler stepped once per epoch inside the overridden train() loop."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
        # `num_training_steps` and `optimizer` are accepted only for interface
        # compatibility with transformers.Trainer; the schedule is a pure 0.95x
        # multiplicative decay on self.optimizer, stepped at epoch end in train().
        if self.lr_scheduler is None:
            self.lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=0.95, verbose=True)
        return self.lr_scheduler
    def train(
        self,
        resume_from_checkpoint: Optional[Union[str, bool]] = None,
        trial: Union["optuna.Trial", Dict[str, Any]] = None,
        ignore_keys_for_eval: Optional[List[str]] = None,
        **kwargs,
    ):
        """
        Main training entry point.
        Args:
            resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
                If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
                :class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
                `args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
                training will resume from the model/optimizer/scheduler states loaded here.
            trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
                The trial run or the hyperparameter dictionary for hyperparameter search.
            ignore_keys_for_eval (:obj:`List[str]`, `optional`)
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions for evaluation during the training.
            kwargs:
                Additional keyword arguments used to hide deprecated arguments
        """
        # NOTE(review): this method closely mirrors transformers.Trainer.train();
        # the substantive change is the once-per-epoch lr_scheduler.step() below.
        # Several branches reference names that are NOT imported in this module
        # (PretrainedConfig, __version__, TRAINER_STATE_NAME, tqdm, hp_params,
        # deepspeed_init, xm, pl, met, amp, dist) and would NameError if reached
        # -- confirm against the pinned transformers version.
        resume_from_checkpoint = None if not resume_from_checkpoint else resume_from_checkpoint
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        args = self.args
        self.is_in_train = True
        # do_train is not a reliable argument, as it might not be set and .train() still called, so
        # the following is a workaround:
        if args.fp16_full_eval and not args.do_train:
            self._move_model_to_device(self.model, args.device)
        if "model_path" in kwargs:
            resume_from_checkpoint = kwargs.pop("model_path")
            warnings.warn(
                "`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
                "instead.",
                FutureWarning,
            )
        if len(kwargs) > 0:
            raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
        # This might change the seed so needs to run first.
        self._hp_search_setup(trial)
        # Model re-init
        model_reloaded = False
        if self.model_init is not None:
            # Seed must be set before instantiating the model when using model_init.
            set_seed(args.seed)
            self.model = self.call_model_init(trial)
            model_reloaded = True
            # Reinitializes optimizer and scheduler
            self.optimizer, self.lr_scheduler = None, None
        # Load potential model checkpoint
        if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
            resume_from_checkpoint = get_last_checkpoint(args.output_dir)
            if resume_from_checkpoint is None:
                raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")
        if resume_from_checkpoint is not None:
            if not os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
                raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")
            logger.info(f"Loading model from {resume_from_checkpoint}).")
            if os.path.isfile(os.path.join(resume_from_checkpoint, CONFIG_NAME)):
                config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME))
                checkpoint_version = config.transformers_version
                if checkpoint_version is not None and checkpoint_version != __version__:
                    logger.warn(
                        f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
                        f"Transformers but your current version is {__version__}. This is not recommended and could "
                        "yield to errors or unwanted behaviors."
                    )
            if args.deepspeed:
                # will be resumed in deepspeed_init
                pass
            else:
                # We load the model state dict on the CPU to avoid an OOM error.
                state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
                # If the model is on the GPU, it still works!
                self._load_state_dict_in_model(state_dict)
                # release memory
                del state_dict
        # If model was re-initialized, put it on the right device and update self.model_wrapped
        if model_reloaded:
            if self.place_model_on_device:
                self._move_model_to_device(self.model, args.device)
            self.model_wrapped = self.model
        # Keeping track whether we can can len() on the dataset or not
        train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
        # Data loader and number of training steps
        train_dataloader = self.get_train_dataloader()
        # Setting up training control variables:
        # number of training epochs: num_train_epochs
        # number of training steps per epoch: num_update_steps_per_epoch
        # total number of training steps to execute: max_steps
        total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size
        if train_dataset_is_sized:
            num_update_steps_per_epoch = len(train_dataloader) // args.gradient_accumulation_steps
            num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
            if args.max_steps > 0:
                max_steps = args.max_steps
                num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
                    args.max_steps % num_update_steps_per_epoch > 0
                )
                # May be slightly incorrect if the last batch in the training datalaoder has a smaller size but it's
                # the best we can do.
                num_train_samples = args.max_steps * total_train_batch_size
            else:
                max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
                num_train_epochs = math.ceil(args.num_train_epochs)
                num_train_samples = len(self.train_dataset) * args.num_train_epochs
        else:
            # see __init__. max_steps is set when the dataset has no __len__
            max_steps = args.max_steps
            # Setting a very large number of epochs so we go as many times as necessary over the iterator.
            num_train_epochs = sys.maxsize
            num_update_steps_per_epoch = max_steps
            num_train_samples = args.max_steps * total_train_batch_size
        if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
            if self.args.n_gpu > 1:
                # nn.DataParallel(model) replicates the model, creating new variables and module
                # references registered here no longer work on other gpus, breaking the module
                raise ValueError(
                    "Currently --debug underflow_overflow is not supported under DP. Please use DDP (torch.distributed.launch)."
                )
            else:
                debug_overflow = DebugUnderflowOverflow(self.model) # noqa
        delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
        if args.deepspeed:
            deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
                self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
            )
            self.model = deepspeed_engine.module
            self.model_wrapped = deepspeed_engine
            self.deepspeed = deepspeed_engine
            self.optimizer = optimizer
            self.lr_scheduler = lr_scheduler
        elif not delay_optimizer_creation:
            self.create_optimizer_and_scheduler(num_training_steps=max_steps)
        self.state = TrainerState()
        self.state.is_hyper_param_search = trial is not None
        # Activate gradient checkpointing if needed
        if args.gradient_checkpointing:
            self.model.gradient_checkpointing_enable()
        model = self._wrap_model(self.model_wrapped)
        # for the rest of this function `model` is the outside model, whether it was wrapped or not
        if model is not self.model:
            self.model_wrapped = model
        if delay_optimizer_creation:
            self.create_optimizer_and_scheduler(num_training_steps=max_steps)
        # Check if saved optimizer or scheduler states exist
        self._load_optimizer_and_scheduler(resume_from_checkpoint)
        # important: at this point:
        # self.model is the Transformers Model
        # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
        # Train!
        num_examples = (
            self.num_examples(train_dataloader) if train_dataset_is_sized else total_train_batch_size * args.max_steps
        )
        logger.info("***** Running training *****")
        logger.info(f"  Num examples = {num_examples}")
        logger.info(f"  Num Epochs = {num_train_epochs}")
        logger.info(f"  Instantaneous batch size per device = {args.per_device_train_batch_size}")
        logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
        logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
        logger.info(f"  Total optimization steps = {max_steps}")
        self.state.epoch = 0
        start_time = time.time()
        epochs_trained = 0
        steps_trained_in_current_epoch = 0
        steps_trained_progress_bar = None
        # Check if continuing training from a checkpoint
        if resume_from_checkpoint is not None and os.path.isfile(
            os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)
        ):
            self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))
            epochs_trained = self.state.global_step // num_update_steps_per_epoch
            if not args.ignore_data_skip:
                steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
                steps_trained_in_current_epoch *= args.gradient_accumulation_steps
            else:
                steps_trained_in_current_epoch = 0
            logger.info("  Continuing training from checkpoint, will skip to saved global_step")
            logger.info(f"  Continuing training from epoch {epochs_trained}")
            logger.info(f"  Continuing training from global step {self.state.global_step}")
            if not args.ignore_data_skip:
                logger.info(
                    f"  Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
                    "batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` "
                    "flag to your launch command, but you will resume the training on data already seen by your model."
                )
                if self.is_local_process_zero() and not args.disable_tqdm:
                    steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)
                    steps_trained_progress_bar.set_description("Skipping the first batches")
        # Update the references
        self.callback_handler.model = self.model
        self.callback_handler.optimizer = self.optimizer
        self.callback_handler.lr_scheduler = self.lr_scheduler
        self.callback_handler.train_dataloader = train_dataloader
        self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
        if trial is not None:
            assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial
            self.state.trial_params = hp_params(assignments)
        else:
            self.state.trial_params = None
        # This should be the same if the state has been saved but in case the training arguments changed, it's safer
        # to set this after the load.
        self.state.max_steps = max_steps
        self.state.num_train_epochs = num_train_epochs
        self.state.is_local_process_zero = self.is_local_process_zero()
        self.state.is_world_process_zero = self.is_world_process_zero()
        # tr_loss is a tensor to avoid synchronization of TPUs through .item()
        tr_loss = torch.tensor(0.0).to(args.device)
        # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
        self._total_loss_scalar = 0.0
        self._globalstep_last_logged = self.state.global_step
        model.zero_grad()
        self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
        # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
        if not args.ignore_data_skip:
            for epoch in range(epochs_trained):
                # We just need to begin an iteration to create the randomization of the sampler.
                for _ in train_dataloader:
                    break
        for epoch in range(epochs_trained, num_train_epochs):
            if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
                train_dataloader.sampler.set_epoch(epoch)
            elif isinstance(train_dataloader.dataset, IterableDatasetShard):
                train_dataloader.dataset.set_epoch(epoch)
            if is_torch_tpu_available():
                parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device)
                epoch_iterator = parallel_loader
            else:
                epoch_iterator = train_dataloader
            # Reset the past mems state at the beginning of each epoch if necessary.
            if args.past_index >= 0:
                self._past = None
            steps_in_epoch = (
                len(epoch_iterator) if train_dataset_is_sized else args.max_steps * args.gradient_accumulation_steps
            )
            self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)
            step = -1
            for step, inputs in enumerate(epoch_iterator):
                # Skip past any already trained steps if resuming training
                if steps_trained_in_current_epoch > 0:
                    steps_trained_in_current_epoch -= 1
                    if steps_trained_progress_bar is not None:
                        steps_trained_progress_bar.update(1)
                    if steps_trained_in_current_epoch == 0:
                        self._load_rng_state(resume_from_checkpoint)
                    continue
                elif steps_trained_progress_bar is not None:
                    steps_trained_progress_bar.close()
                    steps_trained_progress_bar = None
                if step % args.gradient_accumulation_steps == 0:
                    self.control = self.callback_handler.on_step_begin(args, self.state, self.control)
                if (
                    ((step + 1) % args.gradient_accumulation_steps != 0)
                    and args.local_rank != -1
                    and args._no_sync_in_gradient_accumulation
                ):
                    # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
                    with model.no_sync():
                        tr_loss_step = self.training_step(model, inputs)
                else:
                    tr_loss_step = self.training_step(model, inputs)
                if (
                    args.logging_nan_inf_filter
                    and not is_torch_tpu_available()
                    and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))
                ):
                    # if loss is nan or inf simply add the average of previous logged losses
                    tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)
                else:
                    tr_loss += tr_loss_step
                self.current_flos += float(self.floating_point_ops(inputs))
                # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
                if self.deepspeed:
                    self.deepspeed.step()
                if (step + 1) % args.gradient_accumulation_steps == 0 or (
                    # last step in epoch but step is always smaller than gradient_accumulation_steps
                    steps_in_epoch <= args.gradient_accumulation_steps
                    and (step + 1) == steps_in_epoch
                ):
                    # Gradient clipping
                    if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:
                        # deepspeed does its own clipping
                        if self.use_amp:
                            # AMP: gradients need unscaling
                            self.scaler.unscale_(self.optimizer)
                        if hasattr(self.optimizer, "clip_grad_norm"):
                            # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
                            self.optimizer.clip_grad_norm(args.max_grad_norm)
                        elif hasattr(model, "clip_grad_norm_"):
                            # Some models (like FullyShardedDDP) have a specific way to do gradient clipping
                            model.clip_grad_norm_(args.max_grad_norm)
                        else:
                            # Revert to normal clipping otherwise, handling Apex or full precision
                            nn.utils.clip_grad_norm_(
                                amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
                                args.max_grad_norm,
                            )
                    # Optimizer step
                    optimizer_was_run = True
                    if self.deepspeed:
                        pass # called outside the loop
                    elif is_torch_tpu_available():
                        xm.optimizer_step(self.optimizer)
                    elif self.use_amp:
                        scale_before = self.scaler.get_scale()
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                        scale_after = self.scaler.get_scale()
                        optimizer_was_run = scale_before <= scale_after
                    else:
                        self.optimizer.step()
                    # NOTE: unlike the stock Trainer (which steps the scheduler on every
                    # optimizer step), the scheduler is stepped only on the last update of
                    # each epoch -> one 0.95x LR decay per epoch (see create_scheduler).
                    if optimizer_was_run and not self.deepspeed and (step + 1) == steps_in_epoch:
                        self.lr_scheduler.step()
                    model.zero_grad()
                    self.state.global_step += 1
                    self.state.epoch = epoch + (step + 1) / steps_in_epoch
                    self.control = self.callback_handler.on_step_end(args, self.state, self.control)
                    self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
                else:
                    self.control = self.callback_handler.on_substep_end(args, self.state, self.control)
                if self.control.should_epoch_stop or self.control.should_training_stop:
                    break
            if step < 0:
                logger.warning(
                    f"There seems to be not a single sample in your epoch_iterator, stopping training at step"
                    f" {self.state.global_step}! This is expected if you're using an IterableDataset and set"
                    f" num_steps ({max_steps}) higher than the number of available samples."
                )
                self.control.should_training_stop = True
            self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
            self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
            if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
                if is_torch_tpu_available():
                    # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                    xm.master_print(met.metrics_report())
                else:
                    logger.warning(
                        "You enabled PyTorch/XLA debug metrics but you don't have a TPU "
                        "configured. Check your training configuration if this is unexpected."
                    )
            if self.control.should_training_stop:
                break
        if args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of training
            delattr(self, "_past")
        logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
        if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
            # Wait for everyone to get here so we are sur the model has been saved by process 0.
            if is_torch_tpu_available():
                xm.rendezvous("load_best_model_at_end")
            elif args.local_rank != -1:
                dist.barrier()
            logger.info(
                f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
            )
            best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)
            if os.path.exists(best_model_path):
                # We load the model state dict on the CPU to avoid an OOM error.
                state_dict = torch.load(best_model_path, map_location="cpu")
                # If the model is on the GPU, it still works!
                self._load_state_dict_in_model(state_dict)
            else:
                logger.warn(
                    f"Could not locate the best model at {best_model_path}, if you are running a distributed training "
                    "on multiple nodes, you should activate `--save_on_each_node`."
                )
            if self.deepspeed:
                self.deepspeed.load_checkpoint(
                    self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
                )
        # add remaining tr_loss
        self._total_loss_scalar += tr_loss.item()
        train_loss = self._total_loss_scalar / self.state.global_step
        metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps)
        self.store_flos()
        metrics["total_flos"] = self.state.total_flos
        metrics["train_loss"] = train_loss
        self.is_in_train = False
        self._memory_tracker.stop_and_update_metrics(metrics)
        self.log(metrics)
        self.control = self.callback_handler.on_train_end(args, self.state, self.control)
        return TrainOutput(self.state.global_step, train_loss, metrics)
| 25,340 | 48.015474 | 130 | py |
switchprompt | switchprompt-main/model/sequence_classification.py | """ Utility classes and functions related to SwitchPrompt (EACL 2023).
Copyright (c) 2022 Robert Bosch GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
from os import truncate
import torch
from torch._C import NoopLogger
import torch.nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss
from transformers import BertModel, BertPreTrainedModel
from transformers import RobertaModel, RobertaPreTrainedModel
from transformers.modeling_outputs import SequenceClassifierOutput, BaseModelOutput, Seq2SeqLMOutput
from model.prefix_encoder import PrefixEncoder
import copy
from transformers import (
AutoConfig,
AutoTokenizer,
)
class BertForSequenceClassification(BertPreTrainedModel):
    """BERT encoder with a dropout + linear classification head on the pooled
    [CLS] representation. Fine-tunes the full backbone."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.bert = BertModel(config)
        self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
        self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    def _classification_loss(self, logits, labels):
        """Infer the problem type once (cached on the config, HF convention) and
        compute the matching loss; returns None for an unrecognized problem type."""
        if self.config.problem_type is None:
            if self.num_labels == 1:
                self.config.problem_type = "regression"
            elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                self.config.problem_type = "single_label_classification"
            else:
                self.config.problem_type = "multi_label_classification"
        problem_type = self.config.problem_type
        if problem_type == "regression":
            if self.num_labels == 1:
                return MSELoss()(logits.squeeze(), labels.squeeze())
            return MSELoss()(logits, labels)
        if problem_type == "single_label_classification":
            return CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
        if problem_type == "multi_label_classification":
            return BCEWithLogitsLoss()(logits, labels)
        return None

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Pooled [CLS] output -> dropout -> class logits.
        pooled = self.dropout(outputs[1])
        logits = self.classifier(pooled)
        loss = self._classification_loss(logits, labels) if labels is not None else None
        if not return_dict:
            # Legacy tuple output: (loss?, logits, *extra encoder outputs).
            tail = (logits,) + outputs[2:]
            return tail if loss is None else (loss,) + tail
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
class BertPrefixForSequenceClassification(BertPreTrainedModel):
    """Frozen BERT + trainable, keyword-conditioned prefix (SwitchPrompt) head.
    The backbone is frozen; only the PrefixEncoder, dropout and the linear
    classifier train.
    NOTE(review): forward()/context_word() rely on self.tokenizer, self.scorer,
    self.num_static_keyword and self.num_dynamic_keyword, none of which are set
    in __init__ here -- they must be attached externally before use; confirm.
    """
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.bert = BertModel(config)
        self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
        self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels)
        # Freeze the whole BERT backbone; only prefix/classifier params train.
        for param in self.bert.parameters():
            param.requires_grad = False
        self.pre_seq_len = config.pre_seq_len
        self.n_layer = config.num_hidden_layers
        self.n_head = config.num_attention_heads
        self.n_embd = config.hidden_size // config.num_attention_heads
        # Index tensors used to look up learnable prefix embeddings; the second
        # prefix has a hard-coded length of 9.
        self.prefix_tokens = torch.arange(self.pre_seq_len).long()
        self.prefix_tokens1 = torch.arange(9).long()
        self.prefix_encoder = PrefixEncoder(config)
        # Report the number of trainable (non-backbone) parameters.
        bert_param = 0
        for name, param in self.bert.named_parameters():
            bert_param += param.numel()
        all_param = 0
        for name, param in self.named_parameters():
            all_param += param.numel()
        total_param = all_param - bert_param
        print('total param is {}'.format(total_param)) # 9860105
    def get_prompt(self, context_word,pooled_output1,batch_size):
        """Build past_key_values from the keyword context and pooled sentence rep.
        Returns:
            (past_key_values, seq_size): per-layer (key, value) pairs suitable for
            BertModel's ``past_key_values`` argument, plus the prefix length used.
        """
        prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(self.bert.device)
        prefix_tokens1 = self.prefix_tokens1.unsqueeze(0).expand(batch_size, -1).to(self.bert.device)
        context_word = context_word.to(self.bert.device)
        past_key_values,seq_size = self.prefix_encoder(context_word,pooled_output1,self.bert.device,batch_size,prefix_tokens,prefix_tokens1)
        # Reshape the flat prefix to (batch, prefix_len, 2*n_layer, n_head, head_dim).
        past_key_values = past_key_values.view(
            batch_size,
            seq_size,
            self.n_layer * 2,
            self.n_head,
            self.n_embd
        )
        past_key_values = self.dropout(past_key_values)
        # Bring the layer axis to the front and split it into (key, value) pairs.
        past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(2)
        return past_key_values,seq_size
    def context_word(self,device,input_ids_sentence):
        """Extract keywords for each input sentence and encode them with BERT.
        Returns the last hidden states of the keyword batch (batch, kw_len, hidden).
        """
        final_state= []
        keywords_final = []
        for i in input_ids_sentence:
            #self.tokenizer.sep_token_id
            # Drop special tokens, decode back to text, then score keywords
            # (k_s static + k_d dynamic) with the external scorer.
            context_id = [t for t in i if t not in [self.tokenizer.sep_token_id, self.tokenizer.cls_token_id,self.tokenizer.pad_token_id]]
            output = self.tokenizer.decode(context_id)
            keywords = self.scorer.get_mixed_keywords(output, k_s = self.num_static_keyword, k_d = self.num_dynamic_keyword)
            keywords = ', '.join(keywords)
            keywords_final.append(keywords)
        input_ids = self.tokenizer(keywords_final, padding="longest", return_tensors="pt",add_special_tokens = False)
        input_ids = input_ids.to(device)
        # Encode the keyword strings with the frozen backbone.
        output = self.bert(**input_ids)
        final_state = output.last_hidden_state
        return final_state
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        # Runs BERT twice: a plain first pass to get the pooled sentence
        # representation that conditions the prefix, then a second pass with the
        # generated prefix supplied via past_key_values.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        batch_size = input_ids.shape[0]
        context_word = self.context_word(self.bert.device,input_ids)
        outputs1 = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        pooled_output1 = outputs1[1]
        past_key_values,seq_size = self.get_prompt(context_word,pooled_output1,batch_size=batch_size)
        # Extend the attention mask so the model can attend to the prefix slots.
        prefix_attention_mask = torch.ones(batch_size, seq_size).to(self.bert.device)
        attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            past_key_values=past_key_values,
        )
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once and cache it on the config (HF convention).
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
class BertPromptForSequenceClassification(BertPreTrainedModel):
    """BERT with soft *prompt tuning* for sequence classification.

    A trainable prompt embedding of length ``config.pre_seq_len`` is prepended
    to the (frozen) BERT token embeddings; only the prompt table and the
    classification head receive gradients.
    """
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        # Direct handle to BERT's embedding layer so forward() can embed the
        # input ids itself before prepending the prompt vectors.
        self.embeddings = self.bert.embeddings
        self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
        self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels)
        # Freeze the entire backbone; only prompt_encoder + classifier train.
        for param in self.bert.parameters():
            param.requires_grad = False
        self.pre_seq_len = config.pre_seq_len
        self.n_layer = config.num_hidden_layers
        self.n_head = config.num_attention_heads
        self.n_embd = config.hidden_size // config.num_attention_heads
        # Fixed indices 0..pre_seq_len-1 used to look up the soft prompt.
        self.prefix_tokens = torch.arange(self.pre_seq_len).long()
        self.prefix_encoder = torch.nn.Embedding(self.pre_seq_len, config.hidden_size)
    def get_prompt(self, batch_size):
        """Return the soft-prompt embeddings, shape (batch, pre_seq_len, hidden)."""
        prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(self.bert.device)
        prompts = self.prefix_encoder(prefix_tokens)
        return prompts
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Standard HF forward; returns SequenceClassifierOutput (or tuple).

        NOTE(review): assumes ``input_ids`` and ``attention_mask`` are always
        provided (the ``inputs_embeds`` argument is ignored and overwritten) —
        confirm callers never rely on passing embeddings directly.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        batch_size = input_ids.shape[0]
        raw_embedding = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
        )
        prompts = self.get_prompt(batch_size=batch_size)
        # Prepend the prompt vectors to the token embeddings.
        inputs_embeds = torch.cat((prompts, raw_embedding), dim=1)
        # Extend the attention mask so the prompt positions are attended to.
        prefix_attention_mask = torch.ones(batch_size, self.pre_seq_len).to(self.bert.device)
        attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)
        outputs = self.bert(
            # input_ids,
            attention_mask=attention_mask,
            # token_type_ids=token_type_ids,
            # position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            # past_key_values=past_key_values,
        )
        # pooled_output = outputs[1]
        sequence_output = outputs[0]
        # Drop the prompt positions, then re-pool from the first real token
        # ([CLS]) using BERT's own (frozen) pooler weights.
        sequence_output = sequence_output[:, self.pre_seq_len:, :].contiguous()
        first_token_tensor = sequence_output[:, 0]
        pooled_output = self.bert.pooler.dense(first_token_tensor)
        pooled_output = self.bert.pooler.activation(pooled_output)
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once, following the standard HF recipe;
            # the choice is cached on the config for subsequent calls.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            # Legacy tuple output: (loss?, logits, hidden_states?, attentions?)
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 15,176 | 38.626632 | 140 | py |
switchprompt | switchprompt-main/model/utils.py | """ Utility classes and functions related to SwitchPrompt (EACL 2023).
Copyright (c) 2022 Robert Bosch GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
from enum import Enum
from model.keyword_extractor import DomainScorer
import nltk
from model.sequence_classification import (
BertPrefixForSequenceClassification,
BertPromptForSequenceClassification,
)
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoModelForSequenceClassification,
AutoModelForQuestionAnswering,
AutoModelForMultipleChoice
)
class TaskType(Enum):
    """Supported downstream task categories.

    Note: the original definition had trailing commas after the first three
    members, which silently turned their values into 1-tuples (``(1,)`` etc.)
    while MULTIPLE_CHOICE stayed a plain int. Members are only used as
    identities / dict keys in this project, so normalizing every value to an
    int is backward-compatible for those uses.
    """
    TOKEN_CLASSIFICATION = 1
    SEQUENCE_CLASSIFICATION = 2
    QUESTION_ANSWERING = 3
    MULTIPLE_CHOICE = 4
# Registry: backbone model type -> task type -> prefix-tuning model class.
PREFIX_MODELS = {
    "bert": {
        TaskType.SEQUENCE_CLASSIFICATION: BertPrefixForSequenceClassification,
    },
}
# Registry: backbone model type -> task type -> prompt-tuning model class.
PROMPT_MODELS = {
    "bert": {
        TaskType.SEQUENCE_CLASSIFICATION: BertPromptForSequenceClassification,
    },
}
# Fallback registry for full fine-tuning via transformers Auto* classes.
AUTO_MODELS = {
    TaskType.SEQUENCE_CLASSIFICATION: AutoModelForSequenceClassification,
}
def get_model(model_args, tokenizer, task_type: TaskType, config: AutoConfig, fix_bert: bool = False):
    """Instantiate the task model (prefix-, prompt-, or full fine-tuning).

    Also builds a DomainScorer over the generic/specific corpora given in
    ``model_args`` and attaches it (plus keyword counts and the tokenizer)
    as attributes on the returned model.

    Args:
        model_args: parsed model arguments (paths, prefix/prompt flags, ...).
        tokenizer: tokenizer matching ``model_args.model_name_or_path``.
        task_type: which task head to build (only SEQUENCE_CLASSIFICATION
            has registered prefix/prompt classes).
        config: transformers config, mutated in place with prefix settings.
        fix_bert: if True, freeze the BERT backbone parameters.

    Returns:
        The instantiated model with ``tokenizer``, ``scorer``,
        ``num_dynamic_keyword`` and ``num_static_keyword`` attached.
    """
    if model_args.prefix:
        # Prefix-tuning: copy prefix hyperparameters onto the config so the
        # model class can read them in its __init__.
        config.hidden_dropout_prob = model_args.hidden_dropout_prob
        config.pre_seq_len = model_args.pre_seq_len
        config.prefix_projection = model_args.prefix_projection
        config.prefix_hidden_size = model_args.prefix_hidden_size
        model_class = PREFIX_MODELS[config.model_type][task_type]
        model = model_class.from_pretrained(
            model_args.model_name_or_path,
            config=config,
            revision=model_args.model_revision,
        )
    elif model_args.prompt:
        config.pre_seq_len = model_args.pre_seq_len
        model_class = PROMPT_MODELS[config.model_type][task_type]
        model = model_class.from_pretrained(
            model_args.model_name_or_path,
            config=config,
            revision=model_args.model_revision,
        )
    else:
        # Plain full fine-tuning via the transformers Auto* class.
        model_class = AUTO_MODELS[task_type]
        model = model_class.from_pretrained(
            model_args.model_name_or_path,
            config=config,
            revision=model_args.model_revision,
        )
    bert_param = 0
    if fix_bert:
        if config.model_type == "bert":
            for param in model.bert.parameters():
                param.requires_grad = False
            for _, param in model.bert.named_parameters():
                bert_param += param.numel()
    # Report the number of trainable (non-backbone) parameters.
    all_param = 0
    for _, param in model.named_parameters():
        all_param += param.numel()
    total_param = all_param - bert_param
    print('***** total param is {} *****'.format(total_param))
    model.tokenizer = tokenizer
    # call with:
    def read_questions_file(filename,dataset_selection):
        # Read one sentence per line; for the 'clinic' format the first
        # whitespace-separated field is a label and is dropped.
        print('Read ' + filename)
        with open(filename, 'r', encoding='utf-8') as fin:
            if (dataset_selection =='clinic'):
                content = fin.read().splitlines()
                sentences = [q.split(' ')[1:] for q in content]
            else:
                content = fin.read().splitlines()
                sentences = [q.split(' ') for q in content]
        print(f'Found {len(sentences)} sentences')
        return sentences
    general_file_path =model_args.generic_dataset
    dataset_file_path =model_args.specific_dataset
    t_general = read_questions_file(general_file_path,model_args.dataset_selection)
    t_clinical = read_questions_file(dataset_file_path,model_args.dataset_selection)
    # NOTE(review): assumes the model exposes a `.bert` attribute — true for
    # the registered prefix/prompt classes, but verify for Auto* models.
    scorer = DomainScorer(t_general, t_clinical, transformer=model.bert, transformer_tokenizer=tokenizer)
    # keywords = scorer.get_keywords(sentence)
    model.num_dynamic_keyword = model_args.num_dynamic_keyword
    model.num_static_keyword = model_args.num_static_keyword
    model.scorer = scorer
    return model
def get_model_deprecated(model_args, task_type: TaskType, config: AutoConfig, fix_bert: bool = False):
    """Deprecated model factory kept for reference; superseded by get_model().

    NOTE(review): if ``task_type`` is not SEQUENCE_CLASSIFICATION, no branch
    assigns ``model`` and the later ``model.named_parameters()`` call raises
    UnboundLocalError — callers must only pass sequence-classification tasks.
    """
    if model_args.prefix:
        # Prefix-tuning: copy prefix hyperparameters onto the config.
        config.hidden_dropout_prob = model_args.hidden_dropout_prob
        config.pre_seq_len = model_args.pre_seq_len
        config.prefix_projection = model_args.prefix_projection
        config.prefix_hidden_size = model_args.prefix_hidden_size
        if task_type == TaskType.SEQUENCE_CLASSIFICATION:
            from model.sequence_classification import BertPrefixModel, RobertaPrefixModel, DebertaPrefixModel, DebertaV2PrefixModel
            if config.model_type == "bert":
                model = BertPrefixModel.from_pretrained(
                    model_args.model_name_or_path,
                    config=config,
                    revision=model_args.model_revision,
                )
            else:
                raise NotImplementedError
    elif model_args.prompt:
        config.pre_seq_len = model_args.pre_seq_len
        from model.sequence_classification import BertPromptModel, RobertaPromptModel
        if config.model_type == "bert":
            model = BertPromptModel.from_pretrained(
                model_args.model_name_or_path,
                config=config,
                revision=model_args.model_revision,
            )
        else:
            raise NotImplementedError
    else:
        if task_type == TaskType.SEQUENCE_CLASSIFICATION:
            model = AutoModelForSequenceClassification.from_pretrained(
                model_args.model_name_or_path,
                config=config,
                revision=model_args.model_revision,
            )
    bert_param = 0
    if fix_bert:
        # Freeze the backbone and count its parameters separately.
        if config.model_type == "bert":
            for param in model.bert.parameters():
                param.requires_grad = False
            for _, param in model.bert.named_parameters():
                bert_param += param.numel()
    all_param = 0
    for _, param in model.named_parameters():
        all_param += param.numel()
    # Report the number of trainable (non-backbone) parameters.
    total_param = all_param - bert_param
    print('***** total param is {} *****'.format(total_param))
    return model
| 6,699 | 35.612022 | 131 | py |
switchprompt | switchprompt-main/model/keyword_extractor.py | """ Utility classes and functions related to SwitchPrompt (EACL 2023).
Copyright (c) 2022 Robert Bosch GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import numpy as np
import nltk
import string
from nltk.corpus import stopwords
#nltk.data.path.append('/home/kgo2rng/nltk_data/nltk_data')
from collections import Counter, defaultdict
class DomainScorer:
    """Scores terms by domain specificness and extracts keywords.

    Combines a general-domain term frequency, a domain term frequency and a
    domain idf into a single score (see ``domain_specificness``), and can
    optionally rank a fixed set of static keywords by transformer [CLS]
    embedding similarity.

    Fix vs. original: the final ``return similar_keywords`` line carried
    dataset-dump residue (``| 10,273 | ...``) that would raise at runtime
    (undefined name, invalid ``|`` operands); the residue is removed.
    """
    def __init__(self,
                 general_sentences,
                 domain_sentences,
                 domain_documents=None,
                 lowercase=True,
                 alpha=-1e5,
                 beta=1e2,
                 transformer=None,
                 transformer_tokenizer=None):
        # Compute term-frequencies (tf) and inverse-document-frequencies (idf)
        self.tf_general = self.tf(general_sentences, lowercase=lowercase)
        self.tf_domain = self.tf(domain_sentences, lowercase=lowercase)
        if domain_documents is None:
            self.idf_domain = self.idf(domain_sentences, lowercase=lowercase)
        else:
            self.idf_domain = self.idf(domain_documents, lowercase=lowercase)
            # Merge document-level counts into the sentence-level tf.
            for key, value in self.tf(domain_documents, lowercase=lowercase).items():
                if key in self.tf_domain:
                    self.tf_domain[key] += value
                else:
                    self.tf_domain[key] = value
        # Get and add stopwords
        self._stopwords = set(stopwords.words('english'))
        self._stopwords.add("'s")
        self._stopwords.add("n't")
        # Set lowercasing options
        if lowercase:
            self._stopwords = [s.lower() for s in self._stopwords]
        self.lowercase = lowercase
        # Set tf-idf hyperparameters and extract static domain keywords
        self.alpha = alpha
        self.beta = beta
        self.static_keywords = self.get_dynamic_keywords(
            ' '.join([' '.join([token for token in sent]) for sent in domain_sentences]),
            k=25,
        )
        # If available, compute BERT-embeddings of static keywords
        if transformer is not None and transformer_tokenizer is not None:
            self.transformer = transformer
            self.transformer_tokenizer = transformer_tokenizer
            #self.keyword_embeddings = np.zeros((
            #    len(self.static_keywords),
            #    self.transformer.config.hidden_size
            #))
            keyword_embeddings = []
            for keyword in self.static_keywords:
                embedding = self._get_cls_embedding(keyword)
                keyword_embeddings.append(embedding)
            self.keyword_embeddings = np.array(keyword_embeddings)
        else:
            self.transformer = None
            self.transformer_tokenizer = None
    @staticmethod
    def tf(questions, smoothing_factor=1, normalize=True, lowercase=True):
        """
        Estimates the term frequency on a given corpus:
        term_frequency = number of times a given term appears in document
        """
        term_freqs = Counter()
        for q in questions:
            for t in q:
                term_freqs[t.lower() if lowercase else t] += 1
        max_freq = -1
        if smoothing_factor != 0 or normalize:
            for t in term_freqs:
                term_freqs[t] += smoothing_factor
                max_freq = max(max_freq, term_freqs[t])
        if normalize:
            # Unknown terms fall back to a tiny non-zero frequency.
            term_freqs_norm = defaultdict(lambda: 1e-5)
            for t in term_freqs:
                term_freqs_norm[t] = term_freqs[t] / max_freq
            return term_freqs_norm
        else:
            term_freqs_smoothed = defaultdict(lambda: smoothing_factor)
            for t in term_freqs:
                term_freqs_smoothed[t] = term_freqs[t]
            return term_freqs_smoothed
    @staticmethod
    def idf(questions, smoothing_factor=1, lowercase=True):
        """
        Estimates the inverse document frequency on a domain-specific corpus:
        inverse_document_frequency = log(total number of documents / number of documents with term) + 1
        We use a version with smoothing that adds a "1" to the numerator and denominator:
        inverse_document_frequency = log((1 + total_number_of_documents) / (number_of_documents_with_term +1)) + 1
        """
        docs_with_term = Counter()
        for q in questions:
            for t in q:
                docs_with_term[t.lower() if lowercase else t] += 1
        idf = defaultdict(lambda: 1e-5)
        for t in docs_with_term:
            number_of_documents_with_term = docs_with_term[t] + smoothing_factor
            number_of_documents = len(docs_with_term) + smoothing_factor
            idf[t] = np.log(number_of_documents / number_of_documents_with_term) + 1
        return idf
    def domain_specificness(self, term, alpha=None, beta=None):
        """
        Estimates the domain specificness of a term
        domain_specificness = alpha * tf_general + beta * tf_domain + idf_domain
        We use alpha (<= 0) and beta (>= 0) for weighting the
        general and domain-specific term frequency, respectively.
        In practice, we found alpha=-1e5 and beta=1e2 work well.
        This penalizes general domain rather high and boosts domain-specific terms.
        """
        if alpha is None:
            alpha = self.alpha
        if beta is None:
            beta = self.beta
        score = alpha * self.tf_general[term] + beta * self.tf_domain[term] + self.idf_domain[term]
        return score
    @staticmethod
    def _select_top_k_keywords(tokens, scores, k=3):
        # Greedily pick the k highest-scored tokens.
        # NOTE: mutates the `tokens` and `scores` lists passed in.
        keywords = []
        for i in range(min(len(tokens), k)):
            best_idx = np.argmax(scores)
            keywords.append(tokens[best_idx])
            del scores[best_idx]
            del tokens[best_idx]
        return keywords
    @staticmethod
    def _get_selected_tokens(tokens, keywords, lowercased):
        """ Keywords might be lowercased; get the "real" tokens """
        output = []
        for k in keywords:
            for t in tokens:
                if k == t or (lowercased and k == t.lower()):
                    output.append(t)
                    break
        return output
    def _filter_tokens(self, tokens, filter_pos_tags, filter_stopwords):
        # Work on a copy; optionally keep only content-word POS tags.
        tokens = [t for t in tokens]
        if filter_pos_tags:
            pos_tags = nltk.pos_tag(tokens)
            tokens = [t for t, (_, p) in zip(tokens, pos_tags) if p[0] in [
                'N',  # Nouns
                #'V', # Verbs
                'R',  # Adverbs
                'J',  # Adjectives
            ]]
            # See https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
        if self.lowercase:
            tokens = [t.lower() for t in tokens]
        if filter_stopwords:
            tokens = [t for t in tokens if t not in self._stopwords and t not in string.punctuation]
        ##added as we do not require only numbers as keywords
        tokens = [t for t in tokens if not t.isdigit()]
        return tokens
    def _get_cls_embedding(self, input_text):
        # Embed the text and return the [CLS] vector as a numpy array.
        tokenized_input = self.transformer_tokenizer(input_text, return_tensors="pt")
        tokenized_input = tokenized_input.to(self.transformer.device)
        self.transformer.eval()
        outputs = self.transformer(**tokenized_input)
        cls_embedding = outputs.last_hidden_state[0][0]
        cls_embedding = cls_embedding.cpu().detach().numpy()
        return cls_embedding
    def get_dynamic_keywords(self, input_tokens, k=3, alpha=None, beta=None, filter_pos_tags=False, filter_stopwords=True):
        """Top-k most domain-specific tokens of the given sentence."""
        if isinstance(input_tokens, str):
            input_tokens = nltk.word_tokenize(input_tokens)
        tokens = self._filter_tokens(input_tokens, filter_pos_tags, filter_stopwords)
        tokens = list(set(tokens))
        scores = [self.domain_specificness(t, alpha, beta) for t in tokens]
        keywords = self._select_top_k_keywords(tokens, scores, k)
        keywords = self._get_selected_tokens(input_tokens, keywords, self.lowercase)
        return keywords
    def get_static_keywords(self, k=3):
        """First k of the corpus-level static keywords extracted in __init__."""
        return self.static_keywords[:k]
    def get_mixed_keywords(self, input_tokens, k_s=3, k_d=3, k=6, alpha=None, beta=None, filter_pos_tags=False, filter_stopwords=True):
        """Concatenation of k_s static and k_d dynamic keywords (k used as a
        fallback split when k_s/k_d are None)."""
        if isinstance(input_tokens, str):
            input_tokens = nltk.word_tokenize(input_tokens)
        if k_s is None or k_d is None:
            if k % 2 == 0:
                k_s, k_d = int(k/2), int(k/2)
            else:
                k_s, k_d = int(k/2)+1, int(k/2)
        dynamic_keywords = self.get_dynamic_keywords(input_tokens, k_d, alpha, beta, filter_pos_tags, filter_stopwords)
        static_keywords = self.get_static_keywords(k_s)
        return static_keywords + dynamic_keywords
    def get_most_similar_keywords(self, input_tokens, k=3):
        """Static keywords ranked by cosine similarity of [CLS] embeddings."""
        if self.transformer is None:
            return []
        if isinstance(input_tokens, str):
            input_tokens = nltk.word_tokenize(input_tokens)
        x = self._get_cls_embedding(' '.join(input_tokens))
        M = self.keyword_embeddings
        # cosine similarity
        dot_product = np.dot(x, M.T)
        norm_a = np.linalg.norm(x)
        norm_b = np.linalg.norm(M, axis=1)
        score = dot_product / (norm_a * norm_b)
        similar_keywords = self._select_top_k_keywords(list(self.static_keywords), list(score), k)
        return similar_keywords
switchprompt | switchprompt-main/model/prefix_encoder.py | """ Utility classes and functions related to SwitchPrompt (EACL 2023).
Copyright (c) 2022 Robert Bosch GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import torch
from torch.autograd import Variable
import torch.nn as nn
class PrefixEncoder(torch.nn.Module):
    r'''
    The torch.nn model to encode the prefix
    Input shape: (batch-size, prefix-length)
    Output shape: (batch-size, prefix-length, 2*layers*hidden)

    Fixes vs. original:
    * the ``prefix_projection`` forward path referenced ``self.embedding1`` /
      ``self.embedding2`` (only created in the non-projection branch) and
      called ``torch.nn.Sigmoid(tensor)`` (a constructor, not a function),
      so it always crashed; it now uses the embedding+MLP actually built in
      ``__init__``.
    * ``self.weight1 = self.weight1.to(device)`` re-assigned a registered
      ``nn.Parameter`` attribute, which raises TypeError whenever the target
      device differs; the moved tensors are now kept in locals.
    * dataset-dump residue fused onto the final ``return`` line is removed.
    '''
    def __init__(self, config):
        super().__init__()
        self.prefix_projection = config.prefix_projection
        # Size of one past_key_values slice: 2 (key+value) per layer.
        self.size = config.num_hidden_layers * 2 * config.hidden_size
        self.pre_seq_len = config.pre_seq_len
        if self.prefix_projection:
            # Use a two-layer MLP to encode the prefix
            self.embedding = torch.nn.Embedding(config.pre_seq_len, config.hidden_size)
            self.trans = torch.nn.Sequential(
                torch.nn.Linear(config.hidden_size, config.prefix_hidden_size),
                torch.nn.Tanh(),
                torch.nn.Linear(config.prefix_hidden_size, config.num_hidden_layers * 2 * config.hidden_size)
            )
        else:
            self.embedding1 = torch.nn.Embedding(config.pre_seq_len, config.num_hidden_layers * 2 * config.hidden_size)
            self.embedding2 = torch.nn.Embedding(9, config.num_hidden_layers * 2 * config.hidden_size)
            # NOTE(review): lstm_head and embedding2 are created but unused in
            # forward(); kept so checkpoints and RNG-dependent init match.
            self.lstm_head = torch.nn.LSTM(
                input_size=config.hidden_size,
                hidden_size=config.hidden_size,
                num_layers=1,
                bidirectional=True,
                batch_first=True,
            )
            self.new_layer = torch.nn.Linear(config.hidden_size, config.num_hidden_layers * 2 * config.hidden_size)
            self.gate1 = torch.nn.Linear(config.hidden_size, config.num_hidden_layers * 2 * config.hidden_size)
            self.gate2 = torch.nn.Linear(config.hidden_size, config.num_hidden_layers * 2 * config.hidden_size)
            self.weight1 =nn.Parameter(torch.FloatTensor(1,config.num_hidden_layers * 2 * config.hidden_size), requires_grad=True)
            self.weight2 = nn.Parameter(torch.FloatTensor(1,config.num_hidden_layers * 2 * config.hidden_size), requires_grad=True)
            #initialization of weights in the range of xaviers normal distribution
            torch.nn.init.xavier_normal_(self.weight1)
            torch.nn.init.xavier_normal_(self.weight2)
            torch.nn.init.xavier_normal_(self.new_layer.weight)
            torch.nn.init.xavier_normal_(self.gate1.weight)
    def forward(self,context_word,pooled_output1,device,batch_size, prefix: torch.Tensor,prefix1: torch.Tensor,):
        """Compute gated prefix representations.

        Returns:
            (past_key_values, seq_size): tensor of shape
            (batch, seq_size, num_layers*2*hidden) and its sequence length.
        """
        if self.prefix_projection:
            # P-tuning-v2 style: embed prefix ids, then project with the MLP.
            past_key_values = self.trans(self.embedding(prefix))
        else:
            prefix_tokens1 = self.embedding1(prefix)
            m = torch.nn.Sigmoid()
            # Project keyword embeddings into the past_key_values space.
            context_word = m(self.new_layer(context_word))
            batch_size,word_size,embedding_dimension = context_word.shape
            #keyword as prefix and suffix
            padded_a = torch.cat([context_word,prefix_tokens1], dim = 1)
            padded_b = torch.cat([prefix_tokens1, context_word], dim = 1)
            batch_size,length,embedding = padded_a.shape
            #calculation of gates (weights moved to the target device without
            #re-assigning the registered Parameters)
            w1 = self.weight1.to(device)
            w2 = self.weight2.to(device)
            pooled_output1 = pooled_output1.unsqueeze(1)
            pooled_output1 = pooled_output1.repeat(1,length,1)
            gate1 = m(self.gate1(pooled_output1))
            gate1 = m((w1 * gate1))
            gate2 = m((w2 * gate1))
            #padding to s = (m-n)
            zeroes = torch.zeros(batch_size, (length-self.pre_seq_len) , self.size).long().to(device)
            padded_prefix = torch.cat([prefix_tokens1,zeroes], dim = 1)
            #switchprompt calculation
            past_key_values = (gate1 * (padded_prefix)) + ((1 - (gate1)) * (gate2 * (padded_a) + ((1 - (gate2)) * (padded_b))))
        batch_size,seq_size,embedding_dimension = past_key_values.shape
        return past_key_values,seq_size
switchprompt | switchprompt-main/tasks/utils.py | """ Utility classes and functions related to SwitchPrompt (EACL 2023).
Copyright (c) 2022 Robert Bosch GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
from tasks.glue.dataset import task_to_keys as glue_tasks
from tasks.superglue.dataset import task_to_keys as superglue_tasks
# Dataset name lists per benchmark suite; this project only trains on the
# clinic task, so TASKS/DATASETS are restricted to it.
GLUE_DATASETS = list(glue_tasks.keys())
SUPERGLUE_DATASETS = list(superglue_tasks.keys())
CLINIC_DATASETS = ["clinic"]
TASKS = ["clinic"]
DATASETS = CLINIC_DATASETS
# Per-backbone tokenizer options: whether the tokenizer requires
# `add_prefix_space`, and whether a fast (Rust) tokenizer is usable.
ADD_PREFIX_SPACE = {
    'bert': False,
    'roberta': True,
    'deberta': True,
    'gpt2': True,
    'deberta-v2': True,
}
USE_FAST = {
    'bert': True,
    'roberta': True,
    'deberta': True,
    'gpt2': True,
    'deberta-v2': False,
}
switchprompt | switchprompt-main/tasks/clinic/dataset.py | """ Utility classes and functions related to SwitchPrompt (EACL 2023).
Copyright (c) 2022 Robert Bosch GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import torch
from torch.utils import data
from torch.utils.data import Dataset
from datasets.arrow_dataset import Dataset as HFDataset
from datasets.load import load_dataset, load_metric
from transformers import (
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
default_data_collator,
)
import numpy as np
import logging
# Maps each dataset name to its (sentence1, sentence2) column keys; the
# clinic task is single-sentence, hence the None second key.
task_to_keys = {
    "clinic": ("sentence_1", None),
}
logger = logging.getLogger(__name__)
class ClinicDataset():
    """Loads and tokenizes the clinic dataset for sequence classification.

    Exposes ``train_dataset`` / ``eval_dataset`` / ``predict_dataset`` plus
    the label mappings, metric function and data collator the trainer needs.
    """
    def __init__(self, tokenizer: AutoTokenizer, data_args, training_args) -> None:
        super().__init__()
        # The dataset is defined by a local HF datasets loading script.
        raw_datasets = load_dataset(f'tasks/clinic/datasets/{data_args.dataset_name}.py')
        self.tokenizer = tokenizer
        self.data_args = data_args
        #labels
        self.is_regression = data_args.dataset_name == "stsb"
        if not self.is_regression:
            self.label_list = raw_datasets["train"].features["label"].names
            self.num_labels = len(self.label_list)
        else:
            self.num_labels = 1
        # Preprocessing the raw_datasets
        self.sentence1_key, self.sentence2_key = task_to_keys[data_args.dataset_name]
        # Padding strategy
        if data_args.pad_to_max_length:
            self.padding = "max_length"
        else:
            # We will pad later, dynamically at batch creation, to the max sequence length in each batch
            self.padding = False
        # Some models have set the order of the labels to use, so let's make sure we do use it.
        if not self.is_regression:
            self.label2id = {l: i for i, l in enumerate(self.label_list)}
            self.id2label = {id: label for label, id in self.label2id.items()}
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        self.max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
        # Tokenize all splits up front.
        raw_datasets = raw_datasets.map(
            self.preprocess_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
        if training_args.do_train:
            self.train_dataset = raw_datasets["train"]
            if data_args.max_train_samples is not None:
                self.train_dataset = self.train_dataset.select(range(data_args.max_train_samples))
        if training_args.do_eval:
            self.eval_dataset = raw_datasets["validation_matched" if data_args.dataset_name == "mnli" else "validation"]
            if data_args.max_eval_samples is not None:
                self.eval_dataset = self.eval_dataset.select(range(data_args.max_eval_samples))
        if training_args.do_predict or data_args.dataset_name is not None or data_args.test_file is not None:
            self.predict_dataset = raw_datasets["test_matched" if data_args.dataset_name == "mnli" else "test"]
            if data_args.max_predict_samples is not None:
                self.predict_dataset = self.predict_dataset.select(range(data_args.max_predict_samples))
        #self.metric = load_metric("glue", data_args.dataset_name)
        # NOTE(review): if neither pad_to_max_length nor fp16 is set,
        # self.data_collator is never assigned — confirm the run configs
        # always enable one of the two.
        if data_args.pad_to_max_length:
            self.data_collator = default_data_collator
        elif training_args.fp16:
            self.data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    def preprocess_function(self, examples):
        """Tokenize one batch of examples (single- or sentence-pair input)."""
        # Tokenize the texts
        args = (
            (examples[self.sentence1_key],) if self.sentence2_key is None else (examples[self.sentence1_key], examples[self.sentence2_key])
        )
        result = self.tokenizer(*args, padding=self.padding, max_length=self.max_seq_length, truncation=True)
        return result
    def compute_metrics(self, p: EvalPrediction):
        """Return MSE for regression, accuracy for classification."""
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.squeeze(preds) if self.is_regression else np.argmax(preds, axis=1)
        # if self.data_args.dataset_name is not None:
        #     result = self.metric.compute(predictions=preds, references=p.label_ids)
        #     if len(result) > 1:
        #         result["combined_score"] = np.mean(list(result.values())).item()
        #     return result
        if self.is_regression:
            return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
        else:
            return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
| 5,412 | 42.304 | 139 | py |
switchprompt | switchprompt-main/tasks/clinic/get_trainer.py | """ Utility classes and functions related to SwitchPrompt (EACL 2023).
Copyright (c) 2022 Robert Bosch GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import logging
import os
import random
import sys
from transformers import (
AutoConfig,
AutoTokenizer,
)
from model.utils import get_model, TaskType
from tasks.clinic.dataset import ClinicDataset
from training.trainer_base import BaseTrainer
logger = logging.getLogger(__name__)
def get_trainer(args):
    """Build a BaseTrainer for the clinic sequence-classification task.

    Args:
        args: 4-tuple of (model_args, data_args, training_args, _); the
            fourth element is unused.

    Returns:
        (trainer, None): the configured trainer; the second element is a
        placeholder kept for API compatibility with other get_trainer()s.

    Fix vs. original: the final line carried dataset-dump residue
    (``| 2,474 | ...``) that would raise at runtime; it is removed.
    """
    model_args, data_args, training_args, _ = args
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.model_name_or_path,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
    )
    dataset = ClinicDataset(tokenizer, data_args, training_args)
    # Classification configs carry the label id mappings; regression
    # (num_labels == 1) has none.
    if not dataset.is_regression:
        config = AutoConfig.from_pretrained(
            model_args.model_name_or_path,
            num_labels=dataset.num_labels,
            label2id=dataset.label2id,
            id2label=dataset.id2label,
            finetuning_task=data_args.dataset_name,
            revision=model_args.model_revision,
        )
    else:
        config = AutoConfig.from_pretrained(
            model_args.model_name_or_path,
            num_labels=dataset.num_labels,
            finetuning_task=data_args.dataset_name,
            revision=model_args.model_revision,
        )
    model = get_model(model_args, tokenizer, TaskType.SEQUENCE_CLASSIFICATION, config)
    # Initialize our Trainer
    trainer = BaseTrainer(
        model=model,
        args=training_args,
        train_dataset=dataset.train_dataset if training_args.do_train else None,
        eval_dataset=dataset.eval_dataset if training_args.do_eval else None,
        compute_metrics=dataset.compute_metrics,
        tokenizer=tokenizer,
        data_collator=dataset.data_collator,
    )
    return trainer, None
switchprompt | switchprompt-main/tasks/clinic/datasets/clinic.py | """ Utility classes and functions related to SwitchPrompt (EACL 2023).
Copyright (c) 2022 Robert Bosch GmbH
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
# Lint as: python3
"""Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition"""
import datasets
import json
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@article{lange2022clin,
title={CLIN-X: pre-trained language models and a study on cross-task transfer for concept extraction in the clinical domain},
author={Lange, Lukas and Adel, Heike and Str{\"o}tgen, Jannik and Klakow, Dietrich},
journal={Bioinformatics},
volume={38},
number={12},
pages={3267--3274},
year={2022},
publisher={Oxford University Press}
}
"""
_DESCRIPTION = """\
This is a question answer classification dataset
"""
_URL = "../../../clinic_dataset/"
_TRAINING_FILE = "train_split,2.jsonl"
_DEV_FILE = "dev_split,2.jsonl"
_TEST_FILE = "test_split,2.jsonl"
class clinicConfig(datasets.BuilderConfig):
    """Builder configuration for the CLIN-X clinic dataset."""
    def __init__(self, **kwargs):
        """Create a config, passing every keyword argument through.

        Args:
            **kwargs: forwarded unchanged to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
class clinic(datasets.GeneratorBasedBuilder):
    """CLIN-X dataset: binary (positive/negative) sentence classification
    loaded from local JSONL splits."""
    BUILDER_CONFIGS = [
        clinicConfig(name="clinic", version=datasets.Version("1.0.0"), description="Clinic Dataset"),
    ]
    def _info(self):
        # Schema: one sentence per example with a two-way class label.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "sentence_1": datasets.Value("string"),
                    "label":
                        datasets.features.ClassLabel(
                            names= ["positive", "negative"] #["Management", "Information", "Susceptibility", "Prognosis", "Diagnosis", "OtherEffect", "Cause", "Manifestation", "PersonOrg", "Complication", "Anatomy", "NotDisease"]
                        ),
                }
            ),
            supervised_keys=None,
            homepage=None,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]
    def _generate_examples(self, filepath):
        """Yield (guid, example) pairs from one JSONL split file; each line
        must contain 'sentence1' and 'gold' keys."""
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            # sentence_1 = []
            # gold = []
            count = 0
            for line in f:
                row = json.loads(line)
                # if (count < 10):
                guid += 1
                sentence_1 = row.get("sentence1")
                gold = row.get("gold")
                yield guid, {
                    "id": str(guid),
                    "sentence_1": sentence_1,
                    "label": gold,
                }
                # count = count + 1
"""Render example parent/child molecules from a crossover to PNG files."""
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.Chem import rdMolHash

# Parent and fragment SMILES illustrating a crossover operation.
SMILES_INPUTS = [
    "C1CC2=C3C(=CC=C2)C(=CN3C1)[C]4[C](C(=O)NC4=O)C5=CNC6=CC=CC=C65",
    "C1CC2=C3C(=CC=C2)N(CN3C1)C4C(Cl)(C(=O)NC4=O)",
    "C4(S)C(C(=O)NC4=O)C5=CNC6=CC=CC=C65",
    "C4(S)C(Cl)(C(=O)NC4=O)",
]
OUTPUT_PATHS = [
    'figure/example0.png',
    'figure/example1.png',
    'figure/example2.png',
    'figure/example3.png',
]

# Parse each SMILES and write its 2D depiction next to the others.
for smiles, path in zip(SMILES_INPUTS, OUTPUT_PATHS):
    Draw.MolToFile(Chem.MolFromSmiles(smiles), path)

# Reaction library reference used elsewhere in the project:
# ./autogrow/operators/mutation/smiles_click_chem/reaction_libraries/click_chem_rxns/ClickChem_rxn_library.json
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/evaluate_baseline.py | # from tdc import utils
# names = utils.retrieve_benchmark_names('Docking_Group')
# print(names)
# pyscreener_path = '/project/molecular_data/graphnn/pyscreener/'
# from tdc.benchmark_group import docking_group
# group = docking_group(path = 'data/',
# file_format='1iep_docking',
# pyscreener_path = pyscreener_path)
# benchmark = group.get('DRD3', num_max_call = 1000)
import numpy as np
import os
import yaml
from tdc import Evaluator, Oracle
# TDC evaluators/oracles used to characterize each method's top-100 molecules.
div_evaluator = Evaluator(name = 'Diversity')
nov_evaluator = Evaluator(name = 'Novelty')  # NOTE(review): appears unused in this script
qed_evaluator = Oracle('qed')
sa_evaluator = Oracle('sa')
# Root folder containing one <method>/results/ subfolder per baseline method.
base_path = '/project/molecular_data/graphnn/mol_opt/main'
method_list = ['graph_ga', 'jt_vae', 'MARS', 'moldqn', 'rationaleRL', 'REINVENT', 'screening', 'selfies_ga', ]
# method_list = ['smiles_ga']
def _normalize_docking_score(raw_score):
return 1/(1+np.exp((raw_score+7.5)))
def reverse_normalize(normalize_score):
    """Invert ``_normalize_docking_score``: recover the raw docking score.

    Expects a value strictly inside (0, 1); 0.5 maps back to -7.5.
    """
    odds = 1 / normalize_score - 1
    return np.log(odds) - 7.5
#### test ####
# raw_score_list = [-20, -15, -10, -5, 0, 5]
# for raw_score in raw_score_list:
# print(raw_score, reverse_normalize(_normalize_docking_score(raw_score)))
def evaluate_from_yaml_file(yaml_file):
    """Score one optimization-run result file.

    Args:
        yaml_file: path to a YAML file mapping SMILES -> (normalized_score, index).

    Returns:
        Tuple of (mean raw score of top 100, mean of top 10, top 1,
        diversity of top-100 SMILES, mean QED, mean SA).
    """
    # Use a context manager so the handle is closed deterministically
    # (the original `yaml.load(open(...).read(), ...)` leaked the file handle).
    with open(yaml_file, "r") as fh:
        result = yaml.load(fh.read(), Loader=yaml.Loader)
    # De-normalize the stored logistic scores back to raw docking scores.
    result_lst = [(smiles, reverse_normalize(normalize_score), idx)
                  for smiles, (normalize_score, idx) in result.items()]
    # NOTE(review): entries are consumed in file/dict order; this assumes the
    # YAML was written best-first -- confirm against the result writer.
    top_100 = np.mean([i[1] for i in result_lst[:100]])
    top_10 = np.mean([i[1] for i in result_lst[:10]])
    top_1 = result_lst[0][1]
    top_100_smiles = [i[0] for i in result_lst[:100]]
    div = div_evaluator(top_100_smiles)
    qed_score = np.mean(qed_evaluator(top_100_smiles))
    sa_score = np.mean(sa_evaluator(top_100_smiles))
    return top_100, top_10, top_1, div, qed_score, sa_score
# Report mean/std of each metric over every docking result file per method.
for method in method_list:
    print('-------- ' + method + ' ----------')
    result_path = os.path.join(base_path, method, 'results')
    docking_files = [name for name in os.listdir(result_path) if 'docking' in name]
    result_lst = [
        evaluate_from_yaml_file(os.path.join(result_path, name))
        for name in docking_files
    ]

    def _mean_std(position):
        """(mean, std) of one metric column across this method's runs."""
        values = [res[position] for res in result_lst]
        return np.mean(values), np.std(values)

    print('\ttop 100', _mean_std(0))
    print('\ttop 10', _mean_std(1))
    print('\ttop 1', _mean_std(2))
    print('\tdiv', _mean_std(3))
    print('\tqed', _mean_std(4))
    print('\tsa', _mean_std(5))
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/RGA.py | '''
- import and config
- policy network
- for i in 1,...,generation
- crossover
- RGA use policy network to select ligand ***
- crossover
- docking
- RGA update policy network ***
- mutation
- RGA use policy network to select ligand ***
- mutation
- docking
- RGA update policy network ***
- elites
'''
import argparse
PARSER = argparse.ArgumentParser()
# Allows the run commands to be submitted via a .json file.
PARSER.add_argument(
"--json",
"-j",
metavar="param.json",
help="Name of a json file containing all parameters. \
Overrides other arguments.",
)
# Allows the run in debug mode. Doesn't delete temp files.
PARSER.add_argument(
"--debug_mode",
"-d",
action="store_true",
default=False,
help="Run Autogrow in Debug mode. This keeps all \
temporary files and adds extra print statements.",
)
# receptor information
PARSER.add_argument(
"--filename_of_receptor",
"-r",
metavar="receptor.pdb",
default='./tutorial/PARP/4r6eA_PARP1_prepared.pdb',
help="The path to the receptor file. Should be .pdb file.",
)
PARSER.add_argument(
"--center_x",
"-x",
type=float,
default=-70.76,
help="x-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
"--center_y",
"-y",
type=float,
default=21.82,
help="y-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
"--center_z",
"-z",
type=float,
default=28.33,
help="z-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
"--size_x",
type=float,
default=25.0,
help="dimension of box to dock into in the x-axis (Angstrom)",
)
PARSER.add_argument(
"--size_y",
type=float,
default=20.0,
help="dimension of box to dock into in the y-axis (Angstrom)",
)
PARSER.add_argument(
"--size_z",
type=float,
default=25.0,
help="dimension of box to dock into in the z-axis (Angstrom)",
)
# Input/Output directories
PARSER.add_argument(
"--root_output_folder",
"-o",
type=str,
help="The Path to the folder which all output files will be placed.",
)
PARSER.add_argument(
"--source_compound_file",
"-s",
type=str,
default='./source_compounds/naphthalene_smiles.smi',
help="PATH to the file containing the source compounds. It must be \
tab-delineated .smi file. These ligands will seed the first generation.",
)
PARSER.add_argument(
"--filter_source_compounds",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="If True source ligands from source_compound_file will be \
filter using the user defined filter choices prior to the 1st generation being \
created. If False, ligands which would fail the ligand filters could seed \
the 1st generation. Default is True.",
)
PARSER.add_argument(
"--use_docked_source_compounds",
choices=[True, False, "True", "False", "true", "false"],
default=False,
help="If True source ligands will be docked prior to seeding generation 1. \
If True and the source_compound file already has docking/fitness metric score \
in -2 column of .smi file, it will not redock but reuse the scores from \
the source_compound_file.\
If True and no fitness metric score in -2 column of .smi file, it will \
dock each ligand from the source_compound_file and displayed as generation 0.\
If False, generation 1 will be randomly seeded by the source compounds with \
no preference and there will be no generation 0. \
If performing multiple simulations using same source compounds and protein, \
we recommend running once this and using the generation 0 ranked file as the \
source_compound_file for future simulations. \
Default is True.",
)
PARSER.add_argument(
"--start_a_new_run",
action="store_true",
default=False,
help="If False make a new folder and start a fresh simulation with Generation 0. \
If True find the last generation in the root_output_folder and continue to fill.\
Default is False.",
)
# SmilesMerge Settings
PARSER.add_argument(
"--max_time_MCS_prescreen",
type=int,
default=1,
help="amount time the pre-screen MCS times out. Time out doesnt prevent \
mcs matching just takes what it has up to that point",
)
PARSER.add_argument(
"--max_time_MCS_thorough",
type=int,
default=1,
help="amount time the thorough MCS times out. Time out doesnt prevent \
mcs matching just takes what it has up to that point",
)
PARSER.add_argument(
"--min_atom_match_MCS",
type=int,
default=4,
help="Determines the minimum number of atoms in common for a substructurematch. \
The higher the more restrictive, but the more likely for two ligands not to match",
)
PARSER.add_argument(
"--protanate_step",
action="store_true",
default=False,
help="Indicates if Smilesmerge uses protanated mols (if true) or deprot \
(if False) SmilesMerge is 10x faster when deprotanated",
)
# Mutation Settings
PARSER.add_argument(
"--rxn_library",
choices=["click_chem_rxns", "robust_rxns", "all_rxns", "Custom"],
default="all_rxns",
help="This set of reactions to be used in Mutation. \
If Custom, one must also provide rxn_file Path and function_group_library path",
)
PARSER.add_argument(
"--rxn_library_file",
type=str,
default="",
help="This PATH to a Custom json file of SMARTS reactions to use for Mutation. \
Only provide if using the Custom option for rxn_library.",
)
PARSER.add_argument(
"--function_group_library",
type=str,
default="",
help="This PATH for a dictionary of functional groups to be used for Mutation. \
Only provide if using the Custom option for rxn_library.",
)
PARSER.add_argument(
"--complementary_mol_directory",
type=str,
default="",
help="This PATH to the directory containing all the molecules being used \
to react with. The directory should contain .smi files contain SMILES of \
molecules containing the functional group represented by that file. Each file \
should be named with the same title as the functional groups described in \
rxn_library_file & function_group_library +.smi \
All Functional groups specified function_group_library must have its \
own .smi file. We recommend you filter these dictionaries prior to Autogrow \
for the Drug-likeliness and size filters you will Run Autogrow with.",
)
# processors and multithread mode
PARSER.add_argument(
"--number_of_processors",
"-p",
type=int,
metavar="N",
default=1,
help="Number of processors to use for parallel calculations. Set to -1 for all available CPUs.",
)
PARSER.add_argument(
"--multithread_mode",
default="multithreading",
choices=["mpi", "multithreading", "serial"],
help="Determine what style \
multithreading: mpi, multithreading, or serial. serial will override \
number_of_processors and force it to be on a single processor.",
)
# Genetic Algorithm Options
PARSER.add_argument(
"--selector_choice",
choices=["Roulette_Selector", "Rank_Selector", "Tournament_Selector"],
default="Roulette_Selector",
help="This determines whether the fitness criteria are chosen by a Weighted Roulette, \
Ranked, or Tournament style Selector. The Rank option is a non-redundant selector.\
Roulette and Tournament chose without replacement and are stoichastic options. \
Warning do not use Rank_Selector for small runs as there is potential that \
the number of desired ligands exceed the number of ligands to chose from.",
)
PARSER.add_argument(
"--tourn_size",
type=float,
default=0.1,
help="If using the Tournament_Selector this determines the size of each \
tournament. The number of ligands used for each tournament will the \
tourn_size * the number of considered ligands.",
)
# Seeding next gen and diversity
PARSER.add_argument(
"--top_mols_to_seed_next_generation_first_generation",
type=int,
help="Number of mols that seed next generation, for the first generation.\
Should be less than number_of_crossovers_first_generation + number_of_mutations_first_generation\
If not defined it will default to top_mols_to_seed_next_generation",
)
PARSER.add_argument(
"--top_mols_to_seed_next_generation",
type=int,
default=10,
help="Number of mols that seed next generation, for all generations after the first.\
Should be less than number_of_crossovers_first_generation \
+ number_of_mutations_first_generation",
)
PARSER.add_argument(
"--diversity_mols_to_seed_first_generation",
type=int,
default=10,
help="Should be less than number_of_crossovers_first_generation \
+ number_of_mutations_first_generation",
)
PARSER.add_argument(
"--diversity_seed_depreciation_per_gen",
type=int,
default=2,
help="Each gen diversity_mols_to_seed_first_generation will decrease this amount",
)
# Populations settings
PARSER.add_argument(
"--num_generations",
type=int,
default=10,
help="The number of generations to be created.",
)
PARSER.add_argument(
"--number_of_crossovers_first_generation",
type=int,
help="The number of ligands which will be created via crossovers in the \
first generation. If not defined it will default to number_of_crossovers",
)
PARSER.add_argument(
"--number_of_mutants_first_generation",
type=int,
help="The number of ligands which will be created via mutation in \
the first generation. If not defined it will default to number_of_mutants",
)
PARSER.add_argument(
"--number_elitism_advance_from_previous_gen_first_generation",
type=int,
help="The number of ligands chosen for elitism for the first generation \
These will advance from the previous generation directly into the next \
generation. This is purely advancing based on Docking/Rescore fitness. \
This does not select for diversity. If not defined it will default to \
number_elitism_advance_from_previous_gen",
)
PARSER.add_argument(
"--number_of_crossovers",
type=int,
default=10,
help="The number of ligands which will be created via crossover in each \
generation besides the first",
)
PARSER.add_argument(
"--number_of_mutants",
type=int,
default=10,
help="The number of ligands which will be created via mutation in each \
generation besides the first.",
)
PARSER.add_argument(
"--number_elitism_advance_from_previous_gen",
type=int,
default=10,
help="The number of ligands chosen for elitism. These will advance from \
the previous generation directly into the next generation. \
This is purely advancing based on Docking/Rescore \
fitness. This does not select for diversity.",
)
PARSER.add_argument(
"--redock_elite_from_previous_gen",
choices=[True, False, "True", "False", "true", "false"],
default=False,
help="If True than ligands chosen via Elitism (ie advanced from last generation) \
will be passed through Gypsum and docked again. This provides a better exploration of conformer space \
but also requires more computation time. If False, advancing ligands are simply carried forward by \
copying the PDBQT files.",
)
####### FILTER VARIABLES
PARSER.add_argument(
"--LipinskiStrictFilter",
action="store_true",
default=False,
help="Lipinski filters for orally available drugs following Lipinski rule of fives. \
Filters by molecular weight, logP and number of hydrogen bond donors and acceptors. \
Strict implementation means a ligand must pass all requirements.",
)
PARSER.add_argument(
"--LipinskiLenientFilter",
action="store_true",
default=False,
help="Lipinski filters for orally available drugs following Lipinski rule of fives. \
Filters by molecular weight, logP and number of hydrogen bond donors and acceptors. \
Lenient implementation means a ligand may fail all but one requirement and still passes.",
)
PARSER.add_argument(
"--GhoseFilter",
action="store_true",
default=False,
help="Ghose filters for drug-likeliness; filters by molecular weight,\
logP and number of atoms.",
)
PARSER.add_argument(
"--GhoseModifiedFilter",
action="store_true",
default=False,
help="Ghose filters for drug-likeliness; filters by molecular weight,\
logP and number of atoms. This is the same as the GhoseFilter, but \
the upper-bound of the molecular weight restrict is loosened from \
480Da to 500Da. This is intended to be run with Lipinski Filter and \
to match AutoGrow 3's Ghose Filter.",
)
PARSER.add_argument(
"--MozziconacciFilter",
action="store_true",
default=False,
help="Mozziconacci filters for drug-likeliness; filters by the number of \
rotatable bonds, rings, oxygens, and halogens.",
)
PARSER.add_argument(
"--VandeWaterbeemdFilter",
action="store_true",
default=False,
help="VandeWaterbeemd filters for drug likely to be blood brain barrier permeable. \
Filters by the number of molecular weight and Polar Sureface Area (PSA).",
)
PARSER.add_argument(
"--PAINSFilter",
action="store_true",
default=False,
help="PAINS filters against Pan Assay Interference Compounds using \
substructure a search.",
)
PARSER.add_argument(
"--NIHFilter",
action="store_true",
default=False,
help="NIH filters against molecules with undersirable functional groups \
using substructure a search.",
)
PARSER.add_argument(
"--BRENKFilter",
action="store_true",
default=False,
help="BRENK filter for lead-likeliness, by matching common false positive \
molecules to the current mol.",
)
PARSER.add_argument(
"--No_Filters",
action="store_true",
default=False,
help="No filters will be applied to compounds.",
)
PARSER.add_argument(
"--alternative_filter",
action="append",
help="If you want to add Custom filters to the filter child classes \
Must be a list of lists \
[[name_filter1, Path/to/name_filter1.py],[name_filter2, Path/to/name_filter2.py]]",
)
# dependency variables
# DOCUMENT THE file conversion for docking inputs
PARSER.add_argument(
"--conversion_choice",
choices=["MGLToolsConversion", "ObabelConversion", "Custom"],
default="MGLToolsConversion",
help="Determines how .pdb files will be converted \
to the final format for docking. For Autodock Vina and QuickVina style docking software, \
files must be in .pdbqt format. MGLToolsConversion: uses MGLTools and is the \
recommended converter. MGLTools conversion is required for NNScore1/2 rescoring. \
ObabelConversion: uses commandline obabel. Easier to install but Vina docking has \
been optimized with MGLTools conversion.",
)
PARSER.add_argument(
"--custom_conversion_script",
metavar="custom_conversion_script",
default="",
help="The path to a python script for which is used to convert \
ligands. This is required for custom conversion_choice choices. \
Must be a list of strings \
[name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
PARSER.add_argument(
"--mgltools_directory",
metavar="mgltools_directory",
help="Required if using MGLTools conversion option \
(conversion_choice=MGLToolsConversion) \
Path may look like: /home/user/MGLTools-1.5.6/",
)
PARSER.add_argument(
"--mgl_python",
metavar="mgl_python",
required=False,
help="/home/user/MGLTools-1.5.4/bin/pythonsh",
)
PARSER.add_argument(
"--prepare_ligand4.py",
metavar="prepare_ligand4.py",
required=False,
help="/home/user/MGLTools-1.5.4/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_ligand4.py",
)
PARSER.add_argument(
"--prepare_receptor4.py",
metavar="prepare_receptor4.py",
required=False,
help="/home/userMGLTools-1.5.4/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_receptor4.py",
)
PARSER.add_argument(
"--obabel_path",
help="required if using obabel conversion \
option (conversion_choice=ObabelConversion).\
Path may look like PATH/envs/py37/bin/obabel; \
may be found on Linux by running: which obabel",
)
###################################
######### docking #################
###################################
PARSER.add_argument(
"--dock_choice",
metavar="dock_choice",
default="QuickVina2Docking",
choices=["VinaDocking", "QuickVina2Docking", "Custom"],
help="dock_choice assigns which docking software module to use.",
)
PARSER.add_argument(
"--docking_executable",
metavar="docking_executable",
default=None,
help="path to the docking_executable",
)
PARSER.add_argument(
"--docking_exhaustiveness",
metavar="docking_exhaustiveness",
default=None,
help="exhaustiveness of the global search (roughly proportional to time. \
see docking software for settings. Unless specified Autogrow uses the \
docking softwares default setting. For AutoDock Vina 1.1.2 that is 8",
)
PARSER.add_argument(
"--docking_num_modes",
metavar="docking_num_modes",
default=None,
help=" maximum number of binding modes to generate in docking. \
See docking software for settings. Unless specified Autogrow uses the \
docking softwares default setting. For AutoDock Vina 1.1.2 that is 9",
)
PARSER.add_argument(
"--docking_timeout_limit",
type=float,
default=120,
help="The maximum amount of time allowed to dock a single ligand into a \
pocket in seconds. Many factors influence the time required to dock, such as: \
processor speed, the docking software, rotatable bonds, exhaustiveness docking,\
and number of docking modes... \
The default docking_timeout_limit is 120 seconds, which is excess for most \
docking events using QuickVina2Docking under default settings. If run with \
more exhaustive settings or with highly flexible ligands, consider increasing \
docking_timeout_limit to accommodate. Default docking_timeout_limit is 120 seconds",
)
PARSER.add_argument(
"--custom_docking_script",
metavar="custom_docking_script",
default="",
help="The name and path to a python script for which is used to \
dock ligands. This is required for Custom docking choices Must be a list of \
strings [name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
# scoring
PARSER.add_argument(
"--scoring_choice",
metavar="scoring_choice",
choices=["VINA", "NN1", "NN2", "Custom"],
default="VINA",
help="The scoring_choice to use to assess the ligands docking fitness. \
Default is using Vina/QuickVina2 ligand affinity while NN1/NN2 use a Neural Network \
to assess the docking pose. Custom requires providing a file path for a Custom \
scoring function. If Custom scoring function, confirm it selects properly, \
Autogrow is largely set to select for a more negative score.",
)
PARSER.add_argument(
"--rescore_lig_efficiency",
action="store_true",
default=False,
help="This will divide the final scoring_choice output by the number of \
non-Hydrogen atoms in the ligand. This adjusted ligand efficiency score will \
override the scoring_choice value. This is compatible with all scoring_choice options.",
)
PARSER.add_argument(
"--custom_scoring_script",
metavar="custom_scoring_script",
type=str,
default="",
help="The path to a python script for which is used to \
assess the ligands docking fitness. Autogrow is largely set to select for a most \
negative scores (ie binding affinity the more negative is best). Must be a list of \
strings [name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
# gypsum # max variance is the number of conformers made per ligand
PARSER.add_argument(
"--max_variants_per_compound",
type=int,
default=3,
help="number of conformers made per ligand. \
See Gypsum-DL publication for details",
)
PARSER.add_argument(
"--gypsum_thoroughness",
"-t",
type=int,
default = 3,
help="How widely Gypsum-DL will search for \
low-energy conformers. Larger values increase \
run times but can produce better results. \
See Gypsum-DL publication for details",
)
PARSER.add_argument(
"--min_ph",
metavar="MIN",
type=float,
default=6.4,
help="Minimum pH to consider.See Gypsum-DL \
and Dimorphite-D publication for details.",
)
PARSER.add_argument(
"--max_ph",
metavar="MAX",
type=float,
default=8.4,
help="Maximum pH to consider.See Gypsum-DL \
and Dimorphite-D publication for details.",
)
PARSER.add_argument(
"--pka_precision",
metavar="D",
type=float,
default=1.0,
help="Size of pH substructure ranges. See Dimorphite-DL \
publication for details.",
)
PARSER.add_argument(
"--gypsum_timeout_limit",
type=float,
default=15,
help="Maximum time gypsum is allowed to run for a given ligand in seconds. \
On average Gypsum-DL takes on several seconds to run for a given ligand, but \
factors such as mol size, rotatable bonds, processor speed, and gypsum \
settings (ie gypsum_thoroughness or max_variants_per_compound) will change \
how long it takes to run. If increasing gypsum settings it is best to increase \
the gypsum_timeout_limit. Default gypsum_timeout_limit is 15 seconds",
)
# Reduce files down. This compiles and compresses the files in the PDBs folder
# (contains docking outputs, pdb, pdbqt...). This reduces the data size and
# makes data transfer quicker, but requires running the
# file_concatenation_and_compression.py in the Utility script folder to
# separate these files out for readability.
PARSER.add_argument(
"--reduce_files_sizes",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="Run this combines all files in the PDBs folder into a \
single text file. Useful when data needs to be transferred.",
)
# Make a line plot of the simulation at the end of the run.
PARSER.add_argument(
"--generate_plot",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="Make a line plot of the simulation at the end of the run.",
)
# mpi mode pre-Run so there are python cache files without EOF Errors
PARSER.add_argument(
"--cache_prerun",
"-c",
action="store_true",
help="Run this before running gypsum in mpi-mode.",
)
# Parse the CLI into a plain dict; unset options come through as None.
args_dict = vars(PARSER.parse_args())
from autogrow.user_vars import multiprocess_handling, define_defaults, determine_bash_timeout_vs_gtimeout
# args_dict = define_defaults()
import numpy as np
from tqdm import tqdm
from collections import defaultdict
import os, json, pickle, time, sys, copy, random
# Keep only options the user actually supplied; AutoGrow fills in defaults
# for the rest inside load_in_commandline_parameters.
INPUTS = copy.deepcopy(args_dict)
for k, v in args_dict.items():
    if v is None:
        del INPUTS[k]
if args_dict["cache_prerun"] is False:
    # load the commandline parameters
    from autogrow.user_vars import load_in_commandline_parameters
    args_dict, printout = load_in_commandline_parameters(INPUTS)
    args_dict = multiprocess_handling(args_dict)
    # Pick the OS-appropriate timeout binary (GNU `timeout` vs e.g. `gtimeout`).
    timeout_option = determine_bash_timeout_vs_gtimeout()
    if timeout_option in ["timeout", "gtimeout"]:
        args_dict["timeout_vs_gtimeout"] = timeout_option
    else:
        raise Exception("Something is very wrong. This OS may not be supported by Autogrow or you may need to execute through Bash.")
# NOTE(review): this rebinds the builtin `vars()`; the helper functions below
# take `vars` as a parameter, so the shadowing seems deliberate -- confirm.
vars = args_dict
# Presumably the number of top ligands tracked per generation -- TODO confirm.
topk = 10
############## canonical ##############
from rdkit import Chem
def canonicalize(smiles):
    """Return the canonical isomeric SMILES for *smiles*, or None if parsing fails."""
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
        return None
    return Chem.MolToSmiles(mol, isomericSmiles=True)
##########################################################
### A. mutate
import autogrow.operators.mutation.smiles_click_chem.smiles_click_chem as SmileClickClass
rxn_library_variables = [vars["rxn_library"], vars["rxn_library_file"], vars["function_group_library"], vars["complementary_mol_directory"]] # Package user vars specifying Reaction library for mutation
new_mutation_smiles_list = [] # List of SMILES from mutation
a_smiles_click_chem_object = SmileClickClass.SmilesClickChem(rxn_library_variables, new_mutation_smiles_list, vars["filter_object_dict"])
##########################################################
##########################################################
### B. crossover
## crossover between 2 ligands: find common structure
import autogrow.operators.crossover.smiles_merge.smiles_merge as smiles_merge
import autogrow.operators.crossover.execute_crossover as execute_crossover
import autogrow.operators.filter.execute_filters as Filter
##########################################################
##########################################################
# C. smiles2docking
# - smiles2pdbqt: smiles -> sdf -> pdb -> pdbqt
# - docking pdbqt
########### smiles -> sdf -> pdb -> pdbqt -> pdbqt.vina
import autogrow.operators.convert_files.conversion_to_3d as conversion_to_3d
# conversion_to_3d.convert_smi_to_sdfs_with_gypsum
# conversion_to_3d.convert_sdf_to_pdbs
# convert_sdf_to_pdbs(vars, gen_folder_path, sdfs_folder_path)
# conversion_to_3d.convert_single_sdf_to_pdb
# convert_ligand_pdb_file_to_pdbqt #### in run_docking_common lig_convert_multithread
def smiles_to_sdfs(vars, gen_smiles_file, smile_file_directory):
    """Convert a generation's .smi file into 3D .sdf files with Gypsum-DL.

    Adapted from conversion_to_3d.convert_smi_to_sdfs_with_gypsum.

    Args:
        vars: AutoGrow parameter dict (Gypsum settings and the parallelizer).
        gen_smiles_file: path to the .smi file holding this generation's SMILES.
        smile_file_directory: directory prefix under which the submission,
            output, and log folders are created.

    Returns:
        Path to the folder containing the generated .sdf files.
    """
    max_variants_per_compound = vars["max_variants_per_compound"]
    gypsum_thoroughness = vars["gypsum_thoroughness"]
    min_ph = vars["min_ph"]
    max_ph = vars["max_ph"]
    pka_precision = vars["pka_precision"]
    gypsum_timeout_limit = vars["gypsum_timeout_limit"]
    # Folder for the gypsum .smi's and json submission files.
    # exist_ok=True replaces the racy `if not exists: makedirs` pattern.
    folder_path = "{}gypsum_submission_files{}".format(smile_file_directory, os.sep)
    os.makedirs(folder_path, exist_ok=True)
    # Output folder for Gypsum (where the .sdf's go).
    gypsum_output_folder_path = "{}_SDF{}".format(smile_file_directory, os.sep)
    os.makedirs(gypsum_output_folder_path, exist_ok=True)
    # Log folder inside the SDF output folder.
    gypsum_log_path = "{}log{}".format(gypsum_output_folder_path, os.sep)
    os.makedirs(gypsum_log_path, exist_ok=True)
    # Build one gypsum parameter set per ligand to submit.
    list_of_gypsum_params = conversion_to_3d.make_smi_and_gyspum_params(
        gen_smiles_file,
        folder_path,
        gypsum_output_folder_path,
        max_variants_per_compound, gypsum_thoroughness,
        min_ph, max_ph, pka_precision, )
    # Fan the conversions out across the parallelizer. Each job returns the
    # SMILES string on failure and None on success.
    job_input = tuple([(gypsum_log_path, gypsum_params, gypsum_timeout_limit) for gypsum_params in list_of_gypsum_params])
    sys.stdout.flush()
    failed_to_convert = vars["parallelizer"].run(job_input, conversion_to_3d.run_gypsum_multiprocessing)
    sys.stdout.flush()
    # Collect the unique failures (usually timeouts) for reporting.
    lig_failed_to_convert = [x for x in failed_to_convert if x is not None]
    lig_failed_to_convert = list(set(lig_failed_to_convert))
    if len(lig_failed_to_convert) > 0:
        print("The Following ligands Failed to convert in Gypsum")
        print("Likely due to a Timeout")
        print(lig_failed_to_convert)
    sys.stdout.flush()
    return gypsum_output_folder_path
from autogrow.docking.execute_docking import pick_run_conversion_class_dict, pick_docking_class_dict, lig_convert_multithread
def pdb_to_pdbqt(vars, pdb_dir):
    """Convert every ligand .pdb file in *pdb_dir* to .pdbqt for docking.

    Adapted from autogrow's run_docking_common. Builds the file-conversion and
    docking objects from the user settings, resolves the docking executable if
    it was not supplied, then converts all ligand PDBs in parallel.

    Args:
        vars (dict): AutoGrow parameter dict (dock/conversion choices, receptor
            path, parallelizer, ...). Mutated: ``vars["docking_executable"]``
            is filled in when it starts as None.
        pdb_dir (str): folder containing the ligand .pdb files.

    Returns:
        The constructed docking object, ready for the docking step.
    """
    ### adapted from run_docking_common
    dock_choice = vars["dock_choice"]
    conversion_choice = vars["conversion_choice"]
    receptor = vars["filename_of_receptor"]
    # Use a temp vars dict so you don't put mpi multiprocess info through itself...
    temp_vars = {}
    for key in list(vars.keys()):
        if key == "parallelizer":
            continue
        temp_vars[key] = vars[key]
    # Instantiate the PDB->PDBQT conversion class chosen by the user.
    file_conversion_class_object = pick_run_conversion_class_dict(conversion_choice)
    file_conversion_class_object = file_conversion_class_object(temp_vars, receptor, test_boot=False)
    dock_class = pick_docking_class_dict(dock_choice)
    docking_object = dock_class(temp_vars, receptor, file_conversion_class_object, test_boot=False)
    # Resolve the docking executable lazily when the user did not supply one.
    if vars["docking_executable"] is None:
        docking_executable = docking_object.get_docking_executable_file(temp_vars)
        vars["docking_executable"] = docking_executable
    ##### vina or Qvina
    # Find PDB's
    pdbs_in_folder = docking_object.find_pdb_ligands(pdb_dir)
    print(' pdb files:', pdbs_in_folder[:2], pdb_dir, len(pdbs_in_folder))
    # Convert each ligand PDB to PDBQT via the parallelizer.
    job_input_convert_lig = tuple([(docking_object, pdb) for pdb in pdbs_in_folder])
    # print(" Convert Ligand from PDB to PDBQT format")
    smiles_names_failed_to_convert = vars["parallelizer"].run(job_input_convert_lig, lig_convert_multithread)
    pdbqts_in_folder = docking_object.find_converted_ligands(pdb_dir)
    print(' pdbqt file: ', len(pdbqts_in_folder), pdbqts_in_folder[:2])
    return docking_object
from autogrow.docking.execute_docking import run_dock_multithread, run_docking_common
import autogrow.docking.scoring.execute_scoring_mol as Scoring
import autogrow.docking.ranking.ranking_mol as Ranking
def docking_pdbqt(vars, docking_object, pdbqt_folder, full_smiles_file):
    """Dock every converted .pdbqt ligand in `pdbqt_folder`, then score and rank.

    Args:
        vars (dict): AutoGrow-style settings; must contain a "parallelizer".
        docking_object: docking wrapper returned by `pdb_to_pdbqt`.
        pdbqt_folder (str): folder holding the converted .pdbqt ligands.
        full_smiles_file (str): .smi file with the SMILES that were docked.

    Returns:
        list: one entry per scored ligand, e.g.
            [smiles, lig_name, shorthand, variant_name, score, diversity,
             list_of_.pdbqt.vina_files]
    """
    pdbqts_in_folder = docking_object.find_converted_ligands(pdbqt_folder)
    job_input_dock_lig = tuple((docking_object, pdbqt) for pdbqt in pdbqts_in_folder)
    smiles_names_failed_to_dock = vars["parallelizer"].run(job_input_dock_lig, run_dock_multithread)
    ### main docking, (including delete failed docking file)
    deleted_smiles_names_list_dock = [x for x in smiles_names_failed_to_dock if x is not None]
    deleted_smiles_names_list_dock = list(set(deleted_smiles_names_list_dock))
    print("THE FOLLOWING LIGANDS WHICH FAILED TO DOCK:", deleted_smiles_names_list_dock)
    # Run any compatible Scoring Function
    print(full_smiles_file, pdbqt_folder)
    smiles_list = Scoring.run_scoring_common(vars, full_smiles_file, pdbqt_folder)
    print('---------', smiles_list[:3], 'smiles_list[:3] --------------')
    # Output format of the .smi file will be: SMILES Full_lig_name
    # shorthandname ...AnyCustominfo... Fitness_metric diversity
    # Normally the docking score is the fitness metric but if we use a
    # Custom metric than dock score gets moved to index -3 and the new
    # fitness metric gets -2
    # sort list by the affinity of each sublist (which is the last index of sublist);
    # ascending, so the most negative (best) binder comes first
    smiles_list.sort(key=lambda x: float(x[-1]), reverse=False)
    # score the diversity of each ligand compared to the rest of the ligands in the group this adds on a float in the last column for the
    # sum of pairwise comparisons the lower the diversity score the more unique a molecule is from the other mols in the same generation
    smiles_list = Ranking.score_and_append_diversity_scores(smiles_list)
    # Keep only ligands whose docking actually produced a .vina output file.
    pdbqts_in_folder = [pdbqt + '.vina' for pdbqt in pdbqts_in_folder if os.path.exists(pdbqt + '.vina')]
    print('pdbqts [:4]', pdbqts_in_folder[:3], len(pdbqts_in_folder))
    # Group the .vina files by ligand id (file name before the "__" variant suffix).
    id2pdbqt = defaultdict(list)
    for pdbqt in pdbqts_in_folder:
        smiles_id = pdbqt.split('/')[-1].split('__')[0]
        id2pdbqt[smiles_id].append(pdbqt)
    # Append the list of .vina files for each scored ligand (ss[1] is the lig name).
    for ss in smiles_list:
        ss.append(id2pdbqt[ss[1]])
    # ["[N-]=[NH+]/N=C/c1[nH+]nc(-c2cccc3ccccc23)o1", "naphthalene_35", "naphthalene_35", "naphthalene_35__3",
    #  '-9.2', 40.14 (diversity), ['results_xxxx/xxxx__1.pdbqt', 'results_xxxx/xxxxx__2.pdbqt']]
    return smiles_list
def docking(smiles_folder, smiles_file, args_dict):
    """Convert the SMILES in `smiles_folder/smiles_file` to 3D and dock them.

    Pipeline: SMILES -> SDF (Gypsum) -> PDB -> PDBQT -> Vina docking/scoring.

    Returns:
        list: the scored and ranked ligand list produced by `docking_pdbqt`.
    """
    # BUGFIX: use rstrip (not strip) so an absolute path such as
    # "/data/results/" does not lose its *leading* slash.
    sdfs_folder_path = smiles_folder.rstrip('/') + '_SDF/'
    pdb_dir = smiles_folder.rstrip('/') + '_PDB/'
    smiles_to_sdfs(args_dict, gen_smiles_file=os.path.join(smiles_folder, smiles_file), smile_file_directory=smiles_folder)
    conversion_to_3d.convert_sdf_to_pdbs(args_dict, gen_folder_path=smiles_folder, sdfs_folder_path=sdfs_folder_path)
    docking_object = pdb_to_pdbqt(vars=args_dict, pdb_dir=pdb_dir)
    smiles_list = docking_pdbqt(args_dict, docking_object, pdb_dir, os.path.join(smiles_folder, smiles_file))
    return smiles_list
##########################################################
# ['N=[N+]=[N+]=C(Cc1ccc2ccccc2c1)[N+](=O)[O-]', 'N=[N+]=Nc1c(N=[N+]=N)c(N=[N+]=N)c2ccccc2c1O', ...]
############# receptor ##############
# Each tuple describes one docking target:
# (receptor name, receptor .pdb path,
#  docking-box center x, y, z, docking-box size x, y, z) — all in Angstroms.
receptor_info_list = [
    ('4r6e', './pdb/4r6e.pdb', -70.76, 21.82, 28.33, 15.0, 15.0, 15.0),
    ('3pbl', './pdb/3pbl.pdb', 9, 22.5, 26, 15, 15, 15),
    ('1iep', './pdb/1iep.pdb', 15.6138918, 53.38013513, 15.454837, 15, 15, 15), ]
# Additional targets kept for reference (disabled):
# ('2rgp', './pdb/2rgp.pdb', 16.29212, 34.870818, 92.0353, 15, 15, 15),
# ('3eml', './pdb/3eml.pdb', -9.06363, -7.1446, 55.86259999, 15, 15, 15),
# ('3ny8', './pdb/3ny8.pdb', 2.2488, 4.68495, 51.39820000000001, 15, 15, 15),
# ('4rlu', './pdb/4rlu.pdb', -0.73599, 22.75547, -31.23689, 15, 15, 15),
# ('4unn', './pdb/4unn.pdb', 5.684346153, 18.1917, -7.3715, 15, 15, 15),
# ('5mo4', './pdb/5mo4.pdb', -44.901, 20.490354, 8.48335, 15, 15, 15),
# ('7l11', './pdb/7l11.pdb', -21.81481, -4.21606, -27.98378, 15, 15, 15), ]
def update_receptor_info(vars, receptor_info):
    """Copy one receptor description tuple into the shared settings dict.

    `receptor_info` must be an 8-tuple:
    (name, pdb path, center x/y/z, size x/y/z).

    Mutates `vars` in place and also returns it for convenience.
    """
    (vars['name_of_receptor'], vars['filename_of_receptor'],
     vars['center_x'], vars['center_y'], vars['center_z'],
     vars['size_x'], vars['size_y'], vars['size_z']) = receptor_info
    return vars
# smiles2info[receptor_name][smiles] -> [smiles_id, binding_score, pdbqt_vina_files]
smiles2info = defaultdict(lambda: dict())
# id2smiles: unique 7-digit string id -> SMILES (ids are used to name output files)
id2smiles = dict()
def random_generate_id(id2smiles):
    """Draw a fresh 7-digit string id that is not already a key of `id2smiles`."""
    candidate = str(random.randint(1000000, 9999999))
    while candidate in id2smiles:
        candidate = str(random.randint(1000000, 9999999))
    return candidate
##########################################################
################ initialize population ################
# Dock the seed compounds once per receptor (generation "000") and record
# their scores in smiles2info; the best `topk` become generation 1's parents.
source_compound_file = args_dict['source_compound_file']
smiles_file = 'smiles.txt'
with open(source_compound_file, 'r') as fin:
    smiles_list = fin.readlines()
# first whitespace-separated token of each line is the SMILES string
initial_smiles_list = [smiles.split()[0] for smiles in smiles_list]
####### docking initial smiles list
for receptor_info in receptor_info_list:
    vars = update_receptor_info(vars, receptor_info)
    name_of_receptor = receptor_info[0]
    print("---------- 0.1. save smiles ----------") ###### new_smiles_set -> smiles_file
    meta_result_folder = './results_' + name_of_receptor + '_'
    results_folder = meta_result_folder + "000" ### 'results_4r6e_000'
    new_smiles_list = initial_smiles_list[:30] #### debug
    if not os.path.exists(results_folder):
        os.makedirs(results_folder)
    full_smiles_file = os.path.join(results_folder, smiles_file)
    # Assign every seed SMILES a unique id and write "SMILES\tid" lines.
    with open(full_smiles_file, 'w') as fout:
        for smiles in new_smiles_list:
            smiles_id = random_generate_id(id2smiles)
            fout.write(smiles + '\t' + smiles_id + '\n')
            id2smiles[smiles_id] = smiles
    print("---------- 0.2. docking ----------")
    smiles_list = docking(smiles_folder = results_folder, smiles_file = smiles_file, args_dict = vars)
    # ["[N-]=[NH+]/N=C/c1[nH+]nc(-c2cccc3ccccc23)o1", "naphthalene_35", "naphthalene_35", "naphthalene_35__3",
    # '-9.2', 40.14 (diversity), ['results_xxxx/xxxx__1.pdbqt', 'results_xxxx/xxxxx__2.pdbqt']]
    for info in smiles_list:
        smiles, smiles_id, binding_score, pdbqt_list = info[0], info[1], float(info[-3]), info[-1]
        smiles2info[name_of_receptor][smiles] = [smiles_id, binding_score, pdbqt_list]
    print('------ 0.3. top-K smiles for next generation -------')
    new_smiles_list = [(smiles, smiles2info[name_of_receptor][smiles][1], smiles2info[name_of_receptor][smiles][2]) \
        for smiles in new_smiles_list if smiles in smiles2info[name_of_receptor]]
    # ascending binding score: more negative (stronger binding) sorts first
    new_smiles_list.sort(key=lambda x:x[1])
    new_smiles_list = new_smiles_list[:topk]
    smiles_info_list = [(smiles,pdbqt_list) for smiles,binding_score,pdbqt_list in new_smiles_list]
    smiles2info[name_of_receptor]['smiles_info_list'] = copy.deepcopy(smiles_info_list)
##########################################################
################# model ################
import torch
from model import Ligand2D, Ligand2D_product, ENN, featurize_receptor_and_ligand
# featurize_receptor_and_ligand(pdbfile, centers, pocket_size, pdbqt_file,)
# pdbtofeature(pdbfile, centers, pocket_size) & pdbqtvina2feature(pdbqt_file)
# crossover_policy_net_1 = Ligand2D() ##### TODO pocket & center
# crossover_policy_net_2 = Ligand2D_product()
# crossover_policy_net_1 = ENN()
# crossover_policy_net_2 = ENN()
# Warm-start both crossover policy networks from checkpoints of a previous run.
crossover_policy_net_1 = torch.load('save_model/crossover_policy_net_1.ckpt')
crossover_policy_net_2 = torch.load('save_model/crossover_policy_net_2.ckpt')
crossover_budget = 20  # first-parent samples drawn per receptor per generation
crossover_optimizer = torch.optim.Adam(list(crossover_policy_net_1.parameters()) + list(crossover_policy_net_2.parameters()), lr=1e-3)
# mutation_policy_net_1 = ENN()
# mutation_policy_net_2 = Ligand2D_product()
mutation_policy_net_1 = torch.load('save_model/mutation_policy_net_1.ckpt')
mutation_policy_net_2 = torch.load('save_model/mutation_policy_net_2.ckpt')
mutation_budget = 20  # mutation seeds sampled per receptor per generation
mutation_optimizer = torch.optim.Adam(list(mutation_policy_net_1.parameters()) + list(mutation_policy_net_2.parameters()), lr=1e-3)
################# model ################
# Per-receptor buffers of (state, action) records consumed by the
# policy-gradient updates (steps 7 and 8 of the main loop).
crossover_train_data = defaultdict(lambda: dict())
mutation_train_data = defaultdict(lambda: dict())
canonicalize = lambda x:x  # identity placeholder; swap in a real SMILES canonicalizer if needed
# Per-receptor sets of already-tried crossover pairs / mutation seeds.
crossover_done_set = defaultdict(lambda: set())
mutation_done_set = defaultdict(lambda: set())
########################## main loop ############################
# One generation = for each receptor: (1) load parents, (2) crossover and
# (3) mutation guided by the policy networks, (5-6) dock the proposals,
# (7-8) REINFORCE updates of the policy nets, (9) keep the top-K for the
# next generation.
for num_gen in tqdm(range(args_dict['num_generations'])):
    for receptor_info in receptor_info_list:
        ##### input: smiles_list (including pdbqtvina, from previous-generation) & receptor
        vars = update_receptor_info(vars, receptor_info)
        name_of_receptor = vars['name_of_receptor']
        smiles_info_list = copy.deepcopy(smiles2info[name_of_receptor]['smiles_info_list']) ###### [(smiles_1, pdbqtvina_list_1), (smiles_2, pdbqtvina_list_2), ...]
        print('===== 1. beginning of the generation: smiles_info_list =====', len(smiles_info_list), smiles_info_list[:5], )
        for info in smiles_info_list:
            pdbqtvina = info[1][0]
            assert os.path.exists(pdbqtvina)
        smiles_list = [smiles for smiles, pdbqtvina_list in smiles_info_list]
        smiles2pdbqtvina_local = {smiles:pdbqtvina_list[0] for smiles,pdbqtvina_list in smiles_info_list}
        if len(smiles_list) <=3:
            continue
        new_smiles_set = set()
        print("---------- 2. RGA: crossover ----------")
        pdbqtvina_list = [smiles2pdbqtvina_local[smiles] for smiles in smiles_list]
        print('length of ligand:', len(pdbqtvina_list))
        if num_gen > -1:
            ##### evaluate probability distribution in RGA
            _, crossover_sample_probability_list = crossover_policy_net_1.forward_ligand_list(
                name_of_receptor = vars['name_of_receptor'],
                pdbqtvina_list = pdbqtvina_list)
        else:
            # uniform fallback (currently unreachable since num_gen >= 0)
            crossover_sample_probability_list = [1.0/len(pdbqtvina_list) for i in pdbqtvina_list]
        sampled_idx = random.choices(list(range(len(crossover_sample_probability_list))),
                                     weights = crossover_sample_probability_list,
                                     k = crossover_budget)
        ####### RGA outer loop, first ligand ########
        for idx in tqdm(sampled_idx):
            selected_smiles_1 = smiles_list[idx]
            mol = execute_crossover.convert_mol_from_smiles(selected_smiles_1)
            holdout_smiles_list = [smiles for smiles in smiles_list if smiles!=selected_smiles_1]
            if holdout_smiles_list == []:
                continue
            holdout_pdbqtvina_list = [smiles2pdbqtvina_local[smiles] for smiles in holdout_smiles_list]
            if num_gen > -1:
                ##### evaluate probability distribution in RGA
                _, crossover_sample_probability_list_2 = crossover_policy_net_2.forward_ligand_list(
                    name_of_receptor = vars['name_of_receptor'],
                    pdbqtvina_list = holdout_pdbqtvina_list)
            else:
                crossover_sample_probability_list_2 = [1.0/len(holdout_pdbqtvina_list) for i in holdout_pdbqtvina_list]
            sampled_idx_2 = random.choices(list(range(len(holdout_pdbqtvina_list))),
                                           weights = crossover_sample_probability_list_2, k = 10)
            ########## RGA inner loop, second ligand #########
            for idx2 in sampled_idx_2:
                smiles_2 = holdout_smiles_list[idx2]
                # Skip parent pairs (in either order) that were already tried.
                if (selected_smiles_1, smiles_2) not in crossover_done_set[name_of_receptor]:
                    crossover_done_set[name_of_receptor].add((selected_smiles_1, smiles_2))
                    crossover_done_set[name_of_receptor].add((smiles_2, selected_smiles_1))
                else:
                    continue
                mol2 = execute_crossover.convert_mol_from_smiles(smiles_2)
                # require a common substructure before attempting the merge
                if execute_crossover.test_for_mcs(vars, mol, mol2) is None:
                    continue
                # the merge is stochastic; retry up to 3 times
                for i in range(3):
                    ligand_new_smiles = smiles_merge.run_main_smiles_merge(vars, selected_smiles_1, smiles_2)
                    if ligand_new_smiles is not None:
                        break
                if ligand_new_smiles is not None:
                    ligand_new_smiles = canonicalize(ligand_new_smiles)
                    pass_or_not = Filter.run_filter_on_just_smiles(ligand_new_smiles, vars["filter_object_dict"]) #### True, False
                    if pass_or_not:
                        new_smiles_set.add(ligand_new_smiles)
                        # record the (state, action) pair for the policy-gradient step 7
                        crossover_train_data[name_of_receptor][ligand_new_smiles] = [smiles_list, selected_smiles_1, pdbqtvina_list,
                                                                                    holdout_smiles_list, smiles_2, holdout_pdbqtvina_list,]
                    else:
                        # print(" >>> not pass filter")
                        pass
                else:
                    # print(' >>> '+ selected_smiles_1 + ' ' + smiles_2 +' merge fail ')
                    pass
        print(" >>>>>> number of smiles generated by crossover", len(new_smiles_set), ' >>>')
        print("---------- 3. RGA: mutation ----------")
        pdbqtvina_list = [smiles2pdbqtvina_local[smiles] for smiles in smiles_list]
        if num_gen > -1:
            ##### evaluate probability distribution for first action in mutation in RGA
            _, mutation_sample_probability_list = mutation_policy_net_1.forward_ligand_list(
                name_of_receptor = vars['name_of_receptor'],
                pdbqtvina_list = pdbqtvina_list)
        else:
            mutation_sample_probability_list = [1/len(pdbqtvina_list) for i in pdbqtvina_list]
        sampled_idx = random.choices(list(range(len(mutation_sample_probability_list))),
                                     weights = mutation_sample_probability_list,
                                     k = mutation_budget)
        for idx in sampled_idx:
            selected_smiles_1 = smiles_list[idx]
            # mutate each seed SMILES at most once per receptor
            if selected_smiles_1 in mutation_done_set[name_of_receptor]:
                continue
            else:
                mutation_done_set[name_of_receptor].add(selected_smiles_1)
            new_mutation_smiles_list = []
            a_smiles_click_chem_object = SmileClickClass.SmilesClickChem(rxn_library_variables=rxn_library_variables,
                                                                         list_of_already_made_smiles=new_mutation_smiles_list,
                                                                         filter_object_dict=vars["filter_object_dict"])
            s_list = a_smiles_click_chem_object.run_smiles_click2(selected_smiles_1) ### smiles list
            if s_list is None or len(s_list)<=1:
                continue
            s_list = [canonicalize(s) for s in s_list]
            ##### evaluate probability distribution for second action in mutation in RGA
            _, mutation_sample_probability_list_2 = mutation_policy_net_2(selected_smiles_1, s_list)
            sampled_idx_2 = random.choices(list(range(len(mutation_sample_probability_list_2))),
                                           weights = mutation_sample_probability_list_2,
                                           k = 2)
            for idx2 in sampled_idx_2:
                smiles_2 = s_list[idx2]
                # record the (state, action) pair for the policy-gradient step 8
                mutation_train_data[name_of_receptor][smiles_2] = [smiles_list, selected_smiles_1, pdbqtvina_list, s_list]
                new_smiles_set.add(smiles_2)
        print(">>>>>> number of smiles generated by crossover and mutation", len(new_smiles_set))
        print("---------- 4. elite from previous generation ----------")
        print("---------- 5. save smiles ----------") ###### new_smiles_set -> smiles_file
        meta_result_folder = './results_' + name_of_receptor + '_'
        results_folder = meta_result_folder + str(num_gen) ### 'results_0', 'results_1', ...
        new_smiles_list = list(new_smiles_set)
        new_smiles_list = new_smiles_list[:30] #### debug
        if not os.path.exists(results_folder):
            os.makedirs(results_folder)
        full_smiles_file = os.path.join(results_folder, smiles_file)
        with open(full_smiles_file, 'w') as fout:
            for smiles in new_smiles_list:
                smiles_id = random_generate_id(id2smiles)
                fout.write(smiles + '\t' + smiles_id + '\n')
                id2smiles[smiles_id] = smiles
        print('new_smiles_list', len(new_smiles_list))
        print("---------- 6. docking ----------")
        smiles_list = docking(smiles_folder = results_folder, smiles_file = smiles_file, args_dict = vars) #### receptor is in "vars"
        # ["[N-]=[NH+]/N=C/c1[nH+]nc(-c2cccc3ccccc23)o1", "naphthalene_35", "naphthalene_35", "naphthalene_35__3", '-9.2', 40.14 (diversity), ['results_xxxx/xxxx__1.pdbqt', 'results_xxxx/xxxxx__2.pdbqt']]
        for info in smiles_list:
            smiles, smiles_id, binding_score, pdbqtvina_list = info[0], info[1], float(info[-3]), info[-1]
            smiles2info[name_of_receptor][smiles] = [smiles_id, binding_score, pdbqtvina_list]
        ############# policy gradient ##############
        # NOTE(review): train_data buffers accumulate across generations, so
        # older records are re-used in every update — confirm this is intended.
        print('------ 7. RGA: crossover policy network optimization -------')
        for ligand_new_smiles, (smiles_list, selected_smiles_1, pdbqtvina_list, holdout_smiles_list, smiles_2, holdout_pdbqtvina_list,) \
                in tqdm(crossover_train_data[name_of_receptor].items()):
            # both parents and the child must have docking scores available
            if not (selected_smiles_1 in smiles2info[name_of_receptor] \
                    and smiles_2 in smiles2info[name_of_receptor] \
                    and ligand_new_smiles in smiles2info[name_of_receptor]):
                continue
            ##### log_likelihood
            log_crossover_sample_probability, _ = crossover_policy_net_1.forward_ligand_list(
                name_of_receptor = vars['name_of_receptor'],
                pdbqtvina_list = pdbqtvina_list)
            idx = smiles_list.index(selected_smiles_1)
            log_likelihood_1 = log_crossover_sample_probability[idx]
            log_crossover_sample_probability_2, _ = crossover_policy_net_2.forward_ligand_list(
                name_of_receptor = vars['name_of_receptor'],
                pdbqtvina_list = holdout_pdbqtvina_list)
            idx2 = holdout_smiles_list.index(smiles_2)
            log_likelihood_2 = log_crossover_sample_probability_2[idx2]
            ####### reward: child's improvement over the better (more negative) parent
            v1 = smiles2info[name_of_receptor][selected_smiles_1][1]
            v2 = smiles2info[name_of_receptor][smiles_2][1]
            v3 = smiles2info[name_of_receptor][ligand_new_smiles][1]
            reward = -v3 - max(-v1, -v2) #### max is better
            ####### policy gradient (REINFORCE: minimize -logp * reward)
            log_likelihood = - (log_likelihood_1 + log_likelihood_2) * reward
            crossover_optimizer.zero_grad()
            log_likelihood.backward()
            crossover_optimizer.step()
        print('------ 8. RGA: mutation policy network optimization -------')
        for smiles_2, (smiles_list, selected_smiles_1, pdbqtvina_list, s_list) in tqdm(mutation_train_data[name_of_receptor].items()):
            if not (selected_smiles_1 in smiles2info[name_of_receptor] and smiles_2 in smiles2info[name_of_receptor]):
                continue
            ##### log_likelihood
            log_mutation_sample_probability, _ = mutation_policy_net_1.forward_ligand_list(
                name_of_receptor = vars['name_of_receptor'],
                pdbqtvina_list = pdbqtvina_list)
            idx = smiles_list.index(selected_smiles_1)
            log_likelihood_1 = log_mutation_sample_probability[idx]
            log_mutation_sample_probability_2, _ = mutation_policy_net_2(selected_smiles_1, s_list)
            idx2 = s_list.index(smiles_2)
            log_likelihood_2 = log_mutation_sample_probability_2[idx2]
            ####### reward: mutant's improvement over its seed
            v1 = smiles2info[name_of_receptor][selected_smiles_1][1]
            v2 = smiles2info[name_of_receptor][smiles_2][1]
            reward = -v2 - (-v1) #### max is better
            ####### policy gradient
            log_likelihood = - (log_likelihood_1 + log_likelihood_2) * reward
            mutation_optimizer.zero_grad()
            log_likelihood.backward()
            mutation_optimizer.step()
        print('------ 9. top-K smiles for next generation -------')
        new_smiles_list = [(smiles, smiles2info[name_of_receptor][smiles][1], smiles2info[name_of_receptor][smiles][2]) for smiles in new_smiles_list if smiles in smiles2info[name_of_receptor]]
        new_smiles_list.sort(key=lambda x:x[1])
        new_smiles_list = new_smiles_list[:topk]
        print('new_smiles_list', len(new_smiles_list))
        smiles_info_list = [(smiles,pdbqt_list) for smiles,binding_score,pdbqt_list in new_smiles_list]
        smiles2info[name_of_receptor]['smiles_info_list'] = copy.deepcopy(smiles_info_list)
# Dump the full score table for every receptor and checkpoint the policy nets.
for name_of_receptor, smiles_info_list in smiles2info.items():
    print('-------- Evaluating ' + name_of_receptor + ' ----------')
    # print(smiles2info[name_of_receptor])
    # rows: [smiles, smiles_id, binding_score, pdbqt_vina_files];
    # skip the bookkeeping key 'smiles_info_list'
    results = [[k,v[0],v[1],v[2]] for k,v in smiles2info[name_of_receptor].items() if k!='smiles_info_list']
    results.sort(key=lambda x:x[2])  # ascending binding score: best binder first
    with open('result_'+name_of_receptor + '.txt', 'w') as fo:
        for result in results:
            fo.write('\t'.join([str(i) for i in result]) + '\n')
torch.save(crossover_policy_net_1, 'save_model/crossover_policy_net_1.ckpt')
torch.save(crossover_policy_net_2, 'save_model/crossover_policy_net_2.ckpt')
torch.save(mutation_policy_net_1, 'save_model/mutation_policy_net_1.ckpt')
torch.save(mutation_policy_net_2, 'save_model/mutation_policy_net_2.ckpt')
'''
rm -rf results_*
python RGA.py \
--filename_of_receptor ./tutorial/PARP/4r6eA_PARP1_prepared.pdb \
--center_x -70.76 --center_y 21.82 --center_z 28.33 \
--size_x 25.0 --size_y 16.0 --size_z 25.0 \
--source_compound_file ./source_compounds/naphthalene_smiles.smi \
--root_output_folder ./output \
--number_of_mutants_first_generation 50 \
--number_of_crossovers_first_generation 50 \
--number_of_mutants 50 \
--number_of_crossovers 50 \
--top_mols_to_seed_next_generation 50 \
--number_elitism_advance_from_previous_gen 50 \
--number_elitism_advance_from_previous_gen_first_generation 10 \
--diversity_mols_to_seed_first_generation 10 \
--diversity_seed_depreciation_per_gen 10 \
--num_generations 10 \
--mgltools_directory ./mgltools_x86_64Linux2_1.5.6/ \
--number_of_processors -1 \
--scoring_choice VINA \
--LipinskiLenientFilter \
--start_a_new_run \
--rxn_library click_chem_rxns \
--selector_choice Rank_Selector \
--dock_choice VinaDocking \
--max_variants_per_compound 5 \
--redock_elite_from_previous_gen False \
--generate_plot True \
--reduce_files_sizes True \
--use_docked_source_compounds True
'''
| 53,684 | 42.329298 | 204 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/RunAutogrow.py | # !/usr/bin/env python
"""This is the executable file for Autogrow 4.0.3. This script should come
first. It should obtain and verify that all the parameters work. This then should
pass these parameter variables to the main execution function titled
AutogrowMainExecute.py found in MainFunctions
If you use AutoGrow 4.0.3 in your research, please cite the following reference:
Spiegel, J.O., Durrant, J.D. AutoGrow4: an open-source genetic algorithm
for de novo drug design and lead optimization. J Cheminform 12, 25 (2020).
[doi: 10.1186/s13321-020-00429-4]
"""
import __future__
import argparse
import copy
import datetime
# Imports of files are buried below to prevent EOF issues in MPI mode
################
# Run AutoGrow #
################
PARSER = argparse.ArgumentParser()
# Allows the run commands to be submitted via a .json file.
PARSER.add_argument(
"--json",
"-j",
metavar="param.json",
help="Name of a json file containing all parameters. \
Overrides other arguments.",
)
# Allows the run in debug mode. Doesn't delete temp files.
PARSER.add_argument(
"--debug_mode",
"-d",
action="store_true",
default=False,
help="Run Autogrow in Debug mode. This keeps all \
temporary files and adds extra print statements.",
)
# receptor information
# Receptor .pdb file plus the docking-box center and dimensions (Angstroms).
PARSER.add_argument(
    "--filename_of_receptor",
    "-r",
    metavar="receptor.pdb",
    default='./tutorial/PARP/4r6eA_PARP1_prepared.pdb',
    help="The path to the receptor file. Should be .pdb file.",
)
PARSER.add_argument(
    "--center_x",
    "-x",
    type=float,
    default=-70.76,
    help="x-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
    "--center_y",
    "-y",
    type=float,
    default=21.82,
    help="y-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
    "--center_z",
    "-z",
    type=float,
    default=28.33,
    help="z-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
    "--size_x",
    type=float,
    default=25.0,
    help="dimension of box to dock into in the x-axis (Angstrom)",
)
PARSER.add_argument(
    "--size_y",
    type=float,
    default=20.0,
    help="dimension of box to dock into in the y-axis (Angstrom)",
)
PARSER.add_argument(
    "--size_z",
    type=float,
    default=25.0,
    help="dimension of box to dock into in the z-axis (Angstrom)",
)
# Input/Output directories
PARSER.add_argument(
    "--root_output_folder",
    "-o",
    type=str,
    help="The Path to the folder which all output files will be placed.",
)
PARSER.add_argument(
    "--source_compound_file",
    "-s",
    type=str,
    default='./source_compounds/naphthalene_smiles.smi',
    # Help-text fix: "tab-delineated" -> "tab-delimited".
    help="PATH to the file containing the source compounds. It must be \
    tab-delimited .smi file. These ligands will seed the first generation.",
)
PARSER.add_argument(
    "--filter_source_compounds",
    choices=[True, False, "True", "False", "true", "false"],
    default=True,
    # Help-text fix: "will be filter" -> "will be filtered".
    help="If True source ligands from source_compound_file will be \
    filtered using the user defined filter choices prior to the 1st generation being \
    created. If False, ligands which would fail the ligand filters could seed \
    the 1st generation. Default is True.",
)
PARSER.add_argument(
    "--use_docked_source_compounds",
    choices=[True, False, "True", "False", "true", "false"],
    default=False,
    # Help-text fix: the last sentence claimed "Default is True." while the
    # actual default above is False.
    help="If True source ligands will be docked prior to seeding generation 1. \
    If True and the source_compound file already has docking/fitness metric score \
    in -2 column of .smi file, it will not redock but reuse the scores from \
    the source_compound_file.\
    If True and no fitness metric score in -2 column of .smi file, it will \
    dock each ligand from the source_compound_file and displayed as generation 0.\
    If False, generation 1 will be randomly seeded by the source compounds with \
    no preference and there will be no generation 0. \
    If performing multiple simulations using same source compounds and protein, \
    we recommend running once this and using the generation 0 ranked file as the \
    source_compound_file for future simulations. \
    Default is False.",
)
PARSER.add_argument(
    "--start_a_new_run",
    action="store_true",
    default=False,
    help="If False make a new folder and start a fresh simulation with Generation 0. \
    If True find the last generation in the root_output_folder and continue to fill.\
    Default is False.",
)
# SmilesMerge Settings
PARSER.add_argument(
"--max_time_MCS_prescreen",
type=int,
default=1,
help="amount time the pre-screen MCS times out. Time out doesnt prevent \
mcs matching just takes what it has up to that point",
)
PARSER.add_argument(
"--max_time_MCS_thorough",
type=int,
default=1,
help="amount time the thorough MCS times out. Time out doesnt prevent \
mcs matching just takes what it has up to that point",
)
PARSER.add_argument(
"--min_atom_match_MCS",
type=int,
default=4,
help="Determines the minimum number of atoms in common for a substructurematch. \
The higher the more restrictive, but the more likely for two ligands not to match",
)
PARSER.add_argument(
"--protanate_step",
action="store_true",
default=False,
help="Indicates if Smilesmerge uses protanated mols (if true) or deprot \
(if False) SmilesMerge is 10x faster when deprotanated",
)
# Mutation Settings
PARSER.add_argument(
"--rxn_library",
choices=["click_chem_rxns", "robust_rxns", "all_rxns", "Custom"],
default="all_rxns",
help="This set of reactions to be used in Mutation. \
If Custom, one must also provide rxn_file Path and function_group_library path",
)
PARSER.add_argument(
"--rxn_library_file",
type=str,
default="",
help="This PATH to a Custom json file of SMARTS reactions to use for Mutation. \
Only provide if using the Custom option for rxn_library.",
)
PARSER.add_argument(
"--function_group_library",
type=str,
default="",
help="This PATH for a dictionary of functional groups to be used for Mutation. \
Only provide if using the Custom option for rxn_library.",
)
PARSER.add_argument(
"--complementary_mol_directory",
type=str,
default="",
help="This PATH to the directory containing all the molecules being used \
to react with. The directory should contain .smi files contain SMILES of \
molecules containing the functional group represented by that file. Each file \
should be named with the same title as the functional groups described in \
rxn_library_file & function_group_library +.smi \
All Functional groups specified function_group_library must have its \
own .smi file. We recommend you filter these dictionaries prior to Autogrow \
for the Drug-likeliness and size filters you will Run Autogrow with.",
)
# processors and multithread mode
PARSER.add_argument(
"--number_of_processors",
"-p",
type=int,
metavar="N",
default=1,
help="Number of processors to use for parallel calculations. Set to -1 for all available CPUs.",
)
PARSER.add_argument(
"--multithread_mode",
default="multithreading",
choices=["mpi", "multithreading", "serial"],
help="Determine what style \
multithreading: mpi, multithreading, or serial. serial will override \
number_of_processors and force it to be on a single processor.",
)
# Genetic Algorithm Options
PARSER.add_argument(
    "--selector_choice",
    choices=["Roulette_Selector", "Rank_Selector", "Tournament_Selector"],
    default="Roulette_Selector",
    # Help-text fixes: "stoichastic" -> "stochastic", "chose" -> "choose".
    help="This determines whether the fitness criteria are chosen by a Weighted Roulette, \
    Ranked, or Tournament style Selector. The Rank option is a non-redundant selector.\
    Roulette and Tournament choose without replacement and are stochastic options. \
    Warning do not use Rank_Selector for small runs as there is potential that \
    the number of desired ligands exceed the number of ligands to choose from.",
)
PARSER.add_argument(
    "--tourn_size",
    type=float,
    default=0.1,
    help="If using the Tournament_Selector this determines the size of each \
    tournament. The number of ligands used for each tournament will the \
    tourn_size * the number of considered ligands.",
)
# Seeding next gen and diversity
PARSER.add_argument(
"--top_mols_to_seed_next_generation_first_generation",
type=int,
help="Number of mols that seed next generation, for the first generation.\
Should be less than number_of_crossovers_first_generation + number_of_mutations_first_generation\
If not defined it will default to top_mols_to_seed_next_generation",
)
PARSER.add_argument(
"--top_mols_to_seed_next_generation",
type=int,
default=10,
help="Number of mols that seed next generation, for all generations after the first.\
Should be less than number_of_crossovers_first_generation \
+ number_of_mutations_first_generation",
)
PARSER.add_argument(
"--diversity_mols_to_seed_first_generation",
type=int,
default=10,
help="Should be less than number_of_crossovers_first_generation \
+ number_of_mutations_first_generation",
)
PARSER.add_argument(
"--diversity_seed_depreciation_per_gen",
type=int,
default=2,
help="Each gen diversity_mols_to_seed_first_generation will decrease this amount",
)
# Populations settings
PARSER.add_argument(
"--num_generations",
type=int,
default=10,
help="The number of generations to be created.",
)
PARSER.add_argument(
"--number_of_crossovers_first_generation",
type=int,
help="The number of ligands which will be created via crossovers in the \
first generation. If not defined it will default to number_of_crossovers",
)
PARSER.add_argument(
"--number_of_mutants_first_generation",
type=int,
help="The number of ligands which will be created via mutation in \
the first generation. If not defined it will default to number_of_mutants",
)
PARSER.add_argument(
"--number_elitism_advance_from_previous_gen_first_generation",
type=int,
help="The number of ligands chosen for elitism for the first generation \
These will advance from the previous generation directly into the next \
generation. This is purely advancing based on Docking/Rescore fitness. \
This does not select for diversity. If not defined it will default to \
number_elitism_advance_from_previous_gen",
)
PARSER.add_argument(
"--number_of_crossovers",
type=int,
default=10,
help="The number of ligands which will be created via crossover in each \
generation besides the first",
)
PARSER.add_argument(
"--number_of_mutants",
type=int,
default=10,
help="The number of ligands which will be created via mutation in each \
generation besides the first.",
)
PARSER.add_argument(
"--number_elitism_advance_from_previous_gen",
type=int,
default=10,
help="The number of ligands chosen for elitism. These will advance from \
the previous generation directly into the next generation. \
This is purely advancing based on Docking/Rescore \
fitness. This does not select for diversity.",
)
PARSER.add_argument(
"--redock_elite_from_previous_gen",
choices=[True, False, "True", "False", "true", "false"],
default=False,
help="If True than ligands chosen via Elitism (ie advanced from last generation) \
will be passed through Gypsum and docked again. This provides a better exploration of conformer space \
but also requires more computation time. If False, advancing ligands are simply carried forward by \
copying the PDBQT files.",
)
####### FILTER VARIABLES
# Boolean flags enabling the various drug-likeness / substructure filters.
PARSER.add_argument(
    "--LipinskiStrictFilter",
    action="store_true",
    default=False,
    help="Lipinski filters for orally available drugs following Lipinski rule of fives. \
    Filters by molecular weight, logP and number of hydrogen bond donors and acceptors. \
    Strict implementation means a ligand must pass all requirements.",
)
PARSER.add_argument(
    "--LipinskiLenientFilter",
    action="store_true",
    default=False,
    help="Lipinski filters for orally available drugs following Lipinski rule of fives. \
    Filters by molecular weight, logP and number of hydrogen bond donors and acceptors. \
    Lenient implementation means a ligand may fail all but one requirement and still passes.",
)
PARSER.add_argument(
    "--GhoseFilter",
    action="store_true",
    default=False,
    help="Ghose filters for drug-likeliness; filters by molecular weight,\
    logP and number of atoms.",
)
PARSER.add_argument(
    "--GhoseModifiedFilter",
    action="store_true",
    default=False,
    help="Ghose filters for drug-likeliness; filters by molecular weight,\
    logP and number of atoms. This is the same as the GhoseFilter, but \
    the upper-bound of the molecular weight restrict is loosened from \
    480Da to 500Da. This is intended to be run with Lipinski Filter and \
    to match AutoGrow 3's Ghose Filter.",
)
PARSER.add_argument(
    "--MozziconacciFilter",
    action="store_true",
    default=False,
    help="Mozziconacci filters for drug-likeliness; filters by the number of \
    rotatable bonds, rings, oxygens, and halogens.",
)
PARSER.add_argument(
    "--VandeWaterbeemdFilter",
    action="store_true",
    default=False,
    # Help-text fixes: "the number of molecular weight" garble and
    # "Sureface" -> "Surface".
    help="VandeWaterbeemd filters for drug likely to be blood brain barrier permeable. \
    Filters by molecular weight and Polar Surface Area (PSA).",
)
PARSER.add_argument(
    "--PAINSFilter",
    action="store_true",
    default=False,
    # Help-text fix: "substructure a search" -> "a substructure search".
    help="PAINS filters against Pan Assay Interference Compounds using \
    a substructure search.",
)
PARSER.add_argument(
    "--NIHFilter",
    action="store_true",
    default=False,
    # Help-text fixes: "undersirable" -> "undesirable",
    # "substructure a search" -> "a substructure search".
    help="NIH filters against molecules with undesirable functional groups \
    using a substructure search.",
)
PARSER.add_argument(
    "--BRENKFilter",
    action="store_true",
    default=False,
    help="BRENK filter for lead-likeliness, by matching common false positive \
    molecules to the current mol.",
)
PARSER.add_argument(
    "--No_Filters",
    action="store_true",
    default=False,
    help="No filters will be applied to compounds.",
)
PARSER.add_argument(
    "--alternative_filter",
    action="append",
    help="If you want to add Custom filters to the filter child classes \
    Must be a list of lists \
    [[name_filter1, Path/to/name_filter1.py],[name_filter2, Path/to/name_filter2.py]]",
)
# dependency variables
# DOCUMENT THE file conversion for docking inputs
PARSER.add_argument(
"--conversion_choice",
choices=["MGLToolsConversion", "ObabelConversion", "Custom"],
default="MGLToolsConversion",
help="Determines how .pdb files will be converted \
to the final format for docking. For Autodock Vina and QuickVina style docking software, \
files must be in .pdbqt format. MGLToolsConversion: uses MGLTools and is the \
recommended converter. MGLTools conversion is required for NNScore1/2 rescoring. \
ObabelConversion: uses commandline obabel. Easier to install but Vina docking has \
been optimized with MGLTools conversion.",
)
PARSER.add_argument(
"--custom_conversion_script",
metavar="custom_conversion_script",
default="",
help="The path to a python script for which is used to convert \
ligands. This is required for custom conversion_choice choices. \
Must be a list of strings \
[name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
PARSER.add_argument(
"--mgltools_directory",
metavar="mgltools_directory",
help="Required if using MGLTools conversion option \
(conversion_choice=MGLToolsConversion) \
Path may look like: /home/user/MGLTools-1.5.6/",
)
PARSER.add_argument(
"--mgl_python",
metavar="mgl_python",
required=False,
help="/home/user/MGLTools-1.5.4/bin/pythonsh",
)
PARSER.add_argument(
"--prepare_ligand4.py",
metavar="prepare_ligand4.py",
required=False,
help="/home/user/MGLTools-1.5.4/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_ligand4.py",
)
PARSER.add_argument(
"--prepare_receptor4.py",
metavar="prepare_receptor4.py",
required=False,
help="/home/userMGLTools-1.5.4/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_receptor4.py",
)
PARSER.add_argument(
"--obabel_path",
help="required if using obabel conversion \
option (conversion_choice=ObabelConversion).\
Path may look like PATH/envs/py37/bin/obabel; \
may be found on Linux by running: which obabel",
)
###################################
######### docking #################
###################################
PARSER.add_argument(
"--dock_choice",
metavar="dock_choice",
default="QuickVina2Docking",
choices=["VinaDocking", "QuickVina2Docking", "Custom"],
help="dock_choice assigns which docking software module to use.",
)
PARSER.add_argument(
"--docking_executable",
metavar="docking_executable",
default=None,
help="path to the docking_executable",
)
PARSER.add_argument(
"--docking_exhaustiveness",
metavar="docking_exhaustiveness",
default=None,
help="exhaustiveness of the global search (roughly proportional to time. \
see docking software for settings. Unless specified Autogrow uses the \
docking softwares default setting. For AutoDock Vina 1.1.2 that is 8",
)
PARSER.add_argument(
"--docking_num_modes",
metavar="docking_num_modes",
default=None,
help=" maximum number of binding modes to generate in docking. \
See docking software for settings. Unless specified Autogrow uses the \
docking softwares default setting. For AutoDock Vina 1.1.2 that is 9",
)
PARSER.add_argument(
"--docking_timeout_limit",
type=float,
default=120,
help="The maximum amount of time allowed to dock a single ligand into a \
pocket in seconds. Many factors influence the time required to dock, such as: \
processor speed, the docking software, rotatable bonds, exhaustiveness docking,\
and number of docking modes... \
The default docking_timeout_limit is 120 seconds, which is excess for most \
docking events using QuickVina2Docking under default settings. If run with \
more exhaustive settings or with highly flexible ligands, consider increasing \
docking_timeout_limit to accommodate. Default docking_timeout_limit is 120 seconds",
)
PARSER.add_argument(
"--custom_docking_script",
metavar="custom_docking_script",
default="",
help="The name and path to a python script for which is used to \
dock ligands. This is required for Custom docking choices Must be a list of \
strings [name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
# scoring
PARSER.add_argument(
"--scoring_choice",
metavar="scoring_choice",
choices=["VINA", "NN1", "NN2", "Custom"],
default="VINA",
help="The scoring_choice to use to assess the ligands docking fitness. \
Default is using Vina/QuickVina2 ligand affinity while NN1/NN2 use a Neural Network \
to assess the docking pose. Custom requires providing a file path for a Custom \
scoring function. If Custom scoring function, confirm it selects properly, \
Autogrow is largely set to select for a more negative score.",
)
PARSER.add_argument(
"--rescore_lig_efficiency",
action="store_true",
default=False,
help="This will divide the final scoring_choice output by the number of \
non-Hydrogen atoms in the ligand. This adjusted ligand efficiency score will \
override the scoring_choice value. This is compatible with all scoring_choice options.",
)
PARSER.add_argument(
"--custom_scoring_script",
metavar="custom_scoring_script",
type=str,
default="",
help="The path to a python script for which is used to \
assess the ligands docking fitness. Autogrow is largely set to select for a most \
negative scores (ie binding affinity the more negative is best). Must be a list of \
strings [name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
# gypsum # max variance is the number of conformers made per ligand
PARSER.add_argument(
"--max_variants_per_compound",
type=int,
default=3,
help="number of conformers made per ligand. \
See Gypsum-DL publication for details",
)
PARSER.add_argument(
"--gypsum_thoroughness",
"-t",
type=str,
help="How widely Gypsum-DL will search for \
low-energy conformers. Larger values increase \
run times but can produce better results. \
See Gypsum-DL publication for details",
)
PARSER.add_argument(
"--min_ph",
metavar="MIN",
type=float,
default=6.4,
help="Minimum pH to consider.See Gypsum-DL \
and Dimorphite-D publication for details.",
)
PARSER.add_argument(
"--max_ph",
metavar="MAX",
type=float,
default=8.4,
help="Maximum pH to consider.See Gypsum-DL \
and Dimorphite-D publication for details.",
)
PARSER.add_argument(
"--pka_precision",
metavar="D",
type=float,
default=1.0,
help="Size of pH substructure ranges. See Dimorphite-DL \
publication for details.",
)
PARSER.add_argument(
"--gypsum_timeout_limit",
type=float,
default=15,
help="Maximum time gypsum is allowed to run for a given ligand in seconds. \
On average Gypsum-DL takes on several seconds to run for a given ligand, but \
factors such as mol size, rotatable bonds, processor speed, and gypsum \
settings (ie gypsum_thoroughness or max_variants_per_compound) will change \
how long it takes to run. If increasing gypsum settings it is best to increase \
the gypsum_timeout_limit. Default gypsum_timeout_limit is 15 seconds",
)
# Reduce files down. This compiles and compresses the files in the PDBs folder
# (contains docking outputs, pdb, pdbqt...). This reduces the data size and
# makes data transfer quicker, but requires running the
# file_concatenation_and_compression.py in the Utility script folder to
# separate these files out for readability.
PARSER.add_argument(
"--reduce_files_sizes",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="Run this combines all files in the PDBs folder into a \
single text file. Useful when data needs to be transferred.",
)
# Make a line plot of the simulation at the end of the run.
PARSER.add_argument(
"--generate_plot",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="Make a line plot of the simulation at the end of the run.",
)
# mpi mode pre-Run so there are python cache files without EOF Errors
PARSER.add_argument(
"--cache_prerun",
"-c",
action="store_true",
help="Run this before running gypsum in mpi-mode.",
)
# Parse the command line into a plain dict. `vars` here is still the python
# builtin; it is shadowed by the settings dict further down in this branch.
args_dict = vars(PARSER.parse_args())
# copying args_dict so we can delete out of INPUTS while iterating through the
# original args_dict
INPUTS = copy.deepcopy(args_dict)
# Drop options the user did not supply (None) so that downstream
# default-handling in autogrow.user_vars can fill them in.
for k, v in args_dict.items():
    if v is None:
        del INPUTS[k]
# Normal run; when --cache_prerun/-c is given this whole branch is skipped
# (the prerun only exists to build python cache files before an MPI run).
if args_dict["cache_prerun"] is False:
    # assumes `copy` (above) and `datetime` are imported at the top of the
    # file -- not visible in this chunk; TODO confirm
    start_time = str(datetime.datetime.now())
    # load the commandline parameters
    from autogrow.user_vars import load_in_commandline_parameters
    # NOTE(review): `vars` shadows the builtin from here on, and the returned
    # `printout` is overwritten below without ever being printed.
    vars, printout = load_in_commandline_parameters(INPUTS)
    # print out the UserVars for the record
    print("\n=====================================================")
    print("============== Parameters as list: ===============")
    for key in list(vars.keys()):
        print(key, vars[key])
    print("\n=====================================================")
    print("=========== Parameters as dictionary: ============")
    print(vars)
    print("=====================================================")
    print("=====================================================\n\n")
    # Run AUTOGROW. Import moved here to prevent EOF errors in MPI mode:
    # importing these files before the Parallelizer class is established in
    # MPI mode can cause errors.
    import autogrow.autogrow_main_execute as AutogrowMainExecute
    #####################
    #### main run #######
    #####################
    AutogrowMainExecute.main_execute(vars)
    #####################
    #####################
    # Print completion message with start/end wall-clock timestamps.
    printout = "\nAutoGrow4 run started at: {}\nAutoGrow4 ".format(start_time)
    printout = printout + "run completed at: {}\n".format(str(datetime.datetime.now()))
    print(printout)
    print("AUTOGROW FINISHED")
    # kill mpi workers
    vars["parallelizer"].end(vars["multithread_mode"])
# else: # cache prerun. This is necessary to prevent race conditions in mpi mode.
# import autogrow.user_vars
# import autogrow.autogrow_main_execute as AutogrowMainExecute
# import autogrow.operators.convert_files.gypsum_dl.gypsum_dl.Parallelizer
| 25,053 | 33.942817 | 107 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/demo_docking.py | import argparse
PARSER = argparse.ArgumentParser()
# Allows the run commands to be submitted via a .json file.
PARSER.add_argument(
"--json",
"-j",
metavar="param.json",
help="Name of a json file containing all parameters. \
Overrides other arguments.",
)
# Allows the run in debug mode. Doesn't delete temp files.
PARSER.add_argument(
"--debug_mode",
"-d",
action="store_true",
default=False,
help="Run Autogrow in Debug mode. This keeps all \
temporary files and adds extra print statements.",
)
# receptor information
PARSER.add_argument(
"--filename_of_receptor",
"-r",
metavar="receptor.pdb",
default='./tutorial/PARP/4r6eA_PARP1_prepared.pdb',
help="The path to the receptor file. Should be .pdb file.",
)
PARSER.add_argument(
"--center_x",
"-x",
type=float,
default=-70.76,
help="x-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
"--center_y",
"-y",
type=float,
default=21.82,
help="y-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
"--center_z",
"-z",
type=float,
default=28.33,
help="z-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
"--size_x",
type=float,
default=25.0,
help="dimension of box to dock into in the x-axis (Angstrom)",
)
PARSER.add_argument(
"--size_y",
type=float,
default=20.0,
help="dimension of box to dock into in the y-axis (Angstrom)",
)
PARSER.add_argument(
"--size_z",
type=float,
default=25.0,
help="dimension of box to dock into in the z-axis (Angstrom)",
)
# Input/Output directories
PARSER.add_argument(
"--root_output_folder",
"-o",
type=str,
help="The Path to the folder which all output files will be placed.",
)
PARSER.add_argument(
"--source_compound_file",
"-s",
type=str,
default='./source_compounds/naphthalene_smiles.smi',
help="PATH to the file containing the source compounds. It must be \
tab-delineated .smi file. These ligands will seed the first generation.",
)
PARSER.add_argument(
"--filter_source_compounds",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="If True source ligands from source_compound_file will be \
filter using the user defined filter choices prior to the 1st generation being \
created. If False, ligands which would fail the ligand filters could seed \
the 1st generation. Default is True.",
)
# Flag controlling whether source ligands are docked as "generation 0".
# Fix: the help text claimed "Default is True." while the declared default is
# False -- the text now matches `default=False`. Also fixes "running once this".
PARSER.add_argument(
    "--use_docked_source_compounds",
    choices=[True, False, "True", "False", "true", "false"],
    default=False,
    help="If True source ligands will be docked prior to seeding generation 1. \
    If True and the source_compound file already has docking/fitness metric score \
    in -2 column of .smi file, it will not redock but reuse the scores from \
    the source_compound_file.\
    If True and no fitness metric score in -2 column of .smi file, it will \
    dock each ligand from the source_compound_file and displayed as generation 0.\
    If False, generation 1 will be randomly seeded by the source compounds with \
    no preference and there will be no generation 0. \
    If performing multiple simulations using same source compounds and protein, \
    we recommend running this once and using the generation 0 ranked file as the \
    source_compound_file for future simulations. \
    Default is False.",
)
PARSER.add_argument(
"--start_a_new_run",
action="store_true",
default=False,
help="If False make a new folder and start a fresh simulation with Generation 0. \
If True find the last generation in the root_output_folder and continue to fill.\
Default is False.",
)
# SmilesMerge Settings
PARSER.add_argument(
"--max_time_MCS_prescreen",
type=int,
default=1,
help="amount time the pre-screen MCS times out. Time out doesnt prevent \
mcs matching just takes what it has up to that point",
)
PARSER.add_argument(
"--max_time_MCS_thorough",
type=int,
default=1,
help="amount time the thorough MCS times out. Time out doesnt prevent \
mcs matching just takes what it has up to that point",
)
PARSER.add_argument(
"--min_atom_match_MCS",
type=int,
default=4,
help="Determines the minimum number of atoms in common for a substructurematch. \
The higher the more restrictive, but the more likely for two ligands not to match",
)
PARSER.add_argument(
"--protanate_step",
action="store_true",
default=False,
help="Indicates if Smilesmerge uses protanated mols (if true) or deprot \
(if False) SmilesMerge is 10x faster when deprotanated",
)
# Mutation Settings
PARSER.add_argument(
"--rxn_library",
choices=["click_chem_rxns", "robust_rxns", "all_rxns", "Custom"],
default="all_rxns",
help="This set of reactions to be used in Mutation. \
If Custom, one must also provide rxn_file Path and function_group_library path",
)
PARSER.add_argument(
"--rxn_library_file",
type=str,
default="",
help="This PATH to a Custom json file of SMARTS reactions to use for Mutation. \
Only provide if using the Custom option for rxn_library.",
)
PARSER.add_argument(
"--function_group_library",
type=str,
default="",
help="This PATH for a dictionary of functional groups to be used for Mutation. \
Only provide if using the Custom option for rxn_library.",
)
PARSER.add_argument(
"--complementary_mol_directory",
type=str,
default="",
help="This PATH to the directory containing all the molecules being used \
to react with. The directory should contain .smi files contain SMILES of \
molecules containing the functional group represented by that file. Each file \
should be named with the same title as the functional groups described in \
rxn_library_file & function_group_library +.smi \
All Functional groups specified function_group_library must have its \
own .smi file. We recommend you filter these dictionaries prior to Autogrow \
for the Drug-likeliness and size filters you will Run Autogrow with.",
)
# processors and multithread mode
PARSER.add_argument(
"--number_of_processors",
"-p",
type=int,
metavar="N",
default=1,
help="Number of processors to use for parallel calculations. Set to -1 for all available CPUs.",
)
PARSER.add_argument(
"--multithread_mode",
default="multithreading",
choices=["mpi", "multithreading", "serial"],
help="Determine what style \
multithreading: mpi, multithreading, or serial. serial will override \
number_of_processors and force it to be on a single processor.",
)
# Genetic Algorithm Options
# Selector used by the genetic algorithm to pick parents/advancers.
# Fixes help-text typos: "stoichastic" -> "stochastic", "chose" -> "choose".
PARSER.add_argument(
    "--selector_choice",
    choices=["Roulette_Selector", "Rank_Selector", "Tournament_Selector"],
    default="Roulette_Selector",
    help="This determines whether the fitness criteria are chosen by a Weighted Roulette, \
    Ranked, or Tournament style Selector. The Rank option is a non-redundant selector.\
    Roulette and Tournament choose without replacement and are stochastic options. \
    Warning do not use Rank_Selector for small runs as there is potential that \
    the number of desired ligands exceed the number of ligands to choose from.",
)
PARSER.add_argument(
"--tourn_size",
type=float,
default=0.1,
help="If using the Tournament_Selector this determines the size of each \
tournament. The number of ligands used for each tournament will the \
tourn_size * the number of considered ligands.",
)
# Seeding next gen and diversity
PARSER.add_argument(
"--top_mols_to_seed_next_generation_first_generation",
type=int,
help="Number of mols that seed next generation, for the first generation.\
Should be less than number_of_crossovers_first_generation + number_of_mutations_first_generation\
If not defined it will default to top_mols_to_seed_next_generation",
)
PARSER.add_argument(
"--top_mols_to_seed_next_generation",
type=int,
default=10,
help="Number of mols that seed next generation, for all generations after the first.\
Should be less than number_of_crossovers_first_generation \
+ number_of_mutations_first_generation",
)
PARSER.add_argument(
"--diversity_mols_to_seed_first_generation",
type=int,
default=10,
help="Should be less than number_of_crossovers_first_generation \
+ number_of_mutations_first_generation",
)
PARSER.add_argument(
"--diversity_seed_depreciation_per_gen",
type=int,
default=2,
help="Each gen diversity_mols_to_seed_first_generation will decrease this amount",
)
# Populations settings
PARSER.add_argument(
"--num_generations",
type=int,
default=10,
help="The number of generations to be created.",
)
PARSER.add_argument(
"--number_of_crossovers_first_generation",
type=int,
help="The number of ligands which will be created via crossovers in the \
first generation. If not defined it will default to number_of_crossovers",
)
PARSER.add_argument(
"--number_of_mutants_first_generation",
type=int,
help="The number of ligands which will be created via mutation in \
the first generation. If not defined it will default to number_of_mutants",
)
PARSER.add_argument(
"--number_elitism_advance_from_previous_gen_first_generation",
type=int,
help="The number of ligands chosen for elitism for the first generation \
These will advance from the previous generation directly into the next \
generation. This is purely advancing based on Docking/Rescore fitness. \
This does not select for diversity. If not defined it will default to \
number_elitism_advance_from_previous_gen",
)
PARSER.add_argument(
"--number_of_crossovers",
type=int,
default=10,
help="The number of ligands which will be created via crossover in each \
generation besides the first",
)
PARSER.add_argument(
"--number_of_mutants",
type=int,
default=10,
help="The number of ligands which will be created via mutation in each \
generation besides the first.",
)
PARSER.add_argument(
"--number_elitism_advance_from_previous_gen",
type=int,
default=10,
help="The number of ligands chosen for elitism. These will advance from \
the previous generation directly into the next generation. \
This is purely advancing based on Docking/Rescore \
fitness. This does not select for diversity.",
)
PARSER.add_argument(
"--redock_elite_from_previous_gen",
choices=[True, False, "True", "False", "true", "false"],
default=False,
help="If True than ligands chosen via Elitism (ie advanced from last generation) \
will be passed through Gypsum and docked again. This provides a better exploration of conformer space \
but also requires more computation time. If False, advancing ligands are simply carried forward by \
copying the PDBQT files.",
)
####### FILTER VARIABLES
PARSER.add_argument(
"--LipinskiStrictFilter",
action="store_true",
default=False,
help="Lipinski filters for orally available drugs following Lipinski rule of fives. \
Filters by molecular weight, logP and number of hydrogen bond donors and acceptors. \
Strict implementation means a ligand must pass all requirements.",
)
PARSER.add_argument(
"--LipinskiLenientFilter",
action="store_true",
default=False,
help="Lipinski filters for orally available drugs following Lipinski rule of fives. \
Filters by molecular weight, logP and number of hydrogen bond donors and acceptors. \
Lenient implementation means a ligand may fail all but one requirement and still passes.",
)
PARSER.add_argument(
"--GhoseFilter",
action="store_true",
default=False,
help="Ghose filters for drug-likeliness; filters by molecular weight,\
logP and number of atoms.",
)
PARSER.add_argument(
"--GhoseModifiedFilter",
action="store_true",
default=False,
help="Ghose filters for drug-likeliness; filters by molecular weight,\
logP and number of atoms. This is the same as the GhoseFilter, but \
the upper-bound of the molecular weight restrict is loosened from \
480Da to 500Da. This is intended to be run with Lipinski Filter and \
to match AutoGrow 3's Ghose Filter.",
)
PARSER.add_argument(
"--MozziconacciFilter",
action="store_true",
default=False,
help="Mozziconacci filters for drug-likeliness; filters by the number of \
rotatable bonds, rings, oxygens, and halogens.",
)
# Boolean flag: Van de Waterbeemd blood-brain-barrier permeability filter.
# Fixes user-facing help-text defects: "drug likely" -> "drugs likely",
# "the number of molecular weight" grammar, and "Sureface" -> "Surface".
PARSER.add_argument(
    "--VandeWaterbeemdFilter",
    action="store_true",
    default=False,
    help="VandeWaterbeemd filters for drugs likely to be blood brain barrier permeable. \
    Filters by molecular weight and Polar Surface Area (PSA).",
)
# Boolean flag: PAINS (Pan Assay Interference Compounds) substructure filter.
# Fixes garbled help text "using substructure a search".
PARSER.add_argument(
    "--PAINSFilter",
    action="store_true",
    default=False,
    help="PAINS filters against Pan Assay Interference Compounds using \
    a substructure search.",
)
# Boolean flag: NIH filter for undesirable functional groups.
# Fixes help-text typos "undersirable" and "using substructure a search".
PARSER.add_argument(
    "--NIHFilter",
    action="store_true",
    default=False,
    help="NIH filters against molecules with undesirable functional groups \
    using a substructure search.",
)
PARSER.add_argument(
"--BRENKFilter",
action="store_true",
default=False,
help="BRENK filter for lead-likeliness, by matching common false positive \
molecules to the current mol.",
)
PARSER.add_argument(
"--No_Filters",
action="store_true",
default=False,
help="No filters will be applied to compounds.",
)
PARSER.add_argument(
"--alternative_filter",
action="append",
help="If you want to add Custom filters to the filter child classes \
Must be a list of lists \
[[name_filter1, Path/to/name_filter1.py],[name_filter2, Path/to/name_filter2.py]]",
)
# dependency variables
# DOCUMENT THE file conversion for docking inputs
PARSER.add_argument(
"--conversion_choice",
choices=["MGLToolsConversion", "ObabelConversion", "Custom"],
default="MGLToolsConversion",
help="Determines how .pdb files will be converted \
to the final format for docking. For Autodock Vina and QuickVina style docking software, \
files must be in .pdbqt format. MGLToolsConversion: uses MGLTools and is the \
recommended converter. MGLTools conversion is required for NNScore1/2 rescoring. \
ObabelConversion: uses commandline obabel. Easier to install but Vina docking has \
been optimized with MGLTools conversion.",
)
PARSER.add_argument(
"--custom_conversion_script",
metavar="custom_conversion_script",
default="",
help="The path to a python script for which is used to convert \
ligands. This is required for custom conversion_choice choices. \
Must be a list of strings \
[name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
PARSER.add_argument(
"--mgltools_directory",
metavar="mgltools_directory",
help="Required if using MGLTools conversion option \
(conversion_choice=MGLToolsConversion) \
Path may look like: /home/user/MGLTools-1.5.6/",
)
PARSER.add_argument(
"--mgl_python",
metavar="mgl_python",
required=False,
help="/home/user/MGLTools-1.5.4/bin/pythonsh",
)
PARSER.add_argument(
"--prepare_ligand4.py",
metavar="prepare_ligand4.py",
required=False,
help="/home/user/MGLTools-1.5.4/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_ligand4.py",
)
PARSER.add_argument(
"--prepare_receptor4.py",
metavar="prepare_receptor4.py",
required=False,
help="/home/userMGLTools-1.5.4/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_receptor4.py",
)
PARSER.add_argument(
"--obabel_path",
help="required if using obabel conversion \
option (conversion_choice=ObabelConversion).\
Path may look like PATH/envs/py37/bin/obabel; \
may be found on Linux by running: which obabel",
)
###################################
######### docking #################
###################################
PARSER.add_argument(
"--dock_choice",
metavar="dock_choice",
default="QuickVina2Docking",
choices=["VinaDocking", "QuickVina2Docking", "Custom"],
help="dock_choice assigns which docking software module to use.",
)
PARSER.add_argument(
"--docking_executable",
metavar="docking_executable",
default=None,
help="path to the docking_executable",
)
PARSER.add_argument(
"--docking_exhaustiveness",
metavar="docking_exhaustiveness",
default=None,
help="exhaustiveness of the global search (roughly proportional to time. \
see docking software for settings. Unless specified Autogrow uses the \
docking softwares default setting. For AutoDock Vina 1.1.2 that is 8",
)
PARSER.add_argument(
"--docking_num_modes",
metavar="docking_num_modes",
default=None,
help=" maximum number of binding modes to generate in docking. \
See docking software for settings. Unless specified Autogrow uses the \
docking softwares default setting. For AutoDock Vina 1.1.2 that is 9",
)
PARSER.add_argument(
"--docking_timeout_limit",
type=float,
default=120,
help="The maximum amount of time allowed to dock a single ligand into a \
pocket in seconds. Many factors influence the time required to dock, such as: \
processor speed, the docking software, rotatable bonds, exhaustiveness docking,\
and number of docking modes... \
The default docking_timeout_limit is 120 seconds, which is excess for most \
docking events using QuickVina2Docking under default settings. If run with \
more exhaustive settings or with highly flexible ligands, consider increasing \
docking_timeout_limit to accommodate. Default docking_timeout_limit is 120 seconds",
)
PARSER.add_argument(
"--custom_docking_script",
metavar="custom_docking_script",
default="",
help="The name and path to a python script for which is used to \
dock ligands. This is required for Custom docking choices Must be a list of \
strings [name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
# scoring
PARSER.add_argument(
"--scoring_choice",
metavar="scoring_choice",
choices=["VINA", "NN1", "NN2", "Custom"],
default="VINA",
help="The scoring_choice to use to assess the ligands docking fitness. \
Default is using Vina/QuickVina2 ligand affinity while NN1/NN2 use a Neural Network \
to assess the docking pose. Custom requires providing a file path for a Custom \
scoring function. If Custom scoring function, confirm it selects properly, \
Autogrow is largely set to select for a more negative score.",
)
PARSER.add_argument(
"--rescore_lig_efficiency",
action="store_true",
default=False,
help="This will divide the final scoring_choice output by the number of \
non-Hydrogen atoms in the ligand. This adjusted ligand efficiency score will \
override the scoring_choice value. This is compatible with all scoring_choice options.",
)
PARSER.add_argument(
"--custom_scoring_script",
metavar="custom_scoring_script",
type=str,
default="",
help="The path to a python script for which is used to \
assess the ligands docking fitness. Autogrow is largely set to select for a most \
negative scores (ie binding affinity the more negative is best). Must be a list of \
strings [name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
# gypsum # max variance is the number of conformers made per ligand
PARSER.add_argument(
"--max_variants_per_compound",
type=int,
default=3,
help="number of conformers made per ligand. \
See Gypsum-DL publication for details",
)
PARSER.add_argument(
"--gypsum_thoroughness",
"-t",
type=int,
default = 3,
help="How widely Gypsum-DL will search for \
low-energy conformers. Larger values increase \
run times but can produce better results. \
See Gypsum-DL publication for details",
)
PARSER.add_argument(
"--min_ph",
metavar="MIN",
type=float,
default=6.4,
help="Minimum pH to consider.See Gypsum-DL \
and Dimorphite-D publication for details.",
)
PARSER.add_argument(
"--max_ph",
metavar="MAX",
type=float,
default=8.4,
help="Maximum pH to consider.See Gypsum-DL \
and Dimorphite-D publication for details.",
)
PARSER.add_argument(
"--pka_precision",
metavar="D",
type=float,
default=1.0,
help="Size of pH substructure ranges. See Dimorphite-DL \
publication for details.",
)
PARSER.add_argument(
"--gypsum_timeout_limit",
type=float,
default=15,
help="Maximum time gypsum is allowed to run for a given ligand in seconds. \
On average Gypsum-DL takes on several seconds to run for a given ligand, but \
factors such as mol size, rotatable bonds, processor speed, and gypsum \
settings (ie gypsum_thoroughness or max_variants_per_compound) will change \
how long it takes to run. If increasing gypsum settings it is best to increase \
the gypsum_timeout_limit. Default gypsum_timeout_limit is 15 seconds",
)
# Reduce files down. This compiles and compresses the files in the PDBs folder
# (contains docking outputs, pdb, pdbqt...). This reduces the data size and
# makes data transfer quicker, but requires running the
# file_concatenation_and_compression.py in the Utility script folder to
# separate these files out for readability.
PARSER.add_argument(
"--reduce_files_sizes",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="Run this combines all files in the PDBs folder into a \
single text file. Useful when data needs to be transferred.",
)
# Make a line plot of the simulation at the end of the run.
PARSER.add_argument(
"--generate_plot",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="Make a line plot of the simulation at the end of the run.",
)
# mpi mode pre-Run so there are python cache files without EOF Errors
PARSER.add_argument(
"--cache_prerun",
"-c",
action="store_true",
help="Run this before running gypsum in mpi-mode.",
)
# Parse the command line into a plain dict (`vars` is the python builtin).
args_dict = vars(PARSER.parse_args())
from autogrow.user_vars import multiprocess_handling, define_defaults, determine_bash_timeout_vs_gtimeout
# args_dict = define_defaults()
import copy
# Copy so entries can be deleted from INPUTS while iterating args_dict.
INPUTS = copy.deepcopy(args_dict)
# Drop options the user did not supply (None) so that default-handling in
# autogrow.user_vars can fill them in.
for k, v in args_dict.items():
    if v is None:
        del INPUTS[k]
# Skip all setup when doing the MPI cache prerun (--cache_prerun/-c).
if args_dict["cache_prerun"] is False:
    # load the commandline parameters
    from autogrow.user_vars import load_in_commandline_parameters
    args_dict, printout = load_in_commandline_parameters(INPUTS)
    # Set up multiprocessing/parallelizer state inside the settings dict.
    args_dict = multiprocess_handling(args_dict)
    # Presumably selects the `timeout` vs `gtimeout` shell command depending
    # on the OS -- verify against autogrow.user_vars.
    timeout_option = determine_bash_timeout_vs_gtimeout()
    if timeout_option in ["timeout", "gtimeout"]:
        args_dict["timeout_vs_gtimeout"] = timeout_option
    else:
        raise Exception("Something is very wrong. This OS may not be supported by \
        Autogrow or you may need to execute through Bash.")
# The triple-quoted block below is a reference command line for running the
# original AutoGrow driver; it is a no-op string literal, kept for reference.
'''
python run.py \
--filename_of_receptor ./tutorial/PARP/4r6eA_PARP1_prepared.pdb \
--center_x -70.76 --center_y 21.82 --center_z 28.33 \
--size_x 25.0 --size_y 16.0 --size_z 25.0 \
--source_compound_file ./source_compounds/naphthalene_smiles.smi \
--root_output_folder ./output \
--number_of_mutants_first_generation 50 \
--number_of_crossovers_first_generation 50 \
--number_of_mutants 50 \
--number_of_crossovers 50 \
--top_mols_to_seed_next_generation 50 \
--number_elitism_advance_from_previous_gen 50 \
--number_elitism_advance_from_previous_gen_first_generation 10 \
--diversity_mols_to_seed_first_generation 10 \
--diversity_seed_depreciation_per_gen 10 \
--num_generations 5 \
--mgltools_directory ./mgltools_x86_64Linux2_1.5.6/ \
--number_of_processors -1 \
--scoring_choice VINA \
--LipinskiLenientFilter \
--start_a_new_run \
--rxn_library click_chem_rxns \
--selector_choice Rank_Selector \
--dock_choice VinaDocking \
--max_variants_per_compound 5 \
--redock_elite_from_previous_gen False \
--generate_plot True \
--reduce_files_sizes True \
--use_docked_source_compounds True
'''
import numpy as np
import os, json, pickle, time, sys
# import torch
# from autogrow.model import Ligand2D
"""
- docking
- reaction (mutation)
- common structure (crossover)
- train policy
"""
# Receptor / docking-box constants for the PARP tutorial target.
# NOTE(review): these locals shadow the --filename_of_receptor / box CLI
# options above and are not read back into args_dict -- confirm intended.
filename_of_receptor = "./tutorial/PARP/4r6eA_PARP1_prepared.pdb"
center = [-70.76, 21.82, 28.33]
box_size = [20.0, 20.0, 20.0]
# Seed population: first whitespace-delimited column of the .smi file is the
# SMILES string.
source_compound_file = args_dict['source_compound_file']
with open(source_compound_file, 'r') as fin:
    lines = fin.readlines()
population_list = [line.split()[0] for line in lines]
## TODO1 smiles -> sdf -> pdb -> pdbqt
import autogrow.operators.convert_files.conversion_to_3d as conversion_to_3d
# conversion_to_3d.convert_smi_to_sdfs_with_gypsum
# conversion_to_3d.convert_sdf_to_pdbs
# convert_sdf_to_pdbs(vars, gen_folder_path, sdfs_folder_path)
# conversion_to_3d.convert_single_sdf_to_pdb
# convert_ligand_pdb_file_to_pdbqt #### in run_docking_common lig_convert_multithread
def smiles_to_sdfs(vars, gen_smiles_file, smile_file_directory):
    """Convert a .smi file of SMILES into 3D .sdf files via Gypsum-DL.

    Mirrors conversion_to_3d.convert_smi_to_sdfs_with_gypsum: prepares the
    submission/output/log folders, writes one Gypsum parameter set per
    ligand, then runs every job through the shared parallelizer.

    :param vars: AutoGrow settings dict; must supply the gypsum options and
        a "parallelizer" object.
    :param gen_smiles_file: path of the .smi file to convert.
    :param smile_file_directory: directory prefix under which the Gypsum
        folders are created.
    :returns: path of the folder the 3D .sdf files are written to.
    """
    # Pull the Gypsum-DL knobs out of the settings dict up front.
    n_variants = vars["max_variants_per_compound"]
    thoroughness = vars["gypsum_thoroughness"]
    ph_min = vars["min_ph"]
    ph_max = vars["max_ph"]
    pka_prec = vars["pka_precision"]
    timeout = vars["gypsum_timeout_limit"]
    # Folder layout: per-ligand .smi/.json submissions, the .sdf output
    # folder, and a log folder nested inside the .sdf folder.
    submission_dir = "{}gypsum_submission_files{}".format(smile_file_directory, os.sep)
    sdf_dir = "{}3D_SDFs{}".format(smile_file_directory, os.sep)
    log_dir = "{}log{}".format(sdf_dir, os.sep)
    for folder in (submission_dir, sdf_dir, log_dir):
        if not os.path.exists(folder):
            os.makedirs(folder)
    # One Gypsum parameter set per ligand in the input .smi file.
    gypsum_param_sets = conversion_to_3d.make_smi_and_gyspum_params(
        gen_smiles_file,
        submission_dir,
        sdf_dir,
        n_variants, thoroughness,
        ph_min, ph_max, pka_prec, )
    job_input = tuple((log_dir, params, timeout) for params in gypsum_param_sets)
    sys.stdout.flush()
    # Workers return the SMILES on failure and None on success.
    results = vars["parallelizer"].run(job_input, conversion_to_3d.run_gypsum_multiprocessing)
    sys.stdout.flush()
    failed = [smile for smile in results if smile is not None]
    failed = list(set(failed))
    if len(failed) > 0:
        print("The Following ligands Failed to convert in Gypsum")
        print("Likely due to a Timeout")
        print(failed)
    sys.stdout.flush()
    return sdf_dir
from autogrow.docking.execute_docking import pick_run_conversion_class_dict, pick_docking_class_dict, lig_convert_multithread
def pdb_to_pdbqt(vars, pdb_dir):
    """Convert every ligand .pdb file in ``pdb_dir`` to .pdbqt format.

    Adapted from run_docking_common: builds the file-conversion and docking
    objects from the user settings, resolves the docking executable if it is
    not set yet, and runs the PDB->PDBQT conversions through the
    parallelizer.

    :param vars: AutoGrow settings dict; mutated to cache
        "docking_executable" when it was None.
    :param pdb_dir: folder holding the ligand .pdb files.
    :returns: the configured docking object, ready to dock the .pdbqt files.
    """
    docking_choice = vars["dock_choice"]
    convert_choice = vars["conversion_choice"]
    receptor_file = vars["filename_of_receptor"]
    # Strip the parallelizer before handing the settings to the worker
    # classes, so mpi/multiprocess state is never piped through itself.
    plain_vars = {key: val for key, val in vars.items() if key != "parallelizer"}
    converter_cls = pick_run_conversion_class_dict(convert_choice)
    converter = converter_cls(plain_vars, receptor_file, test_boot=False)
    docker_cls = pick_docking_class_dict(docking_choice)
    docking_object = docker_cls(plain_vars, receptor_file, converter, test_boot=False)
    # Resolve and cache the docking executable (vina or Qvina) on first use.
    if vars["docking_executable"] is None:
        vars["docking_executable"] = docking_object.get_docking_executable_file(plain_vars)
    # Locate the ligand PDBs and convert them in parallel.
    ligand_pdbs = docking_object.find_pdb_ligands(pdb_dir)
    print('all pdb file [:10]', ligand_pdbs[:10], pdb_dir)
    conversion_jobs = tuple((docking_object, pdb) for pdb in ligand_pdbs)
    print("Convert Ligand from PDB to PDBQT format")
    vars["parallelizer"].run(conversion_jobs, lig_convert_multithread)
    converted = docking_object.find_converted_ligands(pdb_dir)
    print('pdbqt file [:10]', converted[:10])
    return docking_object
# Run the full conversion pipeline on the seed compounds:
# SMILES -> 3D SDFs -> PDBs -> PDBQTs (ready for docking).
# NOTE(review): folder names are built by plain string concatenation
# ('./source_compounds' + '3D_SDFs' / 'PDBs') -- confirm paths on disk.
gypsum_output_folder_path = smiles_to_sdfs(args_dict, source_compound_file, smile_file_directory='./source_compounds')
conversion_to_3d.convert_sdf_to_pdbs(args_dict, gen_folder_path='./source_compounds', sdfs_folder_path='./source_compounds3D_SDFs')
docking_object = pdb_to_pdbqt(vars = args_dict, pdb_dir = './source_compoundsPDBs/')
### output directory: source_compoundsPDBs
## TODO2 docking an pdbqt
from autogrow.docking.execute_docking import run_dock_multithread, run_docking_common
import autogrow.docking.scoring.execute_scoring_mol as Scoring
import autogrow.docking.ranking.ranking_mol as Ranking
# run_dock
# dock_ligand
def docking_pdbqt(vars, docking_object, pdbqt_folder):
    """Dock every .pdbqt ligand in ``pdbqt_folder``, then score and rank.

    Steps (adapted from AutoGrow's run_docking_common):
      1. dock all converted ligands through the parallelizer,
      2. score the docked poses with the configured scoring function,
      3. sort ascending by fitness (last column) and append pairwise
         diversity scores,
      4. write the ranked list next to the source .smi as *_ranked.smi.

    :param vars: AutoGrow settings dict (supplies the "parallelizer").
    :param docking_object: docking wrapper produced by pdb_to_pdbqt().
    :param pdbqt_folder: folder holding the ligand .pdbqt files.
    """
    pdbqts_in_folder = docking_object.find_converted_ligands(pdbqt_folder)
    print('pdbqts [:10]', pdbqts_in_folder[:10])
    job_input_dock_lig = tuple([tuple([docking_object, pdbqt]) for pdbqt in pdbqts_in_folder])
    # Workers return the ligand name on failure and None on success.
    smiles_names_failed_to_dock = vars["parallelizer"].run(job_input_dock_lig, run_dock_multithread) ### main
    deleted_smiles_names_list_dock = [x for x in smiles_names_failed_to_dock if x is not None]
    deleted_smiles_names_list_dock = list(set(deleted_smiles_names_list_dock))
    print("THE FOLLOWING LIGANDS WHICH FAILED TO DOCK:", deleted_smiles_names_list_dock)
    print("####################")
    print("\nBegin Ranking and Saving results")
    # folder_with_pdbqts = current_generation_dir + "PDBs" + os.sep
    folder_with_pdbqts = pdbqt_folder
    # NOTE(review): the source .smi path is hard-coded here instead of taken
    # from vars['source_compound_file'] -- confirm these stay in sync.
    smile_file = 'source_compounds/naphthalene_smiles.smi'
    # Run any compatible Scoring Function
    smiles_list = Scoring.run_scoring_common(vars, smile_file, folder_with_pdbqts)
    print('---------', smiles_list[:10], 'smiles_list[:10] --------------')
    # Disabled block below is a no-op string literal (never executed); it is
    # AutoGrow's pass-through handling of elite ligands from the previous
    # generation, kept verbatim for reference.
    '''
    ######################################################
    ###### add ligands from last generation ###############
    #######################################################
    # Before ranking these we need to handle Pass-Through ligands from the
    # last generation If it's current_gen_int==1 or if
    # vars['redock_elite_from_previous_gen'] is True -Both of these states
    # dock all ligands from the last generation so all of the pass-through
    # lig are already in the PDB's folder thus they should be accounted
    # for in smiles_list If vars['redock_elite_from_previous_gen'] is False
    # and current_gen_int != 1 - We need to append the scores form the
    # last gen to smiles_list
    # Only add these when we haven't already redocked the ligand
    if self.vars["redock_elite_from_previous_gen"] is False and current_gen_int != 0:
        # Go to previous generation folder
        prev_gen_num = str(current_gen_int - 1)
        run_folder = self.vars["output_directory"]
        previous_gen_folder = run_folder + "generation_{}{}".format(str(prev_gen_num), os.sep)
        ranked_smi_file_prev_gen = previous_gen_folder + "generation_{}_ranked.smi".format(str(prev_gen_num))
        # Also check sometimes Generation 1 won't have a previous
        # generation to do this with and sometimes it will
        if current_gen_int == 1 and os.path.exists(ranked_smi_file_prev_gen) is False:
            pass
        else:
            print("Getting ligand scores from the previous generation")
            # Shouldn't happen but to be safe.
            if os.path.exists(ranked_smi_file_prev_gen) is False:
                raise Exception("Previous generation ranked .smi file does not exist. "
                    + "Check if output folder has been moved")
            # Get the data for all ligands from previous generation ranked file
            prev_gen_data_list = Ranking.get_usable_format(ranked_smi_file_prev_gen)
            # Get the list of pass through ligands
            current_gen_pass_through_smi = current_generation_dir
            + "SeedFolder{}Chosen_Elite_To_advance_Gen_{}.smi".format(os.sep, str(current_gen_int))
            pass_through_list = Ranking.get_usable_format(current_gen_pass_through_smi)
            # Convert lists to searchable Dictionaries.
            prev_gen_data_dict = Ranking.convert_usable_list_to_lig_dict(prev_gen_data_list)
            pass_through_data = []
            for lig in pass_through_list:
                smile_plus_id = str(lig[0] + lig[1])
                lig_data = prev_gen_data_dict[smile_plus_id]
                lig_info_remove_diversity = [lig_data[x] for x in range(0, len(lig_data) - 1)]
                pass_through_data.append(lig_info_remove_diversity)
            smiles_list.extend(pass_through_data)
    '''
    # Output format of the .smi file will be: SMILES Full_lig_name
    # shorthandname ...AnyCustominfo... Fitness_metric diversity
    # Normally the docking score is the fitness metric but if we use a
    # Custom metric than dock score gets moved to index -3 and the new
    # fitness metric gets -2
    # sort list by the affinity of each sublist (which is the last index of sublist)
    smiles_list.sort(key=lambda x: float(x[-1]), reverse=False)
    # score the diversity of each ligand compared to the rest of the
    # ligands in the group this adds on a float in the last column for the
    # sum of pairwise comparisons the lower the diversity score the more
    # unique a molecule is from the other mols in the same generation
    smiles_list = Ranking.score_and_append_diversity_scores(smiles_list)
    # name for the output file
    output_ranked_smile_file = smile_file.replace(".smi", "") + "_ranked.smi"
    # save to a new output smiles file. ie. save to ranked_smiles_file
    with open(output_ranked_smile_file, "w") as output:
        for ligand_info_list in smiles_list:
            str_ligand_info_list = [str(x) for x in ligand_info_list]
            output_line = "\t".join(str_ligand_info_list) + "\n"
            output.write(output_line)
    # unweighted_ranked_smile_file = docking_object.rank_and_save_output_smi(
    #     vars,
    #     current_generation_dir,
    #     current_gen_int,
    #     smile_file_new_gen,
    #     deleted_smiles_names_list,
    #     )
# Dock, score and rank the converted seed compounds.
docking_pdbqt(args_dict, docking_object, './source_compoundsPDBs/')
print('docking done')
# ## TODO3 reaction for mutate
# reaction_list = []
# make_mutants
## TODO4 crossover between 2 ligands
# Hard stop: everything below (train() and its __main__ guard) is parsed but
# never executed because the script exits here.
exit()
def train():
    """RL training-loop scaffold (NOT functional).

    NOTE(review): everything after the early ``return`` is unreachable
    pseudo-code sketching the intended mutation/crossover policy training.
    Even the reachable part would raise NameError at call time: ``Ligand2D``
    and ``torch`` are only available via the imports commented out near the
    top of this script. The whole function is dead code anyway because the
    module calls exit() before the __main__ guard.
    """
    mutate_ligand_select_policy_net = Ligand2D()
    mutate_reaction_select_policy_net = Ligand2D()
    crossover_ligand1_policy_net = Ligand2D()
    crossover_ligand2_policy_net = Ligand2D()
    opt1 = torch.optim.Adam(mutate_ligand_select_policy_net.parameters(), lr=1e-3)
    opt2 = torch.optim.Adam(mutate_reaction_select_policy_net.parameters(), lr=1e-3)
    opt3 = torch.optim.Adam(crossover_ligand1_policy_net.parameters(), lr=1e-3)
    opt4 = torch.optim.Adam(crossover_ligand2_policy_net.parameters(), lr=1e-3)
    return
    # ---- unreachable sketch below; kept verbatim ----
    for step in range(n_step):
        ## select protein pocket
        reaction
        # 1. mutation
        for i in range(n_mutation):
            ## 1.1 select ligand_1 from population_list
            population_list
            ## get 3d ligand
            ## evaluate likelihood
            ## sample the ligand from policy distribution
            mutated_smiles, mutated_smiles_likelihood = policy_net.sample()
            ### 1.2 select reaction from reaction_list
            reaction_list
            reaction, mutated_reaction_likelihood = policy_net.sample()
            new_smiles = mutation(mutated_smiles, reaction)
            ### reward
            score = oracle(new_smiles)
            reward = score
        # crossover
        for i in range(n_crossover):
            ## select ligand_1
            crossover_ligand_1, crossover_ligand1_likelihood = policy_net.sample()
            ## select ligand_2 conditioned on ligand_1
            crossover_ligand_2, crossover_ligand2_likelihood = policy_net.sample()
            new_smiles = crossover(ligand_1, ligand_2)
            ### reward
            score = oracle(new_smiles)
            reward = score
        #### add Experience
        ## optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ##### logging
# Dead code: exit() above terminates the script before this guard can run.
if __name__ == '__main__':
    train()
"""
TODO
Experience
"""
| 37,208 | 35.055233 | 131 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/model.py | import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem, Descriptors
def smiles2fp(smiles_string):
    """Encode a SMILES string as a Morgan-fingerprint tensor.

    Computes a radius-2, 2048-bit Morgan fingerprint with RDKit and returns
    it as a float torch.Tensor of shape (1, 2048).
    """
    molecule = Chem.MolFromSmiles(smiles_string)
    Chem.SanitizeMol(molecule)
    bit_vect = AllChem.GetMorganFingerprintAsBitVect(molecule, 2, nBits=2048)
    # RDKit fills the numpy buffer in place from the bit vector.
    buffer = np.zeros((1,))
    DataStructs.ConvertToNumpyArray(bit_vect, buffer)
    return torch.from_numpy(buffer).float().view(1, -1)
class Ligand2D(nn.Module):
    """Score ligands from their 2D structure (Morgan fingerprints).

    A two-layer MLP over 2048-bit fingerprints produced by smiles2fp().

    forward(smiles_) accepts either:
      * a list of SMILES -> (log-probability tensor over the list,
        probabilities as a python list), i.e. a policy over candidates;
      * a single SMILES string -> raw score tensor of shape (1, 1).
    """
    def __init__(self, ):
        super(Ligand2D, self).__init__()
        self.input_mlp = nn.Linear(2048, 100)
        self.output_mlp = nn.Linear(100, 1)
    def forward(self, smiles_):
        """
        :param smiles_
            - list of SMILES string
            - SMILES string
        :returns: (log_probs, probs list) for a list input, or a (1, 1)
            score tensor for a single SMILES string.
        """
        if isinstance(smiles_, list):
            fps = [smiles2fp(s) for s in smiles_]
            fps = torch.cat(fps, 0)
            hidden_state = F.relu(self.input_mlp(fps))
            output = self.output_mlp(hidden_state)
            output = output.view(-1)
            # dim=0 normalizes across the candidate ligands; the explicit dim
            # pins the previous implicit-dim behavior on a 1-D tensor and
            # silences PyTorch's implicit-dim deprecation warning.
            log_output = F.log_softmax(output, dim=0)
            prob_output = F.softmax(output, dim=0)
            return log_output, prob_output.tolist()
        else:
            ### smiles string
            fingerprint = smiles2fp(smiles_)
            hidden_state = F.relu(self.input_mlp(fingerprint))
            output = self.output_mlp(hidden_state)
            return output ### [1,1]
class Ligand2D_product(nn.Module):
    '''Score candidate product SMILES conditioned on a parent ligand.

    Embeds the parent ligand and each candidate product from their Morgan
    fingerprints, concatenates the two embeddings per product, and maps each
    pair to a scalar; forward() returns a distribution over the candidates.
    '''
    def __init__(self, ):
        super(Ligand2D_product, self).__init__()
        self.ligand_mlp = nn.Linear(2048, 100)
        self.product_mlp = nn.Linear(2048, 100)
        self.output_mlp = nn.Linear(200, 1)
    def forward(self, ligand_smiles, product_smiles_list):
        """Return (log-prob tensor, prob list) over product_smiles_list."""
        n = len(product_smiles_list)
        ligand_fp = smiles2fp(ligand_smiles)
        ligand_embedding = F.relu(self.ligand_mlp(ligand_fp))
        # Pair the single ligand embedding with each of the n products.
        ligand_embedding = ligand_embedding.repeat(n,1)
        product_fps = [smiles2fp(smiles) for smiles in product_smiles_list]
        product_fps = torch.cat(product_fps, 0)
        product_embeddings = F.relu(self.product_mlp(product_fps))
        latent_variable = torch.cat([ligand_embedding, product_embeddings], 1)
        output = self.output_mlp(latent_variable).view(-1)
        # Explicit dim=0 over the n candidates: preserves the old implicit-dim
        # behavior on a 1-D tensor and avoids the deprecation warning.
        log_output = F.log_softmax(output, dim=0)
        prob_output = F.softmax(output, dim=0)
        return log_output, prob_output.tolist()
def atom2int(atom):
    """Map an element symbol to a small integer vocabulary id.

    Known symbols get their position in the vocabulary; any other symbol
    falls back to the trailing 'unknown' slot (index 5).
    """
    vocabulary = ['C', 'N', 'S', 'O', 'H', 'unknown']
    try:
        return vocabulary.index(atom)
    except ValueError:
        return len(vocabulary) - 1
def pdbtofeature(pdbfile, centers, pocket_size):
    """Extract pocket atoms from a receptor .pdb file as tensors.

    Keeps only ATOM records whose fixed-column x/y/z coordinates fall inside
    the axis-aligned box given by centers=(center_x, center_y, center_z) and
    pocket_size=(size_x, size_y, size_z).

    Returns:
        atom_idx:  LongTensor (1, N) of atom2int element ids
        positions: FloatTensor (N, 3) coordinates
        mask:      ByteTensor (1, N), all ones
    """
    with open(pdbfile, 'r') as fin:
        lines = fin.readlines()
    def featurize(line):
        # One PDB line -> (atom_type, coords), or None if filtered out.
        if line.split()[0]!='ATOM':
            return None
        # Sanity check that field 1 is an integer serial number.
        # NOTE(review): int() raises ValueError (instead of returning None)
        # for a non-integer numeric string like "12.5" -- confirm inputs.
        if int(line.split()[1])!=float(line.split()[1]):
            return None
        center_x, center_y, center_z = centers
        size_x, size_y, size_z = pocket_size
        # Coordinates come from the fixed PDB columns, not whitespace fields.
        # xx, yy, zz = float(line.split()[6]), float(line.split()[7]), float(line.split()[8])
        xx = float(line[30:38])
        yy = float(line[38:46])
        zz = float(line[46:54])
        # print('>>>> ', xx, yy, zz)
        # Drop atoms outside the docking box, one axis at a time.
        if xx < center_x-size_x/2 or xx > center_x+size_x/2:
            return None
        # print('<<<< ', xx, yy, zz)
        if yy < center_y-size_y/2 or yy > center_y+size_y/2:
            return None
        # print('++++ ', xx, yy, zz)
        if zz < center_z-size_z/2 or zz > center_z+size_z/2:
            return None
        # print('----- ', xx, yy, zz)
        # Element symbol is taken from the last whitespace field of the line.
        atom_type = line.split()[-1]
        atom_type = atom2int(atom_type)
        coordinates = torch.FloatTensor([xx, yy, zz]).view(1, -1)
        return atom_type, coordinates
    lines = list(map(featurize, lines))
    features = list(filter(lambda x:x is not None, lines))
    # NOTE(review): torch.cat below raises if no atom falls inside the box.
    atom_idx = torch.LongTensor([feature[0] for feature in features]).view(1,-1) #### (1,N)
    mask = torch.ByteTensor([True for feature in features]).view(1,-1) ##### (1,N)
    positions = torch.cat([feature[1] for feature in features], dim=0) ##### (N,3)
    return atom_idx, positions, mask
# Per-target tuples: (name, pdb path, center_x, center_y, center_z,
# size_x, size_y, size_z) -- every box here is a 15-unit cube around the
# binding-site center.
receptor_info_list = [
    ('4r6e', './pdb/4r6e.pdb', -70.76, 21.82, 28.33, 15.0, 15.0, 15.0),
    ('3pbl', './pdb/3pbl.pdb', 9, 22.5, 26, 15, 15, 15),
    ('1iep', './pdb/1iep.pdb', 15.6138918, 53.38013513, 15.454837, 15, 15, 15),
    ('2rgp', './pdb/2rgp.pdb', 16.29212, 34.870818, 92.0353, 15, 15, 15),
    ('3eml', './pdb/3eml.pdb', -9.06363, -7.1446, 55.86259999, 15, 15, 15),
    ('3ny8', './pdb/3ny8.pdb', 2.2488, 4.68495, 51.39820000000001, 15, 15, 15),
    ('4rlu', './pdb/4rlu.pdb', -0.73599, 22.75547, -31.23689, 15, 15, 15),
    ('4unn', './pdb/4unn.pdb', 5.684346153, 18.1917, -7.3715, 15, 15, 15),
    ('5mo4', './pdb/5mo4.pdb', -44.901, 20.490354, 8.48335, 15, 15, 15),
    ('7l11', './pdb/7l11.pdb', -21.81481, -4.21606, -27.98378, 15, 15, 15), ]
# Precompute pocket features for every receptor once, at import time.
# NOTE(review): importing this module therefore reads ./pdb/*.pdb as a side
# effect and fails if those files are missing.
receptor2pdbfeature = dict()
for receptor_info in receptor_info_list:
    name_of_receptor, filename_of_receptor, center_x, center_y, center_z, size_x, size_y, size_z = receptor_info
    # print('------ ' + name_of_receptor + ' --------')
    atom_idx, positions, mask = pdbtofeature(pdbfile=filename_of_receptor,
                                             centers=(center_x, center_y, center_z),
                                             pocket_size=(size_x, size_y, size_z))
    receptor2pdbfeature[name_of_receptor] = (atom_idx, positions, mask)
def pdbqtvina2feature(pdbqt_file):
    """Parse the first docked pose from a Vina .pdbqt output into tensors.

    Returns (atom_idx (1,N) LongTensor, positions (N,3) FloatTensor,
    mask (1,N) ByteTensor of ones) built from the pose's HETATM records.
    """
    with open(pdbqt_file, 'r') as fin:
        lines = fin.readlines()
    lines = [line.strip() for line in lines]
    # Keep only the first pose: drop the leading line (presumably "MODEL 1")
    # and truncate at the "MODEL 2" marker.
    # NOTE(review): lines.index raises ValueError when the file has a single
    # model and no "MODEL 2" line -- confirm upstream guarantees.
    line_indx = lines.index("MODEL 2")
    lines = lines[1:line_indx]
    def featurize(line):
        # One HETATM line -> (atom id, coords); anything else -> None.
        if line.split()[0]!='HETATM':
            return None
        atom = line.split()[2] #### 'C12'
        # Keep only letters from the atom name (all chars sorting above '9').
        atom = [i for i in atom if i > '9'] #### 'C'
        atom = ''.join(atom)
        atom = atom2int(atom)
        coordinates = torch.FloatTensor([float(i) for i in line.split()[6:9]]).view(1, -1)
        return atom, coordinates
    ### example: 2, torch.Tensor([-82.905, 15.268, 40.501])
    lines = list(map(featurize, lines))
    features = list(filter(lambda x:x is not None, lines))
    atom_idx = torch.LongTensor([feature[0] for feature in features]).view(1,-1) #### (1,N)
    mask = torch.ByteTensor([True for feature in features]).view(1,-1) #### (1,N)
    positions = torch.cat([feature[1] for feature in features], dim=0) #### (N,3)
    return atom_idx, positions, mask
def featurize_receptor_and_ligand(name_of_receptor, pdbqt_file):
    """Build a joint (atom ids, coordinates, mask) encoding of one receptor
    pocket plus one docked ligand pose.

    Receptor features come from the module-level receptor2pdbfeature cache;
    the ligand pose is parsed from its Vina output .pdbqt file.

    Returns:
        atom_idx:  LongTensor (1, N)
        positions: FloatTensor (1, N, 3)
        mask:      ByteTensor (1, N)
    """
    rec_idx, rec_pos, rec_mask = receptor2pdbfeature[name_of_receptor]
    lig_idx, lig_pos, lig_mask = pdbqtvina2feature(pdbqt_file)
    joined_idx = torch.cat([rec_idx, lig_idx], dim=1)
    joined_pos = torch.cat([rec_pos, lig_pos], dim=0).unsqueeze(0)
    joined_mask = torch.cat([rec_mask, lig_mask], dim=1)
    return joined_idx, joined_pos, joined_mask
def featurize_receptor_and_ligand_list(name_of_receptor, pdbqt_file_list):
    """Featurize one receptor pocket against many docked ligand poses.

    Returns one (atom_idx, positions, mask) triple per entry of
    ``pdbqt_file_list``; shapes match featurize_receptor_and_ligand.
    """
    # Receptor features are looked up once and shared across all ligands.
    rec_idx, rec_pos, rec_mask = receptor2pdbfeature[name_of_receptor]
    features = []
    for pdbqt_path in pdbqt_file_list:
        lig_idx, lig_pos, lig_mask = pdbqtvina2feature(pdbqt_path)
        atom_idx = torch.cat([rec_idx, lig_idx], dim=1)
        positions = torch.cat([rec_pos, lig_pos], dim=0).unsqueeze(0)
        mask = torch.cat([rec_mask, lig_mask], dim=1)
        features.append((atom_idx, positions, mask))
    return features
"""https://arxiv.org/pdf/2105.09016.pdf
TODO
- test equivariance
"""
class ENN(nn.Module):
    """EGNN-style equivariant message-passing network over atoms.

    Follows the update equations of E(n)-equivariant GNNs (the arXiv paper
    cited at the top of this module); the per-layer steps are annotated
    inline in forward(). Scores a receptor+ligand atom cloud as a scalar.

    Args:
        latent_dim: hidden feature width d.
        device: torch device modules are placed on.
        is_one_hot: if True, input_data are integer atom ids embedded via
            nn.Embedding; if False, input_data are already (.., N, d) floats
            (node_embedding is only created when is_one_hot is True).
        layer: number of message-passing iterations.
        vocab_size: atom-id vocabulary size for the embedding.
        coordinate_dim: accepted for symmetry; the stored value is fixed to 3.
    """
    def __init__(self, latent_dim = 50, device = torch.device('cpu'), is_one_hot = True, layer = 1, vocab_size=6, coordinate_dim = 3):
        super(ENN, self).__init__()
        self.latent_dim = latent_dim
        self.layer = layer
        self.is_one_hot = is_one_hot
        self.vocab_size = vocab_size
        self.coordinate_dim = 3
        self.device = device
        self.aggregate = torch.mean
        if is_one_hot:
            self.node_embedding = nn.Embedding(vocab_size, latent_dim).to(device)
        self.phi_e = nn.Sequential(
            nn.Linear(2*self.latent_dim+1, self.latent_dim),
            nn.Tanh(),
            nn.Linear(self.latent_dim, self.latent_dim),
            nn.Tanh()).to(device) ## edge message MLP: 2d+1 -> d
        self.phi_x = nn.Sequential(
            nn.Linear(self.latent_dim, self.latent_dim),
            nn.Tanh(),
            nn.Linear(self.latent_dim, 1),
            nn.Tanh(),
        ).to(device) ### coordinate-update gate: d->1
        self.phi_h = nn.Sequential(
            nn.Linear(self.latent_dim, self.latent_dim),
            nn.Tanh(),
            nn.Linear(self.latent_dim, self.latent_dim),
            nn.Tanh(),
        ).to(device) ### node-update MLP: d->d
        self.phi_inf = nn.Sequential(
            nn.Linear(self.latent_dim, 1),
            nn.Sigmoid(),
        ).to(device) ## soft edge weight: d->1
        self.output_mlp = nn.Linear(self.latent_dim, 1)
        # self.output_mlp = nn.Sequential(
        #     nn.Linear(self.latent_dim, 1),
        #     nn.Sigmoid(),
        #     )
    def forward(self, input_data, coordinate, mask):
        """
        Args:
            input_data: LongTensor(b,N) & FloatTensor(b,N,d)
            coordinate: b,N,3
            mask: b,N  (must be numeric-multiplicable; ByteTensor works via
                type promotion -- TODO confirm on current torch versions)
            where b = batchsize, N = max_num_of_atom
            A 4-D input (a1,a2,N,d) is also accepted: it is flattened to
            (a1*a2,N,d) for message passing and restored afterwards.
        Returns:
            (b,1)
        """
        transform = False
        H = self.node_embedding(input_data) if self.is_one_hot else input_data
        if H.dim() == 4: ##### (a1,a2,N,d)
            transform = True
            a1,a2,a3,a4 = H.shape
            H = H.view(-1,a3,a4) ## b,N,d
            b1,b2,b3,b4 = coordinate.shape
            coordinate = coordinate.view(-1,b3,b4) ### b,N,3
            d1,d2,d3 = mask.shape
            mask = mask.view(-1,d3) ## b,N
        b, N = H.shape[0], H.shape[1]
        X = coordinate ### b,N,3
        # Pairwise validity mask: entry (i,j) is 1 only if both atoms exist.
        mask_expand = mask.unsqueeze(-1) #### b,N,1
        mask_expand2 = mask_expand.permute(0,2,1) ### b,1,N
        mask_square = mask_expand * mask_expand2 ### b,N,N
        mask_square = mask_square.unsqueeze(-1) ### b,N,N,1
        for l in range(self.layer):
            ### 1. m_ij = phi_e(h_i, h_j, ||x_i^l - x_j^l||^2)
            H1 = H.unsqueeze(2).repeat(1,1,N,1) ### b,N,N,d
            H2 = H.unsqueeze(1).repeat(1,N,1,1) ### b,N,N,d
            x1 = X.unsqueeze(2).repeat(1,1,N,1) ### b,N,N,3
            x2 = X.unsqueeze(1).repeat(1,N,1,1) ### b,N,N,3
            x12 = torch.sum((x1-x2)**2 * mask_square, dim=-1, keepdim=True) ### b,N,N,1
            H12x = torch.cat([H1,H2,x12], -1) ### b,N,N,2d+1
            M = self.phi_e(H12x)*mask_square ### b,N,N,d
            ### 2. e_ij = phi_inf(m_ij)
            E = self.phi_inf(M) ### b,N,N,1
            ### 3. m_i = \sum e_ij m_ij
            M2 = torch.sum(M*E,1) ## b,N,d
            ### 4. x_i^{l+1} = x_i^l + \sum_{j\neq i} (x_i^l - x_j^l) phi_x(m_ij)
            X = X + torch.sum((x1 - x2) * mask_square * self.phi_x(M), dim=1) ## b,N,3
            ### 5. h_i^{l+1} = phi_h(h_i^l, m_i)  (residual connection)
            H = self.phi_h(M2) + H ### b,N,d
            H = H * mask_expand ### b,N,d
        if transform:
            H = H.view(a1,a2,a3,a4)
            mask = mask.view(d1,d2,d3)
        # NOTE(review): torch.mean divides by N including masked-out slots
        # (they contribute 0 to the sum but still count) -- confirm intended.
        H = self.aggregate(H*mask.unsqueeze(-1), dim = -2)
        H = nn.ReLU()(H)
        H = self.output_mlp(H)
        return H
    def forward_ligand_list(self, name_of_receptor, pdbqtvina_list):
        """Score each docked pose against the receptor and return
        (log-probability tensor, probability list) over the poses."""
        feature_list = featurize_receptor_and_ligand_list(name_of_receptor, pdbqtvina_list)
        output_list = []
        for atom_idx, positions, mask in feature_list:
            output = self.forward(atom_idx, positions, mask) #### [1,1]
            output_list.append(output)
        outputs = torch.cat(output_list, dim=0).view(-1)
        log_output = F.log_softmax(outputs, 0)
        prob_output = F.softmax(outputs, 0)
        # print("output probability", prob_output.tolist()[:3])
        return log_output, prob_output.tolist()
if __name__ == "__main__":
# model = Ligand2D()
# smiles = ['CCC', 'CCC']
# output = model(smiles)
# print(output.shape, output)
# output = model(smiles[0])
# model = Ligand2D_product()
# output = model(smiles[0], smiles)
# print(output)
# atom_idx, positions, mask = pdbqtvina2feature(pdbqt_file='4r6e_example.pdbqt.vina')
# print(atom_idx, positions, atom_idx.shape, positions.shape)
# atom_idx, positions, mask = pdbtofeature(pdbfile='./pdb/4r6e.pdb', centers=(-70.76, 21.82, 28.33), pocket_size=(18.0, 18.0, 18.0))
# print(atom_idx.shape, positions.shape)
# atom_idx, positions, mask = featurize_receptor_and_ligand(pdbfile='./pdb/4r6e.pdb',
# centers=(-70.76, 21.82, 28.33),
# pocket_size=(18.0, 18.0, 18.0),
# pdbqt_file='4r6e_example.pdbqt.vina')
# enn = ENN()
# output = enn(input_data = atom_idx, coordinate = positions, mask = mask)
# print(output.shape, output)
# output = enn(input_data = atom_idx, coordinate = positions+2, mask = mask)
# print(output.shape, output)
pass
| 12,570 | 33.535714 | 133 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/run.py | import argparse
PARSER = argparse.ArgumentParser()
# Allows the run commands to be submitted via a .json file.
PARSER.add_argument(
"--json",
"-j",
metavar="param.json",
help="Name of a json file containing all parameters. \
Overrides other arguments.",
)
# Allows the run in debug mode. Doesn't delete temp files.
PARSER.add_argument(
"--debug_mode",
"-d",
action="store_true",
default=False,
help="Run Autogrow in Debug mode. This keeps all \
temporary files and adds extra print statements.",
)
# receptor information
PARSER.add_argument(
"--filename_of_receptor",
"-r",
metavar="receptor.pdb",
default='./tutorial/PARP/4r6eA_PARP1_prepared.pdb',
help="The path to the receptor file. Should be .pdb file.",
)
PARSER.add_argument(
"--center_x",
"-x",
type=float,
default=-70.76,
help="x-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
"--center_y",
"-y",
type=float,
default=21.82,
help="y-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
"--center_z",
"-z",
type=float,
default=28.33,
help="z-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
"--size_x",
type=float,
default=25.0,
help="dimension of box to dock into in the x-axis (Angstrom)",
)
PARSER.add_argument(
"--size_y",
type=float,
default=20.0,
help="dimension of box to dock into in the y-axis (Angstrom)",
)
PARSER.add_argument(
"--size_z",
type=float,
default=25.0,
help="dimension of box to dock into in the z-axis (Angstrom)",
)
# Input/Output directories
PARSER.add_argument(
"--root_output_folder",
"-o",
type=str,
help="The Path to the folder which all output files will be placed.",
)
PARSER.add_argument(
"--source_compound_file",
"-s",
type=str,
default='./source_compounds/naphthalene_smiles.smi',
help="PATH to the file containing the source compounds. It must be \
tab-delineated .smi file. These ligands will seed the first generation.",
)
PARSER.add_argument(
"--filter_source_compounds",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="If True source ligands from source_compound_file will be \
filter using the user defined filter choices prior to the 1st generation being \
created. If False, ligands which would fail the ligand filters could seed \
the 1st generation. Default is True.",
)
PARSER.add_argument(
"--use_docked_source_compounds",
choices=[True, False, "True", "False", "true", "false"],
default=False,
help="If True source ligands will be docked prior to seeding generation 1. \
If True and the source_compound file already has docking/fitness metric score \
in -2 column of .smi file, it will not redock but reuse the scores from \
the source_compound_file.\
If True and no fitness metric score in -2 column of .smi file, it will \
dock each ligand from the source_compound_file and displayed as generation 0.\
If False, generation 1 will be randomly seeded by the source compounds with \
no preference and there will be no generation 0. \
If performing multiple simulations using same source compounds and protein, \
we recommend running once this and using the generation 0 ranked file as the \
source_compound_file for future simulations. \
Default is True.",
)
PARSER.add_argument(
"--start_a_new_run",
action="store_true",
default=False,
help="If False make a new folder and start a fresh simulation with Generation 0. \
If True find the last generation in the root_output_folder and continue to fill.\
Default is False.",
)
# SmilesMerge Settings
PARSER.add_argument(
"--max_time_MCS_prescreen",
type=int,
default=1,
help="amount time the pre-screen MCS times out. Time out doesnt prevent \
mcs matching just takes what it has up to that point",
)
PARSER.add_argument(
"--max_time_MCS_thorough",
type=int,
default=1,
help="amount time the thorough MCS times out. Time out doesnt prevent \
mcs matching just takes what it has up to that point",
)
PARSER.add_argument(
"--min_atom_match_MCS",
type=int,
default=4,
help="Determines the minimum number of atoms in common for a substructurematch. \
The higher the more restrictive, but the more likely for two ligands not to match",
)
PARSER.add_argument(
"--protanate_step",
action="store_true",
default=False,
help="Indicates if Smilesmerge uses protanated mols (if true) or deprot \
(if False) SmilesMerge is 10x faster when deprotanated",
)
# Mutation Settings
PARSER.add_argument(
"--rxn_library",
choices=["click_chem_rxns", "robust_rxns", "all_rxns", "Custom"],
default="all_rxns",
help="This set of reactions to be used in Mutation. \
If Custom, one must also provide rxn_file Path and function_group_library path",
)
PARSER.add_argument(
"--rxn_library_file",
type=str,
default="",
help="This PATH to a Custom json file of SMARTS reactions to use for Mutation. \
Only provide if using the Custom option for rxn_library.",
)
PARSER.add_argument(
"--function_group_library",
type=str,
default="",
help="This PATH for a dictionary of functional groups to be used for Mutation. \
Only provide if using the Custom option for rxn_library.",
)
PARSER.add_argument(
"--complementary_mol_directory",
type=str,
default="",
help="This PATH to the directory containing all the molecules being used \
to react with. The directory should contain .smi files contain SMILES of \
molecules containing the functional group represented by that file. Each file \
should be named with the same title as the functional groups described in \
rxn_library_file & function_group_library +.smi \
All Functional groups specified function_group_library must have its \
own .smi file. We recommend you filter these dictionaries prior to Autogrow \
for the Drug-likeliness and size filters you will Run Autogrow with.",
)
# processors and multithread mode
PARSER.add_argument(
"--number_of_processors",
"-p",
type=int,
metavar="N",
default=1,
help="Number of processors to use for parallel calculations. Set to -1 for all available CPUs.",
)
PARSER.add_argument(
"--multithread_mode",
default="multithreading",
choices=["mpi", "multithreading", "serial"],
help="Determine what style \
multithreading: mpi, multithreading, or serial. serial will override \
number_of_processors and force it to be on a single processor.",
)
# Genetic Algorithm Options
PARSER.add_argument(
"--selector_choice",
choices=["Roulette_Selector", "Rank_Selector", "Tournament_Selector"],
default="Roulette_Selector",
help="This determines whether the fitness criteria are chosen by a Weighted Roulette, \
Ranked, or Tournament style Selector. The Rank option is a non-redundant selector.\
Roulette and Tournament chose without replacement and are stoichastic options. \
Warning do not use Rank_Selector for small runs as there is potential that \
the number of desired ligands exceed the number of ligands to chose from.",
)
PARSER.add_argument(
"--tourn_size",
type=float,
default=0.1,
help="If using the Tournament_Selector this determines the size of each \
tournament. The number of ligands used for each tournament will the \
tourn_size * the number of considered ligands.",
)
# Seeding next gen and diversity
PARSER.add_argument(
"--top_mols_to_seed_next_generation_first_generation",
type=int,
help="Number of mols that seed next generation, for the first generation.\
Should be less than number_of_crossovers_first_generation + number_of_mutations_first_generation\
If not defined it will default to top_mols_to_seed_next_generation",
)
PARSER.add_argument(
"--top_mols_to_seed_next_generation",
type=int,
default=10,
help="Number of mols that seed next generation, for all generations after the first.\
Should be less than number_of_crossovers_first_generation \
+ number_of_mutations_first_generation",
)
PARSER.add_argument(
"--diversity_mols_to_seed_first_generation",
type=int,
default=10,
help="Should be less than number_of_crossovers_first_generation \
+ number_of_mutations_first_generation",
)
PARSER.add_argument(
"--diversity_seed_depreciation_per_gen",
type=int,
default=2,
help="Each gen diversity_mols_to_seed_first_generation will decrease this amount",
)
# Populations settings
PARSER.add_argument(
"--num_generations",
type=int,
default=10,
help="The number of generations to be created.",
)
PARSER.add_argument(
"--number_of_crossovers_first_generation",
type=int,
help="The number of ligands which will be created via crossovers in the \
first generation. If not defined it will default to number_of_crossovers",
)
PARSER.add_argument(
"--number_of_mutants_first_generation",
type=int,
help="The number of ligands which will be created via mutation in \
the first generation. If not defined it will default to number_of_mutants",
)
PARSER.add_argument(
"--number_elitism_advance_from_previous_gen_first_generation",
type=int,
help="The number of ligands chosen for elitism for the first generation \
These will advance from the previous generation directly into the next \
generation. This is purely advancing based on Docking/Rescore fitness. \
This does not select for diversity. If not defined it will default to \
number_elitism_advance_from_previous_gen",
)
PARSER.add_argument(
"--number_of_crossovers",
type=int,
default=10,
help="The number of ligands which will be created via crossover in each \
generation besides the first",
)
PARSER.add_argument(
"--number_of_mutants",
type=int,
default=10,
help="The number of ligands which will be created via mutation in each \
generation besides the first.",
)
PARSER.add_argument(
"--number_elitism_advance_from_previous_gen",
type=int,
default=10,
help="The number of ligands chosen for elitism. These will advance from \
the previous generation directly into the next generation. \
This is purely advancing based on Docking/Rescore \
fitness. This does not select for diversity.",
)
# Fixed help-text grammar: "If True than" -> "If True, then".
PARSER.add_argument(
    "--redock_elite_from_previous_gen",
    choices=[True, False, "True", "False", "true", "false"],
    default=False,
    help="If True, then ligands chosen via Elitism (ie advanced from last generation) \
    will be passed through Gypsum and docked again. This provides a better exploration of conformer space \
    but also requires more computation time. If False, advancing ligands are simply carried forward by \
    copying the PDBQT files.",
)
####### FILTER VARIABLES
PARSER.add_argument(
"--LipinskiStrictFilter",
action="store_true",
default=False,
help="Lipinski filters for orally available drugs following Lipinski rule of fives. \
Filters by molecular weight, logP and number of hydrogen bond donors and acceptors. \
Strict implementation means a ligand must pass all requirements.",
)
PARSER.add_argument(
"--LipinskiLenientFilter",
action="store_true",
default=False,
help="Lipinski filters for orally available drugs following Lipinski rule of fives. \
Filters by molecular weight, logP and number of hydrogen bond donors and acceptors. \
Lenient implementation means a ligand may fail all but one requirement and still passes.",
)
PARSER.add_argument(
"--GhoseFilter",
action="store_true",
default=False,
help="Ghose filters for drug-likeliness; filters by molecular weight,\
logP and number of atoms.",
)
PARSER.add_argument(
"--GhoseModifiedFilter",
action="store_true",
default=False,
help="Ghose filters for drug-likeliness; filters by molecular weight,\
logP and number of atoms. This is the same as the GhoseFilter, but \
the upper-bound of the molecular weight restrict is loosened from \
480Da to 500Da. This is intended to be run with Lipinski Filter and \
to match AutoGrow 3's Ghose Filter.",
)
PARSER.add_argument(
"--MozziconacciFilter",
action="store_true",
default=False,
help="Mozziconacci filters for drug-likeliness; filters by the number of \
rotatable bonds, rings, oxygens, and halogens.",
)
# Fixed help-text typos: "Sureface" -> "Surface", "substructure a search" ->
# "a substructure search", "undersirable" -> "undesirable".
PARSER.add_argument(
    "--VandeWaterbeemdFilter",
    action="store_true",
    default=False,
    help="VandeWaterbeemd filters for drugs likely to be blood brain barrier permeable. \
    Filters by molecular weight and Polar Surface Area (PSA).",
)
PARSER.add_argument(
    "--PAINSFilter",
    action="store_true",
    default=False,
    help="PAINS filters against Pan Assay Interference Compounds using \
    a substructure search.",
)
PARSER.add_argument(
    "--NIHFilter",
    action="store_true",
    default=False,
    help="NIH filters against molecules with undesirable functional groups \
    using a substructure search.",
)
PARSER.add_argument(
"--BRENKFilter",
action="store_true",
default=False,
help="BRENK filter for lead-likeliness, by matching common false positive \
molecules to the current mol.",
)
PARSER.add_argument(
"--No_Filters",
action="store_true",
default=False,
help="No filters will be applied to compounds.",
)
PARSER.add_argument(
"--alternative_filter",
action="append",
help="If you want to add Custom filters to the filter child classes \
Must be a list of lists \
[[name_filter1, Path/to/name_filter1.py],[name_filter2, Path/to/name_filter2.py]]",
)
# dependency variables
# DOCUMENT THE file conversion for docking inputs
PARSER.add_argument(
"--conversion_choice",
choices=["MGLToolsConversion", "ObabelConversion", "Custom"],
default="MGLToolsConversion",
help="Determines how .pdb files will be converted \
to the final format for docking. For Autodock Vina and QuickVina style docking software, \
files must be in .pdbqt format. MGLToolsConversion: uses MGLTools and is the \
recommended converter. MGLTools conversion is required for NNScore1/2 rescoring. \
ObabelConversion: uses commandline obabel. Easier to install but Vina docking has \
been optimized with MGLTools conversion.",
)
PARSER.add_argument(
"--custom_conversion_script",
metavar="custom_conversion_script",
default="",
help="The path to a python script for which is used to convert \
ligands. This is required for custom conversion_choice choices. \
Must be a list of strings \
[name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
PARSER.add_argument(
"--mgltools_directory",
metavar="mgltools_directory",
help="Required if using MGLTools conversion option \
(conversion_choice=MGLToolsConversion) \
Path may look like: /home/user/MGLTools-1.5.6/",
)
PARSER.add_argument(
"--mgl_python",
metavar="mgl_python",
required=False,
help="/home/user/MGLTools-1.5.4/bin/pythonsh",
)
PARSER.add_argument(
"--prepare_ligand4.py",
metavar="prepare_ligand4.py",
required=False,
help="/home/user/MGLTools-1.5.4/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_ligand4.py",
)
PARSER.add_argument(
"--prepare_receptor4.py",
metavar="prepare_receptor4.py",
required=False,
help="/home/userMGLTools-1.5.4/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_receptor4.py",
)
PARSER.add_argument(
"--obabel_path",
help="required if using obabel conversion \
option (conversion_choice=ObabelConversion).\
Path may look like PATH/envs/py37/bin/obabel; \
may be found on Linux by running: which obabel",
)
###################################
######### docking #################
###################################
PARSER.add_argument(
"--dock_choice",
metavar="dock_choice",
default="QuickVina2Docking",
choices=["VinaDocking", "QuickVina2Docking", "Custom"],
help="dock_choice assigns which docking software module to use.",
)
PARSER.add_argument(
"--docking_executable",
metavar="docking_executable",
default=None,
help="path to the docking_executable",
)
PARSER.add_argument(
"--docking_exhaustiveness",
metavar="docking_exhaustiveness",
default=None,
help="exhaustiveness of the global search (roughly proportional to time. \
see docking software for settings. Unless specified Autogrow uses the \
docking softwares default setting. For AutoDock Vina 1.1.2 that is 8",
)
PARSER.add_argument(
"--docking_num_modes",
metavar="docking_num_modes",
default=None,
help=" maximum number of binding modes to generate in docking. \
See docking software for settings. Unless specified Autogrow uses the \
docking softwares default setting. For AutoDock Vina 1.1.2 that is 9",
)
PARSER.add_argument(
"--docking_timeout_limit",
type=float,
default=120,
help="The maximum amount of time allowed to dock a single ligand into a \
pocket in seconds. Many factors influence the time required to dock, such as: \
processor speed, the docking software, rotatable bonds, exhaustiveness docking,\
and number of docking modes... \
The default docking_timeout_limit is 120 seconds, which is excess for most \
docking events using QuickVina2Docking under default settings. If run with \
more exhaustive settings or with highly flexible ligands, consider increasing \
docking_timeout_limit to accommodate. Default docking_timeout_limit is 120 seconds",
)
PARSER.add_argument(
"--custom_docking_script",
metavar="custom_docking_script",
default="",
help="The name and path to a python script for which is used to \
dock ligands. This is required for Custom docking choices Must be a list of \
strings [name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
# scoring
PARSER.add_argument(
"--scoring_choice",
metavar="scoring_choice",
choices=["VINA", "NN1", "NN2", "Custom"],
default="VINA",
help="The scoring_choice to use to assess the ligands docking fitness. \
Default is using Vina/QuickVina2 ligand affinity while NN1/NN2 use a Neural Network \
to assess the docking pose. Custom requires providing a file path for a Custom \
scoring function. If Custom scoring function, confirm it selects properly, \
Autogrow is largely set to select for a more negative score.",
)
PARSER.add_argument(
"--rescore_lig_efficiency",
action="store_true",
default=False,
help="This will divide the final scoring_choice output by the number of \
non-Hydrogen atoms in the ligand. This adjusted ligand efficiency score will \
override the scoring_choice value. This is compatible with all scoring_choice options.",
)
PARSER.add_argument(
"--custom_scoring_script",
metavar="custom_scoring_script",
type=str,
default="",
help="The path to a python script for which is used to \
assess the ligands docking fitness. Autogrow is largely set to select for a most \
negative scores (ie binding affinity the more negative is best). Must be a list of \
strings [name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
# gypsum # max variance is the number of conformers made per ligand
PARSER.add_argument(
"--max_variants_per_compound",
type=int,
default=3,
help="number of conformers made per ligand. \
See Gypsum-DL publication for details",
)
# PEP 8: no spaces around "=" in keyword arguments ("default = 3" -> "default=3"),
# matching every other add_argument call in this file.
PARSER.add_argument(
    "--gypsum_thoroughness",
    "-t",
    type=int,
    default=3,
    help="How widely Gypsum-DL will search for \
    low-energy conformers. Larger values increase \
    run times but can produce better results. \
    See Gypsum-DL publication for details",
)
PARSER.add_argument(
"--min_ph",
metavar="MIN",
type=float,
default=6.4,
help="Minimum pH to consider.See Gypsum-DL \
and Dimorphite-D publication for details.",
)
PARSER.add_argument(
"--max_ph",
metavar="MAX",
type=float,
default=8.4,
help="Maximum pH to consider.See Gypsum-DL \
and Dimorphite-D publication for details.",
)
PARSER.add_argument(
"--pka_precision",
metavar="D",
type=float,
default=1.0,
help="Size of pH substructure ranges. See Dimorphite-DL \
publication for details.",
)
PARSER.add_argument(
"--gypsum_timeout_limit",
type=float,
default=15,
help="Maximum time gypsum is allowed to run for a given ligand in seconds. \
On average Gypsum-DL takes on several seconds to run for a given ligand, but \
factors such as mol size, rotatable bonds, processor speed, and gypsum \
settings (ie gypsum_thoroughness or max_variants_per_compound) will change \
how long it takes to run. If increasing gypsum settings it is best to increase \
the gypsum_timeout_limit. Default gypsum_timeout_limit is 15 seconds",
)
# Reduce files down. This compiles and compresses the files in the PDBs folder
# (contains docking outputs, pdb, pdbqt...). This reduces the data size and
# makes data transfer quicker, but requires running the
# file_concatenation_and_compression.py in the Utility script folder to
# separate these files out for readability.
PARSER.add_argument(
"--reduce_files_sizes",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="Run this combines all files in the PDBs folder into a \
single text file. Useful when data needs to be transferred.",
)
# Make a line plot of the simulation at the end of the run.
PARSER.add_argument(
"--generate_plot",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="Make a line plot of the simulation at the end of the run.",
)
# mpi mode pre-Run so there are python cache files without EOF Errors
PARSER.add_argument(
"--cache_prerun",
"-c",
action="store_true",
help="Run this before running gypsum in mpi-mode.",
)
args_dict = vars(PARSER.parse_args())

from autogrow.user_vars import multiprocess_handling, define_defaults, determine_bash_timeout_vs_gtimeout
# args_dict = define_defaults()
import copy

# Keep only the options the user actually provided; options left at None fall
# back to the defaults defined in autogrow.user_vars.
INPUTS = copy.deepcopy(args_dict)
for k, v in args_dict.items():
    if v is None:
        del INPUTS[k]

if args_dict["cache_prerun"] is False:
    # load the commandline parameters
    from autogrow.user_vars import load_in_commandline_parameters
    args_dict, printout = load_in_commandline_parameters(INPUTS)
    # Set up the parallelizer (mpi / multithreading / serial) in args_dict.
    args_dict = multiprocess_handling(args_dict)

    # Autogrow shells out via a timeout command: `timeout` (GNU coreutils,
    # Linux) or `gtimeout` (coreutils on macOS).
    timeout_option = determine_bash_timeout_vs_gtimeout()
    if timeout_option in ["timeout", "gtimeout"]:
        args_dict["timeout_vs_gtimeout"] = timeout_option
    else:
        raise Exception("Something is very wrong. This OS may not be supported by \
        Autogrow or you may need to execute through Bash.")
'''
python run.py \
--filename_of_receptor ./tutorial/PARP/4r6eA_PARP1_prepared.pdb \
--center_x -70.76 --center_y 21.82 --center_z 28.33 \
--size_x 25.0 --size_y 16.0 --size_z 25.0 \
--source_compound_file ./source_compounds/naphthalene_smiles.smi \
--root_output_folder ./output \
--number_of_mutants_first_generation 50 \
--number_of_crossovers_first_generation 50 \
--number_of_mutants 50 \
--number_of_crossovers 50 \
--top_mols_to_seed_next_generation 50 \
--number_elitism_advance_from_previous_gen 50 \
--number_elitism_advance_from_previous_gen_first_generation 10 \
--diversity_mols_to_seed_first_generation 10 \
--diversity_seed_depreciation_per_gen 10 \
--num_generations 5 \
--mgltools_directory ./mgltools_x86_64Linux2_1.5.6/ \
--number_of_processors -1 \
--scoring_choice VINA \
--LipinskiLenientFilter \
--start_a_new_run \
--rxn_library click_chem_rxns \
--selector_choice Rank_Selector \
--dock_choice VinaDocking \
--max_variants_per_compound 5 \
--redock_elite_from_previous_gen False \
--generate_plot True \
--reduce_files_sizes True \
--use_docked_source_compounds True
'''
import numpy as np
import os, json, pickle, time, sys
# import torch
# from autogrow.model import Ligand2D

"""
- docking
- reaction (mutation)
- common structure (crossover)
- train policy
"""

# Receptor / docking-box settings for the PARP tutorial target.
# NOTE(review): these three module-level values are never read below (docking
# pulls its settings from args_dict), and box_size (20 A) disagrees with the
# argparse defaults (25/20/25) — confirm before relying on them.
filename_of_receptor = "./tutorial/PARP/4r6eA_PARP1_prepared.pdb"
center = [-70.76, 21.82, 28.33]
box_size = [20.0, 20.0, 20.0]

# Seed population: first whitespace-delimited column of each line is a SMILES.
source_compound_file = args_dict['source_compound_file']
with open(source_compound_file, 'r') as fin:
    lines = fin.readlines()
population_list = [line.split()[0] for line in lines]

## TODO1 smiles -> sdf -> pdb -> pdbqt
import autogrow.operators.convert_files.conversion_to_3d as conversion_to_3d
# conversion_to_3d.convert_smi_to_sdfs_with_gypsum
# conversion_to_3d.convert_sdf_to_pdbs
# convert_sdf_to_pdbs(vars, gen_folder_path, sdfs_folder_path)
# conversion_to_3d.convert_single_sdf_to_pdb
# convert_ligand_pdb_file_to_pdbqt #### in run_docking_common lig_convert_multithread
def smiles_to_sdfs(vars, gen_smiles_file, smile_file_directory):
    """Convert a .smi file of SMILES into 3D .sdf files with Gypsum-DL.

    Adapted from conversion_to_3d.convert_smi_to_sdfs_with_gypsum.

    Inputs:
    :param dict vars: user variables (gypsum settings and the "parallelizer")
    :param str gen_smiles_file: path to the .smi file to convert
    :param str smile_file_directory: directory in which the gypsum submission
        files and the 3D_SDFs output folder are created

    Returns:
    :returns: str gypsum_output_folder_path: folder that receives the .sdf files
    """
    max_variants_per_compound = vars["max_variants_per_compound"]
    gypsum_thoroughness = vars["gypsum_thoroughness"]
    min_ph = vars["min_ph"]
    max_ph = vars["max_ph"]
    pka_precision = vars["pka_precision"]
    gypsum_timeout_limit = vars["gypsum_timeout_limit"]

    # Folder for the per-ligand .smi and .json files submitted to gypsum.
    # exist_ok=True replaces the racy "exists() then makedirs()" pattern.
    folder_path = "{}gypsum_submission_files{}".format(smile_file_directory, os.sep)
    os.makedirs(folder_path, exist_ok=True)

    # Output folder for Gypsum (where the .sdf's go).
    gypsum_output_folder_path = "{}3D_SDFs{}".format(smile_file_directory, os.sep)
    os.makedirs(gypsum_output_folder_path, exist_ok=True)

    # Log folder inside the 3D_SDFs folder.
    gypsum_log_path = "{}log{}".format(gypsum_output_folder_path, os.sep)
    os.makedirs(gypsum_log_path, exist_ok=True)

    # Build one gypsum parameter set (json file) per ligand.
    list_of_gypsum_params = conversion_to_3d.make_smi_and_gyspum_params(
        gen_smiles_file,
        folder_path,
        gypsum_output_folder_path,
        max_variants_per_compound, gypsum_thoroughness,
        min_ph, max_ph, pka_precision, )

    # Create the job inputs and run gypsum over all ligands in parallel.
    # Each job returns the SMILES string on failure and None on success.
    job_input = tuple(
        (gypsum_log_path, gypsum_params, gypsum_timeout_limit)
        for gypsum_params in list_of_gypsum_params
    )
    sys.stdout.flush()
    failed_to_convert = vars["parallelizer"].run(job_input, conversion_to_3d.run_gypsum_multiprocessing)
    sys.stdout.flush()

    # Report unique ligands that failed to convert (usually timeouts).
    lig_failed_to_convert = list({x for x in failed_to_convert if x is not None})
    if lig_failed_to_convert:
        print("The Following ligands Failed to convert in Gypsum")
        print("Likely due to a Timeout")
        print(lig_failed_to_convert)
        sys.stdout.flush()
    return gypsum_output_folder_path
from autogrow.docking.execute_docking import pick_run_conversion_class_dict, pick_docking_class_dict, lig_convert_multithread
def pdb_to_pdbqt(vars, pdb_dir):
    """Convert every ligand .pdb file in ``pdb_dir`` to .pdbqt format.

    Adapted from run_docking_common. Builds the file-conversion and docking
    objects, fills in vars["docking_executable"] when unset, then converts
    the ligands in parallel.

    Inputs:
    :param dict vars: user variables (dock/conversion choices, receptor,
        and the "parallelizer")
    :param str pdb_dir: folder containing the ligand .pdb files

    Returns:
    :returns: object docking_object: the initialized docking object
    """
    dock_choice = vars["dock_choice"]
    conversion_choice = vars["conversion_choice"]
    receptor = vars["filename_of_receptor"]

    # Copy vars minus the parallelizer so mpi multiprocess state is not
    # passed through itself.
    temp_vars = {key: val for key, val in vars.items() if key != "parallelizer"}

    converter_class = pick_run_conversion_class_dict(conversion_choice)
    converter = converter_class(temp_vars, receptor, test_boot=False)

    docking_class = pick_docking_class_dict(dock_choice)
    docking_object = docking_class(temp_vars, receptor, converter, test_boot=False)

    # Locate the Vina/QVina executable if the user did not provide one.
    if vars["docking_executable"] is None:
        vars["docking_executable"] = docking_object.get_docking_executable_file(temp_vars)

    # Find the PDB ligands and convert each one in parallel.
    pdbs_in_folder = docking_object.find_pdb_ligands(pdb_dir)
    print('all pdb file', pdbs_in_folder, pdb_dir)
    conversion_jobs = tuple((docking_object, pdb) for pdb in pdbs_in_folder)
    print("Convert Ligand from PDB to PDBQT format")
    smiles_names_failed_to_convert = vars["parallelizer"].run(conversion_jobs, lig_convert_multithread)

    pdbqts_in_folder = docking_object.find_converted_ligands(pdb_dir)
    print('pdbqt file', pdbqts_in_folder)
    return docking_object
# Pipeline step 1: SMILES -> 3D SDFs -> PDBs -> PDBQTs.
gypsum_output_folder_path = smiles_to_sdfs(args_dict, source_compound_file, smile_file_directory='./source_compounds')
conversion_to_3d.convert_sdf_to_pdbs(args_dict, gen_folder_path='./source_compounds', sdfs_folder_path='./source_compounds3D_SDFs')
docking_object = pdb_to_pdbqt(vars = args_dict, pdb_dir = './source_compoundsPDBs/')
### output directory: source_compoundsPDBs
## TODO2 dock a pdbqt
from autogrow.docking.execute_docking import run_dock_multithread, run_docking_common
import autogrow.docking.scoring.execute_scoring_mol as Scoring
# run_dock
# dock_ligand
def docking_pdbqt(vars, docking_object, pdbqt_folder, smile_file=None):
    """Dock every converted .pdbqt ligand found in ``pdbqt_folder``.

    Bug fix: the original scoring call referenced the undefined names
    ``smile_file`` and ``folder_with_pdbqts`` and always raised NameError.
    Scoring now uses ``pdbqt_folder`` and only runs when a ``smile_file``
    is supplied (backward-compatible keyword parameter).

    Inputs:
    :param dict vars: user variables (scoring settings and the "parallelizer")
    :param object docking_object: docking object returned by pdb_to_pdbqt
    :param str pdbqt_folder: folder containing the .pdbqt ligands
    :param str smile_file: optional .smi file to score against; when None,
        the scoring step is skipped

    Returns:
    :returns: list smiles_list: scored SMILES when smile_file is given,
        otherwise None
    """
    # Dock all converted ligands in parallel; each job returns the SMILES
    # name on failure and None on success.
    pdbqts_in_folder = docking_object.find_converted_ligands(pdbqt_folder)
    job_input_dock_lig = tuple((docking_object, pdbqt) for pdbqt in pdbqts_in_folder)
    smiles_names_failed_to_dock = vars["parallelizer"].run(job_input_dock_lig, run_dock_multithread)

    # Report unique ligands that failed to dock.
    deleted_smiles_names_list_dock = list({x for x in smiles_names_failed_to_dock if x is not None})
    print("THE FOLLOWING LIGANDS WHICH FAILED TO DOCK:", deleted_smiles_names_list_dock)
    print("####################")
    print("#################### save results #####################")
    print("\nBegin Ranking and Saving results")

    if smile_file is not None:
        # NOTE(review): ranking/saving via docking_object.rank_and_save_output_smi
        # was sketched here in the original but never wired up.
        smiles_list = Scoring.run_scoring_common(vars, smile_file, pdbqt_folder)
        return smiles_list
    return None
# Pipeline step 2: dock the converted PDBQT ligands.
docking_pdbqt(args_dict, docking_object, './source_compoundsPDBs')
print('docking done')
# ## TODO3 reaction for mutate
# reaction_list = []
# make_mutants
## TODO4 crossover between 2 ligands
# NOTE(review): hard stop — nothing below this line (train() and the
# __main__ guard) ever executes.
exit()
def train():
    """Draft RL training scaffold (work in progress).

    NOTE(review): everything after the early ``return`` below is unreachable
    pseudo-code sketching the intended policy-gradient loop. The names
    ``Ligand2D``, ``torch``, ``n_step``, ``policy_net``, ``oracle``,
    ``mutation`` and ``crossover`` are not defined here (the relevant imports
    are commented out at the top of the script) — confirm before enabling.
    """
    # Four policy networks: two for mutation (ligand + reaction selection)
    # and two for crossover (first + second ligand selection).
    mutate_ligand_select_policy_net = Ligand2D()
    mutate_reaction_select_policy_net = Ligand2D()
    crossover_ligand1_policy_net = Ligand2D()
    crossover_ligand2_policy_net = Ligand2D()
    opt1 = torch.optim.Adam(mutate_ligand_select_policy_net.parameters(), lr=1e-3)
    opt2 = torch.optim.Adam(mutate_reaction_select_policy_net.parameters(), lr=1e-3)
    opt3 = torch.optim.Adam(crossover_ligand1_policy_net.parameters(), lr=1e-3)
    opt4 = torch.optim.Adam(crossover_ligand2_policy_net.parameters(), lr=1e-3)
    return
    # ----- unreachable draft below this point -----
    for step in range(n_step):
        ## select protein pocket
        reaction
        # 1. mutation
        for i in range(n_mutation):
            ## 1.1 select ligand_1 from population_list
            population_list
            ## get 3d ligand
            ## evaluate likelihood
            ## sample the ligand from policy distribution
            mutated_smiles, mutated_smiles_likelihood = policy_net.sample()
            ### 1.2 select reaction from reaction_list
            reaction_list
            reaction, mutated_reaction_likelihood = policy_net.sample()
            new_smiles = mutation(mutated_smiles, reaction)
            ### reward
            score = oracle(new_smiles)
            reward = score
        # crossover
        for i in range(n_crossover):
            ## select ligand_1
            crossover_ligand_1, crossover_ligand1_likelihood = policy_net.sample()
            ## select ligand_2 conditioned on ligand_1
            crossover_ligand_2, crossover_ligand2_likelihood = policy_net.sample()
            new_smiles = crossover(ligand_1, ligand_2)
            ### reward
            score = oracle(new_smiles)
            reward = score
        #### add Experience
        ## optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ##### logging
# NOTE(review): the unconditional exit() earlier in the script terminates the
# process before this guard is reached, so train() never runs.
if __name__ == '__main__':
    train()
"""
TODO
Experience
"""
| 32,721 | 33.553326 | 131 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/smiles2dockscore.py | import argparse
PARSER = argparse.ArgumentParser()
# Allows the run commands to be submitted via a .json file.
PARSER.add_argument(
"--json",
"-j",
metavar="param.json",
help="Name of a json file containing all parameters. \
Overrides other arguments.",
)
# Allows the run in debug mode. Doesn't delete temp files.
PARSER.add_argument(
"--debug_mode",
"-d",
action="store_true",
default=False,
help="Run Autogrow in Debug mode. This keeps all \
temporary files and adds extra print statements.",
)
# receptor information
PARSER.add_argument(
"--filename_of_receptor",
"-r",
metavar="receptor.pdb",
default='./tutorial/PARP/4r6eA_PARP1_prepared.pdb',
help="The path to the receptor file. Should be .pdb file.",
)
PARSER.add_argument(
"--center_x",
"-x",
type=float,
default=-70.76,
help="x-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
"--center_y",
"-y",
type=float,
default=21.82,
help="y-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
"--center_z",
"-z",
type=float,
default=28.33,
help="z-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
"--size_x",
type=float,
default=25.0,
help="dimension of box to dock into in the x-axis (Angstrom)",
)
PARSER.add_argument(
"--size_y",
type=float,
default=20.0,
help="dimension of box to dock into in the y-axis (Angstrom)",
)
PARSER.add_argument(
"--size_z",
type=float,
default=25.0,
help="dimension of box to dock into in the z-axis (Angstrom)",
)
# Input/Output directories
PARSER.add_argument(
"--root_output_folder",
"-o",
type=str,
help="The Path to the folder which all output files will be placed.",
)
PARSER.add_argument(
"--source_compound_file",
"-s",
type=str,
default='./source_compounds/naphthalene_smiles.smi',
help="PATH to the file containing the source compounds. It must be \
tab-delineated .smi file. These ligands will seed the first generation.",
)
PARSER.add_argument(
"--filter_source_compounds",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="If True source ligands from source_compound_file will be \
filter using the user defined filter choices prior to the 1st generation being \
created. If False, ligands which would fail the ligand filters could seed \
the 1st generation. Default is True.",
)
PARSER.add_argument(
"--use_docked_source_compounds",
choices=[True, False, "True", "False", "true", "false"],
default=False,
help="If True source ligands will be docked prior to seeding generation 1. \
If True and the source_compound file already has docking/fitness metric score \
in -2 column of .smi file, it will not redock but reuse the scores from \
the source_compound_file.\
If True and no fitness metric score in -2 column of .smi file, it will \
dock each ligand from the source_compound_file and displayed as generation 0.\
If False, generation 1 will be randomly seeded by the source compounds with \
no preference and there will be no generation 0. \
If performing multiple simulations using same source compounds and protein, \
we recommend running once this and using the generation 0 ranked file as the \
source_compound_file for future simulations. \
Default is True.",
)
PARSER.add_argument(
"--start_a_new_run",
action="store_true",
default=False,
help="If False make a new folder and start a fresh simulation with Generation 0. \
If True find the last generation in the root_output_folder and continue to fill.\
Default is False.",
)
# SmilesMerge Settings
# Fixed garbled help text ("amount time", "doesnt", run-on sentence).
PARSER.add_argument(
    "--max_time_MCS_prescreen",
    type=int,
    default=1,
    help="amount of time before the pre-screen MCS times out. Timing out doesn't \
    prevent MCS matching; it just uses what has been found up to that point",
)
PARSER.add_argument(
    "--max_time_MCS_thorough",
    type=int,
    default=1,
    help="amount of time before the thorough MCS times out. Timing out doesn't \
    prevent MCS matching; it just uses what has been found up to that point",
)
PARSER.add_argument(
"--min_atom_match_MCS",
type=int,
default=4,
help="Determines the minimum number of atoms in common for a substructurematch. \
The higher the more restrictive, but the more likely for two ligands not to match",
)
PARSER.add_argument(
"--protanate_step",
action="store_true",
default=False,
help="Indicates if Smilesmerge uses protanated mols (if true) or deprot \
(if False) SmilesMerge is 10x faster when deprotanated",
)
# Mutation Settings
PARSER.add_argument(
"--rxn_library",
choices=["click_chem_rxns", "robust_rxns", "all_rxns", "Custom"],
default="all_rxns",
help="This set of reactions to be used in Mutation. \
If Custom, one must also provide rxn_file Path and function_group_library path",
)
PARSER.add_argument(
"--rxn_library_file",
type=str,
default="",
help="This PATH to a Custom json file of SMARTS reactions to use for Mutation. \
Only provide if using the Custom option for rxn_library.",
)
PARSER.add_argument(
"--function_group_library",
type=str,
default="",
help="This PATH for a dictionary of functional groups to be used for Mutation. \
Only provide if using the Custom option for rxn_library.",
)
PARSER.add_argument(
"--complementary_mol_directory",
type=str,
default="",
help="This PATH to the directory containing all the molecules being used \
to react with. The directory should contain .smi files contain SMILES of \
molecules containing the functional group represented by that file. Each file \
should be named with the same title as the functional groups described in \
rxn_library_file & function_group_library +.smi \
All Functional groups specified function_group_library must have its \
own .smi file. We recommend you filter these dictionaries prior to Autogrow \
for the Drug-likeliness and size filters you will Run Autogrow with.",
)
# processors and multithread mode
PARSER.add_argument(
"--number_of_processors",
"-p",
type=int,
metavar="N",
default=1,
help="Number of processors to use for parallel calculations. Set to -1 for all available CPUs.",
)
PARSER.add_argument(
"--multithread_mode",
default="multithreading",
choices=["mpi", "multithreading", "serial"],
help="Determine what style \
multithreading: mpi, multithreading, or serial. serial will override \
number_of_processors and force it to be on a single processor.",
)
# Genetic Algorithm Options
# Fixed help-text typos: "stoichastic" -> "stochastic", "chose" -> "choose",
# "exceed" -> "exceeds".
PARSER.add_argument(
    "--selector_choice",
    choices=["Roulette_Selector", "Rank_Selector", "Tournament_Selector"],
    default="Roulette_Selector",
    help="This determines whether the fitness criteria are chosen by a Weighted Roulette, \
    Ranked, or Tournament style Selector. The Rank option is a non-redundant selector. \
    Roulette and Tournament choose without replacement and are stochastic options. \
    Warning: do not use Rank_Selector for small runs as there is potential that \
    the number of desired ligands exceeds the number of ligands to choose from.",
)
PARSER.add_argument(
"--tourn_size",
type=float,
default=0.1,
help="If using the Tournament_Selector this determines the size of each \
tournament. The number of ligands used for each tournament will the \
tourn_size * the number of considered ligands.",
)
# Seeding next gen and diversity
PARSER.add_argument(
"--top_mols_to_seed_next_generation_first_generation",
type=int,
help="Number of mols that seed next generation, for the first generation.\
Should be less than number_of_crossovers_first_generation + number_of_mutations_first_generation\
If not defined it will default to top_mols_to_seed_next_generation",
)
PARSER.add_argument(
"--top_mols_to_seed_next_generation",
type=int,
default=10,
help="Number of mols that seed next generation, for all generations after the first.\
Should be less than number_of_crossovers_first_generation \
+ number_of_mutations_first_generation",
)
PARSER.add_argument(
"--diversity_mols_to_seed_first_generation",
type=int,
default=10,
help="Should be less than number_of_crossovers_first_generation \
+ number_of_mutations_first_generation",
)
PARSER.add_argument(
"--diversity_seed_depreciation_per_gen",
type=int,
default=2,
help="Each gen diversity_mols_to_seed_first_generation will decrease this amount",
)
# Populations settings
PARSER.add_argument(
"--num_generations",
type=int,
default=10,
help="The number of generations to be created.",
)
PARSER.add_argument(
"--number_of_crossovers_first_generation",
type=int,
help="The number of ligands which will be created via crossovers in the \
first generation. If not defined it will default to number_of_crossovers",
)
PARSER.add_argument(
"--number_of_mutants_first_generation",
type=int,
help="The number of ligands which will be created via mutation in \
the first generation. If not defined it will default to number_of_mutants",
)
PARSER.add_argument(
"--number_elitism_advance_from_previous_gen_first_generation",
type=int,
help="The number of ligands chosen for elitism for the first generation \
These will advance from the previous generation directly into the next \
generation. This is purely advancing based on Docking/Rescore fitness. \
This does not select for diversity. If not defined it will default to \
number_elitism_advance_from_previous_gen",
)
PARSER.add_argument(
"--number_of_crossovers",
type=int,
default=10,
help="The number of ligands which will be created via crossover in each \
generation besides the first",
)
PARSER.add_argument(
"--number_of_mutants",
type=int,
default=10,
help="The number of ligands which will be created via mutation in each \
generation besides the first.",
)
PARSER.add_argument(
"--number_elitism_advance_from_previous_gen",
type=int,
default=10,
help="The number of ligands chosen for elitism. These will advance from \
the previous generation directly into the next generation. \
This is purely advancing based on Docking/Rescore \
fitness. This does not select for diversity.",
)
# Fixed help-text grammar: "If True than" -> "If True, then".
PARSER.add_argument(
    "--redock_elite_from_previous_gen",
    choices=[True, False, "True", "False", "true", "false"],
    default=False,
    help="If True, then ligands chosen via Elitism (ie advanced from last generation) \
    will be passed through Gypsum and docked again. This provides a better exploration of conformer space \
    but also requires more computation time. If False, advancing ligands are simply carried forward by \
    copying the PDBQT files.",
)
####### FILTER VARIABLES
PARSER.add_argument(
"--LipinskiStrictFilter",
action="store_true",
default=False,
help="Lipinski filters for orally available drugs following Lipinski rule of fives. \
Filters by molecular weight, logP and number of hydrogen bond donors and acceptors. \
Strict implementation means a ligand must pass all requirements.",
)
PARSER.add_argument(
"--LipinskiLenientFilter",
action="store_true",
default=False,
help="Lipinski filters for orally available drugs following Lipinski rule of fives. \
Filters by molecular weight, logP and number of hydrogen bond donors and acceptors. \
Lenient implementation means a ligand may fail all but one requirement and still passes.",
)
PARSER.add_argument(
"--GhoseFilter",
action="store_true",
default=False,
help="Ghose filters for drug-likeliness; filters by molecular weight,\
logP and number of atoms.",
)
PARSER.add_argument(
"--GhoseModifiedFilter",
action="store_true",
default=False,
help="Ghose filters for drug-likeliness; filters by molecular weight,\
logP and number of atoms. This is the same as the GhoseFilter, but \
the upper-bound of the molecular weight restrict is loosened from \
480Da to 500Da. This is intended to be run with Lipinski Filter and \
to match AutoGrow 3's Ghose Filter.",
)
PARSER.add_argument(
"--MozziconacciFilter",
action="store_true",
default=False,
help="Mozziconacci filters for drug-likeliness; filters by the number of \
rotatable bonds, rings, oxygens, and halogens.",
)
# Fixed help-text typos: "Sureface" -> "Surface", "substructure a search" ->
# "a substructure search", "undersirable" -> "undesirable".
PARSER.add_argument(
    "--VandeWaterbeemdFilter",
    action="store_true",
    default=False,
    help="VandeWaterbeemd filters for drugs likely to be blood brain barrier permeable. \
    Filters by molecular weight and Polar Surface Area (PSA).",
)
PARSER.add_argument(
    "--PAINSFilter",
    action="store_true",
    default=False,
    help="PAINS filters against Pan Assay Interference Compounds using \
    a substructure search.",
)
PARSER.add_argument(
    "--NIHFilter",
    action="store_true",
    default=False,
    help="NIH filters against molecules with undesirable functional groups \
    using a substructure search.",
)
PARSER.add_argument(
"--BRENKFilter",
action="store_true",
default=False,
help="BRENK filter for lead-likeliness, by matching common false positive \
molecules to the current mol.",
)
PARSER.add_argument(
"--No_Filters",
action="store_true",
default=False,
help="No filters will be applied to compounds.",
)
PARSER.add_argument(
"--alternative_filter",
action="append",
help="If you want to add Custom filters to the filter child classes \
Must be a list of lists \
[[name_filter1, Path/to/name_filter1.py],[name_filter2, Path/to/name_filter2.py]]",
)
# dependency variables
# DOCUMENT THE file conversion for docking inputs
PARSER.add_argument(
"--conversion_choice",
choices=["MGLToolsConversion", "ObabelConversion", "Custom"],
default="MGLToolsConversion",
help="Determines how .pdb files will be converted \
to the final format for docking. For Autodock Vina and QuickVina style docking software, \
files must be in .pdbqt format. MGLToolsConversion: uses MGLTools and is the \
recommended converter. MGLTools conversion is required for NNScore1/2 rescoring. \
ObabelConversion: uses commandline obabel. Easier to install but Vina docking has \
been optimized with MGLTools conversion.",
)
PARSER.add_argument(
"--custom_conversion_script",
metavar="custom_conversion_script",
default="",
help="The path to a python script for which is used to convert \
ligands. This is required for custom conversion_choice choices. \
Must be a list of strings \
[name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
PARSER.add_argument(
"--mgltools_directory",
metavar="mgltools_directory",
help="Required if using MGLTools conversion option \
(conversion_choice=MGLToolsConversion) \
Path may look like: /home/user/MGLTools-1.5.6/",
)
PARSER.add_argument(
"--mgl_python",
metavar="mgl_python",
required=False,
help="/home/user/MGLTools-1.5.4/bin/pythonsh",
)
PARSER.add_argument(
"--prepare_ligand4.py",
metavar="prepare_ligand4.py",
required=False,
help="/home/user/MGLTools-1.5.4/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_ligand4.py",
)
PARSER.add_argument(
"--prepare_receptor4.py",
metavar="prepare_receptor4.py",
required=False,
help="/home/userMGLTools-1.5.4/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_receptor4.py",
)
PARSER.add_argument(
"--obabel_path",
help="required if using obabel conversion \
option (conversion_choice=ObabelConversion).\
Path may look like PATH/envs/py37/bin/obabel; \
may be found on Linux by running: which obabel",
)
###################################
######### docking #################
###################################
PARSER.add_argument(
"--dock_choice",
metavar="dock_choice",
default="QuickVina2Docking",
choices=["VinaDocking", "QuickVina2Docking", "Custom"],
help="dock_choice assigns which docking software module to use.",
)
PARSER.add_argument(
"--docking_executable",
metavar="docking_executable",
default=None,
help="path to the docking_executable",
)
PARSER.add_argument(
"--docking_exhaustiveness",
metavar="docking_exhaustiveness",
default=None,
help="exhaustiveness of the global search (roughly proportional to time. \
see docking software for settings. Unless specified Autogrow uses the \
docking softwares default setting. For AutoDock Vina 1.1.2 that is 8",
)
PARSER.add_argument(
"--docking_num_modes",
metavar="docking_num_modes",
default=None,
help=" maximum number of binding modes to generate in docking. \
See docking software for settings. Unless specified Autogrow uses the \
docking softwares default setting. For AutoDock Vina 1.1.2 that is 9",
)
PARSER.add_argument(
"--docking_timeout_limit",
type=float,
default=120,
help="The maximum amount of time allowed to dock a single ligand into a \
pocket in seconds. Many factors influence the time required to dock, such as: \
processor speed, the docking software, rotatable bonds, exhaustiveness docking,\
and number of docking modes... \
The default docking_timeout_limit is 120 seconds, which is excess for most \
docking events using QuickVina2Docking under default settings. If run with \
more exhaustive settings or with highly flexible ligands, consider increasing \
docking_timeout_limit to accommodate. Default docking_timeout_limit is 120 seconds",
)
PARSER.add_argument(
"--custom_docking_script",
metavar="custom_docking_script",
default="",
help="The name and path to a python script for which is used to \
dock ligands. This is required for Custom docking choices Must be a list of \
strings [name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
# scoring
PARSER.add_argument(
"--scoring_choice",
metavar="scoring_choice",
choices=["VINA", "NN1", "NN2", "Custom"],
default="VINA",
help="The scoring_choice to use to assess the ligands docking fitness. \
Default is using Vina/QuickVina2 ligand affinity while NN1/NN2 use a Neural Network \
to assess the docking pose. Custom requires providing a file path for a Custom \
scoring function. If Custom scoring function, confirm it selects properly, \
Autogrow is largely set to select for a more negative score.",
)
PARSER.add_argument(
"--rescore_lig_efficiency",
action="store_true",
default=False,
help="This will divide the final scoring_choice output by the number of \
non-Hydrogen atoms in the ligand. This adjusted ligand efficiency score will \
override the scoring_choice value. This is compatible with all scoring_choice options.",
)
PARSER.add_argument(
"--custom_scoring_script",
metavar="custom_scoring_script",
type=str,
default="",
help="The path to a python script for which is used to \
assess the ligands docking fitness. Autogrow is largely set to select for a most \
negative scores (ie binding affinity the more negative is best). Must be a list of \
strings [name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
# gypsum # max variance is the number of conformers made per ligand
PARSER.add_argument(
"--max_variants_per_compound",
type=int,
default=3,
help="number of conformers made per ligand. \
See Gypsum-DL publication for details",
)
# How hard Gypsum-DL searches conformer space for each ligand.
# (Fixed `default = 3` -> `default=3`: PEP 8 keyword-argument spacing,
# consistent with every other add_argument call in this file.)
PARSER.add_argument(
    "--gypsum_thoroughness",
    "-t",
    type=int,
    default=3,
    help="How widely Gypsum-DL will search for \
    low-energy conformers. Larger values increase \
    run times but can produce better results. \
    See Gypsum-DL publication for details",
)
PARSER.add_argument(
"--min_ph",
metavar="MIN",
type=float,
default=6.4,
help="Minimum pH to consider.See Gypsum-DL \
and Dimorphite-D publication for details.",
)
PARSER.add_argument(
"--max_ph",
metavar="MAX",
type=float,
default=8.4,
help="Maximum pH to consider.See Gypsum-DL \
and Dimorphite-D publication for details.",
)
PARSER.add_argument(
"--pka_precision",
metavar="D",
type=float,
default=1.0,
help="Size of pH substructure ranges. See Dimorphite-DL \
publication for details.",
)
PARSER.add_argument(
"--gypsum_timeout_limit",
type=float,
default=15,
help="Maximum time gypsum is allowed to run for a given ligand in seconds. \
On average Gypsum-DL takes on several seconds to run for a given ligand, but \
factors such as mol size, rotatable bonds, processor speed, and gypsum \
settings (ie gypsum_thoroughness or max_variants_per_compound) will change \
how long it takes to run. If increasing gypsum settings it is best to increase \
the gypsum_timeout_limit. Default gypsum_timeout_limit is 15 seconds",
)
# Reduce files down. This compiles and compresses the files in the PDBs folder
# (contains docking outputs, pdb, pdbqt...). This reduces the data size and
# makes data transfer quicker, but requires running the
# file_concatenation_and_compression.py in the Utility script folder to
# separate these files out for readability.
PARSER.add_argument(
"--reduce_files_sizes",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="Run this combines all files in the PDBs folder into a \
single text file. Useful when data needs to be transferred.",
)
# Make a line plot of the simulation at the end of the run.
PARSER.add_argument(
"--generate_plot",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="Make a line plot of the simulation at the end of the run.",
)
# mpi mode pre-Run so there are python cache files without EOF Errors
PARSER.add_argument(
"--cache_prerun",
"-c",
action="store_true",
help="Run this before running gypsum in mpi-mode.",
)
# Parse the command line into a plain dict of user variables.
args_dict = vars(PARSER.parse_args())
from autogrow.user_vars import multiprocess_handling, define_defaults, determine_bash_timeout_vs_gtimeout
# args_dict = define_defaults()
import copy
# INPUTS keeps only the options the user actually supplied: any key whose
# parsed value is None (argparse's "not given" default) is dropped before
# handing the dict to AutoGrow's variable loader.
INPUTS = copy.deepcopy(args_dict)
for k, v in args_dict.items():
    if v is None:
        del INPUTS[k]
if args_dict["cache_prerun"] is False:
    # NOTE(review): indentation was lost in this extract; following
    # AutoGrow's RunAutogrow.py, the parameter loading, multiprocess setup
    # and timeout detection are assumed to all sit inside this guard so a
    # cache_prerun invocation skips them -- confirm against the original file.
    # load the commandline parameters
    from autogrow.user_vars import load_in_commandline_parameters
    args_dict, printout = load_in_commandline_parameters(INPUTS)
    # Sets up the parallelizer object (mpi / multithreading / serial).
    args_dict = multiprocess_handling(args_dict)
    # Pick the OS's timeout command ("timeout" on Linux, "gtimeout" on macOS).
    timeout_option = determine_bash_timeout_vs_gtimeout()
    if timeout_option in ["timeout", "gtimeout"]:
        args_dict["timeout_vs_gtimeout"] = timeout_option
    else:
        raise Exception("Something is very wrong. This OS may not be supported by \
    Autogrow or you may need to execute through Bash.")
import numpy as np
import os, json, pickle, time, sys
########### smiles -> sdf -> pdb -> pdbqt
import autogrow.operators.convert_files.conversion_to_3d as conversion_to_3d
# conversion_to_3d.convert_smi_to_sdfs_with_gypsum
# conversion_to_3d.convert_sdf_to_pdbs
# convert_sdf_to_pdbs(vars, gen_folder_path, sdfs_folder_path)
# conversion_to_3d.convert_single_sdf_to_pdb
# convert_ligand_pdb_file_to_pdbqt #### in run_docking_common lig_convert_multithread
def smiles_to_sdfs(vars, gen_smiles_file, smile_file_directory):
    """Convert a .smi file of ligands into 3D .sdf files via Gypsum-DL.

    Adapted from conversion_to_3d.convert_smi_to_sdfs_with_gypsum.

    Inputs:
    :param dict vars: user variables; reads the gypsum_* settings and runs
        jobs through vars["parallelizer"]
    :param str gen_smiles_file: path of the tab-delineated .smi file to convert
    :param str smile_file_directory: directory prefix used to build the
        gypsum submission, output, and log folders

    Returns:
    :returns: str gypsum_output_folder_path: folder the .sdf files were
        written into
    """
    max_variants_per_compound = vars["max_variants_per_compound"]
    gypsum_thoroughness = vars["gypsum_thoroughness"]
    min_ph = vars["min_ph"]
    max_ph = vars["max_ph"]
    pka_precision = vars["pka_precision"]
    gypsum_timeout_limit = vars["gypsum_timeout_limit"]

    # Folder for the per-ligand gypsum .smi's and json parameter files.
    # makedirs(..., exist_ok=True) replaces the racy exists()+makedirs pair.
    folder_path = "{}gypsum_submission_files{}".format(smile_file_directory, os.sep)
    os.makedirs(folder_path, exist_ok=True)

    # Output folder for Gypsum (where the .sdf's go).
    gypsum_output_folder_path = "{}_SDF{}".format(smile_file_directory, os.sep)
    os.makedirs(gypsum_output_folder_path, exist_ok=True)

    # Log folder inside the SDF output folder.
    gypsum_log_path = "{}log{}".format(gypsum_output_folder_path, os.sep)
    os.makedirs(gypsum_log_path, exist_ok=True)

    # One gypsum parameter json per ligand.
    list_of_gypsum_params = conversion_to_3d.make_smi_and_gyspum_params(
        gen_smiles_file,
        folder_path,
        gypsum_output_folder_path,
        max_variants_per_compound, gypsum_thoroughness,
        min_ph, max_ph, pka_precision, )

    # Run gypsum over every ligand in parallel. Each job returns the ligand's
    # SMILES name on failure and None on success.
    job_input = tuple(
        (gypsum_log_path, gypsum_params, gypsum_timeout_limit)
        for gypsum_params in list_of_gypsum_params
    )
    sys.stdout.flush()
    failed_to_convert = vars["parallelizer"].run(
        job_input, conversion_to_3d.run_gypsum_multiprocessing)
    sys.stdout.flush()

    # Deduplicate the failures (None means success).
    lig_failed_to_convert = list({x for x in failed_to_convert if x is not None})
    if len(lig_failed_to_convert) > 0:
        print("The Following ligands Failed to convert in Gypsum")
        print("Likely due to a Timeout")
        print(lig_failed_to_convert)
    sys.stdout.flush()
    return gypsum_output_folder_path
from autogrow.docking.execute_docking import pick_run_conversion_class_dict, pick_docking_class_dict, lig_convert_multithread
def pdb_to_pdbqt(vars, pdb_dir):
    """Convert every ligand .pdb in pdb_dir to .pdbqt and build the docking object.

    Adapted from autogrow.docking.execute_docking.run_docking_common.

    Inputs:
    :param dict vars: user variables (dock_choice, conversion_choice,
        filename_of_receptor, parallelizer, ...). May be mutated: if
        vars["docking_executable"] is None it is resolved and stored here.
    :param str pdb_dir: folder containing the ligand .pdb files

    Returns:
    :returns: object docking_object: the initialized docking-software
        wrapper, ready to be passed to docking_pdbqt()
    """
    dock_choice = vars["dock_choice"]
    conversion_choice = vars["conversion_choice"]
    receptor = vars["filename_of_receptor"]

    # Use a temp vars dict so you don't put mpi multiprocess info through
    # itself... (dict comprehension replaces the manual copy loop).
    temp_vars = {key: value for key, value in vars.items() if key != "parallelizer"}

    file_conversion_class_object = pick_run_conversion_class_dict(conversion_choice)
    file_conversion_class_object = file_conversion_class_object(temp_vars, receptor, test_boot=False)

    dock_class = pick_docking_class_dict(dock_choice)
    docking_object = dock_class(temp_vars, receptor, file_conversion_class_object, test_boot=False)

    # Resolve the docking executable lazily when the user did not supply one.
    if vars["docking_executable"] is None:
        vars["docking_executable"] = docking_object.get_docking_executable_file(temp_vars)

    ##### vina or Qvina
    # Find PDB's
    pdbs_in_folder = docking_object.find_pdb_ligands(pdb_dir)
    print('all pdb file [:10]', pdbs_in_folder[:10], pdb_dir)

    # Convert every .pdb to .pdbqt in parallel.
    job_input_convert_lig = tuple((docking_object, pdb) for pdb in pdbs_in_folder)
    print("Convert Ligand from PDB to PDBQT format")
    smiles_names_failed_to_convert = vars["parallelizer"].run(job_input_convert_lig, lig_convert_multithread)
    pdbqts_in_folder = docking_object.find_converted_ligands(pdb_dir)
    print('pdbqt file [:10]', pdbqts_in_folder[:10])
    return docking_object
from autogrow.docking.execute_docking import run_dock_multithread, run_docking_common
import autogrow.docking.scoring.execute_scoring_mol as Scoring
import autogrow.docking.ranking.ranking_mol as Ranking
def docking_pdbqt(vars, docking_object, pdbqt_folder,
                  smile_file='source_compounds/naphthalene_smiles.smi'):
    """Dock every .pdbqt ligand in pdbqt_folder, then score and rank the results.

    Inputs:
    :param dict vars: user variables (scoring settings, parallelizer, ...)
    :param object docking_object: wrapper returned by pdb_to_pdbqt()
    :param str pdbqt_folder: folder containing the converted .pdbqt ligands
    :param str smile_file: .smi file the docked ligands originate from,
        used by the scoring step to recover SMILES/names. Previously this
        path was hard-coded; it is now a parameter with the same default,
        so existing callers are unaffected.

    Returns:
    :returns: list smiles_list: per-ligand records sorted by fitness
        (most negative first) with a diversity score appended
    """
    pdbqts_in_folder = docking_object.find_converted_ligands(pdbqt_folder)
    print('pdbqts [:10]', pdbqts_in_folder[:10])

    # Dock all ligands in parallel; each job returns the ligand name on
    # failure and None on success.
    job_input_dock_lig = tuple((docking_object, pdbqt) for pdbqt in pdbqts_in_folder)
    smiles_names_failed_to_dock = vars["parallelizer"].run(job_input_dock_lig, run_dock_multithread) ### main
    deleted_smiles_names_list_dock = list({x for x in smiles_names_failed_to_dock if x is not None})
    print("THE FOLLOWING LIGANDS WHICH FAILED TO DOCK:", deleted_smiles_names_list_dock)
    print("####################")
    print("\nBegin Ranking and Saving results")

    folder_with_pdbqts = pdbqt_folder
    # Run any compatible Scoring Function
    smiles_list = Scoring.run_scoring_common(vars, smile_file, folder_with_pdbqts)
    print('---------', smiles_list[:10], 'smiles_list[:10] --------------')

    # Output format of the .smi file will be: SMILES Full_lig_name
    # shorthandname ...AnyCustominfo... Fitness_metric diversity
    # Normally the docking score is the fitness metric but if we use a
    # Custom metric than dock score gets moved to index -3 and the new
    # fitness metric gets -2
    # sort list by the affinity of each sublist (which is the last index of sublist)
    smiles_list.sort(key=lambda x: float(x[-1]), reverse=False)

    # score the diversity of each ligand compared to the rest of the ligands
    # in the group; this appends a float in the last column (the sum of
    # pairwise comparisons -- the lower the diversity score, the more unique
    # a molecule is within its generation).
    smiles_list = Ranking.score_and_append_diversity_scores(smiles_list)
    # e.g. ["[N-]=[NH+]/N=C/c1[nH+]nc(-c2cccc3ccccc23)o1", "naphthalene_35",
    #       "naphthalene_35", "naphthalene_35__3", -9.2, 40.14 (diversity)]
    return smiles_list
def docking(smiles_folder, smiles_file):
    """Full pipeline: SMILES -> SDF -> PDB -> PDBQT -> dock -> scored list.

    Inputs:
    :param str smiles_folder: working folder for the .smi input; sibling
        <folder>_SDF/ and <folder>_PDB/ folders are derived from it
    :param str smiles_file: path to the source .smi file

    Returns:
    :returns: list smiles_list: docked ligands sorted by fitness with
        diversity scores appended (see docking_pdbqt)
    """
    # rstrip (not strip) so a leading '/' in an absolute path is preserved;
    # strip('/') would have turned '/abs/path/' into 'abs/path'.
    base = smiles_folder.rstrip('/')
    sdfs_folder_path = base + '_SDF/'
    pdb_dir = base + '_PDB/'
    smiles_to_sdfs(args_dict, gen_smiles_file=smiles_file, smile_file_directory=smiles_folder)
    # Pass the computed SDF folder instead of the previous hard-coded
    # './smiles_SDF/' literal so the pipeline works for any smiles_folder
    # (identical behavior for the default './smiles/').
    conversion_to_3d.convert_sdf_to_pdbs(args_dict, gen_folder_path=smiles_folder, sdfs_folder_path=sdfs_folder_path)
    docking_object = pdb_to_pdbqt(vars=args_dict, pdb_dir=pdb_dir)
    smiles_list = docking_pdbqt(args_dict, docking_object, pdb_dir)
    return smiles_list
# Run the whole pipeline on the configured source compounds.
smiles_list = docking(smiles_folder='./smiles/', smiles_file=args_dict['source_compound_file'])
'''
python smiles2dockscore.py \
--filename_of_receptor ./tutorial/PARP/4r6eA_PARP1_prepared.pdb \
--center_x -70.76 --center_y 21.82 --center_z 28.33 \
--size_x 25.0 --size_y 16.0 --size_z 25.0 \
--source_compound_file ./source_compounds/naphthalene_smiles.smi \
--root_output_folder ./output \
--number_of_mutants_first_generation 50 \
--number_of_crossovers_first_generation 50 \
--number_of_mutants 50 \
--number_of_crossovers 50 \
--top_mols_to_seed_next_generation 50 \
--number_elitism_advance_from_previous_gen 50 \
--number_elitism_advance_from_previous_gen_first_generation 10 \
--diversity_mols_to_seed_first_generation 10 \
--diversity_seed_depreciation_per_gen 10 \
--num_generations 5 \
--mgltools_directory ./mgltools_x86_64Linux2_1.5.6/ \
--number_of_processors -1 \
--scoring_choice VINA \
--LipinskiLenientFilter \
--start_a_new_run \
--rxn_library click_chem_rxns \
--selector_choice Rank_Selector \
--dock_choice VinaDocking \
--max_variants_per_compound 5 \
--redock_elite_from_previous_gen False \
--generate_plot True \
--reduce_files_sizes True \
--use_docked_source_compounds True
'''
| 31,771 | 36.511216 | 137 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/demo_GAoperation.py | import argparse
PARSER = argparse.ArgumentParser()
# Allows the run commands to be submitted via a .json file.
PARSER.add_argument(
"--json",
"-j",
metavar="param.json",
help="Name of a json file containing all parameters. \
Overrides other arguments.",
)
# Allows the run in debug mode. Doesn't delete temp files.
PARSER.add_argument(
"--debug_mode",
"-d",
action="store_true",
default=False,
help="Run Autogrow in Debug mode. This keeps all \
temporary files and adds extra print statements.",
)
# receptor information
PARSER.add_argument(
"--filename_of_receptor",
"-r",
metavar="receptor.pdb",
default='./tutorial/PARP/4r6eA_PARP1_prepared.pdb',
help="The path to the receptor file. Should be .pdb file.",
)
PARSER.add_argument(
"--center_x",
"-x",
type=float,
default=-70.76,
help="x-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
"--center_y",
"-y",
type=float,
default=21.82,
help="y-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
"--center_z",
"-z",
type=float,
default=28.33,
help="z-coordinate for the center of the pocket to be tested by docking. (Angstrom)",
)
PARSER.add_argument(
"--size_x",
type=float,
default=25.0,
help="dimension of box to dock into in the x-axis (Angstrom)",
)
PARSER.add_argument(
"--size_y",
type=float,
default=20.0,
help="dimension of box to dock into in the y-axis (Angstrom)",
)
PARSER.add_argument(
"--size_z",
type=float,
default=25.0,
help="dimension of box to dock into in the z-axis (Angstrom)",
)
# Input/Output directories
PARSER.add_argument(
"--root_output_folder",
"-o",
type=str,
help="The Path to the folder which all output files will be placed.",
)
PARSER.add_argument(
"--source_compound_file",
"-s",
type=str,
default='./source_compounds/naphthalene_smiles.smi',
help="PATH to the file containing the source compounds. It must be \
tab-delineated .smi file. These ligands will seed the first generation.",
)
PARSER.add_argument(
"--filter_source_compounds",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="If True source ligands from source_compound_file will be \
filter using the user defined filter choices prior to the 1st generation being \
created. If False, ligands which would fail the ligand filters could seed \
the 1st generation. Default is True.",
)
PARSER.add_argument(
"--use_docked_source_compounds",
choices=[True, False, "True", "False", "true", "false"],
default=False,
help="If True source ligands will be docked prior to seeding generation 1. \
If True and the source_compound file already has docking/fitness metric score \
in -2 column of .smi file, it will not redock but reuse the scores from \
the source_compound_file.\
If True and no fitness metric score in -2 column of .smi file, it will \
dock each ligand from the source_compound_file and displayed as generation 0.\
If False, generation 1 will be randomly seeded by the source compounds with \
no preference and there will be no generation 0. \
If performing multiple simulations using same source compounds and protein, \
we recommend running once this and using the generation 0 ranked file as the \
source_compound_file for future simulations. \
Default is True.",
)
PARSER.add_argument(
"--start_a_new_run",
action="store_true",
default=False,
help="If False make a new folder and start a fresh simulation with Generation 0. \
If True find the last generation in the root_output_folder and continue to fill.\
Default is False.",
)
# SmilesMerge Settings
PARSER.add_argument(
"--max_time_MCS_prescreen",
type=int,
default=1,
help="amount time the pre-screen MCS times out. Time out doesnt prevent \
mcs matching just takes what it has up to that point",
)
PARSER.add_argument(
"--max_time_MCS_thorough",
type=int,
default=1,
help="amount time the thorough MCS times out. Time out doesnt prevent \
mcs matching just takes what it has up to that point",
)
PARSER.add_argument(
"--min_atom_match_MCS",
type=int,
default=4,
help="Determines the minimum number of atoms in common for a substructurematch. \
The higher the more restrictive, but the more likely for two ligands not to match",
)
PARSER.add_argument(
"--protanate_step",
action="store_true",
default=False,
help="Indicates if Smilesmerge uses protanated mols (if true) or deprot \
(if False) SmilesMerge is 10x faster when deprotanated",
)
# Mutation Settings
PARSER.add_argument(
"--rxn_library",
choices=["click_chem_rxns", "robust_rxns", "all_rxns", "Custom"],
default="all_rxns",
help="This set of reactions to be used in Mutation. \
If Custom, one must also provide rxn_file Path and function_group_library path",
)
PARSER.add_argument(
"--rxn_library_file",
type=str,
default="",
help="This PATH to a Custom json file of SMARTS reactions to use for Mutation. \
Only provide if using the Custom option for rxn_library.",
)
PARSER.add_argument(
"--function_group_library",
type=str,
default="",
help="This PATH for a dictionary of functional groups to be used for Mutation. \
Only provide if using the Custom option for rxn_library.",
)
PARSER.add_argument(
"--complementary_mol_directory",
type=str,
default="",
help="This PATH to the directory containing all the molecules being used \
to react with. The directory should contain .smi files contain SMILES of \
molecules containing the functional group represented by that file. Each file \
should be named with the same title as the functional groups described in \
rxn_library_file & function_group_library +.smi \
All Functional groups specified function_group_library must have its \
own .smi file. We recommend you filter these dictionaries prior to Autogrow \
for the Drug-likeliness and size filters you will Run Autogrow with.",
)
# processors and multithread mode
PARSER.add_argument(
"--number_of_processors",
"-p",
type=int,
metavar="N",
default=1,
help="Number of processors to use for parallel calculations. Set to -1 for all available CPUs.",
)
PARSER.add_argument(
"--multithread_mode",
default="multithreading",
choices=["mpi", "multithreading", "serial"],
help="Determine what style \
multithreading: mpi, multithreading, or serial. serial will override \
number_of_processors and force it to be on a single processor.",
)
# Genetic Algorithm Options
PARSER.add_argument(
"--selector_choice",
choices=["Roulette_Selector", "Rank_Selector", "Tournament_Selector"],
default="Roulette_Selector",
help="This determines whether the fitness criteria are chosen by a Weighted Roulette, \
Ranked, or Tournament style Selector. The Rank option is a non-redundant selector.\
Roulette and Tournament chose without replacement and are stoichastic options. \
Warning do not use Rank_Selector for small runs as there is potential that \
the number of desired ligands exceed the number of ligands to chose from.",
)
PARSER.add_argument(
"--tourn_size",
type=float,
default=0.1,
help="If using the Tournament_Selector this determines the size of each \
tournament. The number of ligands used for each tournament will the \
tourn_size * the number of considered ligands.",
)
# Seeding next gen and diversity
PARSER.add_argument(
"--top_mols_to_seed_next_generation_first_generation",
type=int,
help="Number of mols that seed next generation, for the first generation.\
Should be less than number_of_crossovers_first_generation + number_of_mutations_first_generation\
If not defined it will default to top_mols_to_seed_next_generation",
)
PARSER.add_argument(
"--top_mols_to_seed_next_generation",
type=int,
default=10,
help="Number of mols that seed next generation, for all generations after the first.\
Should be less than number_of_crossovers_first_generation \
+ number_of_mutations_first_generation",
)
PARSER.add_argument(
"--diversity_mols_to_seed_first_generation",
type=int,
default=10,
help="Should be less than number_of_crossovers_first_generation \
+ number_of_mutations_first_generation",
)
PARSER.add_argument(
"--diversity_seed_depreciation_per_gen",
type=int,
default=2,
help="Each gen diversity_mols_to_seed_first_generation will decrease this amount",
)
# Populations settings
PARSER.add_argument(
"--num_generations",
type=int,
default=10,
help="The number of generations to be created.",
)
PARSER.add_argument(
"--number_of_crossovers_first_generation",
type=int,
help="The number of ligands which will be created via crossovers in the \
first generation. If not defined it will default to number_of_crossovers",
)
PARSER.add_argument(
"--number_of_mutants_first_generation",
type=int,
help="The number of ligands which will be created via mutation in \
the first generation. If not defined it will default to number_of_mutants",
)
PARSER.add_argument(
"--number_elitism_advance_from_previous_gen_first_generation",
type=int,
help="The number of ligands chosen for elitism for the first generation \
These will advance from the previous generation directly into the next \
generation. This is purely advancing based on Docking/Rescore fitness. \
This does not select for diversity. If not defined it will default to \
number_elitism_advance_from_previous_gen",
)
PARSER.add_argument(
"--number_of_crossovers",
type=int,
default=10,
help="The number of ligands which will be created via crossover in each \
generation besides the first",
)
PARSER.add_argument(
"--number_of_mutants",
type=int,
default=10,
help="The number of ligands which will be created via mutation in each \
generation besides the first.",
)
PARSER.add_argument(
"--number_elitism_advance_from_previous_gen",
type=int,
default=10,
help="The number of ligands chosen for elitism. These will advance from \
the previous generation directly into the next generation. \
This is purely advancing based on Docking/Rescore \
fitness. This does not select for diversity.",
)
PARSER.add_argument(
"--redock_elite_from_previous_gen",
choices=[True, False, "True", "False", "true", "false"],
default=False,
help="If True than ligands chosen via Elitism (ie advanced from last generation) \
will be passed through Gypsum and docked again. This provides a better exploration of conformer space \
but also requires more computation time. If False, advancing ligands are simply carried forward by \
copying the PDBQT files.",
)
####### FILTER VARIABLES
PARSER.add_argument(
"--LipinskiStrictFilter",
action="store_true",
default=False,
help="Lipinski filters for orally available drugs following Lipinski rule of fives. \
Filters by molecular weight, logP and number of hydrogen bond donors and acceptors. \
Strict implementation means a ligand must pass all requirements.",
)
PARSER.add_argument(
"--LipinskiLenientFilter",
action="store_true",
default=False,
help="Lipinski filters for orally available drugs following Lipinski rule of fives. \
Filters by molecular weight, logP and number of hydrogen bond donors and acceptors. \
Lenient implementation means a ligand may fail all but one requirement and still passes.",
)
PARSER.add_argument(
"--GhoseFilter",
action="store_true",
default=False,
help="Ghose filters for drug-likeliness; filters by molecular weight,\
logP and number of atoms.",
)
PARSER.add_argument(
"--GhoseModifiedFilter",
action="store_true",
default=False,
help="Ghose filters for drug-likeliness; filters by molecular weight,\
logP and number of atoms. This is the same as the GhoseFilter, but \
the upper-bound of the molecular weight restrict is loosened from \
480Da to 500Da. This is intended to be run with Lipinski Filter and \
to match AutoGrow 3's Ghose Filter.",
)
PARSER.add_argument(
"--MozziconacciFilter",
action="store_true",
default=False,
help="Mozziconacci filters for drug-likeliness; filters by the number of \
rotatable bonds, rings, oxygens, and halogens.",
)
PARSER.add_argument(
"--VandeWaterbeemdFilter",
action="store_true",
default=False,
help="VandeWaterbeemd filters for drug likely to be blood brain barrier permeable. \
Filters by the number of molecular weight and Polar Sureface Area (PSA).",
)
PARSER.add_argument(
"--PAINSFilter",
action="store_true",
default=False,
help="PAINS filters against Pan Assay Interference Compounds using \
substructure a search.",
)
# Fixed help-text typos: "undersirable" -> "undesirable",
# "using substructure a search" -> "using a substructure search".
PARSER.add_argument(
    "--NIHFilter",
    action="store_true",
    default=False,
    help="NIH filters against molecules with undesirable functional groups \
    using a substructure search.",
)
PARSER.add_argument(
"--BRENKFilter",
action="store_true",
default=False,
help="BRENK filter for lead-likeliness, by matching common false positive \
molecules to the current mol.",
)
PARSER.add_argument(
"--No_Filters",
action="store_true",
default=False,
help="No filters will be applied to compounds.",
)
PARSER.add_argument(
"--alternative_filter",
action="append",
help="If you want to add Custom filters to the filter child classes \
Must be a list of lists \
[[name_filter1, Path/to/name_filter1.py],[name_filter2, Path/to/name_filter2.py]]",
)
# dependency variables
# DOCUMENT THE file conversion for docking inputs
PARSER.add_argument(
"--conversion_choice",
choices=["MGLToolsConversion", "ObabelConversion", "Custom"],
default="MGLToolsConversion",
help="Determines how .pdb files will be converted \
to the final format for docking. For Autodock Vina and QuickVina style docking software, \
files must be in .pdbqt format. MGLToolsConversion: uses MGLTools and is the \
recommended converter. MGLTools conversion is required for NNScore1/2 rescoring. \
ObabelConversion: uses commandline obabel. Easier to install but Vina docking has \
been optimized with MGLTools conversion.",
)
PARSER.add_argument(
"--custom_conversion_script",
metavar="custom_conversion_script",
default="",
help="The path to a python script for which is used to convert \
ligands. This is required for custom conversion_choice choices. \
Must be a list of strings \
[name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
PARSER.add_argument(
"--mgltools_directory",
metavar="mgltools_directory",
help="Required if using MGLTools conversion option \
(conversion_choice=MGLToolsConversion) \
Path may look like: /home/user/MGLTools-1.5.6/",
)
PARSER.add_argument(
"--mgl_python",
metavar="mgl_python",
required=False,
help="/home/user/MGLTools-1.5.4/bin/pythonsh",
)
PARSER.add_argument(
"--prepare_ligand4.py",
metavar="prepare_ligand4.py",
required=False,
help="/home/user/MGLTools-1.5.4/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_ligand4.py",
)
PARSER.add_argument(
"--prepare_receptor4.py",
metavar="prepare_receptor4.py",
required=False,
help="/home/userMGLTools-1.5.4/MGLToolsPckgs/AutoDockTools/Utilities24/prepare_receptor4.py",
)
PARSER.add_argument(
"--obabel_path",
help="required if using obabel conversion \
option (conversion_choice=ObabelConversion).\
Path may look like PATH/envs/py37/bin/obabel; \
may be found on Linux by running: which obabel",
)
###################################
######### docking #################
###################################
PARSER.add_argument(
"--dock_choice",
metavar="dock_choice",
default="QuickVina2Docking",
choices=["VinaDocking", "QuickVina2Docking", "Custom"],
help="dock_choice assigns which docking software module to use.",
)
PARSER.add_argument(
"--docking_executable",
metavar="docking_executable",
default=None,
help="path to the docking_executable",
)
PARSER.add_argument(
"--docking_exhaustiveness",
metavar="docking_exhaustiveness",
default=None,
help="exhaustiveness of the global search (roughly proportional to time. \
see docking software for settings. Unless specified Autogrow uses the \
docking softwares default setting. For AutoDock Vina 1.1.2 that is 8",
)
PARSER.add_argument(
"--docking_num_modes",
metavar="docking_num_modes",
default=None,
help=" maximum number of binding modes to generate in docking. \
See docking software for settings. Unless specified Autogrow uses the \
docking softwares default setting. For AutoDock Vina 1.1.2 that is 9",
)
PARSER.add_argument(
"--docking_timeout_limit",
type=float,
default=120,
help="The maximum amount of time allowed to dock a single ligand into a \
pocket in seconds. Many factors influence the time required to dock, such as: \
processor speed, the docking software, rotatable bonds, exhaustiveness docking,\
and number of docking modes... \
The default docking_timeout_limit is 120 seconds, which is excess for most \
docking events using QuickVina2Docking under default settings. If run with \
more exhaustive settings or with highly flexible ligands, consider increasing \
docking_timeout_limit to accommodate. Default docking_timeout_limit is 120 seconds",
)
PARSER.add_argument(
"--custom_docking_script",
metavar="custom_docking_script",
default="",
help="The name and path to a python script for which is used to \
dock ligands. This is required for Custom docking choices Must be a list of \
strings [name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
# scoring
PARSER.add_argument(
"--scoring_choice",
metavar="scoring_choice",
choices=["VINA", "NN1", "NN2", "Custom"],
default="VINA",
help="The scoring_choice to use to assess the ligands docking fitness. \
Default is using Vina/QuickVina2 ligand affinity while NN1/NN2 use a Neural Network \
to assess the docking pose. Custom requires providing a file path for a Custom \
scoring function. If Custom scoring function, confirm it selects properly, \
Autogrow is largely set to select for a more negative score.",
)
PARSER.add_argument(
"--rescore_lig_efficiency",
action="store_true",
default=False,
help="This will divide the final scoring_choice output by the number of \
non-Hydrogen atoms in the ligand. This adjusted ligand efficiency score will \
override the scoring_choice value. This is compatible with all scoring_choice options.",
)
PARSER.add_argument(
"--custom_scoring_script",
metavar="custom_scoring_script",
type=str,
default="",
help="The path to a python script for which is used to \
assess the ligands docking fitness. Autogrow is largely set to select for a most \
negative scores (ie binding affinity the more negative is best). Must be a list of \
strings [name_custom_conversion_class, Path/to/name_custom_conversion_class.py]",
)
# gypsum # max variance is the number of conformers made per ligand
PARSER.add_argument(
"--max_variants_per_compound",
type=int,
default=3,
help="number of conformers made per ligand. \
See Gypsum-DL publication for details",
)
# Normalized "default = 3" to "default=3" (PEP 8; matches every other
# keyword argument in this file).
PARSER.add_argument(
    "--gypsum_thoroughness",
    "-t",
    type=int,
    default=3,
    help="How widely Gypsum-DL will search for \
    low-energy conformers. Larger values increase \
    run times but can produce better results. \
    See Gypsum-DL publication for details",
)
PARSER.add_argument(
"--min_ph",
metavar="MIN",
type=float,
default=6.4,
help="Minimum pH to consider.See Gypsum-DL \
and Dimorphite-D publication for details.",
)
PARSER.add_argument(
"--max_ph",
metavar="MAX",
type=float,
default=8.4,
help="Maximum pH to consider.See Gypsum-DL \
and Dimorphite-D publication for details.",
)
PARSER.add_argument(
"--pka_precision",
metavar="D",
type=float,
default=1.0,
help="Size of pH substructure ranges. See Dimorphite-DL \
publication for details.",
)
PARSER.add_argument(
"--gypsum_timeout_limit",
type=float,
default=15,
help="Maximum time gypsum is allowed to run for a given ligand in seconds. \
On average Gypsum-DL takes on several seconds to run for a given ligand, but \
factors such as mol size, rotatable bonds, processor speed, and gypsum \
settings (ie gypsum_thoroughness or max_variants_per_compound) will change \
how long it takes to run. If increasing gypsum settings it is best to increase \
the gypsum_timeout_limit. Default gypsum_timeout_limit is 15 seconds",
)
# Reduce files down. This compiles and compresses the files in the PDBs folder
# (contains docking outputs, pdb, pdbqt...). This reduces the data size and
# makes data transfer quicker, but requires running the
# file_concatenation_and_compression.py in the Utility script folder to
# separate these files out for readability.
PARSER.add_argument(
"--reduce_files_sizes",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="Run this combines all files in the PDBs folder into a \
single text file. Useful when data needs to be transferred.",
)
# Make a line plot of the simulation at the end of the run.
PARSER.add_argument(
"--generate_plot",
choices=[True, False, "True", "False", "true", "false"],
default=True,
help="Make a line plot of the simulation at the end of the run.",
)
# mpi mode pre-Run so there are python cache files without EOF Errors
PARSER.add_argument(
"--cache_prerun",
"-c",
action="store_true",
help="Run this before running gypsum in mpi-mode.",
)
args_dict = vars(PARSER.parse_args())
from autogrow.user_vars import multiprocess_handling, define_defaults, determine_bash_timeout_vs_gtimeout
# args_dict = define_defaults()
import copy
INPUTS = copy.deepcopy(args_dict)
for k, v in args_dict.items():
if v is None:
del INPUTS[k]
if args_dict["cache_prerun"] is False:
# load the commandline parameters
from autogrow.user_vars import load_in_commandline_parameters
args_dict, printout = load_in_commandline_parameters(INPUTS)
args_dict = multiprocess_handling(args_dict)
timeout_option = determine_bash_timeout_vs_gtimeout()
if timeout_option in ["timeout", "gtimeout"]:
args_dict["timeout_vs_gtimeout"] = timeout_option
else:
raise Exception("Something is very wrong. This OS may not be supported by \
Autogrow or you may need to execute through Bash.")
import numpy as np
import os, json, pickle, time, sys
file = 'source_compounds/naphthalene_smiles.smi'
with open(file, 'r') as fin:
smiles_list = fin.readlines()
smiles_list = [smiles.split()[0] for smiles in smiles_list]
vars = args_dict
##########################################################
# A. mutate
##########################################################
"""
import autogrow.operators.mutation.smiles_click_chem.smiles_click_chem as SmileClickClass
rxn_library_variables = [
vars["rxn_library"],
vars["rxn_library_file"],
vars["function_group_library"],
vars["complementary_mol_directory"],
] # Package user vars specifying the Reaction library to use for mutation
new_mutation_smiles_list = [] # List of SMILES from mutation
a_smiles_click_chem_object = SmileClickClass.SmilesClickChem(rxn_library_variables, new_mutation_smiles_list, vars["filter_object_dict"])
for smiles in smiles_list:
result_of_run = a_smiles_click_chem_object.run_smiles_click2(smiles)
print('-------------', result_of_run)
"""
##########################################################
### B. crossover
##########################################################
## crossover between 2 ligands
''' crossover: find common structure '''
import autogrow.operators.crossover.smiles_merge.smiles_merge as smiles_merge
import autogrow.operators.crossover.execute_crossover as execute_crossover
import autogrow.operators.filter.execute_filters as Filter
# execute_crossover.test_for_mcs
# test_for_mcs(vars, mol_1, mol_2)
# execute_crossover.convert_mol_from_smiles
for smiles in smiles_list:
mol = execute_crossover.convert_mol_from_smiles(smiles)
for s2 in smiles_list:
if smiles == s2:
continue
mol2 = execute_crossover.convert_mol_from_smiles(s2)
if execute_crossover.test_for_mcs(vars, mol, mol2) is None:
continue
for i in range(3):
ligand_new_smiles = smiles_merge.run_main_smiles_merge(vars, smiles, s2)
if ligand_new_smiles is not None:
break
if ligand_new_smiles is None:
print('====fail to merge')
else:
pass_or_not = Filter.run_filter_on_just_smiles(ligand_new_smiles, vars["filter_object_dict"]) #### True, False
if pass_or_not:
print('-----success----', ligand_new_smiles)
'''
python demo_GAoperation.py \
--filename_of_receptor ./tutorial/PARP/4r6eA_PARP1_prepared.pdb \
--center_x -70.76 --center_y 21.82 --center_z 28.33 \
--size_x 25.0 --size_y 16.0 --size_z 25.0 \
--source_compound_file ./source_compounds/naphthalene_smiles.smi \
--root_output_folder ./output \
--number_of_mutants_first_generation 50 \
--number_of_crossovers_first_generation 50 \
--number_of_mutants 50 \
--number_of_crossovers 50 \
--top_mols_to_seed_next_generation 50 \
--number_elitism_advance_from_previous_gen 50 \
--number_elitism_advance_from_previous_gen_first_generation 10 \
--diversity_mols_to_seed_first_generation 10 \
--diversity_seed_depreciation_per_gen 10 \
--num_generations 5 \
--mgltools_directory ./mgltools_x86_64Linux2_1.5.6/ \
--number_of_processors -1 \
--scoring_choice VINA \
--LipinskiLenientFilter \
--start_a_new_run \
--rxn_library click_chem_rxns \
--selector_choice Rank_Selector \
--dock_choice VinaDocking \
--max_variants_per_compound 5 \
--redock_elite_from_previous_gen False \
--generate_plot True \
--reduce_files_sizes True \
--use_docked_source_compounds True
'''
| 26,805 | 33.994778 | 137 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/draw_mutation.py | from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
# Read the mutation examples; each line is whitespace-separated:
#   input SMILES, reaction SMARTS, output SMILES.
with open('mutation_example.txt', 'r') as fin:
    lines = fin.readlines()
idx = 0
# Render only the single example at position `idx`.
for line in lines[idx:idx+1]:
    input_smiles, smart, output_smiles = line.split()[:3]
    # Draw the parent molecule (sanitize=False tolerates unusual valences).
    mol = Chem.MolFromSmiles(input_smiles, sanitize=False)
    Draw.MolToFile(mol, 'figure/mutation_input.png', )
    # try:
    # if True:
    #     mol = Chem.MolFromSmarts(smart)
    #     d2d = Draw.MolDraw2DSVG(250,200)
    #     d2d.DrawMolecule(mol)
    #     Draw.MolToFile(mol, 'figure/mutation_smart.png', )
    # except:
    #     pass
    #### draw SMARTS
    # https://www.rdkit.org/docs/GettingStartedInPython.html
    print(smart)
    # Render the reaction SMARTS as a PNG via the Cairo 2D drawer.
    rxn = AllChem.ReactionFromSmarts(smart)
    d2d = Draw.MolDraw2DCairo(800,300)
    d2d.DrawReaction(rxn)
    png = d2d.GetDrawingText()
    open('./figure/mutation_smart.png','wb+').write(png)
    # Draw the mutated product molecule.
    mol = Chem.MolFromSmiles(output_smiles, sanitize=False)
    Draw.MolToFile(mol, 'figure/mutation_output.png', )
# >>> from rdkit.Chem import Draw
# >>> rxn = AllChem.ReactionFromSmarts('[cH:5]1[cH:6][c:7]2[cH:8][n:9][cH:10][cH:11][c:12]2[c:3]([cH:4]1)[C:2](=[O:1])O.[N-:13]=[N+:14]=[N-:15]>C(Cl)Cl.C(=O)(C(=O)Cl)Cl>[cH:5]1[cH:6][c:7]2[cH:8][n:9][cH:10][cH:11][c:12]2[c:3]([cH:4]1)[C:2](=[O:1])[N:13]=[N+:14]=[N-:15]',useSmiles=True)
# >>> d2d = Draw.MolDraw2DCairo(800,300)
# >>> d2d.DrawReaction(rxn)
# >>> png = d2d.GetDrawingText()
# >>> open('./images/reaction1.o.png','wb+').write(png)
# import json
# reaction_file = "autogrow/operators/mutation/smiles_click_chem/reaction_libraries/all_rxns/All_Rxns_rxn_library.json"
# smiles_file = 'source_compounds/naphthalene_smiles.smi'
# with open(smiles_file, 'r') as fin:
# smiles_lst = fin.readlines()
# smiles_lst = [line.split()[0] for line in smiles_lst]
# mol_list = []
# for smiles in smiles_lst:
# mol = Chem.MolFromSmiles(smiles, sanitize=False)
# mol_list.append(mol)
# reaction_dict = json.load(open(reaction_file))
# # print(reaction_dict)
# for k,v in reaction_dict.items():
# a_reaction_dict = v
# rxn = AllChem.ReactionFromSmarts(str(a_reaction_dict["reaction_string"]))
# rxn.Initialize()
# for mol in mol_list:
# try:
# y = rxn.RunReactants((mol,))
# print(y)
# except:
# pass
| 2,257 | 30.802817 | 286 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/accessory_scripts/file_concatenate_and_compression.py | """
This script is used to decompress or recompress AutoGrow data.
If you use the reduce_files_sizes option AutoGrow will convert concatenate and compress
all files in the PDBs directory of each generation. This is useful when doing larger runs as
data transfer is faster and data storage is reduced when files are merged and compressed.
-The concatenation script that is run in AutoGrow 4 can be found at:
autogrow4/autogrow/docking/concatenate_files.py
This script will either:
1) Return the files back to their original uncompressed and deconcatenated formatting
or
2) concatenate and then compress the files into a single file.
The formatting of the concatenation is:
"\n##############################File_name: {}\n".format(os.path.basename(file_name_1))
... Content of the 1st file...
"\n##############################$$END_FILE$$ {}".format(os.path.basename(file_name_1))
"\n##############################File_name: {}\n".format(os.path.basename(file_name_2))
... Content of the 2nd file...
"\n##############################$$END_FILE$$ {}".format(os.path.basename(file_name_2))
Example decompression:
python autogrow4/accessory_scripts/file_concatenation_and_compression.py \
--compress_or_decompress decompress \
--input_folder_or_file PATH_TO_RUN/Run_0/generation_1/PDBs/compressed_PDBS.txt.gz
Example compression:
python autogrow4/accessory_scripts/file_concatenation_and_compression.py \
--compress_or_decompress compress \
--input_folder_or_file PATH_TO_RUN/Run_0/generation_1/PDBs/
This concatenated file is tar.gz compressed.
"""
import __future__
import glob
import os
import gzip
import shutil
import argparse
import support_scripts.Multiprocess as mp
def compress_file(file_name):
    """
    Gzip-compress a file, writing the archive next to it as <file_name>.gz.

    The source is read as text and encoded to UTF-8 bytes before being
    written to the gzip archive; the original file is left in place.

    Inputs:
    :param str file_name: the path to the file to compress.
    """
    with open(file_name, "r") as source_handle:
        raw_bytes = source_handle.read().encode("utf-8")
    with gzip.open("{}.gz".format(file_name), "wb") as archive_handle:
        archive_handle.write(raw_bytes)
#######
def decompress_file(decompressed_file):
    """
    Decompress a gzip file to a sibling path with the ".gz" suffix removed.

    Counterpart of compress_file(); not used while running the program.

    Inputs:
    :param str decompressed_file: the path to the file to decompress.
    Returns:
    :returns: str out_file: the path of the decompressed output file.
    """
    out_file = decompressed_file.replace(".gz", "")
    with gzip.open(decompressed_file, "rb") as archive_handle, \
            open(out_file, "wb") as plain_handle:
        shutil.copyfileobj(archive_handle, plain_handle)
    return out_file
#######
def separate_files(compressed_file):
    """
    Split a concatenated, gzip-compressed archive back into its original
    files. Counterpart of compress_file()/concatenation; not used while
    running the program.

    The archive is decompressed, then scanned line by line for the
    "File_name:" / "$$END_FILE$$" sentinels that bracket each embedded
    file; each bracketed section is written to its original relative path
    (only if that path does not already exist). The intermediate
    decompressed file is removed only when every embedded file was
    recreated successfully.

    Fixes vs. original: the inner `with open(out_file, "w") as f` no longer
    shadows the outer read handle `f` (which only worked because
    readlines() had pre-materialized the lines), and the shell
    `os.system("rm ...")` call is replaced by os.remove, which is portable
    and safe for paths containing spaces.

    Inputs:
    :param str compressed_file: the path to the file to separate/decompress.
    """
    directory = (
        os.path.abspath(compressed_file.split(os.path.basename(compressed_file))[0])
        + os.sep
    )
    compressed_file = os.path.abspath(compressed_file)
    decompressed_file = decompress_file(compressed_file)
    if os.path.exists(decompressed_file) is False:
        raise Exception("Failed to decompress the file")

    printout = ""
    list_of_new_files = []
    out_file = None
    with open(decompressed_file, "r") as archive_handle:
        for line in archive_handle.readlines():
            if "$$END_FILE$$" in line:
                # End sentinel: flush the accumulated section, but never
                # overwrite a file that already exists on disk.
                if out_file is not None and os.path.exists(out_file) is False:
                    with open(out_file, "w") as section_handle:
                        section_handle.write(printout + "\n")
                out_file = None
                printout = ""
                continue
            if "File_name:" in line:
                printout = ""
                # Split the line up and grab the relative file path convert to
                # absolute path
                out_file = (
                    directory
                    + os.sep
                    + line.split("##############################File_name: ")[
                        1
                    ].replace("\n", "")
                )
                out_file = os.path.abspath(out_file)
                list_of_new_files.append(out_file)
                continue
            printout = printout + line
            continue

    # Delete the intermediate decompressed archive only if every embedded
    # file now exists.
    all_are_made = True
    for new_file in list_of_new_files:
        if os.path.exists(new_file) is False:
            print("file failed to decompress: {}".format(new_file))
            all_are_made = False
    if all_are_made is True:
        os.remove(decompressed_file)
#######
def get_file_info(file_name):
    """
    Used for concatenating files together. This function appends a separator
    and the filename of a file before and after the text of the file
    file_name. It returns it as a string

    Fix vs. original: the file is read via a context manager instead of a
    bare open(...).read(), which leaked the handle until garbage collection.

    Inputs:
    :param str file_name: the path to the file to concatenate.
    Returns:
    :returns: str concat: the text of the file file_name with a separator and
        label before and after the file text.
    """
    file_name_insert = "\n##############################File_name: {}\n".format(
        os.path.basename(file_name)
    )
    file_termination_insert = "\n##############################$$END_FILE$$ {}".format(
        os.path.basename(file_name)
    )
    with open(file_name) as file_handle:
        file_contents = file_handle.read()
    return file_name_insert + file_contents + file_termination_insert
#######
def del_files(file_name):
    """
    Delete the file at file_name if it exists. Failures are reported but
    never raised, so callers can use this as best-effort cleanup.

    Fix vs. original: uses os.remove instead of os.system("rm {}"), which
    is portable (works on Windows) and safe for paths containing spaces.

    Inputs:
    :param str file_name: the path to delete.
    """
    if os.path.exists(file_name):
        try:
            os.remove(file_name)
        except:
            print("couldn't delete file: {}".format(file_name))
#######
def run_concatenation(directory):
    """
    This function concatenates and compresses every file in a directory. This
    makes data transfer easier later on.

    To decompress the folder please use script in
    $PATH/autogrow4/accessory_scripts/file_concatenation_and_compression.py

    Inputs:
    :param str directory: the path to the folder which will be compiled and compressed.
    """
    concat_file = directory + os.sep + "compressed_PDBS.txt"
    print(
        "Start Concatenation: To separate files use the \
        file_concatenation_and_compression.py in the Utility script folder."
    )
    # Snapshot of the directory contents; taken before concat_file is
    # created so the archive does not include itself.
    file_list = glob.glob(directory + os.sep + "*")
    file_list = [os.path.abspath(x) for x in file_list]
    # Append each file's labeled contents to the single concatenated file.
    # NOTE(review): "a+" appends, so rerunning on the same directory will
    # duplicate content in compressed_PDBS.txt — confirm this is intended.
    with open(concat_file, "a+") as f:
        for file_name in file_list:
            f.write(get_file_info(file_name))
    # One single-element tuple of args per file, for the multiprocess pool.
    job_list = tuple([(file_path,) for file_path in file_list])
    print("\tFinish Concatenation")
    print("\tRemoving files that were concatenated")
    # Delete the now-archived originals in parallel (-1 = use all procs).
    mp.multi_threading(job_list, -1, del_files)
    print("\tCompressing file")
    compress_file(concat_file)
    # Remove the uncompressed concatenated file once the .gz exists.
    if os.path.exists(concat_file + ".gz"):
        del_files(concat_file)
    print("Finished Compression")
######
def run_main(vars):
    """
    This function runs the functions for compression or decompression.

    Inputs:
    :param dict vars: dictionary of user variables.
    """
    # NOTE(review): the parameter name `vars` shadows the builtin of the
    # same name (kept for interface compatibility).
    if vars["compress_or_decompress"] == "compress":
        print("BEFORE")
        input_folder = vars["input_folder_or_file"]
        # os.path.getsize on a directory reports the directory entry's own
        # size, not the total size of its contents.
        print(os.path.getsize(input_folder))
        run_concatenation(input_folder)
        print("FINISH CONCATENATE")
        print("After concatenate")
        print(os.path.getsize(input_folder))
    elif vars["compress_or_decompress"] == "decompress":
        compressed_file = vars["input_folder_or_file"]
        if os.path.exists(compressed_file) is False:
            raise Exception("File to Decompress doesn't exist")
        # Directory containing the compressed file, with trailing separator.
        input_folder = os.path.abspath(compressed_file.split(os.path.basename(compressed_file))[0]) + os.sep
        print("BEFORE")
        print(os.path.getsize(input_folder))
        separate_files(compressed_file)
        print("After deconcatenate")
        print(os.path.getsize(input_folder))
        del_files(compressed_file)
        # NOTE(review): this label repeats the one above even though it now
        # follows deletion of the compressed file — possibly meant to read
        # "After file deletion"; confirm before changing.
        print("After deconcatenate")
        print(os.path.getsize(input_folder))
#######
def get_arguments_from_argparse(arg_dict):
    """
    Validate and normalize the argparse parameters for this script.

    Ensures compress_or_decompress is a string naming one of the two modes
    (lower-cased in place) and that input_folder_or_file exists, converting
    it to an absolute path with a trailing separator when it is a directory.

    Fix vs. original: both type checks now use isinstance() instead of the
    inconsistent `type(x) != str` / `type(x) is not str` comparisons.

    Inputs:
    :param dict arg_dict: dictionary of parameters
    Returns:
    :returns: dict arg_dict: dictionary of parameters
    """
    # Argument handling
    if not isinstance(arg_dict["compress_or_decompress"], str):
        raise Exception("Must chose between compress or decompress")
    if arg_dict["compress_or_decompress"].lower() not in ["compress", "decompress"]:
        raise Exception("Must chose between compress or decompress")
    # set to lower case to prevent issues
    arg_dict["compress_or_decompress"] = arg_dict["compress_or_decompress"].lower()

    # argument_handling
    if not isinstance(arg_dict["input_folder_or_file"], str):
        raise Exception("--input_folder_or_file required: Path to directory to \
        compress or decompress.")
    if os.path.exists(arg_dict["input_folder_or_file"]) is False:
        raise Exception("--input_folder_or_file could not be found: \
        {}".format(arg_dict["input_folder_or_file"]))
    # Make sure variable is full path and add os.sep if directory
    arg_dict["input_folder_or_file"] = os.path.abspath(arg_dict["input_folder_or_file"])
    if os.path.isdir(arg_dict["input_folder_or_file"]):
        arg_dict["input_folder_or_file"] = arg_dict["input_folder_or_file"] + os.sep
    return arg_dict
#
# Argument parsing
PARSER = argparse.ArgumentParser()
PARSER.add_argument('--compress_or_decompress', type=str, required=True,
                    choices=["compress", "decompress"],
                    help='Chose whether to compress or decompress a directory')
PARSER.add_argument('--input_folder_or_file', '-i', type=str,
                    required=True, default=None,
                    help='Path to directory/file to compress or decompress.')

# Parse, validate/normalize, then run the chosen mode.
ARGS_DICT = vars(PARSER.parse_args())
ARGS_DICT = get_arguments_from_argparse(ARGS_DICT)
run_main(ARGS_DICT)
| 10,196 | 32.432787 | 108 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/accessory_scripts/test_complementary_mol_library.py | """
This script will test a complementary molecule library to ensure all compounds
react in all reactions they may be used in.
Example submit:
python autogrow4/accessory_scripts/test_complementary_mol_library.py \
--rxn_library_file \
autogrow4/autogrow/operators/mutation/smiles_click_chem/reaction_libraries/click_chem_rxns/ClickChem_rxn_library.json \
--function_group_library \
autogrow4/autogrow/operators/mutation/smiles_click_chem/reaction_libraries/click_chem_rxns/ClickChem_functional_groups.json \
--complementary_mol_directory \
autogrow4/autogrow/operators/mutation/smiles_click_chem/reaction_libraries/click_chem_rxns/complementary_mol_dir \
--output_folder autogrow4/accessory_scripts/output/
"""
import __future__
import os
import json
import copy
import argparse
import rdkit
import rdkit.Chem as Chem
from rdkit.Chem import AllChem
# Disable the unnecessary RDKit warnings
rdkit.RDLogger.DisableLog("rdApp.*")
import support_scripts.Multiprocess as mp
import support_scripts.mol_object_handling as MOH
class SmilesClickChem():
"""
This class will take a molecule and Mutate it by reacting it.
This is modified from the AutoGrow source code file:
/autogrow4/autogrow/operators/mutation/smiles_click_chem/smiles_click_chem.py
Unused sections were removed for simplicity.
"""
def __init__(self, rxn_library_variables, list_of_already_made_smiles):
    """
    init for SmilesClickChem. This will set up all the reaction and
    functional dictionaries required to Mutate a molecular

    Inputs:
    :param list rxn_library_variables: a list of user variables which
        define the rxn_library, rxn_library_file,
        complementary_mol_directory, and function_group_library. ie.
        rxn_library_variables = [vars['rxn_library'],
        vars['rxn_library_file'],
        vars['function_group_library'],vars['complementary_mol_directory']]
    :param list list_of_already_made_smiles: a list of lists. Each
        sublist contains info about a smiles made in this generation via
        mutation ie.[['O=C([O-])',
        '(Gen_3_Mutant_37_747+ZINC51)Gen_4_Mutant_15_52']]
    """
    # Unpackage the rxn_library_variables
    rxn_library = rxn_library_variables[0]
    rxn_library_file = rxn_library_variables[1]
    function_group_library = rxn_library_variables[2]
    complementary_mol_dir = rxn_library_variables[3]
    # Dictionary of all possible ClickChem reactions, keyed by reaction name.
    self.reaction_dict = self.retrieve_reaction_dict(
        rxn_library, rxn_library_file
    )
    # Retrieve the dictionary containing
    # all the possible ClickChem Reactions
    self.list_of_reaction_names = list(self.reaction_dict.keys())
    # SMARTS patterns used to identify functional groups on a molecule.
    self.functional_group_dict = self.retrieve_functional_group_dict(
        rxn_library, function_group_library
    )
    # Complementary-reactant lookup keyed by functional-group name.
    self.complementary_mol_dict = self.retrieve_complementary_dictionary(
        rxn_library, complementary_mol_dir
    )
    # List of already predicted smiles (first element of each sublist).
    self.list_of_already_made_smiles = [x[0] for x in list_of_already_made_smiles]
def rxn_lib_format_json_dict_of_dict(self, old_dict):
    """
    json dictionaries import as type unicode. This converts all the keys
    and items to strings, with a few specific exceptions. It takes both the
    functional group dictionary and the reaction library.

    The reaction library is a dictionary of dictionaries and has two
    exceptions which are not intended to be strings: "num_reactants"
    converts to int and "functional_groups" converts to a list of strings.
    The functional_group_dictionary is simply a dictionary with all items
    and keys needing to be strings.

    Fix vs. original: uses isinstance() instead of `type(x) == dict` and a
    list comprehension instead of the manual append loop; behavior is
    otherwise unchanged.

    Inputs:
    :param dic old_dict: a dictionary of the the reaction library or
        functional groups. This is what is imported from the .json file.
    Returns:
    :returns: dic new_dict: a dictionary of the the reaction library or
        functional groups where the unicode type items have been replaced
        with the proper python data types.
    """
    new_dict = {}
    for rxn_key, rxn_dic_old in old_dict.items():
        key_str = str(rxn_key)
        # For reaction libraries (nested dictionaries)
        if isinstance(rxn_dic_old, dict):
            new_sub_dict = {}
            for sub_key, item in rxn_dic_old.items():
                sub_key_str = str(sub_key)
                if sub_key_str == "num_reactants":
                    item = int(item)
                elif sub_key_str == "functional_groups":
                    item = [str(i) for i in item]
                else:
                    item = str(item)
                new_sub_dict[sub_key_str] = item
            new_dict[key_str] = new_sub_dict
        # For functional groups (flat string values)
        else:
            new_dict[key_str] = str(rxn_dic_old)
    return new_dict
def retrieve_reaction_dict(self, rxn_library, rxn_library_file):
    """
    This is where all the chemical reactions for SmartClickChem are
    retrieved. If you want to add more just add a Custom set of reactions
    please add a folder to
    PATH/autogrow/operators/mutation/smiles_click_chem/Reaction_libraries/.
    They should be formatted as a dictionary of dictionary using the same
    format as :
    os.path.join(pwd,"reaction_libraries",
    "click_chem_rxns","ClickChem_rxn_library.json")

    The reactions are written as SMARTS-reaction strings.

    This dictionary uses the reaction name as the key and the Reaction
    Smarts as the value.

    Inputs:
    :param str rxn_library: A string defining the choice of the reaction
        library. ClickChem uses the set of reactions from Autogrow 3.1.2.
        Custom means you've defined a path to a Custom library in
        vars['rxn_library_file']
    :param str rxn_library_file: a PATH to a Custom reaction library file
        formatted in a dictionary of dictionaries. in a .json file. This will
        be a blank string if one choses a predefined rxn_library option.
    Returns:
    :returns: dict reaction_dict: A dictionary containing all the
        reactions for ClickChemistry and all the information required to run
        the reaction
    """
    # Get the JSON file to import the proper reaction library
    pwd = os.path.dirname(__file__)
    if rxn_library_file == "":
        # No explicit path given: resolve one of the bundled libraries.
        if rxn_library == "click_chem_rxns":
            rxn_library_file = os.path.join(
                pwd,
                "reaction_libraries",
                "click_chem_rxns",
                "ClickChem_rxn_library.json"
            )
        elif rxn_library == "robust_rxns":
            rxn_library_file = os.path.join(
                pwd,
                "reaction_libraries",
                "robust_rxns",
                "Robust_Rxns_rxn_library.json"
            )
        elif rxn_library == "all_rxns":
            rxn_library_file = os.path.join(
                pwd,
                "reaction_libraries",
                "all_rxns",
                "All_Rxns_rxn_library.json"
            )
        elif rxn_library == "Custom":
            # NOTE(review): rxn_library_file is "" in this branch, so this
            # existence check always fails and raises — confirm that a
            # Custom library is required to supply a non-empty path.
            if os.path.exists(rxn_library_file) is False:
                raise Exception(
                    "Custom rxn_library_file cannot be found. "
                    + "Please check the path: ",
                    rxn_library_file,
                )
        else:
            raise Exception(
                "rxn_library is not incorporated into smiles_click_chem.py"
            )

        # Import the proper reaction library JSON file
        try:
            with open(rxn_library_file, "r") as rxn_file:
                reaction_dict_raw = json.load(rxn_file)
        except:
            raise Exception(
                "rxn_library_file json file not able to be imported."
                + " Check that the rxn_library is formatted correctly"
            )
    elif type(rxn_library_file) == str:
        # An explicit path was supplied: it must be an existing .json file.
        if os.path.exists(rxn_library_file) is False:
            raise Exception(
                "Custom specified rxn_library_file directory can not be found"
            )
        if os.path.isfile(rxn_library_file) is False:
            raise Exception(
                "Custom specified rxn_library_file is not a file"
            )
        try:
            extension = os.path.splitext(rxn_library_file)[1]
        except:
            raise Exception(
                "Custom specified rxn_library_file is not .json file."
                + " It must be a .json dictionary"
            )
        if extension != ".json":
            raise Exception(
                "Custom specified rxn_library_file is not .json file."
                + " It must be a .json dictionary"
            )

        # Import the proper reaction library JSON file
        try:
            with open(rxn_library_file, "r") as rxn_file:
                reaction_dict_raw = json.load(rxn_file)
        except:
            raise Exception(
                "Custom specified rxn_library_file json file not able to "
                + "be imported. Check that the rxn_library is "
                + "formatted correctly"
            )
    else:
        raise Exception(
            "Custom specified rxn_library_file directory can not be found"
        )

    # Convert the reaction_dict_raw from unicode to the proper
    # python types (str keys/values, int num_reactants, list of str
    # functional_groups).
    reaction_dict = self.rxn_lib_format_json_dict_of_dict(reaction_dict_raw)
    return reaction_dict
def retrieve_functional_group_dict(self, rxn_library, function_group_library):
"""
This retrieves a dictionary of all functional groups required for the
respective reactions. This dictionary will be used to identify
possible reactions.
This is where all the functional groups which will be used in the
SmartClickChem reactions are retrieved. If you want to add more just
add a Custom set of reactions please add a folder to
PATH/autogrow/operators/mutation/smiles_click_chem/Reaction_libraries/.
They should be formatted as a dictionary of dictionary using the same
format as :
os.path.join(pwd,"reaction_libraries","click_chem_rxns",
"ClickChem_functional_groups.json")
IF YOU CHOSE TO DO A Custom REACTION SET YOU MUST PROVIDE A DICTIONARY
OF ALL FUNCTIONAL GROUPS IT WILL REACT. IF YOU FORGET TO ADD A
FUNCTIONAL GROUP TO YOUR Custom DICTIONARY, THE REACTION MAY NEVER BE
UTILIZED.
Please note if your functional groups involve stereochemistry
notations such as '\' please replace with '\\' (all functional
groups should be formatted as SMARTS)
Inputs:
:param str rxn_library: A string defining the choice of the reaction
library. ClickChem uses the set of reactions from Autogrow 3.1.2.
Custom means you've defined a path to a Custom library in
vars['function_group_library']
:param str function_group_library: a PATH to a Custom functional group
dictionary in a .json file. This will be a blank string if one choses
a predefined functional groups option.
Returns:
:returns: dict functional_group_dict: A dictionary containing all
SMARTS for identifying the functional groups for ClickChemistry
"""
# Get the JSON file to import the proper reaction library
pwd = os.path.dirname(__file__)
if function_group_library == "":
if rxn_library == "click_chem_rxns":
function_group_library = os.path.join(
pwd, "reaction_libraries",
"click_chem_rxns",
"ClickChem_functional_groups.json",
)
elif rxn_library == "robust_rxns":
function_group_library = os.path.join(
pwd, "reaction_libraries",
"robust_rxns",
"Robust_Rxns_functional_groups.json",
)
elif rxn_library == "all_rxns":
function_group_library = os.path.join(
pwd, "reaction_libraries",
"all_rxns", "All_Rxns_functional_groups.json",
)
elif rxn_library == "Custom":
if os.path.exists(function_group_library) is False:
raise Exception(
"Custom function_group_library cannot be found. "
+ "Please check the path: ",
function_group_library,
)
else:
raise Exception(
"rxn_library is not incorporated into smiles_click_chem.py"
)
# Import the proper function_group_library JSON file
try:
with open(function_group_library, "r") as func_dict_file:
functional_group_dict_raw = json.load(func_dict_file)
except:
raise Exception(
"function_group_library json file not able to be imported. "
+ "Check that the rxn_library is formatted correctly"
)
elif type(function_group_library) == str:
if os.path.exists(function_group_library) is False:
raise Exception(
"Custom specified function_group_library directory can not be found"
)
if os.path.isfile(function_group_library) is False:
raise Exception("Custom specified function_group_library is not a file")
try:
extension = os.path.splitext(function_group_library)[1]
except:
raise Exception(
"Custom specified function_group_library is not .json "
+ "file. It must be a .json dictionary"
)
if extension != ".json":
raise Exception(
"Custom specified function_group_library is not .json "
+ "file. It must be a .json dictionary"
)
# Import the proper function_group_library JSON file
try:
with open(function_group_library, "r") as func_dict_file:
functional_group_dict_raw = json.load(func_dict_file)
except:
raise Exception(
"function_group_library json file not able to be imported."
+ " Check that the rxn_library is formatted correctly"
)
else:
raise Exception(
"Custom specified function_group_library directory can not be found"
)
# Convert the reaction_dict_raw from unicode to the proper
functional_group_dict = self.rxn_lib_format_json_dict_of_dict(
functional_group_dict_raw
)
return functional_group_dict
def retrieve_complementary_dictionary(self, rxn_library, complementary_mol_dir):
"""
Based on user controlled variables, this definition will retrieve a
dictionary of molecules separated into classes by their functional
groups. The sorting of a .smi file into this should be handled in the
user parameter testing when autogrow is initially started.
Inputs:
:param str rxn_library: A string defining the choice of the reaction
library. ClickChem uses the set of reactions from Autogrow 3.1.2.
Custom means you've defined a path to a Custom library in
vars['complementary_mol_dir']
:param dict complementary_mol_dir: the path to the
complementary_mol_dir directory. It may be an empty string in which
case the complementary_mol_dir directory will default to those of the
rxn_library
Returns:
:returns: dict complementary_mols_dict: a dictionary of complementary molecules
"""
script_dir = os.path.dirname(os.path.realpath(__file__))
if complementary_mol_dir == "":
if rxn_library == "click_chem_rxns":
complementary_mol_dir = os.path.join(
script_dir,
"reaction_libraries",
"click_chem_rxns",
"complementary_mol_dir",
)
elif rxn_library == "robust_rxns":
complementary_mol_dir = os.path.join(
script_dir,
"reaction_libraries",
"robust_rxns",
"complementary_mol_dir",
)
elif rxn_library == "all_rxns":
complementary_mol_dir = os.path.join(
script_dir,
"reaction_libraries",
"all_rxns",
"complementary_mol_dir",
)
elif rxn_library == "Custom":
if os.path.isdir(complementary_mol_dir) is False:
raise Exception(
"Custom complementary_mol_dir cannot be found. "
+ "Please check the path: ",
complementary_mol_dir,
)
else:
raise Exception(
"rxn_library is not incorporated into smiles_click_chem.py"
)
else:
if os.path.isdir(complementary_mol_dir) is False:
raise Exception(
"complementary_mol_dir is not a directory. It must be a \
directory with .smi files containing SMILES specified by \
functional groups.These .smi files must be named the same \
as the files in the complementary_mol_dir."
)
# Make a list of all the functional groups. These will be the name of
# the .smi folders already separated by group.
functional_groups = self.functional_group_dict.keys()
missing_smi_files = []
complementary_mols_dict = {}
for group in functional_groups:
filepath = "{}{}{}.smi".format(complementary_mol_dir, os.sep, group)
if os.path.isfile(filepath) is True:
complementary_mols_dict[group] = filepath
else:
missing_smi_files.append(filepath)
print(
"Could not find the following .smi file for complementary "
+ " molecules for Mutation: {}".format(filepath)
)
if len(missing_smi_files) != 0:
raise Exception(
"The following .smi file for complementary molecules "
+ "for Mutation is missing: ",
missing_smi_files,
)
return complementary_mols_dict
#
def get_usable_format(infile):
    """
    Read a formatted .smi file into a list of per-line field lists.

    Each line must contain at minimum:
        part 1: the SMILES string
        part 2: the SMILES name/ID
    Optional trailing info (kept verbatim):
        part -1: the SMILES diversity score relative to its population
        part -2: the fitness metric for evaluating (Docking score by default;
            a unique scoring function shifts Docking to -3 and itself to -2)
    Any other information MUST sit between part 2 and part -2. Fields are
    tab-delimited; lines containing no tab fall back to a four-space
    delimiter.

    Inputs:
    :param str infile: the PATHname of a formatted .smi file to be read into
        the program
    Returns:
    :returns: list usable_list_of_smiles: a list with one sub-list of string
        fields per input line, usable by the rest of Autogrow
    Raises:
    :raises Exception: if infile does not exist
    """
    if not os.path.exists(infile):
        print("\nFile of Source compounds does not exist: {}\n".format(infile))
        raise Exception("File of Source compounds does not exist")
    # IMPORT SMILES FROM THE PREVIOUS GENERATION
    usable_list_of_smiles = []
    with open(infile) as smiles_file:
        for line in smiles_file:
            line = line.replace("\n", "")
            # Primary delimiter is a tab; fall back to four spaces when the
            # line contains no tab character.
            parts = line.split("\t")
            if len(parts) == 1:
                parts = line.split("    ")
            usable_list_of_smiles.append(list(parts))
    return usable_list_of_smiles
#
def react_with_multiple_reactants(mol_tuple, mol_name, rxn_obj):
    """
    Run a tuple of reactant molecules through a reaction and report whether
    it produced any product.

    (The previous docstring described the 1-reactant variant; this function
    takes the full reactant tuple.)

    Inputs:
    :param tuple mol_tuple: a tuple of all mols to react
    :param str mol_name: name of the molecule being tested
    :param rdkit.Chem.rdChemReactions.ChemicalReaction rxn_obj: the reaction
        object to use
    Returns:
    :returns: str mol_name: returns the mol_name if it fails to react;
        returns None if it passes reaction
    """
    try:
        # Keep only the first product of each product set.
        reaction_products_list = [
            products[0] for products in rxn_obj.RunReactants(mol_tuple)
        ]
    except Exception:
        # RDKit raises on incompatible/malformed reactants; count as a failed
        # reaction (bare except would also have hidden KeyboardInterrupt).
        return mol_name
    if len(reaction_products_list) == 0:
        # Reaction ran but produced nothing.
        return mol_name
    # Created a new compound, so the molecule passes.
    return None
#
def run_a_single_reactant_reaction(mol_info, rxn_obj):
    """
    Run a single molecule through a 1-reactant reaction.

    Inputs:
    :param list mol_info: list of mol info where
        mol_info[0] is the SMILES,
        mol_info[1] is the name,
        mol_info[-1] is the rdkit mol obj
    :param rdkit.Chem.rdChemReactions.ChemicalReaction rxn_obj: the reaction
        object to use
    Returns:
    :returns: str mol_name: returns the mol_name if it fails to react;
        returns None if it passes reaction
    """
    mol_name = mol_info[1]
    reactant_mol = mol_info[-1]
    try:
        reaction_products_list = rxn_obj.RunReactants((reactant_mol,))
    except Exception:
        # RDKit raises on incompatible reactants; treat as a failed reaction
        # (narrowed from a bare except that also caught SystemExit etc.).
        return mol_name
    if len(reaction_products_list) == 0:
        return mol_name
    # Created a new compound, so it passes.
    return None
#
def get_rxn_and_examples(current_rxn_dict):
    """
    Build the rdkit reaction object for one reaction and self-test it on the
    reaction's bundled example reactants.

    Raises (after printing diagnostics) if an example reactant fails to
    sanitize, if the reaction SMARTS cannot be compiled, or if the reaction
    fails to run on its own example compounds.

    Inputs:
    :param dict current_rxn_dict: a dictionary of information about a
        reaction; must provide "reaction_name", "example_rxn_reactants"
        (the string form of a Python list of SMILES), and "reaction_string"
        (a reaction SMARTS)
    Returns:
    :returns: tuple example_rxn_reactants: a tuple of rdkit
        mol objects that are example compounds
    :returns: rdkit.Chem.rdChemReactions.ChemicalReaction rxn_obj: the
        reaction object to use
    """
    rxn_name = current_rxn_dict["reaction_name"]
    # Test example reactants
    # "example_rxn_reactants" is stored as the string form of a Python list,
    # e.g. "['CCO','OCC']"; strip the list brackets/quotes/spaces and split
    # on the "','" separators. The order of these .replace calls matters.
    example_smiles_rxn_reactants = current_rxn_dict["example_rxn_reactants"]
    example_smiles_rxn_reactants = example_smiles_rxn_reactants.replace("['", "").replace("']", "")
    example_smiles_rxn_reactants = example_smiles_rxn_reactants.replace(" ", "").replace('"', "")
    example_smiles_rxn_reactants = example_smiles_rxn_reactants.split("','")
    example_rxn_reactants = []
    for smile_str in example_smiles_rxn_reactants:
        # Remove any residual quoting/whitespace on the individual SMILES.
        smile_str = smile_str.replace("'", "").replace('"', "")
        smile_str = smile_str.replace(" ", "")
        example_mol = Chem.MolFromSmiles(smile_str)
        example_mol = MOH.check_sanitization(example_mol)
        if example_mol is None:
            # A bad example compound makes the whole reaction untestable.
            print(smile_str)
            printout = "example mol from rxn: {}".format(rxn_name)
            printout = printout + " failed to sanitize in RDKit"
            print(printout)
            raise Exception(printout)
        example_rxn_reactants.append(example_mol)
    # convert example_rxn_reactants to tuple
    example_rxn_reactants = tuple(example_rxn_reactants)
    reaction_string = current_rxn_dict["reaction_string"]
    try:
        rxn_obj = AllChem.ReactionFromSmarts(reaction_string)
        rxn_obj.Initialize()
    except:
        printout = "rxn {} failed to be created.".format(rxn_name)
        printout = printout + "Rxn SMART is flawed"
        print(printout)
        raise Exception(printout)
    # Demo on example reactants
    # Sanity check: the reaction must succeed on its own example reactants.
    # react_with_multiple_reactants returns None on success.
    example_results = react_with_multiple_reactants(example_rxn_reactants,
                                                    "test_reactions", rxn_obj)
    if example_results is not None:
        printout = "rxn {} failed to run on example compounds.".format(rxn_name)
        printout = printout + "\nPlease check example compounds"
        print(printout)
        raise Exception(printout)
    return example_rxn_reactants, rxn_obj
#
def run_all_for_fun_group(vars, fun_group, rxns_by_fun_group, a_smiles_click_object):
    """
    Run all testing for a single functional group.

    Reads the group's complementary-mol .smi file, sanitizes each molecule,
    then tries every reaction that consumes this functional group, filling the
    other reactant slots with the reaction's example compounds. Compounds
    which pass every reaction are written to <output_folder>/<fun_group>.smi.

    Inputs:
    :param dict vars: Dictionary of User variables; must contain
        "number_of_processors" and "output_folder"
    :param str fun_group: functional group name
    :param dict rxns_by_fun_group: Dictionary of rxn names organized by
        functional groups
    :param obj a_smiles_click_object: a SmilesClickChem-style object providing
        complementary_mol_dict and reaction_dict
    Returns:
    :returns: list failed_to_react: a list of [rxn_name, failed_mol_names]
        pairs, one per reaction tried
    :returns: list failed_to_sanitize: a list of mol info lists which failed
        to sanitize
    """
    # unpack variables
    complementary_mol_dict = a_smiles_click_object.complementary_mol_dict
    reaction_dict = a_smiles_click_object.reaction_dict
    number_of_processors = vars["number_of_processors"]
    output_folder = vars["output_folder"]
    smi_comp_file = complementary_mol_dict[fun_group]
    fun_group_list = get_usable_format(smi_comp_file)
    # Sanitize every complementary molecule up front; keep the rdkit mol
    # appended to a deep copy of its info list so the source list is untouched.
    fun_group_mol_list = []
    failed_to_sanitize = []
    for info in fun_group_list:
        mol = Chem.MolFromSmiles(info[0])
        mol = MOH.check_sanitization(mol)
        if mol is None:
            failed_to_sanitize.append(info)
            continue
        temp = copy.deepcopy(info)
        temp.append(mol)
        fun_group_mol_list.append(temp)
    # print info about failures
    if len(failed_to_sanitize) != 0:
        printout = "{} compounds ".format(len(failed_to_sanitize))
        printout = printout + "failed to sanitize from: {}".format(fun_group)
        print(printout)
    failed_to_react = []
    for rxn_name in rxns_by_fun_group[fun_group]:
        current_rxn_dict = reaction_dict[rxn_name]
        example_reactants, rxn_obj = get_rxn_and_examples(current_rxn_dict)
        list_of_reactants = []
        functional_groups_rxn = current_rxn_dict["functional_groups"]
        # Find which reactant slot of this reaction takes fun_group.
        # NOTE(review): if fun_group appears more than once, this keeps the
        # LAST matching index.
        i_count_to_use = None
        for i_count in range(len(functional_groups_rxn)):
            f_group = functional_groups_rxn[i_count]
            if fun_group == f_group:
                i_count_to_use = i_count
            else:
                continue
        if i_count_to_use is None:
            raise Exception("This is a code error.")
        # Build one reactant tuple per test molecule: the test mol fills its
        # slot, example compounds fill every other slot.
        list_of_reactants = []
        for mol_info in fun_group_mol_list:
            mol_tuple_temp = []
            for i_count in range(len(functional_groups_rxn)):
                if i_count == i_count_to_use:
                    mol_tuple_temp.append(mol_info[-1])
                else:
                    mol_tuple_temp.append(example_reactants[i_count])
            list_of_reactants.append(tuple([tuple(mol_tuple_temp), mol_info[1], rxn_obj]))
        # Fan out across processors; non-None results are failed mol names.
        output = mp.multi_threading(list_of_reactants, number_of_processors,
                                    react_with_multiple_reactants)
        output = [x for x in output if x is not None]
        failed_to_react.append([rxn_name, output])
        # print info about failures
        # NOTE(review): the message reads "...failed to react from react
        # from..." — duplicated wording in the original.
        if len(output) != 0:
            printout = "{} compounds failed to react from ".format(len(output))
            printout = printout + "react from {} ".format(fun_group)
            printout = printout + "in rxn: {}".format(rxn_name)
            print(printout)
    # A molecule "passes" only if it reacted in every reaction tried.
    master_failed_to_react = []
    master_passes_reactions = []
    for fail_mol_list in failed_to_react:
        master_failed_to_react.extend(fail_mol_list[1])
    for mol_info in fun_group_list:
        if mol_info[1] in master_failed_to_react:
            continue
        master_passes_reactions.append(" ".join(mol_info))
    # write to output .smi file
    with open(output_folder + fun_group + ".smi", "w") as f:
        f.write("\n".join(master_passes_reactions))
    return failed_to_react, failed_to_sanitize
#
def run_main(vars):
    """
    Run the full complementary-library check.

    Loads the custom reaction library via SmilesClickChem, groups reaction
    names by the functional groups they consume, tests every complementary
    molecule of every group, and writes JSON failure logs (plus per-group
    .smi files of passing mols, from run_all_for_fun_group) into
    vars["output_folder"].

    Inputs:
    :param dict vars: Dictionary of User variables
    """
    # Force rxn_library to be custom because why else run this
    rxn_library = "Custom"
    output_folder = vars["output_folder"]
    rxn_library_file = vars["rxn_library_file"]
    function_group_library = vars["function_group_library"]
    complementary_mol_dir = vars["complementary_mol_directory"]
    rxn_library_variables = [
        rxn_library,
        rxn_library_file,
        function_group_library,
        complementary_mol_dir
    ]
    new_mutation_smiles_list = []
    a_smiles_click_chem_object = SmilesClickChem(
        rxn_library_variables, new_mutation_smiles_list
    )
    list_of_reaction_names = a_smiles_click_chem_object.list_of_reaction_names
    functional_group_dict = a_smiles_click_chem_object.functional_group_dict
    reaction_dict = a_smiles_click_chem_object.reaction_dict
    # Invert the reaction dict: map each functional group to the list of
    # reaction names that consume it.
    # NOTE(review): raises KeyError if a reaction lists a functional group
    # absent from functional_group_dict.
    rxns_by_fun_group = {}
    for fun_group in functional_group_dict.keys():
        rxns_by_fun_group[fun_group] = []
    for rxn_name in list_of_reaction_names:
        current_rxn_dict = reaction_dict[rxn_name]
        for fun_group in current_rxn_dict["functional_groups"]:
            temp_list = rxns_by_fun_group[fun_group]
            temp_list.append(rxn_name)
            rxns_by_fun_group[fun_group] = temp_list
    failed_to_sanitize_by_fun_group = {}
    failed_to_react_by_fun_group = {}
    for fun_group in rxns_by_fun_group.keys():
        failed_to_react, failed_to_sanitize = run_all_for_fun_group(vars, fun_group,
                                                                    rxns_by_fun_group,
                                                                    a_smiles_click_chem_object)
        failed_to_react_by_fun_group[fun_group] = failed_to_react
        failed_to_sanitize_by_fun_group[fun_group] = failed_to_sanitize
    # Handle saving log
    with open(output_folder + "failed_to_sanitize_mol_by_fun_group.json", "w") as fp:
        json.dump(failed_to_sanitize_by_fun_group, fp, indent=4)
    with open(output_folder + "failed_to_react_by_fun_group.json", "w") as fp:
        json.dump(failed_to_react_by_fun_group, fp, indent=4)
    # Flatten and dedupe the failures for the final summary printout.
    master_failed_list = []
    for fun_group in failed_to_react_by_fun_group.keys():
        temp = [x[1] for x in failed_to_react_by_fun_group[fun_group]]
        for x in temp:
            master_failed_list.extend(x)
    master_failed_list = list(set(master_failed_list))
    if len(master_failed_list) == 0:
        print("All compounds passed!")
    else:
        print("{} compounds failed. Please check logs".format(len(master_failed_list)))
#
def get_arguments_from_argparse(args_dict):
    """
    Validate and normalize the argparse parameters for this script.

    Verifies the reaction library, functional-group library, and
    complementary-mol directory paths; coerces number_of_processors to an int
    (defaulting to -1 = all processors); and converts output_folder to an
    absolute path ending in os.sep, creating the directory if absent.

    Inputs:
    :param dict args_dict: dictionary of parameters
    Returns:
    :returns: dict args_dict: the same dictionary, normalized in place
    Raises:
    :raises ValueError: if any required parameter is missing or invalid
    :raises Exception: if output_folder cannot be made or found
    """
    # Argument handling
    if args_dict["rxn_library_file"] == "" or args_dict["function_group_library"] == "":
        raise ValueError(
            "TO USE Custom REACTION LIBRARY OPTION, ONE MUST SPECIFY \
            THE PATH TO THE REACTION LIBRARY USING INPUT PARAMETER rxn_library"
        )
    if not os.path.exists(args_dict["rxn_library_file"]):
        raise ValueError(
            "TO USE Custom REACTION LIBRARY OPTION, ONE MUST SPECIFY \
            THE PATH TO THE REACTION LIBRARY USING INPUT PARAMETER rxn_library"
        )
    if args_dict["complementary_mol_directory"] == "":
        raise ValueError(
            "TO USE Custom REACTION LIBRARY OPTION, ONE MUST SPECIFY THE PATH \
            TO THE REACTION LIBRARY USING INPUT PARAMETER function_group_library"
        )
    if not os.path.isdir(args_dict["complementary_mol_directory"]):
        raise ValueError(
            "TO USE Custom REACTION LIBRARY OPTION, ONE MUST SPECIFY THE PATH \
            TO THE REACTION LIBRARY USING INPUT PARAMETER complementary_mol_directory"
        )
    # Default, then coerce, the processor count.
    if "number_of_processors" not in args_dict:
        args_dict["number_of_processors"] = -1
    try:
        args_dict["number_of_processors"] = int(args_dict["number_of_processors"])
    except (TypeError, ValueError):
        raise ValueError(
            "number_of_processors must be an int. \
                To use all processors set to -1.")
    # The long output_folder requirement message is used both for a missing
    # key and for a malformed value; build it once.
    output_folder_error = (
        "output_folder is a required variable. it is the PATH to where "
        "filtered .smi file and log files will be placed. Will save a file "
        "in this directory for mols which failed sanitization, mols which "
        "failed to react in specific reactions, and .smi files that contain "
        "all mols that reacted properly."
    )
    if "output_folder" not in args_dict:
        raise ValueError(output_folder_error)
    if not isinstance(args_dict["output_folder"], str) or args_dict["output_folder"] == "":
        raise ValueError(output_folder_error)
    # Normalize to an absolute path with a trailing separator.
    args_dict["output_folder"] = os.path.abspath(args_dict["output_folder"]) + os.sep
    if os.path.exists(args_dict["output_folder"]):
        if not os.path.isdir(args_dict["output_folder"]):
            print(args_dict["output_folder"])
            printout = "output_folder must be a directory. Please check input arguments"
            raise ValueError(printout)
    else:
        # Best-effort creation; existence is re-checked below so a failure
        # here surfaces as the Exception that follows.
        try:
            os.mkdir(args_dict["output_folder"])
        except OSError:
            pass
    if not os.path.exists(args_dict["output_folder"]):
        raise Exception("output_folder could not be made or found.")
    return args_dict
#
# Argument parsing
PARSER = argparse.ArgumentParser()
# Mutation Settings
PARSER.add_argument(
"--rxn_library_file",
type=str,
default="",
required=True,
help="This PATH to a Custom json file of SMARTS reactions to use for Mutation."
)
PARSER.add_argument(
"--function_group_library",
type=str,
default="",
required=True,
help="This PATH for a dictionary of functional groups to be used for Mutation.",
)
PARSER.add_argument(
"--complementary_mol_directory",
type=str,
default="",
required=True,
help="This PATH to the directory containing all the molecules being used \
to react with. The directory should contain .smi files contain SMILES of \
molecules containing the functional group represented by that file. Each file \
should be named with the same title as the functional groups described in \
rxn_library_file & function_group_library +.smi \
All Functional groups specified function_group_library must have its \
own .smi file. We recommend you filter these dictionaries prior to Autogrow \
for the Drug-likeliness and size filters you will Run Autogrow with.",
)
PARSER.add_argument(
"--output_folder",
type=str,
default="",
required=True,
help="This PATH to where filtered .smi file and log files will be placed. \
Will save a file in this directory for mols which failed sanitization, \
mols which failed to react in specific reactions, and .smi files \
that contain all mols that reacted properly.",
)
# processors and multithread mode
PARSER.add_argument(
"--number_of_processors",
"-p",
type=int,
default=-1,
help="Number of processors to use for parallel calculations. \
Set to -1 for all available CPUs.",
)
ARGS_DICT = vars(PARSER.parse_args())
ARGS_DICT = get_arguments_from_argparse(ARGS_DICT)
run_main(ARGS_DICT)
print("done")
| 37,953 | 38.617954 | 125 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/accessory_scripts/make_lineage_figures.py | """
This script creates figures for all ligands which parented a given ligand.
All compounds for the entire AutoGrow run will be compiled into a dictionary \
which is used to search when tracing lineages. We pickle these dictionaries so \
that if this script is run multiple times these dictionaries do not need to be \
recreated. For this reason the 1st time running this script on a data set will \
take longer than future runs.
"""
import os
import sys
import glob
import argparse
import json
import copy
import pickle
import matplotlib.pyplot as plt
import rdkit
import rdkit.Chem as Chem
from rdkit.Chem import Draw, AllChem
from PIL import Image
#Disable the unnecessary RDKit warnings
rdkit.RDLogger.DisableLog('rdApp.*')
##################################################################
##################################################################
########### BASIC OPERATIONS #####################################
##################################################################
##################################################################
def get_obj_from_pickle_file(file_path):
    """
    Load and return the object(s) stored in a pickle file.

    Inputs:
    :param str file_path: path to pickle File
    Returns:
    :returns: unknown objects: object(s) from a pickle file
    """
    with open(file_path, 'rb') as pickled_file:
        return pickle.load(pickled_file)
def write_pickle_to_file(file_path, obj):
    """
    Serialize an object to a pickle file using the highest protocol.

    Inputs:
    :param str file_path: path to output pickle File
    :param unknown obj: object(s) to pickle
    """
    with open(file_path, 'wb') as out_handle:
        pickle.dump(obj, out_handle, protocol=pickle.HIGHEST_PROTOCOL)
def get_usable_format(infile):
    """
    Parse a formatted .smi file into a list of per-line field lists.

    Each line holds, at minimum, the SMILES string followed by the SMILES
    name/ID. Optional trailing columns (fitness metric at -2, diversity
    score at -1) and anything between part 2 and part -2 are kept verbatim.
    Fields are tab-separated, with a four-space fallback for lines that
    contain no tab.

    Inputs:
    :param str infile: the PATHname of a formatted .smi file to be read into
        the program
    Returns:
    :returns: list usable_list_of_smiles: one sub-list of string fields per
        input line, usable by the rest of Autogrow
    """
    if not os.path.exists(infile):
        print("\nFile of Source compounds does not exist: {}\n".format(infile))
        raise Exception("File of Source compounds does not exist")
    # IMPORT SMILES FROM THE PREVIOUS GENERATION
    usable_list_of_smiles = []
    with open(infile) as smiles_file:
        for raw_line in smiles_file:
            stripped = raw_line.replace("\n", "")
            fields = stripped.split("\t")
            if len(fields) == 1:
                # No tab present: fall back to the four-space delimiter.
                fields = stripped.split("    ")
            usable_list_of_smiles.append([field for field in fields])
    return usable_list_of_smiles
#####################################################################
# Make images
#####################################################################
def get_image_dimensions(imagefile):
    """
    Helper function that returns the image dimensions and file size.

    :param str imagefile: path to an image file readable by PIL
    :return: dict of the form
        {"width": <int>, "height": <int>, "size_bytes": <int>}
    """
    # Open via a context manager so PIL releases the file handle promptly.
    # (PIL's Image is imported at the top of this module.)
    with Image.open(imagefile) as img:
        # Width and height of the image in pixels.
        width, height = img.size
        # File size on disk in bytes.
        size_bytes = os.path.getsize(imagefile)
    return dict(width=width, height=height, size_bytes=size_bytes)
def get_grid_img(img_files_list, list_printout_info, result_grid_filename):
    """
    Plot a single row of images with captions and save it to a file.

    Inputs:
    :param list img_files_list: list of paths to the img file for each subplot
    :param list list_printout_info: list of caption strings, one per subplot,
        in the same order as img_files_list
    :param str result_grid_filename: path to the output figure file
    """
    images_count = len(img_files_list)
    # Scale the figure from the first image's pixel dimensions.
    dimmension_dict = get_image_dimensions(img_files_list[0])
    width = dimmension_dict["width"] / 30
    height = dimmension_dict["height"] / 30
    fig, axs_list = plt.subplots(1, images_count, figsize=(width * images_count, height))
    if images_count == 1:
        # plt.subplots returns a bare Axes (not an array) for a single
        # subplot; wrap it so one loop handles both cases. The single-image
        # figure keeps its original bold title.
        axs_list = [axs_list]
        title_kwargs = {"fontsize": 40, "fontweight": "bold"}
    else:
        title_kwargs = {"fontsize": 40}
    for sub_ax, image_filename, printout in zip(axs_list, img_files_list, list_printout_info):
        # BUG FIX: imread's second argument is the file *format*; the caption
        # string was previously passed there by mistake.
        plt_image = plt.imread(os.path.abspath(image_filename))
        sub_ax.imshow(plt_image)
        sub_ax.set_title(printout, **title_kwargs)
        sub_ax.grid(False)
        sub_ax.axis(False)
        sub_ax.autoscale_view('tight')
        del plt_image
    plt.savefig(result_grid_filename)
    del fig
def make_single_image_files(vars, lineage_dict, mol_dict):
    """
    Create one .png image file per ligand in mol_dict, plus a blank
    "None.png" used later as a spacer tile in the lineage grids.

    Images are written into vars["single_image_folder"], named after each
    full-length ligand name.

    Inputs:
    :param dict vars: dictionary of user variables; must contain
        "single_image_folder" (a directory path ending in a separator)
    :param dict lineage_dict: dict of ancestor lists keyed by generation
        number; only the number of generations is used here, to pick a
        smaller tile size for deep lineages
    :param dict mol_dict: dict keyed by full-length ligand name; the last
        element of each value is assumed to be an rdkit mol object
        — TODO confirm against the caller that builds mol_dict
    """
    # Deep lineages get smaller tiles so the final grid stays manageable.
    if len(list(lineage_dict.keys())) <= 6:
        img_size = 500
    else:
        img_size = 250
    # make single img files for each ligand
    # make a blank None image used later for spacers
    mol_none = Chem.MolFromSmiles("")
    img = Draw.MolsToGridImage([mol_none], molsPerRow=1, subImgSize=(img_size, img_size))
    img_file_name = vars["single_image_folder"]+ "None.png"
    img.save(img_file_name)
    for mol_name in mol_dict.keys():
        # Deep-copy so Compute2DCoords does not mutate the shared mol object.
        mol = copy.deepcopy(mol_dict[mol_name][-1])
        tmp = AllChem.Compute2DCoords(mol)
        img = Draw.MolsToGridImage([mol], molsPerRow=1,
                                   subImgSize=(img_size, img_size))
        img_file_name = vars["single_image_folder"]+ mol_name + ".png"
        img.save(img_file_name)
        del tmp
    #
def make_image_files(vars, lineage_dict, mol_dict):
    """
    Create the per-ligand image files and one composite row image per
    generation of the lineage.

    Each row image is written to vars["ancestry_image_folder"]/<gen_num>.png
    and shows every ancestor slot of that generation, captioned with its
    shorthand name and docking score ("COMP" when no score exists).

    Inputs:
    :param dict vars: dictionary of user variables; must contain
        "single_image_folder" and "ancestry_image_folder"
    :param dict lineage_dict: a dict of lists of ancestors where the keys are
        the generation number relative to the creation of the child ligand
        and the lists hold each parent ligand's full-length name, or None
        when a slot has no parent
    :param dict mol_dict: dict keyed by full-length ligand name; index [2]
        appears to be the shorthand name and index [4] the Vina docking
        score (None when not docked) — TODO confirm against the caller
    """
    # make individual ligand images
    make_single_image_files(vars, lineage_dict, mol_dict)
    if os.path.exists(vars["ancestry_image_folder"]) is False:
        os.mkdir(vars["ancestry_image_folder"])
    for gen_num in lineage_dict.keys():
        result_grid_filename = vars["ancestry_image_folder"] + \
            str(gen_num) + ".png"
        lineage_name_list = lineage_dict[gen_num]
        img_files_list = []
        list_printout_info = []
        for mol_name in lineage_name_list:
            if mol_name is None:
                # Spacer tile with no caption.
                img_files_list.append(vars["single_image_folder"] + \
                    "None.png")
                list_printout_info.append("")
            else:
                img_files_list.append(vars["single_image_folder"] + \
                    mol_name + ".png")
                # set properties
                if mol_dict[mol_name][4] is None:
                    printout = str(mol_dict[mol_name][2]) + "\nVina: " \
                        + "COMP kcal/mol"
                else:
                    printout = str(mol_dict[mol_name][2]) + "\nVina: " \
                        + str(mol_dict[mol_name][4])+ " kcal/mol"
                list_printout_info.append(printout)
        get_grid_img(img_files_list, list_printout_info, result_grid_filename)
#####################################################################
# get parents for a ligand
#####################################################################
def get_parents_full_names(child_name, master_shortname_mol_dict):
    """
    Get full-length names for each parent for a given child ligand.
    Will return as list of names for parents. This will always be a list of 2.
    There are three options of what is returned:
    1) child ligand has no parents: ie) source/complementary ligand)
        will return [None, None]
    2) child ligand has 1 parent: ie) single reactant mutant
        will return ["parent_1_name", None]
    3) child ligand has 2 parents: ie) crossover or two reactant mutation
        will return ["parent_1_name", "parent_2_name"]
    Inputs:
    :param str child_name: full-length name of child ligand to find parents,
        e.g. "(Gen_2_Mutant_7_97143)Gen_4_Mutant_7_802531"; the parent
        shorthand names are inside the parentheses, "+"-separated.
    :param dict master_shortname_mol_dict: dictionary where keys are
        shorthand names and the items the full-length name.
    Returns:
    :returns: list parent_list: a list of strings or Nones for each parent
        (see the three cases above).
    Raises:
    :raises Exception: if a parent's shorthand name is missing from
        master_shortname_mol_dict.
    """
    def _resolve(parent_short):
        # Map one shorthand parent name to its full-length name, raising the
        # same error as before if the lookup table is missing the ligand.
        if parent_short not in master_shortname_mol_dict.keys():
            raise Exception("a parent is not in master_shortname_mol_dict " \
                + "this means that the dictionary is missing information on" \
                + " a ligand. missing parrent is: {}".format(parent_short))
        return master_shortname_mol_dict[parent_short]

    # Handle if no parents (no "(...)" prefix in the name)
    if "(" not in child_name and ")" not in child_name:
        return [None, None]
    parents_info = child_name.split(")")[0].replace("(", "")
    # Handle single parent case
    if "+" not in parents_info:
        return [_resolve(parents_info), None]
    # Two parents, "+"-separated inside the parentheses.
    parent_1_short = parents_info.split("+")[0]
    parent_2_short = parents_info.split("+")[1]
    return [_resolve(parent_1_short), _resolve(parent_2_short)]
#
def get_all_ancestors(mol_name, master_shortname_mol_dict):
    """
    Build the complete ancestry of a ligand, generation by generation.

    The result maps each generation number (relative to the AutoGrow run)
    to an ordered list of full-length ancestor names. Every earlier
    generation's list is twice as long as the one that follows it, with
    None used as a placeholder wherever a parent does not exist, so the
    lists line up as a binary lineage tree, e.g.:
        {
        0:[A, None, B, C, D, None, None, None],
        1:[(A)E, (B+C)F (D)G, None],
        2:[(E+F)H, (G)I],
        3:[(H+I)J],
        }
    Inputs:
    :param str mol_name: full-length name of the ligand whose ancestry
        should be traced.
    :param dict master_shortname_mol_dict: dictionary where keys are
        shorthand names and the items the full-length name.
    Returns:
    :returns: dict lineage_dictionary: generation number -> ordered list of
        full-length ancestor names (or None placeholders).
    Raises:
    :raises Exception: if mol_name has no parents (i.e. it came from the
        source compound list).
    """
    if ")Gen_" not in mol_name:
        raise Exception("mol_name provided either does not have parents "
                        "and is likely from source compound list.")
    start_generation = int(mol_name.split(")Gen_")[-1].split("_")[0])
    lineage_dictionary = {start_generation: [mol_name]}
    # The traced ligand must itself have parents, otherwise there is
    # nothing to trace.
    ancestors = get_parents_full_names(mol_name, master_shortname_mol_dict)
    if ancestors == [None, None]:
        raise Exception("mol_name provided either does not have parents "
                        "and is likely from source compound list.")
    lineage_dictionary[start_generation - 1] = ancestors
    # Walk backwards toward generation zero, doubling the list each step.
    for generation in range(start_generation - 2, -1, -1):
        previous_level = []
        for ancestor in ancestors:
            if ancestor is None:
                # Placeholders double up so the tree stays aligned.
                previous_level.extend([None, None])
            else:
                previous_level.extend(
                    get_parents_full_names(ancestor,
                                           master_shortname_mol_dict))
        lineage_dictionary[generation] = previous_level
        ancestors = previous_level
        if set(previous_level) == {None}:
            # All ancestors are None; nothing further to trace.
            break
    return lineage_dictionary
#
#####################################################################
# make/retrive pickle dictionaries
#####################################################################
def make_master_shortname_mol_dict(vars, master_mol_dict):
    """
    Build and pickle the shorthand-name -> full-length-name lookup table.

    The table is written to the pickle file at
    vars["master_shortname_mol_dict_pickle"] so later runs can reuse it.
    Inputs:
    :param dict vars: dictionary of variables to use
    :param dict master_mol_dict: master dictionary with the long names of
        ligands as keys; index [2] of each info list is the shorthand name
    Returns:
    :returns: dict master_shortname_mol_dict: dictionary where keys are
        shorthand names and the items the full-length name.
    """
    shortname_lookup = {
        info[2]: full_name for full_name, info in master_mol_dict.items()
    }
    # Persist so subsequent lineage traces can skip rebuilding the table.
    write_pickle_to_file(vars["master_shortname_mol_dict_pickle"],
                         shortname_lookup)
    return shortname_lookup
#
def merge_comp_and_ranked_dicts(vars):
    """
    Merge the complementary-mol and ranked-ligand dictionaries into one
    master dictionary and pickle it.

    The result is saved to the pickle file at vars["master_mol_dict_pickle"].
    Inputs:
    :param dict vars: dictionary of variables to use
    Returns:
    :returns: dict master_mol_dict: master dictionary with all ligands entered
    """
    master_mol_dict = copy.deepcopy(
        get_obj_from_pickle_file(vars["comp_dict_pickle"]))
    ranked_mol_dict = get_obj_from_pickle_file(vars["ranked_mol_dict_pickle"])
    # Ranked entries win on duplicates: the complementary-mol dict carries
    # no docking information, so overwriting any overlap (a ligand present
    # both in the source list and the complementary library) is safe.
    master_mol_dict.update(ranked_mol_dict)
    del ranked_mol_dict
    write_pickle_to_file(vars["master_mol_dict_pickle"], master_mol_dict)
    return master_mol_dict
#
def make_comp_mol_dict(vars):
    """
    Create and pickle a dictionary of all complementary molecules.

    These mol_dicts can be quite large and memory intensive so the result
    is written to the pickle file at vars["comp_dict_pickle"] rather than
    returned; reopen it later to minimize memory overhead.
    Inputs:
    :param dict vars: dictionary of variables to use; must provide
        "complementary_mol_directory" and "comp_dict_pickle"
    Raises:
    :raises Exception: if no .smi files are found in the
        complementary_mol_directory (only valid for autoclickchem rxns).
    """
    smi_paths = glob.glob(
        vars["complementary_mol_directory"] + os.sep + "*.smi")
    if len(smi_paths) == 0:
        raise Exception("No .smi files found for complementary_mol_directory.\n" + \
            "please check: {}".format(vars["complementary_mol_directory"]))
    # First pass: name -> SMILES for every complementary molecule.
    comp_dict = {}
    for smi_path in smi_paths:
        for entry in get_usable_format(smi_path):
            comp_dict[entry[1]] = entry[0]
    del smi_paths
    # Second pass: expand each value to the same info-list layout used for
    # scored ligands (score slots left as None, rdkit mol appended last).
    for lig_name, smiles in list(comp_dict.items()):
        comp_dict[lig_name] = [smiles, lig_name, lig_name, lig_name,
                               None, None, Chem.MolFromSmiles(smiles)]
    write_pickle_to_file(vars["comp_dict_pickle"], comp_dict)
    del comp_dict
#
def make_ranked_files_mol_dict(vars):
    """
    Create and pickle a dictionary of all ranked ligands.

    Collects every generation_*_ranked.smi file under the input directory,
    adds the source compounds, converts numeric columns to floats, and —
    when a ligand name appears more than once — keeps the entry with the
    best (most negative) fitness value. The result is written to the pickle
    file at vars["ranked_mol_dict_pickle"] rather than returned, because
    these mol_dicts can be quite large and memory intensive; reopen later
    to minimize memory overhead.
    Inputs:
    :param dict vars: dictionary of variables to use
    Raises:
    :raises Exception: if no ranked.smi files are found, or if the source
        compound file has an inconsistent number of columns per line.
    """
    dir_w_all_gens = vars["input_dir"]
    ranked_file_list = glob.glob(str(os.sep).join([dir_w_all_gens,
                                                   "generation_*",
                                                   "generation_*_ranked.smi"]))
    if len(ranked_file_list) == 0:
        raise Exception("Could not find any ranked.smi files within the input_dir.\
            Please make sure input_dir has folders named 'generation_' + int that \
            contain .smi files named 'generation_{}_ranked.smi'.format(int) ")
    source_compound_file = vars["source_compound_file"]
    # BUGFIX: compare paths by value (!=), not identity (is not); string
    # identity is an implementation detail and made this filter unreliable.
    ranked_file_list = [x for x in list(set(ranked_file_list))
                        if x != source_compound_file]
    # make a list of molecule information
    mol_list = []
    for ranked_file in ranked_file_list:
        mol_list.extend(get_usable_format(ranked_file))
    # Add Source compounds
    source_compound_list = get_usable_format(source_compound_file)
    len_of_each_mol_info = [len(x) for x in source_compound_list]
    if len(list(set(len_of_each_mol_info))) != 1:
        print(list(set(len_of_each_mol_info)))
        raise Exception("The source compound file is inconsistently with the number\
            of columns per line. Please correct this so that each line has the \
            same number of columns.")
    # If the source compounds were previously docked keep all info because
    # there is docking info
    if vars["use_docked_source_compounds"] is True:
        # BUGFIX: previously this extended mol_list with itself
        # (mol_list.extend(mol_list)), which duplicated every ranked ligand
        # and silently dropped the source compounds from the dictionary.
        mol_list.extend(source_compound_list)
    else:
        # If there are only two columns we assume it is the SMILES and the
        # source name. Otherwise we print a message saying we are ignoring
        # any additional information because --use_docked_source_compounds==False
        if list(set(len_of_each_mol_info))[0] != 2:
            print("\nWARNING: There are multiple columns within the source \
                compound file ({}), but --use_docked_source_compounds is set \
                to False. You will also need to delete the pickled dictionaries \
                produced by this script before re-running the script. \n\
                We will ignore any information other than the first \
                two columns in the source compound file. This may mean that we \
                ignore docking scores or use the full-length names of compounds \
                in generation zero.\n".format(source_compound_file))
        # Pad each source entry to the standard info-list layout with empty
        # score slots (None, None).
        new_source_compound_list = []
        for mol_info in source_compound_list:
            temp_info = [mol_info[0], mol_info[1], mol_info[1], mol_info[1],
                         None, None]
            new_source_compound_list.append(temp_info)
        mol_list.extend(new_source_compound_list)
    # Convert numeric columns to floats and append an rdkit mol object.
    typed_mol_list = []
    for mol_info in mol_list:
        typed_entry = []
        for field in mol_info:
            try:
                typed_entry.append(float(field))
            except (ValueError, TypeError):
                # Non-numeric column (SMILES, names, None): keep as-is.
                typed_entry.append(field)
        typed_entry.append(Chem.MolFromSmiles(typed_entry[0]))
        typed_mol_list.append(typed_entry)
    del mol_list
    # Deduplicate by ligand name (index 1), preferring ranked entries and,
    # among ranked entries, the better (more negative) fitness at index -3.
    mol_dict = {}
    for mol_entry in typed_mol_list:
        lig_name = mol_entry[1]
        if lig_name not in mol_dict.keys():
            # new entry lets add it to the dictionary.
            mol_dict[lig_name] = mol_entry
            continue
        # source compounds may be unranked or may be ranked from advancement
        # but not in the source compound .smi, so we try to keep the ranked
        # version if it exists.
        if type(mol_entry[-3]) not in [float, int]:
            # the current line is not ranked
            continue
        if type(mol_dict[lig_name][-3]) not in [float, int]:
            # the current line is ranked but the previously entered version
            # was not. Lets overwrite it
            mol_dict[lig_name] = mol_entry
        elif float(mol_entry[-3]) < float(mol_dict[lig_name][-3]):
            # Both entries of the ligand are ranked. Take the better ranked
            # version of the two.
            mol_dict[lig_name] = mol_entry
    del typed_mol_list
    # Write to pickle file
    write_pickle_to_file(vars["ranked_mol_dict_pickle"], mol_dict)
    del mol_dict
#
def get_mol_dict(vars):
    """
    Load or build the two ligand lookup dictionaries.

    If the pickled dictionaries from a previous run exist they are loaded;
    otherwise the intermediate dictionaries are created, merged, and saved
    as pickle files for future reuse.
    Inputs:
    :param dict vars: dictionary of variables to use
    Returns:
    :returns: dict master_mol_dict: dictionary containing the information
        from every ligand from the AutoGrow run; keys are full-length names.
    :returns: dict master_shortname_mol_dict: dictionary where keys are
        shorthand names and the items the full-length name.
    """
    master_pickle = vars["master_mol_dict_pickle"]
    shortname_pickle = vars["master_shortname_mol_dict_pickle"]
    if os.path.exists(master_pickle):
        # Master dictionary was built on a previous run; skip the smaller
        # intermediate dictionaries entirely.
        print("Getting master_mol_dict from pickle file")
        master_mol_dict = get_obj_from_pickle_file(master_pickle)
    else:
        # Build any missing intermediate dictionary, then merge.
        if not os.path.exists(vars["ranked_mol_dict_pickle"]):
            print("Creating ranked_files_mol_dict")
            make_ranked_files_mol_dict(vars)
        if not os.path.exists(vars["comp_dict_pickle"]):
            print("Creating comp_mol_dict")
            make_comp_mol_dict(vars)
        print("Creating master_mol_dict")
        master_mol_dict = merge_comp_and_ranked_dicts(vars)
    if os.path.exists(shortname_pickle):
        print("Getting master_shortname_mol_dict from pickle file")
        master_shortname_mol_dict = get_obj_from_pickle_file(shortname_pickle)
    else:
        master_shortname_mol_dict = make_master_shortname_mol_dict(
            vars, master_mol_dict)
    return master_mol_dict, master_shortname_mol_dict
##################################################################
#####################################################################
# I/O
#####################################################################
def get_full_length_mol_name(vars, master_mol_dict, master_shortname_mol_dict):
    """
    Get full-length mol_name and make sure that it is in the master_mol_dict.

    The lookup order is: full-length name, shorthand name, then (for gypsum
    variants such as "Gen_5_Mutant_46_684401__1") the name with the "__N"
    suffix stripped.
    Inputs:
    :param dict vars: dictionary of variables to use; reads vars["mol_name"]
    :param dict master_mol_dict: dictionary containing the information from
        every ligand from the AutoGrow run; keys are full-length names.
    :param dict master_shortname_mol_dict: dictionary where keys are
        shorthand names and the items the full-length name.
    Returns:
    :returns: str mol_name: full-length name of ligand.
    Raises:
    :raises Exception: if the name cannot be resolved in either dictionary.
    """
    def _unknown_name_error():
        # Build the user-facing error text for an unresolvable name.
        return "mol_name provided not found in shorthand or" \
            + "full-length dictionaries. Please check that mol_name is in" \
            + "the AutoGrow run tested. \n" \
            + "Name provided is :\n\t{}".format(vars["mol_name"]) \
            + "\nName should look like is :" \
            + "\n\t (Gen_2_Mutant_7_97143)Gen_4_Mutant_7_802531" \
            + "\n\t\t or \n\t Gen_4_Mutant_7_802531"

    if vars["mol_name"] in master_mol_dict.keys():
        # original name is already full-length name
        return vars["mol_name"]
    if vars["mol_name"] in master_shortname_mol_dict.keys():
        return master_shortname_mol_dict[vars["mol_name"]]
    # may be a gypsum variant with '__{}'.format(num) at the end
    # ie Gen_5_Mutant_46_684401 could be represented as Gen_5_Mutant_46_684401__1
    # BUGFIX: previously a successfully resolved variant name fell through
    # to an unconditional shorthand lookup of the ORIGINAL (suffixed) name,
    # which raised KeyError. Resolved names are now returned immediately.
    if "__" in vars["mol_name"]:
        test_name = vars["mol_name"].split("__")[0]
        if test_name in master_shortname_mol_dict.keys():
            return master_shortname_mol_dict[test_name]
        if test_name in master_mol_dict.keys():
            return test_name
    printout = _unknown_name_error()
    print(printout)
    raise Exception(printout)
#
def run_purge_previous_pickled_files(vars):
    """
    This will delete previously created pickled files within the input_dir.
    The four files it will delete are:
    `$input_dir/comp_dict_pickle`, `$input_dir/master_mol_dict_pickle`,
    `$input_dir/master_shortname_mol_dict_pickle`,
    and `$input_dir/ranked_mol_dict_pickle`.
    These files save time when you are tracing the lineage of multiple
    compounds, however purging these files may be helpful for space saving
    or if it had been previously run with an invalid input variable.
    Following file deletion the program will terminate (sys.exit(0)).
    inputs:
    :params dict vars: dictionary of argparse parameters; reads
        vars["input_dir"]
    """
    print("\nDELETING PREVIOUSLY GENERATED PICKLED FILES.\n")
    input_dir = vars["input_dir"] + os.sep
    if os.path.exists(input_dir) is False:
        raise Exception("Input folder {} does not\
        exist.".format(input_dir))
    for file_name in ["comp_dict_pickle", "master_mol_dict_pickle",
                      "master_shortname_mol_dict_pickle", "ranked_mol_dict_pickle"]:
        file_path = input_dir + file_name
        if os.path.exists(file_path) is False:
            # Nothing to delete; report and move on.
            printout = "Could not delete {} file".format(file_name)
            printout = printout + " as it was not located at:\n\t {}\n".format(file_path)
            print(printout)
        else:
            try:
                os.remove(file_path)
                print("Deleted: {}".format(file_path))
            except:
                # Deletion failed (most likely a permission problem); warn
                # but continue with the remaining files.
                printout = "WARNING: Could not delete {} file.\n".format(file_name)
                printout = printout + "\tPlease check file permissions of:"
                printout = printout + "\n\t\t {}\n".format(file_path)
                print(printout)
            # Check that it was successfully deleted.
            # NOTE(review): on the success path this re-check prints
            # "Deleted: ..." a second time — presumably intended as a
            # post-removal confirmation; confirm before changing.
            if os.path.exists(file_path) is False:
                print("Deleted: {}".format(file_path))
    print("Attempt to delete files completed.")
    sys.exit(0)
def process_inputs(inputs):
    """
    This will handle processing all parameters.

    Validates and normalizes every CLI argument in place: resolves paths to
    absolute form, fills defaults from the run's vars.json where possible,
    creates output folders, and assigns the destination paths for the four
    pickle files. If --purge_previous_pickled_files is True the pickle
    files are deleted and the program terminates.
    inputs:
    :params dict inputs: dictionary of argparse parameters
    Returns:
    :returns: dict inputs: dictionary of argparse parameters (same object,
        mutated in place)
    Raises:
    :raises Exception: on any missing/invalid path or unresolvable setting.
    """
    # handle input information
    inputs["input_dir"] = os.path.abspath(inputs["input_dir"]) + os.sep
    if os.path.exists(inputs["input_dir"]) is False:
        raise Exception("Input folder {} does not\
        exist.".format(inputs["input_dir"]))
    # get vars dict from last run
    inputs["vars_json"] = inputs["input_dir"] + "vars.json"
    if os.path.exists(inputs["vars_json"]) is False:
        raise Exception("Input folder {} does not contain the vars.json file \
            necessary to run script. Please make sure the vars.json is in the \
            folder.".format(inputs["input_dir"]))
    try:
        with open(inputs["vars_json"], "r") as f:
            vars_dict = json.load(f)
    # NOTE(review): bare except hides the real JSON/IO error; consider
    # narrowing to (OSError, json.JSONDecodeError) and chaining the cause.
    except:
        raise Exception("variable file would not import. It should be the \
            vars.json file written by AutoGrow in the output folder of the run.")
    if inputs["complementary_mol_directory"] in ["", None]:
        # Get complementary_mol_directory from vars.json
        if vars_dict["complementary_mol_directory"] not in ["", None]:
            if os.path.exists(vars_dict["complementary_mol_directory"]):
                inputs["complementary_mol_directory"] = vars_dict["complementary_mol_directory"]
            else:
                # Can not find the one used in vars.json list
                raise Exception("Please provide path to complementary_mol_directory. \
                    vars.json file lists custom path to complementary_mol_director={} \
                    but this directory can not be found. Please provide path using \
                    --complementary_mol_directory $PATH/TO/complementary_mol_directory/")
        # Autodetect the predefined library location from the rxn_library
        # named in vars.json (directory layout relative to this script).
        elif vars_dict["rxn_library"].lower() in ["click_chem_rxns", "robust_rxns", "all_rxns"]:
            dir_above_script_dir = str(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
            complementary_mol_directory = str(os.sep).join([
                dir_above_script_dir, "autogrow", "operators", "mutation",
                "smiles_click_chem", "reaction_libraries",
                vars_dict["rxn_library"].lower(), "complementary_mol_dir"])
            complementary_mol_directory = os.path.abspath(complementary_mol_directory)
            if os.path.exists(complementary_mol_directory) is False:
                raise Exception("Please provide path to complementary_mol_directory. \
                    Could not find the location of the directory")
            inputs["complementary_mol_directory"] = complementary_mol_directory + os.sep
        else:
            raise Exception("Please provide path to complementary_mol_directory. \
                Could not find the location of the directory")
    else: # complementary_mol_directory was provided.
        inputs["complementary_mol_directory"] = \
            os.path.abspath(inputs["complementary_mol_directory"]) + os.sep
        if os.path.exists(inputs["complementary_mol_directory"]) is False:
            # Can not find the one used in vars.json list
            raise Exception("Please provide path to complementary_mol_directory. \
                provided path could not be found " \
                + ":\n\t{}".format(inputs["complementary_mol_directory"]))
        if len(glob.glob(inputs["complementary_mol_directory"] + "*.smi")) == 0:
            sub_dir = inputs["complementary_mol_directory"] + os.sep \
                + "complementary_mol_dir" + os.sep
            if len(glob.glob(sub_dir + "*.smi")) == 0:
                raise Exception("Please provide path to complementary_mol_directory. " \
                    + "provided path had no .smi files: " \
                    + "\n\t{}".format(inputs["complementary_mol_directory"]))
            # They provided 1 directory up...
            inputs["complementary_mol_directory"] = sub_dir
    if "use_docked_source_compounds" not in inputs.keys() or \
        inputs["use_docked_source_compounds"] in ["", None]:
        # Get whether they used use_docked_source_compounds from vars.json
        if "use_docked_source_compounds" in vars_dict:
            if vars_dict["use_docked_source_compounds"] in [True, False]:
                inputs["use_docked_source_compounds"] = vars_dict["use_docked_source_compounds"]
            else:
                raise Exception("Please provide the --use_docked_source_compounds setting \
                    used during the run. We could not auto-detect from the vars file.")
        else:
            raise Exception("Please provide the --use_docked_source_compounds setting \
                used during the run. We could not auto-detect from the vars file.")
    else:
        # Normalize the string/bool CLI value to a real bool.
        if inputs["use_docked_source_compounds"] in [True, "true", "True"]:
            inputs["use_docked_source_compounds"] = True
        elif inputs["use_docked_source_compounds"] in [False, "false", "False"]:
            inputs["use_docked_source_compounds"] = False
        else:
            raise Exception("Please check the --use_docked_source_compounds setting provided." \
                " --use_docked_source_compounds should be True or False.")
    # Handle output directory
    inputs["output_dir"] = os.path.abspath(inputs["output_dir"]) + os.sep
    if os.path.exists(inputs["output_dir"]) is False:
        try:
            os.mkdir(inputs["output_dir"])
            print("Made the output dir at: {}".format((inputs["output_dir"])))
        # Best-effort mkdir: failure is tolerated here because existence is
        # re-checked (and raised on) immediately below.
        except:
            pass
    if os.path.exists(inputs["output_dir"]) is False:
        raise Exception("Output folder {} does not\
        exist.".format(inputs["output_dir"]))
    # handle source_compound_file .smi file
    if type(inputs["source_compound_file"]) is not str or inputs["source_compound_file"] == "":
        raise Exception("--source_compound_file must be provided. It should be \
            the tab-delineated .smi file used to seed generation zero of the \
            AutoGrow run. This is a mandatory file.")
    inputs["source_compound_file"] = os.path.abspath(inputs["source_compound_file"])
    if os.path.exists(inputs["source_compound_file"]) is False:
        raise Exception("source_compound_file could not be found \
            at: {}".format(inputs["source_compound_file"]))
    if inputs["source_compound_file"].split(".")[-1] != "smi":
        raise Exception("--source_compound_file must be provided. It should be \
            the tab-delineated .smi file used to seed generation zero of the \
            AutoGrow run. This is a mandatory file.")
    # assign the destination for our pickle files (may already exist)
    inputs["ranked_mol_dict_pickle"] = inputs["input_dir"] + "ranked_mol_dict_pickle"
    inputs["comp_dict_pickle"] = inputs["input_dir"] + "comp_dict_pickle"
    inputs["master_mol_dict_pickle"] = inputs["input_dir"] + "master_mol_dict_pickle"
    inputs["master_shortname_mol_dict_pickle"] = inputs["input_dir"] \
        + "master_shortname_mol_dict_pickle"
    # handle singles image folder
    inputs["single_image_folder"] = inputs["output_dir"] + "single_image_folder" + os.sep
    if os.path.exists(inputs["single_image_folder"]) is False:
        os.mkdir(inputs["single_image_folder"])
    if "mol_name" not in inputs.keys():
        inputs["mol_name"] = None
    if inputs["pre_run"] is False:
        inputs["ancestry_image_folder"] = inputs["output_dir"] \
            + "ancestry_"+ inputs["mol_name"] + os.sep
        # Will wait to create this folder until its needed
    # Handle the cleanup variable purge_previous_pickled_files
    if "purge_previous_pickled_files" in inputs.keys():
        if inputs["purge_previous_pickled_files"] in [True, "true", "True"]:
            # We will delete files
            inputs["purge_previous_pickled_files"] = True
        elif inputs["purge_previous_pickled_files"] in [False, "false", "False"]:
            # We will not delete files
            inputs["purge_previous_pickled_files"] = False
        else:
            # Can not understand the input option
            raise Exception("Please check the --purge_previous_pickled_files setting provided." \
                " --purge_previous_pickled_files should be True or False.")
    else:
        inputs["purge_previous_pickled_files"] = False
    # If true delete files and terminate program
    if inputs["purge_previous_pickled_files"] is True:
        run_purge_previous_pickled_files(inputs)
    return inputs
#
def run_everything(vars):
    """
    Top-level driver: trace a ligand's lineage and emit its files.

    Loads (or builds) the master dictionaries, resolves the requested
    ligand name, collects its full ancestry, writes the lineage to a
    tab-separated .smi file in the output directory, and renders the
    lineage images. On a pre-run (or when no mol_name was given) the
    dictionaries are built and the program exits.
    Inputs:
    :params dict vars: dictionary of argparse parameters
    """
    master_mol_dict, master_shortname_mol_dict = get_mol_dict(vars)
    if vars["pre_run"] is True or vars["mol_name"] in [None, "None", ""]:
        print("pre-run completed")
        sys.exit(0)
    mol_name = get_full_length_mol_name(vars, master_mol_dict,
                                        master_shortname_mol_dict)
    print("The full-length name of the ligand is: ", mol_name)
    print("")
    lineage_dict = get_all_ancestors(mol_name, master_shortname_mol_dict)
    # Trim the master dict down to just the ligands in this lineage so the
    # big dictionaries can be released.
    mol_dict = {
        lig_name: master_mol_dict[lig_name]
        for gen_num in lineage_dict.keys()
        for lig_name in lineage_dict[gen_num]
        if lig_name is not None
    }
    del master_mol_dict
    del master_shortname_mol_dict
    # Write all information of the lineage to a .smi file.
    lineage_smi = vars["output_dir"] + \
        str(mol_name) + "_lineage.smi"
    output_lines = []
    for gen_num in lineage_dict.keys():
        for lig_name in lineage_dict[gen_num]:
            if lig_name is None:
                continue
            lig_info = copy.deepcopy(mol_dict[lig_name])
            del lig_info[-1]  # remove last item which is rdkit mol
            output_lines.append(
                "\t".join([str(field) for field in lig_info]) + "\n")
    with open(lineage_smi, 'w') as smi_file:
        smi_file.write("".join(output_lines))
    # generate images
    make_image_files(vars, lineage_dict, mol_dict)
######################################
######################################
######################################
# Command-line interface definition and script entry point.
PARSER = argparse.ArgumentParser()
# Get needed info
PARSER.add_argument(
    "--output_dir",
    "-o",
    metavar="param.output_dir",
    required=True,
    help="Path to folder to output files. will be created if does not exist",
)
PARSER.add_argument(
    "--input_dir",
    "-i",
    metavar="param.input_dir",
    required=True,
    help="Path to input folder containing the AutoGrow run. This should be the \
    top folder which contains the vars.json file.",
)
PARSER.add_argument(
    "--complementary_mol_directory",
    metavar="param.complementary_mol_directory",
    required=False,
    default="",
    help="If using a custom complementary molecule library for mutations this \
    path is required. If not the script will try to autodetect the location of \
    the predefined complementary_mol_directory. Many molecules generated by \
    mutation will required the complementary molecule that helped spawn them.",
)
PARSER.add_argument(
    "--source_compound_file",
    metavar="param.source_compound_file",
    required=True,
    default="",
    help="This is the source .smi file used to seed generation zero of the \
    AutoGrow run. This is an essential file.",
)
PARSER.add_argument(
    "--pre_run",
    metavar="param.pre_run",
    default=False,
    help="If True this will compile the necessary dictions/picklefiles and then \
    terminate. These pickle files are stored in the input folder containing the \
    vars.json file from the AutoGrow run.",
)
PARSER.add_argument(
    "--mol_name",
    metavar="param.mol_name",
    default=None,
    help="This is the name of the molecule whose lineage will be traced back. \
    If not provided or None, the script will simply compile the necessary \
    dictions/picklefiles and then terminate. These pickle files are stored \
    in the input folder containing the vars.json file from the AutoGrow run.\
    example mol_name: Gen_5_Cross_203131 or Gen_4_Mutant_7_802531 \
    can also be provided as full-name ie: \
    (Gen_2_Mutant_7_97143)Gen_4_Mutant_7_802531",
)
PARSER.add_argument(
    "--use_docked_source_compounds",
    metavar="param.use_docked_source_compounds",
    choices=[True, False, "True", "False", "true", "false"],
    default=None,
    help="If True source ligands were docked prior to seeding generation 1. \
    If True and the source_compound file may already have the docking/fitness \
    metric score in -2 column of .smi file.\
    If False, generation 1 was randomly seeded by the source compounds with \
    no preference and there was no generation 0 testing. \
    If not provided this script will autodetect it from the vars.json \
    file if possible.",
)
PARSER.add_argument(
    "--purge_previous_pickled_files",
    metavar="param.purge_previous_pickled_files",
    choices=[True, False, "True", "False", "true", "false"],
    default=False,
    help="If True the script will delete the four pickled files previously \
    created by this script: `comp_dict_pickle`, `master_mol_dict_pickle`, \
    `master_shortname_mol_dict_pickle`, and `ranked_mol_dict_pickle`. \
    These files save time when you are tracing the lineage of multiple \
    compounds, however purging these files may be helpful for space saving \
    or if it had been previously run with an invalid input variable. \
    This does not affect the lineage files located in `output_dir`. \
    Program will terminate once these files are deleted.",
)
# Parse the CLI into a plain dict (note: `vars` here is the builtin, not
# the parameter dict used throughout the functions above).
ARGSDICT = vars(PARSER.parse_args())
# Copy ARGSDICT so entries can be deleted from the copy while iterating
# through the original; unset (None) options are dropped so process_inputs
# can fall back to vars.json values.
INPUTS = copy.deepcopy(ARGSDICT)
for k, v in ARGSDICT.items():
    if v is None:
        del INPUTS[k]
VARS = process_inputs(INPUTS)
run_everything(VARS)
| 46,445 | 40.843243 | 100 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/accessory_scripts/convert_vina_docked_pdbqt_to_pdbs.py | """
This script will convert a docked .pdbqt.vina file into separate .pdb file.
This is done by splitting up a single .pdbqt.vina into separate .pdbqt
files for each docked pose.
Then it removes a column of the .pdbqt and saves as a .pdb file.
If variable --max_num_of_poses is not set it will convert all poses.
If --max_num_of_poses == 1 it will only convert the top docked pose to .pdb
If --max_num_of_poses == 2 it will only convert the top 2 docked poses to .pdb
If --max_num_of_poses == 10 but there are only 8 poses it will convert the 8 poses and stop
If --max_docking_score is not set it will convert all poses to .pdb;
If --max_docking_score == -10.0 it will only convert poses with docking scores less than
or equal to -10.0 (Remember docking scores are better when more negative)
--max_docking_score and --max_num_of_poses work as AND type operators.
Example:
--max_docking_score == -11.4 and --max_num_of_poses==5
It will take the top 5 poses as long as they also have docking scores <=-11.4
Example submit:
python PATH/autogrow4/accessory_scripts/convert_vina_docked_pdbqt_to_pdbs.py \
--vina_docked_pdbqt_file \
PATH/Run_1/Run_0/generation_30/PDBs/Gen_30_Cross_313228__1.pdbqt.vina \
--output_folder PATH/outfolder/ \
--max_num_of_poses 1 --number_of_processors -1
"""
import os
import copy
import argparse
import glob
import support_scripts.Multiprocess as mp
def run_conversion_for_a_vina_file(vina_file, output_folder, max_num_of_poses,
                                   max_docking_score, min_docking_score):
    """
    Convert one .pdbqt.vina file into one .pdb file per accepted pose.

    A pose must satisfy every supplied criterion (max_num_of_poses,
    max_docking_score, min_docking_score) to be converted.

    Inputs:
    :param str vina_file: Path to vina file to convert
    :param str output_folder: Path to the folder to write the .pdb files to
    :param int max_num_of_poses: Max number of poses to convert to pdb
        (-1 means no limit)
    :param float max_docking_score: Most positive docking score to be converted; float or None
    :param float min_docking_score: Most negative docking score to be converted; float or None
    """
    if os.path.exists(vina_file) is False:
        raise Exception("CANT FIND FILE:", vina_file)
    if os.path.exists(output_folder) is False:
        raise Exception("CANT FIND outfolder:", output_folder)
    # Output files are named <input basename>_pose_<N>.pdb
    short_name = os.path.basename(vina_file).replace(".pdbqt.vina", "")
    with open(vina_file, "r") as f:
        # Per-pose accumulation state.
        pose_number = 1          # 1-based index of the pose being read
        printout_list = []       # raw lines of the current pose
        score = 0.0
        terminate_run = False    # set once a pose exceeds max_docking_score
        write_pose = True        # whether the current pose passes the filters
        for line in f.readlines():
            if pose_number > max_num_of_poses and max_num_of_poses != -1:
                # break if hit max number of poses
                break
            if terminate_run is True:
                break
            if "REMARK VINA RESULT" in line:
                # Start of a new pose; its docking score is on this line.
                write_pose = True
                if max_docking_score is None and min_docking_score is None:
                    printout_list.append(line)
                else:
                    # Collapse repeated blanks, then parse the first token
                    # after "RESULT:" as the docking score.
                    temp_line = copy.deepcopy(line)
                    for i in range(10):
                        temp_line = temp_line.replace(" ", " ")
                    temp_line = temp_line.split("RESULT:")[1]
                    temp_line = [x for x in temp_line.split(" ") if x != "" and x != " "]
                    try:
                        score = float(temp_line[0])
                    except:
                        raise Exception("Score not in remark line for {}".format(vina_file))
                    if max_docking_score is not None:
                        if score > max_docking_score:
                            # Vina poses are ordered best-score-first, so all
                            # later poses would fail too; stop reading.
                            terminate_run = True
                            write_pose = False
                            break
                    if min_docking_score is not None:
                        if score < min_docking_score:
                            # This score is below the minimum but the
                            # poses after may not be docked as well.
                            # Normally this should be a stop but may
                            # be useful for studying poor poses...
                            write_pose = False
                    printout_list.append(line)
            elif "ENDMDL" in line:
                # End of the current pose: flush it if it passed the filters.
                if write_pose is True:
                    printout_list.append(line)
                    # convert list of pdbqt info to
                    # .pdb format by removing the partial charge info in ATOM line
                    printout_pdb = convert_pdbqt_to_pdb(printout_list)
                    # write to a file (note: `f` is deliberately reused here;
                    # the outer file was fully read by readlines() already)
                    outfile = output_folder + os.sep + short_name +\
                        "_pose_{}.pdb".format(pose_number)
                    with open(outfile, "w") as f:
                        f.write(printout_pdb)
                # Reset variables for the next iteration
                printout_list = []
                score = 0.0
                terminate_run = False
                write_pose = True
                printout_pdb = ""
                # update the counter of the pose number
                pose_number += 1
            else:
                # Body line of the current pose (ATOM/HETATM/etc.).
                printout_list.append(line)
#
def convert_pdbqt_to_pdb(list_of_lines):
    """
    Convert the lines of one docked pdbqt.vina pose into .pdb format.

    Inputs:
    :param list list_of_lines: list of lines of a docked pdbqt pose
    Returns:
    :returns: str printout: A string for a .pdb to write to a file
    """
    pdb_lines = []
    for line in list_of_lines:
        if "ATOM" in line or "HETATM" in line:
            # Keep columns 0-60 and 70-79; the dropped middle columns hold
            # the pdbqt-only partial-charge/atom-type fields. Slices clamp
            # automatically for lines shorter than the column ranges.
            pdb_lines.append(line[0:61] + line[70:80])
        elif "REMARK x y z vdW Elec" + \
            " q Type" in line \
            or "REMARK _______ _______ _______ _____ _____" + \
            " ______ ____" in line:
            # Column-header REMARK lines get the same trim plus a newline.
            pdb_lines.append(line[0:61] + line[70:80] + "\n")
        else:
            pdb_lines.append(line)
    return "".join(pdb_lines)
#
def start_run_main(vars):
    """
    Run the conversion for a single docked file or for every docked file
    in a directory.

    Inputs:
    :param dict vars: dictionary of user variables.
    """
    out_dir = vars["output_folder"] + os.sep
    pose_limit = vars["max_num_of_poses"]
    score_max = vars["max_docking_score"]
    score_min = vars["min_docking_score"]
    target = vars["vina_docked_pdbqt_file"]

    if os.path.isfile(target):
        # A single docked file: convert it directly.
        run_conversion_for_a_vina_file(target, out_dir, pose_limit,
                                       score_max, score_min)
        return

    # Otherwise treat the path as a folder and gather every docked file,
    # covering upper/lower-case extension variants.
    found = set()
    for pattern in ("*.pdbqt.vina", "*.PDBQT.vina", "*.pdbqt.VINA", "*.PDBQT.VINA"):
        found.update(glob.glob(target + pattern))
    pdbqt_files = list(found)
    if len(pdbqt_files) == 0:
        printout = "No .pdbqt.vina were found at: {}".format(target)
        raise Exception(printout)

    # run convert in multithread
    job_input = tuple(
        tuple([one_file, out_dir, pose_limit, score_max, score_min])
        for one_file in pdbqt_files
    )
    mp.multi_threading(job_input, -1, run_conversion_for_a_vina_file)
#
def get_arguments_from_argparse(args_dict):
    """
    Validate and normalize the argparse parameters for the script.

    Checks the input file/folder and output folder (creating the output
    folder if needed), then normalizes the pose/score limits.

    Inputs:
    :param dict args_dict: dictionary of parameters
    Returns:
    :returns: dict args_dict: dictionary of parameters, normalized in place
    """
    # Argument handling
    if type(args_dict["vina_docked_pdbqt_file"]) != str:
        raise Exception("provided vina_docked_pdbqt_file must be either a docked vina file or \
        a directory of docked vina files.")
    if type(args_dict["output_folder"]) != str:
        raise Exception("provided output_folder must be a directory.")

    # argument_handling
    if os.path.exists(args_dict["vina_docked_pdbqt_file"]) is False:
        raise Exception("provided vina_docked_pdbqt_file can not be found.")
    if ".pdbqt.vina" not in args_dict["vina_docked_pdbqt_file"].lower():
        if os.path.isdir(args_dict["vina_docked_pdbqt_file"]) is False:
            raise Exception("provided vina_docked_pdbqt_file must be either a docked vina file \
            containing .pdbqt.vina in file name or a directory of docked vina files.")
        # Directory input: add a trailing separator so glob patterns can be
        # appended directly.
        args_dict["vina_docked_pdbqt_file"] = args_dict["vina_docked_pdbqt_file"] + os.sep

    # Create the output folder if it does not already exist.
    if os.path.exists(args_dict["output_folder"]) is False:
        try:
            os.mkdir(args_dict["output_folder"])
        except:
            pass
        if os.path.exists(args_dict["output_folder"]) is False:
            raise Exception("output_folder could not be made or found.")
    else:
        if os.path.isdir(args_dict["output_folder"]) is False:
            raise Exception("output_folder needs to be a directory.")
    args_dict["output_folder"] = os.path.abspath(args_dict["output_folder"]) + os.sep

    # handle max_num_of_poses
    if args_dict["max_num_of_poses"] is not None:
        if type(args_dict["max_num_of_poses"]) != float and \
                type(args_dict["max_num_of_poses"]) != int:
            raise Exception("max_num_of_poses must be a int or None")
        if type(args_dict["max_num_of_poses"]) == float:
            args_dict["max_num_of_poses"] = int(args_dict["max_num_of_poses"])

    # handle max_docking_score
    if args_dict["max_docking_score"] is not None:
        # BUGFIX: this check used `or`, which is True for any value (nothing
        # is both a float and an int), so every numeric input was rejected.
        if type(args_dict["max_docking_score"]) != float and \
                type(args_dict["max_docking_score"]) != int:
            raise Exception("max_docking_score must be a float or None")
        if type(args_dict["max_docking_score"]) == int:
            # BUGFIX: previously converted max_num_of_poses by mistake.
            args_dict["max_docking_score"] = float(args_dict["max_docking_score"])

    # handle min_docking_score
    if args_dict["min_docking_score"] is not None:
        # BUGFIX: same `or` -> `and` correction as max_docking_score above.
        if type(args_dict["min_docking_score"]) != float and \
                type(args_dict["min_docking_score"]) != int:
            raise Exception("min_docking_score must be a float or None")
        if type(args_dict["min_docking_score"]) == int:
            # BUGFIX: previously converted max_num_of_poses by mistake.
            args_dict["min_docking_score"] = float(args_dict["min_docking_score"])
    return args_dict
#
# Argument parsing
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
'--vina_docked_pdbqt_file', '-f',
required=True, default=None,
help='Path to .pdbqt.vina file to split into 1 .pdb file per pose that matches all criteria. \
if this is a directory it will convert all of the files with the extension .pdbqt.vina'
)
PARSER.add_argument(
'--output_folder', '-o', type=str, default=None,
help='Path to folder where the .pdb files will be placed. \
Files will be the basename of the docked file with _pose_{pose_number}.pdb \
replacing the extension .pdbqt.vina.'
)
PARSER.add_argument(
'--max_num_of_poses', type=int, required=False, default=-1,
help='Each docked file will have 1 or more poses of the ligand. This setting \
controls how many are converted. default is -1 which means all poses possible. \
max_num_of_poses=1 means only the best docked pose will be converted. \
If additional criteria like max_docking_score is applied a pose must meet both criteria \
to be converted. ie) if max_num_of_poses= 5 and max_docking_score=-13.0 \
for a pose to be converted it must be between the 1st and 5th pose in the file and \
must have docked with a score less than or equal to -13.0.'
)
PARSER.add_argument(
'--max_docking_score', type=float, required=False, default=None,
help='The most positive docking score to be converted. (More negative scores \
are predicted to bind better). If additional criteria such as \
max_num_of_poses is applied a pose must meet both criteria \
to be converted. ie) if max_num_of_poses= 5 and max_docking_score=-13.0 \
for a pose to be converted it must be between the 1st and 5th pose in the file and \
must have docked with a score less than or equal to -13.0.'
)
PARSER.add_argument(
'--min_docking_score', type=float, required=False, default=None,
help='The most negative docking score to be converted. (More negative scores \
are predicted to bind better). If additional criteria such as \
max_num_of_poses is applied a pose must meet both criteria \
to be converted. ie) if min_docking_score= -15.0 and max_docking_score=-13.0 \
for a pose to be converted it must: \
-13.0. <= docking score <= -15.0'
)
PARSER.add_argument(
"--number_of_processors",
"-p",
type=int,
metavar="N",
default=-1,
help="Number of processors to use for parallel calculations.\
Set to -1 for all available CPUs."
)
ARGS_DICT = vars(PARSER.parse_args())
ARGS_DICT = get_arguments_from_argparse(ARGS_DICT)
start_run_main(ARGS_DICT)
print("finished")
| 13,901 | 39.530612 | 98 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/accessory_scripts/convert_single_ligand_pdbqt_to_pdb.py | """
This script will convert a pdbqt file into a .pdb file.
This is done by removing a column of the PDB file.
Run example:

python convert_single_ligand_pdbqt_to_pdb.py \
    --pdbqt_file $PATH/TO/ligand.pdbqt \
    --output_file $PATH/TO/ligand.pdb
"""
import __future__
import os
import argparse
def convert_pdbqt_to_pdb(pdbqt_file_in, pdb_file_out):
    """
    Converts a pdbqt file to a pdb file by removing the 3rd to last column.

    Inputs:
    :param str pdbqt_file_in: the string of .pdbqt to be formatted
    :param str pdb_file_out: the string of the output .pdb
    """
    converted = []
    with open(pdbqt_file_in) as in_file:
        for line in in_file.readlines():
            if "ATOM" in line:
                # Keep columns 0-60 and 70-79; the dropped middle columns
                # hold the pdbqt-only partial-charge/atom-type fields.
                # Slices clamp automatically for short lines.
                converted.append(line[0:61] + line[70:80])
            elif "REMARK x y z vdW Elec" + \
                " q Type" in line \
                or "REMARK _______ _______ _______ _____ _____" + \
                " ______ ____" in line:
                converted.append(line[0:61] + line[70:80] + "\n")
            else:
                converted.append(line)
    with open(pdb_file_out, "w") as out_file:
        out_file.write("".join(converted))
#
def get_arguments_from_argparse(args_dict):
    """
    Validate the argparse parameters and derive the default output path.

    Inputs:
    :param dict args_dict: dictionary of parameters
    Returns:
    :returns: dict args_dict: dictionary of parameters, with "output_file"
        filled in when the user did not supply one
    """
    # Argument handling
    if type(args_dict["pdbqt_file"]) != str:
        raise Exception("provided pdbqt_file must be a .pdbqt file.")

    # argument_handling
    if os.path.exists(args_dict["pdbqt_file"]) is False:
        raise Exception("provided pdbqt_file must be a .pdbqt file.")
    if args_dict["pdbqt_file"].split(".")[-1] != "pdbqt" and \
            args_dict["pdbqt_file"].split(".")[-1] != "PDBQT":
        raise Exception("provided pdbqt_file must be a .pdbqt file.")

    if args_dict["output_file"] is not None:
        # Explicit output path: must end in .pdb and its directory must
        # exist (or be creatable).
        if args_dict["output_file"].split(".")[-1] != "pdb" and \
                args_dict["output_file"].split(".")[-1] != "PDB":
            raise Exception("provided output_file must be a .pdb file.")
        if os.path.exists(os.path.dirname(args_dict["output_file"])) is False:
            try:
                os.mkdir(os.path.dirname(args_dict["output_file"]))
            except:
                pass
            if os.path.exists(os.path.dirname(args_dict["output_file"])) is False:
                raise Exception("directory to output the file could not be made or found.")
    else:
        # BUGFIX: the old code concatenated os.path.dirname() with the FULL
        # input path, producing a duplicated, invalid path (e.g.
        # "/a/b" + "/a/b/x.pdb" -> "/a/b/a/b/x.pdb"). The default output is
        # simply the input path with the extension swapped to .pdb.
        args_dict["output_file"] = args_dict["pdbqt_file"]\
            .replace(".pdbqt", ".pdb").replace(".PDBQT", ".pdb")
    return args_dict
#
# Argument parsing
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
'--pdbqt_file', '-f', required=True, default=None,
help='Path to .pdbqt file to convert to a .pdb file. This must be a single \
ligand and must end with .pdbqt')
PARSER.add_argument(
'--output_file', '-o', type=str, default=None,
help='Path to file where we will output .pdb file. \
If not provide the output .pdb will be the same as the input \
pdbqt_file but ending with .pdb instead of .pdbqt.')
ARGS_DICT = vars(PARSER.parse_args())
ARGS_DICT = get_arguments_from_argparse(ARGS_DICT)
# Run Converter
convert_pdbqt_to_pdb(ARGS_DICT["pdbqt_file"], ARGS_DICT["output_file"])
print("finished")
| 4,099 | 31.539683 | 91 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/accessory_scripts/fragmenter_of_smi_mol.py | """
This script will fragment a .smi
Example Run:
python fragmenter_of_smi_mol.py \
--smi_file autogrow4/source_compounds/PARPi.smi
"""
import itertools
import copy
import random
import os
import argparse
import rdkit
import rdkit.Chem as Chem
from rdkit.Chem.BRICS import BRICSDecompose
from rdkit import RDLogger
# Turn off warnings
RDLogger.DisableLog("rdApp.*")
import support_scripts.Multiprocess as mp
import support_scripts.mol_object_handling as MOH
def get_atom_w_iso_num(mol, iso_num):
    """
    Return the index of the first atom in mol carrying the given isotope
    label.

    Inputs:
    :param rdkit.Chem.rdchem.Mol mol: any rdkit mol
    :param int iso_num: the isotope number to get index
    Returns:
    :returns: int atom_idx: the atom.GetIdx() of the atom with the
        isotope label. If no atom matches, return None
    """
    # FIX: removed an unreachable `continue` that followed the `return`.
    for atom in mol.GetAtoms():
        if atom.GetIsotope() == iso_num:
            return atom.GetIdx()
    return None
#
def label_iso_num_w_idx(mol):
    """
    Stamp every atom's index onto its isotope field so atoms remain
    identifiable after bonds are cut.

    Inputs:
    :param rdkit.Chem.rdchem.Mol mol: any rdkit mol
    Returns:
    :returns: rdkit.Chem.rdchem.Mol mol: the same mol, isotope-labeled
    """
    for current_atom in mol.GetAtoms():
        current_atom.SetIsotope(current_atom.GetIdx())
    return mol
#
def get_rot_bond_permutations_to_cut(mol, c_c_bonds_off=False):
    """
    Find all permutations of cuttable rotatable bonds on a molecule.

    Aromatic bonds, bonds involving hydrogen, and (optionally) C-C single
    bonds are excluded. Bonds are identified by the isotope labels of their
    two atoms (see label_iso_num_w_idx) so they survive re-indexing.

    Inputs:
    :param rdkit.Chem.rdchem.Mol mol: any rdkit mol, isotope-labeled by index
    :param bool c_c_bonds_off: if True, C-C single bonds are not fragmented
    Returns:
    :returns: list permutations_of_bonds_to_remove: every combination
        (size 1..N) of the cuttable bonds, each bond as [iso1, iso2]
    """
    rotatable_bond = Chem.MolFromSmarts("[!$(*#*)&!D1]-&!@[!$(*#*)&!D1]")
    rotatable_bonds_set = mol.GetSubstructMatches(rotatable_bond)
    rotatable_bonds_to_frag = []
    for rot_bond in rotatable_bonds_set:
        atom1 = mol.GetAtomWithIdx(rot_bond[0])
        atom2 = mol.GetAtomWithIdx(rot_bond[1])
        atom_isos = [atom1.GetIsotope(), atom2.GetIsotope()]
        bond = mol.GetBondBetweenAtoms(rot_bond[0], rot_bond[1])
        if bond.GetIsAromatic() is True:
            continue
        # Remove any bonds including Hydrogen
        if atom1.GetAtomicNum() == 1 or atom2.GetAtomicNum() == 1:
            continue
        # Remove any C-C single bonds
        if atom1.GetAtomicNum() == 6 and atom2.GetAtomicNum() == 6:
            if c_c_bonds_off is True:
                continue
        # BUGFIX: the bond was previously appended twice, which doubled the
        # list and generated many redundant cut permutations below.
        rotatable_bonds_to_frag.append(atom_isos)
    permutations_of_bonds_to_remove = []
    # All combinations of 1..N bonds to cut simultaneously.
    for i in range(1, len(rotatable_bonds_to_frag) + 1):
        temp_perm_list = list(itertools.combinations(rotatable_bonds_to_frag, i))
        permutations_of_bonds_to_remove.extend(temp_perm_list)
    return permutations_of_bonds_to_remove
#
def remove_atoms(mol, list_of_idx_to_remove):
    """
    Remove the listed atom indices from an rdkit mol.

    Rdkit's RemoveAtom requires the editable Chem.EditableMol form, so the
    mol is converted, edited, and converted back. Any failure yields None
    rather than an exception (pipeline-friendly behavior).

    Inputs:
    :param rdkit.Chem.rdchem.Mol mol: any rdkit mol
    :param list list_of_idx_to_remove: a list of idx values to remove
        from mol
    Returns:
    :returns: rdkit.Chem.rdchem.Mol new_mol: the input mol with the listed
        atoms removed, or None on any failure
    """
    if mol is None:
        return None
    try:
        # Delete from the highest index down so earlier removals do not
        # shift the indices of atoms still queued for removal.
        doomed_atoms = list_of_idx_to_remove
        doomed_atoms.sort(reverse=True)
    except:
        return None
    try:
        editable_mol = Chem.EditableMol(mol)
        for atom_idx in doomed_atoms:
            editable_mol.RemoveAtom(atom_idx)
        return editable_mol.GetMol()
    except:
        return None
#
def get_brics_permutations(mol, min_frag_size=3):
    """
    Fragment a mol using BRICS decomposition (cuts along synthesizable
    bonds).

    Inputs:
    :param rdkit.Chem.rdchem.Mol mol: any rdkit mol
    :param int min_frag_size: minimum size of fragments to keep
    Returns:
    :returns: list clean_frag_list: unique fragment SMILES strings (dummy
        attachment atoms and isotope labels removed)
    """
    res = list(BRICSDecompose(mol, returnMols=True, minFragmentSize=min_frag_size))
    smis = [Chem.MolToSmiles(x, True) for x in res]

    # Get larger pieces
    res = list(
        BRICSDecompose(
            mol, returnMols=True, keepNonLeafNodes=True, minFragmentSize=min_frag_size
        )
    )
    smis.extend([Chem.MolToSmiles(x, True) for x in res])

    clean_frag_list = []
    for x in res:
        # Strip the BRICS dummy attachment atoms (atomic number 0).
        list_to_remove = []
        for i in x.GetAtoms():
            if i.GetAtomicNum() == 0:
                list_to_remove.append(i.GetIdx())
        x = remove_atoms(x, list_to_remove)
        # Clear isotope labels before canonicalizing the SMILES.
        for atom in x.GetAtoms():
            atom.SetIsotope(0)
        clean_frag_list.append(Chem.MolToSmiles(x))
    # BUGFIX: the deduplicated list was previously built but never assigned
    # (`list(set(list(...)))` discarded), so duplicates leaked through.
    clean_frag_list = list(set(clean_frag_list))
    return clean_frag_list
#
def remove_bonds(mol, list_of_atomiso_bondsets_to_remove):
    """
    This function removes bond from an rdkit mol based on
    a provided list. This list is a list of sets, with each set containing
    two atoms with the isotope label of that atom. Using Isotopes is to ensure
    that atom Idx dont change.

    Inputs:
    :param rdkit.Chem.rdchem.Mol mol: any rdkit mol
    :param list list_of_atomiso_bondsets_to_remove: a list of 2-item isotope
        label pairs identifying the bonds to remove from mol
    Returns:
    :returns: rdkit.Chem.rdchem.Mol new_mol: the rdkit mol as input but with
        the bonds from the list removed; None on any failure
    """
    # None's often end up in a pipeline use of RDKit so we handle this data type as return None
    # instead of raise TypeError
    if mol is None:
        return None

    # If mol is wrong data type (excluding None) raise TypeError
    if type(mol) != rdkit.Chem.rdchem.Mol and type(mol) != rdkit.Chem.rdchem.RWMol:
        printout = "mol is the wrong data type. \n"
        printout = printout + "Input should be a rdkit.Chem.rdchem.Mol\n"
        printout = printout + "Input mol was {} type.".format(type(mol))
        raise TypeError(printout)

    # Work on a copy so the caller's mol is never mutated.
    new_mol = copy.deepcopy(mol)
    if len(list_of_atomiso_bondsets_to_remove) == 0:
        return None
    for atomiso_bondsets in list_of_atomiso_bondsets_to_remove:
        if len(atomiso_bondsets) == 0:
            continue
        if len(atomiso_bondsets) != 2:
            printout = "list_of_atomiso_bondsets_to_remove needs to be 2 isolabels for the atoms"
            raise TypeError(printout)
        # Translate isotope labels back into current atom indices; indices
        # may have shifted after earlier fragmentations.
        atom_1_idx = int(get_atom_w_iso_num(new_mol, atomiso_bondsets[0]))
        atom_2_idx = int(get_atom_w_iso_num(new_mol, atomiso_bondsets[1]))
        # NOTE(review): FragmentOnBonds documents *bond* indices as its
        # second argument, but atom indices are passed here — confirm.
        try:
            new_mol = Chem.FragmentOnBonds(
                new_mol, [atom_1_idx, atom_2_idx], addDummies=False
            )
        except:
            return None
        new_mol = MOH.check_sanitization(new_mol)
        if new_mol is None:
            return None
    # NOTE(review): this final sanitization repeats the per-bond check above;
    # presumably a defensive duplicate.
    new_mol = MOH.check_sanitization(new_mol)
    if new_mol is None:
        return None
    return new_mol
#
def make_list_of_all_unique_frags(fragment_list):
    """
    Split every fragmented molecule into its pieces and return the unique
    fragment SMILES.

    Each mol in fragment_list is separated into individual rdkit mol
    objects; each piece is sanitized, stripped of isotope labels, size
    filtered, and converted to a canonical SMILES string. The resulting
    strings are deduplicated before being returned.

    Inputs:
    :param list fragment_list: list of fragmented rdkit mols which haven't
        been separated into individual pieces yet
    Returns:
    :returns: list clean_frag_list: unique sanitized canonical SMILES
        strings for every fragment, with isotope labels removed
    """
    clean_frag_list = []
    for fragments in fragment_list:
        frags = Chem.GetMolFrags(fragments, asMols=True, sanitizeFrags=False)
        for frag in frags:
            frag = MOH.check_sanitization(frag)
            if frag is None:
                continue
            # Skip fragments with fewer than 3 atoms.
            list_mol_atoms = frag.GetAtoms()
            if len(list_mol_atoms) < 3:
                continue
            # Clear isotope labels before canonicalizing the SMILES.
            for atom in frag.GetAtoms():
                atom.SetIsotope(0)
            clean_frag_list.append(
                Chem.MolToSmiles(frag, isomericSmiles=True, canonical=True)
            )
    # BUGFIX: the deduplicated list was previously built but never assigned
    # (`list(set(list(...)))` discarded), so the promised uniqueness in the
    # docstring never actually happened.
    clean_frag_list = list(set(clean_frag_list))
    return clean_frag_list
#
def make_unique_lig_id(parent_lig_name, current_lig_list):
    """
    Build a unique child-ligand ID from the parent ligand's name.

    Format of names:
        str(parent_lig_name) + "_Frag_" + str(random_int)

    Inputs:
    :param str parent_lig_name: str of the ligand Id for the parent mol
    :param list current_lig_list: the list of names already taken
    Returns:
    :returns: str unique_lig_id: A unique ID/name for the child ligand.
    """
    if type(parent_lig_name) != str:
        raise Exception("Ligand ID's to seed this must have Unique string IDs")
    # Strip spaces so the ID stays a single token in .smi files.
    base_name = parent_lig_name.replace(" ", "")
    while True:
        # Six-digit random suffix; re-draw on collision with a taken name.
        suffix = random.choice(range(100000, 999999))
        candidate = str(base_name) + "_Frag_" + str(suffix)
        if candidate not in current_lig_list:
            return candidate
#
def make_frag_list_for_one_mol(
    mol_info, frags_per_seed_lig, run_brics, run_frag, c_c_bonds_off=False
):
    """
    Fragment one parent ligand and return it with its unique fragments.

    The parent is fragmented along all rotatable, non-aromatic bonds (and
    optionally via BRICS). All permutations of bond breaks are made, reduced
    to unique fragments, and each fragment receives a unique ID derived from
    the parent's ID.

    Inputs:
    :param list mol_info: list containing [mol_string, mol_id]
        mol_info[0] = the SMILE string of the parent mol
        mol_info[1] = the Unique ID of the parent mol
    :param int frags_per_seed_lig: -1 keeps every unique fragment.
        NOTE(review): any other value currently returns only the parent —
        selecting a fixed number of fragments does not appear to be
        implemented; confirm intended.
    :param bool run_brics: whether to fragment using BRICS method
    :param bool run_frag: whether to fragment along all rotatable bonds
    :param bool c_c_bonds_off: whether to exclude fragmenting C-C bonds
    Returns:
    :returns: list final_frag_list: A list of lists containing the parent
        followed by the chosen unique fragments; each entry is [SMILE, mol_id]
    """
    mol_smile = mol_info[0]
    lig_id = mol_info[1]
    mol = Chem.MolFromSmiles(mol_smile, sanitize=False)
    mol = MOH.check_sanitization(mol)
    if mol is None:
        printout = "\nMolecule {} failed to sanitize. \
        Could not make any fragments from it".format(
            lig_id
        )
        raise Exception(printout)
    # Canonicalize the parent SMILES and tag every atom with its index so
    # atoms stay identifiable across bond cuts.
    mol_smile = Chem.MolToSmiles(mol, isomericSmiles=True, canonical=True)
    mol = label_iso_num_w_idx(mol)
    mol_copy = copy.deepcopy(mol)
    bonds_to_remove_permutations = get_rot_bond_permutations_to_cut(
        mol_copy, c_c_bonds_off
    )
    # Cut each permutation of bonds on a fresh copy of the parent.
    fragment_list = []
    for bond_set_to_del in bonds_to_remove_permutations:
        mol_copy = copy.deepcopy(mol)
        x = remove_bonds(mol_copy, bond_set_to_del)
        if x is None:
            continue
        fragment_list.append(x)
    clean_frag_list = []
    if run_frag is True:
        clean_frag_list = make_list_of_all_unique_frags(fragment_list)
        clean_frag_list = list(set(clean_frag_list))
    if run_brics is True:
        mol_copy = copy.deepcopy(mol)
        bric_mols = get_brics_permutations(mol_copy, min_frag_size=3)
        clean_frag_list.extend(bric_mols)
        clean_frag_list = list(set(clean_frag_list))
    if len(clean_frag_list) == 0:
        printout = "\nNo fragments were made for {}.\n".format(lig_id)
        print(printout)
        return [[mol_smile, lig_id]]
    # Pick the number of ligands to make
    final_frag_list = [[mol_smile, lig_id]]
    if frags_per_seed_lig == -1:
        printout = "\nFor {}: {} fragmented were made.".format(
            lig_id, len(clean_frag_list)
        )
        print(printout)
        for frag in clean_frag_list:
            # NOTE(review): final_frag_list holds [SMILES, id] pairs, so the
            # membership test inside make_unique_lig_id never matches a bare
            # id string; uniqueness relies on the random suffix — confirm.
            unique_lig_id = make_unique_lig_id(lig_id, final_frag_list)
            temp_frag_info = [frag, unique_lig_id]
            final_frag_list.append(temp_frag_info)
    return final_frag_list
#
def get_ligands_from_smi(smi_file):
    """
    Import the ligands from a tab- or space-delimited .smi file.

    Each usable line must provide at least a SMILES string (field 1) and a
    ligand ID (field 2). Lines that fail to parse or sanitize are reported
    and skipped.

    Inputs:
    :param str smi_file: Path to smiles file
    Returns:
    :returns: list list_of_ligands: list of [canonical_SMILES, mol_id] pairs
    """
    list_of_ligands = []
    line_counter = 0
    with open(smi_file, "r") as smiles_file:
        for line in smiles_file:
            line_counter = line_counter + 1
            line = line.replace("\n", "")
            # Prefer tab-delimited; fall back to space-delimited lines.
            parts = line.split("\t")
            if len(parts) == 1:
                parts = line.split(" ")
            if len(parts) < 2:
                continue
            mol_string = parts[0]
            mol_id = parts[1]
            if type(mol_id) != str:
                print(
                    "Miss Formatted within .SMI. Line number {}".format(
                        str(line_counter)
                    )
                )
                continue
            try:
                mol = Chem.MolFromSmiles(mol_string, sanitize=False)
            except:
                print(
                    "Miss Formatted within .SMI. Line number {}".format(
                        str(line_counter)
                    )
                )
                continue
            mol = MOH.check_sanitization(mol)
            if mol is None:
                continue
            # Store the canonical SMILES, not the raw input string.
            canonical = Chem.MolToSmiles(mol, isomericSmiles=True, canonical=True)
            list_of_ligands.append([canonical, mol_id])
    print(
        "Was able to import and sanitize {} \
    ligands from the .smi.".format(
            len(list_of_ligands)
        )
    )
    if line_counter != len(list_of_ligands):
        print(
            "\t Failed to sanitize/import \
    {} ligands from the .smi".format(
                line_counter - len(list_of_ligands)
            )
        )
    print("########")
    return list_of_ligands
#
def run_fragmentation_main(vars):
    """
    Run the fragmenter end-to-end: import the .smi, fragment every ligand
    in parallel, deduplicate by SMILES and by ID, and write the output .smi.

    Inputs:
    :param dict vars: dictionary with all of the user variables
    """
    print(vars)
    smi_file = vars["smi_file"]
    output_smi_file = vars["output_smi_file"]
    run_brics = vars["run_brics"]
    frags_per_seed_lig = vars["frags_per_seed_lig"]
    run_frag = vars["run_frag"]
    c_c_bonds_off = vars["c_c_bonds_off"]
    number_of_processors = vars["number_of_processors"]

    print("")
    print("STARTING FRAGMENTER")
    print("frags_per_seed_lig: ", frags_per_seed_lig)
    print("smi_file: ", smi_file)
    print("########")
    print("Importing .smi file")
    list_of_ligands = get_ligands_from_smi(smi_file)

    # create a set of jobs to multithread the fragmentation
    job_input = [
        tuple([mol_info, frags_per_seed_lig, run_brics, run_frag, c_c_bonds_off])
        for mol_info in list_of_ligands
    ]
    # Free the ligand list; job_input holds everything we need.
    list_of_ligands = None

    output = mp.multi_threading(
        job_input, number_of_processors, make_frag_list_for_one_mol
    )
    print("Finish multithread\n")

    # BUGFIX: the empty-string filter used `is not ""` (identity, not
    # equality), which is unreliable; use `!=` instead.
    output = [x for x in output if x is not None]
    output = [x for x in output if x != ""]
    initial_output_reduce = []
    for x in output:
        initial_output_reduce.extend(x)
    output = None
    # Drop entries with an empty SMILES or an empty ID.
    initial_output_reduce = [x for x in initial_output_reduce if x[0] != ""]
    initial_output_reduce = [x for x in initial_output_reduce if x[1] != ""]

    # Reduce smile redundancies (first occurrence wins):
    seen_smiles = set()
    output_reduce = []
    for x in initial_output_reduce:
        if x[0] in seen_smiles:
            continue
        output_reduce.append(x)
        seen_smiles.add(x[0])

    # Keep only entries whose SMILES AND ID are both unseen.
    final_mol_list = []
    master_smile_list = set()
    master_id_list = set()
    for x in output_reduce:
        temp_smile = x[0]
        temp_id = x[1]
        if temp_smile in master_smile_list:
            continue
        if temp_id in master_id_list:
            continue
        final_mol_list.append(x)
        master_smile_list.add(temp_smile)
        master_id_list.add(temp_id)

    # convert list of mols to a print statement (join avoids quadratic
    # string concatenation)
    printout = "".join(x[0] + "\t" + x[1] + "\n" for x in final_mol_list)
    print("####")
    print("\nSaving list to file")
    with open(output_smi_file, "w") as f:
        f.write(printout)

    print("Number of parent ligands: {}".format(len(job_input)))
    print(
        "Number of new fragmented ligands: {}".format(
            len(final_mol_list) - len(job_input)
        )
    )
    print("Total number ligs in output file: {}".format(len(final_mol_list)))
def convert_to_bool(val):
    """
    Converts an integer, string, or boolean to the appropriate boolean.

    True for True/1 or any capitalization of the string "true"; False for
    everything else.

    Inputs:
    :param int|bool|str val: The value to be converted.
    Returns:
    :returns: bool: The equivalent boolean.
    """
    if val in [True, 1]:
        return True
    return type(val) is str and val.upper() == "TRUE"
def process_inputs(inputs):
    """
    Validate and normalize all user parameters.

    Inputs:
    :params dict inputs: dictionary of argparse parameters
    Returns:
    :returns: dict inputs: dictionary of argparse parameters, with paths
        absolutized and all option values normalized/defaulted
    """
    # check input smi
    smi_file = os.path.abspath(inputs["smi_file"])
    if os.path.exists(smi_file) is False:
        raise Exception("\n.SMI file not found.\n")
    if os.path.isfile(smi_file) is False:
        raise Exception("\n.SMI file not found.\n")
    inputs["smi_file"] = smi_file

    # check output_smi_file; default to <smi_file>_Fragmented.smi
    output_smi_file = inputs.get("output_smi_file")
    if output_smi_file is None:
        output_smi_file = smi_file + "_Fragmented.smi"
    output_smi_file = os.path.abspath(output_smi_file)
    if os.path.exists(os.path.dirname(output_smi_file)) is False:
        raise Exception("\n.directory for output_smi_file not found.\n")
    if os.path.isfile(smi_file) is False:
        raise Exception("\n.SMI file not found.\n")
    inputs["output_smi_file"] = output_smi_file

    if "frags_per_seed_lig" in inputs.keys():
        inputs["frags_per_seed_lig"] = int(inputs["frags_per_seed_lig"])
    else:
        inputs["frags_per_seed_lig"] = -1

    if "run_brics" in inputs.keys():
        inputs["run_brics"] = convert_to_bool(inputs["run_brics"])
    else:
        inputs["run_brics"] = True

    # BUGFIX: each of the following previously read inputs["run_brics"]
    # (copy-paste error), so run_frag, c_c_bonds_off, and
    # number_of_processors all silently mirrored the run_brics setting.
    if "run_frag" in inputs.keys():
        inputs["run_frag"] = convert_to_bool(inputs["run_frag"])
    else:
        inputs["run_frag"] = True

    if "c_c_bonds_off" in inputs.keys():
        inputs["c_c_bonds_off"] = convert_to_bool(inputs["c_c_bonds_off"])
    else:
        inputs["c_c_bonds_off"] = True

    if "number_of_processors" in inputs.keys():
        inputs["number_of_processors"] = int(inputs["number_of_processors"])
    else:
        # BUGFIX: the old default was the bool True; -1 matches the argparse
        # default and means "use all available processors".
        inputs["number_of_processors"] = -1
    return inputs
# Argument parsing
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
"--smi_file",
required=True,
default=None,
help="Path to tab-delineated .smi file to fragment",
)
PARSER.add_argument(
"--output_smi_file",
"-o",
type=str,
default=None,
help="Path to output tab-delineated .smi file of fragments. \
If not provided it will play a file in the same directory as smi_file \
titled smi_file + _Fragmented.smi",
)
PARSER.add_argument(
"--frags_per_seed_lig",
type=int,
required=False,
default=-1,
help="Number of fragments to create per input SMILES. \
default is -1 which mean all possible fragments.",
)
PARSER.add_argument(
"--run_brics",
type=bool,
required=False,
default=True,
help="Whether to fragment ligands using BRICS fragmentation. This fragments \
along synthesizable bonds. Default is True.",
)
PARSER.add_argument(
"--run_frag",
type=bool,
required=False,
default=True,
help="Whether to fragment ligands over all rotatable bonds. Default is True.",
)
PARSER.add_argument(
"--c_c_bonds_off",
type=bool,
required=False,
default=True,
help="Whether to exclude fragmenting carbon-carbon single bonds. Default is True. \
If True it will ignore fragments on C-C bonds; if False it will fragment.",
)
PARSER.add_argument(
"--number_of_processors",
"-p",
type=int,
metavar="N",
default=-1,
help="Number of processors to use for parallel calculations. \
Set to -1 for all available CPUs.",
)
ARGS_DICT = vars(PARSER.parse_args())
ARGS_DICT = process_inputs(ARGS_DICT)
run_fragmentation_main(ARGS_DICT)
print("Fragments located at: {}".format(ARGS_DICT["output_smi_file"]))
print("Finished")
| 21,810 | 30.114123 | 97 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/accessory_scripts/plot_autogrow_run.py | """
Plots a line plot of the average score for each generation of AutoGrow run.
Example submit:
python autogrow4/accessory_scripts/plot_autogrow_run.py\
-i $PATH/Run_1/Run_0/ \
--plot_reference_lines [['Olaparib Score',-12.8,'y'],\
['Niraparib',-10.7,'k'],['NAD/NADH',-10.3,'purple'],\
['ADP-ribose',-9.3,'maroon']]
"""
import __future__
import os
import glob
import json
import copy
import argparse
import matplotlib
import matplotlib.pyplot as plt
def get_usable_format(infile):
    """
    Read a formatted .smi file into a list of per-line field lists.

    Each line of the .smi must be tab-delimited (a four-space fallback is
    used for lines with no tab) and follow this layout:
    MANDATORY INFO
        part 1 is the SMILES string
        part 2 is the SMILES name/ID
    Optional info
        part -1 (the last piece of info) is the SMILES diversity score
            relative to its population
        part -2 (the second to last piece of info) is the fitness metric
            for evaluating
            - For default setting this is the Docking score
            - If you add a unique scoring function Docking score should be
                -3 and that score function should be -2
        Any other information MUST be between part 2 and part -2 (this
        allows for the expansion of features without disrupting the rest of
        the code)
    Inputs:
    :param str infile: the string of the PATHname of a formatted .smi file to
        be read into the program
    Returns:
    :returns: list usable_list_of_smiles: list of SMILES and their associated
        information formatted into a list which is usable by the rest of
        Autogrow
    """
    if os.path.exists(infile) is False:
        print("\nFile of Source compounds does not exist: {}\n".format(infile))
        raise Exception("File of Source compounds does not exist")

    usable_list_of_smiles = []
    with open(infile) as smiles_file:
        for raw_line in smiles_file:
            stripped = raw_line.replace("\n", "")
            fields = stripped.split("\t")
            if len(fields) == 1:
                # No tab found: fall back to the four-space delimiter.
                fields = stripped.split("    ")
            usable_list_of_smiles.append(list(fields))
    return usable_list_of_smiles
def get_average_score_per_gen(infolder, folder_list):
    """
    Compute the average docking score of every generation by reading each
    generation folder's "*_ranked.smi" file.

    Assumes the second-to-last tab-separated column of each ranked line is
    the fitness/docking score (AutoGrow's ranked-file convention).

    Inputs:
    :param str infolder: the path of the folder which has all of the
        generation folders
    :param list folder_list: a list of generation folders for each generation
        within infolder
    Returns:
    :returns: dict average_affinity_dict: maps "generation_<N>" to the mean
        score of all compounds in that generation
    """
    average_affinity_dict = {}
    for gen_folder in folder_list:
        gen_folder_name = infolder + gen_folder + os.sep
        ranked_file = glob.glob(gen_folder_name + "*_ranked.smi")
        for rank_file in ranked_file:
            # read the tab-delimited ranked .smi file and average column -2
            with open(rank_file, "r") as f:
                gen_affinity_sum = float(0.0)
                num_lines_counter = float(0.0)
                for line in f:
                    line = line.replace("\n", "")
                    # split line into tab-separated fields
                    parts = line.split("\t")
                    choice_list = []
                    for i in range(0, len(parts)):
                        choice_list.append(parts[i])
                    # score is the second-to-last field on each line
                    gen_affinity_sum = gen_affinity_sum + float(choice_list[-2])
                    num_lines_counter = num_lines_counter + float(1.0)
                # NOTE(review): an empty ranked file would raise
                # ZeroDivisionError here -- assumed non-empty by convention.
                gen_affinity_average = gen_affinity_sum / num_lines_counter
            # generation number is encoded in the file name:
            # "generation_<N>_ranked.smi"
            gen_num = os.path.basename(rank_file).split("_")[1]
            gen_name = "generation_{}".format(gen_num)
            average_affinity_dict[gen_name] = gen_affinity_average
    print_gens(average_affinity_dict)
    return average_affinity_dict
def get_average_top_score_per_gen(infolder, folder_list, top_score_per_gen):
    """
    This script will get the average docking score of the top N number of
    ligands ranked .smi file from each generation.

    Generations whose ranked file has fewer than top_score_per_gen lines are
    assigned the string "N/A" instead of a number.

    Inputs:
    :param str infolder: the path of the folder which has all of the
        generation folders
    :param list folder_list: a list of generation folders for each generation
        within infolder
    :param int top_score_per_gen: the number of ligands to determine the
        average score. ie) if top_score_per_gen=50 it will return the average
        of the top 50 scores.
    Returns:
    :returns: dict average_affinity_dict: dictionary of average affinity
        scores for top_score_per_gen number of ligands ("N/A" when a
        generation has too few ligands)
    """
    average_affinity_dict = {}
    for gen_folder in folder_list:
        gen_folder_name = infolder + gen_folder + os.sep
        ranked_file = glob.glob(gen_folder_name + "*_ranked.smi")
        for rank_file in ranked_file:
            # First pass: count the lines so we know whether this generation
            # has enough ligands to compute a top-N average.
            num_lines = 0
            with open(rank_file, "r") as rf:
                for line in rf:
                    num_lines = num_lines + 1
            if num_lines >= top_score_per_gen:
                # Second pass: the file is already ranked, so the first N
                # lines are the top N ligands.
                with open(rank_file, "r") as f:
                    gen_affinity_sum = float(0.0)
                    for i, line in enumerate(f.readlines()):
                        if i >= top_score_per_gen:
                            break
                        line = line.replace("\n", "")
                        parts = line.split(
                            "\t"
                        ) # split line into tab-separated fields
                        choice_list = []
                        for j in range(0, len(parts)):
                            choice_list.append(parts[j])
                        # score is the second-to-last field on each line
                        gen_affinity_sum = gen_affinity_sum + float(choice_list[-2])
                    gen_affinity_average = gen_affinity_sum / top_score_per_gen
                gen_num = os.path.basename(rank_file).split("_")[1]
                gen_name = "generation_{}".format(gen_num)
                average_affinity_dict[gen_name] = gen_affinity_average
            else:
                # Too few ligands to average: mark the generation as "N/A".
                gen_num = os.path.basename(rank_file).split("_")[1]
                gen_name = "generation_{}".format(gen_num)
                average_affinity_dict[gen_name] = "N/A"
    print_gens(average_affinity_dict)
    return average_affinity_dict
def print_gens(average_affinity_dict):
    """
    Print a generation-by-generation table of average affinity scores.

    Keys are sorted numerically by the generation number embedded in the
    key name ("generation_<N>").

    Inputs:
    :param dict average_affinity_dict: dictionary of average affinity scores
        for top_score_per_gen number of ligands
    """
    print("generation_number average affinity score")
    ordered_keys = sorted(
        average_affinity_dict, key=lambda name: int(name.split("_")[1])
    )
    for gen in ordered_keys:
        print(gen, " ", average_affinity_dict[gen])
def make_graph(dictionary):
    """
    Convert a {"generation_<N>": score} dictionary into parallel lists of
    generation numbers and scores, ready for plotting.

    If any generation's score is the placeholder string "N/A" (a generation
    lacked enough ligands to average) the whole series is unusable and
    (None, None) is returned.

    Inputs:
    :param dict dictionary: dictionary of average affinity scores for
        top_score_per_gen number of ligands
    Returns:
    :returns: list list_generations: generation numbers (ints) in the
        dictionary's iteration order, or None if any score is "N/A"
    :returns: list list_of_scores: the matching scores, or None if any
        score is "N/A"
    """
    # Fix: the original also built a dead `list_of_gen_names` list and
    # appended each key to it twice per iteration; it was never used.
    list_generations = []
    list_of_scores = []
    for key in dictionary.keys():
        score = dictionary[key]
        list_of_scores.append(score)
        # "generation_<N>" -> N
        gen = int(key.replace("generation_", ""))
        list_generations.append(gen)
    for i in list_of_scores:
        if i == "N/A":
            return None, None
    return list_generations, list_of_scores
def run_plotter(vars, dict_of_averages, outfile):
    """
    Plot the per-generation score averages on a single matplotlib figure and
    save it to outfile.

    Lines drawn: overall average, top-50 and top-20 (each only when every
    generation had enough ligands), top-10, and top-1. Optional horizontal
    dotted reference lines come from vars["plot_reference_lines"]
    ([name, value, color] triples).

    Inputs:
    :param dict vars: dict of user variables which will govern how the
        programs runs
    :param dict dict_of_averages: a dictionary of dictionaries containing the
        average of each generation for the top 50, 20, 10, and 1 ligand(s)
        and the overall average for each generation.
    :param str outfile: Path for the output file for the plot
    """
    average_affinity_dict = dict_of_averages["average_affinity_dict"]
    top_fifty_dict = dict_of_averages["top_fifty_dict"]
    top_twenty_dict = dict_of_averages["top_twenty_dict"]
    top_ten_dict = dict_of_averages["top_ten_dict"]
    top_one_dict = dict_of_averages["top_one_dict"]

    list_generations_average, list_of_scores_average = make_graph(average_affinity_dict)

    # Only draw the top-50/top-20 series when every generation had enough
    # ligands (no "N/A" placeholders).
    print_fifty = all(val != "N/A" for val in top_fifty_dict.values())
    if print_fifty is True:
        list_generations_fifty, list_of_scores_fifty = make_graph(top_fifty_dict)
    print_twenty = all(val != "N/A" for val in top_twenty_dict.values())
    if print_twenty is True:
        list_generations_twenty, list_of_scores_twenty = make_graph(top_twenty_dict)
    list_generations_ten, list_of_scores_ten = make_graph(top_ten_dict)
    list_generations_one, list_of_scores_one = make_graph(top_one_dict)

    ax = plt.subplot(111)
    ax.plot(
        list_generations_average, list_of_scores_average, color="b", label="Average"
    )
    if print_fifty is True:
        ax.plot(list_generations_fifty, list_of_scores_fifty, color="c", label="Top 50")
    if print_twenty is True:
        ax.plot(
            list_generations_twenty, list_of_scores_twenty, color="m", label="Top 20"
        )
    ax.plot(list_generations_ten, list_of_scores_ten, color="g", label="Top 10")
    ax.plot(list_generations_one, list_of_scores_one, color="r", label="Top 1")
    # Optional horizontal dotted reference lines: [name, value, color]
    if vars["plot_reference_lines"] is not None:
        for ref_info in vars["plot_reference_lines"]:
            ax.axhline(y=ref_info[1], color=ref_info[2], linestyle=':', label=ref_info[0])
    ax.set_ylim()
    receptor_name = os.path.basename(vars["filename_of_receptor"])
    scoring_type = vars["scoring_choice"]
    docking_type = vars["scoring_choice"]
    # ligands per generation = mutants + crossovers + elite carry-overs
    num_lig = (
        int(vars["number_of_mutants"])
        + int(vars["number_of_crossovers"])
        + int(vars["number_elitism_advance_from_previous_gen"])
    )
    number_of_conf_per_lig = str(vars["max_variants_per_compound"])
    title_of_figure = "{} Scores for {} using {}".format(
        scoring_type, receptor_name, docking_type
    )
    plt.title(title_of_figure, fontweight="semibold")
    # Put a legend to the right of the current axis
    ax.legend(loc="center left", bbox_to_anchor=(1, 0.274), fontsize="small")
    number_of_lig_per_gen = str(num_lig)
    output = (
        str(number_of_lig_per_gen)
        + " lig/gen"
        + "\n"
        + str(number_of_conf_per_lig)
        + " variants/lig"
    )
    # NOTE(review): this annotation position is hard-coded in data
    # coordinates -- it can fall outside the axes for runs with few
    # generations or different score ranges.
    plt.text(
        5.4, -8.5, output, bbox=dict(facecolor="white", alpha=0.5), fontsize="small"
    )
    ax.set_ylim()
    if "VINA" in str(scoring_type):
        y_label = "Docking Affinity (kcal/mol)"  # fix: closing paren was missing
    else:
        y_label = "Fitness Score"
    plt.ylabel(y_label, fontweight="semibold")
    plt.xlabel("Generation Number", fontweight="semibold")
    # fix: keyword was misspelled "foramt", so the requested output format
    # was never passed to savefig
    plt.savefig(outfile, bbox_inches="tight",
                format=vars["outfile_format"], dpi=1000)
def print_data_table(infolder, folder_list):
    """
    This function takes a folder of an Autogrow Run and a list of all folders
    within the infolder, and finds the average of each generation, the average
    of the top 50,20, 10, and 1 ligand(s) in each generation.
    It prints the average docking score values in a table and returns that
    information as a dictionary of dictionaries.
    Inputs:
    :param str infolder: a string for the file path to a directory containing
        an Autogrow run. ie) "PATH/Run_0/"
    :param list folder_list: a list of every generation folders within the
        infolder
    Returns
    :returns: dict dict_of_averages: a dictionary of dictionaries containing
        the average of each generation for the top 50,20, 10, and 1 ligand(s)
        and the overall average for each generation.
    """
    # Each helper prints its own table (via print_gens) as a side effect.
    print("Overall Scoring Average for all Compounds")
    average_affinity_dict = get_average_score_per_gen(infolder, folder_list)
    print("")
    print("Average for Top Scoring Compounds")
    print("Number of top scoring compounds: ", 50)
    top_fifty_dict = get_average_top_score_per_gen(infolder, folder_list, 50)
    print("")
    print("Average for Top Scoring Compounds")
    print("Number of top scoring compounds: ", 20)
    top_twenty_dict = get_average_top_score_per_gen(infolder, folder_list, 20)
    print("")
    print("Average for Top Scoring Compounds")
    print("Number of top scoring compounds: ", 10)
    top_ten_dict = get_average_top_score_per_gen(infolder, folder_list, 10)
    print("")
    print("Best Score per generation")
    print("Number of top scoring compounds: ", 1)
    top_one_dict = get_average_top_score_per_gen(infolder, folder_list, 1)
    print("")
    print("")
    # Bundle all five series under the keys run_plotter expects.
    dict_of_averages = {}
    dict_of_averages["average_affinity_dict"] = average_affinity_dict
    dict_of_averages["top_fifty_dict"] = top_fifty_dict
    dict_of_averages["top_twenty_dict"] = top_twenty_dict
    dict_of_averages["top_ten_dict"] = top_ten_dict
    dict_of_averages["top_one_dict"] = top_one_dict
    return dict_of_averages
def generate_figures(vars):
    """
    This runs everything to make a line plot of the results of an Autogrow
    simulation.
    Inputs:
    :param dict vars: dict of user variables which will govern how the
        programs runs ("infolder" and "outfile" are required here)
    """
    infolder = vars["infolder"]
    outfile = vars["outfile"]
    # All subdirectories of the run folder...
    all_folders_list = [
        f for f in sorted(os.listdir(infolder)) if os.path.isdir(infolder + f)
    ]
    # ...filtered down to generation folders named like "generation_<N>"
    # (two "_"-separated pieces), excluding the "Data" folder.
    folder_list = []
    for folder in all_folders_list:
        if folder != "Data" and len(folder.split("_")) == 2:
            folder_list.append(folder)
    # Sort numerically by generation number, not lexicographically.
    folder_list.sort(key=lambda x: int(x.split("_")[1]))
    dict_of_averages = print_data_table(infolder, folder_list)
    run_plotter(vars, dict_of_averages, outfile)
######## Handle Variables #####
def retrieve_vars_dict(autogrow_vars_json):
    """
    This will retrieve a variable dictionary from a AutoGrow vars json file.

    Inputs:
    :param str autogrow_vars_json: path to AutoGrow json variable file
    Returns:
    :returns: dict vars_dict: a dictionary of variable to use
    Raises:
    :raises Exception: if the file is missing or is not valid JSON
    """
    if os.path.exists(autogrow_vars_json) is False:
        raise Exception("variable file could not be found. It should be the \
            vars.json file written by AutoGrow in the output folder of the run.")
    try:
        with open(autogrow_vars_json, "r") as f:
            # renamed local (was `vars`, shadowing the builtin)
            vars_dict = json.load(f)
    # narrowed from a bare `except:` -- only I/O and JSON-decode failures
    # should be translated into this error message
    except (OSError, ValueError):
        raise Exception("variable file would not import. It should be the \
            vars.json file written by AutoGrow in the output folder of the run.")
    return vars_dict
#
def process_inputs(inputs):
    """
    Validate and normalize all command-line parameters, then merge them over
    the vars.json dictionary saved by the original AutoGrow run.

    Inputs:
    :param dict inputs: dictionary of argparse parameters (mutated in place)
    Returns:
    :returns: dict vars_dict: the AutoGrow vars.json dictionary with every
        command-line parameter overwritten on top of it
    """
    # handle input information
    inputs["infolder"] = os.path.abspath(inputs["infolder"]) + os.sep
    if os.path.exists(inputs["infolder"]) is False:
        raise Exception("Input folder {} does not\
            exist.".format(inputs["infolder"]))
    # get vars dict from last run
    inputs["vars_json"] = inputs["infolder"] + "vars.json"
    if os.path.exists(inputs["vars_json"]) is False:
        raise Exception("Input folder {} does not contain the vars.json file \
            necessary to run script. Please make sure the vars.json is in the \
            folder.".format(inputs["infolder"]))
    try:
        with open(inputs["vars_json"], "r") as f:
            vars_dict = json.load(f)
    except:
        raise Exception("variable file would not import. It should be the \
            vars.json file written by AutoGrow in the output folder of the run.")
    # default the figure format to svg and reject unsupported formats
    if "outfile_format" in inputs.keys():
        if inputs["outfile_format"] is None:
            inputs["outfile_format"] = "svg"
        if inputs["outfile_format"].lower() not in ["svg", "png", "jpg", "pdf"]:
            raise Exception("outfile_format not a valid format")
    # derive the output path when not provided
    # NOTE(review): `os.path.dirname(...) is False` can never be True
    # (dirname returns a string), so the mkdir/existence branches below are
    # effectively dead code -- confirm intended check was os.path.exists().
    if "outfile" in inputs.keys():
        if inputs["outfile"] is not None:
            if os.path.dirname(inputs["outfile"]) is False:
                try:
                    os.mkdir(os.path.dirname(inputs["outfile"]))
                except:
                    raise Exception("outfile directory does not exist")
            if os.path.dirname(inputs["outfile"]) is False:
                raise Exception("outfile directory does not exist")
        else:
            inputs["outfile"] = inputs["infolder"] + os.sep + \
                "data_line_plot." + inputs["outfile_format"]
    else:
        inputs["outfile"] = inputs["infolder"] + os.sep + \
            "data_line_plot." + inputs["outfile_format"]
    # update --plot_reference_lines
    if "plot_reference_lines" not in inputs.keys():
        inputs["plot_reference_lines"] = None
    if inputs["plot_reference_lines"] is not None:
        # names of all matplotlib color options
        matplot_colors = matplotlib.colors.get_named_colors_mapping().keys()
        # parse the CLI string "[[name,value,color],...]" into sublists
        ref_lines = inputs["plot_reference_lines"].replace("[[", "[").replace("]]", "]")
        ref_lines = ref_lines.split("],")
        ref_lines = [ref.replace("]", "").replace("[", "").split(",") for ref in ref_lines]
        new_ref_lines = []
        failed_io = False
        for ref_info in ref_lines:
            # each reference line must be exactly [name, value, color]
            if len(ref_info) != 3:
                failed_io = True
                break
            # make new list with 1st item the str name
            temp_ref_lines = [str(ref_info[0])]
            try:
                temp_ref_lines.append(float(ref_info[1]))
            except:
                failed_io = True
                break
            if str(ref_info[2]) not in matplot_colors:
                print("COULD NOT FIND COLOR: " + str(ref_info[2]))
                failed_io = True
                break
            temp_ref_lines.append(str(ref_info[2]))
            new_ref_lines.append(temp_ref_lines)
        if failed_io is True:
            printout = "\n --plot_reference_lines must be list of lists where each "
            printout = printout + "sublist has three pieces of information in this "
            printout = printout + "order:\n\t [name, value, matplotlib_color]\n"
            printout = printout + "more details can be found using the -h option\n"
            print(printout)
            raise Exception(printout)
        inputs["plot_reference_lines"] = new_ref_lines
    # overwrite and return vars_dict with input commands
    for key in inputs.keys():
        vars_dict[key] = inputs[key]
    return vars_dict
#
######################################
######################################
######################################
# Command-line entry point: parse arguments, merge them with the run's
# saved vars.json, and generate the per-generation score line plot.
PARSER = argparse.ArgumentParser()
# Get needed info
PARSER.add_argument(
    "--outfile",
    "-o",
    metavar="param.outfile",
    required=False,
    default=None,
    help="Path to folder to output files. It will be created if does not exist. \
    If not provide it will be placed in the infolder/data_line_plot.svg",
)
PARSER.add_argument(
    "--outfile_format",
    metavar="param.outfile_format",
    type=str, default="svg",
    choices=["svg", "png", "jpg", "pdf"],
    help="The type of file for figure to be exported as default is .svg file.",
)
PARSER.add_argument(
    "--infolder",
    "-i",
    metavar="param.infolder",
    required=True,
    help="Path to input folder containing the AutoGrow run. This should be the \
    top folder which contains the vars.json file.",
)
PARSER.add_argument(
    "--plot_reference_lines",
    default=None,
    help="This will be a list of lists, with each sublist being a different \
    dotted-line reference to plot. For each sublist the order of \
    information should be: [name, value, matplotlib_color] \
    For example a [['Olaparib score',-12.8,'y'],['Niraparib score',-10.7,'k']] \
    will add horizontal dotted lines at -12.8 (yellow) and -10.7 (black) \
    with Olaparib and Niraparib added to the legend. \
    Spaces must be within quotes and not be between variables. \
    matplotlib colors can be found with mcolors.get_named_colors_mapping().keys()"
)
ARGSDICT = vars(PARSER.parse_args())
# copying ARGSDICT so we can delete out of while iterating through the
# original ARGSDICT
INPUTS = copy.deepcopy(ARGSDICT)
# Drop unset (None) options so they do not overwrite values from vars.json.
for k, v in ARGSDICT.items():
    if v is None:
        del INPUTS[k]
USER_VARS = process_inputs(INPUTS)
generate_figures(USER_VARS)
print("FINISHED {}".format(USER_VARS["outfile"]))
print("finished")
| 22,547 | 35.192616 | 91 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/accessory_scripts/__init__.py | 1 | 0 | 0 | py | |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/accessory_scripts/convert_directory_ligands_pdb_to_smi.py | """
convert pdbs into smiles
This script will take a folder and convert all pdb files into a single texted file.
The text file will contain smiles strings of the respective pdb and the name of the file.
Run example:
output example:
python convert_ligands_pdb_to_smi.py \
--source_folder $PATH/OF/PDBS/ \
--output_folder $PATH/TO/OUTPUT/ \
--number_of_processors -1
This will convert all .pdb files within $PATH/OF/PDBS/ into a single
.smi file at $PATH/TO/OUTPUT/PDBS.smi
using all available processors
CC1COC(=O)OC(=O)O1 ZINC60039447
O = C1OC(=O)N2CCC12 ZINC59199492
O = C1CC(C(=O)O)C(=O)O1 ZINC59901386
"""
import __future__
import glob
import os
import argparse
from rdkit import Chem
import support_scripts.mol_object_handling as MOH
import support_scripts.Multiprocess as mp
def run_convert_on_single_pdb(pdb):
    """
    Convert one ligand .pdb file into a tab-delimited SMILES record.
    The name is the basename of the file minus the .pdb extension.

    Inputs:
    :param str pdb: path to a pdb file
    Returns:
    :returns: str output_data: "SMILES<TAB>name" for the file, or None if
        the molecule could not be read or sanitized.
    """
    # Fix: `output_data` was only assigned on success, so a failed
    # conversion raised UnboundLocalError at the return statement.
    output_data = None
    try:
        mol = Chem.MolFromPDBFile(pdb)
        mol_sanitized = MOH.check_sanitization(mol)
        if mol_sanitized is not None:
            smiles = Chem.MolToSmiles(mol_sanitized, isomericSmiles=True)
            file_name = os.path.basename(pdb)
            file_stripped = file_name.replace(".pdb", "")
            output_data = smiles + "\t" + file_stripped
    except Exception:
        # best-effort conversion: unreadable files simply yield None
        pass
    return output_data
#
def make_smile_list(sub_folder):
    """
    This function converts every ligand within a folder into SMILES
    and returns the list of smiles with a name.
    The names are the basename of the file minus the .pdb

    Inputs:
    :param str sub_folder: path to the folder to search for pdb files
    Returns:
    :returns: list smiles_list: a list of "SMILES<TAB>name" strings, one per
        successfully converted .pdb/.PDB file
    """
    sub_folder = sub_folder + os.sep
    # Fix: the original prepended os.sep to the (already absolute) folder
    # path, producing patterns like "//abs/path/*.pdb" -- harmless on POSIX
    # by accident, broken on Windows.
    pdb_list = glob.glob(sub_folder + "*.pdb")
    pdb_list.extend(glob.glob(sub_folder + "*.PDB"))
    pdb_list = tuple([tuple([pdb]) for pdb in pdb_list])
    # run convert in multithread
    smiles_list = mp.multi_threading(pdb_list, -1, run_convert_on_single_pdb)
    # Drop files that failed to convert (None results) so the caller can
    # safely "\n".join the list.
    smiles_list = [smi for smi in smiles_list if smi is not None]
    return smiles_list
#
def start_run_main(vars):
    """
    This will run the main arguments for the script: convert every .pdb in
    the source folder to SMILES and write them to a single tab-delimited
    .smi file in the output folder, named after the source folder.
    Inputs:
    :param dict vars: dictionary of user variables ("source_folder" and
        "output_folder" are used here).
    """
    # Running converter
    smiles_list = make_smile_list(vars["source_folder"])
    # Output file name = last non-empty path component of the source folder.
    name = [x for x in vars["source_folder"].split(os.sep)if x != ""][-1]
    output_file = vars["output_folder"] + os.sep + name + ".smi"
    with open(output_file, "w") as f:
        f.write("\n".join(smiles_list))
    print("Converted ligands to .smi located:\n\t{}".format(output_file))
def get_arguments_from_argparse(args_dict):
    """
    This function handles the arg parser arguments for the script: it
    validates both folder paths, creates the output folder when missing,
    and normalizes both paths to absolute paths with a trailing separator.

    Inputs:
    :param dict args_dict: dictionary of parameters (mutated in place)
    Returns:
    :returns: dict args_dict: dictionary of parameters with
        "source_folder"/"output_folder" replaced by absolute paths
    Raises:
    :raises Exception: when either path is not a string, the source folder
        does not exist, or the output folder cannot be created
    """
    # Argument handling (isinstance instead of `type(...) != str`)
    if not isinstance(args_dict["source_folder"], str):
        raise Exception("provided source folder must be a directory.")
    if not isinstance(args_dict["output_folder"], str):
        raise Exception("provided output_folder must be a directory.")
    # the source folder must already exist and be a directory
    if not os.path.isdir(args_dict["source_folder"]):
        raise Exception("provided source folder can not be found or is not a directory.")
    args_dict["source_folder"] = os.path.abspath(args_dict["source_folder"]) + os.sep
    if not os.path.exists(args_dict["output_folder"]):
        # best-effort creation; failure is reported by the checks below
        try:
            os.mkdir(args_dict["output_folder"])
        except OSError:
            pass
    if not os.path.exists(args_dict["output_folder"]):
        raise Exception("output_folder could not be made or found.")
    if not os.path.isdir(args_dict["output_folder"]):
        raise Exception("output_folder needs to be a directory.")
    args_dict["output_folder"] = os.path.abspath(args_dict["output_folder"]) + os.sep
    return args_dict
#
# Argument parsing
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
'--source_folder', '-s', required=True, default=None,
help='Path to folder containing .pdb files to convert. \
File must contain a single small molecules. Without protein. \
Files must end with either .pdb or .PDB'
)
PARSER.add_argument(
'--output_folder', '-o', required=True, default=None,
help='Path to folder where we will output a .smi file of converted .pdb files.'
)
# processors and multithread mode
PARSER.add_argument(
'--number_of_processors', '-p', type=int, metavar='N', default=1,
help='Number of processors to use for parallel calculations. \
This script is not MPI enable but is able to multithread using SMP architecture. \
Set to -1 for all available CPUs.'
)
ARGS_DICT = vars(PARSER.parse_args())
ARGS_DICT = get_arguments_from_argparse(ARGS_DICT)
# Running converter
start_run_main(ARGS_DICT)
| 5,274 | 31.361963 | 89 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/accessory_scripts/support_scripts/mol_object_handling.py | # Copyright 2018 Jacob D. Durrant
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##### mol_object_handling.py
# This script is taken directly from Gypsum-DL's MolObjectHandling.py
# Modification to function names were simply superficial to make PEP8 compliant
# ie
# def handle_hydrogens became def handle_hydrogens
# def nitrogen_charge_adjustment became nitrogen_charge_adjustment
# file name change from MolObjectHandling.py to mol_object_handling.py
import __future__
import rdkit
from rdkit import Chem
#Disable the unnecessary RDKit warnings
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
def check_sanitization(mol):
    """
    Given a rdkit.Chem.rdchem.Mol this script will sanitize the molecule.
    It will be done using a series of try/except statements so that if it fails it will return a None
    rather than causing the outer script to fail.
    Nitrogen Fixing step occurs here to correct for a common RDKit valence error in which Nitrogens with
    with 4 bonds have the wrong formal charge by setting it to -1.
    This can be a place to add additional correcting features for any discovered common sanitation failures.
    Handled here so there are no problems later.
    Inputs:
    :param rdkit.Chem.rdchem.Mol mol: an rdkit molecule to be sanitized
    Returns:
    :returns: rdkit.Chem.rdchem.Mol mol: A sanitized rdkit molecule or None if it failed.
    """
    if mol is None:
        return None
    # First attempt: plain sanitization with errors caught rather than
    # raised; nearly everything should get through here.
    try:
        sanitize_string = Chem.SanitizeMol(mol, sanitizeOps = rdkit.Chem.rdmolops.SanitizeFlags.SANITIZE_ALL, catchErrors = True)
    except:
        return None
    if sanitize_string.name == "SANITIZE_NONE":
        return mol
    else:
        # try to fix the nitrogen (common problem that 4 bonded Nitrogens improperly lose their + charges)
        mol = nitrogen_charge_adjustment(mol)
        Chem.SanitizeMol(mol, sanitizeOps = rdkit.Chem.rdmolops.SanitizeFlags.SANITIZE_ALL, catchErrors = True)
        sanitize_string = Chem.SanitizeMol(mol, sanitizeOps = rdkit.Chem.rdmolops.SanitizeFlags.SANITIZE_ALL, catchErrors = True)
        if sanitize_string.name == "SANITIZE_NONE":
            return mol
    # run a sanitation Filter 1 more time incase something slipped through
    # ie. if there are any forms of sanition which fail ie. KEKULIZE then return None
    sanitize_string = Chem.SanitizeMol(mol, sanitizeOps = rdkit.Chem.rdmolops.SanitizeFlags.SANITIZE_ALL, catchErrors = True)
    if sanitize_string.name != "SANITIZE_NONE":
        return None
    else:
        return mol
#
def handle_hydrogens(mol, protanate_step):
    """
    Given a rdkit.Chem.rdchem.Mol this script will sanitize the molecule, remove all non-explicit H's
    and add back on all implicit H's. This is to control for any discrepencies in the smiles strings or presence and
    absense of H's.
    If it fails it will return a None rather than causing the outer script to fail. Handled here so there are no problems later.
    Inputs:
    :param rdkit.Chem.rdchem.Mol mol: an rdkit molecule (sanitized in the
        first step below)
    :param bool protanate_step: True if mol needs to be protanated; False if deprotanated
        -Note if Protanated, smiles_merge takes up to 10times longer
    Returns:
    :returns: rdkit.Chem.rdchem.Mol mol: an rdkit molecule with H's handled (either added or removed) and sanitized.
        it returns None if H's can't be added or if sanitation fails
    """
    # Pipeline: sanitize -> strip Hs -> (optionally) re-add Hs; any failed
    # stage short-circuits with None.
    mol = check_sanitization(mol)
    if mol is None:
        # mol failed Sanitation
        return None
    mol = try_deprotanation(mol)
    if mol is None:
        # mol failed deprotanation
        return None
    if protanate_step is True:
        # PROTANTION IS ON
        mol = try_reprotanation(mol)
        if mol is None:
            # mol failed reprotanation
            return None
    return mol
#
def try_deprotanation(sanitized_mol):
    """
    Attempt to strip all non-explicit hydrogens from an already-sanitized
    rdkit mol, then re-sanitize the result.

    Inputs:
    :param rdkit.Chem.rdchem.Mol sanitized_mol: an rdkit molecule already
        sanitized.
    Returns:
    :returns: rdkit.Chem.rdchem.Mol: the deprotonated, re-sanitized
        molecule, or None if hydrogen removal or re-sanitization fails.
    """
    try:
        stripped_mol = Chem.RemoveHs(sanitized_mol, sanitize=False)
    except:
        return None
    return check_sanitization(stripped_mol)
#
def try_reprotanation(sanitized_deprotanated_mol):
    """
    Attempt to add implicit hydrogens back onto a sanitized, deprotonated
    rdkit mol, then re-sanitize the result.

    Inputs:
    :param rdkit.Chem.rdchem.Mol sanitized_deprotanated_mol: an rdkit
        molecule already sanitized and deprotanated.
    Returns:
    :returns: rdkit.Chem.rdchem.Mol: the re-protonated, sanitized molecule,
        or None if the input is None, H's can't be added, or sanitation
        fails.
    """
    if sanitized_deprotanated_mol is None:
        return None
    try:
        protonated_mol = Chem.AddHs(sanitized_deprotanated_mol)
    except:
        protonated_mol = None
    return check_sanitization(protonated_mol)
#
def remove_atoms(mol, list_of_idx_to_remove):
    """
    This function removes atoms from an rdkit mol based on
    a provided list. The RemoveAtom function in Rdkit requires
    converting the mol to an more editable version of the rdkit mol
    object (Chem.EditableMol).
    Inputs:
    :param rdkit.Chem.rdchem.Mol mol: any rdkit mol
    :param list list_of_idx_to_remove: a list of idx values to remove
        from mol (left unmodified)
    Returns:
    :returns: rdkit.Chem.rdchem.Mol new_mol: the rdkit mol as input but with
        the atoms from the list removed, or None on any failure
    """
    if mol is None:
        return None
    try:
        # Fix: sort a *copy* in descending order; the original called
        # .sort(reverse=True) on the caller's list, mutating the argument.
        # Descending order keeps lower indices valid during removal.
        atoms_to_remove = sorted(list_of_idx_to_remove, reverse=True)
    except Exception:
        return None
    try:
        editable_mol = Chem.EditableMol(mol)
        for atom_idx in atoms_to_remove:
            editable_mol.RemoveAtom(atom_idx)
        new_mol = editable_mol.GetMol()
        return new_mol
    except Exception:
        return None
#
def nitrogen_charge_adjustment(mol):
    """
    When importing ligands with sanitation turned off, one can successfully import
    import a SMILES in which a Nitrogen (N) can have 4 bonds, but no positive charge.
    Any 4-bonded N lacking a positive charge will fail a sanitiation check.
        -This could be an issue with importing improper SMILES, reactions, or crossing a nuetral nitrogen
        with a side chain which adds an extra bond, but doesn't add the extra positive charge.
    To correct for this, this function will find all N atoms with a summed bond count of 4
    (ie. 4 single bonds;2 double bonds; a single and a triple bond; two single and a double bond)
    and set the formal charge of those N's to +1.
    RDkit treats aromatic bonds as a bond count of 1.5. But we will not try to correct for
    Nitrogens labeled as Aromatic. As precaution, any N which is aromatic is skipped in this function.
    Inputs:
    :param rdkit.Chem.rdchem.Mol mol: any rdkit mol (modified in place)
    Returns:
    :returns: rdkit.Chem.rdchem.Mol mol: the same rdkit mol with the N's adjusted
    """
    if mol is None:
        return None
    # makes sure its an rdkit obj
    try:
        atoms = mol.GetAtoms()
    except:
        return None
    for atom in atoms:
        # atomic number 7 == nitrogen
        if atom.GetAtomicNum() == 7:
            bonds = [bond.GetBondTypeAsDouble() for bond in atom.GetBonds()]
            # If aromatic skip as we do not want assume the charge.
            if 1.5 in bonds:
                continue
            # GetBondTypeAsDouble prints out 1 for single, 2.0 for double,
            # 3.0 for triple, 1.5 for AROMATIC but if AROMATIC WE WILL SKIP THIS ATOM
            num_bond_sums = sum(bonds)
            # Check if the octet is filled
            if num_bond_sums == 4.0:
                atom.SetFormalCharge(+1)
    return mol
#
def check_for_unassigned_atom(mol):
    """
    Return mol unchanged unless it contains an unassigned atom group ('*').

    A '*' in a SMILES string is an atom with an atomic number of 0; such a
    molecule is incomplete, so None is returned for it (and for any input
    that is None or not an rdkit mol).
    """
    if mol is None:
        return None
    try:
        atoms = mol.GetAtoms()
    except:
        return None
    if any(atom.GetAtomicNum() == 0 for atom in atoms):
        return None
    return mol
#
def handle_frag_check(mol):
    """
    Return the largest usable fragment of a (possibly fragmented) mol.

    This will take a RDKit Mol object and check if it is fragmented. If it
    has no fragments the molecule is returned unharmed. Otherwise every
    fragment is screened with check_for_unassigned_atom (fragments
    containing a '*' atom are discarded) and the surviving fragment with
    the most atoms is returned.

    Inputs:
    :param rdkit.Chem.rdchem.Mol mol: any rdkit mol (or None)
    Returns:
    :returns: rdkit.Chem.rdchem.Mol: the mol or its largest valid fragment,
        or None when mol is None, fragment detection fails, or no fragment
        survives the '*' screen
    """
    if mol is None:
        return None
    # Narrowed from a bare `except:`; a bad input makes GetMolFrags raise.
    try:
        frags = Chem.GetMolFrags(mol, asMols=True, sanitizeFrags=False)
    except Exception:
        return None
    if len(frags) == 1:
        return mol
    # Collect (index, atom_count) for each fragment free of '*' atoms.
    frag_info_list = []
    for frag_index, frag in enumerate(frags):
        frag = check_for_unassigned_atom(frag)
        if frag is None:
            continue
        frag_info_list.append((frag_index, frag.GetNumAtoms()))
    if not frag_info_list:
        return None
    # Pick the fragment with the most atoms. max() returns the first of any
    # ties, which matches the original stable descending sort.
    largest_frag_idx = max(frag_info_list, key=lambda info: info[1])[0]
    return frags[largest_frag_idx]
#
| 10,623 | 34.531773 | 130 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/accessory_scripts/support_scripts/__init__.py | 1 | 0 | 0 | py | |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/accessory_scripts/support_scripts/Multiprocess.py | """
Run commands on multiple processors in python.
Adapted from examples on https://docs.python.org/2/library/multiprocessing.html
"""
# These functions are also borrow from the Gypsum-DL script Parallelizer.py
# These functions were renamed to be pep8 compliant
# ie )
# def multi_threading became def multi_threading
import multiprocessing
def multi_threading(inputs, num_processors, task_name):
    """Run task_name over every datum in inputs, possibly in parallel.

    Args:
        inputs ([data]): A list of data. Each datum contains the details to
            run a single job on a single processor.
        num_processors (int): The number of processors to use.
        task_name (callable): The function executed for each job.
    Returns:
        list: one result per input, in submission order.
    """
    # Nothing to do for an empty job list.
    if not inputs:
        return []
    num_processors = count_processors(len(inputs), num_processors)
    # Each task is (sequence_number, (function, argument_tuple)); bare
    # values are wrapped into a one-element tuple of arguments.
    tasks = [
        (idx, (task_name, datum if isinstance(datum, tuple) else (datum,)))
        for idx, datum in enumerate(inputs)
    ]
    if num_processors != 1:
        return start_processes(tasks, num_processors)
    # Single-processor fallback: run everything inline, in order.
    outputs = []
    for _, (job, job_args) in tasks:
        outputs.append(job(*job_args))
    return outputs
#
# Worker function
#
def worker(input, output):
    """Consume (seq, (func, args)) jobs from input until 'STOP' is seen.

    Each result is placed on output as (seq, func(*args)) so the caller can
    restore the original submission order.
    """
    for seq, job in iter(input.get, 'STOP'):
        task_func, task_args = job
        output.put((seq, task_func(*task_args)))
def count_processors(num_inputs, num_processors):
    """
    Checks processors available and returns a safe number of them to
    utilize.

    :param int num_inputs: The number of inputs.
    :param int num_processors: The number of desired processors.
    :returns: The number of processors to use.
    """
    # A non-positive request means "determine the count programmatically".
    if num_processors <= 0:
        num_processors = multiprocessing.cpu_count()
    # Never use more processors than there are jobs.
    return min(num_inputs, num_processors)
def start_processes(inputs, num_processors):
    """
    Creates a queue of inputs and outputs
    """
    # Create the shared queues.
    task_queue = multiprocessing.Queue()
    done_queue = multiprocessing.Queue()
    # Queue every job before the workers start pulling.
    for job in inputs:
        task_queue.put(job)
    # Launch the worker pool.
    for _ in range(num_processors):
        multiprocessing.Process(
            target=worker, args=(task_queue, done_queue)
        ).start()
    # Collect exactly one result per submitted job.
    collected = [done_queue.get() for _ in range(len(inputs))]
    # Poison-pill each worker so it exits its consume loop.
    for _ in range(num_processors):
        task_queue.put('STOP')
    # Restore submission order via the sequence number, then strip it.
    collected.sort(key=lambda pair: pair[0])
    return [payload for _, payload in collected]
###
# Helper functions
###
def flatten_list(tier_list):
    """
    Given a list of lists, this returns a flat list of all items.

    :params list tier_list: A 2D list.
    :returns: A flat list of all items.
    """
    if tier_list is None:
        return []
    flat = []
    for sublist in tier_list:
        flat.extend(sublist)
    return flat
def strip_none(none_list):
    """
    Given a list that might contain None items, this returns a list with no
    None items.

    :params list none_list: A list that may contain None items.
    :returns: A list stripped of None items.
    """
    if none_list is None:
        return []
    kept = []
    for entry in none_list:
        if entry is not None:
            kept.append(entry)
    return kept
| 3,732 | 24.744828 | 89 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/user_vars.py | """user_vars
This should contain the functions for defining input variables.
Both the default variables and the user input variables.
This should also validate them.
"""
import __future__
import os
import copy
import datetime
import json
import sys
import platform
from shutil import copyfile
def program_info():
    """
    Get the program version number, etc.

    Returns:
    :returns: str program_output: a string for the print of the program information
    """
    # Segments are joined rather than repeatedly concatenated; the final
    # text is identical to the historical banner.
    segments = (
        "\nAutoGrow Version 4.0.3\n",
        " ================== \n",
        "If you use AutoGrow 4.0.3 in your research, please cite the following reference:\n",
        "Spiegel, J.O., Durrant, J.D. \n",
        "AutoGrow4: an open-source genetic algorithm ",
        "for de novo drug design and lead optimization. \n",
        "J Cheminform 12, 25 (2020). \n",
        "[doi: 10.1186/s13321-020-00429-4]\n",
        " ================== \n\n",
    )
    return "".join(segments)
#
def save_vars_as_json(vars):
    """
    Save the vars dictionary as a json file.

    This file is used later to track experiments and is necessary for
    several of the utility scripts. All variables are saved except
    parallelizer entries and the filter_object_dict entry (neither is
    json-serializable).

    The file is written to output_directory + "vars.json". If AutoGrow has
    run multiple times for the same directory, a numbered name is used
    instead, starting at "vars_2.json"; the utility scripts only look at the
    original "vars.json".

    Inputs:
    :param dict vars: dict of user variables which will govern how the
        programs runs
    """
    output_directory = vars["output_directory"]
    # Find the first unused filename: vars.json, then vars_2.json, ...
    vars_file = output_directory + os.sep + "vars.json"
    suffix = 2
    while os.path.exists(vars_file):
        vars_file = "{}{}vars_{}.json".format(output_directory, os.sep, suffix)
        suffix = suffix + 1
    # Deep-copy everything serializable so later mutation of vars cannot
    # race with the dump.
    serializable_vars = {
        key: copy.deepcopy(value)
        for key, value in vars.items()
        if "parallelizer" not in key and key != "filter_object_dict"
    }
    with open(vars_file, "w") as fp:
        json.dump(serializable_vars, fp, indent=4)
def multiprocess_handling(vars):
    """
    This function handles the multiprocessing functions. It establishes a Paralellizer object
    and adds it to the vars dictionary.

    Inputs:
    :param dict vars: dict of user variables which will govern how the programs runs

    Returns:
    :returns: dict vars: dict of user variables which will govern how the programs runs
    """
    # Handle Serial overriding number_of_processors
    # serial fixes it to 1 processor
    if vars["multithread_mode"].lower() == "serial":
        # Normalize user-supplied casing (e.g. "Serial" -> "serial").
        vars["multithread_mode"] = "serial"
        if vars["number_of_processors"] != 1:
            print(
                "Because --multithread_mode was set to serial, "
                + "this will be run on a single processor."
            )
        vars["number_of_processors"] = 1
    # Handle mpi errors if mpi4py isn't installed
    if vars["multithread_mode"].lower() == "mpi":
        vars["multithread_mode"] = "mpi"
        # Probe the optional dependencies up front so a missing package
        # fails with a clear message rather than deep inside the run.
        try:
            import mpi4py
        except:
            printout = "mpi4py not installed but --multithread_mode is set to"
            printout = printout + " mpi. \n Either install mpi4py or switch "
            printout = printout + "multithread_mode to multithreading or serial"
            raise ImportError(printout)
        try:
            import func_timeout
            from func_timeout import func_timeout, FunctionTimedOut
        except:
            printout = "func_timeout not installed but --multithread_mode is "
            printout = printout + "set to mpi. \n Either install func_timeout "
            printout = printout + "or switch multithread_mode to"
            printout = printout + " multithreading or serial"
            raise ImportError(printout)
    # # # launch mpi workers
    if vars["multithread_mode"] == "mpi":
        # Avoid EOF error
        from autogrow.operators.convert_files.gypsum_dl.gypsum_dl.Parallelizer import (
            Parallelizer,
        )

        vars["parallelizer"] = Parallelizer(
            vars["multithread_mode"], vars["number_of_processors"]
        )

        if vars["parallelizer"] is None:
            printout = "EOF ERRORS FAILED TO CREATE A PARALLIZER OBJECT"
            print(printout)
            raise Exception(printout)
    else:
        # Lower level mpi (ie making a new Parallelizer within an mpi)
        # has problems with importing the MPI environment and mpi4py
        # So we will flag it to skip the MPI mode and just go to multithread/serial
        # This is a saftey precaution
        from autogrow.operators.convert_files.gypsum_dl.gypsum_dl.Parallelizer import Parallelizer

        vars["parallelizer"] = Parallelizer(
            vars["multithread_mode"], vars["number_of_processors"], True
        )

    return vars
def test_docking_executables(vars, vina_exe, qvina2_exe):
    """
    This will test if docking executables are compatible with OS.
    This is only required for MacOS.

    Test will output the version of Vina and QVina2.1 executables to txt file
    in the root_output_folder (docking_exe_MACOS_test.txt)

    If both executables are compatible with this MacOS there should be the following
    2 lines in the txt file:
        AutoDock Vina 1.1.2 (May 11, 2011)
        QuickVina 2.1 (24 Dec, 2017)

    Returns True if both work and returns False.

    Inputs:
    :param dict vars: dict of user variables which will govern how the programs runs
    :param str vina_exe: path to vina executable
    :param str qvina2_exe: path to quick vina 2 executable
    Returns:
    :returns: bool bool: returns True if both docking executables work; False if either fails
    """
    # Version banners (and any stderr) from both executables land here.
    test_vina_outfile = vars["root_output_folder"] + os.sep + "docking_exe_MACOS_test.txt"
    try:
        # `--version` only succeeds when the binary can actually execute on
        # this OS; stdout and stderr are folded into the same file.
        command = "{} --version > {arg_2} 2>> {arg_2}".format(vina_exe, arg_2=test_vina_outfile)
        os.system(command)
        command = "{} --version >> {arg_2} 2>> {arg_2}".format(qvina2_exe, arg_2=test_vina_outfile)
        os.system(command)
    except:
        printout = "Docking executables could not be found."
        # is not compatible on this OS. \nPlease use docker "
        return False
    with open(test_vina_outfile, "r") as test_file:
        lines = test_file.readlines()
    # Line 0 holds the Vina banner; only fail the run when the user actually
    # selected Vina docking.
    if "AutoDock Vina 1.1.2" not in lines[0]:
        printout = "Vina docking is not compatible on this OS. \nPlease use docker or "
        printout = printout + "try provide a Vina executable compatible with the OS.\n"
        print(printout)
        if vars["dock_choice"] == "VinaDocking":
            return False
    # Line 1 holds the QuickVina banner; same per-choice failure rule.
    if "QuickVina 2.1" not in lines[1]:
        printout = "QuickVina 2.1 docking is not compatible on this OS. \nPlease use docker"
        printout = printout + " or try provide a Vina executable compatible with the OS.\n"
        print(printout)
        if vars["dock_choice"] == "QuickVina2Docking":
            return False
    return True
def run_macos_notarization(vars):
    """
    This function runs notarization on vina and qvina2 docking.
    This is important for MacOS newer than 10.15 and newer than
    For MacOS newer than 10.15, this will require an internet connection.

    Inputs:
    :param dict vars: dict of user variables which will govern how the programs runs
    """
    # Locate the prepackaged MacOS docking binaries relative to this file.
    current_dir = os.path.dirname(os.path.realpath(__file__)) + os.sep
    vina_exe = current_dir + os.sep.join(["docking", "docking_executables", "vina", \
        "autodock_vina_1_1_2_mac", "bin", "vina"])
    qvina2_exe = current_dir + os.sep.join(["docking", "docking_executables", \
        "q_vina_2", "q_vina_2_1_mac", "qvina2.1"])
    # Check executables exist. raise exception if not
    if os.path.exists(vina_exe) is False or os.path.exists(qvina2_exe) is False:
        printout = "Docking executables could not be found."
        raise Exception(printout)
    both_docking_exe_work = test_docking_executables(vars, vina_exe, qvina2_exe)
    if both_docking_exe_work is False:
        # Ensure permissions are unrestricted
        try:
            command = "chmod -R a+rwx {}".format(vina_exe)
            os.system(command)
            command = "chmod -R a+rwx {}".format(qvina2_exe)
            os.system(command)
        except:
            printout = "Permissions could not be adjusted on docking files."
            print(printout)
            raise Exception(printout)
        # Check Platform information: only MacOS 10.7 through 10.15 is
        # handled here; anything else must use the docker build.
        mac_version = platform.mac_ver()[0].split(".")
        if int(mac_version[0]) < 10:
            printout = "We do not provide support for MacOS less than 10.7.\n" + \
                "Please run using docker version of AutoGrow."
            print(printout)
            raise Exception(printout)
        if int(mac_version[0]) == 10:
            if int(mac_version[1]) < 7:
                printout = "We do not support for MacOS less than 10.7.\n" + \
                    "Please run using docker version of AutoGrow."
                print(printout)
                raise Exception(printout)
            if int(mac_version[1]) > 15:
                # 10.15 is Catalina which requires notarizing docking software
                printout = "We have not tested MacOS higher than 10.15.\n" + \
                    "Please run using docker version of AutoGrow."
                print(printout)
                raise Exception(printout)
        # Rewrite the quarantine attribute so Gatekeeper allows the
        # binaries to run.
        try:
            command = "xattr -w com.apple.quarantine {}".format(vina_exe)
            os.system(command)
            command = "xattr -w com.apple.quarantine {}".format(qvina2_exe)
            os.system(command)
        except:
            printout = "Please install xattr. Can be installed using the command:"
            printout = printout + "\n\tpip install xattr"
            print(printout)
            raise Exception(printout)
############################################
###### Variables Handlining Settings #######
############################################
def _report_missing_variables(input_params):
    """Raise NotImplementedError listing every required key absent from input_params."""
    list_of_required_inputs = [
        "filename_of_receptor",
        "center_x",
        "center_y",
        "center_z",
        "size_x",
        "size_y",
        "size_z",
        "root_output_folder",
        "source_compound_file",
    ]
    missing_variables = [
        variable for variable in list_of_required_inputs
        if variable not in input_params
    ]
    if len(missing_variables) == 0:
        return
    printout = (
        "\nRequired variables are missing from the input. A description "
        "of each of these can be found by running python ./RunAutogrow -h"
    )
    printout = printout + "\nThe following required variables are missing: "
    for variable in missing_variables:
        printout = printout + "\n\t" + variable
    print("")
    print(printout)
    print("")
    raise NotImplementedError("\n" + printout + "\n")


def _coerce_box_dimensions(input_params):
    """Ensure the docking-box center/size values are floats (ints converted in place)."""
    for key in ["center_x", "center_y", "center_z", "size_x", "size_y", "size_z"]:
        # type() (not isinstance) deliberately rejects bools, which are a
        # subclass of int.
        if type(input_params[key]) == float:
            continue
        if type(input_params[key]) == int:
            input_params[key] = float(input_params[key])
        else:
            printout = "\n{} must be a float value.\n".format(key)
            print(printout)
            raise Exception(printout)


def _normalize_docking_int(input_params, key):
    """Normalize an optional docking integer: "None" -> None, numeric str -> int, else raise."""
    if key not in input_params:
        return
    if input_params[key] == "None":
        input_params[key] = None
    if input_params[key] is None:
        return
    # Best-effort int conversion; failures fall through to the type check.
    try:
        input_params[key] = int(input_params[key])
    except Exception:
        pass
    if type(input_params[key]) not in (int, float):
        raise Exception(
            "{} needs to be an integer. If you do not know what to use, "
            "leave this blank and the default for the docking software "
            "will be used.".format(key)
        )


def _fill_first_generation_defaults(input_params):
    """Default each *_first_generation counter from its base key (or to 10)."""
    for base_key in [
        "top_mols_to_seed_next_generation",
        "number_of_crossovers",
        "number_of_mutants",
        "number_elitism_advance_from_previous_gen",
    ]:
        first_gen_key = base_key + "_first_generation"
        if first_gen_key in input_params:
            continue
        if base_key not in input_params:
            # Use defined default of 10 for both the base and first-gen keys.
            input_params[base_key] = 10
            input_params[first_gen_key] = 10
        else:
            input_params[first_gen_key] = input_params[base_key]


def _validate_required_paths(input_params):
    """Convert required paths to abspaths and confirm each exists and is usable."""
    for key in ["filename_of_receptor", "root_output_folder", "source_compound_file"]:
        input_params[key] = os.path.abspath(input_params[key])
    # Receptor must be an existing .pdb file.
    if os.path.isfile(input_params["filename_of_receptor"]) is False:
        raise NotImplementedError(
            "Receptor file can not be found. File must be a .PDB file."
        )
    if ".pdb" not in input_params["filename_of_receptor"]:
        raise NotImplementedError("filename_of_receptor must be a .PDB file.")
    # Output folder is created on demand when absent.
    if os.path.exists(input_params["root_output_folder"]) is False:
        try:
            os.makedirs(input_params["root_output_folder"])
        except Exception:
            raise NotImplementedError(
                "root_output_folder could not be found and could not be "
                "created. Please manually create the desired directory or "
                "check input parameters"
            )
        if os.path.exists(input_params["root_output_folder"]) is False:
            raise NotImplementedError(
                "root_output_folder could not be found and could not be "
                "created. Please manually create the desired directory or "
                "check input parameters"
            )
    if os.path.isdir(input_params["root_output_folder"]) is False:
        raise NotImplementedError(
            "root_output_folder is not a directory. "
            "Check your input parameters."
        )
    # Source compounds must be an existing tab-delineated .smi file.
    if os.path.isfile(input_params["source_compound_file"]) is False:
        raise NotImplementedError(
            "source_compound_file can not be found. "
            "File must be a tab delineated .smi file."
        )
    if ".smi" not in input_params["source_compound_file"]:
        raise NotImplementedError(
            "source_compound_file must be a tab delineated .smi file."
        )


def check_for_required_inputs(input_params):
    """
    Confirm all the required inputs were provided, normalizing them in place.

    Steps, in order:
        1. Verify every required variable is present.
        2. Coerce the docking-box dimensions (center_*, size_*) to floats.
        3. Normalize docking_exhaustiveness / docking_num_modes ("None" or
           numeric strings) to None / int.
        4. Fill the *_first_generation counters from their base keys
           (defaulting both to 10 when neither was supplied).
        5. Convert the receptor/output/source paths to abspaths and verify
           each exists (the output folder is created if needed).

    Inputs:
    :param dict input_params: The parameters. A dictionary of
        {parameter name: value}; mutated in place.

    Raises:
    :raises NotImplementedError: missing required variables or bad paths.
    :raises Exception: non-numeric box dimensions or docking integers.
    """
    _report_missing_variables(input_params)
    _coerce_box_dimensions(input_params)
    # Check Docking Exhaustiveness and modes...
    _normalize_docking_int(input_params, "docking_exhaustiveness")
    _normalize_docking_int(input_params, "docking_num_modes")
    # Check numbers which may be defined by first generation.
    _fill_first_generation_defaults(input_params)
    _validate_required_paths(input_params)
def determine_bash_timeout_vs_gtimeout():
    """
    Decide whether Bash provides `timeout` (Linux) or `gtimeout` (MacOS
    coreutils, installed via homebrew).

    A trivial `timeout 1 echo " "` probe is run through os.system; an exit
    status of 0 means the command works, anything else (e.g. 32512) means it
    does not exist. `gtimeout` is probed first, then `timeout`.

    Returns:
    :returns: str timeout_option: either "timeout" or "gtimeout", naming the
        command the shell supports.

    Raises:
    :raises Exception: when neither command works or the probe itself fails.
    """
    if sys.platform.lower() in ["linux", "linux2"]:
        # `timeout` is installed by default on all Linux machines.
        return "timeout"
    # Message whitespace normalized; the old string line-continuations
    # embedded source indentation into the user-facing text.
    failure_msg = (
        "Something is very wrong. This OS may not be supported by "
        "Autogrow or you may need to execute through Bash."
    )
    probe_command = 'timeout 1 echo " "'
    # Narrowed from bare `except:` so KeyboardInterrupt still propagates.
    try:
        gtimeout_result = os.system("g" + probe_command)
    except Exception:
        raise Exception(failure_msg)
    if gtimeout_result == 0:
        return "gtimeout"
    print("gtimeout failed to run, we will check timeout")
    try:
        timeout_result = os.system(probe_command)
    except Exception:
        raise Exception(failure_msg)
    if timeout_result == 0:
        return "timeout"
    printout = "Need to install GNU tools for Bash to work. \n"
    printout = printout + "This is essential to use Bash Timeout function in Autogrow. \n"
    printout = printout + "\t This will require 1st installing homebrew. \n"
    printout = printout + "\t\t Instructions found at: https://brew.sh/ \n"
    printout = printout + "\t Once brew is installed, please run:"
    printout = printout + " sudo brew install coreutils \n\n"
    print(printout)
    raise Exception(printout)
def check_dependencies():
    """
    This function will try to import all the installed dependencies that will be
    used in Autogrow. If it fails to import it will raise an ImportError
    """
    # Check Bash Timeout function (There's a difference between MacOS and linux)
    # Linux uses timeout while MacOS uses gtimeout
    timeout_option = determine_bash_timeout_vs_gtimeout()
    if timeout_option not in ["timeout", "gtimeout"]:
        raise Exception(
            "Something is very wrong. This OS may not be supported by \
            Autogrow or you may need to execute through Bash."
        )
    # Cheminformatics stack: rdkit plus each submodule used later in the run.
    try:
        import rdkit
        from rdkit import Chem
        from rdkit.Chem import AllChem
        from rdkit.Chem import rdDepictor
        from rdkit.Chem.Draw import rdMolDraw2D
        from rdkit.Chem.Draw import PrepareMolForDrawing
        from rdkit.Chem import rdFMCS
        from rdkit.Chem import FilterCatalog
        from rdkit.Chem.FilterCatalog import FilterCatalogParams
        import rdkit.Chem.Lipinski as Lipinski
        import rdkit.Chem.Crippen as Crippen
        import rdkit.Chem.Descriptors as Descriptors
        import rdkit.Chem.MolSurf as MolSurf
    except:
        print("You need to install rdkit and its dependencies.")
        raise ImportError("You need to install rdkit and its dependencies.")
    # molvs is prepackaged within gypsum_dl
    # try:
    #     from molvs import standardize_smiles as ssmiles
    # except:
    #     print("You need to install molvs and its dependencies.")
    #     raise ImportError("You need to install molvs and its dependencies.")
    # Numeric dependencies.
    try:
        import numpy
    except:
        print("You need to install numpy and its dependencies.")
        raise ImportError("You need to install numpy and its dependencies.")
    try:
        from scipy.cluster.vq import kmeans2
    except:
        print("You need to install scipy and its dependencies.")
        raise ImportError("You need to install scipy and its dependencies.")
    # Standard-library modules; a failure here indicates a broken Python install.
    try:
        import os
        import sys
        import glob
        import subprocess
        import multiprocessing
        import time
    except:
        print(
            "Missing a Python Dependency. Could be import: os,sys,glob,\
            subprocess,multiprocess, time."
        )
        raise ImportError(
            "Missing a Python Dependency. Could be import: \
            os,sys,glob,subprocess,multiprocess, time."
        )
    try:
        import copy
        import random
        import string
        import math
    except:
        print("Missing a Python Dependency. Could be import: copy,random, string,math")
        raise ImportError(
            "Missing a Python Dependency. Could be import: copy,random, string,math"
        )
    try:
        from collections import OrderedDict
        import webbrowser
        import argparse
        import itertools
        import unittest
    except:
        print(
            "Missing a Python Dependency. Could be import: collections,\
            webbrowser,argparse,itertools,unittest"
        )
        raise ImportError(
            "Missing a Python Dependency. Could be import: \
            collections,webbrowser,argparse,itertools,unittest"
        )
    try:
        import textwrap
        import pickle
        import json
    except:
        print("Missing a Python Dependency. Could be import: textwrap, pickle,json")
        raise ImportError(
            "Missing a Python Dependency. Could be import: \
            textwrap, pickle,json"
        )
def define_defaults():
    """
    Sets the command-line parameters to their default values.

    Returns:
    :returns: dict vars: a dictionary of all default variables
    """
    # Directory of this script; used for relative pathing to the
    # prepackaged executables and scoring scripts.
    script_dir = os.path.dirname(os.path.realpath(__file__))
    # Local name renamed from `vars`, which shadowed the builtin.
    defaults = {}
    # Neural Network executables for scoring binding, e.g.
    # /PATH/autogrow4/autogrow/docking/scoring/nn_score_exe/nnscore1/NNScore.py
    defaults["nn1_script"] = os.path.join(
        script_dir, "docking", "scoring", "nn_score_exe", "nnscore1", "NNScore.py"
    )
    defaults["nn2_script"] = os.path.join(
        script_dir, "docking", "scoring", "nn_score_exe", "nnscore2", "NNScore2.py"
    )
    # Optional file-location variables (recommend leaving "" so AutoGrow can
    # autolocate MGLTools files such as prepare_ligand4.py /
    # prepare_receptor4.py / pythonsh).
    defaults.update({
        "conversion_choice": "MGLToolsConversion",
        "obabel_path": "obabel",
        "custom_conversion_script": "",
        "prepare_ligand4.py": "",
        "prepare_receptor4.py": "",
        "mgl_python": "",
    })
    # Crossover function settings.
    defaults.update({
        "start_a_new_run": False,
        "max_time_mcs_prescreen": 1,
        "max_time_mcs_thorough": 1,
        "min_atom_match_mcs": 4,
        "protanate_step": False,
    })
    # Mutation settings.
    defaults.update({
        "rxn_library": "click_chem_rxns",
        "rxn_library_file": "",
        "function_group_library": "",
        "complementary_mol_directory": "",
    })
    # Processors.
    defaults.update({
        "number_of_processors": 1,
        "multithread_mode": "multithreading",
    })
    # Genetic Algorithm selector components.
    defaults.update({
        "selector_choice": "Roulette_Selector",
        "tourn_size": 0.1,
    })
    # Seeding the next generation and diversity.
    defaults.update({
        "top_mols_to_seed_next_generation_first_generation": 10,
        "top_mols_to_seed_next_generation": 10,
        "diversity_mols_to_seed_first_generation": 10,
        "diversity_seed_depreciation_per_gen": 2,
    })
    # Population settings.
    defaults.update({
        "filter_source_compounds": True,
        "use_docked_source_compounds": True,
        "num_generations": 10,
        "number_of_crossovers_first_generation": 10,
        "number_of_mutants_first_generation": 10,
        "number_of_crossovers": 10,
        "number_of_mutants": 10,
        "number_elitism_advance_from_previous_gen": 10,
        "number_elitism_advance_from_previous_gen_first_generation": 10,
        "redock_elite_from_previous_gen": False,
    })
    # Ligand filters (all off by default).
    defaults.update({
        "LipinskiStrictFilter": False,
        "LipinskiLenientFilter": False,
        "GhoseFilter": False,
        "GhoseModifiedFilter": False,
        "MozziconacciFilter": False,
        "VandeWaterbeemdFilter": False,
        "PAINSFilter": False,
        "NIHFilter": False,
        "BRENKFilter": False,
        "No_Filters": False,
        "alternative_filter": None,
    })
    # Docking settings.
    defaults.update({
        "dock_choice": "QuickVina2Docking",
        "docking_executable": None,
        "docking_exhaustiveness": None,
        "docking_num_modes": None,
        "docking_timeout_limit": 120,
        "custom_docking_script": "",
    })
    # Scoring settings.
    defaults.update({
        "scoring_choice": "VINA",
        "rescore_lig_efficiency": False,
        "custom_scoring_script": "",
    })
    # Gypsum settings (max_variants_per_compound is conformers per ligand).
    defaults.update({
        "max_variants_per_compound": 3,
        "gypsum_thoroughness": 3,
        "min_ph": 6.4,
        "max_ph": 8.4,
        "pka_precision": 1.0,
        "gypsum_timeout_limit": 10,
    })
    # Other settings.
    defaults.update({
        "debug_mode": False,
        "reduce_files_sizes": False,
        "generate_plot": True,
    })
    # Check Bash Timeout function (there's a difference between MacOS and
    # Linux: Linux uses `timeout` while MacOS uses `gtimeout`).
    timeout_option = determine_bash_timeout_vs_gtimeout()
    if timeout_option not in ["timeout", "gtimeout"]:
        # Message whitespace normalized; the old string line-continuation
        # embedded source indentation into the user-facing text.
        raise Exception(
            "Something is very wrong. This OS may not be supported by "
            "Autogrow or you may need to execute through Bash."
        )
    defaults["timeout_vs_gtimeout"] = timeout_option
    return defaults
############################################
######## Input Handlining Settings #########
############################################
def convert_json_params_from_unicode(params_unicode):
"""
Set the parameters that will control this ConfGenerator object.
:param dict params_unicode: The parameters. A dictionary of {parameter name:
value}.
Returns:
:returns: dict params: Dictionary of User variables
"""
# Also, rdkit doesn't play nice with unicode, so convert to ascii
# Because Python2 & Python3 use different string objects, we separate their
# usecases here.
params = {}
if sys.version_info < (3,):
for param in params_unicode:
val = params_unicode[param]
if isinstance(val, unicode):
val = str(val).encode("utf8")
key = param.encode("utf8")
params[key] = val
else:
for param in params_unicode:
val = params_unicode[param]
key = param
params[key] = val
return params
def _raise_wrong_type(key, given_value, expected_type):
    """Raise the standard IOError for a user value that cannot be coerced.

    Replaces four near-identical inline message builders (whose wording
    drifted slightly between branches) with one normalized message.
    """
    printout = "This parameter is the wrong type.\n"
    printout = printout + "\t Check : {} type={}\n".format(key, type(given_value))
    printout = printout + "\t Should be type={}\n".format(expected_type)
    printout = printout + "\tPlease check Autogrow documentation using -h"
    raise IOError(printout)


def check_value_types(vars, argv):
    """
    Check that the user variables use the same or comparable datatypes as
    the defaults in vars, preventing type issues later in the simulation.

    Given the many user vars and the possibility of intentional differences,
    this function tries to be NOT OPINIONATED, only correcting two obvious
    and easy discrepancies between argv[key] and vars[key]:
        1) argv[key] = "true" and vars[key] = False
           -> argv[key] becomes True (never the reverse)
        2) argv[key] = "1.01" and vars[key] = 2.1
           -> argv[key] becomes float(1.01)

    Inputs:
    :param dict vars: Dictionary of program defaults, which will later be
        overwritten by argv values
    :param dict argv: Dictionary of User specified variables

    Returns:
    :returns: dict vars: the same defaults dictionary
    :returns: dict argv: the user dictionary with coerced values

    Raises:
    :raises IOError: when argv[key] cannot be reconciled with the type of
        vars[key]
    """
    for key in list(argv.keys()):
        # Keys without a default (e.g. filename_of_receptor or the docking
        # box dimensions) are validated elsewhere; just skip them.
        if key not in vars:
            continue
        if type(argv[key]) == type(vars[key]):
            continue
        if vars[key] is None:
            # None defaults are checked elsewhere; just map the string
            # "none"/"None" onto a real None.
            if type(argv[key]) == str and argv[key].lower() == "none":
                argv[key] = None
            continue
        if type(vars[key]) in (int, float):
            if type(argv[key]) in (int, float):
                # int vs float mismatches are fine as-is.
                continue
            if type(argv[key]) != str:
                _raise_wrong_type(key, argv[key], type(vars[key]))
            # float() on a str raises only ValueError; anything numeric
            # ("1.01", "3") is accepted. (The original's post-conversion
            # type check was dead code: float() always returns float.)
            try:
                argv[key] = float(argv[key])
            except ValueError:
                _raise_wrong_type(key, argv[key], type(vars[key]))
        elif type(vars[key]) == bool:
            if argv[key] is None:
                # Do not try to handle this. May make sense.
                continue
            if type(argv[key]) != str:
                _raise_wrong_type(key, argv[key], type(vars[key]))
            lowered = argv[key].lower()
            if lowered in ["true", "1"]:
                argv[key] = True
            elif lowered in ["false", "0"]:
                argv[key] = False
            elif lowered == "none":
                argv[key] = None
            else:
                _raise_wrong_type(key, argv[key], type(vars[key]))
    return vars, argv
def load_in_commandline_parameters(argv):
    """
    Load in the command-line parameters.

    Merges user-specified parameters (from a JSON file or from argparse)
    over the defaults, validates types and required inputs, handles
    platform quirks (Windows, MacOS notarization), resolves paths, and
    creates/selects the run output directory.
    Inputs:
    :param dict argv: Dictionary of User specified variables
    Returns:
    :returns: dict vars: Dictionary of User variables
    :returns: str printout: a string to be printed to screen and saved to output file
    """
    vars = define_defaults()
    # Load the parameters from the json
    if "json" in argv:
        # JSON submission: values from the JSON file override the defaults.
        json_vars = json.load(open(argv["json"]))
        json_vars = convert_json_params_from_unicode(json_vars)
        check_for_required_inputs(json_vars)
        vars, json_vars = check_value_types(vars, json_vars)
        for key in list(json_vars.keys()):
            vars[key] = json_vars[key]
    else:
        # Command-line submission: argparse values override the defaults.
        check_for_required_inputs(argv)
        argv = handle_custom_inputs_if_argparsed(argv)
        vars, argv = check_value_types(vars, argv)
        for key in list(argv.keys()):
            vars[key] = argv[key]
    vars = multiprocess_handling(vars)
    # printout accumulates the log header and is returned to the caller.
    printout = "(RE)STARTING AUTOGROW 4.0: " + str(datetime.datetime.now())
    printout = printout + program_info()
    printout = (
        printout + "\nUse the -h tag to get detailed help regarding program usage.\n"
    )
    print(printout)
    sys.stdout.flush()
    # Check all Dependencies are installed
    check_dependencies()
    vars = filter_choice_handling(vars)
    ###########################################
    ########## Check variables Exist ##########
    ###########################################
    # Check if Custom docking option if so there's a few things which
    # need to also be specified
    # if not lets flag the error
    if vars["dock_choice"] == "Custom":
        if vars["docking_executable"] is None:
            raise ValueError(
                "TO USE Custom DOCKING OPTION, MUST SPECIFY THE \
                PATH TO THE docking_executable AND THE DOCKING_CLASS"
            )
        if os.path.exists(vars["docking_executable"]) is False:
            raise ValueError(
                "Custom docking_executable could not be found at:\
                {}".format(
                    vars["docking_executable"]
                )
            )
        # custom_docking_script is expected as [name, path/to/script.py].
        if (
            type(vars["custom_docking_script"]) != list
            or os.path.exists(vars["custom_docking_script"][1]) is not True
        ):
            raise ValueError(
                "TO USE Custom DOCKING OPTION, MUST SPECIFY THE \
                PATH TO THE Custom DOCKING SCRIPT"
            )
    if vars["dock_choice"] in ["VinaDocking", "QuickVina2Docking"]:
        if sys.platform.lower() == "darwin":
            # Some MacOS require docking software to be notarized.
            # This will require an internet signal
            run_macos_notarization(vars)
    if vars["conversion_choice"] == "Custom":
        if (
            type(vars["custom_conversion_script"]) != list
            or os.path.exists(vars["custom_conversion_script"][1]) is not True
        ):
            raise ValueError(
                "TO USE Custom conversion_choice OPTION, \
                MUST SPECIFY THE PATH TO THE custom Conversion SCRIPT"
            )
    if vars["scoring_choice"] == "Custom":
        if (
            type(vars["custom_scoring_script"]) != list
            or os.path.exists(vars["custom_scoring_script"][1]) is not True
        ):
            raise ValueError(
                "TO USE custom scoring_choice OPTION, \
                MUST SPECIFY THE PATH TO THE Custom SCORING SCRIPT"
            )
    if (
        vars["conversion_choice"] == "Custom"
        or vars["dock_choice"] == "Custom"
        or vars["scoring_choice"] == "Custom"
    ):
        vars = handle_custom_dock_and_conversion_scoring_options(vars)
    # Mutation Settings
    if vars["rxn_library"] == "Custom":
        if vars["rxn_library_file"] == "" or vars["function_group_library"] == "":
            raise ValueError(
                "TO USE Custom REACTION LIBRARY OPTION, ONE MUST SPECIFY \
                THE PATH TO THE REACTION LIBRARY USING INPUT PARAMETER rxn_library"
            )
        if os.path.exists(vars["rxn_library_file"]) is False:
            raise ValueError(
                "TO USE Custom REACTION LIBRARY OPTION, ONE MUST SPECIFY \
                THE PATH TO THE REACTION LIBRARY USING INPUT PARAMETER rxn_library"
            )
        if vars["complementary_mol_directory"] == "":
            raise ValueError(
                "TO USE Custom REACTION LIBRARY OPTION, ONE MUST SPECIFY THE PATH \
                TO THE REACTION LIBRARY USING INPUT PARAMETER function_group_library"
            )
        if os.path.isdir(vars["complementary_mol_directory"]) is False:
            raise ValueError(
                "TO USE Custom REACTION LIBRARY OPTION, ONE MUST SPECIFY THE PATH \
                TO THE REACTION LIBRARY USING INPUT PARAMETER complementary_mol_directory"
            )
    else:  # Using default settings
        # With a built-in rxn_library, none of the custom-library inputs
        # may be set; catch half-configured custom libraries early.
        if vars["rxn_library_file"] != "":
            raise ValueError(
                "You have selected a Custom rxn_library_file group \
                library but not chosen to use the Custom option for rxn_library. \
                Please use either the provided rxn_library options or chose the Custom \
                option for rxn_library"
            )
        if vars["function_group_library"] != "":
            raise ValueError(
                "You have selected a Custom function_group_library but \
                not chosen to use the Custom option for rxn_library. Please use \
                either the provided rxn_library options or chose the Custom option \
                for rxn_library"
            )
        if vars["complementary_mol_directory"] != "":
            raise ValueError(
                "You have selected a Custom complementary_mol_directory\
                but not chosen to use the Custom option for rxn_library. \
                Please use either the provided rxn_library options or chose the Custom\
                option for rxn_library"
            )
    # Check if the Operating System is Windows, if so turn off Multiprocessing.
    if os.name == "nt" or os.name == "ce":
        # so it's running under windows. multiprocessing disabled
        vars["number_of_processors"] = 1
        printout = (
            printout
            + "\nWARNING: Multiprocessing is disabled on\
            windows machines.\n"
        )
    # convert paths to abspath, in case necessary
    vars["nn1_script"] = os.path.abspath(vars["nn1_script"])
    vars["nn2_script"] = os.path.abspath(vars["nn2_script"])
    # make sure directories end in os.sep
    if vars["root_output_folder"][-1] != os.sep:
        vars["root_output_folder"] = vars["root_output_folder"] + os.sep
    # If MGLTools is being used handle its paths
    if vars["conversion_choice"] == "MGLToolsConversion":
        if "mgltools_directory" not in vars.keys():
            printout = "\nmgltools_directory was not provide but conversion_choice"
            printout = printout + " is set to MGLToolsConversion. Please "
            printout = printout + " provide the path to the mgltools_directory\n"
            print(printout)
            raise NotImplementedError(printout)
        vars["mgltools_directory"] = os.path.abspath(
            vars["mgltools_directory"]
        )
        if os.path.exists(vars["mgltools_directory"]) is False:
            raise NotImplementedError("mgltools_directory does not exist")
        if os.path.isdir(vars["mgltools_directory"]) is False:
            raise NotImplementedError(
                "mgltools_directory is not a directory. \
                Check your input parameters."
            )
        if vars["mgltools_directory"][-1] != os.sep:
            vars["mgltools_directory"] = vars["mgltools_directory"] + os.sep
        # find other mgltools-related scripts
        # Empty string means "not user-specified": derive from mgltools_directory.
        if vars["prepare_ligand4.py"] == "":
            vars["prepare_ligand4.py"] = (
                vars["mgltools_directory"]
                + "MGLToolsPckgs"
                + os.sep
                + "AutoDockTools"
                + os.sep
                + "Utilities24"
                + os.sep
                + "prepare_ligand4.py"
            )
        if vars["prepare_receptor4.py"] == "":
            vars["prepare_receptor4.py"] = (
                vars["mgltools_directory"]
                + "MGLToolsPckgs"
                + os.sep
                + "AutoDockTools"
                + os.sep
                + "Utilities24"
                + os.sep
                + "prepare_receptor4.py"
            )
        if vars["mgl_python"] == "":
            vars["mgl_python"] = vars["mgltools_directory"] + "bin" + os.sep + "pythonsh"
    # More Handling for Windows OS
    # convert path names with spaces if this is windows
    if os.name == "nt" or os.name == "ce":
        # so it's running under windows: wrap paths containing spaces in quotes
        if " " in vars["filename_of_receptor"]:
            vars["filename_of_receptor"] = '"' + vars["filename_of_receptor"] + '"'
        if " " in vars["root_output_folder"]:
            vars["root_output_folder"] = '"' + vars["root_output_folder"] + '"'
        if " " in vars["nn1_script"]:
            vars["nn1_script"] = '"' + vars["nn1_script"] + '"'
        if " " in vars["nn2_script"]:
            vars["nn2_script"] = '"' + vars["nn2_script"] + '"'
        # If MGLTools is being used handle its paths
        if vars["conversion_choice"] == "MGLToolsConversion":
            if " " in vars["mgltools_directory"]:
                vars["mgltools_directory"] = '"' + vars["mgltools_directory"] + '"'
            if " " in vars["prepare_ligand4.py"]:
                vars["prepare_ligand4.py"] = '"' + vars["prepare_ligand4.py"] + '"'
            if " " in vars["prepare_receptor4.py"]:
                vars["prepare_receptor4.py"] = '"' + vars["prepare_receptor4.py"] + '"'
            if " " in vars["mgl_python"]:
                vars["mgl_python"] = '"' + vars["mgl_python"] + '"'
    # output the paramters used
    printout = printout + "\nPARAMETERS" + "\n"
    printout = printout + " ========== " + "\n"
    # Make sure scripts and executables exist
    # If MGLTools is being used handle its paths
    # Paths may have been wrapped in quotes above (Windows); check both
    # the quoted and unquoted forms before declaring a file missing.
    if vars["conversion_choice"] == "MGLToolsConversion":
        if not os.path.exists(vars["prepare_ligand4.py"]) and not os.path.exists(
            vars["prepare_ligand4.py"].replace('"', "")
        ):
            printout = (
                printout
                + "\nERROR: Could not find prepare_ligand4.py at "
                + vars["prepare_ligand4.py"]
                + "\n"
            )
            print(printout)
            raise NotImplementedError(printout)
        if not os.path.exists(vars["prepare_receptor4.py"]) and not os.path.exists(
            vars["prepare_receptor4.py"].replace('"', "")
        ):
            printout = (
                printout
                + "\nERROR: Could not find prepare_receptor4.py at "
                + vars["prepare_receptor4.py"]
                + "\n"
            )
            print(printout)
            raise NotImplementedError(printout)
        if not os.path.exists(vars["mgl_python"]) and not os.path.exists(
            vars["mgl_python"].replace('"', "")
        ):
            printout = (
                printout
                + "\nERROR: Could not find pythonsh at "
                + vars["mgl_python"]
                + "\n"
            )
            print(printout)
            raise NotImplementedError(printout)
    if not os.path.exists(vars["nn1_script"]) and not os.path.exists(
        vars["nn1_script"].replace('"', "")
    ):
        printout = (
            printout
            + "\nERROR: Could not find "
            + os.path.basename(vars["nn1_script"])
            + " at "
            + vars["nn1_script"]
            + "\n"
        )
        print(printout)
        raise NotImplementedError(printout)
    if not os.path.exists(vars["nn2_script"]) and not os.path.exists(
        vars["nn2_script"].replace('"', "")
    ):
        printout = (
            printout
            + "\nERROR: Could not find "
            + os.path.basename(vars["nn2_script"])
            + " at "
            + vars["nn2_script"]
            + "\n"
        )
        print(printout)
        raise NotImplementedError(printout)
    if not os.path.exists(vars["filename_of_receptor"]):
        printout = (
            printout
            + '\nERROR: There receptor file does not exist: "'
            + vars["filename_of_receptor"]
            + '".'
            + "\n"
        )
        print(printout)
        raise NotImplementedError(printout)
    # CHECK THAT NN1/NN2 are using only traditional Vina Docking
    if vars["scoring_choice"] == "NN1" or vars["scoring_choice"] == "NN2":
        if vars["dock_choice"] != "VinaDocking":
            printout = "\n\nNeural Networks 1 and 2 (NN1/NN2) are trained on data "
            printout = printout + "using PDBQT files converted by MGLTools \n"
            printout = printout + "and docked using Autodock Vina 1.1.2.\n"
            printout = (
                printout
                + "\nUsing conversion or docking software besides" +
                " these will not work. \n"
            )
            printout = (
                printout
                + "\nPlease switch dock_choice option to VinaDocking" +
                " or deselect NN1/NN2 as the scoring_choice.\n"
            )
            print(printout)
            raise Exception(printout)
        # IF ALTERNATIVE CONVERSION OF PDB2PDBQT CHECK THAT NN1/NN2 are using only MGLTOOLS
        if vars["conversion_choice"] != "MGLToolsConversion":
            printout = "\n\nNeural Networks 1 and 2 (NN1/NN2) are trained on data "
            printout = printout + "using PDBQT files converted by MGLTools \n"
            printout = printout + "and docked using Autodock Vina 1.1.2.\n"
            printout = (
                printout
                + "\nUsing conversion or docking software besides" +
                " these will not work. \n"
            )
            printout = (
                printout
                + "Please switch conversion_choice option to MGLToolsConversion" +
                " or deselect NN1/NN2 as the scoring_choice.\n"
            )
            print(printout)
            raise Exception(printout)
    # Check if the user wants to continue a run or start a new run.
    # Make new run directory if necessary. return the Run folder path
    # The run folder path will be where we place our generations and output files
    vars["output_directory"] = set_run_directory(
        vars["root_output_folder"], vars["start_a_new_run"]
    )
    # Save variables in vars dict to a .json file for later usage and reference
    # It saves the file to the output_directory + "vars.json"
    # -If AutoGrow has been run multiple times for the same directory it
    # will save the new vars file as append a number to the file name
    # starting with 2. The util scripts will only look at the original "vars.json"
    # ie) output_directory + "vars_2.json"
    save_vars_as_json(vars)
    return vars, printout
############################################
########## File Handling Settings ##########
############################################
def find_previous_runs(folder_name_path):
    """
    This will check if there are any previous runs in the output directory.
    - If there are it will return the integer of the number label of the last Run folder path.
    - ie if there are folders Run_0, Run_1, Run_2 the function will return int(2)
    - If there are no previous Run folders it returns None.
    Inputs:
    :param str folder_name_path: is the path of the root output folder. We will
    make a directory within this folder to store our output files
    Returns:
    :returns: int last_run_number: the int of the last run number or None if no previous runs.
    """
    # Count consecutive existing folders folder_name_path + "0", "1", ...
    i = 0
    while os.path.exists("{}{}{}".format(folder_name_path, i, os.sep)):
        i = i + 1
    if i == 0:
        # There are no previous runs in this directory
        return None
    # A previous run exists. The number of the last run.
    return i - 1
def set_run_directory(root_folder_path, start_a_new_run):
    """
    Determine and create the folder for this run.

    If start_a_new_run is True, start a fresh run:
    -If no previous runs exist in root_folder_path, make a new folder
     named root_folder_path + "Run_0"
    -If previous runs exist, make a new folder whose number is one
     greater than the last run in the same output directory.
    If start_a_new_run is False, reuse the last run folder (or create
    Run_0 when there are no previous runs at all).
    Inputs:
    :param str root_folder_path: is the path of the root output folder. We will
    make a directory within this folder to store our output files
    :param bol start_a_new_run: True or False to determine if we continue from
    the last run or start a new run
    - This is set as a vars["start_a_new_run"]
    - The default is vars["start_a_new_run"] = True
    Returns:
    :returns: str folder_path: the string of the newly created directory for
    puting output folders
    """
    folder_name_path = root_folder_path + "Run_"
    print(folder_name_path)
    last_run_number = find_previous_runs(folder_name_path)
    if last_run_number is None:
        # No earlier simulations found here; begin from Run_0.
        print("There are no previous runs in this directory.")
        print("Starting a new run named Run_0.")
        run_number = 0
        folder_path = "{}{}{}".format(folder_name_path, run_number, os.sep)
        os.makedirs(folder_path)
    elif start_a_new_run is False:
        # Continue from the last simulation run; its folder already exists.
        run_number = last_run_number
        folder_path = "{}{}{}".format(folder_name_path, run_number, os.sep)
    else:
        # Fresh simulation: bump the run number by one past the last run
        # and make a directory for it.
        run_number = last_run_number + 1
        folder_path = "{}{}{}".format(folder_name_path, run_number, os.sep)
        os.makedirs(folder_path)
    print("The Run number is: ", run_number)
    print("The Run folder path is: ", folder_path)
    print("")
    return folder_path
############################################
######## Custom Option Settings ########
############################################
def handle_custom_inputs_if_argparsed(input_params):
    """
    There are several Custom options such as filters, docking software
    which take a list of information. Because Filters can use multiple options
    at once it takes a list of list information.
    This function is used to properly import and parse those user variables if
    using the commandline argparse
    This function will handle those if there are used and return
    the modified input_params dict
    Inputs:
    :param dict input_params: The parameters. A dictionary of
        {parameter name: value}.
    Returns:
    :returns: dict input_params: The parameters. A dictionary of
        {parameter name: value}.
    """

    def _recover_name_path_pairs(raw_option):
        # argparse delivers the nested list as a single string such as
        # "[[name1,path1],[name2,path2]]"; rebuild the [name, path] pairs.
        flattened = raw_option[0].replace("[[", "[").replace("]]", "]")
        pairs = []
        for piece in flattened.split("]"):
            piece = piece.replace("[", "").replace("]", "")
            entry = [item for item in piece.split(",") if item != ""]
            # Only well-formed two-item entries are kept.
            if len(entry) == 2:
                pairs.append(entry)
        return pairs

    # Custom Filters
    input_params.setdefault("alternative_filter", None)
    if (
        input_params["alternative_filter"] is not None
        and input_params["alternative_filter"] != []
    ):
        input_params["alternative_filter"] = _recover_name_path_pairs(
            input_params["alternative_filter"]
        )
    # custom_conversion_script
    input_params.setdefault("custom_conversion_script", None)
    if input_params["custom_conversion_script"] not in [None, [], "", "[]"]:
        input_params["custom_conversion_script"] = _recover_name_path_pairs(
            input_params["custom_conversion_script"]
        )
    # custom_docking_script
    input_params.setdefault("custom_docking_script", None)
    if input_params["custom_docking_script"] not in [None, [], "", "[]"]:
        input_params["custom_docking_script"] = _recover_name_path_pairs(
            input_params["custom_docking_script"]
        )
    # Custom_Scoring script
    input_params.setdefault("custom_scoring_script", None)
    if input_params["custom_scoring_script"] not in [None, [], "", "[]"]:
        input_params["custom_scoring_script"] = _recover_name_path_pairs(
            input_params["custom_scoring_script"]
        )
    return input_params
#
def handle_alternative_filters(vars, filter_list):
    """
    This will handle Custom Filters.

    Each entry of vars["alternative_filter"] must be a two-item list
    [filter_name, path/to/filter_script.py]. Names already registered as
    filter child classes are appended to filter_list directly; unknown
    scripts are copied into the filter_children_classes folder and
    AutoGrow exits (sys.exit(0)) so it can be restarted with the new
    classes importable.
    Inputs:
    :param dict vars: Dictionary of User variables
    :param list filter_list: a list of the class of filter which will be used
    later to check for drug likeliness for a generation.
    If a User adds their own filter they just need to follow the same
    nomenclature and enter that filter in the user vars["alternative_filters"]
    as the name of that class and place that file in the same folder as the
    other filter classes.
    Returns:
    :returns: list filter_list: the input list extended with the names of
    the custom filter classes.
    Raises:
    :raises Exception: if vars["alternative_filter"] is malformed or a
    referenced script file cannot be found.
    """
    if vars["alternative_filter"] is not None:
        # The option must be a list of [name, path] lists.
        if type(vars["alternative_filter"]) != list:
            raise Exception(
                "If you want to add Custom filters to the filter \
                child classes Must be a list of lists \
                [[name_filter1, Path/to/name_filter1.py],[name_filter2, Path/to/name_filter2.py]]"
            )
        if type(vars["alternative_filter"][0]) != list:
            print(vars["alternative_filter"])
            raise Exception(
                "If you want to add Custom filters to the filter \
                child classes Must be a list of lists \
                [[name_filter1, Path/to/name_filter1.py],[name_filter2, Path/to/name_filter2.py]]"
            )
        full_children_dict = make_complete_children_dict("filter")
        scripts_to_copy = []
        for custom_class in vars["alternative_filter"]:
            if custom_class[0] not in full_children_dict.keys():
                if os.path.exists(custom_class[1]) is False:
                    # Check that the path to the original script exists.
                    raise Exception(
                        "File can not be found for alternative_filter \
                        {}\n If you want to add Custom filters to the filter child \
                        classes Must be a list of lists \
                        [[name_filter1, Path/to/name_filter1.py],\
                        [name_filter2, Path/to/name_filter2.py]]".format(custom_class[1])
                    )
                # Destination inside the package's filter_children_classes dir.
                new_file = os.sep.join(
                    [
                        os.path.abspath(os.path.dirname(__file__)),
                        "operators",
                        "filter",
                        "filter_classes",
                        "filter_children_classes",
                        os.path.basename(custom_class[0]) + ".py",
                    ]
                )
                if os.path.exists(new_file) is True:
                    # File has been copied to proper dir but is not being found by the code
                    printout = "A copy of the custom script {} has been moved \
                    to {}\n".format(custom_class[1], new_file)
                    printout = (
                        printout
                        + "Unfortunately this could not be \
                        imported by the filter module."
                    )
                    printout = (
                        printout
                        + "Please check the file naming \
                        corresponding to: {}\n\n".format(
                            custom_class
                        )
                    )
                    print(printout)
                    raise Exception(printout)
                # Add to list of scripts to copy into the filter folder
                scripts_to_copy.append([custom_class[1], new_file])
            filter_list.append(custom_class[0])
        if len(scripts_to_copy) != 0:
            for filter_info in scripts_to_copy:
                print("copying Custom class file into the FilterClasses folder:")
                # BUG FIX: report the pair actually being copied (filter_info)
                # rather than the stale custom_class/new_file variables left
                # over from the scan loop above.
                print(
                    "\t Copying : {}\n\t New file: {}\n".format(
                        filter_info[0], filter_info[1]
                    )
                )
                print(
                    "AutoGrow will need to be restarted once all custom scripts \
                    have been copied to their required location."
                )
                print(
                    "This is done once so if the script needs to be changed \
                    please either remove or replace the script within the \
                    FilterClasses folder."
                )
                print(
                    "Please ensure you unit test this code properly before \
                    incorporating.\n"
                )
                copyfile(filter_info[0], filter_info[1])
            print(
                "\n########################################"
                + "#####################################"
            )
            print("AutoGrow has incorporated the custom files into"
                  + " the filter Module.")
            print(
                " AutoGrow needs to be restarted and should now "
                + "be able to run custom scripts."
            )
            print("Please ensure you unit test this code properly before incorporating.")
            print(
                "#####################################"
                + "########################################\n"
            )
            # Technically Exit intentionally but maybe should be a raise Exception
            sys.exit(0)
    return filter_list
#
def make_complete_children_dict(purpose_of_object):
    """
    This will retrieve all the names of every child class of the parent class
    This can be either filter, parent_pdbqt_converter, ParentDocking,
    or ParentScoring
    Inputs:
    :param str purpose_of_object: either filter, parent_pdbqt_converter,
    ParentDocking, or ParentScoring
    Returns:
    :returns: dict child_dict: Dictionary of all the class objects for either
    Filtering, docking, Dockingfile conversion or scoring
    """
    # The bare "import autogrow...." lines load the modules that define the
    # child classes so the subclasses are registered before get_all_subclasses
    # is called; the "from ... import ... as parent_object" lines select the
    # matching parent class.
    if purpose_of_object == "filter":
        import autogrow.operators.filter.filter_classes.filter_children_classes
        from autogrow.operators.filter.filter_classes.parent_filter_class import ParentFilter as parent_object
        from autogrow.operators.filter.filter_classes.get_child_filter_class import get_all_subclasses
    elif purpose_of_object == "parent_pdbqt_converter":
        import autogrow.docking.docking_class.docking_file_conversion
        from autogrow.docking.docking_class.parent_pdbqt_converter import ParentPDBQTConverter as parent_object
        from autogrow.docking.docking_class.get_child_class import get_all_subclasses
    elif purpose_of_object == "ParentDocking":
        import autogrow.docking.docking_class.docking_class_children
        from autogrow.docking.docking_class.parent_dock_class import ParentDocking as parent_object
        from autogrow.docking.docking_class.get_child_class import get_all_subclasses
    elif purpose_of_object == "ParentScoring":
        import autogrow.docking.scoring.scoring_classes.scoring_functions
        from autogrow.docking.scoring.scoring_classes.parent_scoring_class import ParentScoring as parent_object
        from autogrow.docking.docking_class.get_child_class import get_all_subclasses
    # NOTE(review): an unrecognized purpose_of_object falls through to the
    # line below and raises NameError on parent_object — confirm callers
    # only ever pass the four values listed in the docstring.
    children = get_all_subclasses(parent_object)
    child_dict = {}
    for child in children:
        # Instantiate each child; its get_name() result becomes the dict key.
        child_object = child()
        child_name = child_object.get_name()
        child_dict[child_name] = child_object
    return child_dict
#
def handle_custom_conversion_script(vars):
    """
    This will handle Custom Conversion_scripts
    Inputs:
    :param dict vars: Dictionary of User variables
    Returns:
    :returns: dict vars: Dictionary of User variables modified with
    the vars["conversion_choice"] set to the new custom conversion_choice
    :returns: bool need_restart: If True AutoGrow will need to be restarted
    after all other files are incorporated
    :returns: str printout: "" or a message to be print prior to being
    restarted if needed
    """
    need_restart = False
    printout = ""
    if vars["custom_conversion_script"] is not None:
        # The option must be a two-item list: [name, path/to/script.py].
        if type(vars["custom_conversion_script"]) != list:
            print(vars["custom_conversion_script"])
            raise Exception(
                "If you want to add Custom Conversion_script \
                to the Conversion_script child classes Must be a list of \
                [name_Conversion_script1, Path/to/name_Conversion_script1.py]"
            )
        if type(vars["custom_conversion_script"][0]) != str:
            print("")
            print(vars["custom_conversion_script"])
            print("")
            raise Exception(
                "If you want to add Custom Conversion_script \
                to the Conversion_script child classes Must be a list of \
                [name_Conversion_script1, Path/to/name_Conversion_script1.py]"
            )
        full_children_dict = make_complete_children_dict("parent_pdbqt_converter")
        custom_class = vars["custom_conversion_script"]
        # Already-registered converter names need no copying/restart.
        if custom_class[0] not in full_children_dict.keys():
            if os.path.exists(custom_class[1]) is False:
                print(custom_class)
                raise Exception(
                    "File can not be found for custom_conversion_script \
                    {}\n If you want to add Custom Conversion_scripts to the \
                    Conversion_script child classes Must be a list of \
                    [name_Conversion_script1, Path/to/name_Conversion_script1.py]".format(
                        custom_class[1]
                    )
                )
            # Destination inside the package's docking_file_conversion dir.
            new_file = os.sep.join(
                [
                    os.path.abspath(os.path.dirname(__file__)),
                    "docking",
                    "docking_class",
                    "docking_file_conversion",
                    os.path.basename(custom_class[0]) + ".py",
                ]
            )
            if os.path.exists(new_file) is True:
                # File has been copied to proper dir but is not being found by the code
                printout = "A copy of the custom script {} has been moved \
                to {}\n".format(custom_class[1], new_file)
                printout = (
                    printout
                    + "Unfortunately this could not be \
                    imported by the Conversion_script module."
                )
                printout = (
                    printout
                    + "Please check the file naming corresponding \
                    to: {}\n\n".format(
                        custom_class
                    )
                )
                print(printout)
                raise Exception(printout)
            # Add copy the script to the docking_file_conversion folder
            print("copying Custom class file into the Conversion_script folder:")
            print(
                "\t Copying : {}\n\t New file: {}\n".format(
                    custom_class[1], new_file
                )
            )
            print(
                "AutoGrow will need to be restarted once the custom script \
                has been copied to their required location."
            )
            print(
                "This is done once so if the script needs to be changed \
                please either remove or replace the script within the \
                docking_file_conversion folder."
            )
            print(
                "Please ensure you unit test this code properly before \
                incorporating."
            )
            copyfile(custom_class[1], new_file)
            printout = (
                printout
                + "\n#########################################"
                + "####################################"
            )
            printout = (
                printout
                + "AutoGrow has incorporated the custom files into "
                + "the docking_file_conversion Module."
            )
            printout = (
                printout
                + "AutoGrow needs to be restarted and should now be "
                + "able to run custom scripts."
            )
            printout = (
                printout
                + "Please ensure you unit test this code properly "
                + "before incorporating."
            )
            printout = (
                printout
                + "#########################################"
                + "####################################\n"
            )
            # Caller is expected to restart AutoGrow so the new module imports.
            need_restart = True
        vars["conversion_choice"] = custom_class[0]
    return vars, need_restart, printout
#
def handle_custom_docking_script(vars):
    """
    This will handle Custom Docking_scripts
    Inputs:
    :param dict vars: Dictionary of User variables
    Returns:
    :returns: dict vars: Dictionary of User variables modified with the
    vars["dock_choice"] set to the new custom dock_choice
    :returns: bool need_restart: If True AutoGrow will need to be restarted
    after all other files are incorporated
    :returns: str printout: "" or a message to be print prior to being
    restarted if needed
    """
    need_restart = False
    printout = ""
    if vars["custom_docking_script"] is not None:
        # The option must be a two-item list: [name, path/to/script.py].
        if type(vars["custom_docking_script"]) != list:
            print(vars["custom_docking_script"])
            raise Exception(
                "If you want to add Custom Docking_script to the \
                Docking_script child classes Must be a list of \
                [name_Docking_script1, Path/to/name_Docking_script1.py]"
            )
        if type(vars["custom_docking_script"][0]) != str:
            print("")
            print(vars["custom_docking_script"])
            print("")
            raise Exception(
                "If you want to add Custom Docking_script to the \
                Docking_script child classes Must be a list of \
                [name_Docking_script1, Path/to/name_Docking_script1.py]"
            )
        full_children_dict = make_complete_children_dict("ParentDocking")
        custom_class = vars["custom_docking_script"]
        # Already-registered docking class names need no copying/restart.
        if custom_class[0] not in full_children_dict.keys():
            if os.path.exists(custom_class[1]) is False:
                print(custom_class)
                raise Exception(
                    "File can not be found for custom_docking_script \
                    {}\n If you want to add Custom Docking_scripts to the \
                    Docking_script child classes Must be a list of \
                    [name_Docking_script1, Path/to/name_Docking_script1.py]".format(
                        custom_class[1]
                    )
                )
            # Destination inside the package's docking_class_children dir.
            new_file = os.sep.join(
                [
                    os.path.abspath(os.path.dirname(__file__)),
                    "docking",
                    "docking_class",
                    "docking_class_children",
                    os.path.basename(custom_class[0]) + ".py",
                ]
            )
            if os.path.exists(new_file) is True:
                # File has been copied to proper dir but is not being found by the code
                printout = "A copy of the custom script {} has been moved \
                to {}\n".format(
                    custom_class[1], new_file
                )
                printout = (
                    printout
                    + "Unfortunately this could not be imported \
                    by the docking module."
                )
                printout = (
                    printout
                    + "Please check the file naming corresponding \
                    to: {}\n\n".format(
                        custom_class
                    )
                )
                print(printout)
                raise Exception(printout)
            # Add copy the script to the children folder
            print("copying Custom class file into the children folder:")
            print(
                "\t Copying : {}\n\t New file: {}\n".format(
                    custom_class[1], new_file
                )
            )
            print(
                "AutoGrow will need to be restarted once the custom \
                script has been copied to their required location."
            )
            print(
                "This is done once so if the script needs to be changed \
                please either remove or replace the script within the \
                children folder."
            )
            print(
                "Please ensure you unit test this code properly before incorporating."
            )
            copyfile(custom_class[1], new_file)
            printout = (
                printout
                + "\n############################################"
                + "#################################"
            )
            printout = (
                printout
                + "AutoGrow has incorporated the custom files into the children Module."
            )
            printout = (
                printout
                + "AutoGrow needs to be restarted and should now be able to run custom scripts."
            )
            printout = (
                printout
                + "Please ensure you unit test this code properly before incorporating."
            )
            printout = (
                printout
                + "##############################################"
                + "###############################\n"
            )
            # Caller is expected to restart AutoGrow so the new module imports.
            need_restart = True
        vars["dock_choice"] = custom_class[0]
    return vars, need_restart, printout
#
def handle_custom_scoring_script(vars):
    """
    This will handle Custom scoring_scripts.

    If the requested custom script is not yet among the known scoring child
    classes, its file is copied into the scoring_functions folder and
    AutoGrow must be restarted so the new module can be imported.

    Inputs:
    :param dict vars: Dictionary of User variables
    Returns:
    :returns: dict vars: Dictionary of User variables modified with the
        vars["scoring_choice"] set to the new custom scoring choice
    :returns: bool need_restart: If True AutoGrow will need to be restarted
        after all other files are incorporated
    :returns: str printout: "" or a message to be print prior to
        being restarted if needed
    """
    need_restart = False
    printout = ""
    if vars["custom_scoring_script"] is not None:
        # isinstance is the idiomatic type check (was: type(...) != list)
        if not isinstance(vars["custom_scoring_script"], list):
            print(vars["custom_scoring_script"])
            raise Exception(
                "If you want to add Custom scoring_script \
                to the scoring_script child classes Must be a list of \
                [name_scoring_script1, Path/to/name_scoring_script1.py]"
            )
        if not isinstance(vars["custom_scoring_script"][0], str):
            print("")
            print(vars["custom_scoring_script"])
            print("")
            raise Exception(
                "If you want to add Custom scoring_script \
                to the scoring_script child classes Must be a list of \
                [name_scoring_script1, Path/to/name_scoring_script1.py]"
            )
        full_children_dict = make_complete_children_dict("ParentScoring")
        custom_class = vars["custom_scoring_script"]
        if custom_class[0] not in full_children_dict:
            if not os.path.exists(custom_class[1]):
                print(custom_class)
                raise Exception(
                    "File can not be found for custom_scoring_script \
                    {}\n If you want to add Custom scoring_scripts to the \
                    scoring_script child classes Must be a list of \
                    [name_scoring_script1, Path/to/name_scoring_script1.py]".format(
                        custom_class[1]
                    )
                )
            # Destination inside the package where scoring children live.
            new_file = os.sep.join(
                [
                    os.path.abspath(os.path.dirname(__file__)),
                    "docking",
                    "scoring",
                    "scoring_classes",
                    "scoring_functions",
                    os.path.basename(custom_class[0]) + ".py",
                ]
            )
            if os.path.exists(new_file):
                # File has been copied to proper dir but is not being found by the code
                printout = "A copy of the custom script {} has been moved to {}\n".format(
                    custom_class[1], new_file
                )
                printout = (
                    printout
                    + "Unfortunately this could not be imported by the scoring module."
                )
                printout = (
                    printout
                    + "Please check the file naming corresponding to: {}\n\n".format(
                        custom_class
                    )
                )
                print(printout)
                raise Exception(printout)
            # Add copy the script to the scoring_choices folder
            print("copying Custom class file into the scoring_choices folder:")
            print(
                "\t Copying : {}\n\t New file: {}\n".format(
                    custom_class[1], new_file
                )
            )
            print(
                "AutoGrow will need to be restarted once the custom script \
                has been copied to their required location."
            )
            print(
                "This is done once so if the script needs to be changed \
                please either remove or replace the script within \
                the scoring_choices folder."
            )
            print(
                "Please ensure you unit test this code properly before incorporating."
            )
            copyfile(custom_class[1], new_file)
            printout = "\n#######################################"
            printout = printout + "######################################"
            printout = (
                printout
                + "AutoGrow has incorporated the custom files into the scoring Module."
            )
            printout = (
                printout
                + "AutoGrow needs to be restarted and should now be able to run custom scripts."
            )
            printout = (
                printout
                + "Please ensure you unit test this code properly before incorporating."
            )
            printout = (
                printout
                + "##############################################"
                + "###############################\n"
            )
            need_restart = True
        vars["scoring_choice"] = custom_class[0]
    return vars, need_restart, printout
#
def handle_custom_dock_and_conversion_scoring_options(vars):
    """
    Apply any user-selected Custom options for the conversion, docking,
    and scoring scripts.

    Inputs:
    :param dict vars: Dictionary of User variables
    Returns:
    :returns: dict vars: Dictionary of User variables with the added options
    """
    restart_needed = False
    restart_message = ""
    if vars["conversion_choice"] == "Custom":
        vars, need_restart, printout = handle_custom_conversion_script(vars)
        if need_restart:
            restart_needed = True
            restart_message += printout
    if vars["dock_choice"] == "Custom":
        vars, need_restart, printout = handle_custom_docking_script(vars)
        if need_restart:
            restart_needed = True
            restart_message += printout
    if vars["scoring_choice"] == "Custom":
        vars, need_restart, printout = handle_custom_scoring_script(vars)
        if need_restart:
            restart_needed = True
            restart_message += printout
    if restart_needed:
        print(restart_message)
        sys.exit(
            0
        )  # Technically Exit intentionally but maybe should be a raise Exception
    return vars
############################################
######## Filter Handlining Settings ########
############################################
def filter_choice_handling(vars):
    """
    Decide which user-defined Ligand filters the run will use and attach
    them to vars.

    Inputs:
    :param dict vars: Dictionary of User variables
    Returns:
    :returns: dict vars: Dictionary of User variables with the
        chosen_ligand_filters added
    """
    # A "No_Filters" key set to True disables filtering entirely; a missing
    # key or any other value defers to picked_filters.
    if vars.get("No_Filters") is True:
        chosen_ligand_filters = None
    else:
        chosen_ligand_filters, vars = picked_filters(vars)
    vars["chosen_ligand_filters"] = chosen_ligand_filters
    import autogrow.operators.filter.execute_filters as Filter
    # get child filter class object function dictionary
    vars["filter_object_dict"] = Filter.make_run_class_dict(chosen_ligand_filters)
    return vars
#
def picked_filters(vars):
    """
    Build the list of filters which a molecule must pass to move into the
    next generation.

    Each known filter name present in vars and set to True is selected;
    names absent from vars are recorded as False so later code can rely on
    the keys existing.  If nothing was selected (and filtering was not
    disabled), LipinskiLenientFilter is used as the default.

    If a User adds their own filter they just need to follow the same
    nomenclature and enter that filter in the user vars["alternative_filter"]
    as the name of that class and place that file in the same folder as the
    other filter classes.

    Inputs:
    :param dict vars: Dictionary of User variables
    Returns:
    :returns: list filter_list: a list of the class of filter which will be
        used later to check for drug likeliness for a generation.
    :returns: dict vars: the (possibly updated) user-variable dictionary
    """
    # One entry per built-in filter; replaces nine copy-pasted if-blocks.
    known_filters = (
        "LipinskiStrictFilter",
        "LipinskiLenientFilter",
        "GhoseFilter",
        "GhoseModifiedFilter",
        "MozziconacciFilter",
        "VandeWaterbeemdFilter",
        "PAINSFilter",
        "NIHFilter",
        "BRENKFilter",
    )
    filter_list = []
    vars_keys = list(vars.keys())
    for filter_name in known_filters:
        if filter_name in vars_keys:
            if vars[filter_name] is True:
                filter_list.append(filter_name)
        else:
            vars[filter_name] = False
    if "alternative_filter" in vars_keys:
        filter_list = handle_alternative_filters(vars, filter_list)
    else:
        vars["alternative_filter"] = None
    # if there is no user specified ligand filters but they haven't set
    # filters to None ---> set filter to default of LipinskiLenientFilter.
    if len(filter_list) == 0:
        vars["LipinskiLenientFilter"] = True
        filter_list.append("LipinskiLenientFilter")
    return filter_list, vars
| 87,255 | 38.697907 | 112 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/model.py | import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem, Descriptors
def smiles2fp(smiles_string):
    """Convert a SMILES string to a [1, 2048] float tensor of its 2048-bit
    radius-2 Morgan fingerprint."""
    molecule = Chem.MolFromSmiles(smiles_string)
    Chem.SanitizeMol(molecule)
    bit_vect = AllChem.GetMorganFingerprintAsBitVect(molecule, 2, nBits=2048)
    arr = np.zeros((1,))
    DataStructs.ConvertToNumpyArray(bit_vect, arr)
    fingerprint = torch.from_numpy(arr).float().view(1, -1)
    return fingerprint  ### [1,2048] torch.Tensor
class Ligand2D(nn.Module):
    """
    Policy network scoring SMILES strings.

    Maps a 2048-bit Morgan fingerprint (via smiles2fp) through one hidden
    layer of width 100 to a scalar.

    input: SMILES (a single string or a list of strings)
    output: raw [1, 1] score (single string) or a softmax-normalized
        1-D probability vector (list of strings)
    """

    def __init__(self):
        super(Ligand2D, self).__init__()
        self.input_mlp = nn.Linear(2048, 100)
        self.output_mlp = nn.Linear(100, 1)

    def forward(self, smiles_):
        """
        :param smiles_:
            - list of SMILES string -> softmax-normalized scores, shape [len]
            - SMILES string -> raw score, shape [1, 1]
        """
        if isinstance(smiles_, list):
            fps = [smiles2fp(s) for s in smiles_]
            fps = torch.cat(fps, 0)
            hidden_state = F.relu(self.input_mlp(fps))
            output = self.output_mlp(hidden_state)
            output = output.view(-1)
            # Explicit dim: implicit-dim softmax is deprecated and warns;
            # for a 1-D tensor it resolves to dim 0, so results are unchanged.
            output = F.softmax(output, dim=0)
            return output
        fingerprint = smiles2fp(smiles_)
        hidden_state = F.relu(self.input_mlp(fingerprint))
        output = self.output_mlp(hidden_state)
        return output  ### [1,1]
class Ligand2D_product(nn.Module):
    '''
    Policy network scoring candidate products against a parent ligand.

    input: ligand2d & product_smiles
    output: softmax-normalized score per product (1-D tensor)
    '''

    def __init__(self):
        super(Ligand2D_product, self).__init__()
        self.ligand_mlp = nn.Linear(2048, 100)
        self.product_mlp = nn.Linear(2048, 100)
        self.output_mlp = nn.Linear(200, 1)

    def forward(self, ligand_smiles, product_smiles_list):
        """
        :param ligand_smiles: SMILES string of the parent ligand
        :param product_smiles_list: list of product SMILES strings
        :returns: 1-D tensor of softmax scores, one per product
        """
        n = len(product_smiles_list)
        ligand_fp = smiles2fp(ligand_smiles)
        ligand_embedding = F.relu(self.ligand_mlp(ligand_fp))
        # Tile the single ligand embedding so it pairs with every product.
        ligand_embedding = ligand_embedding.repeat(n, 1)
        product_fps = [smiles2fp(smiles) for smiles in product_smiles_list]
        product_fps = torch.cat(product_fps, 0)
        product_embeddings = F.relu(self.product_mlp(product_fps))
        latent_variable = torch.cat([ligand_embedding, product_embeddings], 1)
        output = self.output_mlp(latent_variable).view(-1)
        # Explicit dim: implicit-dim softmax is deprecated and warns;
        # for a 1-D tensor it resolves to dim 0, so results are unchanged.
        output = F.softmax(output, dim=0)
        return output
if __name__ == "__main__":
    # Manual smoke test: exercise both policy networks on toy SMILES input.
    model = Ligand2D()
    smiles = ['CCC', 'CCC']
    # Batched call: list input returns a softmax-normalized score per SMILES.
    output = model(smiles)
    print(output.shape, output)
    # Single-string call: returns the raw [1, 1] score.
    output = model(smiles[0])
    model = Ligand2D_product()
    output = model(smiles[0], smiles)
    print(output)
| 2,365 | 24.170213 | 72 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/__init__.py | 1 | 0 | 0 | py | |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/autogrow_main_execute.py | """
Top level for running AutoGrow.
Runs all population generation (operations) and docking.
Runs plotting at end.
"""
import __future__
import glob
import os
import shutil
import sys

import torch

import autogrow.docking.concatenate_files as concatenate_files
import autogrow.docking.execute_docking as DockingClass
import autogrow.operators.operations as operations
from autogrow.model import Ligand2D
def main_execute(vars):
    """
    This function takes the user variables and runs AutoGrow.

    For each generation it (1) populates new ligands (mutation/crossover,
    filtered and converted SMILES->3D), (2) runs docking, and (3) relies on
    the docking step to produce the ranked .smi file for that generation.
    Optionally cleans temporary files, compresses PDB output, and plots the
    run at the end.

    Inputs:
    :param dict vars: dict of user variables which will govern how the
        programs runs
    """
    # Unpack necessary variables
    # output_directory is the root output folder for the run
    output_directory = vars["output_directory"]
    num_gens_to_make = vars["num_generations"]
    # Determine what was the last completed generation in the Run directory
    last_generation = determine_current_gen(output_directory)
    if last_generation is None:
        # Check to see if there's a Run 0 based on the seed.
        if vars["use_docked_source_compounds"] is True:
            # This will assess and rank the source compounds prior to
            # generation 1. Thus using the source compounds as a generation 0
            starting_generation_num = 0
        else:
            starting_generation_num = 1
    else:
        starting_generation_num = last_generation + 1
    if starting_generation_num > num_gens_to_make:
        print("This simulation has already been completed to the user defined number \
of generations. Please check your user variables.")
        raise Exception("This simulation has already been completed to the user defined number \
of generations. Please check your user variables.")
    ##########################################################################################
    ##########################################################################################
    ### policy networks for RL-guided mutation/crossover selection
    mutate_ligand_select_policy_net = Ligand2D()
    mutate_reaction_select_policy_net = Ligand2D()
    crossover_ligand1_policy_net = Ligand2D()
    crossover_ligand2_policy_net = Ligand2D()
    # NOTE(review): opt1-opt4 are created but never stepped in this function
    # — confirm whether the policy-gradient update happens elsewhere.
    opt1 = torch.optim.Adam(mutate_ligand_select_policy_net.parameters(), lr=1e-3)
    opt2 = torch.optim.Adam(mutate_reaction_select_policy_net.parameters(), lr=1e-3)
    opt3 = torch.optim.Adam(crossover_ligand1_policy_net.parameters(), lr=1e-3)
    opt4 = torch.optim.Adam(crossover_ligand2_policy_net.parameters(), lr=1e-3)
    ##########################################################################################
    ##########################################################################################
    # This is the main loop which will control and execute all commands This
    # is broken into 3 main sections:
    #     1)  operations which populating the new generation with ligands which
    #         both pass the userdefined filter and convert from 1D smiles to 3D
    #         PDB
    #     2)  Docking which handles converting from PDBs to Docking specific
    #         formats and running the actual Docking simulations
    #     3)  Ranking the generation based on the Docking scores
    for current_generation_number in range(starting_generation_num, num_gens_to_make+1):
        sys.stdout.flush()
        # Get directory for smi to go
        current_generation_dir = vars["output_directory"] + "generation_{}{}".format(current_generation_number, os.sep)
        print(current_generation_dir)
        sys.stdout.flush()
        ##### 0-th generation
        if current_generation_number == 0 and vars["use_docked_source_compounds"] is True:
            # Skip work if the seeded generation 0 is already ranked.
            if os.path.exists(current_generation_dir + os.sep + "generation_0_ranked.smi") is True:
                continue
            ##################################################
            ####################  main  #######################
            ##################################################
            ##### A. populate_generation
            already_docked, smile_file_new_gen, new_gen_ligands_list = operations.populate_generation_zero(vars, generation_num=0)
            sys.stdout.flush()
            if already_docked is False:
                # Run file conversions of PDB to docking specific file type
                # and Begin Docking unweighted_ranked_smile_file is the file
                # name where the unweighted ranked but score .smi file resides
                ##########################################
                ##### B. docking
                ##########################################
                unweighted_ranked_smile_file = DockingClass.run_docking_common(
                    vars, current_generation_number,
                    current_generation_dir, smile_file_new_gen)
        else:
            ##################################################
            ####################  main  #######################
            ##################################################
            ##### A. populate_generation
            smile_file_new_gen, new_gen_ligands_list = operations.populate_generation(vars, current_generation_number,
                            mutate_ligand_select_policy_net, mutate_reaction_select_policy_net,
                            crossover_ligand1_policy_net, crossover_ligand2_policy_net, )
            ## smiles -> sdf -> pdb
            sys.stdout.flush()
            if new_gen_ligands_list is None:
                raise ValueError("Population failed to make enough mutants or crossovers... \
                                Errors could include not enough diversity, too few seeds to the generation, \
                                the seed mols are unable to cross-over due to lack of similarity,\
                                or all of the seed lack functional groups for performing reactions.")
            # Run file conversions of PDB to docking specific file type and
            # Begin Docking unweighted_ranked_smile_file is the file name
            # where the unweighted ranked but score .smi file resides
            ##########################################
            ##### B. docking
            ##########################################
            ## pdb -> pdbqt
            unweighted_ranked_smile_file = DockingClass.run_docking_common(vars, current_generation_number, current_generation_dir, smile_file_new_gen)
        # Delete all temporary files; Skip if in Debugging Mode
        if vars["debug_mode"] is False:
            print("Deleting temporary files and directories")
            files_to_del = []
            folders_to_del = ["{}{}3D_SDFs{}".format(current_generation_dir, os.sep, os.sep), "{}{}3D_SDFs{}log{}".format(current_generation_dir, os.sep, os.sep, os.sep), "{}{}gypsum_submission_files{}".format(current_generation_dir, os.sep, os.sep)]
            for folder in folders_to_del:
                if os.path.exists(folder) is False:
                    continue
                files_to_del.extend(glob.glob(folder+"*"))
            # Parallel delete of individual files, then ordered folder removal.
            job_input = tuple([tuple([x]) for x in files_to_del if os.path.isfile(x) is True])
            vars["parallelizer"].run(job_input, delete_temporary_files_and_folders)
            # Delete Folders in an ordered manor incase folders are nested
            for i in range(0, len(folders_to_del)):
                delete_temporary_files_and_folders(folders_to_del[i])
            sys.stdout.flush()
        if vars["reduce_files_sizes"] is True:
            # Reduce the files in the PDBs folder to a single compiled file.
            # This reduces the data size And makes it easier to transfer the
            # data
            pdbs_folder = "{}{}PDBs{}".format(current_generation_dir, os.sep, os.sep)
            if os.path.exists(pdbs_folder) is True:
                concatenate_files.run_concatenation(vars["parallelizer"], pdbs_folder)
            else:
                print("\nNo PDB folder to concatenate and compress. This is likely generation 0 seeded with a Ranked .smi file.\n")
        print("")
        print("Finished generation ", current_generation_number)
        sys.stdout.flush()
    if vars["generate_plot"] is True:
        matplotlib_is_callable = False
        try:
            import matplotlib
            matplotlib_is_callable = True
        except:
            matplotlib_is_callable = False
        if matplotlib_is_callable is False:
            print("Can not make figure as matplotlib is not installed")
        else:
            print("Plotting")
            import autogrow.plotting.generate_line_plot as plot
            plot.generate_figures(vars)
    sys.stdout.flush()
#
def determine_current_gen(output_directory):
    """
    Determine the last completed generation in the Run directory.

    Returns the number of the most recent generation that finished (i.e.
    produced a ranked .smi file); that generation seeds the next one.
    Returns None when no completed previous run exists, which makes the
    caller start from generation 0 using the source_compound_file.

    A generation folder that failed to complete is renamed, e.g.
    PATH/generation_3 becomes PATH/generation_3_FAILED_0 (or _1, ... until
    unique), so a new generation can be made in its place without losing
    that data.  If the last two generation folders both failed to complete,
    an Exception is raised.

    Inputs:
    :param str output_directory: is the path of the Run folder within root
        output folder.
    Returns:
    :returns: int last_gen_number: the int of the last generation number or
        None if no previous generations were completed.
    """
    folder_path_gen = output_directory + "generation_"
    # range(3): passes 0 and 1 may each rename one failed generation; the
    # third pass raises.  The previous code used range(2), which made the
    # tries == 2 guard unreachable and silently returned None after two
    # failures instead of raising as documented.
    for tries in range(3):
        if tries == 2:
            print("We are in the following directory:", output_directory)
            raise Exception("The last 2 generations in this Run have failed to complete. \
            Please check that the Run folder that there is something to continue off of.")
        last_gen_number = find_last_generation(folder_path_gen)
        if last_gen_number is None:
            # There are no previous runs in this directory
            return None
        # A previous run exists. The number of the last run.
        folder_path = "{}{}".format(folder_path_gen, last_gen_number)
        is_completed = determine_if_gen_completed(folder_path, last_gen_number)
        if is_completed is True:
            # The last generation (last_gen_number) completed and we will
            # continue our run from that
            return last_gen_number
        # The last generation in the folder crashed before completing.
        # So we will rename the directory by appending _FAILED to the
        # folder name
        printout = "Generation {} in {} failed in the previous simulation.".format(last_gen_number, folder_path)
        print(printout)
        counter = 0
        dir_exists = True
        while dir_exists is True:
            failed_folder_rename = "{}_FAILED".format(folder_path)
            failed_folder_rename_count = "{}_{}".format(failed_folder_rename, counter)
            if os.path.isdir(failed_folder_rename_count) is True:
                counter = counter + 1
            else:
                dir_exists = False
        os.rename(folder_path, failed_folder_rename_count)
        printout = "Renaming folder: {} \
        to: {}".format(folder_path, failed_folder_rename)
        print(printout)
###################################
### main
###################################
def find_last_generation(folder_path_string_no_gen):
    """
    Find the highest integer N such that "<folder_path_string_no_gen>N/"
    exists as a directory.

    Returns 0 when only a seeded generation_0 folder exists, and None when
    no generation folder exists at all (a fresh run, so this attempt starts
    at generation 0).

    Fixes two defects in the prior version: statements after the early
    "return None" were unreachable dead code, and when only generation_0
    existed the function fell through to return an unbound variable
    (UnboundLocalError).

    folder_path_string_no_gen = output_directory + "generation_"
    Inputs:
    :param str folder_path_string_no_gen: the folder prefix to check.
    Returns:
    :returns: int last_gen_number: the int of the last generation number or
        None if no previous runs.
    """
    gen_number = 1
    while os.path.exists("{}{}{}".format(folder_path_string_no_gen, gen_number, os.sep)):
        gen_number += 1
    if gen_number > 1:
        return gen_number - 1
    # No generation_1: check to see if there's a Run 0 based on the seed.
    if os.path.exists("{}{}{}".format(folder_path_string_no_gen, 0, os.sep)):
        return 0
    # There are no previous runs in this directory
    return None
#
def determine_if_gen_completed(gen_dir_path, gen_number):
    """
    Report whether a generation finished by checking for its ranked file.

    Every generation which completes leaves a file named
    generation_<N>_ranked.smi inside its folder; a run that crashed (hard
    or soft) does not.

    Inputs:
    :param str gen_dir_path: is the path of the generation folder within a Run
        folder.
    :param int gen_number: The generation number of the folder.
    Returns:
    :returns: bool: True if the ranked .smi file exists in gen_dir_path,
        False otherwise.
    """
    ranked_path = "{}{}generation_{}_ranked.smi".format(
        gen_dir_path, os.sep, gen_number
    )
    return os.path.isfile(ranked_path)
#
def delete_temporary_files_and_folders(file_or_folder):
    """
    Best-effort removal of a temporary file or directory tree.

    Never raises: a path that does not exist or cannot be removed (e.g.
    permissions) is silently skipped, preserving the original best-effort
    contract.

    Replaces the old ``os.system("rm -rf " + path)`` fallback, which passed
    an unquoted path to a shell (broken for paths with spaces or shell
    metacharacters, a command-injection hazard, and non-portable) with
    ``shutil.rmtree(..., ignore_errors=True)``.

    Inputs:
    :param str file_or_folder: the file or folder to delete
    """
    if not os.path.exists(file_or_folder):
        return
    if os.path.isdir(file_or_folder):
        # ignore_errors makes this a silent best-effort delete, covering
        # both the old bare-except and the shelled-out "rm -rf" retry.
        shutil.rmtree(file_or_folder, ignore_errors=True)
    else:
        try:
            os.remove(file_or_folder)
        except OSError:
            # Leave the file in place; cleanup is best-effort only.
            pass
| 15,174 | 40.236413 | 250 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/plotting/generate_line_plot.py | """ Plots AutoGrow Run"""
import __future__
import os
import glob
import matplotlib
import matplotlib.pyplot as plt
def get_usable_format(infile):
    """
    Read a formatted .smi file into a usable list of per-line field lists.

    Each line must contain at minimum the SMILES string (part 1) and the
    SMILES name/ID (part 2).  Optionally the last field is the diversity
    score and the second-to-last is the fitness metric (the docking score
    by default).  Any extra information sits between part 2 and part -2.
    Fields are tab-separated; a line without tabs falls back to
    single-space separation.

    Inputs:
    :param str infile: the string of the PATHname of a formatted .smi file to
        be read into the program
    Returns:
    :returns: list usable_list_of_smiles: list of SMILES and their associated
        information formatted into a list which is usable by the rest of
        AutoGrow
    """
    if not os.path.exists(infile):
        print("\nFile of Source compounds does not exist: {}\n".format(infile))
        raise Exception("File of Source compounds does not exist")
    # IMPORT SMILES FROM THE PREVIOUS GENERATION
    usable_list_of_smiles = []
    with open(infile) as smiles_file:
        for raw_line in smiles_file:
            stripped = raw_line.replace("\n", "")
            fields = stripped.split("\t")
            if len(fields) == 1:
                # No tabs on this line: fall back to space separation.
                fields = stripped.split(" ")
            usable_list_of_smiles.append(list(fields))
    return usable_list_of_smiles
def get_average_score_per_gen(infolder, folder_list):
    """
    Compute the mean docking score over every compound in each generation's
    ranked .smi file, print the per-generation table, and return it.

    Inputs:
    :param str infolder: the path of the folder which has all of the
        generation folders
    :param list folder_list: a list of generation folders for each generation
        within infolder
    Returns:
    :returns: dict average_affinity_dict: {"generation_N": mean score} for
        every *_ranked.smi file found
    """
    average_affinity_dict = {}
    for gen_folder in folder_list:
        gen_dir = infolder + gen_folder + os.sep
        for rank_file in glob.glob(gen_dir + "*_ranked.smi"):
            # Ranked files are tab delineated .smi files.
            score_sum = 0.0
            line_count = 0.0
            with open(rank_file, "r") as fh:
                for raw_line in fh:
                    fields = raw_line.replace("\n", "").split("\t")
                    # Second-to-last column holds the fitness/docking score.
                    score_sum += float(fields[-2])
                    line_count += 1.0
            gen_num = os.path.basename(rank_file).split("_")[1]
            gen_name = "generation_{}".format(gen_num)
            average_affinity_dict[gen_name] = score_sum / line_count
    print_gens(average_affinity_dict)
    return average_affinity_dict
def get_average_top_score_per_gen(infolder, folder_list, top_score_per_gen):
    """
    Compute the mean docking score of the top N ligands in each
    generation's ranked .smi file, print the table, and return it.

    A generation whose ranked file has fewer than top_score_per_gen lines
    is recorded as "N/A".

    Inputs:
    :param str infolder: the path of the folder which has all of the
        generation folders
    :param list folder_list: a list of generation folders for each generation
        within infolder
    :param int top_score_per_gen: the number of ligands to determine the
        average score. ie) if top_score_per_gen=50 it will return the average
        of the top 50 scores.
    Returns:
    :returns: dict average_affinity_dict: dictionary of average affinity
        scores for top_score_per_gen number of ligands
    """
    average_affinity_dict = {}
    for gen_folder in folder_list:
        gen_dir = infolder + gen_folder + os.sep
        for rank_file in glob.glob(gen_dir + "*_ranked.smi"):
            gen_num = os.path.basename(rank_file).split("_")[1]
            gen_name = "generation_{}".format(gen_num)
            # First pass: count lines to see if the file is deep enough.
            with open(rank_file, "r") as rf:
                num_lines = sum(1 for _ in rf)
            if num_lines < top_score_per_gen:
                average_affinity_dict[gen_name] = "N/A"
                continue
            # Second pass: sum the scores of the first top_score_per_gen
            # (best-ranked) lines.  Files are tab delineated .smi files.
            score_total = 0.0
            with open(rank_file, "r") as f:
                for idx, line in enumerate(f.readlines()):
                    if idx >= top_score_per_gen:
                        break
                    fields = line.replace("\n", "").split("\t")
                    score_total += float(fields[-2])
            average_affinity_dict[gen_name] = score_total / top_score_per_gen
    print_gens(average_affinity_dict)
    return average_affinity_dict
def print_gens(average_affinity_dict):
    """
    Print each generation's average score, ordered numerically by
    generation number.

    Inputs:
    :param dict average_affinity_dict: dictionary of average affinity scores
        for top_score_per_gen number of ligands
    """
    print("generation_number average affinity score")
    # Sort numerically on the trailing number so generation_10 follows
    # generation_2 (plain string sort would not).
    ordered_names = sorted(
        average_affinity_dict, key=lambda name: int(name.split("_")[1])
    )
    for gen_name in ordered_names:
        print(gen_name, " ", average_affinity_dict[gen_name])
def make_graph(dictionary):
    """
    Prepare generation numbers and scores for plotting.

    Because some generations may lack enough ligands to have an average,
    any "N/A" entry means the whole line cannot be plotted, so (None, None)
    is returned.  The unused ``list_of_gen_names`` accumulator of the
    previous version (which appended every key twice) has been removed.

    Inputs:
    :param dict dictionary: dictionary mapping "generation_N" to its average
        score (a number) or "N/A"
    Returns:
    :returns: list list_generations: list of ints for each generation to be
        plotted, or None if any generation is "N/A"
    :returns: list list_of_scores: list of averages for each generation, or
        None if any generation is "N/A"
    """
    list_generations = []
    list_of_scores = []
    for key, score in dictionary.items():
        if score == "N/A":
            # One missing generation invalidates the whole line.
            return None, None
        list_of_scores.append(score)
        list_generations.append(int(key.replace("generation_", "")))
    return list_generations, list_of_scores
def run_plotter(vars, dict_of_averages, outfile):
    """
    Plot the per-generation averages into a matplotlib figure and save it.

    Draws one line each for the overall average and the top-50/20/10/1
    averages (a top-N line is skipped when any generation reports "N/A"),
    titles the figure from the receptor and scoring choice, and writes the
    figure to outfile.  On a save failure a warning is printed instead of
    raising.

    Inputs:
    :param dict vars: dict of user variables which will govern how the
        programs runs
    :param dict dict_of_averages: a dictionary of dictionaries containing the
        average of each generation for the top 50, 20, 10, and 1 ligand(s)
        and the overall average for each generation.
    :param str outfile: Path for the output file for the plot
    """
    average_affinity_dict = dict_of_averages["average_affinity_dict"]
    top_fifty_dict = dict_of_averages["top_fifty_dict"]
    top_twenty_dict = dict_of_averages["top_twenty_dict"]
    top_ten_dict = dict_of_averages["top_ten_dict"]
    top_one_dict = dict_of_averages["top_one_dict"]
    # print("Graphing Overall Average")
    list_generations_average, list_of_scores_average = make_graph(average_affinity_dict)
    # print("Graphing top_fifty_dict")
    print_fifty = True
    for key in top_fifty_dict.keys():
        if top_fifty_dict[key] == "N/A":
            print_fifty = False
    if print_fifty is True:
        list_generations_fifty, list_of_scores_fifty = make_graph(top_fifty_dict)
    # print("Graphing top_fifty_dict")
    print_twenty = True
    for key in top_twenty_dict.keys():
        if top_twenty_dict[key] == "N/A":
            print_twenty = False
    if print_twenty is True:
        list_generations_twenty, list_of_scores_twenty = make_graph(top_twenty_dict)
    print_ten = True
    for key in top_ten_dict.keys():
        if top_ten_dict[key] == "N/A":
            print_ten = False
    # NOTE(review): unlike the fifty/twenty cases above, this make_graph call
    # is NOT guarded by print_ten — confirm whether that is intentional.
    list_generations_ten, list_of_scores_ten = make_graph(top_ten_dict)
    # print("Graphing top_one_dict")
    list_generations_one, list_of_scores_one = make_graph(top_one_dict)
    # print("")
    ax = plt.subplot(111)
    ax.plot(
        list_generations_average, list_of_scores_average, color="b", label="Average"
    )
    if print_fifty is True:
        ax.plot(list_generations_fifty, list_of_scores_fifty, color="c", label="Top 50")
    if print_twenty is True:
        ax.plot(
            list_generations_twenty, list_of_scores_twenty, color="m", label="Top 20"
        )
    if print_ten is True:
        ax.plot(
            list_generations_ten, list_of_scores_ten, color="g", label="Top 10"
        )
    ax.plot(list_generations_one, list_of_scores_one, color="r", label="Top 1")
    ax.set_ylim()
    receptor_name = os.path.basename(vars["filename_of_receptor"])
    scoring_type = vars["scoring_choice"]
    docking_type = vars["scoring_choice"]
    # Total ligands per generation = mutants + crossovers + elites.
    num_lig = (
        int(vars["number_of_mutants"])
        + int(vars["number_of_crossovers"])
        + int(vars["number_elitism_advance_from_previous_gen"])
    )
    number_of_conf_per_lig = str(vars["max_variants_per_compound"])
    # Get Customizations
    title_of_figure = "{} Scores for {} using {}".format(
        scoring_type, receptor_name, docking_type
    )
    plt.title(title_of_figure, fontweight="semibold")
    # Put a legend to the right of the current axis
    ax.legend(loc="center left", bbox_to_anchor=(1, 0.274), fontsize="small")
    number_of_lig_per_gen = str(num_lig)
    output = (
        str(number_of_lig_per_gen)
        + " lig/gen"
        + "\n"
        + str(number_of_conf_per_lig)
        + " variants/lig"
    )
    plt.text(
        5.4, -8.5, output, bbox=dict(facecolor="white", alpha=0.5), fontsize="small"
    )
    # legend1 = plt.legend([lines[i].get_label() for i in range(0, lines_leg)],
    # loc='center left', bbox_to_anchor=(1, 0.274),fontsize='small')
    # legend2 = plt.legend([output],loc='center left',
    # bbox_to_anchor=(1, 0.774),fontsize='small')
    # # help(plt.legend)
    # ax.add_artist(legend1)
    # ax.add_artist(legend2)
    ax.set_ylim()
    if "VINA" in str(scoring_type):
        # NOTE(review): label is missing a closing ")" after kcal/mol —
        # confirm intended text before changing the rendered figure.
        y_label = "Docking Affinity (kcal/mol"
    else:
        y_label = "Fitness Score"
    plt.ylabel(y_label, fontweight="semibold")
    plt.xlabel("Generation Number", fontweight="semibold")
    try:
        if print_ten is True:
            plt.savefig(outfile, bbox_inches="tight", dpi=1000)
        else:
            # Remove the bbox_inches="tight" is necessary if
            # the plot is too small for
            plt.savefig(outfile, dpi=500)
    except:
        printout = "\nUNABLE TO CREATE PLOT: \n"
        printout = printout + "Population size or number of generations was "
        printout = printout + "too small to effectively plot. \n"
        print(printout)
def print_data_table(infolder, folder_list):
    """
    Print a summary table of docking-score averages for an AutoGrow run.

    For every generation folder it reports the overall average score and the
    averages of the top 50, 20, 10, and 1 ligand(s), then bundles those
    per-generation averages into one dictionary.

    Inputs:
    :param str infolder: file path of a directory containing an AutoGrow run,
        e.g. "PATH/Run_0/"
    :param list folder_list: every generation folder inside infolder
    Returns
    :returns: dict dict_of_averages: dictionary of dictionaries keyed by
        "average_affinity_dict", "top_fifty_dict", "top_twenty_dict",
        "top_ten_dict", and "top_one_dict"
    """
    dict_of_averages = {}

    print("Overall Scoring Average for all Compounds")
    dict_of_averages["average_affinity_dict"] = get_average_score_per_gen(
        infolder, folder_list
    )
    print("")

    # (result key, how many top compounds to average, section header).
    # The top-1 section historically uses a different header than the rest.
    top_n_sections = [
        ("top_fifty_dict", 50, "Average for Top Scoring Compounds"),
        ("top_twenty_dict", 20, "Average for Top Scoring Compounds"),
        ("top_ten_dict", 10, "Average for Top Scoring Compounds"),
        ("top_one_dict", 1, "Best Score per generation"),
    ]
    for result_key, top_n, header in top_n_sections:
        print(header)
        print("Number of top scoring compounds: ", top_n)
        dict_of_averages[result_key] = get_average_top_score_per_gen(
            infolder, folder_list, top_n
        )
        print("")
    print("")

    return dict_of_averages
# Run Everything
def generate_figures(vars):
    """
    This runs everything to make a line plot of the results of an Autogrow
    simulation.

    Inputs:
    :param dict vars: dict of user variables which will govern how the
        programs runs; must contain "output_directory"
    """
    # Blank lines to visually separate the plot/table output from any
    # preceding log text.
    for _ in range(0, 10):
        print("")
    infolder = vars["output_directory"]
    # BUG FIX: use os.path.join rather than raw string concatenation so the
    # paths remain correct whether or not output_directory ends in os.sep.
    outfile = os.path.join(infolder, "data_line_plot.png")
    all_folders_list = [
        f
        for f in sorted(os.listdir(infolder))
        if os.path.isdir(os.path.join(infolder, f))
    ]
    # Keep only generation folders, which are named "generation_<number>"
    # (exactly one underscore); skip the "Data" folder.
    folder_list = [
        folder
        for folder in all_folders_list
        if folder != "Data" and len(folder.split("_")) == 2
    ]
    # Sort numerically by generation number (a lexicographic sort would put
    # generation_10 before generation_2).
    folder_list.sort(key=lambda x: int(x.split("_")[1]))
    dict_of_averages = print_data_table(infolder, folder_list)
    run_plotter(vars, dict_of_averages, outfile)
| 15,937 | 34.896396 | 89 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/plotting/__init__.py | 1 | 0 | 0 | py | |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/operators/operations1.py | """
Populates an AutoGrow generation via mutation, crossover, and elitism.
Also filters and converts SMILES to 3d SDFS.
"""
import __future__
import os
import random
import copy
import sys
import rdkit
import rdkit.Chem as Chem
# Disable the unnecessary RDKit warnings
rdkit.RDLogger.DisableLog("rdApp.*")
import autogrow.operators.filter.execute_filters as Filter
import autogrow.docking.ranking.ranking_mol as Ranking
import autogrow.operators.mutation.execute_mutations as Mutation
import autogrow.operators.crossover.execute_crossover as execute_crossover
import autogrow.operators.convert_files.conversion_to_3d as conversion_to_3d
import autogrow.operators.convert_files.gypsum_dl.gypsum_dl.MolObjectHandling as MOH
from collections import Counter, defaultdict
def canonicalize(smiles):
    """Return the canonical isomeric SMILES for *smiles*.

    Args:
        smiles: str, SMILES string
    Returns:
        str canonical SMILES (isomeric), or None when RDKit cannot parse
        the input.
    """
    parsed = Chem.MolFromSmiles(smiles)
    if parsed is None:
        return None
    return Chem.MolToSmiles(parsed, isomericSmiles=True)
#############
# Main run Autogrow operators to make a generation
#############
def populate_generation(vars, generation_num, mutate_ligand_select_policy_net, mutate_reaction_select_policy_net,
    crossover_ligand1_policy_net, crossover_ligand2_policy_net, ):
    """
    This will run all of the mutations, crossovers, and filters for a single
    generation. Populates a new generation of ligands.
    Inputs:
    :param dict vars: a dictionary of all user variables
    :param int generation_num: the generation number, 1,2,3,4,...,
    :param mutate_ligand_select_policy_net: callable policy network that
        scores which seed ligands to mutate; its output has .numpy(), so it
        is presumably a torch tensor of softmax weights -- TODO confirm
    :param mutate_reaction_select_policy_net: callable policy network that
        scores which reaction to apply to a chosen ligand (same tensor
        assumption as above)
    :param crossover_ligand1_policy_net: policy network intended for picking
        the first crossover parent -- currently unused in this function
    :param crossover_ligand2_policy_net: policy network intended for picking
        the second crossover parent -- currently unused in this function
    Returns:
    :returns: str full_generation_smiles_file: the name of the .smi file
    containing the new population
    :returns: list full_generation_smiles_list: list with the new population
    of ligands
    :returns: bool None: returns None twice if any step failed. This will
    result in the program ending
    """
    number_of_processors = int(vars["number_of_processors"])
    # Determine which generation it is and how many mutations and crossovers
    # to make
    if generation_num == 1:
        # If 1st generation
        num_crossovers = vars["number_of_crossovers_first_generation"]
        num_mutations = vars["number_of_mutants_first_generation"]
        # How many advance from previous generation to the next generation
        # directly This will be done later but we are unpacking vars here
        num_elite_to_advance_from_previous_gen = vars["number_elitism_advance_from_previous_gen_first_generation"]
    else:
        # Later generations
        num_crossovers = vars["number_of_crossovers"]
        num_mutations = vars["number_of_mutants"]
        num_elite_to_advance_from_previous_gen = vars["number_elitism_advance_from_previous_gen"]
    # Get the Source compound list. This list is the full population from
    # either the previous generations or if its Generation 1 than the its the
    # entire User specified Source compound list If either has a SMILES that
    # does not sanitize in RDKit it will be excluded and a printout of its
    # Name and SMILES string will be printed.
    source_compounds_list = get_complete_list_prev_gen_or_source_compounds(vars, generation_num)
    num_seed_diversity, num_seed_dock_fitness = determine_seed_population_sizes(vars, generation_num)
    # Total Population size of this generation
    total_num_desired_new_ligands = num_crossovers + num_mutations + num_elite_to_advance_from_previous_gen
    ###################################################################
    ###################################################################
    # A. Making Mutations
    ###################################################################
    ###################################################################
    # Get starting compounds for Mutations
    seed_list_mutations = make_seed_list(
        vars,
        source_compounds_list,
        generation_num,
        num_seed_diversity,
        num_seed_dock_fitness,
    ) ##### *** random.shuffle ***
    # Save seed list for Mutations
    save_ligand_list(
        vars["output_directory"],
        generation_num,
        seed_list_mutations,
        "Mutation_Seed_List",
    )
    sys.stdout.flush()
    print("MAKE MUTATIONS")
    rxn_library_variables = [
        vars["rxn_library"],
        vars["rxn_library_file"],
        vars["function_group_library"],
        vars["complementary_mol_directory"],
    ] # Package user vars specifying the Reaction library to use for mutation
    new_mutation_smiles_list = [] # List of SMILES from mutation
    # Cache mapping canonical SMILES -> list of reactions considered for it.
    smiles2reaction = dict()
    # NOTE(review): the next two statements are broken work-in-progress.
    # `SmileClickClass` is never imported in this module and `smile` is not
    # defined anywhere in this function, so both lines raise NameError at
    # runtime; `result_of_run` is also never used afterwards.
    a_smiles_click_chem_object = SmileClickClass.SmilesClickChem(rxn_library_variables,
        new_mutation_smiles_list, vars["filter_object_dict"])
    result_of_run = a_smiles_click_chem_object.run_smiles_click(smile)
    # Make all the required ligands by mutations
    while len(new_mutation_smiles_list) < num_mutations:
        sys.stdout.flush()
        num_mutants_to_make = num_mutations - len(new_mutation_smiles_list)
        ##################################
        # Make all mutants -------- main
        ##################################
        source_smiles_list = [i[0] for i in seed_list_mutations]
        ligand_weight = mutate_ligand_select_policy_net(source_smiles_list) ### after softmax
        ligand_weight_list = ligand_weight.numpy().reshape(-1).tolist()
        # NOTE(review): `sample_weight_np` is undefined (NameError); this was
        # presumably meant to be `ligand_weight_list` -- verify and fix.
        idx_lst = random.choices(list(range(len(sample_weight_np))), weights = sample_weight_np, k = num_mutations * 10)
        idx2cnt = Counter(idx_lst)
        # NOTE(review): iterating a Counter yields keys only, so the tuple
        # unpacking below fails; this should be `idx2cnt.items()`.
        for idx,cnt in idx2cnt:
            # NOTE(review): `canonical` is undefined -- the module-level
            # helper is named `canonicalize`.
            smiles = canonical(source_smiles_list[idx])
            if smiles in smiles2reaction:
                reaction_list = smiles2reaction[smiles]
            else:
                reaction_list = [] ### todo
                smiles2reaction[smiles] = reaction_list
            reaction_weight = mutate_reaction_select_policy_net(smiles, reaction_list) ## after softmax
            reaction_weight_list = reaction_weight.numpy().reshape(-1).tolist()
            reaction_sampled_list = random.choices(list(range(len(reaction_weight_list))), weights = reaction_weight_list, k = cnt) ## repeated
            # NOTE(review): this extends the SMILES list with sampled
            # *indices* (ints), not SMILES entries -- looks like unfinished
            # RL scaffolding; confirm intended semantics before relying on it.
            new_mutation_smiles_list.extend(reaction_sampled_list)
        #### autogrow
        new_mutants = Mutation.make_mutants(
            vars,
            generation_num,
            number_of_processors,
            num_mutants_to_make,
            seed_list_mutations,
            new_mutation_smiles_list,
            rxn_library_variables,
            mutate_ligand_select_policy_net,
            mutate_reaction_select_policy_net,
        )
        if new_mutants is None: # try once more
            new_mutants = Mutation.make_mutants(
                vars,
                generation_num,
                number_of_processors,
                num_mutants_to_make,
                seed_list_mutations,
                new_mutation_smiles_list,
                rxn_library_variables,
                mutate_ligand_select_policy_net,
                mutate_reaction_select_policy_net,
            )
            if new_mutants is None:
                # Two consecutive failures: give up on mutations.
                break
        new_mutants = [x for x in new_mutants if x is not None] # Remove Nones
        for i in new_mutants:
            new_mutation_smiles_list.append(i)
            if len(new_mutation_smiles_list) == num_mutations:
                break
    sys.stdout.flush()
    '''
    (old_smiles, old_score)
    old_smiles -> all_new_smiles_list
    policy_network
    (new_smiles, new_score) selected
    '''
    ###################################
    ## output: new_mutation_smiles_list
    ###################################
    # save new_mutation_smiles_list
    save_ligand_list(
        vars["output_directory"],
        generation_num,
        new_mutation_smiles_list,
        "Chosen_Mutants",)
    if new_mutation_smiles_list is None or len(new_mutation_smiles_list) < num_mutations:
        print("\nWe needed to make {} ligands through Mutation".format(num_mutations))
        print("We only made {} ligands through Mutation\n".format(len(new_mutation_smiles_list)))
        raise Exception("Mutation failed to make enough new ligands.")
    print("FINISHED MAKING MUTATIONS")
    ###################################################################
    ###################################################################
    # B. Making Crossovers
    # List of smiles from crossover
    ###################################################################
    ###################################################################
    # Get starting compounds to seed Crossovers
    seed_list_crossovers = make_seed_list(
        vars,
        source_compounds_list,
        generation_num,
        num_seed_diversity,
        num_seed_dock_fitness,
    )
    # Save seed list for Crossovers
    save_ligand_list(
        vars["output_directory"],
        generation_num,
        seed_list_crossovers,
        "Crossover_Seed_List",
    )
    print("MAKE CROSSOVERS")
    sys.stdout.flush()
    new_crossover_smiles_list = []
    # Make all the required ligands by Crossover
    while len(new_crossover_smiles_list) < num_crossovers:
        sys.stdout.flush()
        num_crossovers_to_make = num_crossovers - len(new_crossover_smiles_list)
        # Make all crossovers
        new_crossovers = execute_crossover.make_crossovers(
            vars,
            generation_num,
            number_of_processors,
            num_crossovers_to_make,
            seed_list_crossovers,
            new_crossover_smiles_list,
        )
        if new_crossovers is None:
            # try once more
            new_crossovers = execute_crossover.make_crossovers(
                vars,
                generation_num,
                number_of_processors,
                num_crossovers_to_make,
                seed_list_crossovers,
                new_crossover_smiles_list,
            )
            if new_crossovers is None:
                # Two consecutive failures: give up on crossovers.
                break
        # Remove Nones:
        new_crossovers = [x for x in new_crossovers if x is not None]
        # append those which passed the filter
        for i in new_crossovers:
            new_crossover_smiles_list.append(i)
            if len(new_crossover_smiles_list) == num_crossovers:
                break
    # save new_crossover_smiles_list
    save_ligand_list(
        vars["output_directory"],
        generation_num,
        new_crossover_smiles_list,
        "Chosen_Crossovers",
    )
    if new_crossover_smiles_list is None or len(new_crossover_smiles_list) < num_crossovers:
        print("\n\nWe needed to make {} ligands through Crossover".format(num_crossovers))
        print("We only made {} ligands through Crossover\n\n".format(len(new_crossover_smiles_list)))
        raise Exception("Crossover failed to make enough new ligands.")
    print("FINISHED MAKING CROSSOVERS")
    # Get unaltered samples from the previous generation
    print("GET SOME LIGANDS FROM THE LAST GENERATION")
    sys.stdout.flush()
    # Make a list of the ligands chosen to pass through to the next generation
    # via Elitism This handles creating a seed list and defining the advance
    # to next generation final selection
    chosen_mol_to_pass_through_list = make_pass_through_list(
        vars,
        source_compounds_list,
        num_elite_to_advance_from_previous_gen,
        generation_num,
    )
    # A str return value is an error message rather than a ligand list.
    if type(chosen_mol_to_pass_through_list) == str:
        printout = (
            chosen_mol_to_pass_through_list
            + "\nIf this is the 1st generation, it may be due to the starting "
            + "library has SMILES which could not be converted to sanitizable "
            + "RDKit Molecules"
        )
        raise Exception(printout)
    sys.stdout.flush()
    # save chosen_mol_to_pass_through_list
    save_ligand_list(
        vars["output_directory"],
        generation_num,
        chosen_mol_to_pass_through_list,
        "Chosen_Elite_To_advance",
    )
    print("GOT LIGANDS FROM THE LAST GENERATION")
    # make a list of all the ligands from mutations, crossovers, and from the
    # last generation
    new_generation_smiles_list = []
    full_generation_smiles_list = []
    for i in new_mutation_smiles_list:
        new_generation_smiles_list.append(i)
        full_generation_smiles_list.append(i)
    for i in new_crossover_smiles_list:
        new_generation_smiles_list.append(i)
        full_generation_smiles_list.append(i)
    if vars["redock_elite_from_previous_gen"] is False and generation_num != 1:
        for i in chosen_mol_to_pass_through_list:
            # Doesn't append to the new_generation_smiles_list
            full_generation_smiles_list.append(i)
    # Generation 0 pass through gets added to the convert and dock list
    # because it has no docking score to compare with This is independent of
    # the vars['redock_elite_from_previous_gen']
    else:
        for i in chosen_mol_to_pass_through_list:
            new_generation_smiles_list.append(i)
            full_generation_smiles_list.append(i)
    if len(full_generation_smiles_list) < total_num_desired_new_ligands:
        print("We needed ", total_num_desired_new_ligands)
        print("We made ", len(full_generation_smiles_list))
        print("population failed to make enough mutants or crossovers... \
            Errors could include not enough diversity, too few seeds to \
            the generation, the seed mols are unable to cross-over due \
            to lack of similariy, or all of the seed lack functional groups \
            for performing reactions")
        return None, None, None
    # Save the Full Generation
    full_generation_smiles_file, new_gen_folder_path = save_generation_smi(
        vars["output_directory"], generation_num, full_generation_smiles_list, None
    )
    # Save the File to convert to 3d
    smiles_to_convert_file, new_gen_folder_path = save_generation_smi(
        vars["output_directory"],
        generation_num,
        new_generation_smiles_list,
        "_to_convert",
    )
    sys.stdout.flush()
    # CONVERT SMILES TO .sdf USING GYPSUM and convert .sdf to .pdb with rdkit
    # This will output sdf files into a folder. The .smi.0.sdf file is not a
    # valid mol, but all the others will be valid the 1st Smiles in the
    # original .smi file is saved as .smi.1.sdf and 2nd file is saved as
    # .smi.2.sdf
    conversion_to_3d.convert_to_3d(vars, smiles_to_convert_file, new_gen_folder_path)
    sys.stdout.flush()
    return full_generation_smiles_file, full_generation_smiles_list
def populate_generation_zero(vars, generation_num=0):
    """
    This will handle all that is required for generation 0redock and handle
    the generation 0
    Inputs:
    :param dict vars: a dictionary of all user variables
    :param int generation_num: the generation number
    Returns:
    :returns: str full_generation_smiles_file: the name of the .smi file
    containing the new population
    :returns: list full_generation_smiles_list: list with the new population
    of ligands.
    :returns: bool already_docked: if true we won't redock the source ligands.
    If False we will dock the source ligands.
    """
    # NOTE(review): number_of_processors, num_seed_diversity,
    # num_seed_dock_fitness, num_elite_to_advance_from_previous_gen and
    # total_num_desired_new_ligands are computed below but never used in
    # this function -- candidates for cleanup.
    number_of_processors = int(vars["number_of_processors"])
    num_crossovers = 0
    num_mutations = 0
    # Get the Source compound list. This list is the full population from
    # either the previous generations or if its Generation 1 than the its the
    # entire User specified Source compound list If either has a SMILES that
    # does not sanitize in RDKit it will be excluded and a printout of its
    # Name and SMILES string will be printed.
    source_compounds_list = get_complete_list_prev_gen_or_source_compounds(
        vars, generation_num
    )
    num_elite_to_advance_from_previous_gen = len(source_compounds_list)
    num_seed_diversity, num_seed_dock_fitness = determine_seed_population_sizes(
        vars, generation_num
    )
    # Total Population size of this generation
    total_num_desired_new_ligands = num_crossovers + num_mutations + 1
    # Get unaltered samples from the previous generation
    print("GET SOME LIGANDS FROM THE LAST GENERATION")
    # Make a list of the ligands chosen to pass through to the next generation
    # This handles creating a seed list and defining the advance to next
    # generation final selection
    chosen_mol_to_pass_through_list = make_pass_through_list(
        vars, source_compounds_list, 1, 0
    )
    # A str return value is an error message rather than a ligand list.
    if type(chosen_mol_to_pass_through_list) == str:
        printout = (
            chosen_mol_to_pass_through_list
            + "\nIf this is the 1st generation, it may be due to the starting "
            + "library has SMILES which could not be converted to "
            + "sanitizable RDKit Molecules"
        )
        raise Exception(printout)
    # save chosen_mol_to_pass_through_list
    save_ligand_list(
        vars["output_directory"],
        generation_num,
        chosen_mol_to_pass_through_list,
        "Chosen_Elite_To_advance",
    )
    print("GOT LIGANDS FROM THE LAST GENERATION")
    # make a list of all the ligands from mutations, crossovers, and from the
    # last generation
    new_generation_smiles_list = []
    full_generation_smiles_list = []
    # These will be docked and scored for generation 0
    for i in chosen_mol_to_pass_through_list:
        new_generation_smiles_list.append(i)
        full_generation_smiles_list.append(i)
    if len(full_generation_smiles_list) == 0:
        print(
            "population failed to import any molecules from the source_compounds_list."
        )
        return None, None, None
    # Save the Full Generation
    full_generation_smiles_file, new_gen_folder_path = save_generation_smi(
        vars["output_directory"], generation_num, full_generation_smiles_list, None
    )
    # Save the File to convert to 3d
    smiles_to_convert_file, new_gen_folder_path = save_generation_smi(
        vars["output_directory"],
        generation_num,
        new_generation_smiles_list,
        "_to_convert",
    )
    # order files by -2 of each lig
    # If every entry has a numeric score at index -2 the ligands are assumed
    # to be pre-docked; any failure to parse falls through to redocking.
    # NOTE(review): the bare `except` also swallows KeyboardInterrupt /
    # SystemExit -- narrowing to `except Exception` would be safer.
    try:
        full_generation_smiles_list.sort(key=lambda x: float(x[-2]), reverse=False)
        full_generation_smiles_list_printout = [
            "\t".join(x) for x in full_generation_smiles_list
        ]
        already_docked = True
    except:
        print(
            "Not all ligands in source compound list are scored. "
            + "We will convert and redock them all."
        )
        already_docked = False
    if already_docked is True:
        # Write all the ligands to a ranked file
        full_generation_smiles_list_printout = "\n".join(
            full_generation_smiles_list_printout
        )
        ranked_file = new_gen_folder_path + os.sep + "generation_0_ranked.smi"
        with open(ranked_file, "w") as f:
            f.write(full_generation_smiles_list_printout)
        return already_docked, full_generation_smiles_file, full_generation_smiles_list
    # If you are to redock and convert the generation zero you will also need
    # to do the following:
    # CONVERT SMILES TO .sdf USING GYPSUM and convert .sdf to .pdb with
    # rdkit This will output sdf files into a folder. The .smi.0.sdf file
    # is not a valid mol, but all the others will be valid the 1st Smiles
    # in the original .smi file is saved as .smi.1.sdf and 2nd file is
    # saved as .smi.2.sdf
    conversion_to_3d.convert_to_3d(
        vars, smiles_to_convert_file, new_gen_folder_path
    )
    return already_docked, full_generation_smiles_file, full_generation_smiles_list
#############
# Get seeds
#############
def test_source_smiles_convert(smile_info):
"""
This attempts to convert a SMILES string to an rdkit.Chem.rdchem.Mol
object
-done in a try statement so that it is some bad SMILES string which is
incapable of being converted
- it also checks that the SMILES string is able to be sanitized
Inputs:
:param list smile_info: a list containing the SMILES of a ligand, its ID
and potentially additional information about the ligand
Returns:
:returns: list smile_info: If it passed the test, it returns the list
containing the SMILES of a ligand, its ID and potentially additional
information about the ligand
:returns: str printout: If it failed to convert it returns the error
message. This passess out to prevent MPI print issues
"""
if smile_info is None or len(smile_info) == 0:
printout = (
"REMOVING SMILES FROM SOURCE LIST: Blank "
+ "entry in source compound list.\n"
)
printout = printout + "\tRemoving: {}".format(smile_info)
return printout
if len(smile_info) == 1:
printout = "REMOVING SMILES FROM SOURCE LIST: Unformatted or blank "
printout = printout + "entry in source compound list.\n"
printout = printout + "\tRemoving: {}".format(smile_info)
return printout
# separate out SMILES str and ID
smile_str = smile_info[0]
smile_id = str(smile_info[1])
if type(smile_str) is not type(""):
printout = "REMOVING SMILES FROM SOURCE LIST: SMILES string is not a "
printout = printout + "String. Check for formatting errors. \n"
printout = printout + "\tIgnored SMILES is: {}".format(smile_str)
return printout
# Try importing it into RDKit with Sanitization off. Tests for errors in
# having the wrong data type
try:
mol = Chem.MolFromSmiles(str(smile_str), sanitize=False)
except:
printout = "REMOVING SMILES FROM SOURCE LIST: SMILES string failed "
printout = printout + "to import into RDKit.\n\t "
printout = printout + "Removed SMILE string is: {} \n".format(smile_str)
printout = printout + "\t Removed SMILE ID is: {}".format(smile_id)
return printout
# This will fail if there are valence errors. We won't try to correct
# someones source compound list Although the MOH.check_sanitization will
# do that. try sanitizing, which is necessary later
try:
Chem.SanitizeMol(mol)
except:
printout = "REMOVING SMILES FROM SOURCE LIST: SMILES "
printout = printout + "string failed to Sanitize in RDKit.\n"
printout = printout + "\t Removed SMILE string is: {} \n".format(smile_str)
printout = printout + "\t Removed SMILE ID is: {}".format(smile_id)
return printout
# Make the mol again fresh and try running it through MOH.handleHs() This
# will try protanating and Deprotanating the mol. If it can't handle that
# We reject it as many functions will require this sort of manipulation.
# More advanced sanitization issues will also be removed in this step
mol = Chem.MolFromSmiles(str(smile_str), sanitize=False)
mol = MOH.handleHs(mol, True)
if mol is None:
printout = "REMOVING SMILES FROM SOURCE LIST: SMILES string failed \
to be protanated or deprotanated.\n"
printout = (
printout
+ "\t This is often an issue with valence and sanitization "
+ "issues with the SMILES string."
)
printout = printout + "\t Removed SMILE string is: {} \n".format(smile_str)
printout = printout + "\t Removed SMILE ID is: {}".format(smile_id)
return printout
# Check there are no * which are atoms with atomic number=0
mol = MOH.check_for_unassigned_atom(mol)
if mol is None:
printout = "REMOVING SMILES FROM SOURCE LIST: SMILES string contained "
printout = printout + "an unassigned atom type labeled as *.\n"
printout = printout + "\t Removed SMILE string is: {} \n".format(smile_str)
printout = printout + "\t Removed SMILE ID is: {}".format(smile_id)
return printout
# Check for fragments.
if len(Chem.GetMolFrags(mol, asMols=True, sanitizeFrags=False)) != 1:
printout = "REMOVING SMILES FROM SOURCE LIST: SMILES string was fragmented.\n"
printout = printout + "\t Removed SMILE string is: {} \n".format(smile_str)
printout = printout + "\t Removed SMILE ID is: {}".format(smile_id)
return printout
# the ligand is good enough to use throughout the program!
return smile_info
def get_complete_list_prev_gen_or_source_compounds(vars, generation_num):
    """
    Get the source compounds list from the previous generation of the source
    compound list

    This also filters the list to ensure mols can be imported to RDKit and
    that they pass the drug-likliness filters.

    If generation = 1 use the User specified starting compounds If generation
    is >1 than use the previous generations top ligands. This takes an .smi
    file

    Inputs:
    :param dict vars: a dictionary of all user variables
    :param int generation_num: the interger of the current generation
    Returns:
    :returns: list usable_list_of_smiles: a list with SMILES strings, names,
    and information about the smiles from the previous generation or the
    source compound list
    """
    source_file_gen_0 = vars[
        "output_directory"
    ] + "generation_{}{}generation_{}_ranked.smi".format(0, os.sep, 0)
    if generation_num == 0:
        # This will be the full length list of starting molecules as the seed
        source_file = str(vars["source_compound_file"])
        usable_list_of_smiles = Ranking.get_usable_format(source_file)
        if len(usable_list_of_smiles) == 0:
            print(
                "\nThere were no available ligands in source compound. Check formatting\n"
            )
            raise Exception(
                "There were no available ligands in source compound. Check formatting"
            )
    elif generation_num == 1 and os.path.exists(source_file_gen_0) is False:
        # Generation 0 was never ranked (e.g. source compounds were not
        # docked), so fall back to the user-specified source compound file.
        source_file = str(vars["source_compound_file"])
        usable_list_of_smiles = Ranking.get_usable_format(source_file)
        if len(usable_list_of_smiles) == 0:
            print(
                "\nThere were no available ligands in source compound. Check formatting\n"
            )
            raise Exception(
                "There were no available ligands in source compound. Check formatting"
            )
    else:
        # Use the previous generation's ranked .smi file.
        source_file = vars[
            "output_directory"
        ] + "generation_{}{}generation_{}_ranked.smi".format(
            generation_num - 1, os.sep, generation_num - 1
        )
        if os.path.exists(source_file) is False:
            printout = (
                "\n"
                + "There were no available ligands in previous"
                + " generation ranked ligand file.\n"
            )
            printout = printout + "\tCheck formatting or if file has been moved.\n"
            print(printout)
            raise Exception(printout)
        usable_list_of_smiles = Ranking.get_usable_format(source_file)
        if len(usable_list_of_smiles) == 0:
            printout = (
                "\n"
                + "There were no available ligands in previous"
                + " generation ranked ligand file.\n"
            )
            printout = printout + "\tCheck formatting or if file has been moved. \n"
            print(printout)
            raise Exception(printout)
    # Test that every SMILES in the usable_list_of_smiles is a valid SMILES
    # which will import and Sanitize in RDKit. SMILES will be excluded if they
    # are fragmented, contain atoms with no atomic number (*), or do not
    # sanitize. test_source_smiles_convert returns the entry (list) on
    # success and an error message (str) on failure.
    job_input = tuple([tuple([i]) for i in usable_list_of_smiles])
    usable_list_of_smiles = vars["parallelizer"].run(
        job_input, test_source_smiles_convert
    )
    usable_list_of_smiles = [x for x in usable_list_of_smiles if x is not None]
    print_errors = [x for x in usable_list_of_smiles if type(x) is str]
    usable_list_of_smiles = [x for x in usable_list_of_smiles if type(x) is list]
    for x in print_errors:
        print(x)
    if len(usable_list_of_smiles) == 0:
        printout = "\nThere were no ligands in source compound or previous \
        generation which could sanitize.\n"
        print(printout)
        raise Exception(printout)
    if vars["filter_source_compounds"] is True:
        prefilter_list = copy.deepcopy(usable_list_of_smiles)
        print("")
        print("Running Filter on the Compounds from last generation/Source")
        usable_list_of_smiles = Filter.run_filter(vars, usable_list_of_smiles)
        # Remove Nones:
        usable_list_of_smiles = [x for x in usable_list_of_smiles if x is not None]
        if len(usable_list_of_smiles) == 0:
            printout = "\nThere were no ligands in source compound which \
        passed the User-selected Filters.\n"
            print(printout)
            raise Exception(printout)
        # BUG FIX: report the ligands the filters REMOVED. The original code
        # iterated the post-filter survivors (which are all members of
        # prefilter_list, so nothing was ever reported) and reset the
        # accumulator inside the loop. Collect the pre-filter entries that
        # are missing from the post-filter list instead.
        failed_filter_list = []
        for lig in prefilter_list:
            if lig not in usable_list_of_smiles:
                failed_filter_list.append(lig[1])
        if len(failed_filter_list) != 0:
            printout = "\n THE FOLLOWING LIGANDS WERE REMOVED FROM THE\
        SOURCE LIST: Failed the User-selected Filters\n"
            printout = printout + "\t{}".format(failed_filter_list)
            print(printout)
    random.shuffle(usable_list_of_smiles)
    return usable_list_of_smiles
def make_seed_list(vars, source_compounds_list, generation_num, num_seed_diversity,
    num_seed_dock_fitness):
    """
    Get the starting compound list for running the Mutation and Crossovers

    If generation = 0 use the User specified starting compounds If generation
    is >0 than use the previous generations top ligands. This takes an .smi
    file

    Inputs:
    :param dict vars: a dictionary of all user variables
    :param list source_compounds_list: a list with SMILES strings, names, and
    information about the smiles from either the previous generation or the
    source compound list
    :param int generation_num: the interger of the current generation
    :param int num_seed_diversity: the number of seed molecules which come
    from diversity selection
    :param int num_seed_dock_fitness: the number of seed molecules which come
    from eite selection by docking score
    Returns:
    :returns: list usable_list_of_smiles: a list with SMILES strings, names,
    and information about the smiles which will be used to seed the next
    generation
    """
    usable_list_of_smiles = copy.deepcopy(source_compounds_list)
    full_length = False
    if generation_num == 0:
        # Get starting compounds for Mutations
        full_length = True
    elif generation_num == 1:
        if vars["use_docked_source_compounds"] is False:
            # Get starting compounds for Mutations
            full_length = True
        else:
            source_file_gen_0 = vars[
                "output_directory"
            ] + "generation_{}{}generation_{}_ranked.smi".format(0, os.sep, 0)
            if os.path.exists(source_file_gen_0) is False:
                full_length = True
            else:
                # generation_num 1 may run into problems if the source
                # compounds are smaller than the seed pool required to seed
                # generation 1. Because the seeding options are connected to
                # the generation number (due to the depreciation of diversity
                # option) Because of this we may need to ignore the ranking
                # for the seeds of generation 1 to accomidate the smaller
                # source size. This is especially important with
                # lead-optimization in which the source pool may be much
                # smaller For this reason we will override the seeding of
                # generation 1 if the number to seed is greater than exists
                # but will provide a warning message.
                # BUG FIX: the original tested num_seed_diversity twice; the
                # second clause must test num_seed_dock_fitness so that an
                # undersized source pool is detected for either seed count.
                if (
                    len(usable_list_of_smiles) < num_seed_diversity
                    or len(usable_list_of_smiles) < num_seed_dock_fitness
                ):
                    # This is problematic so just use what is available
                    printout = "\n\nNot enough ligands in source compound \
                    list to seed generation 1. We will use the entire \
                    list of every ligand in the source compound list \
                    to seed generation 1. This means there is no \
                    selection in generation 1's seeding process.\n\n"
                    print(printout)
                    full_length = True
                else:
                    full_length = False
    else:
        full_length = False
    if full_length is True or generation_num == 0:
        # This will be the full length list of starting molecules as the seed
        random.shuffle(usable_list_of_smiles)
    else:
        selector_choice = vars["selector_choice"]
        tourn_size = vars["tourn_size"]
        # Get subset of the source_file based on diversity scores and docking
        # scores
        usable_list_of_smiles = Ranking.create_seed_list(
            usable_list_of_smiles,
            num_seed_diversity,
            num_seed_dock_fitness,
            selector_choice,
            tourn_size,
        )
        random.shuffle(usable_list_of_smiles)
    return usable_list_of_smiles
def determine_seed_population_sizes(vars, generation_num):
    """
    Split the seed pool for a generation between diversity selection and
    elitist (docking-score) selection.

    Diversity seeding shrinks by a fixed amount for every generation after
    the first; whatever it loses is transferred to elitist selection, and
    once the diversity share is exhausted everything comes from docking
    fitness.

    Inputs:
    :param dict vars: a dictionary of all user variables
    :param int generation_num: the interger of the current generation
    Returns:
    :returns: int num_seed_diversity: the number of seed molecules which come
    from diversity selection
    :returns: int num_seed_dock_fitness: the number of seed molecules which
    come from eite selection by docking score
    """
    # How much the diversity share has decayed by this generation; the same
    # amount is added to the elitist share.
    depreciation = int(generation_num - 1) * vars["diversity_seed_depreciation_per_gen"]

    if generation_num == 1:
        elite_count = vars["top_mols_to_seed_next_generation_first_generation"]
    else:
        elite_count = vars["top_mols_to_seed_next_generation"]

    num_seed_diversity = (
        vars["diversity_mols_to_seed_first_generation"] - depreciation
    )
    if num_seed_diversity > 0:
        # Diversity still contributes; elitism absorbs the depreciated slots.
        num_seed_dock_fitness = elite_count + depreciation
    else:
        # Diversity pool fully depreciated: elitism takes the entire
        # first-generation diversity allotment as well.
        num_seed_dock_fitness = (
            elite_count + vars["diversity_mols_to_seed_first_generation"]
        )
        num_seed_diversity = 0
    return num_seed_diversity, num_seed_dock_fitness
def make_pass_through_list(vars, smiles_from_previous_gen_list,
                           num_elite_to_advance_from_previous_gen,
                           generation_num):
    """
    This function determines the molecules which elite ligands will advance
    from the previous generation without being altered into the next
    generation.

    Inputs:
    :param dict vars: a dictionary of all user variables
    :param list smiles_from_previous_gen_list: List of SMILES from the last
        generation chosen to seed the list of molecules to advance to the next
        generation without modification via elitism.
    :param int num_elite_to_advance_from_previous_gen: the number of molecules
        to advance from the last generation without modifications.
    :param int generation_num: the integer of the current generation

    Returns:
    :returns: list list_of_ligands_to_advance: a list of ligands which should
        advance into the new generation without modifications, via elitism from
        the last generation. Returns a printout (str) of why it failed if it
        fails
    """
    # this will be a list of lists. Each sublist will be [SMILES_string, ID]
    list_of_ligands_to_advance = []

    # If not enough of your previous generation sanitize to make the list
    # Return an error message (str); the caller raises it as an Exception.
    if (
        generation_num != 0
        and len(smiles_from_previous_gen_list) < num_elite_to_advance_from_previous_gen
    ):
        printout = "Not enough ligands in initial list the filter to progress"
        printout = (
            printout
            + "\n len(smiles_from_previous_gen_list): {} ; \
            num_elite_to_advance_from_previous_gen: {}".format(
                len(smiles_from_previous_gen_list),
                num_elite_to_advance_from_previous_gen)
        )
        return printout

    # Drop malformed entries; valid entries are lists ([SMILES, ID, ...]).
    # isinstance is the idiomatic type check (was: type(x) == list).
    smiles_from_previous_gen_list = [
        x for x in smiles_from_previous_gen_list if isinstance(x, list)
    ]

    if generation_num == 0 and vars["filter_source_compounds"] is True:
        # Run Filters on ligand list
        ligands_which_passed_filters = Filter.run_filter(
            vars, smiles_from_previous_gen_list
        )
        # Remove Nones:
        ligands_which_passed_filters = [
            x for x in ligands_which_passed_filters if x is not None
        ]
    else:
        ligands_which_passed_filters = [
            x for x in smiles_from_previous_gen_list if x is not None
        ]

    # If not enough of your previous generation sanitize to make the list
    # Return an error message (str); the caller raises it as an Exception.
    if (
        generation_num != 0
        and len(ligands_which_passed_filters) < num_elite_to_advance_from_previous_gen
    ):
        printout = "Not enough ligands passed the filter to progress"
        return printout

    # Save seed list of all ligands which passed which will serve as the seed
    # list.
    save_ligand_list(
        vars["output_directory"],
        generation_num,
        ligands_which_passed_filters,
        "Previous_Gen_Elite_Seed_List",
    )

    # Probe whether every ligand carries a docking score in its
    # second-to-last column. Narrowed from a bare except (which would also
    # have swallowed KeyboardInterrupt/SystemExit) to the exceptions float()
    # and indexing can actually raise.
    try:
        for entry in ligands_which_passed_filters:
            float(entry[-2])
        has_dock_score = True
    except (TypeError, ValueError, IndexError):
        has_dock_score = False

    if generation_num == 0 and has_dock_score is False:
        # No scores to rank by: advance the entire (shuffled) filtered list.
        random.shuffle(ligands_which_passed_filters)
        list_of_ligands_to_advance = list(ligands_which_passed_filters)
    elif generation_num == 0 and has_dock_score is True:
        # Use the make_seed_list function to select the list to advance.
        # This list will be chosen strictly by docking score.
        list_of_ligands_to_advance = make_seed_list(
            vars,
            ligands_which_passed_filters,
            generation_num,
            len(ligands_which_passed_filters),
            num_elite_to_advance_from_previous_gen,
        )
    elif generation_num != 0 and has_dock_score is False:
        # No scores to rank by: take the 1st
        # num_elite_to_advance_from_previous_gen shuffled molecules. The
        # length check above guarantees enough entries exist.
        random.shuffle(ligands_which_passed_filters)
        list_of_ligands_to_advance = ligands_which_passed_filters[
            :num_elite_to_advance_from_previous_gen
        ]
    else:
        # generation_num != 0 and has_dock_score is True.
        # Use the make_seed_list function to select the list to advance.
        # This list will be chosen strictly by docking score.
        list_of_ligands_to_advance = make_seed_list(
            vars,
            ligands_which_passed_filters,
            generation_num,
            0,
            num_elite_to_advance_from_previous_gen,
        )

    if generation_num == 0:
        return list_of_ligands_to_advance
    if len(list_of_ligands_to_advance) >= num_elite_to_advance_from_previous_gen:
        return list_of_ligands_to_advance

    printout = "Not enough ligands were chosen to advance to the next generation."
    return printout
#############
# Saving Output files for generations and seeds
#############
def save_generation_smi(output_directory, generation_num,
                        formatted_smile_list, nomenclature_tag):
    """
    Write a generation's population of ligands to a tab-delimited .smi file:
    column 1 is the SMILES string, column 2 is the ligand ID.

    Inputs:
    :param dict output_directory: the directory of the run to save the
        generation
    :param int generation_num: the integer of the current generation
    :param list formatted_smile_list: list of the newly generated population
        of ligands
    :param str nomenclature_tag: the str describing the ligand list; if None
        no tag is added and the file holds the full generation. A tag of
        '_to_convert' marks the subset that still needs 3D conversion.

    Returns:
    :returns: str output_file_name: name of the output file
    :returns: str new_gen_folder_path: the path to the folder containing all
        that will be in this generation
    """
    # Folder that holds everything belonging to this generation. It must
    # already exist; this function only writes the file.
    new_gen_folder_path = output_directory + "generation_{}{}".format(
        generation_num, os.sep
    )

    # The optional tag distinguishes subset lists from the full generation.
    suffix = "" if nomenclature_tag is None else nomenclature_tag
    output_file_name = "{}generation_{}{}.smi".format(
        new_gen_folder_path, generation_num, suffix
    )

    # Tab-delimited rows: SMILES string, then ligand ID.
    with open(output_file_name, "w") as smi_file:
        for entry in formatted_smile_list:
            smi_file.write(entry[0] + "\t" + str(entry[1]) + "\n")

    sys.stdout.flush()
    return output_file_name, new_gen_folder_path
def save_ligand_list(output_directory, generation_num,
                     list_of_chosen_ligands, nomenclature_tag):
    """
    Save a formatted ligand list to the generation's SeedFolder.

    nomenclature_tag is a string such as "Mutation" or "Crossover" or
    "Previous_Gen_choice" describing what this data is used for. If it says
    seeding, it is the chosen mols from the previous generation being used to
    seed the next generation.

    Inputs:
    :param dict output_directory: the directory of the run to save the
        generation
    :param int generation_num: the generation number
    :param list list_of_chosen_ligands: the formatted list of ligands to seed
        a generation
    :param str nomenclature_tag: the str describing the ligand list
        -ie seeding_mutations is the list that seeded the mutations while
            mutations would be the list of mutations generated from the
            seeding_mutations list
        -ie. mutation, crossover, Previous_Gen_choice
    """
    # Generation folder and its SeedFolder subfolder.
    gen_dir = output_directory + "generation_{}{}".format(generation_num, os.sep)
    seed_dir = gen_dir + "SeedFolder" + os.sep

    # Build the directory tree on first use.
    for folder in (gen_dir, seed_dir):
        if not os.path.isdir(folder):
            os.makedirs(folder)

    out_path = "{}{}_Gen_{}.smi".format(seed_dir, nomenclature_tag, generation_num)

    # One tab-joined row per ligand entry.
    with open(out_path, "w") as out_file:
        out_file.writelines(
            "\t".join(entry) + "\n" for entry in list_of_chosen_ligands
        )

    sys.stdout.flush()
| 45,015 | 37.974892 | 144 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/operators/__init__.py | 1 | 0 | 0 | py | |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/operators/operations.py | """
Populates an AutoGrow generation via mutation, crossover, and elitism.
Also filters and converts SMILES to 3d SDFS.
"""
import __future__
import os
import random
import copy
import sys
import rdkit
import rdkit.Chem as Chem
# Disable the unnecessary RDKit warnings
rdkit.RDLogger.DisableLog("rdApp.*")
import autogrow.operators.filter.execute_filters as Filter
import autogrow.docking.ranking.ranking_mol as Ranking
import autogrow.operators.mutation.execute_mutations as Mutation
import autogrow.operators.crossover.execute_crossover as execute_crossover
import autogrow.operators.convert_files.conversion_to_3d as conversion_to_3d
import autogrow.operators.convert_files.gypsum_dl.gypsum_dl.MolObjectHandling as MOH
#############
# Main run Autogrow operators to make a generation
#############
def populate_generation(vars, generation_num):
    """
    This will run all of the mutations, crossovers, and filters for a single
    generation. Populates a new generation of ligands.

    Inputs:
    :param dict vars: a dictionary of all user variables
    :param int generation_num: the generation number, 1,2,3,4,...

    Returns:
    :returns: str full_generation_smiles_file: the name of the .smi file
        containing the new population
    :returns: list full_generation_smiles_list: list with the new population
        of ligands
    :returns: bool None: returns None twice if any step failed. This will
        result in the program ending
    """
    number_of_processors = int(vars["number_of_processors"])

    # Determine which generation it is and how many mutations and crossovers
    # to make
    if generation_num == 1:
        # If 1st generation
        num_crossovers = vars["number_of_crossovers_first_generation"]
        num_mutations = vars["number_of_mutants_first_generation"]

        # How many advance from previous generation to the next generation
        # directly This will be done later but we are unpacking vars here
        num_elite_to_advance_from_previous_gen = vars["number_elitism_advance_from_previous_gen_first_generation"]
    else:
        # Later generations
        num_crossovers = vars["number_of_crossovers"]
        num_mutations = vars["number_of_mutants"]
        num_elite_to_advance_from_previous_gen = vars["number_elitism_advance_from_previous_gen"]

    # Get the Source compound list. This list is the full population from
    # either the previous generations or if its Generation 1 than the its the
    # entire User specified Source compound list If either has a SMILES that
    # does not sanitize in RDKit it will be excluded and a printout of its
    # Name and SMILES string will be printed.
    source_compounds_list = get_complete_list_prev_gen_or_source_compounds(
        vars, generation_num
    )

    num_seed_diversity, num_seed_dock_fitness = determine_seed_population_sizes(
        vars, generation_num
    )

    # Total Population size of this generation
    total_num_desired_new_ligands = (
        num_crossovers + num_mutations + num_elite_to_advance_from_previous_gen
    )

    ###################################################################
    ###################################################################
    # A. Making Mutations
    ###################################################################
    ###################################################################

    # Get starting compounds for Mutations
    seed_list_mutations = make_seed_list(
        vars,
        source_compounds_list,
        generation_num,
        num_seed_diversity,
        num_seed_dock_fitness,
    )

    # Save seed list for Mutations
    save_ligand_list(
        vars["output_directory"],
        generation_num,
        seed_list_mutations,
        "Mutation_Seed_List",
    )

    sys.stdout.flush()
    print("MAKE MUTATIONS")

    # Package user vars specifying the Reaction library to use for mutation
    rxn_library_variables = [
        vars["rxn_library"],
        vars["rxn_library_file"],
        vars["function_group_library"],
        vars["complementary_mol_directory"],
    ]

    new_mutation_smiles_list = []  # List of SMILES from mutation

    # Make all the required ligands by mutations
    while len(new_mutation_smiles_list) < num_mutations:
        sys.stdout.flush()

        num_mutants_to_make = num_mutations - len(new_mutation_smiles_list)

        ##################################
        # Make all mutants -------- main
        ##################################
        new_mutants = Mutation.make_mutants(
            vars,
            generation_num,
            number_of_processors,
            num_mutants_to_make,
            seed_list_mutations,
            new_mutation_smiles_list,
            rxn_library_variables,
        )
        if new_mutants is None:
            # try once more
            new_mutants = Mutation.make_mutants(
                vars,
                generation_num,
                number_of_processors,
                num_mutants_to_make,
                seed_list_mutations,
                new_mutation_smiles_list,
                rxn_library_variables,
            )

        if new_mutants is None:
            # Both attempts failed; give up and report the shortfall below.
            break

        # Remove Nones:
        new_mutants = [x for x in new_mutants if x is not None]

        # Stop appending as soon as the quota is exactly met.
        for i in new_mutants:
            new_mutation_smiles_list.append(i)
            if len(new_mutation_smiles_list) == num_mutations:
                break

    sys.stdout.flush()

    # save new_mutation_smiles_list
    save_ligand_list(
        vars["output_directory"],
        generation_num,
        new_mutation_smiles_list,
        "Chosen_Mutants",
    )

    if (
        new_mutation_smiles_list is None
        or len(new_mutation_smiles_list) < num_mutations
    ):
        print("")
        print("")
        print("We needed to make {} ligands through Mutation".format(num_mutations))
        print(
            "We only made {} ligands through Mutation".format(
                len(new_mutation_smiles_list)
            )
        )
        print("")
        print("")
        raise Exception("Mutation failed to make enough new ligands.")

    print("FINISHED MAKING MUTATIONS")

    ###################################################################
    ###################################################################
    # B. Making Crossovers
    # List of smiles from crossover
    ###################################################################
    ###################################################################

    # Get starting compounds to seed Crossovers
    seed_list_crossovers = make_seed_list(
        vars,
        source_compounds_list,
        generation_num,
        num_seed_diversity,
        num_seed_dock_fitness,
    )

    # Save seed list for Crossovers
    save_ligand_list(
        vars["output_directory"],
        generation_num,
        seed_list_crossovers,
        "Crossover_Seed_List",
    )

    print("MAKE CROSSOVERS")
    sys.stdout.flush()

    new_crossover_smiles_list = []

    # Make all the required ligands by Crossover
    while len(new_crossover_smiles_list) < num_crossovers:
        sys.stdout.flush()

        num_crossovers_to_make = num_crossovers - len(new_crossover_smiles_list)

        # Make all crossovers
        new_crossovers = execute_crossover.make_crossovers(
            vars,
            generation_num,
            number_of_processors,
            num_crossovers_to_make,
            seed_list_crossovers,
            new_crossover_smiles_list,
        )
        if new_crossovers is None:
            # try once more
            new_crossovers = execute_crossover.make_crossovers(
                vars,
                generation_num,
                number_of_processors,
                num_crossovers_to_make,
                seed_list_crossovers,
                new_crossover_smiles_list,
            )
        if new_crossovers is None:
            # Both attempts failed; give up and report the shortfall below.
            break

        # Remove Nones:
        new_crossovers = [x for x in new_crossovers if x is not None]

        # append those which passed the filter, stopping at the exact quota
        for i in new_crossovers:
            new_crossover_smiles_list.append(i)
            if len(new_crossover_smiles_list) == num_crossovers:
                break

    # save new_crossover_smiles_list
    save_ligand_list(
        vars["output_directory"],
        generation_num,
        new_crossover_smiles_list,
        "Chosen_Crossovers",
    )

    if (
        new_crossover_smiles_list is None
        or len(new_crossover_smiles_list) < num_crossovers
    ):
        print("")
        print("")
        print("We needed to make {} ligands through Crossover".format(num_crossovers))
        print(
            "We only made {} ligands through Crossover".format(
                len(new_crossover_smiles_list)
            )
        )
        print("")
        print("")
        raise Exception("Crossover failed to make enough new ligands.")

    print("FINISHED MAKING CROSSOVERS")

    # Get unaltered samples from the previous generation
    print("GET SOME LIGANDS FROM THE LAST GENERATION")
    sys.stdout.flush()

    # Make a list of the ligands chosen to pass through to the next generation
    # via Elitism This handles creating a seed list and defining the advance
    # to next generation final selection
    chosen_mol_to_pass_through_list = make_pass_through_list(
        vars,
        source_compounds_list,
        num_elite_to_advance_from_previous_gen,
        generation_num,
    )

    # make_pass_through_list returns an error string on failure.
    # isinstance is the idiomatic type check (was: type(...) == str).
    if isinstance(chosen_mol_to_pass_through_list, str):
        printout = (
            chosen_mol_to_pass_through_list
            + "\nIf this is the 1st generation, it may be due to the starting "
            + "library has SMILES which could not be converted to sanitizable "
            + "RDKit Molecules"
        )
        raise Exception(printout)

    sys.stdout.flush()

    # save chosen_mol_to_pass_through_list
    save_ligand_list(
        vars["output_directory"],
        generation_num,
        chosen_mol_to_pass_through_list,
        "Chosen_Elite_To_advance",
    )

    print("GOT LIGANDS FROM THE LAST GENERATION")

    # make a list of all the ligands from mutations, crossovers, and from the
    # last generation
    new_generation_smiles_list = []
    full_generation_smiles_list = []
    for i in new_mutation_smiles_list:
        new_generation_smiles_list.append(i)
        full_generation_smiles_list.append(i)

    for i in new_crossover_smiles_list:
        new_generation_smiles_list.append(i)
        full_generation_smiles_list.append(i)

    if vars["redock_elite_from_previous_gen"] is False and generation_num != 1:
        for i in chosen_mol_to_pass_through_list:
            # Doesn't append to the new_generation_smiles_list
            full_generation_smiles_list.append(i)

    # Generation 0 pass through gets added to the convert and dock list
    # because it has no docking score to compare with This is independent of
    # the vars['redock_elite_from_previous_gen']
    else:
        for i in chosen_mol_to_pass_through_list:
            new_generation_smiles_list.append(i)
            full_generation_smiles_list.append(i)

    if len(full_generation_smiles_list) < total_num_desired_new_ligands:
        print("We needed ", total_num_desired_new_ligands)
        print("We made ", len(full_generation_smiles_list))
        print(
            "population failed to make enough mutants or crossovers... \
            Errors could include not enough diversity, too few seeds to \
            the generation, the seed mols are unable to cross-over due \
            to lack of similariy, or all of the seed lack functional groups \
            for performing reactions"
        )
        # BUGFIX: was `return None, None, None`, which disagreed with the
        # two-value success return (and the docstring's "returns None
        # twice"), so callers unpacking two values would crash here.
        return None, None

    # Save the Full Generation
    full_generation_smiles_file, new_gen_folder_path = save_generation_smi(
        vars["output_directory"], generation_num, full_generation_smiles_list, None
    )

    # Save the File to convert to 3d
    smiles_to_convert_file, new_gen_folder_path = save_generation_smi(
        vars["output_directory"],
        generation_num,
        new_generation_smiles_list,
        "_to_convert",
    )

    sys.stdout.flush()

    # CONVERT SMILES TO .sdf USING GYPSUM and convert .sdf to .pdb with rdkit
    # This will output sdf files into a folder. The .smi.0.sdf file is not a
    # valid mol, but all the others will be valid the 1st Smiles in the
    # original .smi file is saved as .smi.1.sdf and 2nd file is saved as
    # .smi.2.sdf
    conversion_to_3d.convert_to_3d(vars, smiles_to_convert_file, new_gen_folder_path)
    sys.stdout.flush()

    return full_generation_smiles_file, full_generation_smiles_list
def populate_generation_zero(vars, generation_num=0):
    """
    This will handle all that is required for generation 0: import the source
    compounds, apply elitism/filtering, and either rank the already-docked
    ligands or stage them for 3D conversion and redocking.

    Inputs:
    :param dict vars: a dictionary of all user variables
    :param int generation_num: the generation number

    Returns:
    :returns: bool already_docked: if True we won't redock the source
        ligands. If False we will dock the source ligands.
    :returns: str full_generation_smiles_file: the name of the .smi file
        containing the new population
    :returns: list full_generation_smiles_list: list with the new population
        of ligands.
    """
    # NOTE: the original computed number_of_processors, crossover/mutation
    # counts, seed-population sizes, and total_num_desired_new_ligands here,
    # but none of them were ever read in this function (generation zero makes
    # no mutants or crossovers), so those dead assignments were removed.

    # Get the Source compound list. This list is the full population from
    # either the previous generations or if its Generation 1 than the its the
    # entire User specified Source compound list If either has a SMILES that
    # does not sanitize in RDKit it will be excluded and a printout of its
    # Name and SMILES string will be printed.
    source_compounds_list = get_complete_list_prev_gen_or_source_compounds(
        vars, generation_num
    )

    # Get unaltered samples from the previous generation
    print("GET SOME LIGANDS FROM THE LAST GENERATION")

    # Make a list of the ligands chosen to pass through to the next generation
    # This handles creating a seed list and defining the advance to next
    # generation final selection
    chosen_mol_to_pass_through_list = make_pass_through_list(
        vars, source_compounds_list, 1, 0
    )

    # make_pass_through_list returns an error string on failure.
    # isinstance is the idiomatic type check (was: type(...) == str).
    if isinstance(chosen_mol_to_pass_through_list, str):
        printout = (
            chosen_mol_to_pass_through_list
            + "\nIf this is the 1st generation, it may be due to the starting "
            + "library has SMILES which could not be converted to "
            + "sanitizable RDKit Molecules"
        )
        raise Exception(printout)

    # save chosen_mol_to_pass_through_list
    save_ligand_list(
        vars["output_directory"],
        generation_num,
        chosen_mol_to_pass_through_list,
        "Chosen_Elite_To_advance",
    )

    print("GOT LIGANDS FROM THE LAST GENERATION")

    # These will be docked and scored for generation 0; both lists start as
    # shallow copies of the pass-through selection.
    new_generation_smiles_list = list(chosen_mol_to_pass_through_list)
    full_generation_smiles_list = list(chosen_mol_to_pass_through_list)

    if len(full_generation_smiles_list) == 0:
        print(
            "population failed to import any molecules from the source_compounds_list."
        )
        return None, None, None

    # Save the Full Generation
    full_generation_smiles_file, new_gen_folder_path = save_generation_smi(
        vars["output_directory"], generation_num, full_generation_smiles_list, None
    )

    # Save the File to convert to 3d
    smiles_to_convert_file, new_gen_folder_path = save_generation_smi(
        vars["output_directory"],
        generation_num,
        new_generation_smiles_list,
        "_to_convert",
    )

    # If every ligand already carries a docking score in its second-to-last
    # column, sort by it; otherwise everything must be converted and
    # redocked. Narrowed from a bare except to the exceptions float() and
    # indexing can actually raise.
    try:
        full_generation_smiles_list.sort(key=lambda x: float(x[-2]), reverse=False)
        full_generation_smiles_list_printout = [
            "\t".join(x) for x in full_generation_smiles_list
        ]
        already_docked = True
    except (TypeError, ValueError, IndexError):
        print(
            "Not all ligands in source compound list are scored. "
            + "We will convert and redock them all."
        )
        already_docked = False

    if already_docked is True:
        # Write all the ligands to a ranked file
        full_generation_smiles_list_printout = "\n".join(
            full_generation_smiles_list_printout
        )
        # new_gen_folder_path already ends with os.sep; the original added a
        # second, redundant separator here.
        ranked_file = new_gen_folder_path + "generation_0_ranked.smi"
        with open(ranked_file, "w") as f:
            f.write(full_generation_smiles_list_printout)

        return already_docked, full_generation_smiles_file, full_generation_smiles_list

    # If you are to redock and convert the generation zero you will also need
    # to do the following:

    # CONVERT SMILES TO .sdf USING GYPSUM and convert .sdf to .pdb with
    # rdkit This will output sdf files into a folder. The .smi.0.sdf file
    # is not a valid mol, but all the others will be valid the 1st Smiles
    # in the original .smi file is saved as .smi.1.sdf and 2nd file is
    # saved as .smi.2.sdf
    conversion_to_3d.convert_to_3d(
        vars, smiles_to_convert_file, new_gen_folder_path
    )

    return already_docked, full_generation_smiles_file, full_generation_smiles_list
#############
# Get seeds
#############
def test_source_smiles_convert(smile_info):
    """
    This attempts to convert a SMILES string to an rdkit.Chem.rdchem.Mol
    object
        -done in a try statement so that it is some bad SMILES string which is
            incapable of being converted
        - it also checks that the SMILES string is able to be sanitized

    Inputs:
    :param list smile_info: a list containing the SMILES of a ligand, its ID
        and potentially additional information about the ligand

    Returns:
    :returns: list smile_info: If it passed the test, it returns the list
        containing the SMILES of a ligand, its ID and potentially additional
        information about the ligand
    :returns: str printout: If it failed to convert it returns the error
        message. This passess out to prevent MPI print issues
    """
    # Reject None / empty entries outright.
    if smile_info is None or len(smile_info) == 0:
        printout = (
            "REMOVING SMILES FROM SOURCE LIST: Blank "
            + "entry in source compound list.\n"
        )
        printout = printout + "\tRemoving: {}".format(smile_info)
        return printout

    # An entry needs at least a SMILES string and an ID.
    if len(smile_info) == 1:
        printout = "REMOVING SMILES FROM SOURCE LIST: Unformatted or blank "
        printout = printout + "entry in source compound list.\n"
        printout = printout + "\tRemoving: {}".format(smile_info)
        return printout

    # separate out SMILES str and ID
    smile_str = smile_info[0]
    smile_id = str(smile_info[1])

    # isinstance is the idiomatic check (was: type(smile_str) is not type("")).
    if not isinstance(smile_str, str):
        printout = "REMOVING SMILES FROM SOURCE LIST: SMILES string is not a "
        printout = printout + "String. Check for formatting errors. \n"
        printout = printout + "\tIgnored SMILES is: {}".format(smile_str)
        return printout

    # Try importing it into RDKit with Sanitization off. Tests for errors in
    # having the wrong data type. Narrowed from a bare except so that
    # KeyboardInterrupt/SystemExit are not silently swallowed.
    try:
        mol = Chem.MolFromSmiles(str(smile_str), sanitize=False)
    except Exception:
        printout = "REMOVING SMILES FROM SOURCE LIST: SMILES string failed "
        printout = printout + "to import into RDKit.\n\t "
        printout = printout + "Removed SMILE string is: {} \n".format(smile_str)
        printout = printout + "\t Removed SMILE ID is: {}".format(smile_id)
        return printout

    # This will fail if there are valence errors. We won't try to correct
    # someones source compound list Although the MOH.check_sanitization will
    # do that. try sanitizing, which is necessary later. Narrowed from a
    # bare except for the same reason as above.
    try:
        Chem.SanitizeMol(mol)
    except Exception:
        printout = "REMOVING SMILES FROM SOURCE LIST: SMILES "
        printout = printout + "string failed to Sanitize in RDKit.\n"
        printout = printout + "\t Removed SMILE string is: {} \n".format(smile_str)
        printout = printout + "\t Removed SMILE ID is: {}".format(smile_id)
        return printout

    # Make the mol again fresh and try running it through MOH.handleHs() This
    # will try protanating and Deprotanating the mol. If it can't handle that
    # We reject it as many functions will require this sort of manipulation.
    # More advanced sanitization issues will also be removed in this step
    mol = Chem.MolFromSmiles(str(smile_str), sanitize=False)
    mol = MOH.handleHs(mol, True)

    if mol is None:
        printout = "REMOVING SMILES FROM SOURCE LIST: SMILES string failed \
        to be protanated or deprotanated.\n"
        printout = (
            printout
            + "\t This is often an issue with valence and sanitization "
            + "issues with the SMILES string."
        )
        printout = printout + "\t Removed SMILE string is: {} \n".format(smile_str)
        printout = printout + "\t Removed SMILE ID is: {}".format(smile_id)
        return printout

    # Check there are no * which are atoms with atomic number=0
    mol = MOH.check_for_unassigned_atom(mol)
    if mol is None:
        printout = "REMOVING SMILES FROM SOURCE LIST: SMILES string contained "
        printout = printout + "an unassigned atom type labeled as *.\n"
        printout = printout + "\t Removed SMILE string is: {} \n".format(smile_str)
        printout = printout + "\t Removed SMILE ID is: {}".format(smile_id)
        return printout

    # Check for fragments.
    if len(Chem.GetMolFrags(mol, asMols=True, sanitizeFrags=False)) != 1:
        printout = "REMOVING SMILES FROM SOURCE LIST: SMILES string was fragmented.\n"
        printout = printout + "\t Removed SMILE string is: {} \n".format(smile_str)
        printout = printout + "\t Removed SMILE ID is: {}".format(smile_id)
        return printout

    # the ligand is good enough to use throughout the program!
    return smile_info
def get_complete_list_prev_gen_or_source_compounds(vars, generation_num):
    """
    Get the source compounds list from the previous generation or the source
    compound list.

    This also filters the list to ensure mols can be imported to RDKit and
    that they pass the drug-likliness filters.

    If generation = 1 use the User specified starting compounds If generation
    is >1 than use the previous generations top ligands. This takes an .smi
    file

    Inputs:
    :param dict vars: a dictionary of all user variables
    :param int generation_num: the integer of the current generation

    Returns:
    :returns: list usable_list_of_smiles: a list with SMILES strings, names,
        and information about the smiles from the previous generation or the
        source compound list
    """
    source_file_gen_0 = vars[
        "output_directory"
    ] + "generation_{}{}generation_{}_ranked.smi".format(0, os.sep, 0)

    # Generation 0, and generation 1 when generation 0 was never ranked,
    # both seed directly from the user-supplied source compound file (the
    # original duplicated this branch verbatim for the two cases).
    if generation_num == 0 or (
        generation_num == 1 and os.path.exists(source_file_gen_0) is False
    ):
        source_file = str(vars["source_compound_file"])
        usable_list_of_smiles = Ranking.get_usable_format(source_file)

        if len(usable_list_of_smiles) == 0:
            print(
                "\nThere were no available ligands in source compound. Check formatting\n"
            )
            raise Exception(
                "There were no available ligands in source compound. Check formatting"
            )
    else:
        # Seed from the previous generation's ranked output.
        source_file = vars[
            "output_directory"
        ] + "generation_{}{}generation_{}_ranked.smi".format(
            generation_num - 1, os.sep, generation_num - 1
        )
        if os.path.exists(source_file) is False:
            printout = (
                "\n"
                + "There were no available ligands in previous"
                + " generation ranked ligand file.\n"
            )
            printout = printout + "\tCheck formatting or if file has been moved.\n"
            print(printout)
            raise Exception(printout)

        usable_list_of_smiles = Ranking.get_usable_format(source_file)

        if len(usable_list_of_smiles) == 0:
            printout = (
                "\n"
                + "There were no available ligands in previous"
                + " generation ranked ligand file.\n"
            )
            printout = printout + "\tCheck formatting or if file has been moved. \n"
            print(printout)
            raise Exception(printout)

    # Test that every SMILES in the usable_list_of_smiles is a valid SMILES
    # which will import and Sanitize in RDKit. SMILES will be excluded if they
    # are fragmented, contain atoms with no atomic number (*), or do not
    # sanitize
    job_input = tuple([tuple([i]) for i in usable_list_of_smiles])

    usable_list_of_smiles = vars["parallelizer"].run(
        job_input, test_source_smiles_convert
    )

    usable_list_of_smiles = [x for x in usable_list_of_smiles if x is not None]
    # test_source_smiles_convert returns a str error message on failure and
    # the original list entry on success.
    print_errors = [x for x in usable_list_of_smiles if isinstance(x, str)]
    usable_list_of_smiles = [x for x in usable_list_of_smiles if isinstance(x, list)]
    for x in print_errors:
        print(x)

    if len(usable_list_of_smiles) == 0:
        printout = "\nThere were no ligands in source compound or previous \
        generation which could sanitize.\n"
        print(printout)
        raise Exception(printout)

    if vars["filter_source_compounds"] is True:

        prefilter_list = copy.deepcopy(usable_list_of_smiles)

        print("")
        print("Running Filter on the Compounds from last generation/Source")
        usable_list_of_smiles = Filter.run_filter(vars, usable_list_of_smiles)

        # Remove Nones:
        usable_list_of_smiles = [x for x in usable_list_of_smiles if x is not None]

        if len(usable_list_of_smiles) == 0:
            printout = "\nThere were no ligands in source compound which \
            passed the User-selected Filters.\n"
            print(printout)
            raise Exception(printout)

        # BUGFIX: the original re-initialized failed_filter_list inside the
        # loop (wiping it every iteration) and iterated the POST-filter list
        # — a subset of prefilter_list — so no removed ligand could ever be
        # reported. Report every pre-filter ligand that did not survive.
        failed_filter_list = [
            lig[1] for lig in prefilter_list if lig not in usable_list_of_smiles
        ]

        if len(failed_filter_list) != 0:
            printout = "\n THE FOLLOWING LIGANDS WERE REMOVED FROM THE\
            SOURCE LIST: Failed the User-selected Filters\n"
            printout = printout + "\t{}".format(failed_filter_list)
            print(printout)

    random.shuffle(usable_list_of_smiles)

    return usable_list_of_smiles
def make_seed_list(vars, source_compounds_list, generation_num, num_seed_diversity,
                   num_seed_dock_fitness):
    """
    Get the starting compound list for running the Mutation and Crossovers.

    If generation_num is 0, use the user-specified starting compounds. For
    later generations, use the previous generation's top ligands selected by
    diversity and docking-fitness. This takes an .smi-file-derived list.

    Inputs:
    :param dict vars: a dictionary of all user variables
    :param list source_compounds_list: a list with SMILES strings, names, and
        information about the smiles from either the previous generation or
        the source compound list
    :param int generation_num: the integer of the current generation
    :param int num_seed_diversity: the number of seed molecules which come
        from diversity selection
    :param int num_seed_dock_fitness: the number of seed molecules which come
        from elite selection by docking score
    Returns:
    :returns: list usable_list_of_smiles: a shuffled list with SMILES
        strings, names, and information about the smiles which will be used
        to seed the next generation
    """
    usable_list_of_smiles = copy.deepcopy(source_compounds_list)

    full_length = False
    if generation_num == 0:
        # Seed mutations/crossovers with the entire starting compound list.
        full_length = True
    elif generation_num == 1:
        if vars["use_docked_source_compounds"] is False:
            full_length = True
        else:
            source_file_gen_0 = vars[
                "output_directory"
            ] + "generation_{}{}generation_{}_ranked.smi".format(0, os.sep, 0)
            if os.path.exists(source_file_gen_0) is False:
                full_length = True
            else:
                # Generation 1 may have a source pool smaller than the number
                # of seeds requested (common in lead-optimization, and the
                # seeding counts are tied to the generation number via the
                # diversity-depreciation option). In that case, override the
                # ranked selection and seed generation 1 with the full pool,
                # warning the user that no selection pressure is applied.
                # BUGFIX: the second clause previously re-tested
                # num_seed_diversity instead of num_seed_dock_fitness.
                if (
                    len(usable_list_of_smiles) < num_seed_diversity
                    or len(usable_list_of_smiles) < num_seed_dock_fitness
                ):
                    printout = (
                        "\n\nNot enough ligands in source compound list to "
                        "seed generation 1. We will use the entire list of "
                        "every ligand in the source compound list to seed "
                        "generation 1. This means there is no selection in "
                        "generation 1's seeding process.\n\n"
                    )
                    print(printout)
                    full_length = True

    if full_length is True or generation_num == 0:
        # Use the full-length list of starting molecules as the seed.
        random.shuffle(usable_list_of_smiles)
    else:
        selector_choice = vars["selector_choice"]
        tourn_size = vars["tourn_size"]
        # Select a subset of the source list based on diversity scores and
        # docking scores.
        usable_list_of_smiles = Ranking.create_seed_list(
            usable_list_of_smiles,
            num_seed_diversity,
            num_seed_dock_fitness,
            selector_choice,
            tourn_size,
        )

    random.shuffle(usable_list_of_smiles)
    return usable_list_of_smiles
def determine_seed_population_sizes(vars, generation_num):
    """
    Work out how many molecules seed a generation because of their docking
    score (elitist selection) versus their diversity score.

    Inputs:
    :param dict vars: a dictionary of all user variables
    :param int generation_num: the integer of the current generation
    Returns:
    :returns: int num_seed_diversity: the number of seed molecules which come
        from diversity selection
    :returns: int num_seed_dock_fitness: the number of seed molecules which
        come from elite selection by docking score
    """
    # Diversity seeding depreciates every generation after the first; the
    # same amount is shifted over to elitist (docking-score) selection.
    depreciation = (generation_num - 1) * vars["diversity_seed_depreciation_per_gen"]

    if generation_num == 1:
        top_seed_count = vars["top_mols_to_seed_next_generation_first_generation"]
    else:
        top_seed_count = vars["top_mols_to_seed_next_generation"]

    # Seeds chosen because of their diversity score.
    num_seed_diversity = (
        vars["diversity_mols_to_seed_first_generation"] - depreciation
    )

    # Seeds chosen because of their docking score. Elitist-style selection,
    # although the actual picks use a weighted roulette selector later on.
    if num_seed_diversity > 0:
        num_seed_dock_fitness = top_seed_count + depreciation
    else:
        # Diversity fully depreciated: move its entire allotment to elitism.
        num_seed_dock_fitness = (
            top_seed_count + vars["diversity_mols_to_seed_first_generation"]
        )
        num_seed_diversity = 0

    return num_seed_diversity, num_seed_dock_fitness
def make_pass_through_list(vars, smiles_from_previous_gen_list,
                           num_elite_to_advance_from_previous_gen,
                           generation_num):
    """
    Determine which elite ligands advance from the previous generation into
    the next generation without being altered.

    Inputs:
    :param dict vars: a dictionary of all user variables
    :param list smiles_from_previous_gen_list: List of SMILES from the last
        generation chosen to seed the list of molecules to advance to the
        next generation without modification via elitism.
    :param int num_elite_to_advance_from_previous_gen: the number of
        molecules to advance from the last generation without modifications.
    :param int generation_num: the integer of the current generation
    Returns:
    :returns: list list_of_ligands_to_advance: a list of ligands which should
        advance into the new generation without modifications, via elitism
        from the last generation. Returns a str printout of why it failed if
        it fails.
    """
    # Each entry will be a sublist of [SMILES_string, ID, ...].
    list_of_ligands_to_advance = []

    # Fail early if the previous generation did not yield enough ligands.
    if (
        generation_num != 0
        and len(smiles_from_previous_gen_list) < num_elite_to_advance_from_previous_gen
    ):
        printout = "Not enough ligands in initial list the filter to progress"
        printout = (
            printout
            + "\n len(smiles_from_previous_gen_list): {} ; "
            "num_elite_to_advance_from_previous_gen: {}".format(
                len(smiles_from_previous_gen_list),
                num_elite_to_advance_from_previous_gen)
        )
        return printout

    # Drop any malformed entries (everything kept must be a list).
    smiles_from_previous_gen_list = [
        x for x in smiles_from_previous_gen_list if isinstance(x, list)
    ]

    if generation_num == 0 and vars["filter_source_compounds"] is True:
        # Run the user-chosen filters on the ligand list.
        ligands_which_passed_filters = Filter.run_filter(
            vars, smiles_from_previous_gen_list
        )
        # Remove Nones (ligands which failed the filters):
        ligands_which_passed_filters = [
            x for x in ligands_which_passed_filters if x is not None
        ]
    else:
        ligands_which_passed_filters = [
            x for x in smiles_from_previous_gen_list if x is not None
        ]

    # Fail if too few ligands survived filtering/sanitization.
    if (
        generation_num != 0
        and len(ligands_which_passed_filters) < num_elite_to_advance_from_previous_gen
    ):
        printout = "Not enough ligands passed the filter to progress"
        return printout

    # Save the list of all ligands which passed; it serves as the seed list.
    save_ligand_list(
        vars["output_directory"],
        generation_num,
        ligands_which_passed_filters,
        "Previous_Gen_Elite_Seed_List",
    )

    # Determine whether every entry carries a docking score at index -2.
    # Previously a bare `except:` which would also swallow KeyboardInterrupt;
    # narrowed to Exception (any parse failure means no usable scores).
    try:
        for entry in ligands_which_passed_filters:
            float(entry[-2])
        has_dock_score = True
    except Exception:
        has_dock_score = False

    if has_dock_score is False:
        # No docking scores: advance molecules in random order. Generation 0
        # keeps the whole pool; later generations keep only the first
        # num_elite_to_advance_from_previous_gen molecules.
        random.shuffle(ligands_which_passed_filters)
        if generation_num == 0:
            list_of_ligands_to_advance = list(ligands_which_passed_filters)
        else:
            list_of_ligands_to_advance = ligands_which_passed_filters[
                :num_elite_to_advance_from_previous_gen
            ]
    else:
        # Docking scores exist: select via make_seed_list (score-driven
        # elitist selection). Generation 0 allows the diversity pool to span
        # the whole list; later generations use score-only selection.
        if generation_num == 0:
            num_seed_diversity = len(ligands_which_passed_filters)
        else:
            num_seed_diversity = 0
        list_of_ligands_to_advance = make_seed_list(
            vars,
            ligands_which_passed_filters,
            generation_num,
            num_seed_diversity,
            num_elite_to_advance_from_previous_gen,
        )

    if generation_num == 0:
        return list_of_ligands_to_advance
    if len(list_of_ligands_to_advance) >= num_elite_to_advance_from_previous_gen:
        return list_of_ligands_to_advance
    printout = "Not enough ligands were chosen to advance to the next generation."
    return printout
#############
# Saving Output files for generations and seeds
#############
def save_generation_smi(output_directory, generation_num,
                        formatted_smile_list, nomenclature_tag):
    """
    Save a newly generated population of ligands as a tab-delimited .smi
    file: column 1 is the SMILES string and column 2 is its smile ID.

    Inputs:
    :param str output_directory: the directory of the run to save the
        generation
    :param int generation_num: the integer of the current generation
    :param list formatted_smile_list: list of the newly generated population
        of ligands
    :param str nomenclature_tag: describes the ligand list; None means the
        full list of all ligands for the generation. '_to_convert' marks the
        subset which still needs conversion to 3D (which may or may not
        include the ligands passed through from the last generation).
    Returns:
    :returns: str output_file_name: name of the output file
    :returns: str new_gen_folder_path: the path to the folder containing all
        that will be in this generation
    """
    # Folder for this new generation (assumed to already exist).
    new_gen_folder_path = output_directory + "generation_{}{}".format(
        generation_num, os.sep
    )

    # Build the output file name, with or without the tag.
    if nomenclature_tag is None:
        output_file_name = new_gen_folder_path + "generation_{}.smi".format(
            generation_num
        )
    else:
        output_file_name = new_gen_folder_path + "generation_{}{}.smi".format(
            generation_num, nomenclature_tag
        )

    # Write as a tab-delimited .smi file: SMILES<TAB>ID per line.
    with open(output_file_name, "w") as smi_file:
        for entry in formatted_smile_list:
            smi_file.write(entry[0] + "\t" + str(entry[1]) + "\n")

    sys.stdout.flush()
    return output_file_name, new_gen_folder_path
def save_ligand_list(output_directory, generation_num,
                     list_of_chosen_ligands, nomenclature_tag):
    """
    Save a formatted list of ligands into the generation's SeedFolder.

    nomenclature_tag is a string such as "Mutation", "Crossover" or
    "Previous_Gen_choice" describing what this data is used for. If it says
    "seeding" it is the chosen mols from the previous generation being used
    to seed the next generation (e.g. seeding_mutations is the list that
    seeded the mutations, while mutations would be the list generated from
    the seeding_mutations list).

    Inputs:
    :param str output_directory: the directory of the run to save the
        generation
    :param int generation_num: The generation number
    :param list list_of_chosen_ligands: The formatted list of ligands to seed
        a generation
    :param str nomenclature_tag: The str describing the ligand list
    """
    new_gen_folder_path = output_directory + "generation_{}{}".format(
        generation_num, os.sep
    )
    seed_folder_path = new_gen_folder_path + "SeedFolder" + os.sep

    # Create the generation and seed folders if they do not yet exist.
    for folder in (new_gen_folder_path, seed_folder_path):
        if not os.path.isdir(folder):
            os.makedirs(folder)

    output_file_name = "{}{}_Gen_{}.smi".format(
        seed_folder_path, nomenclature_tag, generation_num
    )

    # Write the chosen ligands as a tab-delimited .smi file.
    with open(output_file_name, "w") as output:
        output.writelines("\t".join(entry) + "\n" for entry in list_of_chosen_ligands)

    sys.stdout.flush()
| 42,982 | 36.970848 | 114 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/operators/filter/execute_filters.py | """
Top level for running filters.
"""
import __future__
import copy
import rdkit
from rdkit import Chem
from rdkit.Chem.MolStandardize import rdMolStandardize
# Disable the unnecessary RDKit warnings
rdkit.RDLogger.DisableLog("rdApp.*")
from autogrow.operators.filter.filter_classes.parent_filter_class import ParentFilter
from autogrow.operators.filter.filter_classes.get_child_filter_class import get_all_subclasses
import autogrow.operators.convert_files.gypsum_dl.gypsum_dl.MolObjectHandling as MOH
from autogrow.operators.filter.filter_classes.filter_children_classes import *
def make_run_class_dict(filters_to_use):
    """
    Build a dictionary mapping each requested filter name to an instantiated
    filter object, drawn from every child class of ParentFilter.

    Inputs:
    :param list filters_to_use: list of filters to be used, defined in
        vars["chosen_ligand_filters"]. None means the user turned filters
        off.
    Returns:
    :returns: dict child_dict: the names of the chosen filters as keys and
        the filter objects as the items; None if no filters are specified by
        the user.
    """
    if filters_to_use is None:
        # The user turned off filtering entirely.
        return None

    child_dict = {}
    for subclass in get_all_subclasses(ParentFilter):
        instance = subclass()
        name = instance.get_name()
        if name in filters_to_use:
            child_dict[name] = instance
    return child_dict
def run_filter(vars, list_of_new_ligands):
    """
    Run the filters of the user's choosing over a list of ligands.

    list_of_new_ligands is a list of lists, e.g.
    [["CCC", "Zinc123"], ["CCCC", "Zinc1234"]].

    Inputs:
    :param dict vars: User variables which will govern how the program runs
    :param list list_of_new_ligands: list of lists containing all the newly
        generated ligands and their names
    Returns:
    :returns: list ligands_which_passed_filter: only the molecules which
        passed the filter; molecules which failed are excluded.
    """
    # The dictionary of filter objects was already constructed up front.
    filter_object_dict = vars["filter_object_dict"]

    # Package one (ligand_info, filter_dict) tuple per ligand for the
    # multiprocessing run of the filter.
    job_input = tuple(
        (smiles_info, filter_object_dict) for smiles_info in list_of_new_ligands
    )
    results = vars["parallelizer"].run(job_input, run_filter_mol)

    # Mols which failed the filter come back as None; drop them.
    ligands_which_passed_filter = [x for x in results if x is not None]
    return ligands_which_passed_filter
def run_filter_mol(smiles_info, child_dict):
    """
    Sanitize a single ligand's SMILES and run it through the selected
    filters.

    Inputs:
    :param list smiles_info: A list with info about a ligand, the SMILES
        string is idx=0 and the name/ID is idx=1. example: smiles_info
        ["CCCCCCC","zinc123"]
    :param dict child_dict: the names of the chosen filters as keys and the
        filter objects as the items, or None if the user specifies no filters
    Returns:
    :returns: list smiles_info: the smiles_info if it passed the filter (or
        if no filters were chosen); None if the mol fails sanitization or a
        filter.
    """
    mol = Chem.MolFromSmiles(smiles_info[0], sanitize=False)

    # Sanitize in stages; bail out the moment any step fails.
    mol = MOH.check_sanitization(mol)
    if mol is None:
        return None
    mol = MOH.try_deprotanation(mol)
    if mol is None:
        return None
    mol = MOH.check_sanitization(mol)
    if mol is None:
        return None

    # Remove charges: they affect properties such as logP, molar
    # refractivity, and polar surface area, which can impact filters such as
    # Ghose and VandeWaterbeemd. logP is traditionally applied to neutral
    # molecules.
    mol = rdMolStandardize.Uncharger().uncharge(mol)
    if mol is None:
        return None

    if child_dict is not None and run_all_selected_filters(mol, child_dict) is False:
        # Failed at least one of the selected filters.
        return None

    # Passed every selected filter (or no filters were selected).
    return smiles_info
def run_filter_on_just_smiles(smile_string, child_dict):
    """
    Sanitize a bare SMILES string and run it through the selected filters.

    Inputs:
    :param str smile_string: A smiles_string. example: "CCCCCCC"
    :param dict child_dict: the names of the chosen filters as keys and the
        filter objects as the items, or None if the user specifies no filters
    Returns:
    :returns: str smile_string: the smile_string if it passed the filter;
        False if the mol fails sanitization or a filter.
    """
    mol = Chem.MolFromSmiles(smile_string, sanitize=False)

    # Sanitize in stages; bail out the moment any step fails.
    mol = MOH.check_sanitization(mol)
    if mol is None:
        return False
    mol = MOH.try_deprotanation(mol)
    if mol is None:
        return False

    if child_dict is not None and run_all_selected_filters(mol, child_dict) is False:
        # Failed at least one of the selected filters.
        return False

    # Passed every selected filter (or no filters were selected).
    return smile_string
def run_all_selected_filters(mol, child_dict):
    """
    Run a single molecule through every filter the user selected.

    All filters are evaluated (no short-circuit) and failures are tallied,
    mirroring the original accounting.

    Inputs:
    :param rdkit.Chem.rdchem.Mol object mol: An rdkit mol object to be tested
        if it passes the filters
    :param dict child_dict: the names of the chosen filters as keys and the
        filter objects as the items
    Returns:
    returns bool bool: True if the mol passes all the filters; False if the
    mol fails any filter.
    """
    mol = MOH.check_sanitization(mol)
    if mol is None:
        return False

    # Hand each filter its own deep copy so none can mutate the shared mol.
    failure_count = sum(
        1
        for name in list(child_dict.keys())
        if child_dict[name].run_filter(copy.deepcopy(mol)) is False
    )
    return failure_count == 0
| 6,794 | 30.169725 | 94 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/operators/filter/__init__.py | 1 | 0 | 0 | py | |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/operators/filter/filter_classes/get_child_filter_class.py | """
An object for auto-detecting and creating jobs with the proper templates.
"""
# You'll need to import the base class first
def get_all_subclasses(base_class):
    """
    Recursively collect every direct and indirect subclass of a class.

    Taken from:
    http://stackoverflow.com/questions/3862310/how-can-i-find-all-subclasses-of-a-class-given-its-name

    Inputs:
    :param class base_class: The parent class which we are looking towards.
    Returns
    :returns: list all_subclasses: the child classes of base_class, each
        direct subclass immediately followed by its own descendants.
    """
    found = []
    for direct_child in base_class.__subclasses__():
        found += [direct_child] + get_all_subclasses(direct_child)
    return found
| 775 | 26.714286 | 102 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/operators/filter/filter_classes/parent_filter_class.py | """
This script holds the parent class for filtering.
This is used as the basis for all filter classes.
"""
import __future__
class ParentFilter(object):
    """
    Base class for all of the drug-likeness filters.

    Filters for orally bio-available drugs:
        1) Lipinski
    Filters for lead-likeness:
        1) GhoseFilter
        2) GhoseModifiedFilter
        3) MozziconacciFilter
    Filters for CNS/Blood Brain Barrier Permeable:
        1) VandeWaterbeemdFilter
    False-Positive/Metabolite substructure searches:
        1) PAINSFilter
        2) NIHFilter
        3) BRENKFilter
    """

    def get_name(self):
        """
        Return the name of the concrete filter class.

        Returns:
        :returns: str self.__class__.__name__: the current class name.
        """
        return self.__class__.__name__

    def run_filter(self, input_string):
        """
        Subclasses must override this with their actual filtering logic.

        Inputs:
        :param str input_string: A string to raise an exception
        """
        raise NotImplementedError("run_filter() not implemented")
| 1,124 | 24 | 70 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/operators/filter/filter_classes/__init__.py | 0 | 0 | 0 | py | |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/operators/filter/filter_classes/filter_children_classes/lipinski_lenient_filter.py | """Lipinski Lenient
This runs a Lenient Lipinski filter. Lipinski filter refines for orally
available drugs. It filters molecules by Molecular weight (MW), the number of
hydrogen donors, the number hydrogen acceptors, and the logP value.
To pass the Lipinski filter a molecule must be:
MW: Max 500 dalton
Number of H acceptors: Max 10
Number of H donors: Max 5
logP Max +5.0
If you use the Lipinski Filter please cite: C.A. Lipinski et al. Experimental
and computational approaches to estimate solubility and permeability in drug
discovery and development settings Advanced Drug Delivery Reviews, 46 (2001),
pp. 3-26
"""
import __future__
import rdkit
import rdkit.Chem as Chem
import rdkit.Chem.Lipinski as Lipinski
import rdkit.Chem.Crippen as Crippen
import rdkit.Chem.Descriptors as Descriptors
#Disable the unnecessary RDKit warnings
rdkit.RDLogger.DisableLog('rdApp.*')
from autogrow.operators.filter.filter_classes.parent_filter_class import ParentFilter
class LipinskiLenientFilter(ParentFilter):
    """
    Lenient Lipinski filter, which refines for orally available drugs.

    It screens molecules by Molecular weight (MW), the number of hydrogen
    donors, the number of hydrogen acceptors, and the logP value. "Lenient"
    means a ligand is allowed one violation exception to the Lipinski Rule
    of 5 restraints:
        MW: Max 500 dalton
        Number of H acceptors: Max 10
        Number of H donors: Max 5
        logP Max +5.0

    If you use the Lipinski Filter please cite: C.A. Lipinski et al.
    Experimental and computational approaches to estimate solubility and
    permeability in drug discovery and development settings Advanced Drug
    Delivery Reviews, 46 (2001), pp. 3-26

    Inputs:
    :param class ParentFilter: a parent class to initialize off
    """

    def run_filter(self, mol):
        """
        Apply the lenient Rule-of-5 screen to a molecule: at most one of the
        four constraints (MW <= 500, H-donors <= 5, H-acceptors <= 10,
        logP <= 5) may be violated.

        Inputs:
        :param rdkit.Chem.rdchem.Mol object mol: An rdkit mol object to be
            tested if it passes the filters
        Returns:
        :returns: bool bool: True if the mol passes the filter; False if it
            fails the filter
        """
        violations = 0
        if Descriptors.ExactMolWt(mol) > 500:
            violations += 1
        if Lipinski.NumHDonors(mol) > 5:
            violations += 1
        if Lipinski.NumHAcceptors(mol) > 10:
            violations += 1
        if Crippen.MolLogP(mol) > 5:
            violations += 1
        # One violation is tolerated; two or more is a failure.
        return violations < 2
| 3,537 | 34.029703 | 85 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/operators/filter/filter_classes/filter_children_classes/mozziconacci_filter.py | """Mozziconacci Filter
This runs a Mozziconacci filter. Mozziconacci filter is a filter for
Drug-likeliness which filters molecules by the number of: rotatable bonds,
rings, oxygens, and halogens.
To pass the filter a molecule must be:
# of Rotatable bonds: Max 15
# of Rings: Max 6
# of Oxygens: Min 1
# of Nitrogens: Min 1
# of Halogens: Max 7
If you use the Mozziconacci Filter please cite: Mozziconacci, J. C. et al.
Preparation of a Molecular Database from a Set of 2 Million Compounds for
Virtual Screening Applications: Gathering, Structural Analysis and Filtering.
9th Electronic Computational Chemistry Conference, World Wide Web, March
(2003).
"""
import __future__
import rdkit
import rdkit.Chem as Chem
import rdkit.Chem.Lipinski as Lipinski
# Disable the unnecessary RDKit warnings
rdkit.RDLogger.DisableLog("rdApp.*")
from autogrow.operators.filter.filter_classes.parent_filter_class import ParentFilter
class MozziconacciFilter(ParentFilter):
    """
    Mozziconacci filter: a drug-likeness filter which screens molecules by
    the number of rotatable bonds, rings, oxygens, nitrogens, and halogens.

    To pass the filter a molecule must have:
        # of Rotatable bonds: Max 15
        # of Rings: Max 6
        # of Oxygens: Min 1
        # of Nitrogens: Min 1
        # of Halogens: Max 7

    If you use the Mozziconacci Filter please cite: Mozziconacci, J. C. et
    al. Preparation of a Molecular Database from a Set of 2 Million
    Compounds for Virtual Screening Applications: Gathering, Structural
    Analysis and Filtering. 9th Electronic Computational Chemistry
    Conference, World Wide Web, March (2003).

    Inputs:
    :param class ParentFilter: a parent class to initialize off
    """

    def run_filter(self, mol):
        """
        Apply the Mozziconacci counts-based screen to a molecule.

        Inputs:
        :param rdkit.Chem.rdchem.Mol object mol: An rdkit mol object to be
            tested if it passes the filters
        Returns:
        :returns: bool bool: True if the mol passes the filter; False if it
            fails the filter
        """
        # Halogens (F, Cl, Br, I, At): max 7. maxMatches=8 is just enough to
        # detect an eighth (disqualifying) halogen.
        halogen_smarts = Chem.MolFromSmarts("[*;#9,#17,#35,#53,#85]")
        if len(mol.GetSubstructMatches(halogen_smarts, maxMatches=8)) > 7:
            return False

        # Oxygens: min 1.
        oxygen_smarts = Chem.MolFromSmarts("[#8]")
        if len(mol.GetSubstructMatches(oxygen_smarts, maxMatches=2)) < 1:
            return False

        # Nitrogens: min 1.
        nitrogen_smarts = Chem.MolFromSmarts("[#7]")
        if len(mol.GetSubstructMatches(nitrogen_smarts, maxMatches=2)) < 1:
            return False

        # Rotatable bonds: max 15.
        if Lipinski.NumRotatableBonds(mol) > 15:
            return False

        # Rings (smallest set of smallest rings): max 6.
        if Chem.rdmolops.GetSSSR(mol) > 6:
            return False

        # Passed every criterion.
        return True
| 3,265 | 31.66 | 85 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/operators/filter/filter_classes/filter_children_classes/lipinski_strict_filter.py | """Lipinski Strict
This runs a Strict Lipinski filter. Lipinski filter refines for orally
available drugs. It filters molecules by Molecular weight (MW), the number of
hydrogen donors, the number hydrogen acceptors, and the logP value.
To pass the Lipinski filter a molecule must be:
MW: Max 500 dalton
Number of H acceptors: Max 10
Number of H donors: Max 5
logP Max +5.0
If you use the Lipinski Filter please cite: C.A. Lipinski et al. Experimental
and computational approaches to estimate solubility and permeability in drug
discovery and development settings Advanced Drug Delivery Reviews, 46 (2001),
pp. 3-26
"""
import __future__
import rdkit
import rdkit.Chem as Chem
import rdkit.Chem.Lipinski as Lipinski
import rdkit.Chem.Crippen as Crippen
import rdkit.Chem.Descriptors as Descriptors
#Disable the unnecessary RDKit warnings
rdkit.RDLogger.DisableLog('rdApp.*')
from autogrow.operators.filter.filter_classes.parent_filter_class import ParentFilter
class LipinskiStrictFilter(ParentFilter):
    """
    Strict Lipinski filter, which refines for orally available drugs.

    It screens molecules by Molecular weight (MW), the number of hydrogen
    donors, the number of hydrogen acceptors, and the logP value. "Strict"
    means a ligand must pass every one of the requirements:
        MW: Max 500 dalton
        Number of H acceptors: Max 10
        Number of H donors: Max 5
        logP Max +5.0

    If you use the Lipinski Filter please cite: C.A. Lipinski et al.
    Experimental and computational approaches to estimate solubility and
    permeability in drug discovery and development settings Advanced Drug
    Delivery Reviews, 46 (2001), pp. 3-26

    Inputs:
    :param class ParentFilter: a parent class to initialize off
    """

    def run_filter(self, mol):
        """
        Apply the strict Rule-of-5 screen to a molecule; every one of the
        four constraints must hold.

        Inputs:
        :param rdkit.Chem.rdchem.Mol object mol: An rdkit mol object to be
            tested if it passes the filters
        Returns:
        :returns: bool bool: True if the mol passes the filter; False if it
            fails the filter
        """
        if Descriptors.ExactMolWt(mol) > 500:
            return False
        if Lipinski.NumHDonors(mol) > 5:
            return False
        if Lipinski.NumHAcceptors(mol) > 10:
            return False
        if Crippen.MolLogP(mol) > 5:
            return False
        # Passed all four constraints.
        return True
| 3,329 | 33.6875 | 85 | py |
reinforced-genetic-algorithm | reinforced-genetic-algorithm-main/autogrow/operators/filter/filter_classes/filter_children_classes/brenk_filter.py | """#BRENK filter
This will filter a ligand using the BRENK filter for lead-likeliness, by
matching common false positive molecules to the current mol..
This script relies on the RDKit predefined FilterCatalog. FilterCatalog is
maintained by RDKit.
If using the BRENK filter please cite: Brenk R et al. Lessons Learnt from
Assembling Screening Libraries for Drug Discovery for Neglected Diseases.
ChemMedChem 3 (2008) 435-444. doi:10.1002/cmdc.200700139.
"""
import __future__
from rdkit.Chem import FilterCatalog
from rdkit.Chem.FilterCatalog import FilterCatalogParams
from autogrow.operators.filter.filter_classes.parent_filter_class import ParentFilter
class BRENKFilter(ParentFilter):
    """
    BRENK screening filter for lead-likeness: a molecule fails when it
    matches any of the common false-positive substructures on the BRENK
    list.

    This relies on the RDKit predefined FilterCatalog, which is maintained
    by RDKit.

    If using the BRENK filter please cite: Brenk R et al. Lessons Learnt
    from Assembling Screening Libraries for Drug Discovery for Neglected
    Diseases. ChemMedChem 3 (2008) 435-444. doi:10.1002/cmdc.200700139.

    Inputs:
    :param class ParentFilter: a parent class to initialize off of.
    """

    def __init__(self):
        """Pre-build the BRENK filter catalog once per instance."""
        self.filters = self.get_filters()

    def get_filters(self):
        """
        Construct the RDKit FilterCatalog holding the BRENK substructures.

        Returns:
        :returns: rdkit.Chem.rdfiltercatalog.FilterCatalog filters: A set of
            RDKit Filters
        """
        params = FilterCatalogParams()
        params.AddCatalog(FilterCatalogParams.FilterCatalogs.BRENK)
        # The complete set of BRENK substructure filters.
        return FilterCatalog.FilterCatalog(params)

    def run_filter(self, mol):
        """
        Match the molecule against the BRENK false-positive substructures.

        Based on the PAINS filter implementation in RDKit described in
        http://rdkit.blogspot.com/2016/04/changes-in-201603-release-filtercatalog.html

        Inputs:
        :param rdkit.Chem.rdchem.Mol object mol: An rdkit mol object to be
            tested if it passes the filters
        Returns:
        :returns: bool bool: True if the mol passes the filter; False if it
            fails the filter
        """
        # Any match against the BRENK list means the molecule fails.
        return not self.filters.HasMatch(mol)
| 2,846 | 32.104651 | 86 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.