import sys
import cv2
import numpy as np
import gpyocr

plateCascade = cv2.CascadeClassifier("/home/pi/haarcascade_eu_plate_number.xml")
color = (255, 120, 255)
arg = float(sys.argv[1])
bbox_x = float(sys.argv[2])
bbox_y = float(sys.argv[3])
bbox_w = float(sys.argv[4])
bbox_h = float(sys.argv[5])
outText = ""


def getText(filename):
    # OCR the pre-processed plate image; psm=7 treats it as a single line of text
    outText = gpyocr.tesseract_ocr(filename, psm=7)[0]
    return outText


def resize(img):
    imgCopy = img.copy()
    resized_img = cv2.resize(imgCopy, (int(imgCopy.shape[1] * 220 / 100), int(imgCopy.shape[0] * 220 / 100)),
                             interpolation=cv2.INTER_AREA)
    return resized_img


def getMorph(img):
    # Binarize (Otsu on a blurred copy), clean up with erode/dilate morphology and
    # flood-fill from the border so regions touching the image edge are removed.
    kernel = np.ones((3, 3), np.uint8)
    _, morph = cv2.threshold(cv2.GaussianBlur(img, (5, 5), 0), 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    morph = cv2.erode(morph, kernel)
    morph = cv2.dilate(morph, kernel)
    morph = cv2.bitwise_not(morph)
    pad = cv2.copyMakeBorder(morph, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=255)
    h, w = pad.shape
    mask = np.zeros([h + 2, w + 2], np.uint8)
    img_flood = cv2.floodFill(pad, mask, (0, 0), 0, (5), (0), flags=8)[1]
    img_flood = img_flood[1:h - 1, 1:w - 1]
    morph = cv2.bitwise_not(img_flood)
    morph = cv2.dilate(morph, kernel)
    morph = cv2.erode(morph, kernel, iterations=3)
    morph = cv2.dilate(morph, kernel)
    return morph


def getBox(img):
    # Convert the fractional bounding-box arguments into pixel coordinates
    height, width, _ = img.shape
    return (bbox_x * width, bbox_y * height, bbox_w * width, bbox_h * height)


def detect(img):
    errorFlag = 1
    foundText = ""
    global bbox_x, bbox_y, bbox_w, bbox_h
    x = 0
    y = 0
    h = 0
    w = 0
    max_level = 0
    bbox_x, bbox_y, bbox_w, bbox_h = getBox(img)
    temp = img
    imgGray = cv2.cvtColor(temp, cv2.COLOR_BGR2GRAY)
    plates = plateCascade.detectMultiScale3(imgGray, arg, outputRejectLevels=True)
    if len(plates[0]) > 0:
        # Pair each detection with its level weight and keep the most confident
        # one that lies inside the allowed bounding box.
        totalPlates = zip(plates[0], plates[2])
        totalPlates = list(totalPlates)
        totalPlates.sort(key=lambda x: x[1][0], reverse=True)
        for plate in totalPlates:
            if plate[0][0] >= bbox_x and plate[0][1] >= bbox_y and plate[0][0] + plate[0][2] <= bbox_x + bbox_w and plate[0][1] + plate[0][3] <= bbox_y + bbox_h:
                x = plate[0][0]
                y = plate[0][1]
                w = plate[0][2]
                h = plate[0][3]
                errorFlag = 0
                break
    if errorFlag:
        raise Exception("No plates found!")
    else:
        roi_gray = imgGray[y:y + h, x:x + w]
        roi_color = img[y + 2:y + h - 2, x + 2:x + w - 2]
        cv2.rectangle(temp, (x, y), (x + w, y + h), color, 2)
        cv2.putText(temp, "Tablica", (x, y - 8), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, color, 2)
        cv2.imwrite("result.png", roi_color)
        resized = resize(roi_gray)
        morph = getMorph(resized)
        cv2.imwrite("morph.png", morph)
        foundText = getText("/home/pi/morph.png")
        cv2.imwrite("bbox.png", temp)
    return foundText


img = cv2.imread("image.png")
outText = detect(img)
if len(outText) == 0:
    raise Exception("Numbers not detected!")
else:
    print(outText)
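# Invocation sketch (the script name below is assumed; the arguments map to the
# command-line globals parsed at the top of the file):
#   python plate_ocr.py <scaleFactor> <bbox_x> <bbox_y> <bbox_w> <bbox_h>
#   e.g. python plate_ocr.py 1.1 0.1 0.1 0.8 0.8
# The bbox_* values are fractions of the image size describing the region in
# which a detected plate is accepted; image.png is read from the working directory.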
|
import flask
from flask import request, Response

app = flask.Flask(__name__)
app.config["DEBUG"] = True


@app.route('/service/sub')
def sub():
    qp = request.args
    va = qp.get('va')
    vb = qp.get('vb')
    result = float(va) - float(vb)
    print("VA: " + str(va) + " VB: " + str(vb) + " == " + str(result))
    return Response(str(result))


app.run(host='0.0.0.0')
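# Usage sketch (assumptions: the service listens on Flask's default port 5000
# and the third-party `requests` package is installed):
#
#   import requests
#   resp = requests.get("http://localhost:5000/service/sub",
#                       params={"va": 7, "vb": 2.5})
#   print(resp.text)  # -> 4.5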
|
"""
coding: utf-8
Created on 11/11/2020
@author: github.com/edrmonteiro
From: Hackerrank challenges
Language: Python
Title: Minimum Swaps 2
You are given an unordered array consisting of consecutive integers [1, 2, 3, ..., n] without any duplicates. You are allowed to swap any two elements. You need to find the minimum number of swaps required to sort the array in ascending order.
For example, given the array we perform the following steps:
i arr swap (indices)
0 [7, 1, 3, 2, 4, 5, 6] swap (0,3)
1 [2, 1, 3, 7, 4, 5, 6] swap (0,1)
2 [1, 2, 3, 7, 4, 5, 6] swap (3,4)
3 [1, 2, 3, 4, 7, 5, 6] swap (4,5)
4 [1, 2, 3, 4, 5, 7, 6] swap (5,6)
5 [1, 2, 3, 4, 5, 6, 7]
It took swaps to sort the array.
Function Description
Complete the function minimumSwaps in the editor below. It must return an integer representing the minimum number of swaps to sort the array.
minimumSwaps has the following parameter(s):
arr: an unordered array of integers
Input Format
The first line contains an integer, , the size of .
The second line contains space-separated integers .
Constraints
Output Format
Return the minimum number of swaps to sort the given array.
Sample Input 0
4
4 3 1 2
Sample Output 0
3
Explanation 0
Given array
After swapping we get
After swapping we get
After swapping we get
So, we need a minimum of swaps to sort the array in ascending order.
Sample Input 1
5
2 3 4 1 5
Sample Output 1
3
Explanation 1
Given array
After swapping we get
After swapping we get
After swapping we get
So, we need a minimum of swaps to sort the array in ascending order.
Sample Input 2
7
1 3 5 2 4 6 7
Sample Output 2
3
Explanation 2
Given array
After swapping we get
After swapping we get
After swapping we get
So, we need a minimum of swaps to sort the array in ascending order.
"""
def minimumSwaps(arr):
    # newArr = countShift(arr)
    # Cycle-following swaps: while position i does not hold the value i + 1,
    # swap arr[i] into its target slot (value - 1). Each swap places at least
    # one element correctly, which yields the minimum number of swaps.
    swaps = 0
    i = 0
    while True:
        if i != arr[i] - 1:
            item = arr[i]
            arr[i], arr[item - 1] = arr[item - 1], arr[i]
            swaps += 1
        if i == arr[i] - 1:
            i += 1
        if i == len(arr):
            break
    return swaps


arr = [7, 1, 3, 2, 4, 5, 6]
print(minimumSwaps(arr))
stop = True
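# Quick sanity checks against the sample cases quoted in the docstring above
# (a small sketch; note that minimumSwaps sorts its argument in place).
assert minimumSwaps([4, 3, 1, 2]) == 3
assert minimumSwaps([2, 3, 4, 1, 5]) == 3
assert minimumSwaps([1, 3, 5, 2, 4, 6, 7]) == 3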
|
# ##############################################################################
# This file is part of df_websockets #
# #
# Copyright (C) 2020 Matthieu Gallet <github@19pouces.net> #
# All Rights Reserved #
# #
# You may use, distribute and modify this code under the #
# terms of the (BSD-like) CeCILL-B license. #
# #
# You should have received a copy of the CeCILL-B license with #
# this file. If not, please visit: #
# https://cecill.info/licences/Licence_CeCILL-B_V1-en.txt (English) #
# or https://cecill.info/licences/Licence_CeCILL-B_V1-fr.txt (French) #
# #
# ##############################################################################
import logging
from typing import Any, Dict
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.context_processors import PermWrapper
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages import DEFAULT_LEVELS
from django.db.models import Q
from django.http import HttpRequest
from django.utils import translation
from django.utils.translation import get_language_from_request
logger = logging.getLogger("df_websockets.signals")
class WindowInfoMiddleware:
"""Base class for the WindowInfo middlewares."""
def from_request(self, request: HttpRequest, window_info):
"""add attributes to a window_info instance created from a HttpRequest"""
pass
def new_window_info(self, window_info):
"""add attributes to a plain new window_info instance"""
pass
def to_dict(self, window_info) -> Dict[str, Any]:
"""serialize some window_info attributes as a dict"""
return {}
def from_dict(self, window_info, values: Dict[str, Any]):
"""
update a newly-created window_info instance with data stored in `values`.
used for deserializing a previously serialized window_info object.
:param window_info: WindowInfo instance to update
:param values: a dict representing a serialized window_info
:return:
"""
pass
def get_context(self, window_info):
"""get context variables from the window_info instance.
used in :meth:`df_websockets.window_info:render_to_string`
"""
return {}
def install_methods(self, window_info_cls):
"""add new methods to the WindowInfo class.
Used for adding methods related to the user management"""
pass
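# Illustrative only: a minimal custom middleware sketch (not part of df_websockets)
# showing how the hooks defined above fit together. The "RemoteAddrMiddleware"
# name and the remote_addr attribute are hypothetical; the pattern mirrors the
# concrete middlewares that follow.
class RemoteAddrMiddleware(WindowInfoMiddleware):
    """example middleware propagating REMOTE_ADDR from the request to the WindowInfo"""

    def from_request(self, request: HttpRequest, window_info):
        window_info.remote_addr = request.META.get("REMOTE_ADDR", "")

    def new_window_info(self, window_info):
        window_info.remote_addr = ""

    def to_dict(self, window_info) -> Dict[str, Any]:
        return {"remote_addr": window_info.remote_addr}

    def from_dict(self, window_info, values: Dict[str, Any]):
        window_info.remote_addr = values.get("remote_addr", "")

    def get_context(self, window_info):
        return {"df_remote_addr": window_info.remote_addr}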
class WindowKeyMiddleware(WindowInfoMiddleware):
"""handle the unique ID generated for each :class:`django.http.request.HttpRequest` and copy
it to the :class:`WindowInfo` object"""
def from_request(self, request, window_info):
# noinspection PyTypeChecker
window_info.window_key = getattr(request, "window_key", None)
def new_window_info(self, window_info):
window_info.window_key = None
def to_dict(self, window_info):
return {"window_key": window_info.window_key}
def from_dict(self, window_info, values):
window_info.window_key = values.get("window_key")
def get_context(self, window_info):
# noinspection PyTypeChecker
return {"df_window_key": getattr(window_info, "window_key")}
class DjangoAuthMiddleware(WindowInfoMiddleware):
"""handle attributes related to the :mod:`django.contrib.auth` framework"""
def from_request(self, request, window_info):
self.new_window_info(window_info)
assert isinstance(request, HttpRequest)
# auth and perms part
# noinspection PyTypeChecker
user = getattr(request, "user", None)
window_info._user = user
window_info._perms = None
window_info._template_perms = None
window_info.user_agent = request.META.get("HTTP_USER_AGENT", "")
window_info.csrf_cookie = request.COOKIES.get("csrftoken", "")
if user and user.is_authenticated:
window_info.user_pk = user.pk
window_info.username = user.get_username()
window_info.is_superuser = user.is_superuser
window_info.is_staff = user.is_staff
window_info.is_active = user.is_active
window_info.user_set = True
else:
window_info.user_pk = None
window_info.username = None
window_info.is_superuser = False
window_info.is_staff = False
window_info.is_active = False
window_info.user_set = False
def new_window_info(self, window_info):
window_info._user = None
window_info._perms = None
window_info._template_perms = None
window_info.user_agent = ""
window_info.user_pk = None
window_info.username = None
window_info.is_superuser = False
window_info.is_staff = False
window_info.user_set = False
window_info.is_active = False
window_info.csrf_cookie = ""
def to_dict(self, window_info):
# noinspection PyProtectedMember
return {
"user_pk": window_info.user_pk,
"username": window_info.username,
"is_superuser": window_info.is_superuser,
"is_staff": window_info.is_staff,
"is_active": window_info.is_active,
"csrf_cookie": window_info.csrf_cookie,
"perms": list(window_info._perms)
if isinstance(window_info._perms, set)
else None,
"user_agent": window_info.user_agent,
"user_set": window_info.user_set,
}
def from_dict(self, window_info, values):
window_info._user = None
window_info.csrf_cookie = values.get("csrf_cookie")
window_info.user_pk = values.get("user_pk")
window_info.user_set = values.get("user_set")
if window_info.user_pk and not window_info.user_set:
user = get_user_model().objects.filter(pk=window_info.user_pk).first()
window_info.username = user.username
window_info.is_superuser = user.is_superuser
window_info.is_staff = user.is_staff
window_info.is_active = user.is_active
else:
window_info.username = values.get("username")
window_info.is_superuser = values.get("is_superuser")
window_info.is_staff = values.get("is_staff")
window_info.is_active = values.get("is_active")
window_info.is_authenticated = bool(window_info.user_pk)
window_info.is_anonymous = not bool(window_info.user_pk)
window_info._perms = (
set(values["perms"]) if values.get("perms") is not None else None
)
window_info._template_perms = None
window_info.user_agent = values.get("user_agent")
def get_context(self, window_info):
"""provide the same context data as the :mod:`django.contrib.auth.context_processors`:
* `user`: a user or :class:`django.contrib.auth.models.AnonymousUser`
* `perms`, with the same meaning
"""
user = window_info.user or AnonymousUser()
return {
"user": user,
"perms": PermWrapper(user),
"csrf_token": window_info.csrf_cookie,
}
def install_methods(self, window_info_cls):
def get_user(req):
"""return the user object if authenticated, else return `None`"""
# noinspection PyProtectedMember
if req._user or req.user_pk is None:
# noinspection PyProtectedMember
return req._user
user = get_user_model().objects.filter(pk=req.user_pk).first()
if user:
req._user = user
return req._user
return None
def has_perm(req, perm):
"""return true is the user has the required perm.
>>> from df_websockets.window_info import WindowInfo
>>> r = WindowInfo.from_dict({'username': 'username', 'perms':['app_label.codename']})
>>> r.has_perm('app_label.codename')
True
:param req: WindowInfo
:param perm: name of the permission ("app_label.codename")
:return: True if the user has the required perm
:rtype: :class:`bool`
"""
return perm in req.perms
def get_perms(req):
""":class:`set` of all perms of the user (set of "app_label.codename")"""
if not req.user_pk:
return set()
elif req._perms is not None:
return req._perms
from django.contrib.auth.models import Permission
if req.is_superuser:
query = Permission.objects.all()
else:
query = Permission.objects.filter(
Q(user__pk=req.user_pk) | Q(group__user__pk=req.user_pk)
)
req._perms = set(
"%s.%s" % p
for p in query.select_related("content_type").values_list(
"content_type__app_label", "codename"
)
)
return req._perms
window_info_cls.user = property(get_user)
window_info_cls.has_perm = has_perm
window_info_cls.perms = property(get_perms)
class BrowserMiddleware(WindowInfoMiddleware):
"""add attributes related to the browser (currently only the HTTP_USER_AGENT header)"""
def from_request(self, request, window_info):
window_info.user_agent = request.META.get("HTTP_USER_AGENT", "")
def new_window_info(self, window_info):
window_info.user_agent = ""
def to_dict(self, window_info):
return {"user_agent": window_info.user_agent}
def from_dict(self, window_info, values):
window_info.user_agent = values.get("user_agent", "")
def get_context(self, window_info):
return {
"df_user_agent": window_info.user_agent,
"messages": [],
"DEFAULT_MESSAGE_LEVELS": DEFAULT_LEVELS,
}
class Djangoi18nMiddleware(WindowInfoMiddleware):
"""Add attributes required for using i18n-related functions."""
def from_request(self, request, window_info):
# noinspection PyTypeChecker
if getattr(request, "session", None):
window_info.language_code = get_language_from_request(request)
else:
window_info.language_code = settings.LANGUAGE_CODE
def new_window_info(self, window_info):
window_info.language_code = None
def to_dict(self, window_info):
return {"language_code": window_info.language_code}
def from_dict(self, window_info, values):
window_info.language_code = values.get("language_code")
def get_context(self, window_info):
return {
"LANGUAGES": settings.LANGUAGES,
"LANGUAGE_CODE": window_info.language_code,
"LANGUAGE_BIDI": translation.get_language_bidi(),
}
|
# 987. Vertical Order Traversal of a Binary Tree (LeetCode)
#
# 20200917
# huao
from typing import List
from functools import cmp_to_key


def cmp(x, y):
    # x and y are [node, [column, row]] pairs: order by column ascending,
    # then by row descending (rows decrease going down the tree), then by
    # node value ascending.
    xx = x[1][0]
    xy = x[1][1]
    yx = y[1][0]
    yy = y[1][1]
    xnode = x[0].val
    ynode = y[0].val
    if xx < yx:
        return -1
    elif xx > yx:
        return 1
    else:
        if xy > yy:
            return -1
        elif xy < yy:
            return 1
        else:
            if xnode > ynode:
                return 1
            elif xnode == ynode:
                return 0
            else:
                return -1


class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def verticalTraversal(self, root: TreeNode) -> List[List[int]]:
        if root is None:
            return []
        nodeList = []
        stack = [[root, [0, 0]]]
        # Iterative DFS collecting every node with its (column, row) position.
        while stack != []:
            node = stack.pop()
            nodeList.append(node)
            if node[0].left is not None:
                stack.append([node[0].left, [node[1][0] - 1, node[1][1] - 1]])
            if node[0].right is not None:
                stack.append([node[0].right, [node[1][0] + 1, node[1][1] + -1]])
        nodeList.sort(key=cmp_to_key(cmp))
        # Group the sorted nodes by column; dict insertion order keeps the
        # columns in ascending order.
        numList = {}
        for node, [x, y] in nodeList:
            if x in numList.keys():
                numList[x].append(node.val)
            else:
                numList[x] = [node.val]
        return list(numList.values())
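# Usage sketch for the LeetCode example tree [3, 9, 20, null, null, 15, 7];
# the expected vertical order is [[9], [3, 15], [20], [7]].
if __name__ == "__main__":
    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    print(Solution().verticalTraversal(root))  # [[9], [3, 15], [20], [7]]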
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
import time
import os
from six.moves import cPickle
import torch.backends.cudnn as cudnn
import yaml
import opts
import misc.eval_utils
import misc.utils as utils
import misc.AttModel as AttModel
import yaml
# from misc.rewards import get_self_critical_reward
import torchvision.transforms as transforms
import pdb
import argparse
import torch.nn.functional as F
import matplotlib.pyplot as plt
from PIL import Image
plt.switch_backend('agg')
import json
def demo(opt):
model.eval()
#########################################################################################
# eval begins here
#########################################################################################
data_iter_val = iter(dataloader_val)
loss_temp = 0
start = time.time()
num_show = 0
predictions = []
count = 0
for step in range(1000):
data = data_iter_val.next()
img, iseq, gts_seq, num, proposals, bboxs, box_mask, img_id = data
# if img_id[0] != 134688:
# continue
# # for i in range(proposals.size(1)): print(opt.itoc[proposals[0][i][4]], i)
# # list1 = [6, 10]
# list1 = [0, 1, 10, 2, 3, 4, 5, 6, 7, 8, 9]
# proposals = proposals[:,list1]
# num[0,1] = len(list1)
proposals = proposals[:,:max(int(max(num[:,1])),1),:]
input_imgs.resize_(img.size()).copy_(img)
input_seqs.resize_(iseq.size()).copy_(iseq)
gt_seqs.resize_(gts_seq.size()).copy_(gts_seq)
input_num.resize_(num.size()).copy_(num)
input_ppls.resize_(proposals.size()).copy_(proposals)
gt_bboxs.resize_(bboxs.size()).copy_(bboxs)
mask_bboxs.resize_(box_mask.size()).copy_(box_mask)
input_imgs.resize_(img.size()).copy_(img)
eval_opt = {'sample_max':1, 'beam_size': opt.beam_size, 'inference_mode' : True, 'tag_size' : opt.cbs_tag_size}
seq, bn_seq, fg_seq, _, _, _ = model._sample(input_imgs, input_ppls, input_num, eval_opt)
sents, det_idx, det_word = utils.decode_sequence_det(dataset_val.itow, dataset_val.itod, dataset_val.ltow, dataset_val.itoc, dataset_val.wtod, \
seq, bn_seq, fg_seq, opt.vocab_size, opt)
if opt.dataset == 'flickr30k':
im2show = Image.open(os.path.join(opt.image_path, '%d.jpg' % img_id[0])).convert('RGB')
else:
if os.path.isfile(os.path.join(opt.image_path, 'val2014/COCO_val2014_%012d.jpg' % img_id[0])):
im2show = Image.open(os.path.join(opt.image_path, 'val2014/COCO_val2014_%012d.jpg' % img_id[0])).convert('RGB')
else:
im2show = Image.open(os.path.join(opt.image_path, 'train2014/COCO_train2014_%012d.jpg' % img_id[0])).convert('RGB')
w, h = im2show.size
rest_idx = []
for i in range(proposals[0].shape[0]):
if i not in det_idx:
rest_idx.append(i)
if len(det_idx) > 0:
# for visulization
proposals = proposals[0].numpy()
proposals[:,0] = proposals[:,0] * w / float(opt.image_crop_size)
proposals[:,2] = proposals[:,2] * w / float(opt.image_crop_size)
proposals[:,1] = proposals[:,1] * h / float(opt.image_crop_size)
proposals[:,3] = proposals[:,3] * h / float(opt.image_crop_size)
cls_dets = proposals[det_idx]
rest_dets = proposals[rest_idx]
# fig = plt.figure()
# fig = plt.figure(frameon=False)
# ax = plt.Axes(fig, [0., 0., 1., 1.])
fig = plt.figure(frameon=False)
# fig.set_size_inches(5,5*h/w)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
a=fig.gca()
a.set_frame_on(False)
a.set_xticks([]); a.set_yticks([])
plt.axis('off')
plt.xlim(0,w); plt.ylim(h,0)
# fig, ax = plt.subplots(1)
# show other box in grey.
plt.imshow(im2show)
if len(rest_idx) > 0:
for i in range(len(rest_dets)):
ax = utils.vis_detections(ax, dataset_val.itoc[int(rest_dets[i,4])], rest_dets[i,:5], i, 1)
if len(det_idx) > 0:
for i in range(len(cls_dets)):
ax = utils.vis_detections(ax, dataset_val.itoc[int(cls_dets[i,4])], cls_dets[i,:5], i, 0)
# plt.axis('off')
# plt.axis('tight')
# plt.tight_layout()
fig.savefig('visu/%d.jpg' %(img_id[0].item()), bbox_inches='tight', pad_inches=0, dpi=150)
print(str(img_id[0].item()) + ': ' + sents[0])
entry = {'image_id': img_id[0].item(), 'caption': sents[0]}
predictions.append(entry)
return predictions
####################################################################################
# Main
####################################################################################
# initialize the data holder.
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--start_from', type=str, default='', help='')
parser.add_argument('--load_best_score', type=int, default=1,
help='Do we load previous best score when resuming training.')
parser.add_argument('--id', type=str, default='',
help='an id identifying this run/job. used in cross-val and appended when writing progress files')
parser.add_argument('--image_path', type=str, default='/home/fanfu/newdisk/dataset/coco/2014',
help='path to the h5file containing the image data')
parser.add_argument('--cbs', type=bool, default=False,
help='whether use constraint beam search.')
parser.add_argument('--cbs_tag_size', type=int, default=3,
help='whether use constraint beam search.')
parser.add_argument('--cbs_mode', type=str, default='all',
help='which cbs mode to use in the decoding stage. cbs_mode: all|unique|novel')
parser.add_argument('--det_oracle', type=bool, default=False,
help='whether use oracle bounding box.')
parser.add_argument('--cnn_backend', type=str, default='res101',
help='res101 or vgg16')
parser.add_argument('--data_path', type=str, default='')
parser.add_argument('--beam_size', type=int, default=1)
args = parser.parse_args()
infos = {}
histories = {}
if args.start_from is not None:
if args.load_best_score == 1:
model_path = os.path.join(args.start_from, 'model-best.pth')
info_path = os.path.join(args.start_from, 'infos_'+args.id+'-best.pkl')
else:
model_path = os.path.join(args.start_from, 'model.pth')
info_path = os.path.join(args.start_from, 'infos_'+args.id+'.pkl')
# import pdb
# pdb.set_trace()
# open old infos and check if models are compatible
with open(info_path, 'rb') as f:
infos = cPickle.load(f)
opt = infos['opt']
opt.image_path = args.image_path
opt.cbs = args.cbs
opt.cbs_tag_size = args.cbs_tag_size
opt.cbs_mode = args.cbs_mode
opt.det_oracle = args.det_oracle
opt.cnn_backend = args.cnn_backend
opt.data_path = args.data_path
opt.beam_size = args.beam_size
else:
print("please specify the model path...")
pdb.set_trace()
cudnn.benchmark = True
if opt.dataset == 'flickr30k':
from misc.dataloader_flickr30k import DataLoader
else:
from misc.dataloader_coco import DataLoader
####################################################################################
# Data Loader
####################################################################################
opt.data_path = 'data'
dataset_val = DataLoader(opt, split='test')
dataloader_val = torch.utils.data.DataLoader(dataset_val, batch_size=1,
shuffle=False, num_workers=0)
input_imgs = torch.FloatTensor(1)
input_seqs = torch.LongTensor(1)
input_ppls = torch.FloatTensor(1)
gt_bboxs = torch.FloatTensor(1)
mask_bboxs = torch.ByteTensor(1)
gt_seqs = torch.LongTensor(1)
input_num = torch.LongTensor(1)
if opt.cuda:
input_imgs = input_imgs.cuda()
input_seqs = input_seqs.cuda()
gt_seqs = gt_seqs.cuda()
input_num = input_num.cuda()
input_ppls = input_ppls.cuda()
gt_bboxs = gt_bboxs.cuda()
mask_bboxs = mask_bboxs.cuda()
input_imgs = Variable(input_imgs)
input_seqs = Variable(input_seqs)
gt_seqs = Variable(gt_seqs)
input_num = Variable(input_num)
input_ppls = Variable(input_ppls)
gt_bboxs = Variable(gt_bboxs)
mask_bboxs = Variable(mask_bboxs)
####################################################################################
# Build the Model
####################################################################################
opt.vocab_size = dataset_val.vocab_size
opt.detect_size = dataset_val.detect_size
opt.seq_length = opt.seq_length
opt.fg_size = dataset_val.fg_size
opt.fg_mask = torch.from_numpy(dataset_val.fg_mask).byte()
opt.glove_fg = torch.from_numpy(dataset_val.glove_fg).float()
opt.glove_clss = torch.from_numpy(dataset_val.glove_clss).float()
opt.st2towidx = torch.from_numpy(dataset_val.st2towidx).long()
opt.itow = dataset_val.itow
opt.itod = dataset_val.itod
opt.ltow = dataset_val.ltow
opt.itoc = dataset_val.itoc
if opt.att_model == 'topdown':
model = AttModel.TopDownModel(opt)
elif opt.att_model == 'att2in2':
model = AttModel.Att2in2Model(opt)
if opt.decode_noc:
model._reinit_word_weight(opt, dataset_val.ctoi, dataset_val.wtoi)
if args.start_from != None:
# opt.learning_rate = saved_model_opt.learning_rate
print('Loading the model %s...' %(model_path))
model.load_state_dict(torch.load(model_path))
if os.path.isfile(os.path.join(args.start_from, 'histories_'+opt.id+'.pkl')):
with open(os.path.join(args.start_from, 'histories_'+opt.id+'.pkl'), 'rb') as f:
histories = cPickle.load(f)
if opt.cuda:
model.cuda()
predictions = demo(opt)
# import pdb
# pdb.set_trace()
print('saving...')
json.dump(predictions, open('visu/visu.json', 'w'))
|
"""Main module."""
from enum import Enum, auto
from .parser import parse
# def parse(filepath: str):
# with open('tests/erds/one.er') as f:
# lxer.input(f.read())
# for tok in parser:
# print(tok)
class Option(Enum):
    LABEL = 'label'
    BG_COLOR = 'bgcolor'
    COLOR = 'color'
    FONT_FACE = 'font'
    FONT_SIZE = 'size'
    BORDER = 'border'
    BORDER_COLOR = 'border-color'
    CELL_SPACING = 'cellspacing'
    CELL_BORDER = 'cellborder'
    CELL_PADDING = 'cellpadding'
    TEXT_ALIGNMENT = 'text-alignment'


DEFAULT_TITLE_OPTIONS = [(Option.FONT_SIZE, 30)]
DEFAULT_HEADER_OPTIONS = [(Option.FONT_SIZE, 16)]
DEFAULT_ENTITY_OPTIONS = [
    (Option.BORDER, 0),
    (Option.CELL_BORDER, 1),
    (Option.CELL_SPACING, 0),
    (Option.CELL_PADDING, 4),
    (Option.FONT_FACE, 'Helvetica'),
]
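# Illustrative only: the defaults above are (Option, value) pairs, so callers
# can collapse them into a plain lookup table if that is more convenient, e.g.:
#   entity_defaults = dict(DEFAULT_ENTITY_OPTIONS)
#   assert entity_defaults[Option.CELL_PADDING] == 4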
|
# Lucas numbers
def lucas():
    terms = int(input("Number of terms?"))
    n1, n2 = 2, 1
    counter = 0
    if terms <= 0:
        print("Please enter a positive integer")
    elif terms == 1:
        print(n1)
    elif terms == 2:
        print(n1)
        print(n2)
    else:
        while counter < terms:
            print(n1)
            nth = n1 + n2
            n1 = n2
            n2 = nth
            counter += 1


lucas()
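# Expected behaviour sketch: with the seeds n1, n2 = 2, 1, asking for seven
# terms prints the Lucas numbers 2, 1, 3, 4, 7, 11, 18.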
|
#
# Copyright (c) 2021 Incisive Technology Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This program generates all the 'model' package & api version modules
This program works off a specified Kubernetes swagger file and from there
builds out the model sub-package of hikaru. It first removes all existing
content of this package and then re-creates it from scratch. If the swagger
file hasn't changed, then these generated modules will be identical, otherwise
they will contain the new contents from the swagger file.
Usage is:
python build.py <path to swagger file>
The assumption is to create the 'build' package in the cwd.
Just some notes to remember how this all works:
For the collection types, you must parameterize the
types from typing (List, Dict, etc).
If you want an optional type, you can use typing.Optional,
which turns into Union[T, NoneType].
You can use dataclasses.field(default_factory=list) to indicate
where optional args should be defaulted to new empty lists.
You can acquire the fields of class X with
dataclasses.fields(X). The type annotation for the field is
stored in the 'type' attribute.
You now want to understand Union and List types. There are two
different ways to do this; the change comes at Py3.8. This
are both performed on the type object found in the above
mentioned attribute:
Operation Pre-3.8 3.8 and later
========================================================
get origin __origin__ typing.get_origin()
get args __args__ typing.get_args()
inspect.signature() can give the argument signature for a
method; can find how many required positional args there are.
"""
from pathlib import Path
import sys
from typing import Union, List, Optional
import json
import networkx
from hikaru.naming import process_swagger_name, full_swagger_name
from hikaru.meta import HikaruBase, HikaruDocumentBase
python_reserved = {"except", "continue", "from"}
types_map = {"boolean": "bool",
"integer": "int",
"string": "str",
"float": "float",
"number": "float"}
NoneType = type(None)
unversioned_module_name = "unversioned"
def _clean_directory(dirpath: str, including_dirpath=False):
path = Path(dirpath)
for p in path.iterdir():
if p.is_file():
p.unlink()
elif p.is_dir():
_clean_directory(str(p), including_dirpath=True)
if including_dirpath:
p.rmdir()
_package_init_code = \
"""
try:
from .v1 import *
except ImportError:
pass"""
def prep_package(directory: str):
"""
This function empties the directory named 'directory', creating it if needed
:param directory: string; name of an empty directory to create. Creates it
if needed, and removes any existing content if it's already there.
"""
path = Path(directory)
if not path.exists():
path.mkdir(parents=True)
else:
if not path.is_dir():
path.unlink()
path.mkdir(parents=True)
# once here, the directory exists and is a directory; clean it out
_clean_directory(str(path))
init = path / "__init__.py"
init.touch()
f = init.open('w')
print(_module_docstring, file=f)
print(_package_init_code, file=f)
print(file=f)
output_footer(stream=f)
_copyright_string = \
"""#
# Copyright (c) 2021 Incisive Technology Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE."""
_module_docstring = f'''{_copyright_string}
"""
DO NOT EDIT THIS FILE!
This module is automatically generated using the hikaru.build program that turns
a Kubernetes swagger spec into the code for the hikaru.model module.
"""
'''
def output_boilerplate(stream=sys.stdout, other_imports=None):
"""
Write out the standard module header imports
:param stream: where to write; sys.stdout default
:param other_imports: None, or a list of strings to generate import
statements for
"""
print(_module_docstring, file=stream)
print(file=stream)
print(f"from hikaru.meta import {HikaruBase.__name__}, {HikaruDocumentBase.__name__}",
file=stream)
print("from typing import Optional, List, Dict", file=stream)
print("from dataclasses import dataclass, field", file=stream)
if other_imports is not None:
for line in other_imports:
print(line, file=stream)
print(file=stream)
print(file=stream)
_module_footer = '''globs = dict(globals())
__all__ = [c.__name__ for c in globs.values()
if type(c) == type]
del globs'''
def output_footer(stream=sys.stdout):
"""
Write out the footer that defines '__all__'
:param stream: file to write the footer to
"""
print(_module_footer, file=stream)
def build_digraph(all_classes: dict) -> networkx.DiGraph:
dg = networkx.DiGraph()
for cd in all_classes.values():
assert isinstance(cd, ClassDescriptor)
deps = cd.depends_on(include_external=True)
dg.add_node(cd)
for c in deps:
dg.add_edge(cd, c)
return dg
def write_classes(class_list, stream=sys.stdout):
for dc in class_list:
print(dc.as_python_class(), file=stream)
def load_stable(swagger_file_path: str) -> NoneType:
f = open(swagger_file_path, 'r')
d = json.load(f)
for k, v in d["definitions"].items():
if 'apiextensions' not in k:
group, version, name = process_swagger_name(k)
mod_def = get_module_def(version)
cd = mod_def.get_class_desc(name)
if cd is None:
cd = ClassDescriptor(k, v)
mod_def.save_class_desc(cd)
else:
cd.update(v)
cd.process_properties()
def write_modules(pkgpath: str):
pkg = Path(pkgpath)
d = module_defs()
mod_names = []
base = d.get(None)
# first, create the module with un-versioned object defs
if base:
unversioned = pkg / f'{unversioned_module_name}.py'
assert isinstance(base, ModuleDef)
f = unversioned.open('w')
base.as_python_module(stream=f)
f.close()
# next, write out all the version-specific object defs
for k, md in d.items():
if k is not None:
assert isinstance(md, ModuleDef)
mod_names.append(md.version)
mod = pkg / f'{md.version}.py'
f = mod.open('w')
md.as_python_module(stream=f)
f.close()
# finally, capture the names of all the version modules in version module
versions = pkg / 'versions.py'
f = versions.open('w')
print(f"versions = {str(mod_names)}", file=f)
f.close()
class ClassDescriptor(object):
def __init__(self, key, d):
self.full_name = full_swagger_name(key)
group, version, name = process_swagger_name(self.full_name)
self.short_name = name
self.group = group
self.version = version
self.description = None
self.all_properties = None
self.required = None
self.type = None
self.is_subclass_of = None
self.is_document = False
self.required_props = []
self.optional_props = []
self.update(d)
def update(self, d: dict):
self.description = d.get("description")
self.all_properties = d.get("properties", {})
self.required = d.get("required", [])
self.type = d.get('type', None)
if self.type in types_map:
self.is_subclass_of = types_map[self.type]
_doc_markers = ("apiVersion", "kind", "metadata")
def process_properties(self):
self.required_props = []
self.optional_props = []
seen_markers = set(self._doc_markers)
if self.is_subclass_of is None: # then there are properties
for k, v in self.all_properties.items():
if k in seen_markers:
seen_markers.remove(k)
if not seen_markers:
self.is_document = True
fd = PropertyDescriptor(self, k, v)
if k in self.required:
self.required_props.append(fd)
else:
self.optional_props.append(fd)
self.required_props.sort(key=lambda x: x.name)
self.optional_props.sort(key=lambda x: x.name)
@staticmethod
def split_line(line, prefix: str = " ", hanging_indent: str = "") -> List[str]:
parts = []
if line is not None:
words = line.split()
current_line = [prefix]
for w in words:
if sum(len(s) for s in current_line) + len(current_line) + len(w) > 90:
parts.append(" ".join(current_line))
current_line = [prefix]
if hanging_indent:
current_line.append(hanging_indent)
current_line.append(w)
else:
if current_line:
parts.append(" ".join(current_line))
return parts
def as_python_class(self) -> str:
lines = list()
# start of class statement
if self.is_subclass_of is not None:
base = self.is_subclass_of
else:
# then it is to be a dataclass
base = (HikaruDocumentBase.__name__
if self.is_document else
HikaruBase.__name__)
lines.append("@dataclass")
lines.append(f"class {self.short_name}({base}):")
# now the docstring
ds_parts = [' """']
ds_parts.extend(self.split_line(self.description))
ds_parts.append("")
ds_parts.append(f' Full name: {self.full_name.split("/")[-1]}')
if self.is_subclass_of is None:
ds_parts.append("")
ds_parts.append(" Attributes:")
for p in self.required_props:
ds_parts.extend(self.split_line(f'{p.name}: {p.description}',
hanging_indent=" "))
for p in (x for x in self.optional_props if x.container_type is None):
ds_parts.extend(self.split_line(f'{p.name}: {p.description}',
hanging_indent=" "))
for p in (x for x in self.optional_props if x.container_type is not None):
ds_parts.extend(self.split_line(f'{p.name}: {p.description}',
hanging_indent=" "))
ds_parts.append(' """')
lines.extend(ds_parts)
if self.is_subclass_of is None:
if self.required_props or self.optional_props:
lines.append("")
if self.is_document:
lines.append(f" _version = '{self.version}'")
for p in self.required_props:
lines.append(p.as_python_typeanno(True))
for p in (x for x in self.optional_props if x.container_type is None):
lines.append(p.as_python_typeanno(False))
for p in (x for x in self.optional_props if x.container_type is not None):
lines.append(p.as_python_typeanno(False))
lines.append("")
lines.append("")
return "\n".join(lines)
def depends_on(self, include_external=False) -> list:
r = [p.depends_on() for p in self.required_props]
deps = [p for p in r
if p is not None]
o = [p.depends_on() for p in self.optional_props]
deps.extend(p for p in o
if p is not None and (True
if include_external else
self.version == p.version))
return deps
class ModuleDef(object):
def __init__(self, version):
self.version = version
self.all_classes = {}
def get_class_desc(self, sname: str) -> Optional[ClassDescriptor]:
return self.all_classes.get(sname)
def save_class_desc(self, class_def: ClassDescriptor):
assert isinstance(class_def, ClassDescriptor)
self.all_classes[class_def.short_name] = class_def
def external_versions_used(self) -> List[str]:
ext_ver = set()
for cd in self.all_classes.values():
for dep in cd.depends_on(include_external=True):
assert isinstance(dep, ClassDescriptor)
if dep.version != self.version:
ext_ver.add(dep.version)
return list(ext_ver)
def as_python_module(self, stream=sys.stdout):
externals = self.external_versions_used()
other_imports = []
if None in externals:
other_imports.append(f'from .{unversioned_module_name} import *')
externals.remove(None)
externals.sort()
all_classes = {}
for ext in externals:
emod = _all_module_defs[ext]
assert isinstance(emod, ModuleDef)
all_classes.update(emod.all_classes)
all_classes.update(self.all_classes)
g = build_digraph(all_classes)
output_boilerplate(stream=stream, other_imports=other_imports)
traversal = list(reversed(list(networkx.topological_sort(g))))
if not traversal:
traversal = list(self.all_classes.values())
write_classes(traversal, stream=stream)
output_footer(stream=stream)
_all_module_defs = {}
def get_module_def(version) -> ModuleDef:
md = _all_module_defs.get(version)
if md is None:
md = _all_module_defs[version] = ModuleDef(version)
return md
def module_defs() -> dict:
return dict(_all_module_defs)
class PropertyDescriptor(object):
def __init__(self, containing_class, name, d):
"""
capture the information
:param containing_class:
:param d:
"""
self.name = f'{name}_' if name in python_reserved else name
self.containing_class = containing_class
self.description = d.get('description', "")
# all possible attributes that could be populated
self.container_type = self.item_type = self.prop_type = None
ctype = d.get("type")
if ctype == "array":
self.container_type = list
items = d["items"]
self.item_type = items.get('type')
if self.item_type is None:
# then it is a list of objects; possibly of one of the defs
ref = items["$ref"] # should become get when we know this is true
group, version, short_name = process_swagger_name(ref)
fullname = full_swagger_name(ref)
mod_def = get_module_def(version)
item_ref = mod_def.get_class_desc(short_name)
if item_ref is None:
item_ref = ClassDescriptor(fullname, {}) # make a placeholder
mod_def.save_class_desc(item_ref)
self.item_type = item_ref
elif self.item_type in types_map:
self.item_type = types_map[self.item_type]
else:
raise TypeError(f"Unknown type of {self.item_type} in {self.name}")
elif ctype == "object":
self.container_type = dict
else:
self.container_type = None
if self.container_type is None:
if ctype in types_map:
self.prop_type = types_map[ctype]
else:
group, version, short_name = process_swagger_name(d["$ref"])
fullname = full_swagger_name(d["$ref"])
mod_def = get_module_def(version)
ref_class = mod_def.get_class_desc(short_name)
if ref_class is None:
ref_class = ClassDescriptor(fullname, {})
mod_def.save_class_desc(ref_class)
self.prop_type = ref_class
@staticmethod
def as_required(anno: str, as_required: bool) -> str:
return anno if as_required else f"Optional[{anno}]"
def depends_on(self) -> Union[ClassDescriptor, NoneType]:
result = None
if isinstance(self.item_type, ClassDescriptor):
result = self.item_type
elif isinstance(self.prop_type, ClassDescriptor):
result = self.prop_type
return result
def as_python_typeanno(self, as_required: bool) -> str:
parts = [" ", self.name, ": "]
if self.container_type is None:
# then a straight-up type, either scalar or another object
if isinstance(self.prop_type, str):
parts.append(self.as_required(self.prop_type, as_required))
elif isinstance(self.prop_type, ClassDescriptor):
parts.append(self.as_required(self.prop_type.short_name,
as_required))
elif self.container_type is list:
if isinstance(self.item_type, ClassDescriptor):
parts.append(self.as_required(f"List[{self.item_type.short_name}]",
as_required))
else:
parts.append(self.as_required(f"List[{self.item_type}]",
as_required))
elif self.container_type is dict:
parts.append(self.as_required("Dict[str, str]", as_required))
else:
raise TypeError(f"Unknown attribute {self.name} in "
f"{self.containing_class.short_name}")
# now check if we should add a field default
if not as_required:
# then we need to add a default value so we don't have to
# supply this argument when creating it programmatically
if self.container_type is not None:
# then we need a field
factory = "list" if self.container_type is list else "dict"
parts.append(f" = field(default_factory={factory})")
else:
# just default it to None
parts.append(" = None")
return "".join(parts)
model_package = "hikaru/model"
def build_it(swagger_file: str):
"""
Initiate the swagger-file-driven model package build
:param swagger_file: string; path to the swagger file to process
"""
load_stable(swagger_file)
prep_package(model_package)
write_modules(model_package)
if __name__ == "__main__":
if len(sys.argv) < 2:
print(f"usage: {sys.argv[0]} <path-to-swagger-json-file>")
sys.exit(1)
build_it(sys.argv[1])
sys.exit(0)
|
# coding: utf8
from .base_view import BaseView
class DummyView(BaseView):
    def __init__(self, config, **kwargs):
        super(DummyView, self).__init__(config, **kwargs)
|
#! /usr/bin/env python
#
# ReplayGain Album Analysis using gstreamer rganalysis element
# Copyright (C) 2005,2007,2009 Michael Urman
# 2012 Nick Boultbee
# 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
from gi.repository import Gtk
from gi.repository import GObject
from gi.repository import Pango
from gi.repository import Gst
from gi.repository import GLib
from quodlibet.qltk.views import HintedTreeView
from quodlibet.plugins.songsmenu import SongsMenuPlugin
__all__ = ['ReplayGain']
def get_num_threads():
# multiprocessing is >= 2.6.
# Default to 2 threads if cpu_count isn't implemented for the current arch
# or multiprocessing isn't available
try:
import multiprocessing
threads = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
threads = 2
return threads
class RGAlbum(object):
def __init__(self, rg_songs):
self.songs = rg_songs
self.gain = None
self.peak = None
@property
def progress(self):
all_ = 0.0
done = 0.0
for song in self.songs:
all_ += song.length
done += song.length * song.progress
try:
return max(min(done / all_, 1.0), 0.0)
except ZeroDivisionError:
return 0.0
@property
def done(self):
for song in self.songs:
if not song.done:
return False
return True
@property
def title(self):
if not self.songs:
return ""
return self.songs[0].song.comma('~artist~album')
@property
def error(self):
for song in self.songs:
if song.error:
return True
return False
def write(self):
# Don't write incomplete data
if not self.done:
return
for song in self.songs:
song._write(self.gain, self.peak)
@classmethod
def from_songs(self, songs):
return RGAlbum([RGSong(s) for s in songs])
class RGSong(object):
def __init__(self, song):
self.song = song
self.error = False
self.gain = None
self.peak = None
self.progress = 0.0
self.done = False
def _write(self, album_gain, album_peak):
if self.error or not self.done:
return
song = self.song
if self.gain is not None:
song['replaygain_track_gain'] = '%.2f dB' % self.gain
if self.peak is not None:
song['replaygain_track_peak'] = '%.4f' % self.peak
if album_gain is not None:
song['replaygain_album_gain'] = '%.2f dB' % album_gain
if album_peak is not None:
song['replaygain_album_peak'] = '%.4f' % album_peak
@property
def title(self):
return self.song('~tracknumber~title~version')
@property
def filename(self):
return self.song("~filename")
@property
def length(self):
return self.song("~#length")
class ReplayGainPipeline(GObject.Object):
__gsignals__ = {
# done(self, album)
'done': (GObject.SignalFlags.RUN_LAST, None, (object,)),
# update(self, album, song)
'update': (GObject.SignalFlags.RUN_LAST, None,
(object, object,)),
}
def __init__(self):
super(ReplayGainPipeline, self).__init__()
self._current = None
self._setup_pipe()
def _setup_pipe(self):
# gst pipeline for replay gain analysis:
# filesrc!decodebin!audioconvert!audioresample!rganalysis!fakesink
self.pipe = Gst.Pipeline()
self.filesrc = Gst.ElementFactory.make("filesrc", "source")
self.pipe.add(self.filesrc)
self.decode = Gst.ElementFactory.make("decodebin", "decode")
def new_decoded_pad(dbin, pad):
pad.link(self.convert.get_static_pad("sink"))
def sort_decoders(decode, pad, caps, factories):
def set_prio(x):
i, f = x
i = {"mad": -1, "mpg123audiodec": -2}.get(f.get_name(), i)
return (i, f)
return zip(*sorted(map(set_prio, enumerate(factories))))[1]
self.decode.connect("autoplug-sort", sort_decoders)
self.decode.connect("pad-added", new_decoded_pad)
self.pipe.add(self.decode)
self.filesrc.link(self.decode)
self.convert = Gst.ElementFactory.make("audioconvert", "convert")
self.pipe.add(self.convert)
self.resample = Gst.ElementFactory.make("audioresample", "resample")
self.pipe.add(self.resample)
self.convert.link(self.resample)
self.analysis = Gst.ElementFactory.make("rganalysis", "analysis")
self.pipe.add(self.analysis)
self.resample.link(self.analysis)
self.sink = Gst.ElementFactory.make("fakesink", "sink")
self.pipe.add(self.sink)
self.analysis.link(self.sink)
self.bus = bus = self.pipe.get_bus()
bus.add_signal_watch()
bus.connect("message", self._bus_message)
def request_update(self):
if not self._current:
return
ok, p = self.pipe.query_position(Gst.Format.TIME)
if ok:
length = self._current.length
try:
progress = float(p / Gst.SECOND) / length
except ZeroDivisionError:
progress = 0.0
progress = max(min(progress, 1.0), 0.0)
self._current.progress = progress
self._emit_update()
def _emit_update(self):
self.emit("update", self._album, self._current)
def start(self, album):
self._album = album
self._songs = list(album.songs)
self._done = []
self._next_song(first=True)
def quit(self):
self.bus.remove_signal_watch()
self.pipe.set_state(Gst.State.NULL)
def _next_song(self, first=False):
if self._current:
self._current.progress = 1.0
self._current.done = True
self._emit_update()
self._done.append(self._current)
self._current = None
if not self._songs:
self.pipe.set_state(Gst.State.NULL)
self.emit("done", self._album)
return
if first:
self.analysis.set_property("num-tracks", len(self._songs))
else:
self.analysis.set_locked_state(True)
self.pipe.set_state(Gst.State.NULL)
self._current = self._songs.pop(0)
self.filesrc.set_property("location", self._current.filename)
if not first:
# flush, so the element takes new data after EOS
pad = self.analysis.get_static_pad("src")
pad.send_event(Gst.Event.new_flush_start())
pad.send_event(Gst.Event.new_flush_stop(True))
self.analysis.set_locked_state(False)
self.pipe.set_state(Gst.State.PLAYING)
def _bus_message(self, bus, message):
if message.type == Gst.MessageType.TAG:
tags = message.parse_tag()
ok, value = tags.get_double(Gst.TAG_TRACK_GAIN)
if ok:
self._current.gain = value
ok, value = tags.get_double(Gst.TAG_TRACK_PEAK)
if ok:
self._current.peak = value
ok, value = tags.get_double(Gst.TAG_ALBUM_GAIN)
if ok:
self._album.gain = value
ok, value = tags.get_double(Gst.TAG_ALBUM_PEAK)
if ok:
self._album.peak = value
self._emit_update()
elif message.type == Gst.MessageType.EOS:
self._next_song()
elif message.type == Gst.MessageType.ERROR:
gerror, debug = message.parse_error()
if gerror:
print_e(gerror.message)
print_e(debug)
self._current.error = True
self._next_song()
class RGDialog(Gtk.Dialog):
def __init__(self, albums, parent):
super(RGDialog, self).__init__(
title=_('ReplayGain Analyzer'), parent=parent,
buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK)
)
self.set_default_size(500, 350)
self.set_border_width(6)
swin = Gtk.ScrolledWindow()
swin.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
swin.set_shadow_type(Gtk.ShadowType.IN)
self.vbox.pack_start(swin, True, True, 0)
view = HintedTreeView()
swin.add(view)
def icon_cdf(column, cell, model, iter_, *args):
item = model[iter_][0]
if item.error:
cell.set_property('stock-id', Gtk.STOCK_DIALOG_ERROR)
else:
cell.set_property('stock-id', None)
column = Gtk.TreeViewColumn()
column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
icon_render = Gtk.CellRendererPixbuf()
column.pack_start(icon_render, True)
column.set_cell_data_func(icon_render, icon_cdf)
view.append_column(column)
def track_cdf(column, cell, model, iter_, *args):
item = model[iter_][0]
cell.set_property('text', item.title)
column = Gtk.TreeViewColumn(_("Track"))
column.set_expand(True)
column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
track_render = Gtk.CellRendererText()
track_render.set_property('ellipsize', Pango.EllipsizeMode.END)
column.pack_start(track_render, True)
column.set_cell_data_func(track_render, track_cdf)
view.append_column(column)
def progress_cdf(column, cell, model, iter_, *args):
item = model[iter_][0]
cell.set_property('value', int(item.progress * 100))
column = Gtk.TreeViewColumn(_("Progress"))
column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
progress_render = Gtk.CellRendererProgress()
column.pack_start(progress_render, True)
column.set_cell_data_func(progress_render, progress_cdf)
view.append_column(column)
def gain_cdf(column, cell, model, iter_, *args):
item = model[iter_][0]
if item.gain is None or not item.done:
cell.set_property('text', "-")
else:
cell.set_property('text', "%.2f db" % item.gain)
column = Gtk.TreeViewColumn(_("Gain"))
column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
gain_renderer = Gtk.CellRendererText()
column.pack_start(gain_renderer, True)
column.set_cell_data_func(gain_renderer, gain_cdf)
view.append_column(column)
def peak_cdf(column, cell, model, iter_, *args):
item = model[iter_][0]
if item.gain is None or not item.done:
cell.set_property('text', "-")
else:
cell.set_property('text', "%.2f" % item.peak)
column = Gtk.TreeViewColumn(_("Peak"))
column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
peak_renderer = Gtk.CellRendererText()
column.pack_start(peak_renderer, True)
column.set_cell_data_func(peak_renderer, peak_cdf)
view.append_column(column)
# create as many pipelines as threads
self.pipes = []
for i in xrange(get_num_threads()):
self.pipes.append(ReplayGainPipeline())
self._timeout = None
self._sigs = {}
self._done = []
self._todo = list([RGAlbum.from_songs(a) for a in albums])
self._count = len(self._todo)
# fill the view
self.model = model = Gtk.TreeStore(object)
insert = model.insert
for album in reversed(self._todo):
base = insert(None, 0, row=[album])
for song in reversed(album.songs):
insert(base, 0, row=[song])
view.set_model(model)
if len(self._todo) == 1:
view.expand_all()
self.connect("destroy", self.__destroy)
self.connect('response', self.__response)
def start_analysis(self):
self._timeout = GLib.idle_add(self.__request_update)
# fill the pipelines
for p in self.pipes:
if not self._todo:
break
self._sigs[p] = [
p.connect("done", self.__done),
p.connect("update", self.__update),
]
p.start(self._todo.pop(0))
def __response(self, win, response):
if response == Gtk.ResponseType.CANCEL:
self.destroy()
elif response == Gtk.ResponseType.OK:
for album in self._done:
album.write()
self.destroy()
def __destroy(self, *args):
# shut down any active processing and clean up resources, timeouts
if self._timeout:
GLib.source_remove(self._timeout)
for p in self.pipes:
if p in self._sigs:
for s in self._sigs.get(p, []):
p.disconnect(s)
p.quit()
def __update(self, pipeline, album, song):
for row in self.model:
row_album = row[0]
if row_album is album:
self.model.row_changed(row.path, row.iter)
for child in row.iterchildren():
row_song = child[0]
if row_song is song:
self.model.row_changed(child.path, child.iter)
break
break
def __done(self, pipeline, album):
self._done.append(album)
if self._todo:
pipeline.start(self._todo.pop(0))
for row in self.model:
row_album = row[0]
if row_album is album:
self.model.row_changed(row.path, row.iter)
break
def __request_update(self):
GLib.source_remove(self._timeout)
self._timeout = None
# all done, stop
if len(self._done) < self._count:
for p in self.pipes:
p.request_update()
self._timeout = GLib.timeout_add(400, self.__request_update)
return False
class ReplayGain(SongsMenuPlugin):
PLUGIN_ID = 'ReplayGain'
PLUGIN_NAME = 'Replay Gain'
PLUGIN_DESC = _('Analyzes ReplayGain with gstreamer, grouped by album')
PLUGIN_ICON = Gtk.STOCK_MEDIA_PLAY
def plugin_albums(self, albums):
win = RGDialog(albums, parent=self.plugin_window)
win.show_all()
win.start_analysis()
# plugin_done checks for metadata changes and opens the write dialog
win.connect("destroy", self.__plugin_done)
def __plugin_done(self, win):
self.plugin_finish()
if not Gst.Registry.get().find_plugin("replaygain"):
__all__ = []
del ReplayGain
raise ImportError("GStreamer replaygain plugin not found")
|
# Copyright 2018 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
import sushy
import testtools
from proliantutils import exception
from proliantutils.redfish.resources.system import bios
from proliantutils.redfish.resources.system import constants as sys_cons
from proliantutils.redfish.resources.system import ethernet_interface
from proliantutils.redfish.resources.system import memory
from proliantutils.redfish.resources.system import secure_boot
from proliantutils.redfish.resources.system import smart_storage_config
from proliantutils.redfish.resources.system.storage import array_controller
from proliantutils.redfish.resources.system.storage import simple_storage
from proliantutils.redfish.resources.system.storage import smart_storage
from proliantutils.redfish.resources.system.storage import storage
from proliantutils.redfish.resources.system import system
from proliantutils.redfish import utils
from sushy.resources.system import system as sushy_system
class HPESystemTestCase(testtools.TestCase):
def setUp(self):
super(HPESystemTestCase, self).setUp()
self.conn = mock.MagicMock()
with open('proliantutils/tests/redfish/'
'json_samples/system.json', 'r') as f:
system_json = json.loads(f.read())
self.conn.get.return_value.json.return_value = system_json['default']
self.sys_inst = system.HPESystem(
self.conn, '/redfish/v1/Systems/1',
redfish_version='1.0.2')
def test_attributes(self):
self.assertEqual(sys_cons.SUPPORTED_LEGACY_BIOS_AND_UEFI,
self.sys_inst.supported_boot_mode)
def test__get_hpe_push_power_button_action_element(self):
value = self.sys_inst._get_hpe_push_power_button_action_element()
self.assertEqual("/redfish/v1/Systems/1/Actions/Oem/Hpe/"
"HpeComputerSystemExt.PowerButton/",
value.target_uri)
self.assertEqual(["Press", "PressAndHold"], value.allowed_values)
def test__get_hpe_push_power_button_action_element_missing_action(self):
self.sys_inst._hpe_actions.computer_system_ext_powerbutton = None
self.assertRaisesRegex(
exception.MissingAttributeError,
'Oem/Hpe/Actions/#HpeComputerSystemExt.PowerButton is missing',
self.sys_inst._get_hpe_push_power_button_action_element)
def test_push_power_button(self):
self.sys_inst.push_power_button(
sys_cons.PUSH_POWER_BUTTON_PRESS)
self.sys_inst._conn.post.assert_called_once_with(
'/redfish/v1/Systems/1/Actions/Oem/Hpe/'
'HpeComputerSystemExt.PowerButton/',
data={'PushType': 'Press'})
def test_push_power_button_invalid_value(self):
self.assertRaises(exception.InvalidInputError,
self.sys_inst.push_power_button, 'invalid-value')
def test_bios_settings(self):
self.assertIsNone(self.sys_inst._bios_settings)
self.conn.get.return_value.json.reset_mock()
with open('proliantutils/tests/redfish/'
'json_samples/bios.json', 'r') as f:
self.conn.get.return_value.json.return_value = json.loads(f.read())
actual_bios = self.sys_inst.bios_settings
self.assertIsInstance(actual_bios,
bios.BIOSSettings)
self.conn.get.return_value.json.assert_called_once_with()
# reset mock
self.conn.get.return_value.json.reset_mock()
self.assertIs(actual_bios,
self.sys_inst.bios_settings)
self.conn.get.return_value.json.assert_not_called()
@mock.patch.object(sushy_system.System, 'set_system_boot_source')
def test_update_persistent_boot_persistent(self,
set_system_boot_source_mock):
self.sys_inst.update_persistent_boot(['CDROM'], persistent=True)
set_system_boot_source_mock.assert_called_once_with(
sushy.BOOT_SOURCE_TARGET_CD,
enabled=sushy.BOOT_SOURCE_ENABLED_CONTINUOUS)
@mock.patch.object(sushy_system.System, 'set_system_boot_source')
def test_update_persistent_boot_device_unknown_persistent(
self, set_system_boot_source_mock):
self.sys_inst.update_persistent_boot(['unknown'], persistent=True)
set_system_boot_source_mock.assert_called_once_with(
sushy.BOOT_SOURCE_TARGET_NONE,
enabled=sushy.BOOT_SOURCE_ENABLED_CONTINUOUS)
@mock.patch.object(sushy_system.System, 'set_system_boot_source')
def test_update_persistent_boot_not_persistent(
self, set_system_boot_source_mock):
self.sys_inst.update_persistent_boot(['CDROM'], persistent=False)
set_system_boot_source_mock.assert_called_once_with(
sushy.BOOT_SOURCE_TARGET_CD,
enabled=sushy.BOOT_SOURCE_ENABLED_ONCE)
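    # Note (added for clarity): the *_on_refresh tests below follow one pattern:
    # load a sub-resource, invalidate and refresh the parent system (which marks
    # cached sub-resources stale), then access the sub-resource again and verify
    # it is re-fetched rather than served from the stale cache.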
def test_bios_settings_on_refresh(self):
# | GIVEN |
with open('proliantutils/tests/redfish/json_samples/bios.json',
'r') as f:
self.conn.get.return_value.json.return_value = json.loads(f.read())
# | WHEN & THEN |
self.assertIsInstance(self.sys_inst.bios_settings,
bios.BIOSSettings)
# On refreshing the system instance...
with open('proliantutils/tests/redfish/'
'json_samples/system.json', 'r') as f:
self.conn.get.return_value.json.return_value = (
json.loads(f.read())['default'])
self.sys_inst.invalidate()
self.sys_inst.refresh(force=False)
# | WHEN & THEN |
self.assertIsNotNone(self.sys_inst._bios_settings)
self.assertTrue(self.sys_inst._bios_settings._is_stale)
# | GIVEN |
with open('proliantutils/tests/redfish/json_samples/bios.json',
'r') as f:
self.conn.get.return_value.json.return_value = json.loads(f.read())
# | WHEN & THEN |
self.assertIsInstance(self.sys_inst.bios_settings,
bios.BIOSSettings)
self.assertFalse(self.sys_inst._bios_settings._is_stale)
def test_update_persistent_boot_uefi_target(self):
with open('proliantutils/tests/redfish/'
'json_samples/system.json', 'r') as f:
system_json = (json.loads(f.read())[
'System_op_for_update_persistent_boot_uefi_target'])
self.sys_inst.uefi_target_override_devices = (system_json[
'Boot']['UefiTargetBootSourceOverride@Redfish.AllowableValues'])
self.sys_inst.update_persistent_boot(['ISCSI'], persistent=True)
uefi_boot_settings = {
'Boot': {'UefiTargetBootSourceOverride': 'PciRoot(0x0)/Pci(0x1C,0x0)/Pci(0x0,0x0)/MAC(98F2B3EEF886,0x0)/IPv4(172.17.1.32)/iSCSI(iqn.2001-04.com.paresh.boot:volume.bin,0x1,0x0,None,None,None,TCP)'} # noqa
}
calls = [mock.call('/redfish/v1/Systems/1', data=uefi_boot_settings),
mock.call('/redfish/v1/Systems/1',
data={'Boot':
{'BootSourceOverrideTarget': 'UefiTarget',
'BootSourceOverrideEnabled': 'Continuous'}})]
self.sys_inst._conn.patch.assert_has_calls(calls)
def test_update_persistent_boot_uefi_no_iscsi_device(self):
self.assertRaisesRegex(
exception.IloError,
'No UEFI iSCSI bootable device found on system.',
self.sys_inst.update_persistent_boot, ['ISCSI'], True)
def test_update_persistent_boot_uefi_target_fail(self):
update_mock = mock.PropertyMock(
side_effect=sushy.exceptions.SushyError)
type(self.sys_inst).uefi_target_override_devices = update_mock
self.assertRaisesRegex(
exception.IloError,
'Unable to get uefi target override devices.',
self.sys_inst.update_persistent_boot, ['ISCSI'], True)
del type(self.sys_inst).uefi_target_override_devices
def test_pci_devices(self):
pci_dev_return_value = None
pci_dev1_return_value = None
pci_coll_return_value = None
self.assertIsNone(self.sys_inst._pci_devices)
self.conn.get.return_value.json.reset_mock()
with open('proliantutils/tests/redfish/'
'json_samples/pci_device_collection.json') as f:
pci_coll_return_value = json.loads(f.read())
with open('proliantutils/tests/redfish/'
'json_samples/pci_device.json') as f:
pci_dev_return_value = json.loads(f.read())
with open('proliantutils/tests/redfish/'
'json_samples/pci_device1.json') as f:
pci_dev1_return_value = json.loads(f.read())
self.conn.get.return_value.json.side_effect = (
[pci_coll_return_value, pci_dev_return_value,
pci_dev1_return_value])
actual_pci = self.sys_inst.pci_devices
self.conn.get.return_value.json.reset_mock()
self.assertIs(actual_pci,
self.sys_inst.pci_devices)
self.conn.get.return_value.json.assert_not_called()
def test_secure_boot_with_missing_path_attr(self):
def _get_secure_boot():
return self.sys_inst.secure_boot
self.sys_inst._json.pop('SecureBoot')
self.assertRaisesRegex(
exception.MissingAttributeError,
'attribute SecureBoot is missing',
_get_secure_boot)
def test_secure_boot(self):
# check for the underneath variable value
self.assertIsNone(self.sys_inst._secure_boot)
# | GIVEN |
self.conn.get.return_value.json.reset_mock()
with open('proliantutils/tests/redfish/json_samples/secure_boot.json',
'r') as f:
self.conn.get.return_value.json.return_value = (
json.loads(f.read())['default'])
# | WHEN |
actual_secure_boot = self.sys_inst.secure_boot
# | THEN |
self.assertIsInstance(actual_secure_boot,
secure_boot.SecureBoot)
self.conn.get.return_value.json.assert_called_once_with()
# reset mock
self.conn.get.return_value.json.reset_mock()
# | WHEN & THEN |
# tests for same object on invoking subsequently
self.assertIs(actual_secure_boot,
self.sys_inst.secure_boot)
self.conn.get.return_value.json.assert_not_called()
def test_secure_boot_on_refresh(self):
# | GIVEN |
with open('proliantutils/tests/redfish/json_samples/secure_boot.json',
'r') as f:
self.conn.get.return_value.json.return_value = (
json.loads(f.read())['default'])
# | WHEN & THEN |
self.assertIsInstance(self.sys_inst.secure_boot,
secure_boot.SecureBoot)
# On refreshing the system instance...
with open('proliantutils/tests/redfish/'
'json_samples/system.json', 'r') as f:
self.conn.get.return_value.json.return_value = (
json.loads(f.read())['default'])
self.sys_inst.invalidate()
self.sys_inst.refresh(force=False)
# | WHEN & THEN |
self.assertIsNotNone(self.sys_inst._secure_boot)
self.assertTrue(self.sys_inst._secure_boot._is_stale)
# | GIVEN |
with open('proliantutils/tests/redfish/json_samples/secure_boot.json',
'r') as f:
self.conn.get.return_value.json.return_value = (
json.loads(f.read())['default'])
# | WHEN & THEN |
self.assertIsInstance(self.sys_inst.secure_boot,
secure_boot.SecureBoot)
self.assertFalse(self.sys_inst._secure_boot._is_stale)
@mock.patch.object(utils, 'get_subresource_path_by')
def test_get_hpe_sub_resource_collection_path(self, res_mock):
res = 'EthernetInterfaces'
res_mock.return_value = '/redfish/v1/Systems/1/EthernetInterfaces'
path = self.sys_inst._get_hpe_sub_resource_collection_path(res)
self.assertTrue(res_mock.called)
self.assertEqual(path, res_mock.return_value)
@mock.patch.object(utils, 'get_subresource_path_by')
def test_get_hpe_sub_resource_collection_path_oem_path(self, res_mock):
res = 'EthernetInterfaces'
error_val = exception.MissingAttributeError
oem_path = '/redfish/v1/Systems/1/EthernetInterfaces'
res_mock.side_effect = [error_val, oem_path]
path = self.sys_inst._get_hpe_sub_resource_collection_path(res)
self.assertTrue(res_mock.called)
self.assertEqual(path, oem_path)
@mock.patch.object(utils, 'get_subresource_path_by')
def test_get_hpe_sub_resource_collection_path_fail(self, res_mock):
error_val = exception.MissingAttributeError
res_mock.side_effect = [error_val, error_val]
self.assertRaises(
exception.MissingAttributeError,
self.sys_inst._get_hpe_sub_resource_collection_path,
'EthernetInterfaces')
self.assertTrue(res_mock.called)
def test_ethernet_interfaces(self):
self.conn.get.return_value.json.reset_mock()
eth_coll = None
eth_value = None
path = ('proliantutils/tests/redfish/json_samples/'
'ethernet_interface_collection.json')
with open(path, 'r') as f:
eth_coll = json.loads(f.read())
with open('proliantutils/tests/redfish/json_samples/'
'ethernet_interface.json', 'r') as f:
eth_value = (json.loads(f.read()))
self.conn.get.return_value.json.side_effect = [eth_coll,
eth_value]
self.assertIsNone(self.sys_inst._ethernet_interfaces)
actual_macs = self.sys_inst.ethernet_interfaces.summary
self.assertEqual({'Port 1': '12:44:6A:3B:04:11'},
actual_macs)
self.assertIsInstance(self.sys_inst._ethernet_interfaces,
ethernet_interface.EthernetInterfaceCollection)
def test_ethernet_interfaces_oem(self):
self.conn = mock.MagicMock()
with open('proliantutils/tests/redfish/'
'json_samples/system.json', 'r') as f:
system_json = json.loads(f.read())
self.conn.get.return_value.json.return_value = (
system_json['System_for_oem_ethernet_interfaces'])
self.sys_inst = system.HPESystem(
self.conn, '/redfish/v1/Systems/1',
redfish_version='1.0.2')
self.conn.get.return_value.json.reset_mock()
eth_coll = None
eth_value = None
path = ('proliantutils/tests/redfish/json_samples/'
'ethernet_interface_collection.json')
with open(path, 'r') as f:
eth_coll = json.loads(f.read())
with open('proliantutils/tests/redfish/json_samples/'
'ethernet_interface.json', 'r') as f:
eth_value = (json.loads(f.read()))
self.conn.get.return_value.json.side_effect = [eth_coll,
eth_value]
self.assertIsNone(self.sys_inst._ethernet_interfaces)
actual_macs = self.sys_inst.ethernet_interfaces.summary
self.assertEqual({'Port 1': '12:44:6A:3B:04:11'},
actual_macs)
self.assertIsInstance(self.sys_inst._ethernet_interfaces,
ethernet_interface.EthernetInterfaceCollection)
def test_smart_storage(self):
self.conn.get.return_value.json.reset_mock()
value = None
with open('proliantutils/tests/redfish/json_samples/'
'smart_storage.json', 'r') as f:
value = (json.loads(f.read()))
self.conn.get.return_value.json.return_value = value
self.assertIsNone(self.sys_inst._smart_storage)
value = self.sys_inst.smart_storage
self.assertIsInstance(value, smart_storage.HPESmartStorage)
def test_storages(self):
self.conn.get.return_value.json.reset_mock()
coll = None
value = None
path = ('proliantutils/tests/redfish/json_samples/'
'storage_collection.json')
with open(path, 'r') as f:
coll = json.loads(f.read())
with open('proliantutils/tests/redfish/json_samples/'
'storage.json', 'r') as f:
value = (json.loads(f.read()))
self.conn.get.return_value.json.side_effect = [coll, value]
self.assertIsNone(self.sys_inst._storages)
value = self.sys_inst.storages
self.assertIsInstance(value, storage.StorageCollection)
def test_simple_storages(self):
self.conn.get.return_value.json.reset_mock()
coll = None
value = None
path = ('proliantutils/tests/redfish/json_samples/'
'simple_storage_collection.json')
with open(path, 'r') as f:
coll = json.loads(f.read())
with open('proliantutils/tests/redfish/json_samples/'
'simple_storage.json', 'r') as f:
value = (json.loads(f.read()))
self.conn.get.return_value.json.side_effect = [coll, value]
self.assertIsNone(self.sys_inst._simple_storages)
value = self.sys_inst.simple_storages
self.assertIsInstance(value, simple_storage.SimpleStorageCollection)
def test_simple_storage_on_refresh(self):
with open('proliantutils/tests/redfish/json_samples/'
'simple_storage_collection.json',
'r') as f:
self.conn.get.return_value.json.return_value = json.loads(f.read())
self.assertIsInstance(self.sys_inst.simple_storages,
simple_storage.SimpleStorageCollection)
with open('proliantutils/tests/redfish/'
'json_samples/system.json', 'r') as f:
self.conn.get.return_value.json.return_value = (
json.loads(f.read())['default'])
self.sys_inst.invalidate()
self.sys_inst.refresh(force=False)
self.assertIsNotNone(self.sys_inst._simple_storages)
self.assertTrue(self.sys_inst._simple_storages._is_stale)
with open('proliantutils/tests/redfish/json_samples/'
'simple_storage_collection.json', 'r') as f:
self.conn.get.return_value.json.return_value = json.loads(f.read())
self.assertIsInstance(self.sys_inst.simple_storages,
simple_storage.SimpleStorageCollection)
self.assertFalse(self.sys_inst._simple_storages._is_stale)
def test_memory(self):
self.assertIsNone(self.sys_inst._memory)
self.conn.get.return_value.json.reset_mock()
with open('proliantutils/tests/redfish/'
'json_samples/memory_collection.json', 'r') as f:
self.conn.get.return_value.json.return_value = json.loads(f.read())
actual_memory = self.sys_inst.memory
self.assertIsInstance(actual_memory,
memory.MemoryCollection)
self.conn.get.return_value.json.assert_called_once_with()
# reset mock
self.conn.get.return_value.json.reset_mock()
self.assertIs(actual_memory,
self.sys_inst.memory)
self.conn.get.return_value.json.assert_not_called()
def test_memory_collection_on_refresh(self):
# | GIVEN |
with open('proliantutils/tests/redfish/json_samples/'
'memory_collection.json', 'r') as f:
self.conn.get.return_value.json.return_value = json.loads(f.read())
# | WHEN & THEN |
self.assertIsInstance(self.sys_inst.memory,
memory.MemoryCollection)
# On refreshing the system instance...
with open('proliantutils/tests/redfish/'
'json_samples/system.json', 'r') as f:
self.conn.get.return_value.json.return_value = (
json.loads(f.read())['default'])
self.sys_inst.invalidate()
self.sys_inst.refresh(force=False)
# | WHEN & THEN |
self.assertIsNotNone(self.sys_inst._memory)
self.assertTrue(self.sys_inst._memory._is_stale)
# | GIVEN |
with open('proliantutils/tests/redfish/json_samples/'
'memory_collection.json', 'r') as f:
self.conn.get.return_value.json.return_value = json.loads(f.read())
# | WHEN & THEN |
self.assertIsInstance(self.sys_inst.memory,
memory.MemoryCollection)
self.assertFalse(self.sys_inst._memory._is_stale)
def test_storage_on_refresh(self):
with open('proliantutils/tests/redfish/json_samples/'
'storage_collection.json',
'r') as f:
self.conn.get.return_value.json.return_value = json.loads(f.read())
self.assertIsInstance(self.sys_inst.storages,
storage.StorageCollection)
# On refreshing the system instance...
with open('proliantutils/tests/redfish/'
'json_samples/system.json', 'r') as f:
self.conn.get.return_value.json.return_value = (
json.loads(f.read())['default'])
self.sys_inst.invalidate()
self.sys_inst.refresh(force=False)
self.assertIsNotNone(self.sys_inst._storages)
self.assertTrue(self.sys_inst._storages._is_stale)
with open('proliantutils/tests/redfish/json_samples/'
'simple_storage_collection.json', 'r') as f:
self.conn.get.return_value.json.return_value = json.loads(f.read())
self.assertIsInstance(self.sys_inst.storages,
storage.StorageCollection)
self.assertFalse(self.sys_inst._storages._is_stale)
def test_get_host_post_state(self):
expected = sys_cons.POST_STATE_FINISHEDPOST
self.assertEqual(expected, self.sys_inst.post_state)
@mock.patch.object(smart_storage_config, 'HPESmartStorageConfig',
autospec=True)
def test_get_smart_storage_config(self, mock_ssc):
ssc_element = '/redfish/v1/systems/1/smartstorageconfig/'
ssc_inst = self.sys_inst.get_smart_storage_config(ssc_element)
self.assertIsInstance(ssc_inst,
smart_storage_config.HPESmartStorageConfig.
__class__)
mock_ssc.assert_called_once_with(
self.conn, "/redfish/v1/systems/1/smartstorageconfig/",
redfish_version='1.0.2')
@mock.patch.object(system.HPESystem, 'get_smart_storage_config')
def test_delete_raid(self, get_smart_storage_config_mock):
config_id = ['/redfish/v1/systems/1/smartstorageconfig/']
type(self.sys_inst).smart_storage_config_identities = (
mock.PropertyMock(return_value=config_id))
self.sys_inst.delete_raid()
get_smart_storage_config_mock.assert_called_once_with(config_id[0])
(get_smart_storage_config_mock.return_value.
delete_raid.assert_called_once_with())
@mock.patch.object(system.HPESystem, 'get_smart_storage_config')
def test_delete_raid_controller_failed(self,
get_smart_storage_config_mock):
config_id = ['/redfish/v1/systems/1/smartstorageconfig/',
'/redfish/v1/systems/1/smartstorageconfig1/',
'/redfish/v1/systems/1/smartstorageconfig2/']
type(self.sys_inst).smart_storage_config_identities = (
mock.PropertyMock(return_value=config_id))
get_smart_storage_config_mock.return_value.delete_raid.side_effect = (
[None, sushy.exceptions.SushyError, None])
self.assertRaisesRegex(
exception.IloError,
"The Redfish controller failed to delete the "
"raid configuration in one or more controllers with",
self.sys_inst.delete_raid)
@mock.patch.object(system.HPESystem, 'get_smart_storage_config')
def test_delete_raid_logical_drive_not_found(
self, get_smart_storage_config_mock):
config_id = ['/redfish/v1/systems/1/smartstorageconfig/',
'/redfish/v1/systems/1/smartstorageconfig1/']
type(self.sys_inst).smart_storage_config_identities = (
mock.PropertyMock(return_value=config_id))
get_smart_storage_config_mock.return_value.delete_raid.side_effect = (
exception.IloLogicalDriveNotFoundError('No logical drive found'))
self.assertRaisesRegex(
exception.IloError,
"No logical drives are found in any controllers. "
"Nothing to delete.",
self.sys_inst.delete_raid)
def test_check_smart_storage_config_ids(self):
type(self.sys_inst).smart_storage_config_identities = (
mock.PropertyMock(return_value=None))
self.assertRaisesRegex(
exception.IloError,
"The Redfish controller failed to get the SmartStorageConfig "
"controller configurations",
self.sys_inst.check_smart_storage_config_ids)
@mock.patch.object(system.HPESystem, 'check_smart_storage_config_ids')
@mock.patch.object(system.HPESystem, '_parse_raid_config_data')
@mock.patch.object(system.HPESystem,
'_get_smart_storage_config_by_controller_model')
def test_create_raid(self, get_smart_storage_config_model_mock,
parse_raid_config_mock,
check_smart_storage_config_ids_mock):
ld1 = {'raid_level': '0', 'is_root_volume': True,
'size_gb': 150,
'controller': 'HPE Smart Array P408i-p SR Gen10'}
ld2 = {'raid_level': '1', 'size_gb': 200,
'controller': 'HPE Smart Array P408i-p SR Gen10'}
raid_config = {'logical_disks': [ld1, ld2]}
parse_data = {'HPE Smart Array P408i-p SR Gen10': [ld1, ld2]}
parse_raid_config_mock.return_value = parse_data
check_smart_storage_config_ids_mock.return_value = None
self.sys_inst.create_raid(raid_config)
get_smart_storage_config_model_mock.assert_called_once_with(
'HPE Smart Array P408i-p SR Gen10')
(get_smart_storage_config_model_mock.return_value.
create_raid.assert_called_once_with(raid_config))
@mock.patch.object(system.HPESystem, 'check_smart_storage_config_ids')
@mock.patch.object(system.HPESystem, '_parse_raid_config_data')
@mock.patch.object(system.HPESystem,
'_get_smart_storage_config_by_controller_model')
def test_create_raid_controller_not_found(
self, get_smart_storage_config_model_mock, parse_raid_config_mock,
check_smart_storage_config_ids_mock):
with open('proliantutils/tests/redfish/'
'json_samples/smart_storage.json', 'r') as f:
ss_json = json.loads(f.read())
with open('proliantutils/tests/redfish/'
'json_samples/array_controller_collection.json', 'r') as f:
acc_json = json.loads(f.read())
with open('proliantutils/tests/redfish/'
'json_samples/array_controller.json', 'r') as f:
ac_json = json.loads(f.read())
self.conn.get.return_value.json.reset_mock()
self.conn.get.return_value.json.side_effect = [
ss_json, acc_json, ac_json]
ld1 = {'raid_level': '1', 'size_gb': 200,
'controller': 'HPE Gen10 Controller'}
raid_config = {'logical_disks': [ld1]}
parse_data = {'HPE Gen10 Controller': [ld1]}
parse_raid_config_mock.return_value = parse_data
check_smart_storage_config_ids_mock.return_value = None
get_smart_storage_config_model_mock.return_value = None
        self.assertRaisesRegex(
exception.IloError,
"The Redfish controller failed to create the raid "
"configuration for one or more controllers with",
self.sys_inst.create_raid, raid_config)
@mock.patch.object(system.HPESystem, 'check_smart_storage_config_ids')
@mock.patch.object(system.HPESystem, '_parse_raid_config_data')
@mock.patch.object(system.HPESystem,
'_get_smart_storage_config_by_controller_model')
def test_create_raid_failed(self, get_smart_storage_config_model_mock,
parse_raid_config_mock,
check_smart_storage_config_ids_mock):
ld1 = {'raid_level': '0', 'is_root_volume': True,
'size_gb': 150,
'controller': 'HPE Smart Array P408i-p SR Gen10'}
ld2 = {'raid_level': '1', 'size_gb': 200,
'controller': 'HPE Smart Array P408i-p SR Gen10'}
raid_config = {'logical_disks': [ld1, ld2]}
parse_data = {'HPE Smart Array P408i-p SR Gen10': [ld1, ld2]}
check_smart_storage_config_ids_mock.return_value = None
parse_raid_config_mock.return_value = parse_data
(get_smart_storage_config_model_mock.
return_value.create_raid.side_effect) = sushy.exceptions.SushyError
        self.assertRaisesRegex(
exception.IloError,
"The Redfish controller failed to create the "
"raid configuration for one or more controllers with Error:",
self.sys_inst.create_raid, raid_config)
def test__parse_raid_config_data(self):
ld1 = {'raid_level': '0', 'is_root_volume': True,
'size_gb': 150,
'controller': 'HPE Smart Array P408i-a SR Gen10'}
ld2 = {'raid_level': '1', 'size_gb': 200,
'controller': 'HPE Smart Array P408i-a SR Gen10'}
raid_config = {'logical_disks': [ld1, ld2]}
with open('proliantutils/tests/redfish/'
'json_samples/smart_storage.json', 'r') as f:
ss_json = json.loads(f.read())
with open('proliantutils/tests/redfish/'
'json_samples/array_controller_collection.json', 'r') as f:
acc_json = json.loads(f.read())
with open('proliantutils/tests/redfish/'
'json_samples/array_controller.json', 'r') as f:
ac_json = json.loads(f.read())
self.conn.get.return_value.json.reset_mock()
self.conn.get.return_value.json.side_effect = [
ss_json, acc_json, ac_json]
expected = {'HPE Smart Array P408i-a SR Gen10': [ld1, ld2]}
self.assertEqual(expected,
self.sys_inst._parse_raid_config_data(raid_config))
@mock.patch.object(array_controller.HPEArrayControllerCollection,
'array_controller_by_model')
@mock.patch.object(system.HPESystem, 'get_smart_storage_config')
def test__get_smart_storage_config_by_controller_model(
self, get_smart_storage_config_mock,
array_controller_by_model_mock):
with open('proliantutils/tests/redfish/'
'json_samples/smart_storage.json', 'r') as f:
ss_json = json.loads(f.read())
with open('proliantutils/tests/redfish/'
'json_samples/array_controller_collection.json', 'r') as f:
acc_json = json.loads(f.read())
self.conn.get.return_value.json.reset_mock()
self.conn.get.return_value.json.side_effect = [ss_json, acc_json]
type(array_controller_by_model_mock.return_value).location = 'Slot 0'
ssc_obj_mock = mock.Mock(location='Slot 0')
get_smart_storage_config_mock.return_value = ssc_obj_mock
self.assertEqual(
ssc_obj_mock.location,
self.sys_inst._get_smart_storage_config_by_controller_model(
'HPE Smart Array P408i-a SR Gen10').location)
|
values = {
"I": 1,
"V": 5,
"X": 10,
"L": 50,
"C": 100,
"D": 500,
"M": 1000,
}
class RightToLeftSolution:
    def romanToInt(self, s):
        # Seed the total with the last symbol, then walk the remaining symbols
        # left to right: subtract a symbol when a larger one follows it
        # (e.g. the "I" in "IV"), otherwise add it.
        total = values.get(s[-1])
        for i in range(len(s) - 1):
            if values[s[i]] < values[s[i + 1]]:
                total -= values[s[i]]
            else:
                total += values[s[i]]
        return total
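# Hedged usage sketch (added for illustration, not part of the original snippet):
# a quick check of romanToInt on two well-known numerals.
if __name__ == "__main__":
    solver = RightToLeftSolution()
    print(solver.romanToInt("MCMXCIV"))  # expected 1994
    print(solver.romanToInt("LVIII"))    # expected 58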
|
#part1
# Imports assumed by these exercises; student_data is expected to be a pandas
# DataFrame (already loaded) with "absences", "G3" and "study_time" columns.
import seaborn as sns
import matplotlib.pyplot as plt
# Change to use relplot() instead of scatterplot()
sns.relplot(x="absences", y="G3",
            data=student_data, kind="scatter")
# Show plot
plt.show()
#part2
# Change to make subplots based on study time
sns.relplot(x="absences", y="G3",
            data=student_data,
            kind="scatter", col="study_time")
# Show plot
plt.show()
#part3
# Change this scatter plot to arrange the plots in rows instead of columns
sns.relplot(x="absences", y="G3",
            data=student_data,
            kind="scatter",
            row="study_time")
# Show plot
plt.show()
|
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from data.batcher import decode_tensor
from utils import remove_sep, mean
from metric.metric import Metric
class Loss(nn.CrossEntropyLoss):
def __init__(self, padding_idx=0):
super(Loss, self).__init__(ignore_index=padding_idx, reduction='mean')
def forward(self, hypo, tgt, Model=None):
hypo = hypo.contiguous()
tgt = tgt.contiguous()
if hypo.nelement() == 0 or tgt.nelement() == 0: # failsafe for empty tensor
loss = None
else:
loss = super().forward(hypo.view(-1, hypo.shape[-1]),
tgt.view(-1))
return loss, {}
class RLLoss(_Loss):
def __init__(self, tokenizer, metrics=['meteor'], use_vist=False,
reinforce_group=False):
super(RLLoss, self).__init__(reduction='mean')
self.group = reinforce_group
self.use_vist = use_vist
self.tokenizer = tokenizer
self.tokenizer.whitespace = getattr(self.tokenizer, 'whitespace', b'\xc4\xa0'.decode())
self.metric = Metric(metrics, use_vist=self.use_vist)
def decode(self, tensor):
tensor = decode_tensor(self.tokenizer, tensor, use_vist=self.use_vist,
remove_past_sep=True)
tensor = remove_sep(tensor, self.tokenizer.sep_token)
return tensor
def calc_score(self, hypo, tgt):
hypo = self.decode(hypo)
tgt = self.decode(tgt)
score_texts = {'temp': (hypo, tgt)}
score_stats = self.metric.calculate(score_texts)
return mean(list(score_stats.values())) # TODO: account for bleu
def calc_score_group(self, hypos, tgts):
hypos = [self.decode(hypo) for hypo in hypos]
tgts = [self.decode(tgt) for tgt in tgts]
hypo = ' '.join(hypos)
tgt = ' '.join(tgts)
score_texts = {'temp': (hypo, tgt)}
score_stats = self.metric.calculate(score_texts)
return mean(list(score_stats.values())) # TODO: account for bleu
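    # Note (added for clarity): v_loss regresses the learned baseline toward the
    # sequence reward (the critic update); a_loss is the REINFORCE policy-gradient
    # term weighted by the advantage (reward - detached baseline). Both are
    # averaged over non-padding positions via the mask.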
def v_loss(self, reward, baseline, mask):
o = (reward - baseline) ** 2 * mask
o = o.sum() / mask.sum()
return o
def a_loss(self, log_prob, reward, baseline, mask):
advantage = reward - baseline.detach()
o = - log_prob * advantage * mask
o = o.sum() / mask.sum()
return o
def forward(self, hypos, tgts, logits, baseline):
        # hypos, tgts: (batch, group, length) token ids; logits: (batch, group, length, vocab)
G = hypos.shape[1]
mask = hypos != self.tokenizer.pad_id
mask = mask.float()
log_prob = F.log_softmax(logits, dim=-1)
log_prob = log_prob.gather(dim=-1, index=hypos.unsqueeze(-1)).squeeze(-1)
if self.group:
reward = torch.Tensor(
[self.calc_score_group(group_hypo, group_tgt)
for group_hypo, group_tgt in zip(hypos, tgts)]
).float().to(log_prob.device).unsqueeze(1).repeat(1, G)
else:
reward = torch.Tensor(
[[self.calc_score(hypo, tgt) for hypo, tgt in zip(group_hypo, group_tgt)]
for group_hypo, group_tgt in zip(hypos, tgts)]
).float().to(log_prob.device)
reward = reward.float().to(log_prob.device).unsqueeze(-1)
v_loss = self.v_loss(reward, baseline, mask)
a_loss = self.a_loss(log_prob, reward, baseline, mask)
loss = v_loss + a_loss
stats = {
'reward': reward.mean().item(),
'a_loss': a_loss.item(),
'v_loss': v_loss.item(),
'rl_loss': loss.item()
}
return loss, stats
# from fairseq (github.com/facebook/fairseq)
# with all rights reserved
# label smoothed cross entropy
class SmoothLoss(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='mean',
eps=0, padding_idx=0):
super(SmoothLoss, self).__init__(size_average, reduce, reduction)
self.eps = eps
self.padding_idx = padding_idx
def get_log_probs(self, hypo):
if getattr(hypo, 'if_log_softmax', False):
lprobs = hypo
else:
lprobs = F.log_softmax(hypo, dim=-1)
return lprobs
def forward(self, hypo, tgt, model=None):
hypo = hypo.contiguous()
tgt = tgt.contiguous()
lprobs = self.get_log_probs(hypo)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = tgt.view(-1, 1)
non_pad_mask = target.ne(self.padding_idx)
nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask]
ppl = 2 ** nll_loss.mean().item()
nll_loss_report = nll_loss.mean().item()
nll_loss = self._reduce(nll_loss)
smooth_loss = self._reduce(smooth_loss)
eps_i = self.eps / lprobs.size(-1)
loss = (1. - self.eps) * nll_loss + eps_i * smooth_loss
return loss, {'nll_loss': nll_loss_report, 'ppl': ppl}
def _reduce(self, t):
func = {
'none': lambda x: x,
'mean': lambda x: x.mean(),
'sum': lambda x: x.sum()
}[self.reduction]
return func(t)
class FocalLoss(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='mean', gamma=2):
super(FocalLoss, self).__init__(size_average, reduce, reduction)
self.gamma = gamma
def forward(self, hypo_logit, tgt):
        hypo = torch.sigmoid(hypo_logit)  # torch.sigmoid; F.sigmoid is deprecated
hypo = hypo.contiguous()
tgt = tgt.contiguous().bool().float()
# hypo = (tgt=1: hypo, tgt=0: (1-hypo))
p_t = hypo * (2 * tgt - 1) + (1 - tgt)
loss = -((1 - p_t) ** self.gamma) * p_t.log()
loss = loss.mean(dim=-1)
loss = self._reduce(loss)
return loss, {}
def _reduce(self, t):
func = {
'none': lambda x: x,
'mean': lambda x: x.mean(),
'sum': lambda x: x.sum()
}[self.reduction]
return func(t)
class BinaryCELoss(nn.BCEWithLogitsLoss):
def __init__(self, reduction='mean'):
super(BinaryCELoss, self).__init__(reduction=reduction)
def forward(self, hypo, tgt):
hypo = hypo.contiguous()
tgt = tgt.contiguous().bool().float()
loss = super().forward(hypo, tgt)
return loss, {}
class BinaryCERecallLoss(_Loss):
def __init__(self, recall_weight=1, reduction='mean'):
super(BinaryCERecallLoss, self).__init__(reduction=reduction)
self.reduction = reduction
self.weight = recall_weight
def forward(self, hypo, tgt):
        # hypo, tgt: (batch, K) keyword logits and binary targets
hypo = hypo.contiguous()
tgt = tgt.contiguous().bool().float()
recall_weight = torch.FloatTensor([self.weight]).to(hypo.device).expand(hypo.shape[-1])
loss = F.binary_cross_entropy_with_logits(hypo, tgt,
pos_weight=recall_weight,
reduction=self.reduction)
return loss, {}
class SortedLoss(_Loss):
def __init__(self, top_k=20, reduction='mean', normalized=True):
super(SortedLoss, self).__init__(reduction=reduction)
self.reduction = reduction
self.top_k = top_k
self.normalized = normalized
def get_loss(self, hs, ts, max_val=None):
losses = []
for i, (h, t) in enumerate(zip(hs, ts)):
h = h.masked_select(t.bool()) # get keywords outside topk
if h.nelement() > 0:
if self.normalized:
loss = (1 - torch.sigmoid(h)).mean()
else:
loss = (max_val[i].item() - h).mean()
losses.append(loss)
return sum(losses) / len(losses)
def forward(self, hypo, tgt, Model=None):
        # hypo, tgt: (..., K) keyword scores and binary targets, flattened to (batch, K)
hypo = hypo.contiguous().view(-1, hypo.shape[-1]).contiguous()
tgt = tgt.contiguous().view(-1, tgt.shape[-1]).contiguous().bool().float()
h_sorted, h_ids = hypo.sort(dim=-1, descending=True)
t_sorted = tgt.gather(dim=-1, index=h_ids)
reg_loss = (h_sorted.sum(dim=-1) - self.top_k).abs().mean()
max_val = h_sorted[:, 0]
h_sorted = h_sorted[:, self.top_k:]
t_sorted = t_sorted[:, self.top_k:]
sort_loss = self.get_loss(h_sorted, t_sorted, max_val=max_val)
loss = reg_loss + sort_loss
return loss, {'sort_loss': sort_loss.item(), 'reg_loss': reg_loss.item()}
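# Hedged smoke test (added for illustration, not part of the original module);
# it only needs torch and the Loss / SmoothLoss classes above, and the batch,
# length and vocab sizes below are arbitrary.
if __name__ == "__main__":
    torch.manual_seed(0)
    batch, length, vocab = 2, 5, 11
    logits = torch.randn(batch, length, vocab)          # unnormalized scores
    targets = torch.randint(1, vocab, (batch, length))  # index 0 is padding
    ce_loss, _ = Loss(padding_idx=0)(logits, targets)
    smooth_loss, stats = SmoothLoss(eps=0.1, padding_idx=0)(logits, targets)
    print(f"cross entropy: {ce_loss.item():.4f}")
    print(f"label smoothed: {smooth_loss.item():.4f} (ppl {stats['ppl']:.2f})")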
|
from unittest import TestCase
from models.user import UserModel
class UserTest(TestCase):
def test_create_user(self):
# Setup
# Exercise
user = UserModel('test', 'abcd')
# Verify
self.assertEqual('test', user.username)
self.assertEqual('abcd', user.password)
def test_user_json(self):
# Setup
# Exercise
item = UserModel('test', 'abcd')
# Verify
expected = {'id': None, 'username': 'test'}
self.assertEqual(expected, item.json())
|
from SpatialCluster.methods.DMoN_core import convert_scipy_sparse_to_sparse_tensor, build_dmon, normalize_graph
from SpatialCluster.utils.get_areas import get_areas
from SpatialCluster.preprocess import adjacencyMatrix
from SpatialCluster.utils.data_format import data_format, position_data_format
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import numpy as np
import tensorflow.compat.v2 as tf
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
"""
----------------
Parameters:
features_X: (DataFrame) Table with the features of each point
features_position: (DataFrame) Table with the position of each point (lon, lat)
r_max: (float) Max ratio to search for neighbours
n_clusters: (int) Number of clusters
reg: (int) Collapse regularization
dropout: (float) Dropout rate for the model
num_epochs: (int) Number of epochs to train the model
----------------
Return:
areas_to_points: (dict) Dictionary with the cluster id as keys and a list of points in as values
"""
def DMoN_Clustering(features_X, features_position, r_max = 0.00034, n_clusters = 4, reg = 1, dropout = 0.0, num_epochs = 500):
features_X = data_format(features_X)
features_position = position_data_format(features_position)
X = features_X.to_numpy(dtype=float)
    x_min, x_max = np.min(X, axis=0), np.max(X, axis=0)
    # Min-max normalize each feature column. Note: numpy signals division by
    # zero with a RuntimeWarning rather than an exception, so a try/except
    # cannot catch it; guard constant columns explicitly instead.
    x_range = x_max - x_min
    x_range[x_range == 0] = 1.0
    X = (X - x_min) / x_range
X = np.asmatrix(X) # feature matrix
# --------------------------------------------------------------------------
points = list(zip(features_position.lon, features_position.lat))
A = adjacencyMatrix(features_position)
n_nodes = A.shape[0]
feature_size = X.shape[1]
# --------------------------------------------------------------------------
graph = convert_scipy_sparse_to_sparse_tensor(A)
graph_normalized = convert_scipy_sparse_to_sparse_tensor(normalize_graph(A.copy()))
input_features = tf.keras.layers.Input(shape=(feature_size,))
input_graph = tf.keras.layers.Input((n_nodes,), sparse=True)
input_adjacency = tf.keras.layers.Input((n_nodes,), sparse=True)
model = build_dmon(input_features, input_graph, input_adjacency, n_clusters, reg, dropout)
# Computes the gradients wrt. the sum of losses, returns a list of them.
def grad(model, inputs):
with tf.GradientTape() as tape:
_ = model(inputs, training=True)
loss_value = sum(model.losses)
return model.losses, tape.gradient(loss_value, model.trainable_variables)
optimizer = tf.keras.optimizers.Adam(0.001)
model.compile(optimizer, None)
# --------------------------------------------------------------------------
for epoch in range(num_epochs):
loss_values, grads = grad(model, [X, graph_normalized, graph])
optimizer.apply_gradients(zip(grads, model.trainable_variables))
# --------------------------------------------------------------------------
_, assignments = model([X, graph_normalized, graph], training=False)
assignments = assignments.numpy()
clusters = assignments.argmax(axis=1)
areas_to_points = get_areas(clusters, points)
return areas_to_points, clusters
"""
Dejar matriz implicita no más
Bag of parameters
"""
|
#!/usr/bin/python
#
# =================================================================
# README
# =================================================================
#
#
# Author:
# Suraj singh bisht
# surajsinghbisht054@gmail.com
# www.bitforestinfo.com
#
# -------------------------------------------------------------
# Please Don't Remove Author Initials
# ------------------------------------------------------------
#
#
# Description:
# Simple Tic Tac Toe Game Implementation in Python
#
#
# ===================================================================
# This is Configuration File
# ===================================================================
#
# neural network configurations
INPUTNN = 10    # number of input-layer neurons
OUTPUTNN = 9    # number of output-layer neurons
TANHLYNN = 18   # tanh hidden-layer size
SIGMODNN = 15   # sigmoid hidden-layer size
|
import urllib.parse
import requests
def shorten_url(url):
r = requests.get(
"http://is.gd/create.php?format=simple&url={}".format(urllib.parse.quote(url))
)
r.raise_for_status()
return r.text
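# Hedged usage note (added for illustration): shorten_url("https://example.com")
# returns a short is.gd URL; it performs a live HTTP request, so it needs
# network access and raises requests.HTTPError on a failed response.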
|
from django.db import models
from django.contrib.auth.models import User
from papel.models import Papel
from compra.models import Compra
# Create your models here.
class Negociacoes(models.Model):
usuario = models.ForeignKey(User, on_delete=models.CASCADE)
papel = models.ForeignKey(Papel, on_delete=models.CASCADE)
quantidade = models.IntegerField()
preco_medio_unitario = models.FloatField()
preco_medio_total = models.FloatField()
ultima_data_compra = models.DateField()
resultado_porcentagem = models.FloatField()
resultado = models.FloatField()
    def __str__(self):
        return f'{self.usuario}, {self.papel}, {self.quantidade}, {self.preco_medio_unitario}, {self.ultima_data_compra}'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# src/routes.py
# Developed in 2019 by Travis Kessler <Travis_Kessler@student.uml.edu>
#
# Contains website routing information
#
# 3rd party imports
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField, BooleanField
# CombustDB web app imports
from src.properties import PROPERTIES
class MoleculeSearch(FlaskForm):
''' MoleculeSearch: flask_wtf.FlaskForm child object, handles user input,
field selection, property filtering
'''
search_string = StringField('Search String')
search_field = SelectField('Search String Type', choices=[
('iupac_name', 'IUPAC Name'),
('cas', 'CAS Number'),
('molecular_formula', 'Molecular Formula'),
('isomeric_smiles', 'SMILES')
])
show_predictions = BooleanField('Show Property Predictions')
properties = list(PROPERTIES.keys())
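    # Add one BooleanField per known property so users can choose which
    # property columns to include; assigning into the class body's locals()
    # registers each field on the form class (relies on CPython behaviour).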
for prop in properties:
locals()[prop] = BooleanField(prop.upper())
submit_search = SubmitField('Search Database')
submit_csv = SubmitField('Export to CSV')
|
"""Various helper scripts for serialization."""
import re
def serialize_enum(enum_object):
serialized_enum = {}
for element in enum_object:
serialized_enum[element.name] = element.value
return serialized_enum
def clean_input_string(line: str) -> str:
return re.sub('[!@#$&?%*+:;,/]', '', line)
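# Hedged usage sketch (added for illustration, not part of the original module);
# Color is a throwaway enum defined only for this example.
if __name__ == "__main__":
    from enum import Enum
    class Color(Enum):
        RED = 1
        GREEN = 2
    print(serialize_enum(Color))             # {'RED': 1, 'GREEN': 2}
    print(clean_input_string("hi! there?"))  # 'hi there'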
|
from threepy.core import *
from threepy.geometry import *
from threepy.material import *
class AxesHelper(Mesh):
def __init__(self, axisLength=1, lineWidth=4):
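        # Three line segments along +x, +y and +z; each axis shades from a
        # darker colour at the origin to a brighter one at the tip
        # (x = red, y = green, z = blue).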
vertexPositionData = [[0, 0, 0], [axisLength, 0, 0], [0, 0, 0],
[0, axisLength, 0], [0, 0, 0], [0, 0, axisLength]]
vertexColorData = [[0.5, 0, 0], [1, 0.2, 0.2], [0, 0.5, 0],
[0.2, 1, 0.2], [0, 0, 0.5], [0.2, 0.2, 1]]
geo = LineGeometry(vertexPositionData)
geo.setAttribute("vec3", "vertexColor", vertexColorData)
mat = LineSegmentMaterial(lineWidth=lineWidth, useVertexColors=True)
# initialize the mesh
super().__init__(geo, mat)
|
#!/usr/bin/env python3
"""RNA-Seq SE(dUTP)/PE(dUTP), ChIP-Seq SE/PE script produces jobs for four workflows"""
import os
import datetime
import sys
from json import dumps, loads
from collections import OrderedDict
import logging
import decimal
from .utils import biowardrobe_settings, remove_not_set_inputs
from .constants import (STAR_INDICES,
BOWTIE_INDICES,
CHR_LENGTH_GENERIC_TSV,
ANNOTATIONS,
ANNOTATION_GENERIC_TSV)
_logger = logging.getLogger(__name__)
def get_biowardrobe_data(cursor, biowardrobe_uid):
"""Generate and export job file to a specific folder"""
_settings = biowardrobe_settings(cursor)
_sql = f"""select
e.etype, e.workflow, e.template, e.upload_rules,
g.db, g.findex, g.annotation, g.annottable, g.genome, g.gsize as genome_size,
l.uid, l.forcerun, l.url, l.params, l.deleted,
COALESCE(l.trim5,0) as clip_5p_end, COALESCE(l.trim3,0) as clip_3p_end,
COALESCE(fragmentsizeexp,0) as exp_fragment_size, COALESCE(fragmentsizeforceuse,0) as force_fragment_size,
COALESCE(l.rmdup,0) as remove_duplicates,
COALESCE(control,0) as control, COALESCE(control_id,'') as control_id,
COALESCE(a.properties,0) as broad_peak,
COALESCE(w.email,'') as email
from labdata l
inner join (experimenttype e,genome g ) ON (e.id=experimenttype_id and g.id=genome_id)
LEFT JOIN (antibody a) ON (l.antibody_id=a.id)
LEFT JOIN (worker w) ON (l.worker_id=w.id)
where l.uid='{biowardrobe_uid}' """
# COALESCE(egroup_id, '') <> '' and COALESCE(name4browser, '') <> ''
_logger.debug(f"SQL: {_sql}")
cursor.execute(_sql)
row = cursor.fetchone()
if not row:
return None
def norm_path(path):
return os.path.abspath(os.path.normpath(os.path.normcase(path)))
kwargs = row
kwargs.update({
"pair": ('pair' in row['etype']),
"dUTP": ('dUTP' in row['etype']),
"forcerun": (int(row['forcerun']) == 1),
"spike": ('spike' in row['genome']),
"force_fragment_size": (int(row['force_fragment_size']) == 1),
"broad_peak": (int(row['broad_peak']) == 2),
"remove_duplicates": (int(row['remove_duplicates']) == 1),
"params": row['params'] if row['params'] else '{}',
"raw_data": norm_path("/".join((_settings['wardrobe'], _settings['preliminary']))),
"upload": norm_path("/".join((_settings['wardrobe'], _settings['upload']))),
"indices": norm_path("/".join((_settings['wardrobe'], _settings['indices']))),
"threads": _settings['maxthreads'],
"experimentsdb": _settings['experimentsdb']
})
kwargs.update({
# TODO: move extension into database
"fastq_file_upstream": norm_path("/".join((kwargs["raw_data"], kwargs["uid"], kwargs["uid"] + '.fastq.bz2'))),
"fastq_file_downstream": norm_path("/".join((kwargs["raw_data"], kwargs["uid"], kwargs["uid"] + '_2.fastq.bz2'))),
"star_indices_folder": norm_path("/".join((kwargs["indices"], STAR_INDICES, kwargs["findex"]))),
"bowtie_indices_folder": norm_path("/".join((kwargs["indices"], BOWTIE_INDICES, kwargs["findex"]))),
"bowtie_indices_folder_ribo": norm_path("/".join((kwargs["indices"], BOWTIE_INDICES, kwargs["findex"] + "_ribo"))),
"chrom_length": norm_path("/".join((kwargs["indices"], BOWTIE_INDICES, kwargs["findex"], CHR_LENGTH_GENERIC_TSV))),
"annotation_input_file": norm_path("/".join((kwargs["indices"], ANNOTATIONS, kwargs["findex"],
ANNOTATION_GENERIC_TSV))),
"exclude_chr": "control" if kwargs["spike"] else "",
"output_folder": norm_path("/".join((kwargs["raw_data"], kwargs["uid"]))),
"control_file": norm_path("/".join((kwargs["raw_data"], kwargs["control_id"], kwargs["control_id"] + '.bam')))
if kwargs['control_id'] else None
})
#### Uncomment
# if not os.path.isfile(kwargs["fastq_file_upstream"]) or (
# kwargs['pair'] and not os.path.isfile(kwargs["fastq_file_downstream"])):
# raise BiowFileNotFoundException(kwargs["uid"])
kwargs = {key: (value if not isinstance(value, decimal.Decimal) else int(value)) for key, value in kwargs.items()}
filled_job_object = remove_not_set_inputs(loads(kwargs['template'].
replace('\n', ' ').format(**kwargs).
replace("'True'", 'true').replace("'False'", 'false').
replace('"True"', 'true').replace('"False"', 'false')))
filled_job = OrderedDict(sorted(filled_job_object.items()))
kwargs['job'] = filled_job
_logger.info("Result: \n{}".format(dumps(kwargs, indent=4)))
return kwargs
|
import sys
sys.path.append('../../')
from cortstim.edp.loaders.dataset.clinical.excel_meta import ExcelReader
def load_clinical_df(excelfilepath):
excelreader = ExcelReader(filepath=excelfilepath)
excelreader.read_formatted_df(excelfilepath)
clindf = excelreader.ieegdf.clindf
return clindf
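# Hedged usage note (added for illustration; the path below is hypothetical):
# clindf = load_clinical_df("/path/to/clinical_metadata.xlsx") returns the
# clinical DataFrame parsed by ExcelReader.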
|
#!python3
""" Main Meshtastic
"""
import argparse
import platform
import logging
import os
import sys
import time
import yaml
from pubsub import pub
import pyqrcode
import pkg_resources
import meshtastic.util
import meshtastic.test
from meshtastic import remote_hardware
from meshtastic.ble_interface import BLEInterface
from meshtastic import portnums_pb2, channel_pb2, radioconfig_pb2
from meshtastic.globals import Globals
from meshtastic.__init__ import BROADCAST_ADDR
def onReceive(packet, interface):
"""Callback invoked when a packet arrives"""
our_globals = Globals.getInstance()
args = our_globals.get_args()
try:
d = packet.get('decoded')
logging.debug(f'in onReceive() d:{d}')
# Exit once we receive a reply
if args and args.sendtext and packet["to"] == interface.myInfo.my_node_num and d["portnum"] == portnums_pb2.PortNum.TEXT_MESSAGE_APP:
interface.close() # after running command then exit
# Reply to every received message with some stats
if args and args.reply:
msg = d.get('text')
if msg:
rxSnr = packet['rxSnr']
hopLimit = packet['hopLimit']
print(f"message: {msg}")
reply = f"got msg \'{msg}\' with rxSnr: {rxSnr} and hopLimit: {hopLimit}"
print("Sending reply: ", reply)
interface.sendText(reply)
except Exception as ex:
print(f'Warning: There is no field {ex} in the packet.')
def onConnection(interface, topic=pub.AUTO_TOPIC): # pylint: disable=W0613
"""Callback invoked when we connect/disconnect from a radio"""
print(f"Connection changed: {topic.getName()}")
def getPref(attributes, name):
"""Get a channel or preferences value"""
camel_name = meshtastic.util.snake_to_camel(name)
# Note: protobufs has the keys in snake_case, so snake internally
snake_name = meshtastic.util.camel_to_snake(name)
logging.debug(f'snake_name:{snake_name} camel_name:{camel_name}')
logging.debug(f'use camel:{Globals.getInstance().get_camel_case()}')
objDesc = attributes.DESCRIPTOR
field = objDesc.fields_by_name.get(snake_name)
if not field:
if Globals.getInstance().get_camel_case():
print(f"{attributes.__class__.__name__} does not have an attribute called {camel_name}, so you can not get it.")
else:
print(f"{attributes.__class__.__name__} does not have an attribute called {snake_name}, so you can not get it.")
print(f"Choices in sorted order are:")
names = []
for f in objDesc.fields:
tmp_name = f'{f.name}'
if Globals.getInstance().get_camel_case():
tmp_name = meshtastic.util.snake_to_camel(tmp_name)
names.append(tmp_name)
for temp_name in sorted(names):
print(f" {temp_name}")
return
# read the value
val = getattr(attributes, snake_name)
if Globals.getInstance().get_camel_case():
print(f"{camel_name}: {str(val)}")
logging.debug(f"{camel_name}: {str(val)}")
else:
print(f"{snake_name}: {str(val)}")
logging.debug(f"{snake_name}: {str(val)}")
def setPref(attributes, name, valStr):
"""Set a channel or preferences value"""
snake_name = meshtastic.util.camel_to_snake(name)
camel_name = meshtastic.util.snake_to_camel(name)
logging.debug(f'snake_name:{snake_name}')
logging.debug(f'camel_name:{camel_name}')
objDesc = attributes.DESCRIPTOR
field = objDesc.fields_by_name.get(snake_name)
if not field:
if Globals.getInstance().get_camel_case():
print(f"{attributes.__class__.__name__} does not have an attribute called {camel_name}, so you can not set it.")
else:
print(f"{attributes.__class__.__name__} does not have an attribute called {snake_name}, so you can not set it.")
print(f"Choices in sorted order are:")
names = []
for f in objDesc.fields:
tmp_name = f'{f.name}'
if Globals.getInstance().get_camel_case():
tmp_name = meshtastic.util.snake_to_camel(tmp_name)
names.append(tmp_name)
for temp_name in sorted(names):
print(f" {temp_name}")
return
val = meshtastic.util.fromStr(valStr)
logging.debug(f'valStr:{valStr} val:{val}')
enumType = field.enum_type
# pylint: disable=C0123
if enumType and type(val) == str:
# We've failed so far to convert this string into an enum, try to find it by reflection
e = enumType.values_by_name.get(val)
if e:
val = e.number
else:
if Globals.getInstance().get_camel_case():
print(f"{camel_name} does not have an enum called {val}, so you can not set it.")
else:
print(f"{snake_name} does not have an enum called {val}, so you can not set it.")
print(f"Choices in sorted order are:")
names = []
for f in enumType.values:
# Note: We must use the value of the enum (regardless if camel or snake case)
names.append(f'{f.name}')
for temp_name in sorted(names):
print(f" {temp_name}")
return
# note: 'ignore_incoming' is a repeating field
if snake_name != 'ignore_incoming':
try:
setattr(attributes, snake_name, val)
except TypeError:
# The setter didn't like our arg type guess try again as a string
setattr(attributes, snake_name, valStr)
else:
if val == 0:
# clear values
print("Clearing ignore_incoming list")
del attributes.ignore_incoming[:]
else:
print(f"Adding '{val}' to the ignore_incoming list")
attributes.ignore_incoming.extend([val])
if Globals.getInstance().get_camel_case():
print(f"Set {camel_name} to {valStr}")
else:
print(f"Set {snake_name} to {valStr}")
def onConnected(interface):
"""Callback invoked when we connect to a radio"""
closeNow = False # Should we drop the connection after we finish?
try:
our_globals = Globals.getInstance()
args = our_globals.get_args()
# do not print this line if we are exporting the config
if not args.export_config:
print("Connected to radio")
if args.setlat or args.setlon or args.setalt:
closeNow = True
alt = 0
lat = 0.0
lon = 0.0
prefs = interface.localNode.radioConfig.preferences
if args.setalt:
alt = int(args.setalt)
prefs.fixed_position = True
print(f"Fixing altitude at {alt} meters")
if args.setlat:
lat = float(args.setlat)
prefs.fixed_position = True
print(f"Fixing latitude at {lat} degrees")
if args.setlon:
lon = float(args.setlon)
prefs.fixed_position = True
print(f"Fixing longitude at {lon} degrees")
print("Setting device position")
# can include lat/long/alt etc: latitude = 37.5, longitude = -122.1
interface.sendPosition(lat, lon, alt)
interface.localNode.writeConfig()
elif not args.no_time:
# We normally provide a current time to the mesh when we connect
interface.sendPosition()
if args.set_owner:
closeNow = True
print(f"Setting device owner to {args.set_owner}")
interface.getNode(args.dest).setOwner(args.set_owner)
if args.set_owner_short:
closeNow = True
print(f"Setting device owner short to {args.set_owner_short}")
interface.getNode(args.dest).setOwner(long_name=None, short_name=args.set_owner_short)
# TODO: add to export-config and configure
if args.set_canned_message:
closeNow = True
print(f"Setting canned plugin message to {args.set_canned_message}")
interface.getNode(args.dest).set_canned_message(args.set_canned_message)
if args.pos_fields:
# If --pos-fields invoked with args, set position fields
closeNow = True
prefs = interface.getNode(args.dest).radioConfig.preferences
allFields = 0
try:
for field in args.pos_fields:
v_field = radioconfig_pb2.PositionFlags.Value(field)
allFields |= v_field
except ValueError:
print("ERROR: supported position fields are:")
print(radioconfig_pb2.PositionFlags.keys())
print("If no fields are specified, will read and display current value.")
else:
print(f"Setting position fields to {allFields}")
setPref(prefs, 'position_flags', f'{allFields:d}')
print("Writing modified preferences to device")
interface.getNode(args.dest).writeConfig()
elif args.pos_fields is not None:
# If --pos-fields invoked without args, read and display current value
closeNow = True
prefs = interface.getNode(args.dest).radioConfig.preferences
fieldNames = []
for bit in radioconfig_pb2.PositionFlags.values():
if prefs.position_flags & bit:
fieldNames.append(radioconfig_pb2.PositionFlags.Name(bit))
print(' '.join(fieldNames))
if args.set_team:
closeNow = True
try:
v_team = meshtastic.mesh_pb2.Team.Value(args.set_team.upper())
except ValueError:
v_team = 0
print(f"ERROR: Team \'{args.set_team}\' not found.")
print("Try a team name from the sorted list below, or use 'CLEAR' for unaffiliated:")
print(sorted(meshtastic.mesh_pb2.Team.keys()))
else:
print(f"Setting team to {meshtastic.mesh_pb2.Team.Name(v_team)}")
interface.getNode(args.dest).setOwner(team=v_team)
if args.set_ham:
closeNow = True
print(f"Setting Ham ID to {args.set_ham} and turning off encryption")
interface.getNode(args.dest).setOwner(args.set_ham, is_licensed=True)
# Must turn off encryption on primary channel
interface.getNode(args.dest).turnOffEncryptionOnPrimaryChannel()
if args.reboot:
closeNow = True
interface.getNode(args.dest).reboot()
if args.shutdown:
closeNow = True
interface.getNode(args.dest).shutdown()
if args.sendtext:
closeNow = True
channelIndex = 0
if args.ch_index is not None:
channelIndex = int(args.ch_index)
ch = interface.localNode.getChannelByChannelIndex(channelIndex)
logging.debug(f'ch:{ch}')
if ch and ch.role != channel_pb2.Channel.Role.DISABLED:
print(f"Sending text message {args.sendtext} to {args.dest} on channelIndex:{channelIndex}")
interface.sendText(args.sendtext, args.dest, wantAck=True, channelIndex=channelIndex)
else:
meshtastic.util.our_exit(f"Warning: {channelIndex} is not a valid channel. Channel must not be DISABLED.")
if args.sendping:
payload = str.encode("test string")
print(f"Sending ping message to {args.dest}")
interface.sendData(payload, args.dest, portNum=portnums_pb2.PortNum.REPLY_APP,
wantAck=True, wantResponse=True)
if args.gpio_wrb or args.gpio_rd or args.gpio_watch:
if args.dest == BROADCAST_ADDR:
meshtastic.util.our_exit("Warning: Must use a destination node ID.")
else:
rhc = remote_hardware.RemoteHardwareClient(interface)
if args.gpio_wrb:
bitmask = 0
bitval = 0
for wrpair in (args.gpio_wrb or []):
bitmask |= 1 << int(wrpair[0])
bitval |= int(wrpair[1]) << int(wrpair[0])
print(f"Writing GPIO mask 0x{bitmask:x} with value 0x{bitval:x} to {args.dest}")
rhc.writeGPIOs(args.dest, bitmask, bitval)
closeNow = True
if args.gpio_rd:
bitmask = int(args.gpio_rd, 16)
print(f"Reading GPIO mask 0x{bitmask:x} from {args.dest}")
interface.mask = bitmask
rhc.readGPIOs(args.dest, bitmask, None)
# wait up to X seconds for a response
for _ in range(10):
time.sleep(1)
if interface.gotResponse:
break
logging.debug(f'end of gpio_rd')
if args.gpio_watch:
bitmask = int(args.gpio_watch, 16)
print(f"Watching GPIO mask 0x{bitmask:x} from {args.dest}. Press ctrl-c to exit")
while True:
rhc.watchGPIOs(args.dest, bitmask)
time.sleep(1)
# handle settings
if args.set:
closeNow = True
prefs = interface.getNode(args.dest).radioConfig.preferences
# Handle the int/float/bool arguments
for pref in args.set:
setPref(prefs, pref[0], pref[1])
print("Writing modified preferences to device")
interface.getNode(args.dest).writeConfig()
if args.configure:
with open(args.configure[0], encoding='utf8') as file:
configuration = yaml.safe_load(file)
closeNow = True
if 'owner' in configuration:
print(f"Setting device owner to {configuration['owner']}")
interface.getNode(args.dest).setOwner(configuration['owner'])
if 'owner_short' in configuration:
print(f"Setting device owner short to {configuration['owner_short']}")
interface.getNode(args.dest).setOwner(long_name=None, short_name=configuration['owner_short'])
if 'ownerShort' in configuration:
print(f"Setting device owner short to {configuration['ownerShort']}")
interface.getNode(args.dest).setOwner(long_name=None, short_name=configuration['ownerShort'])
if 'channel_url' in configuration:
print("Setting channel url to", configuration['channel_url'])
interface.getNode(args.dest).setURL(configuration['channel_url'])
if 'channelUrl' in configuration:
print("Setting channel url to", configuration['channelUrl'])
interface.getNode(args.dest).setURL(configuration['channelUrl'])
if 'location' in configuration:
alt = 0
lat = 0.0
lon = 0.0
prefs = interface.localNode.radioConfig.preferences
if 'alt' in configuration['location']:
alt = int(configuration['location']['alt'])
prefs.fixed_position = True
print(f"Fixing altitude at {alt} meters")
if 'lat' in configuration['location']:
lat = float(configuration['location']['lat'])
prefs.fixed_position = True
print(f"Fixing latitude at {lat} degrees")
if 'lon' in configuration['location']:
lon = float(configuration['location']['lon'])
prefs.fixed_position = True
print(f"Fixing longitude at {lon} degrees")
print("Setting device position")
interface.sendPosition(lat, lon, alt)
interface.localNode.writeConfig()
if 'user_prefs' in configuration:
prefs = interface.getNode(args.dest).radioConfig.preferences
for pref in configuration['user_prefs']:
setPref(prefs, pref, str(configuration['user_prefs'][pref]))
print("Writing modified preferences to device")
interface.getNode(args.dest).writeConfig()
if 'userPrefs' in configuration:
prefs = interface.getNode(args.dest).radioConfig.preferences
for pref in configuration['userPrefs']:
setPref(prefs, pref, str(configuration['userPrefs'][pref]))
print("Writing modified preferences to device")
interface.getNode(args.dest).writeConfig()
if args.export_config:
# export the configuration (the opposite of '--configure')
closeNow = True
export_config(interface)
if args.seturl:
closeNow = True
interface.getNode(args.dest).setURL(args.seturl)
# handle changing channels
if args.ch_add:
closeNow = True
if len(args.ch_add) > 10:
meshtastic.util.our_exit("Warning: Channel name must be shorter. Channel not added.")
n = interface.getNode(args.dest)
ch = n.getChannelByName(args.ch_add)
if ch:
meshtastic.util.our_exit(f"Warning: This node already has a '{args.ch_add}' channel. No changes were made.")
else:
# get the first channel that is disabled (i.e., available)
ch = n.getDisabledChannel()
if not ch:
meshtastic.util.our_exit("Warning: No free channels were found")
chs = channel_pb2.ChannelSettings()
chs.psk = meshtastic.util.genPSK256()
chs.name = args.ch_add
ch.settings.CopyFrom(chs)
ch.role = channel_pb2.Channel.Role.SECONDARY
print(f"Writing modified channels to device")
n.writeChannel(ch.index)
if args.ch_del:
closeNow = True
channelIndex = our_globals.get_channel_index()
if channelIndex is None:
meshtastic.util.our_exit("Warning: Need to specify '--ch-index' for '--ch-del'.", 1)
else:
if channelIndex == 0:
meshtastic.util.our_exit("Warning: Cannot delete primary channel.", 1)
else:
print(f"Deleting channel {channelIndex}")
ch = interface.getNode(args.dest).deleteChannel(channelIndex)
ch_changes = [args.ch_vlongslow, args.ch_longslow, args.ch_longfast,
args.ch_midslow, args.ch_midfast,
args.ch_shortslow, args.ch_shortfast]
any_primary_channel_changes = any(x for x in ch_changes)
if args.ch_set or any_primary_channel_changes or args.ch_enable or args.ch_disable:
closeNow = True
channelIndex = our_globals.get_channel_index()
if channelIndex is None:
if any_primary_channel_changes:
# we assume that they want the primary channel if they're setting range values
channelIndex = 0
else:
meshtastic.util.our_exit("Warning: Need to specify '--ch-index'.", 1)
ch = interface.getNode(args.dest).channels[channelIndex]
if any_primary_channel_changes or args.ch_enable or args.ch_disable:
if channelIndex == 0 and not any_primary_channel_changes:
meshtastic.util.our_exit("Warning: Cannot enable/disable PRIMARY channel.")
if channelIndex != 0:
if any_primary_channel_changes:
meshtastic.util.our_exit("Warning: Standard channel settings can only be applied to the PRIMARY channel")
enable = True # default to enable
if args.ch_enable:
enable = True
if args.ch_disable:
enable = False
def setSimpleChannel(modem_config):
"""Set one of the simple modem_config only based channels"""
# Completely new channel settings
chs = channel_pb2.ChannelSettings()
chs.modem_config = modem_config
chs.psk = bytes([1]) # Use default channel psk 1
ch.settings.CopyFrom(chs)
# handle the simple channel set commands
if args.ch_vlongslow:
setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.VLongSlow)
if args.ch_longslow:
setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.LongSlow)
if args.ch_longfast:
setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.LongFast)
if args.ch_midslow:
setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.MidSlow)
if args.ch_midfast:
setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.MidFast)
if args.ch_shortslow:
setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.ShortSlow)
if args.ch_shortfast:
setSimpleChannel(channel_pb2.ChannelSettings.ModemConfig.ShortFast)
# Handle the channel settings
for pref in (args.ch_set or []):
if pref[0] == "psk":
ch.settings.psk = meshtastic.util.fromPSK(pref[1])
else:
setPref(ch.settings, pref[0], pref[1])
enable = True # If we set any pref, assume the user wants to enable the channel
if enable:
ch.role = channel_pb2.Channel.Role.PRIMARY if (
channelIndex == 0) else channel_pb2.Channel.Role.SECONDARY
else:
ch.role = channel_pb2.Channel.Role.DISABLED
print(f"Writing modified channels to device")
interface.getNode(args.dest).writeChannel(channelIndex)
if args.get_canned_message:
closeNow = True
print("")
interface.getNode(args.dest).get_canned_message()
if args.info:
print("")
# If we aren't trying to talk to our local node, don't show it
if args.dest == BROADCAST_ADDR:
interface.showInfo()
print("")
interface.getNode(args.dest).showInfo()
closeNow = True # FIXME, for now we leave the link up while talking to remote nodes
print("")
if args.get:
closeNow = True
prefs = interface.getNode(args.dest).radioConfig.preferences
# Handle the int/float/bool arguments
for pref in args.get:
getPref(prefs, pref[0])
print("Completed getting preferences")
if args.nodes:
closeNow = True
interface.showNodes()
if args.qr:
closeNow = True
url = interface.localNode.getURL(includeAll=False)
print(f"Primary channel URL {url}")
qr = pyqrcode.create(url)
print(qr.terminal())
have_tunnel = platform.system() == 'Linux'
if have_tunnel and args.tunnel:
# pylint: disable=C0415
from . import tunnel
# Even if others said we could close, stay open if the user asked for a tunnel
closeNow = False
if interface.noProto:
logging.warning(f"Not starting Tunnel - disabled by noProto")
else:
tunnel.Tunnel(interface, subnet=args.tunnel_net)
# if the user didn't ask for serial debugging output, we might want to exit after we've done our operation
if (not args.seriallog) and closeNow:
interface.close() # after running command then exit
except Exception as ex:
print(f"Aborting due to: {ex}")
interface.close() # close the connection now, so that our app exits
def onNode(node):
"""Callback invoked when the node DB changes"""
print(f"Node changed: {node}")
def subscribe():
"""Subscribe to the topics the user probably wants to see, prints output to stdout"""
pub.subscribe(onReceive, "meshtastic.receive")
# pub.subscribe(onConnection, "meshtastic.connection")
# We now call onConnected from main
# pub.subscribe(onConnected, "meshtastic.connection.established")
# pub.subscribe(onNode, "meshtastic.node")
def export_config(interface):
"""used in--export-config"""
owner = interface.getLongName()
owner_short = interface.getShortName()
channel_url = interface.localNode.getURL()
myinfo = interface.getMyNodeInfo()
pos = myinfo.get('position')
lat = None
lon = None
alt = None
if pos:
lat = pos.get('latitude')
lon = pos.get('longitude')
alt = pos.get('altitude')
config = "# start of Meshtastic configure yaml\n"
if owner:
config += f"owner: {owner}\n\n"
if owner_short:
config += f"owner_short: {owner_short}\n\n"
if channel_url:
if Globals.getInstance().get_camel_case():
config += f"channelUrl: {channel_url}\n\n"
else:
config += f"channel_url: {channel_url}\n\n"
if lat or lon or alt:
config += "location:\n"
if lat:
config += f" lat: {lat}\n"
if lon:
config += f" lon: {lon}\n"
if alt:
config += f" alt: {alt}\n"
config += "\n"
preferences = f'{interface.localNode.radioConfig.preferences}'
prefs = preferences.splitlines()
if prefs:
if Globals.getInstance().get_camel_case():
config += "userPrefs:\n"
else:
config += "user_prefs:\n"
for pref in prefs:
if Globals.getInstance().get_camel_case():
# Note: This may not work if the value has '_'
config += f" {meshtastic.util.snake_to_camel(meshtastic.util.quoteBooleans(pref))}\n"
else:
config += f" {meshtastic.util.quoteBooleans(pref)}\n"
print(config)
return config
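# For reference, the YAML produced by '--export-config' (and accepted back by
# '--configure') looks roughly like the sketch below. The values are
# illustrative only, and 'ls_secs' is just one example preference name:
#
#   owner: Example Node
#   owner_short: EX1
#   channel_url: <device channel URL>
#   location:
#     lat: 35.88
#     lon: -93.99
#     alt: 304
#   user_prefs:
#     ls_secs: 300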
def common():
"""Shared code for all of our command line wrappers"""
logfile = None
our_globals = Globals.getInstance()
args = our_globals.get_args()
parser = our_globals.get_parser()
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO,
format='%(levelname)s file:%(filename)s %(funcName)s line:%(lineno)s %(message)s')
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
meshtastic.util.our_exit("", 1)
else:
if args.support:
meshtastic.util.support_info()
meshtastic.util.our_exit("", 0)
if args.ch_index is not None:
channelIndex = int(args.ch_index)
our_globals.set_channel_index(channelIndex)
if not args.dest:
args.dest = BROADCAST_ADDR
if not args.seriallog:
if args.noproto:
args.seriallog = "stdout"
else:
args.seriallog = "none" # assume no debug output in this case
if args.deprecated is not None:
logging.error(
'This option has been deprecated, see help below for the correct replacement...')
parser.print_help(sys.stderr)
meshtastic.util.our_exit('', 1)
elif args.test:
result = meshtastic.test.testAll()
if not result:
meshtastic.util.our_exit("Warning: Test was not successful.")
else:
meshtastic.util.our_exit("Test was a success.", 0)
else:
if args.seriallog == "stdout":
logfile = sys.stdout
elif args.seriallog == "none":
args.seriallog = None
logging.debug("Not logging serial output")
logfile = None
else:
logging.info(f"Logging serial output to {args.seriallog}")
# Note: using "line buffering"
# pylint: disable=R1732
logfile = open(args.seriallog, 'w+', buffering=1, encoding='utf8')
our_globals.set_logfile(logfile)
subscribe()
if args.ble:
client = BLEInterface(args.ble, debugOut=logfile, noProto=args.noproto)
elif args.host:
client = meshtastic.tcp_interface.TCPInterface(args.host, debugOut=logfile, noProto=args.noproto)
else:
try:
client = meshtastic.serial_interface.SerialInterface(args.port, debugOut=logfile, noProto=args.noproto)
except PermissionError as ex:
username = os.getlogin()
message = "Permission Error:\n"
message += " Need to add yourself to the 'dialout' group by running:\n"
message += f" sudo usermod -a -G dialout {username}\n"
message += " After running that command, log out and re-login for it to take effect.\n"
message += f"Error was:{ex}"
meshtastic.util.our_exit(message)
# We assume client is fully connected now
onConnected(client)
have_tunnel = platform.system() == 'Linux'
if args.noproto or args.reply or (have_tunnel and args.tunnel): # loop until someone presses ctrlc
while True:
time.sleep(1000)
# don't call exit, background threads might be running still
# sys.exit(0)
def initParser():
"""Initialize the command line argument parsing."""
our_globals = Globals.getInstance()
parser = our_globals.get_parser()
args = our_globals.get_args()
parser.add_argument(
"--configure",
help="Specify a path to a yaml(.yml) file containing the desired settings for the connected device.",
action='append')
parser.add_argument(
"--export-config",
help="Export the configuration in yaml(.yml) format.",
action='store_true')
parser.add_argument(
"--port",
help="The port the Meshtastic device is connected to, i.e. /dev/ttyUSB0. If unspecified, we'll try to find it.",
default=None)
parser.add_argument(
"--host",
help="The hostname/ipaddr of the device to connect to (over TCP)",
default=None)
parser.add_argument(
"--seriallog",
help="Log device serial output to either 'stdout', 'none' or a filename to append to.")
parser.add_argument("--info", help="Read and display the radio config information",
action="store_true")
parser.add_argument("--get-canned-message", help="Show the canned message plugin message",
action="store_true")
parser.add_argument("--nodes", help="Print Node List in a pretty formatted table",
action="store_true")
parser.add_argument("--qr", help="Display the QR code that corresponds to the current channel",
action="store_true")
parser.add_argument(
"--get", help=("Get a preferences field. Use an invalid field such as '0' to get a list of all fields."
" Can use either snake_case or camelCase format. (ex: 'ls_secs' or 'lsSecs')"),
nargs=1, action='append')
parser.add_argument(
"--set", help="Set a preferences field. Can use either snake_case or camelCase format. (ex: 'ls_secs' or 'lsSecs')", nargs=2, action='append')
parser.add_argument(
"--seturl", help="Set a channel URL", action="store")
parser.add_argument(
"--ch-index", help="Set the specified channel index. Channels start at 0 (0 is the PRIMARY channel).", action="store")
parser.add_argument(
"--ch-add", help="Add a secondary channel, you must specify a channel name", default=None)
parser.add_argument(
"--ch-del", help="Delete the ch-index channel", action='store_true')
parser.add_argument(
"--ch-enable", help="Enable the specified channel", action="store_true", dest="ch_enable", default=False)
# Note: We are doing a double negative here (Do we want to disable? If ch_disable==True, then disable.)
parser.add_argument(
"--ch-disable", help="Disable the specified channel", action="store_true", dest="ch_disable", default=False)
parser.add_argument(
"--ch-set", help=("Set a channel parameter. To see channel settings available:'--ch-set all all --ch-index 0'. "
"Can set the 'psk' using this command. To disable encryption on primary channel:'--ch-set psk none --ch-index 0'. "
"To set encryption with a new random key on second channel:'--ch-set psk random --ch-index 1'. "
"To set encryption back to the default:'--ch-set default --ch-index 0'. To set encryption with your "
"own key: '--ch-set psk 0x1a1a1a1a2b2b2b2b1a1a1a1a2b2b2b2b1a1a1a1a2b2b2b2b1a1a1a1a2b2b2b2b --ch-index 0'."),
nargs=2, action='append')
parser.add_argument(
"--ch-vlongslow", help="Change to the very long-range and slow channel", action='store_true')
parser.add_argument(
"--ch-longslow", help="Change to the long-range and slow channel", action='store_true')
parser.add_argument(
"--ch-longfast", help="Change to the long-range and fast channel", action='store_true')
parser.add_argument(
"--ch-midslow", help="Change to the mid-range and slow channel", action='store_true')
parser.add_argument(
"--ch-midfast", help="Change to the mid-range and fast channel", action='store_true')
parser.add_argument(
"--ch-shortslow", help="Change to the short-range and slow channel", action='store_true')
parser.add_argument(
"--ch-shortfast", help="Change to the short-range and fast channel", action='store_true')
parser.add_argument(
"--set-owner", help="Set device owner name", action="store")
parser.add_argument(
"--set-canned-message", help="Set the canned messages plugin message (up to 1000 characters).", action="store")
parser.add_argument(
"--set-owner-short", help="Set device owner short name", action="store")
parser.add_argument(
"--set-team", help="Set team affiliation (an invalid team will list valid values)", action="store")
parser.add_argument(
"--set-ham", help="Set licensed Ham ID and turn off encryption", action="store")
parser.add_argument(
"--dest", help="The destination node id for any sent commands, if not set '^all' or '^local' is assumed as appropriate", default=None)
parser.add_argument(
"--sendtext", help="Send a text message. Can specify a destination '--dest' and/or channel index '--ch-index'.")
parser.add_argument(
"--sendping", help="Send a ping message (which requests a reply)", action="store_true")
parser.add_argument(
"--reboot", help="Tell the destination node to reboot", action="store_true")
parser.add_argument(
"--shutdown", help="Tell the destination node to shutdown", action="store_true")
parser.add_argument(
"--reply", help="Reply to received messages",
action="store_true")
parser.add_argument(
"--gpio-wrb", nargs=2, help="Set a particular GPIO # to 1 or 0", action='append')
parser.add_argument(
"--gpio-rd", help="Read from a GPIO mask (ex: '0x10')")
parser.add_argument(
"--gpio-watch", help="Start watching a GPIO mask for changes (ex: '0x10')")
parser.add_argument(
"--no-time", help="Suppress sending the current time to the mesh", action="store_true")
parser.add_argument(
"--setalt", help="Set device altitude (allows use without GPS)")
parser.add_argument(
"--setlat", help="Set device latitude (allows use without GPS)")
parser.add_argument(
"--setlon", help="Set device longitude (allows use without GPS)")
parser.add_argument(
"--pos-fields", help="Specify fields to send when sending a position. Use no argument for a list of valid values. "\
"Can pass multiple values as a space separated list like "\
"this: '--pos-fields POS_ALTITUDE POS_ALT_MSL'",
nargs="*", action="store")
parser.add_argument("--debug", help="Show API library debug log messages",
action="store_true")
parser.add_argument("--test", help="Run stress test against all connected Meshtastic devices",
action="store_true")
parser.add_argument("--ble", help="BLE mac address to connect to (BLE is not yet supported for this tool)",
default=None)
parser.add_argument("--noproto", help="Don't start the API, just function as a dumb serial terminal.",
action="store_true")
parser.add_argument('--setchan', dest='deprecated', nargs=2, action='append',
help='Deprecated, use "--ch-set param value" instead')
parser.add_argument('--set-router', dest='deprecated',
action='store_true', help='Deprecated, use "--set is_router true" instead')
parser.add_argument('--unset-router', dest='deprecated',
action='store_false', help='Deprecated, use "--set is_router false" instead')
have_tunnel = platform.system() == 'Linux'
if have_tunnel:
parser.add_argument('--tunnel', action='store_true',
help="Create a TUN tunnel device for forwarding IP packets over the mesh")
parser.add_argument("--subnet", dest='tunnel_net',
help="Sets the local-end subnet address for the TUN IP bridge. (ex: 10.115' which is the default)",
default=None)
parser.set_defaults(deprecated=None)
the_version = pkg_resources.get_distribution("meshtastic").version
parser.add_argument('--version', action='version', version=f"{the_version}")
parser.add_argument(
"--support", action='store_true', help="Show support info (useful when troubleshooting an issue)")
args = parser.parse_args()
our_globals.set_args(args)
our_globals.set_parser(parser)
def main():
"""Perform command line meshtastic operations"""
our_globals = Globals.getInstance()
parser = argparse.ArgumentParser()
our_globals.set_parser(parser)
initParser()
common()
logfile = our_globals.get_logfile()
if logfile:
logfile.close()
def tunnelMain():
"""Run a meshtastic IP tunnel"""
our_globals = Globals.getInstance()
parser = argparse.ArgumentParser()
our_globals.set_parser(parser)
initParser()
args = our_globals.get_args()
args.tunnel = True
our_globals.set_args(args)
common()
if __name__ == "__main__":
main()
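# Typical invocations of the CLI defined above (illustrative sketch, assuming
# the package installs a 'meshtastic' console script; the preference and
# channel values are examples only):
#   meshtastic --info
#   meshtastic --set ls_secs 300
#   meshtastic --ch-set psk random --ch-index 1
#   meshtastic --sendtext 'hello mesh'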
|
from __future__ import annotations
from typing import List, Dict, Union, Generator, ItemsView, Optional, \
Tuple, Any, no_type_check
from base64 import b64encode, b64decode
from datetime import date, datetime
from . import APTypes
from .namespace import *
def dateobj(source: str) -> Union[date, datetime]:
'''Parse a `date` or `datetime` string into its corresponding class'''
try:
return date.fromisoformat(source)
except ValueError:
return datetime.fromisoformat(source)
def isbool(element: PlistElement) -> bool:
'''Returns `True` if the `element.tag` is defined inside 'NSBool' tag
definitions'''
return element.tag in (DEFAULT_KEY_IDS['true'] + DEFAULT_KEY_IDS['false'])
def isdirectory(element: PlistElement) -> bool:
'''Returns `True` if the `element.tag` is defined inside 'NSArray' and
'NSDictionary' tag definitions'''
return element.tag in (DEFAULT_KEY_IDS['array'] + DEFAULT_KEY_IDS['dict'])
class PlistElement(object):
'''
`PlistElement` is a node class that supports all default 'plist' types, such
as 'NSDictionary' and 'NSKey'. `PlistElement` is a duck typed extension of
`xml.etree.ElementTree.Element`.
Parameters
----------
PLIST_TAG : str
PLIST_TAG is used to initialize a `PlistElement` with the passed tag.
PLIST_TAG is being checked if it is defined in the current key
namespace (`DEFAULT_KEYS` global).
text : Union[int, float, str, bytes, bytearray, date, datetime] = ""
`text` holds the text of an 'xml/plist' element. `PlistElement`
prohibits the use of the parameter in directory nodes, such as
'NSDictionary' and 'NSArray'. `text` is converted to a 'base64'
string if the type is `bytes` or `bytearray`. If `text` is `date` or
`datetime`, the `isoformat` method is used to convert it to a string.
attribs : Optional[Dict[str, str]] = None
`attribs` are the parameters defined inside an 'xml/plist' tag, where
the keys and values are separated by a '='. Defaults to an empty dict.
parsedobj : bool = False
`parsedobj` is a boolean flag indicating that the `text` attribute is
going to be set later by hand. It is intended to be used only while
parsing strings or files, where the inner tag text is not known yet.
Attributes
----------
_dictitems : Dict[str, int]
`_dictitems` is used only when a `PlistElement` is a type of
'NSDictionary'. It points a string key to the index of the corresponding
`PlistElement` in the `_children` list attribute.
_children : List[PlistElement]
`_children` is a list that holds all subnodes of a directory node.
`_children` attribute is allowed only to be used if the `PlistElement`
is a type of 'NSDictionary' or 'NSArray'.
attrib : Dict[str, str]
`attrib` holds the parameters that are being defined inside an
'xml/plist' tag and where the keys and values are being separated by a
'='.
tag : str
`tag` is the tag that the current `PlistElement` holds.
text : str
`text` is the text placed between the opening and closing tag. It
cannot be used if the `PlistElement` is a directory node, such as
'NSDictionary' or 'NSArray'.
tail : str
Not used in this package, but reserved for a valid duck typed extension
of `xml.etree.ElementTree.Element`
'''
_dictitems: Dict[str, int] = {}
_children: List[PlistElement] = []
attrib : Dict[str, str] = {}
tag: str = ''
tail: str = ''
# `text` is only `str`. `date` and `datetime` are being wrapped inside to
# prevent `mypy` warnings when trying to set `.text` attribute with a
# `date` or `datetime` inside a 'NSDate' object. `__setattr__` is being
# overloaded and `date` and `datetime` objects are being converted to string
# with `isoformat` method
text: Union[str, date, datetime] = ''
def __init__(self, PLIST_TAG: str, text: APTypes = "",
attribs : Dict[str, str] = {}, parsedobj: bool = False) -> None:
if not PLIST_TAG in DEFAULT_KEYS:
raise ValueError("`%s` not in current 'namespace'" % PLIST_TAG)
if not isinstance(attribs, dict):
raise ValueError("`attribs` parameter must be `dict`, not `%s`" %
attribs.__class__.__name__)
self._dictitems = {}
self._children = []
self.attrib = attribs
super().__setattr__('tag', PLIST_TAG)
# Ignore text errors if the object is created from parsing a string/file
if parsedobj: return
self.text = text # type: ignore
def __setattr__(self, attr: str, value: APTypes) -> None:
'''Forbids changing the `tag` attribute and setting the `text`
attribute on 'NSDictionary' and 'NSArray'. The value is converted to
a 'base64' string if it is of type `bytes` or `bytearray`.
Parameters
----------
attr : str
Attribute's name that is going to be set or changed in this class
value : Union[int, float, str, bytes, bytearray, date, datetime]
Value to be set on the attribute
Raises
------
AttributeError
Raised when trying to change the `tag` attribute
SyntaxError
Raised when trying to set the `text` attribute in 'NSDictionary' and
'NSArray'
ValueError
Raised when trying to set the `text` attribute with a value whose
type is not acceptable for the current 'PlistElement' object
'''
if attr == 'text' and not value in (None, ''):
if isdirectory(self) or isbool(self):
raise SyntaxError("`%s` cannot have a `text` attribute" % self.tag)
elif self.tag in DEFAULT_KEY_IDS['date']:
if not isinstance(value, (date, datetime)):
raise ValueError("`value` parameter must be `date` or\
`datetime`, not `%s` when changing `text` attribute in a 'NSDate' object" % type(value))
value = value.isoformat()
elif self.tag in DEFAULT_KEY_IDS['int'] + DEFAULT_KEY_IDS['float']:
if not isinstance(value, (int, float, str)):
raise ValueError("`value` parameter must be `int`, `float`\
or `str` number, not `%s` when changing `text` attribute on 'NSNumber' object" % type(value))
try:
float(value)
except ValueError:
raise ValueError('`%s` is not a valid `int` or `float`' % value)
value = str(value)
elif self.tag in DEFAULT_KEY_IDS['data']:
if not isinstance(value, (bytearray, bytes)):
raise ValueError("`value` parameter must be `bytes` or\
`bytearray`, not `%s` when changing `text` attribute on 'NSData' object" % type(value))
value = b64encode(value).decode("ASCII")
elif attr == 'tag':
raise AttributeError('Cannot change the `tag` attribute. Replace\
this element instead')
super().__setattr__(attr, value)
def __repr__(self) -> str:
'''This method is called when trying to print a `PlistElement`.
Returns a string that includes class name, tag and class id.'''
return "<%s %r at %#x>" % (self.__class__.__name__, self.tag, id(self))
def __len__(self) -> int:
'''This method is called on `len(PlistElement)`. It returns
the number of sub-nodes in the `PlistElement`.'''
return len(self._children)
def __iter__(self) -> Generator[PlistElement, None, None]:
'''This method is called when iterating over a
`PlistElement` in 'for' loops. It yields its sub-nodes if they
exist.'''
return (x for x in list.__iter__(self._children))
def items(self) -> ItemsView[str, str]:
'''Method copied from `xml.etree.ElementTree.Element` that is needed in
a class for the `ElementTree.tostring` to work properly. It returns a
'zip' that is being created from `self.attrib` keys and values.'''
return self.attrib.items()
def iter(self, tag: Optional[str] = None) -> Generator[PlistElement, None, None]:
'''Method copied from `xml.etree.ElementTree.Element` that is needed in
a class for the `ElementTree.tostring` to work properly. It iterates
recursively on `PlistElement` sub-nodes if they exist.'''
if tag == "*":
tag = None
if tag is None or self.tag == tag:
yield self
for e in self._children:
yield from e.iter(tag)
def __getitem__(self, index : Union[int, str]) -> Union[PlistElement]:
'''`__getitem__` is called on `PlistElement[key]`. It returns a sub-node
if it does exist.
Parameters
----------
index : Union[int, str]
Key or index that is used to access a sub-node. If a `PlistElement`
is a type of 'NSDictionary', `index` needs to be a string key. If
a `PlistElement` is a type of 'NSArray', `index` needs to be
an integer.
Returns
-------
PlistElement
Returns the target sub-node, if it was found in the sub-node list.
Raises
------
SyntaxError
Raised when trying to access sub-nodes of a non directory
`PlistElement`.
ValueError
Raised when trying to access dict nodes by an integer index, or when
accessing array nodes by a string key. ValueError is also raised
when the `index` parameter is not of type `int` or `str`.
IndexError
Raised automatically when trying to access sub-nodes in a
'NSArray' by an integer index that is larger than the number of
sub-nodes - 1.
KeyError
Raised automatically when trying to access sub-nodes in a
'NSDictionary' by a string key that does not exist in the dict.
'''
if not isdirectory(self):
raise SyntaxError("PlistElement with tag `%s` can't have any nodes" % \
self.tag)
if isinstance(index, int):
if self.tag in DEFAULT_KEY_IDS['dict']:
raise ValueError("Can't access to dict nodes by a integer index")
return self._children[index]
elif isinstance(index, str):
if self.tag in DEFAULT_KEY_IDS['array']:
raise ValueError("Can't access to array nodes by a string key")
# Get the index of `PlistElement` value that is linked by the string key
# and return the target `PlistElement` from the subnode list by the index
return self._children[self._dictitems[index]]
else:
raise ValueError("`index` is type of `%s`, not `str` or `int`" % type(index))
def __setitem__(self, index : Union[int, str], value: PlistElement) -> None:
'''`__setitem__` is called on `PlistElement[key] = PlistElement`
It replaces an already set `PlistElement` in a 'NSDictionary' or
'NSArray'. If a key is not set in a 'NSDictionary', it creates a new
key and links the value to it. If you are trying to append new elements
to a 'NSArray', use the `PlistElement.append` method instead.
Parameters
----------
index : Union[int, str]
Key or index that is used to access the target sub-node which is
going to be replaced later.
value : PlistElement
The element that is going to be assigned to the target key/index.
Raises
------
SyntaxError
Raised when trying to set a subnode in a non directory `PlistElement`
ValueError
Raised when trying to access a sub-node in a 'NSDictionary' by an
integer index, or when accessing a sub-node in a 'NSArray' by a string
key. ValueError is also raised when the `index` parameter is not of
type `int` or `str`.
IndexError
Raised automatically when trying to set a sub-node by an integer index
in a 'NSArray' that is larger than the number of sub-nodes - 1.
'''
if not isdirectory(self):
raise SyntaxError("PlistElement with tag `%s` cannot have any nodes")
if isinstance(index, int):
if self.tag in DEFAULT_KEY_IDS['dict']:
raise ValueError("Cannot access to 'NSDictionary' nodes by int index")
self._children[index] = value
elif isinstance(index, str):
if self.tag in DEFAULT_KEY_IDS['array']:
raise ValueError("Cannot access to 'NSArray' nodes by a string key")
try:
self._children[self._dictitems[index]] = value
except KeyError:
# Create a 'NSKey' and append the value to it
self._children.append(PlistElement(DEFAULT_KEY_IDS['key'][0], index))
self._children.append(value)
# Link the index of the value to the string key
self._dictitems[index] = len(self._children) - 1
else:
raise ValueError("`index` input is a `%s`, not `int` or `str`" % type(index))
def append(self, value: Union[PlistElement, List[PlistElement], Tuple[PlistElement]]) \
-> None:
'''Appends new `PlistElement` elements into a 'NSArray'. It cannot be
used in a 'NSDictionary'.
Parameters
----------
value : Union[PlistElement, List[PlistElement], Tuple[PlistElement]]
The value/values that are going to be appended to the current
sub-node list. If trying to append multiple `PlistElement` elements,
wrap them inside a tuple or a list.
Raises
------
SyntaxError
Raised when trying to append elements to a non 'NSArray'
`PlistElement`.
ValueError
Raised when `value` is not a type of `PlistElement`,
`List[PlistElement]` or `Tuple[PlistElement]`
'''
if not self.tag in DEFAULT_KEY_IDS['array']:
raise SyntaxError("Cannot append elements to a `%s` `PlistElement`" % self.tag)
if not isinstance(value, (PlistElement, list, tuple)):
# Simple type checking. I know that you can pass non PlistElement
# in lists and tuples
raise ValueError("`value` parameter is a type of `%s`, not `PlistElement`, \
`List[PlistElement]` or `Tuple[PlistElement]`" % type(value))
if isinstance(value, PlistElement):
# A bare PlistElement is itself iterable (over its children), so wrap
# it in a list to make sure extend() appends the element itself
value = [value]
self._children.extend(value)
def pop(self, index: Union[int, str]) -> None:
'''Deletes a sub-node in a 'NSArray'. In a 'NSDictionary' both the key
and the value are deleted from the sub-node list.
Parameters
----------
index : Union[int, str]
String key or integer index of the target sub-node that is going to
be removed from the sub-node list.
Raises
------
ValueError
Raised when trying to remove the target `PlistElement` by a string
key in a 'NSArray', or when trying to remove the target element by an
integer index in a 'NSDictionary'. ValueError is also raised when the
`index` parameter is not of type `int` or `str`.
IndexError
Raised automatically when trying to delete a sub-node with an index
that is larger than the number of sub-nodes - 1.
'''
if isinstance(index, int):
if not self.tag in DEFAULT_KEY_IDS['array']:
raise ValueError("Cannot delete a child by a integer index in a\
`%s` `PlistElement`" % self.tag)
del self._children[index]
elif isinstance(index, str):
if not self.tag in DEFAULT_KEY_IDS['dict']:
raise ValueError("Cannot delete a dict value in a `%s`\
PlistElement" % self.tag)
value_index = self._dictitems[index]
# Delete the referred value
del self._children[value_index]
# Delete the key itself
del self._children[value_index - 1]
# Delete the information about the value index
self._dictitems.pop(index)
# Two children were removed, so shift the stored indices of the keys
# that came after the deleted pair to keep the lookup table consistent
self._dictitems = {k: (v - 2 if v > value_index else v) for k, v in self._dictitems.items()}
else: raise ValueError('`index` input is not an `int` or `str`')
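# Minimal usage sketch (hedged): it relies only on tag ids this module itself
# references; the concrete tag strings come from `.namespace`, which is not
# shown here, so adjust the DEFAULT_KEY_IDS lookups to the actual definitions.
#
#   root = PlistElement(DEFAULT_KEY_IDS['dict'][0])       # dictionary node
#   numbers = PlistElement(DEFAULT_KEY_IDS['array'][0])   # array node
#   numbers.append(PlistElement(DEFAULT_KEY_IDS['int'][0], 42))
#   root['numbers'] = numbers                             # creates the key and links the value
#   print(root['numbers'][0].text)                        # -> '42'
#   root.pop('numbers')                                   # removes both the key and the value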
|
"""Testing data splitter."""
import os
import tempfile
import unittest
from typing import Tuple
import pandas as pd
from pytoda.data_splitter import csv_data_splitter
from pytoda.tests.utils import TestFileContent
from pytoda.types import Files
class TestDataSplitter(unittest.TestCase):
"""Testing csv data splitting."""
def _read_dfs(self, filepaths: Files) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Read data frames from a list of files.
Args:
filepaths (Files): a list of files.
Returns:
Tuple[pd.DataFrame, ...]: a tuple of data frames.
"""
return tuple(pd.read_csv(filepath, index_col=0) for filepath in filepaths)
def test_data_splitter(self) -> None:
"""Test csv_data_splitter."""
a_content = os.linesep.join(
[
'genes,A,B,C,D',
'sample_3,9.45,4.984,7.016,8.336',
'sample_2,7.188,0.695,10.34,6.047',
'sample_1,9.25,6.133,5.047,5.6',
]
)
another_content = os.linesep.join(
[
'genes,B,C,D,E,F',
'sample_10,4.918,0.0794,1.605,3.463,10.18',
'sample_11,3.043,8.56,1.961,0.6226,5.027',
'sample_12,4.76,1.124,6.06,0.3743,11.05',
'sample_13,0.626,5.164,4.277,4.414,2.7',
]
)
# first three entries are for random splits, last three for the file split
ground_truth = [
[(6, 6), (1, 6)],
[(5, 6), (2, 6)],
[(3, 6), (4, 6)],
[(3, 4), (4, 5)],
[(3, 4), (4, 5)],
[(3, 4), (4, 5)],
]
index = 0
with tempfile.TemporaryDirectory() as directory:
for mode in ['random', 'file']:
for test_fraction in [0.1, 0.2, 0.5]:
with TestFileContent(a_content) as a_test_file:
with TestFileContent(another_content) as another_test_file:
train_filepath, test_filepath = csv_data_splitter(
[a_test_file.filename, another_test_file.filename],
directory,
'general',
mode=mode,
test_fraction=test_fraction,
)
train_df, test_df = self._read_dfs(
[train_filepath, test_filepath]
)
self.assertEqual(
ground_truth[index], [train_df.shape, test_df.shape]
)
index += 1
if __name__ == '__main__':
unittest.main()
|
# see http://influxdb.com/docs/v0.8/api/reading_and_writing_data.html
import tornado.ioloop
import tornado.web
import logging
import json
logger = logging.getLogger('docker')
class BaseHandler(tornado.web.RequestHandler):
def _handle_request_exception(self, e):
logger.error(e)
self.write_error(status=500, message=str(e))
def set_default_headers(self):
self.set_header('Content-Type', 'application/json')
def write_error(self, status=400, **kw):
self.set_status(status)
if 'message' not in kw:
if status == 405:
kw['message'] = 'Invalid HTTP method.'
else:
kw['message'] = 'Unknown error.'
self.response = kw
self.write_json()
def prepare(self):
print(self.request.method + ' ' + self.request.uri)
if self.request.body:
body = self.request.body.decode('utf8')
try:
json_data = json.loads(body)
self.request.arguments.update(json_data)
except ValueError as e:
self.send_error(400, message=str(e))
self.response = {}
def write_json(self):
output = json.dumps(self.response)
self.write(output)
self.finish()
class DatabasesHandler(BaseHandler):
def post(self):
"""Database creation
"""
self.set_status(201)
self.write('')
self.finish()
class DatabaseHandler(BaseHandler):
def delete(self):
"""Database deletion
"""
self.set_status(204)
self.write('')
self.finish()
class CatchAll(BaseHandler):
def get(self):
self.write_json()
application = tornado.web.Application([
(r"/db", DatabasesHandler),
(r"/db/.*", DatabaseHandler),
(".*", CatchAll)
])
def main():
application.listen(8086)
try:
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
pass
if __name__ == "__main__":
main()
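# Smoke test once the server is running (hedged sketch, standard library only;
# the 'foo' database name is arbitrary):
#   import urllib.request
#   req = urllib.request.Request('http://localhost:8086/db', data=b'{}', method='POST')
#   print(urllib.request.urlopen(req).status)   # expect 201 (database "created")
#   req = urllib.request.Request('http://localhost:8086/db/foo', method='DELETE')
#   print(urllib.request.urlopen(req).status)   # expect 204 (database "deleted")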
|
import contextlib
import copy
import csv
import json
import logging
import os
import re
from pathlib import Path
import click
from fuzzywuzzy import process
from utils import make_mongodb
log_format = logging.Formatter(
"[%(asctime)s][%(filename)s:%(lineno)4s - %(funcName)10s()] %(message)s"
)
handler = logging.StreamHandler()
handler.setFormatter(log_format)
logging.getLogger().addHandler(handler)
handler = logging.FileHandler("conflict.log")
handler.setFormatter(log_format)
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(logging.INFO)
@click.group()
def conflict():
pass
@conflict.command()
@click.option("-i", "--submission_id", type=int)
@click.option(
"-s", "--submission_dbcol_name", required=True, default="hotcrp:submission"
)
@click.option("-m", "--mag_dbcol_name", required=True, default="mag:paper")
@click.option("-p", "--pcmember_dbcol_name", required=True, default="hotcrp:pc")
@click.option("-f", "--force", is_flag=True)
def check_author_in_conflict(
submission_id, submission_dbcol_name, mag_dbcol_name, pcmember_dbcol_name, force
):
"""Check if a submission's author is specified as a conflict"""
sdb = make_mongodb(submission_dbcol_name)
mdb = make_mongodb(mag_dbcol_name)
pdb = make_mongodb(pcmember_dbcol_name)
all_pc = pdb.client.find()
if submission_id:
check_author_in_conflict_single(submission_id, sdb, mdb, pdb, all_pc, force)
else:
ids = sdb.client.find().distinct("_id")
for sid in ids:
check_author_in_conflict_single(
sid, sdb, mdb, pdb, copy.deepcopy(all_pc), force
)
@conflict.command()
@click.option(
"-s", "--submission_dbcol_name", required=True, default="hotcrp:submission"
)
@click.option("-p", "--pcmember_dbcol_name", required=True, default="hotcrp:pc")
def find_chair_conflict_papers(submission_dbcol_name, pcmember_dbcol_name):
"""Find papers that specify all chairs as conflicts"""
sdb = make_mongodb(submission_dbcol_name)
pdb = make_mongodb(pcmember_dbcol_name)
logger = logging.getLogger()
all_chairs = list(
pdb.client.find({"roles": {"$regex": r".*chair.*"}}, {"name": 1, "email": 1})
)
all_papers = list(sdb.client.find())
for p in all_papers:
all_conflict = True
for c in all_chairs:
if c["email"] not in p["pc_conflicts"]:
all_conflict = False
break
if all_conflict:
logger.info(f"Paper [{p['_id']:>4}] is conflict with all chairs")
def check_author_in_conflict_single(sid, sdb, mdb, pdb, all_pc, force=False):
logger = logging.getLogger()
logger.info(f"Checking paper [{sid:>4}]")
sub = sdb.client.find_one({"_id": sid})
assert sub is not None
# 1. Check by email
all_pc_email = set([p["email"] for p in copy.deepcopy(all_pc)])
for author in sub["authors"]:
if "email" not in author:
logger.error(f" Author does not have email field: {author}")
continue
email = author["email"]
if email in all_pc_email:
if email not in sub["pc_conflicts"]:
logger.error(
f" [ERROR][Detected by email] Author is PC but not declared as conflict: {author}"
)
# 2. Check by name
all_pc_name = [p["name"] for p in copy.deepcopy(all_pc)]
for author in sub["authors"]:
if "first" not in author or "last" not in author:
logger.error(f" Author does not have first/last name field: {author}")
continue
name = f"{author['first']} {author['last']}"
name_best_match = process.extractOne(name, all_pc_name, score_cutoff=90)
logger.debug(f" Best name match: {name} == {name_best_match}")
if name_best_match:
for p in copy.deepcopy(all_pc):
if p["name"] == name_best_match[0]:
pc_email = p["email"]
if pc_email not in sub["pc_conflicts"]:
logger.error(
f" [ERROR][Detected by name ] Author is PC but not declared as conflict: \n\tPC Member: {name_best_match}\n\tAuthor: {author}"
)
if "email" in author:
if author["email"] not in sub["pc_conflicts"]:
logger.info(
f" [INFO ][Detected by name ] Author is PC but using different email from PC profile: {author}"
)
if __name__ == "__main__":
conflict()
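# Example invocations (hedged: the script name is whatever this file is saved
# as, and recent click versions expose command names with dashes):
#   python conflict.py check-author-in-conflict -i 123
#   python conflict.py find-chair-conflict-papers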
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 18:02:06 2019
@author: jiahuei
Adapted from `https://github.com/ruotianluo/self-critical.pytorch/blob/master/scripts/prepro_ngrams.py`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, json, argparse, time
from six.moves import cPickle as pickle
from collections import defaultdict
from tqdm import tqdm
CURR_DIR = os.path.dirname(os.path.realpath(__file__))
pjoin = os.path.join
def precook(s, n=4, out=False):
"""
Takes a string as input and returns an object that can be given to
either cook_refs or cook_test. This is optional: cook_refs and cook_test
can take string arguments as well.
:param s: string : sentence to be converted into ngrams
:param n: int : number of ngrams for which representation is calculated
:return: term frequency vector for occurring ngrams
"""
words = s.split()
counts = defaultdict(int)
for k in xrange(1,n+1):
for i in xrange(len(words)-k+1):
ngram = tuple(words[i:i+k])
counts[ngram] += 1
return counts
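# For example, precook("a b a", n=2) returns
# {('a',): 2, ('b',): 1, ('a', 'b'): 1, ('b', 'a'): 1}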
def cook_refs(refs, n=4): ## lhuang: oracle will call with "average"
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.
:param refs: list of string : reference sentences for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (list of dict)
'''
return [precook(ref, n) for ref in refs]
def create_crefs(refs):
crefs = []
for ref in tqdm(refs, ncols=100, desc='create_crefs'):
# ref is a list of 5 captions
crefs.append(cook_refs(ref))
return crefs
def compute_doc_freq(crefs):
'''
Compute document frequency for the reference data.
This will be used to compute idf (inverse document frequency) later.
Adapted from `cider_scorer.py` in `coco_caption`.
'''
document_frequency = defaultdict(float)
for refs in tqdm(crefs, ncols=100, desc='compute_doc_freq'):
# refs, k ref captions of one image
for ngram in set([ngram for ref in refs for (ngram,count) in ref.iteritems()]):
document_frequency[ngram] += 1
# maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
return document_frequency
def get_ngrams(refs_words, wtoi, params):
"""
Calculates the n-grams and lengths
"""
#refs_idxs = []
#for ref_words in tqdm(refs_words, ncols=100, desc='Token-to-idx'):
# # `ref_words` is a list of captions for an image
# ref_idxs = []
# for caption in ref_words:
# tokens = caption.split(' ')
# idx = [wtoi.get(t, wtoi['<UNK>']) for t in tokens]
# idx = ' '.join([str(i) for i in idx])
# ref_idxs.append(idx)
# refs_idxs.append(ref_idxs)
print('\nINFO: Computing term frequency: word.')
time.sleep(0.1)
ngram_words = compute_doc_freq(create_crefs(refs_words))
print('\nINFO: Computing term frequency: indices. (SKIPPED)')
#time.sleep(0.1)
#ngram_idxs = compute_doc_freq(create_crefs(refs_idxs))
ngram_idxs = None
return ngram_words, ngram_idxs, len(refs_words)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
base_dir = os.path.dirname((os.path.dirname(CURR_DIR)))
parser.add_argument(
'--dataset_dir', type=str, default='',
help='The dataset directory.')
parser.add_argument(
'--dataset_file_pattern', type=str,
default='mscoco_{}_w5_s20_include_restval',
help='The dataset file pattern, example: `mscoco_{}_w5_s20`.')
parser.add_argument(
'--split', type=str, default='train',
help='The split for generating n-grams.')
args = parser.parse_args()
dataset = args.dataset_file_pattern.split('_')[0]
if args.dataset_dir == '':
args.dataset_dir = pjoin(base_dir, 'datasets', dataset)
# Data format: filepath,w0 w1 w2 w3 w4 ... wN
fp = pjoin(args.dataset_dir, 'captions',
args.dataset_file_pattern.format(args.split))
with open(fp + '.txt', 'r') as f:
data = [l.strip().split(',') for l in f.readlines()]
data_dict = {}
for d in data:
if d[0] not in data_dict:
data_dict[d[0]] = []
data_dict[d[0]].append(d[1].replace('<GO> ', ''))
captions_group = [v for v in data_dict.values()]
assert len(data_dict.keys()) == len(captions_group)
fp = pjoin(args.dataset_dir, 'captions',
args.dataset_file_pattern.format('wtoi'))
with open(fp + '.json', 'r') as f:
wtoi = json.load(f)
print('\nINFO: Data reading complete.')
#time.sleep(0.2)
ngram_words, ngram_idxs, ref_len = get_ngrams(captions_group, wtoi, args)
time.sleep(0.2)
print('\nINFO: Saving output files.')
fp = pjoin(args.dataset_dir, 'captions', args.dataset_file_pattern)
with open(fp.format('scst-words') + '.p', 'w') as f:
pickle.dump({'document_frequency': ngram_words, 'ref_len': ref_len},
f, pickle.HIGHEST_PROTOCOL)
#with open(fp.format('scst-idxs') + '.p', 'w') as f:
# pickle.dump({'document_frequency': ngram_idxs, 'ref_len': ref_len},
# f, pickle.HIGHEST_PROTOCOL)
print('\nINFO: Completed.')
|
#!/usr/bin/env python
# python parser module for pre-mir and mature miRNAs, guided by mirbase.org GFF3
# version 0.0.9 (1-6-2014)
# Usage MirParser.py <1:index source> <2:extraction directive> <3:output pre-mir> <4: output mature miRs> <5:mirbase GFF3>
# <6:pathToLatticeDataframe or "dummy_dataframe_path"> <7:Rcode or "dummy_plotCode"> <8:latticePDF or "dummy_latticePDF">
# <9:10:11 filePath:FileExt:FileLabel> <.. ad lib>
import sys
import subprocess
from smRtools import HandleSmRNAwindows
IndexSource = sys.argv[1]
ExtractionDirective = sys.argv[2]
if ExtractionDirective == "--do_not_extract_index":
genomeRefFormat = "fastaSource"
elif ExtractionDirective == "--extract_index":
genomeRefFormat = "bowtieIndex"
OutputPre_mirs = sys.argv[3]
OutputMature_Mirs = sys.argv[4]
GFF3_file = sys.argv[5]
lattice = sys.argv[6]
Rcode = sys.argv[7]
latticePDF = sys.argv[8]
Triplets = [sys.argv[9:][i:i + 3] for i in xrange(0, len(sys.argv[9:]), 3)]
MasterListOfGenomes = {}
for [filePath, FileExt, FileLabel] in Triplets:
print FileLabel
MasterListOfGenomes[FileLabel] = HandleSmRNAwindows(alignmentFile=filePath,
alignmentFileFormat=FileExt,
genomeRefFile=IndexSource,
genomeRefFormat=genomeRefFormat,
biosample=FileLabel)
header = ["gene"]
for [filePath, FileExt, FileLabel] in Triplets:
header.append(FileLabel)
hit_table = ["\t".join(header)] # table header: gene, sample1, sample2, sample3, etc. separated by tabulation
# read GFF3 to subinstantiate
gff3 = open(GFF3_file, "r")
lattice_dataframe = []
for line in gff3:
if line[0] == "#":
continue
gff_fields = line[:-1].split("\t")
chrom = gff_fields[0]
gff_name = gff_fields[-1].split("Name=")[-1].split(";")[0] # to isolate the GFF Name
item_upstream_coordinate = int(gff_fields[3])
item_downstream_coordinate = int(gff_fields[4])
if gff_fields[6] == "+":
item_polarity = "forward"
else:
item_polarity = "reverse"
item_line = [gff_name]
for sample in header[1:]:
count = MasterListOfGenomes[sample].instanceDict[chrom].readcount(upstream_coord=item_upstream_coordinate,
downstream_coord=item_downstream_coordinate,
polarity=item_polarity)
item_line.append(str(count))
# subtreatement for lattice
if lattice != "dummy_dataframe_path":
if ("5p" not in gff_name) and ("3p" not in gff_name):
lattice_dataframe.append(MasterListOfGenomes[sample].instanceDict[chrom].readcoverage(
upstream_coord=item_upstream_coordinate,
downstream_coord=item_downstream_coordinate,
windowName=gff_name + "_" + sample))
# end of subtreatement for lattice
hit_table.append("\t".join(item_line))
gff3.close()
Fpremirs = open(OutputPre_mirs, "w")
print >> Fpremirs, hit_table[0]
finalPreList = [i for i in sorted(hit_table[1:]) if ("5p" not in i) and ("3p" not in i)]
print >> Fpremirs, "\n".join(finalPreList)
Fpremirs.close()
Fmaturemires = open(OutputMature_Mirs, "w")
print >> Fmaturemires, hit_table[0]
finalMatureList = [i for i in sorted(hit_table[1:]) if ("5p" in i) or ("3p" in i)]
print >> Fmaturemires, "\n".join(finalMatureList)
Fmaturemires.close()
if lattice != "dummy_dataframe_path":
Flattice = open(lattice, "w")
print >> Flattice, "%s\t%s\t%s\t%s\t%s\t%s\t%s" % ("sample",
"mir",
"offset",
"offsetNorm",
"counts",
"countsNorm",
"polarity")
print >> Flattice, "\n".join(lattice_dataframe)
Flattice.close()
R_command = "Rscript " + Rcode
process = subprocess.Popen(R_command.split())
process.wait()
|
import numpy as np
from gym.envs.robotics.rotations import *
import cv2
# -------------------- Generic ----------------------------
def get_intrinsics(fovy, img_width, img_height):
# fovy = self.sim.model.cam_fovy[cam_no]
aspect = float(img_width) / img_height
fovx = 2 * np.arctan(np.tan(np.deg2rad(fovy) * 0.5) * aspect)
fovx = np.rad2deg(fovx)
cx = img_width / 2.
cy = img_height / 2.
fx = cx / np.tan(np.deg2rad(fovx / 2.))
fy = cy / np.tan(np.deg2rad(fovy / 2.))
K = np.zeros((3, 3), dtype=float)  # np.float was removed in newer NumPy releases
K[2][2] = 1
K[0][0] = fx
K[1][1] = fy
K[0][2] = cx
K[1][2] = cy
return K
def depth2xyz(depth, cam_K):
h, w = depth.shape
ymap, xmap = np.meshgrid(np.arange(w), np.arange(h))
x = ymap
y = xmap
z = depth
x = (x - cam_K[0,2]) * z / cam_K[0,0]
y = (y - cam_K[1,2]) * z / cam_K[1,1]
xyz = np.stack([x, y, z], axis=2)
return xyz
def visualize_point_cloud_from_nparray(d, c=None):
if c is not None:
if len(c.shape) == 3:
c = c.reshape(-1, 3)
if c.max() > 1:
c = c.astype(np.float64)/256
import open3d
point_cloud = open3d.PointCloud()
point_cloud.points = open3d.Vector3dVector(d)
if c is not None:
point_cloud.colors = open3d.Vector3dVector(c)
open3d.draw_geometries([point_cloud])
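# Quick sanity check for the generic helpers above (hedged sketch with
# synthetic inputs; the 45-degree FOV and 640x480 size are arbitrary):
#   K = get_intrinsics(fovy=45.0, img_width=640, img_height=480)
#   depth = np.ones((480, 640)) * 2.0              # every pixel 2 m away
#   xyz = depth2xyz(depth, K)                      # (480, 640, 3) camera-frame points
#   visualize_point_cloud_from_nparray(xyz.reshape(-1, 3))  # requires open3d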
# -------------------- MuJoCo Specific ----------------------------
def get_transformation_matrix(pos, quat):
arr = np.identity(4)
arr[:3, :3] = quat2mat(quat)
arr[3:, :3] = pos
return arr
def convert_depth(env, depth):
# Convert depth into meter
extent = env.sim.model.stat.extent
near = env.sim.model.vis.map.znear * extent
far = env.sim.model.vis.map.zfar * extent
depth_m = depth * 2 - 1
depth_m = (2 * near * far) / (far + near - depth_m * (far - near))
# Check this as well: https://github.com/deepmind/dm_control/blob/master/dm_control/mujoco/engine.py#L734
return depth_m
def get_object_point_cloud(env, depth, img):
depth = convert_depth(env, depth)
full_pc = get_point_cloud(env, depth)
obj_mask = get_obj_mask(img)
pc = full_pc[obj_mask.reshape(-1),:]
return pc
def get_point_cloud(env, depth):
# make sure to convert the raw depth image from MuJoCo using convert_depth
# output is flattened
fovy = env.sim.model.cam_fovy[env.sim.model.camera_name2id(env.camera_names[0])]
K = get_intrinsics(fovy, depth.shape[0], depth.shape[1])
pc = depth2xyz(depth, K)
return pc.reshape(-1, 3)
def get_obj_mask(img):
# Given an [n,n,3] image, output a [n,n] mask of the red object in the scene
# Assume the object is red
img = img[:, :, -1::-1] # RGB to BGR
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# lower mask (0-10)
lower_red = np.array([0, 50, 50])
upper_red = np.array([10, 255, 255])
mask0 = cv2.inRange(img_hsv, lower_red, upper_red)
# upper mask (170-180)
lower_red = np.array([170, 50, 50])
upper_red = np.array([180, 255, 255])
mask1 = cv2.inRange(img_hsv, lower_red, upper_red)
# join my masks
mask = mask0 + mask1
# # set my output img to zero everywhere except my mask
# output_img = img.copy()
# output_img[np.where(mask == 0)] = 0
# cv2.imwrite('color_img.jpg', output_img)
return mask != 0
def convert_grasp_to_graspnet_input(env, grasp, origin=None):
# From grasp pose in object frame to the grasp pose in camera frame that lies with the point cloud
cube_quat = env.sim.data.body_xquat[env.cube_body_id]
cube_pos = env.sim.data.body_xpos[env.cube_body_id]
pos, quat = env.get_global_grasp_pose(cube_pos, cube_quat, grasp[:3], grasp[3:])
cam_id = env.sim.model.camera_name2id(env.camera_names[0])
cam_pos = env.sim.model.cam_pos[cam_id]
cam_quat = env.sim.model.cam_quat[cam_id]
cam_quat = quat_mul(cam_quat, euler2quat([np.pi, 0, 0]))
pos, quat = env.get_local_grasp_pose(cam_pos, cam_quat, pos, quat)
grasp_mat = np.identity(4)
grasp_mat[:3, :3] = quat2mat(quat).transpose()
grasp_mat[3:, :3] = pos
if origin is not None:
grasp_mat[3:, :3] -= origin
return grasp_mat.transpose()
def prepare_to_visualize_in_graspnet(env, obs):
data = dict()
color = obs['agentview_image'][-1::-1]
data['image'] = color
depth = obs['agentview_depth'][-1::-1]
depth = convert_depth(env, depth)
data['depth'] = depth
obj_mask = get_obj_mask(color)
obj_mask = obj_mask.reshape(-1)
pc = get_point_cloud(env, depth)
data['smoothed_object_pc'] = pc[obj_mask, :]
fovy = env.sim.model.cam_fovy[env.sim.model.camera_name2id(env.camera_names[0])]
K = get_intrinsics(fovy, depth.shape[0], depth.shape[1])
data['intrinsics_matrix'] = K
data['grasps'] = []
for i in range(12):
env.goal_range = 'fixed_' + str(i)
grasp = env.sample_goal()
data['grasps'].append(convert_grasp_to_graspnet_input(env, grasp))
import pickle
pickle.dump(data, open('../mujoco_scene.pkl', 'wb'))
if __name__ == "__main__":
print("hello")
|
#!/usr/bin/python3
# Python script to convert RGB code in format NN,NN,NN to Hex
from math import floor
rgbSource = input("What is the RGB code to convert (format as nn,nn,nn)")
rgb = rgbSource.split(",")
red = int(rgb[0])
green = int(rgb[1])
blue = int(rgb[2])
charOne = floor(red / 16)
charTwo = red % 16
charThree = floor(green / 16)
charFour = green % 16
charFive = floor(blue / 16)
charSix = blue % 16
def numberToLetter(i):
if i == 10:
i = "A"
elif i == 11:
i = "B"
elif i == 12:
i = "C"
elif i == 13:
i = "D"
elif i == 14:
i = "E"
elif i == 15:
i = "F"
else:
i = i
return i
charOne = str(numberToLetter(charOne))
charTwo = str(numberToLetter(charTwo))
charThree = str(numberToLetter(charThree))
charFour = str(numberToLetter(charFour))
charFive = str(numberToLetter(charFive))
charSix = str(numberToLetter(charSix))
print("#" + charOne + charTwo + charThree + charFour + charFive + charSix)
|
import concurrent
from threading import Lock
from typing import List
from base_automation import BaseAutomation
from lib.annoucer.announcement import Announcement
from lib.annoucer.announcer_config import AnnouncerConfig
from lib.annoucer.media_manager import MediaManager
from lib.annoucer.player import Player
from lib.annoucer.player import create_player
from lib.core.monitored_callback import monitored_callback
class Announcer(BaseAutomation):
_dnd_player_entity_ids: List
_announcer_config: AnnouncerConfig
_players: List[Player]
_media_manager: MediaManager
_dnd_entity_id: str
_queue: List
_announcer_lock: Lock
def initialize(self):
self._announcer_config = AnnouncerConfig({
'tts_platform': self.cfg.value('tts_platform'),
'api_base_url': self.cfg.value('api_base_url'),
'api_token': self.cfg.value('api_token'),
'default_volume': self.cfg.value('default_volume'),
'enabler_entity_id': self.cfg.value('enabler_entity_id'),
'sleeping_time_entity_id': self.cfg.value('sleeping_time_entity_id'),
'library_base_filepath': self.cfg.value('library_base_filepath'),
'library_base_url_path': self.cfg.value('library_base_url_path'),
'tts_base_filepath': self.cfg.value('tts_base_filepath'),
'sound_path': self.cfg.value('sound_path'),
})
self._announcer_lock = Lock()
self._queue = []
self._dnd_player_entity_ids = []
self._media_manager = MediaManager(self, self._announcer_config)
self._players = [self._create_player(p) for p in self.cfg.value('players')]
self.listen_state(self._sleeping_time_state_change_handler, self._announcer_config.sleeping_time_entity_id)
self.disable_do_not_disturb('media_player.office')
def _create_player(self, raw_player_config):
player_volume = raw_player_config.get('volume', {})
raw_player_config['volume'] = {**self._announcer_config.default_volume, **player_volume}
return create_player(self, raw_player_config, self._media_manager)
@monitored_callback
def _sleeping_time_state_change_handler(self, entity, attribute, old, new, kwargs):
self._update_player_volumes()
def _update_player_volumes(self):
for player in self._players:
for player_entity_id in player.player_entity_ids:
volume_mode = self._figure_volume_mode(player_entity_id)
player.update_player_volume(player_entity_id, volume_mode)
def _figure_volume_mode(self, player_entity_id):
if player_entity_id in self._dnd_player_entity_ids:
return 'dnd'
if self.get_state(self._announcer_config.sleeping_time_entity_id) == 'on':
return 'sleeping'
return 'regular'
def enable_do_not_disturb(self, player_entity_id):
if player_entity_id not in self._dnd_player_entity_ids:
self._dnd_player_entity_ids.append(player_entity_id)
self._update_player_volumes()
def disable_do_not_disturb(self, player_entity_id):
if player_entity_id in self._dnd_player_entity_ids:
self._dnd_player_entity_ids.remove(player_entity_id)
self._update_player_volumes()
def announce(self, message, use_cache=True, player_entity_ids=[], motion_entity_id=None, prelude_name=None):
if self.get_state(self._announcer_config.enabler_entity_id) != 'on':
self.log('Skipping ... announcer disabled')
return
players = self._figure_players(player_entity_ids, motion_entity_id)
if not players:
self.warn('Unable to find matching player with player_entity_ids={}, motion_entity_id={}'.format(
player_entity_ids,
motion_entity_id))
return
self.debug('Using {} players'.format(len(players)))
self._queue.append(Announcement(message, use_cache, prelude_name, False))
self._lock_and_announce(players)
def _figure_players(self, player_entity_ids, motion_entity_id):
self.debug('About to figure players player={}, motion={}'.format(player_entity_ids, motion_entity_id))
if player_entity_ids:
players = []
for player in self._players:
if all(id in player_entity_ids for id in player.player_entity_ids):
self.debug('Using {}'.format(player.player_entity_ids))
players.append(player)
return players
if motion_entity_id:
for player in self._players:
if motion_entity_id in player.motion_entity_ids:
self.debug('Using {}'.format(player))
return [player]
return None
return [p for p in self._players if not p.targeted_only]
def _lock_and_announce(self, players):
if not self._queue:
self.debug('Nothing in the queue, skipping ...')
return
if not players:
self.error('No player specified')
return
self._announcer_lock.acquire()
try:
self._do_announce(players)
finally:
self._announcer_lock.release()
def _do_announce(self, players):
queue = self._dequeue_all()
if not queue:
self.debug('Nothing in the queue, skipping ....')
return
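# Only the first announcement in a batch gets the chime, and an announcement
# with a prelude suppresses it; every later announcement in the batch plays
# without a chime.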
with_chime = True
medias = []
previous_announcement = None
while queue:
announcement = queue.pop(0)
if announcement == previous_announcement:
self.debug('Skipping duplicate announcement: {}'.format(announcement))
continue
if announcement.prelude_name:
with_chime = False
media_data = self._media_manager.get_media(announcement, with_chime)
medias.append(media_data)
previous_announcement = announcement
with_chime = False
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = {
executor.submit(p.play_media, medias) for p in players
}
for future in concurrent.futures.as_completed(futures):
future.result()
def _dequeue_all(self):
dequeued, self._queue[:] = self._queue[:], []
return dequeued
|
# convert string to hex
def toHex(s):
lst = []
for ch in s:
hv = hex(ord(ch)).replace('0x', '')
if len(hv) == 1:
hv = '0' + hv
lst.append(hv)
return ''.join(lst)
# convert hex repr back to string
def toStr(s):
return s and chr(int(s[:2], base=16)) + toStr(s[2:]) or ''
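# Round-trip example: toHex('hi') -> '6869' and toStr('6869') -> 'hi'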
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine.recipe_api import Property
from recipe_engine.types import freeze
DEPS = [
'build/gitiles',
'build/url',
'depot_tools/bot_update',
'depot_tools/gclient',
'depot_tools/gsutil',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/raw_io',
'recipe_engine/step',
]
PROPERTIES = {
'buildername': Property(kind=str),
}
BUILDERS = freeze({
'V8 lkgr finder': {
'project': 'v8',
'allowed_lag': 4,
'lkgr_status_gs_path': 'chromium-v8/lkgr-status',
'repo': 'https://chromium.googlesource.com/v8/v8',
'ref': 'refs/heads/lkgr',
},
})
def RunSteps(api, buildername):
botconfig = BUILDERS[buildername]
api.gclient.set_config('infra')
api.gclient.c.revisions['infra'] = 'HEAD'
api.bot_update.ensure_checkout()
api.gclient.runhooks()
args = [
'infra.services.lkgr_finder',
'--project=%s' % botconfig['project'],
# TODO(machenbach,friedman): Add shared creds for status apps.
'--password-file=/creds/gatekeeper/%s_status_password' %
botconfig['project'],
'--verbose',
'--email-errors',
'--post',
]
# Check if somebody manually pushed a newer lkgr to the lkgr ref.
# In this case manually override the lkgr in the app. This can be used
# to manually advance the lkgr when backing services are down.
if botconfig.get('ref'):
lkgr_from_ref = api.gitiles.commit_log(
botconfig['repo'], botconfig['ref'],
step_name='lkgr from ref')['commit']
lkgr_from_app = api.url.fetch(
'https://%s-status.appspot.com/lkgr' % botconfig['project'],
step_name='lkgr from app'
)
commits, _ = api.gitiles.log(
botconfig['repo'], '%s..%s' % (lkgr_from_app, lkgr_from_ref),
step_name='check lkgr override')
if commits:
args.append('--manual=%s' % lkgr_from_ref)
if botconfig.get('allowed_lag') is not None:
args.append('--allowed-lag=%d' % botconfig['allowed_lag'])
kwargs = {}
if botconfig.get('lkgr_status_gs_path'):
args += ['--html', api.raw_io.output_text()]
kwargs['step_test_data'] = lambda: api.raw_io.test_api.output_text(
'<html>lkgr-status</html>')
step_result = api.python(
'calculate %s lkgr' % botconfig['project'],
api.path['checkout'].join('run.py'),
args,
**kwargs
)
if botconfig.get('lkgr_status_gs_path'):
api.gsutil.upload(
api.raw_io.input_text(step_result.raw_io.output_text),
botconfig['lkgr_status_gs_path'],
'%s-lkgr-status.html' % botconfig['project'],
args=['-a', 'public-read'],
metadata={'Content-Type': 'text/html'},
)
def GenTests(api):
def lkgr_test_data():
return (
api.step_data(
'lkgr from ref',
api.gitiles.make_commit_test_data('deadbeef1', 'Commit1'),
) +
api.step_data(
'lkgr from app',
api.raw_io.stream_output('deadbeef2'),
)
)
for buildername, botconfig in BUILDERS.iteritems():
yield (
api.test(botconfig['project']) +
api.properties.generic(
buildername=buildername,
) +
lkgr_test_data() +
api.step_data(
'check lkgr override',
api.gitiles.make_log_test_data('A', n=0),
)
)
yield (
api.test(botconfig['project'] + '_manual') +
api.properties.generic(
buildername=buildername,
revision='deadbeef',
) +
lkgr_test_data() +
api.step_data(
'check lkgr override',
api.gitiles.make_log_test_data('A'),
)
)
|
from netapp.netapp_object import NetAppObject
class CoreSegmentInfo(NetAppObject):
"""
Core Segment Info
When returned as part of the output, all elements of this typedef
are reported, unless limited by a set of desired attributes
specified by the caller.
<p>
When used as input to specify desired attributes to return,
omitting a given element indicates that it shall not be returned
in the output. In contrast, by providing an element (even with
no value) the caller ensures that a value for that element will
be returned, given that the value can be retrieved.
<p>
When used as input to specify queries, any element can be omitted
in which case the resulting set of objects is not constrained by
any specific value of that attribute.
"""
_node = None
@property
def node(self):
"""
Node
Attributes: key, required-for-create, non-modifiable
"""
return self._node
@node.setter
def node(self, val):
if val != None:
self.validate('node', val)
self._node = val
_segment_name = None
@property
def segment_name(self):
"""
Name of the Core Segment.
Attributes: non-creatable, non-modifiable
"""
return self._segment_name
@segment_name.setter
def segment_name(self, val):
if val != None:
self.validate('segment_name', val)
self._segment_name = val
_panic_system_id = None
@property
def panic_system_id(self):
"""
System ID of Node That Generated Core.
Attributes: non-creatable, non-modifiable
"""
return self._panic_system_id
@panic_system_id.setter
def panic_system_id(self, val):
if val != None:
self.validate('panic_system_id', val)
self._panic_system_id = val
_total_segment_count = None
@property
def total_segment_count(self):
"""
Number of Segments Generated
Attributes: non-creatable, non-modifiable
"""
return self._total_segment_count
@total_segment_count.setter
def total_segment_count(self, val):
if val != None:
self.validate('total_segment_count', val)
self._total_segment_count = val
_md5_data_chksum = None
@property
def md5_data_chksum(self):
"""
MD5 checksum of the compressed data of the core segment.
Attributes: non-creatable, non-modifiable
"""
return self._md5_data_chksum
@md5_data_chksum.setter
def md5_data_chksum(self, val):
if val != None:
self.validate('md5_data_chksum', val)
self._md5_data_chksum = val
_owner_node = None
@property
def owner_node(self):
"""
Node that owns the Core Segment.
Attributes: key, optional-for-create, non-modifiable
"""
return self._owner_node
@owner_node.setter
def owner_node(self, val):
if val != None:
self.validate('owner_node', val)
self._owner_node = val
_panic_time = None
@property
def panic_time(self):
"""
Time when the panic occurred.
Attributes: non-creatable, non-modifiable
"""
return self._panic_time
@panic_time.setter
def panic_time(self, val):
if val != None:
self.validate('panic_time', val)
self._panic_time = val
_segment = None
@property
def segment(self):
"""
Name of the Core Segment File
Attributes: key, required-for-create, non-modifiable
"""
return self._segment
@segment.setter
def segment(self, val):
if val != None:
self.validate('segment', val)
self._segment = val
@staticmethod
def get_api_name():
return "core-segment-info"
@staticmethod
def get_desired_attrs():
return [
'node',
'segment-name',
'panic-system-id',
'total-segment-count',
'md5-data-chksum',
'owner-node',
'panic-time',
'segment',
]
def describe_properties(self):
return {
'node': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'segment_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'panic_system_id': { 'class': int, 'is_list': False, 'required': 'optional' },
'total_segment_count': { 'class': int, 'is_list': False, 'required': 'optional' },
'md5_data_chksum': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'owner_node': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'panic_time': { 'class': int, 'is_list': False, 'required': 'optional' },
'segment': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
|
"""
__M4IfClauseComplete_MDL.py_____________________________________________________
Automatically generated AToM3 Model File (Do not modify directly)
Author: gehan
Modified: Wed Oct 23 17:09:15 2013
________________________________________________________________________________
"""
from stickylink import *
from widthXfillXdecoration import *
from LHS import *
from MT_pre__SwcToEcuMapping import *
from graph_MT_pre__SwcToEcuMapping import *
from graph_LHS import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Action import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
def M4IfClauseComplete_MDL(self, rootNode, MT_pre__GM2AUTOSAR_MMRootNode=None, MoTifRuleRootNode=None):
# --- Generating attributes code for ASG MT_pre__GM2AUTOSAR_MM ---
if( MT_pre__GM2AUTOSAR_MMRootNode ):
# author
MT_pre__GM2AUTOSAR_MMRootNode.author.setValue('Annonymous')
# description
MT_pre__GM2AUTOSAR_MMRootNode.description.setValue('\n')
MT_pre__GM2AUTOSAR_MMRootNode.description.setHeight(15)
# name
MT_pre__GM2AUTOSAR_MMRootNode.name.setValue('')
MT_pre__GM2AUTOSAR_MMRootNode.name.setNone()
# --- ASG attributes over ---
# --- Generating attributes code for ASG MoTifRule ---
if( MoTifRuleRootNode ):
# author
MoTifRuleRootNode.author.setValue('Annonymous')
# description
MoTifRuleRootNode.description.setValue('\n')
MoTifRuleRootNode.description.setHeight(15)
# name
MoTifRuleRootNode.name.setValue('M4IfClauseComplete')
# --- ASG attributes over ---
self.obj21120=LHS(self)
self.obj21120.isGraphObjectVisual = True
if(hasattr(self.obj21120, '_setHierarchicalLink')):
self.obj21120._setHierarchicalLink(False)
# constraint
self.obj21120.constraint.setValue('#===============================================================================\n# This code is executed after the nodes in the LHS have been matched.\n# You can access a matched node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# The given constraint must evaluate to a boolean expression:\n# returning True enables the rule to be applied,\n# returning False forbids the rule from being applied.\n#===============================================================================\n\nreturn True\n')
self.obj21120.constraint.setHeight(15)
self.obj21120.graphClass_= graph_LHS
if self.genGraphics:
new_obj = graph_LHS(120.0,100.0,self.obj21120)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("LHS", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj21120.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj21120)
self.globalAndLocalPostcondition(self.obj21120, rootNode)
self.obj21120.postAction( rootNode.CREATE )
self.obj21121=MT_pre__SwcToEcuMapping(self)
self.obj21121.isGraphObjectVisual = True
if(hasattr(self.obj21121, '_setHierarchicalLink')):
self.obj21121._setHierarchicalLink(False)
# MT_pivotOut__
self.obj21121.MT_pivotOut__.setValue('element1')
# MT_subtypeMatching__
self.obj21121.MT_subtypeMatching__.setValue(('True', 0))
self.obj21121.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj21121.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj21121.MT_pre__classtype.setHeight(15)
# MT_pivotIn__
self.obj21121.MT_pivotIn__.setValue('')
self.obj21121.MT_pivotIn__.setNone()
# MT_label__
self.obj21121.MT_label__.setValue('1')
# MT_pre__cardinality
self.obj21121.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj21121.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj21121.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj21121.MT_pre__name.setHeight(15)
self.obj21121.graphClass_= graph_MT_pre__SwcToEcuMapping
if self.genGraphics:
new_obj = graph_MT_pre__SwcToEcuMapping(260.0,180.0,self.obj21121)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__SwcToEcuMapping", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj21121.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj21121)
self.globalAndLocalPostcondition(self.obj21121, rootNode)
self.obj21121.postAction( rootNode.CREATE )
# Connections for obj21120 (graphObject_: Obj92) of type LHS
self.drawConnections(
)
# Connections for obj21121 (graphObject_: Obj93) of type MT_pre__SwcToEcuMapping
self.drawConnections(
)
newfunction = M4IfClauseComplete_MDL
loadedMMName = ['MT_pre__GM2AUTOSAR_MM_META', 'MoTifRule_META']
atom3version = '0.3'
|
import os
import sys
import argparse
import datetime
import time
import json
from common import ColonyClient, LoggerService
def parse_user_input():
parser = argparse.ArgumentParser(prog='Colony Sandbox Start')
parser.add_argument("sandbox_id", type=str, help="The name of sandbox")
parser.add_argument("timeout", type=int, help="Set the timeout in minutes to wait for the sandbox to become active")
return parser.parse_args()
def build_shortcuts_json(sandbox_spec):
res = {}
applications = sandbox_spec.get('applications', [])
for app in applications:
res[app["name"]] = list(app['shortcuts']).copy()
return res
def _simplify_state(sandbox_progress):
return {step: description["status"] for step, description in sandbox_progress.items()}
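# Minimal illustration of the two helpers above on a hand-written structure.
# The field names mirror the code above; the values are made up for the example.
def _demo_helpers():
    sandbox_spec = {"applications": [{"name": "web", "shortcuts": ["http://web.example"]}]}
    progress = {"prepare": {"status": "Done"}, "deploy": {"status": "Pending"}}
    return build_shortcuts_json(sandbox_spec), _simplify_state(progress)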
if __name__ == "__main__":
args = parse_user_input()
space = os.environ.get("COLONY_SPACE", "")
token = os.environ.get("COLONY_TOKEN", "")
client = ColonyClient(space, token)
sandbox_id = args.sandbox_id
timeout = args.timeout
if not sandbox_id:
LoggerService.error("Sandbox Id cannot be empty")
if timeout < 0:
LoggerService.error("Timeout must be positive")
start_time = datetime.datetime.now()
LoggerService.message(f"Waiting for the Sandbox {sandbox_id} to start...")
sandbox_state = {}
    while (datetime.datetime.now() - start_time).total_seconds() < timeout * 60:
try:
sandbox = client.get_sandbox(sandbox_id)
except Exception as e:
LoggerService.error(f"Unable to get sandbox with ID {sandbox_id}; reason: {e}")
status = sandbox["sandbox_status"]
progress = sandbox["launching_progress"]
simple_state = _simplify_state(progress)
if simple_state != sandbox_state:
sandbox_state.update(simple_state)
LoggerService.message(f"Current state: {str(sandbox_state)}")
if status == "Active":
LoggerService.set_output("sandbox_details", json.dumps(sandbox))
LoggerService.set_output("sandbox_shortcuts", json.dumps(build_shortcuts_json(sandbox)))
LoggerService.success(f"Sandbox {sandbox_id} is active!")
elif status == "Launching":
time.sleep(10)
else:
LoggerService.error(f"Launching failed. The state of Sandbox {sandbox_id} is: {status}")
LoggerService.error(f"Sandbox {sandbox_id} was not active after the provided timeout of {timeout} minutes")
|
import os
import sys
import json
projectfile = ".project"
def update_key_value(filepath, **kwargs):
with open(filepath, "r") as json_file:
data = json.load(json_file)
data.update(**kwargs)
with open(filepath, "w") as json_file:
json.dump(data, json_file)
def get_key_value(filepath, key):
with open(filepath, "r") as json_file:
data = json.load(json_file)
return data[key]
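# Small usage sketch for the two helpers above (the file name is hypothetical):
def _demo_project_file(path="demo.project"):
    with open(path, "w") as f:
        json.dump({"name": "", "departments": []}, f)
    update_key_value(path, name="Example Project")
    return get_key_value(path, "name")  # -> "Example Project"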
def set_project_name(projectfile, name):
    data = {"name": name}
    update_key_value(projectfile, **data)
def set_project_nickname(projectfile, nickname):
    data = {"nickname": nickname}
    update_key_value(projectfile, **data)
def add_department(projectfile, new_department):
    current_departments = get_key_value(projectfile, "departments")
    for department in current_departments:
        if new_department.name == department["name"]:
            print("Department already exists")
            return False
    current_departments.append(new_department.as_dict())
    data = {"departments": current_departments}
    update_key_value(projectfile, **data)
    print("Added {}".format(projectfile))
    return True
def update_department(projectfile, updated_department):
    current_departments = get_key_value(projectfile, "departments")
    match = [x for x in current_departments if x["name"] == updated_department.name]
    if len(match) < 1:
        print("Department does not exist")
        return False
|
#Written for Python3
#This code is responsible for connecting, fetching and committing data
#to the mariaDB.
import mariadb
import spooler
import configReader
config = configReader.parseFile("db")
err = None
def isError():
global err
return err
try:
conn = mariadb.connect(
host=config["dbHost"],
port=int(config["dbPort"]),
user=config["dbUser"],
password=config["dbPassword"],
database=config["dbDatabase"]
)
cur = conn.cursor()
except mariadb.Error as error:
print ("Error conntecting to MariaDB.")
err = error
print (err)
def setLimitForUser(userID,limit):
try:
cur.execute(f"UPDATE mitarbeiter SET INFO={limit} WHERE ID={userID};")
conn.commit()
print(f"LIMIT {limit}€ updated for mitarbeiter id {userID}")
except mariadb.Error as error:
global err
err = error
return err
def fetchAllCustomers():
try:
cur.execute("SELECT NACHNAME, VORNAME, RFID, ID, INFO FROM mitarbeiter;")
tmpLst = []
print("All customers fetched")
for entry in cur:
tmpLst.append(entry)
return tmpLst
except mariadb.Error as error:
global err
err = error
return err
def setRFID(rfidKey,userID):
try:
cur.execute(f"UPDATE mitarbeiter SET RFID='{rfidKey}' WHERE ID={userID};")
conn.commit()
except mariadb.Error as error:
global err
err = error
print(err)
return err
def fetchRfid(rfidKey):
try:
cur.execute(f"SELECT ID FROM mitarbeiter WHERE RFID='{rfidKey}';")
tmpID = ()
for entry in cur:
tmpID = entry
return tmpID[0]
return tmpID
except mariadb.Error as error:
global err
err = error
return err
def fetchAllDrinks():
try:
cur.execute("SELECT g.ID, p.ID, g.Name, p.WERT FROM getraenke g LEFT JOIN preise p ON p.ID = g.PREIS WHERE g.ANGEBOT = 1;")
tmpLst = []
print("All drinks fetched")
for entry in cur:
tmpLst.append(entry)
return tmpLst
except mariadb.Error as error:
global err
err = error
return err
def fetchSingleDrink(drinkName):
try:
cur.execute(f"SELECT ID FROM getraenke WHERE NAME='{drinkName}';")
tmpID = ()
for entry in cur:
tmpID = entry
print("rofl")
return tmpID[0]
except mariadb.Error as error:
global err
err = error
return err
def buyDrink(rfidKey, drinkID, priceID):
try:
customerID = int(fetchRfid(rfidKey))
        q = (customerID, drinkID, priceID)
        # Parameterized insert using the values gathered above
        cur.execute("INSERT INTO `kaeufe` (MITARBEITER, GETRAENK, PREISID) VALUES (?, ?, ?)", q)
        conn.commit()
        print("Purchase successful")
except mariadb.Error as error:
global err
err = error
return err
def terminateConnection():
conn.close()
|
from qtpy import QtWidgets
from stream_viewer.widgets.interface import IControlPanel
class VisbrainControlPanel(IControlPanel):
"""
A panel of configuration widgets for configuring a visbrain mesh plot.
This widget assumes the renderer is an instance of CortexVB (maybe other VisBrain too).
"""
def __init__(self, renderer, name="VisbrainControlPanelWidget", **kwargs):
super().__init__(renderer, name=name, **kwargs) # Will call _init_widgets and reset_widgets
def _init_widgets(self):
super()._init_widgets()
# Continue filling in the grid of widgets
row_ix = self._last_row
# Handle Out-of-range ComboBox
row_ix += 1
self.layout().addWidget(QtWidgets.QLabel("Out of Range"), row_ix, 0, 1, 1)
_combo = QtWidgets.QComboBox()
_combo.setObjectName("OutOfRange_ComboBox")
_combo.addItems(['clip', 'clip-gray-red', 'mask'])
self.layout().addWidget(_combo, row_ix, 1, 1, 1)
# Colorbar checkbox
row_ix += 1
self.layout().addWidget(QtWidgets.QLabel("Colorbar"), row_ix, 0, 1, 1)
_checkbox = QtWidgets.QCheckBox()
_checkbox.setObjectName("Colorbar_CheckBox")
self.layout().addWidget(_checkbox, row_ix, 1, 1, 1)
self._last_row = row_ix
def reset_widgets(self, renderer):
super().reset_widgets(renderer)
# Colorbar checkbox
        _checkbox = self.findChild(QtWidgets.QCheckBox, "Colorbar_CheckBox")
try:
_checkbox.stateChanged.disconnect()
except TypeError:
pass
_checkbox.setChecked(renderer.show_colorbar)
_checkbox.stateChanged.connect(renderer.show_colorbar_stateChanged)
|
# schemas.py
#
# Starling Bank schema definitions
from datetime import datetime
from typing import List, Optional
from pydantic import BaseModel
# = ACCOUNTS ==========================================================================================================
class StarlingAccountSchema(BaseModel):
"""Represents a Starling Bank account."""
accountUid: str
name: str
accountType: str
currency: str
createdAt: datetime
defaultCategory: str
class StarlingAccountsSchema(BaseModel):
accounts: List[StarlingAccountSchema]
class StarlingMainAccountsSchema(BaseModel):
type_name: str
accounts: List[StarlingAccountSchema]
class StarlingSignedCurrencyAndAmountSchema(BaseModel):
currency: str
minorUnits: int
class StarlingBalanceSchema(BaseModel):
clearedBalance: StarlingSignedCurrencyAndAmountSchema
pendingTransactions: StarlingSignedCurrencyAndAmountSchema
effectiveBalance: StarlingSignedCurrencyAndAmountSchema
# = TRANSACTIONS ======================================================================================================
class SourceAmount(BaseModel):
"""Currency dcit"""
currency: str
minorUnits: int
def compute_amount(self, direction: str) -> float:
"""Compute a transaction amount in pounds. Outflow is negative."""
if direction == "OUT":
sign = -1
else:
sign = 1
return sign * self.minorUnits / 100.0
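# A minimal illustration of the sign convention above (the helper name is ours,
# not part of the schema module): an outbound amount of 1234 minor units (pence)
# becomes -12.34 pounds.
def _demo_compute_amount() -> float:
    return SourceAmount(currency="GBP", minorUnits=1234).compute_amount("OUT")  # -12.34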
class StarlingTransactionSchema(BaseModel):
"""Represents a Starling Bank transaction."""
feedItemUid: str
transactionTime: datetime
counterPartyUid: str
counterPartyName: str
counterPartyType: str
direction: str
sourceAmount: SourceAmount
reference: Optional[str] = None
status: str
class StarlingTransactionsSchema(BaseModel):
feedItems: List[StarlingTransactionSchema]
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
##################################################################
# Documentation
"""Package containing a collection of custom Keras layers.
Attributes:
.. moduleauthor:: Wladimir Sidorenko (Uladzimir Sidarenka)
"""
##################################################################
# Imports
from __future__ import absolute_import, print_function, unicode_literals
from .attention import Attention, MergeAttention, RawAttention
from .cba import CBA
from .common import DFLT_INITIALIZER
from .lba import LBA
from .mvrn import MVRN
from .msrn import MSRN
from .rn import RN
from .rnt import RNT
from .membeddings import MatrixEmbedding
from .ymembeddings import YMatrixEmbedding
from .word2vec import WORD2VEC_LAYER_NAME, Word2Vec
##################################################################
# Variables and Constants
EMPTY_IDX = 0
UNK_IDX = 1
CUSTOM_OBJECTS = {"Attention": Attention, "MatrixEmbedding": MatrixEmbedding,
"Word2Vec": Word2Vec, "CBA": CBA, "LBA": LBA,
"MergeAttention": MergeAttention,
"MVRN": MVRN, "MSRN": MSRN,
"RawAttention": RawAttention,
"RN": RN, "RNT": RNT,
"YMatrixEmbedding": YMatrixEmbedding}
__name__ = "cgsa.dl.layers"
__all__ = ["Attention", "EMPTY_IDX", "UNK_IDX", "MVRN", "RN", "RNT",
"CBA", "LBA", "MatrixEmbedding", "MergeAttention",
"RawAttention", "WORD2VEC_LAYER_NAME", "Word2Vec",
"CUSTOM_OBJECTS", "DFLT_INITIALIZER"]
__author__ = "Uladzimir Sidarenka"
__email__ = "sidarenk at uni dash potsdam dot de"
__version__ = "0.1.0"
|
from user.recommendations.constraints.base_constraint import BaseConstraint
class Vegetarian(BaseConstraint):
threshold = 0.9
def is_user_vegetarian(self):
if self.classification['vegetarian'] is None:
return False
return self.classification['vegetarian'] > self.threshold and self.classification['meat'] < (1 - self.threshold)
def filter(self, query):
if self.is_user_vegetarian():
return query.filter(automatic_classification__vegetarian=1)
else:
return query
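# Illustration of the threshold rule above with made-up scores: with
# threshold = 0.9 a user counts as vegetarian only when the 'vegetarian'
# score exceeds 0.9 AND the 'meat' score stays below 0.1, e.g.
#   {'vegetarian': 0.95, 'meat': 0.02} -> vegetarian
#   {'vegetarian': 0.95, 'meat': 0.30} -> not vegetarian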
|
import numpy as np
import pandas as pd
from scipy.stats import variation
def row_corr(A,B):
#number of columns in A or B
N = B.shape[1]
# Store row-wise sums of A and B, as they would be used at few places
sA = A.sum(1)
sB = B.sum(1)
# Compute the four terms in pcc matrix-wise
p1 = N*np.einsum('ik,jk->ij',A,B)
p2 = sB*sA[:,None]
p3 = N*((B**2).sum(1)) - (sB**2)
p4 = N*((A**2).sum(1)) - (sA**2)
# compute pcc as 2D array
pcorr = ((p1 - p2)/np.sqrt(p3*p4[:,None]))
return pcorr
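# Quick self-check sketch for row_corr (the helper name is ours): entry (i, j)
# should match the plain Pearson correlation of row i of A with row j of B.
def _check_row_corr(n_rows_a=3, n_rows_b=4, n_cols=10):
    rng = np.random.default_rng(0)
    A = rng.random((n_rows_a, n_cols))
    B = rng.random((n_rows_b, n_cols))
    C = row_corr(A, B)
    assert np.allclose(C[0, 1], np.corrcoef(A[0], B[1])[0, 1])
    return C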
def row_corr_weighted(A,B,weights):
# converted to python from here: https://stackoverflow.com/questions/9460664/weighted-pearsons-correlation
w = weights/sum(weights)
A = A - (A*w).sum(1)[:,None]
B = B - (B*w).sum(1)[:,None]
pcorr = np.matmul(A,((B*w).T))/np.sqrt(np.matmul(((A**2)*w).sum(1)[:,None],(((B**2)*w).sum(1)[:,None]).T))
return pcorr
def match(similarity_matrix,asa_24,ncc,N):
TOP = N # set the number of matches to return
# Get indices of the top matches from the combined similarity matrix
indices_top = np.argsort(-similarity_matrix,axis=1,)[:,:TOP]
pcc_top = np.sort(-similarity_matrix,axis=1,)[:,:TOP]
iters = [np.arange(0,asa_24.shape[0],1).tolist(),np.arange(0,TOP,1).tolist()]
# Construct dataframe to store top results
results_cols = asa_24.columns.values.tolist() + ['similarity'] + ncc.columns.values.tolist()
mi = pd.MultiIndex.from_product(iters, names=['asa_index', 'match_index'])
results_top = pd.DataFrame(index=mi,columns=results_cols)
# Copy ASA24 values to left side
results_top.loc[results_top.eval('match_index==0'), asa_24.columns] = asa_24.values
results_top.loc[:,ncc.columns] = ncc.iloc[indices_top.flatten(),:].values
results_top.loc[:,'similarity'] = -pcc_top.flatten()
# calculate variation across matches
variations = pd.DataFrame(results_top['Lactose (g)'].groupby("asa_index").apply(variation))
for index in results_top.index.get_level_values(0).unique():
results_top.loc[index,'variation'] = variations.loc[index,:].values[0]
return results_top
|
import json
import requests
import pickle as pkl
def save(object, filename):
filename = "pickles/" + filename
pkl.dump(object, open(filename, "wb"))
def load(filename, req=None, try_load=True):
filename = "pickles/" + filename
if not req and not try_load:
return {}
if not req and try_load:
try:
return pkl.load(open(filename, "rb"))
except:
return {}
if req and try_load:
try:
return pkl.load(open(filename, "rb"))
except:
data = requests.get(req).json()
pkl.dump(data, open(filename, "wb"))
return data
if req and not try_load:
data = requests.get(req).json()
pkl.dump(data, open(filename, "wb"))
return data
def print_json(data, indent=2):
print(json.dumps(data, indent=indent))
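# Behaviour sketch for load(): with try_load=True a cached pickle is returned
# when present; otherwise, if a request URL is given, the JSON response is
# fetched and cached. The URL below is a placeholder, not a real endpoint.
def _demo_cached_fetch():
    return load("example.pkl", req="https://example.com/api", try_load=True)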
|
from trasto.infrastructure.awsmultiprocess.comando_repository import (
COMANDOS_QUEUE_NAME, ComandoRepository)
from trasto.infrastructure.memory.repositories import Idefier
from trasto.model.commands import (Comando, ComandoNuevaAccion,
ComandoNuevaTarea)
from trasto.model.entities import Accion, Tarea
from trasto.model.value_entities import Idd, TipoAccion
def test_comando_nueva_accion():
comando_repo = ComandoRepository()
cna = ComandoNuevaAccion(
idd=Idd(Idefier()),
accion=Accion(
idd=Idd(Idefier()),
nombre="nombreaccion",
script_url="url",
tipo=TipoAccion(nombre="buenhumor")
)
)
comando_repo.send_comando(cna)
count = 0
for ccna in comando_repo.next_comando():
assert not ccna is None
assert isinstance(ccna, ComandoNuevaAccion)
assert ccna.accion.nombre == "nombreaccion"
count = count + 1
break
assert count == 1
def test_comando_nueva_tarea():
comando_repo = ComandoRepository()
cnt_alta = ComandoNuevaTarea(
idd=Idd(Idefier()),
tarea=Tarea(
idd=Idd(Idefier()),
nombre="tareaalta",
parametros="parametros",
prioridad=1,
accionid="accion"
)
)
cnt_baja = ComandoNuevaTarea(
idd=Idd(Idefier()),
tarea=Tarea(
idd=Idd(Idefier()),
nombre="tareabaja",
parametros="parametros",
prioridad=0,
accionid="accion"
)
)
comando_repo.send_comando(cnt_alta)
comando_repo.send_comando(cnt_baja)
count = 0
for ccnt in comando_repo.next_comando():
assert isinstance(ccnt, ComandoNuevaTarea)
assert ccnt.tarea.nombre in ("tareabaja", "tareaalta")
print(f"vamos por contador: {count}")
count = count + 1
if count == 2:
break
assert count == 2
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import re
import random
import numpy as np
import h5py
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# def getDataFiles(list_filename):
# return [line.rstrip() for line in open(list_filename)]
# def loadDataFile(path):
# data = np.loadtxt(path)
# num = data.shape[0]
# point_xyz = data[:, 0:3]
# point_normal = data[:, 3:6]
# point_rgb = data[:, 6:9]
# # label just a example, should be repalced the real.
# # modlenet40 is 0-39, so the label can be 40 and 41
# label = np.ones((num, 1), dtype=int)+39
# return point_xyz, label
def change_scale(data):
#centre
xyz_min = np.min(data[:, 0:3], axis=0)
xyz_max = np.max(data[:, 0:3], axis=0)
xyz_move = xyz_min+(xyz_max-xyz_min)/2
data[:, 0:3] = data[:, 0:3]-xyz_move
#scale
scale = np.max(np.sqrt(np.sum(abs(data[:, 0:3])**2, axis=-1)))
data[:, 0:3] = data[:, 0:3]/scale
return data
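# Sketch of what change_scale does: points are shifted so the bounding-box
# centre sits at the origin, then scaled so the farthest point lies on the
# unit sphere (demo helper name is ours).
def _demo_change_scale():
    pts = np.array([[0., 0., 0.], [2., 0., 0.], [0., 4., 0.]], dtype=np.float32)
    scaled = change_scale(pts.copy())
    assert np.isclose(np.max(np.linalg.norm(scaled, axis=1)), 1.0)
    return scaled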
# def sample_data(data, num_sample):
# """ data is in N x ...
# we want to keep num_samplexC of them.
# if N > num_sample, we will randomly keep num_sample of them.
# if N < num_sample, we will randomly duplicate samples.
# """
# N = data.shape[0]
# if (N == num_sample):
# return data, range(N)
# elif (N > num_sample):
# sample = np.random.choice(N, num_sample)
# return data[sample, ...], sample
# else:
# sample = np.random.choice(N, num_sample-N)
# dup_data = data[sample, ...]
# return np.concatenate([data, dup_data], 0), list(range(N))+list(sample)
def get_csv_list(path):
    # Build the list of CSV file paths, grouped into three types
csv_file_list = [[], [], []]
file_list = os.listdir(path)
for file_name in file_list:
if file_name.endswith('csv'):
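            # The number of leading zeros in the (numeric) file name appears to
            # encode the sample type, giving an index of 0, 1 or 2.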
            filename = re.split(r'(\.)', file_name)[0]
csv_type = len(filename) - len(str(int(filename)))
csv_file_list[csv_type].append(path + file_name)
return csv_file_list
def get_csv_data(path_list_arr):
    # Create empty arrays with fixed dimensions
sum_data = np.empty([0, 1024, 3], dtype=np.dtype('<f4'))
type_data = np.empty([0, 1], dtype=np.dtype('|u1'))
    # Type index
type_serial = -1
    # Iterate over the file list of each type
for path_list in path_list_arr:
        # Each file list corresponds to one data type
type_serial += 1
        # Iterate over every CSV file
for path in path_list:
            # Read each CSV file into a NumPy array
data = np.genfromtxt(path, delimiter=',', dtype=np.dtype('<f4'))
data_len = len(data)
            # Number of padding points needed to reach 1024
empty_len = 1024 - data_len
            # Choose one of the two padding methods below
            # Method 1: pad to 1024 points by appending randomly chosen rows from the CSV data
count = 0
while count < empty_len:
data = np.append(data, [data[random.randint(0, data_len-1)]], axis=0)
count += 1
            # Alternative: pad to 1024 points with empty values
# data = np.append(change_scale(data), change_scale(np.empty([empty_len, 3], dtype=np.dtype('<f4'))), axis=0)
            # Merge into the combined data array
sum_data = np.append(sum_data, [change_scale(data)], axis=0)
            # Append the corresponding type label
type_data = np.append(type_data, [[type_serial]], axis=0)
return sum_data, type_data
if __name__ == "__main__":
TRAIN_CSV_PATH = './pointdata3/traindata/csv/'
TEST_CSV_PATH = './pointdata3/testdata/csv/'
train_csv_list = get_csv_list(TRAIN_CSV_PATH)
test_csv_list = get_csv_list(TEST_CSV_PATH)
train_data, train_type_data = get_csv_data(train_csv_list)
test_data, test_type_data = get_csv_data(test_csv_list)
open("plant_train.h5", 'w')
with h5py.File('plant_train.h5', 'r+') as f:
f.create_dataset('data', data=train_data)
f.create_dataset('faceId', data=np.empty([1024,1024], dtype=np.dtype('<i4')))
f.create_dataset('label', data=train_type_data)
f.create_dataset('normal', data=train_data)
open("plant_test.h5", 'w')
with h5py.File('plant_test.h5', 'r+') as f:
f.create_dataset('data', data=test_data)
f.create_dataset('faceId', data=np.empty([1024,1024], dtype=np.dtype('<i4')))
f.create_dataset('label', data=test_type_data)
f.create_dataset('normal', data=test_data)
# DATA_FILES = getDataFiles(os.path.join(BASE_DIR, 'file_path.txt'))
# num_sample = 4096
# DATA_ALL = []
# for fn in range(len(DATA_FILES)):
# current_data, current_label = loadDataFile(DATA_FILES[fn])
# change_data = change_scale(current_data)
# data_sample, index = sample_data(change_data, num_sample)
# data_label = np.hstack((data_sample, current_label[index]))
# DATA_ALL.append(data_label)
# output = np.vstack(DATA_ALL)
# output = output.reshape(-1, num_sample, 4)
# # train and test number, save data
# if not os.path.exists('plant_train.h5'):
# with h5py.File('plant_train.h5') as f:
# f.create_dataset('data', data=output[0:7, 0:3])
# f['labels'] = output[0:8, 4]
# if not os.path.exists('plant_test.h5'):
# with h5py.File('plant_test.h5') as f:
# f['data'] = output[7:9, 0:3]
# f['labels'] = output[7:9, 4]
|
import types
import pytest
from stp_core.loop.eventually import eventually
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.util import updateNamedTuple
from plenum.test.helper import sdk_send_random_requests, \
sdk_send_random_and_check
from plenum.test.test_node import getNonPrimaryReplicas, \
getPrimaryReplica
@pytest.fixture(scope="module")
def setup(tconf, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
# Patch the 3phase request sending method to send incorrect digest and
pr, otherR = getPrimaryReplica(txnPoolNodeSet, instId=0), \
getNonPrimaryReplicas(txnPoolNodeSet, instId=0)
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, tconf.Max3PCBatchSize)
stateRoot = pr.stateRootHash(DOMAIN_LEDGER_ID, to_str=False)
origMethod = pr.create3PCBatch
malignedOnce = None
def badMethod(self, ledgerId):
nonlocal malignedOnce
pp = origMethod(ledgerId)
if not malignedOnce:
pp = updateNamedTuple(pp, digest=pp.digest + '123')
malignedOnce = True
return pp
pr.create3PCBatch = types.MethodType(badMethod, pr)
sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
tconf.Max3PCBatchSize)
return pr, otherR, stateRoot
@pytest.fixture(scope="module")
def reverted(setup, looper):
pr, otherR, oldStateRoot = setup
def chkStateRoot(root):
for r in [pr] + otherR:
            assert r.stateRootHash(DOMAIN_LEDGER_ID, to_str=False) == root
looper.run(eventually(chkStateRoot, oldStateRoot))
@pytest.fixture(scope="module")
def viewChanged(reverted, looper, txnPoolNodeSet):
def chk():
for n in txnPoolNodeSet:
assert n.viewNo == 1
assert all([r.primaryName for r in n.replicas])
looper.run(eventually(chk, retryWait=1, timeout=15))
def testTreeStateRevertedAfterBatchRejection(reverted):
""""
After a batch is rejected, all nodes revert their trees to last known
correct state
"""
def testViewChangeAfterBatchRejected(viewChanged):
""""
After a batch is rejected and each batch that was created based on the
rejected batch is discarded, the discarded batches are tried again
"""
def testMoreBatchesWillBeSentAfterViewChange(reverted, viewChanged,
txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client,
tconf, looper):
"""
After retrying discarded batches, new batches are sent
:return:
"""
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, tconf.Max3PCBatchSize)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-31 16:21
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('genevieve_client', '0008_auto_20160628_1453'),
]
operations = [
migrations.RenameField(
model_name='genomereport',
old_name='report_type',
new_name='report_source',
),
]
|
__version__ = '0.0.3a3'
|
'''Visualization functions'''
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
import matplotlib.patches
import matplotlib.collections
from matplotlib import cm
import numpy as np
from .globalVariables import *
from . import (visibilityGraphAuxiliaryFunctions,
clusteringFunctions,
coreFunctions,
extendedDataFrame,
visibilityGraphCommunityDetection,
utilityFunctions)
def saveFigure(fig, saveDir, label, extension, dpi, close=True):
"""Function primarily used internally to save and close figures
Parameters:
saveDir: str
Path of directories to save the object to
extension: str, Default '.png'
Path of directories to save the object to
dpi: int, Default 300
Figure resolution if rasterized
close: boolean: Default True
Whether to close the figure after saving
Returns:
None
Usage:
saveFigure(fig, saveDir, label, extension, dpi)
"""
utilityFunctions.createDirectories(saveDir)
try:
if not extension[0]=='.':
extension = ''.join(['.', extension])
except Exception as exception:
print(exception)
        print('Check figure extension/format')
if extension in ['.png', '.jpeg', '.tiff']:
fig.savefig(os.path.join(saveDir, label + extension), dpi=dpi)
elif extension in ['.svg', '.eps', '.pdf']:
fig.savefig(os.path.join(saveDir, label + extension))
else:
print('Unsupported format. Figure not saved')
plt.close(fig)
return None
def makeDataHistograms(df, saveDir, dataName, figsize=(8,8), range_min=np.min, range_max=np.max, includeTitle=True, title='Data @ timePoint:', fontsize=8, fontcolor='b', N_bins=100, color='b', extension='.png', dpi=300):
"""Make a histogram for each Series (time point) in a Dataframe.
Parameters:
df: pandas.DataFrame
Data to visualize
saveDir: str
Path of directories to save the object to
dataName: str
Label to include in the file name
figsize: tuple, Default (8,8)
Size of the figure in inches
        range_min: int, float or function, Default numpy.min
            How to determine the data minimum
        range_max: int, float or function, Default numpy.max
            How to determine the data maximum
        includeTitle: boolean, Default True
            Whether to add a title to each plot
title: str, Default 'Data @ timePoint:'
Text of the title
fontsize: str, Default 8
Fontsize of the labels
fontcolor: str, Default 'b'
Color of the title font
N_bins: int, Default 100
Number of bins in the histogram
color: str, Default 'b'
Color of the bars
        extension: str, Default '.png'
            Format (file extension) of the saved figures
dpi: int, Default 300
Figure resolution if rasterized
Returns:
None
Usage:
makeDataHistograms(df, '/dir1', 'myData')
"""
if not os.path.exists(saveDir):
os.makedirs(saveDir)
for timePoint in df.columns[:]:
label = str(timePoint)
subset = df[timePoint]
subset = subset[~np.isnan(subset.values)].values
if type(range_min) is types.FunctionType:
range_min = range_min(subset)
if type(range_max) is types.FunctionType:
range_max = range_max(subset)
hist_of_subset = scipy.stats.rv_histogram(np.histogram(subset, bins=N_bins, range=(range_min,range_max)))
hist_data = hist_of_subset._hpdf / N_bins
hist_bins = hist_of_subset._hbins
fig, ax = plt.subplots(figsize=figsize)
bar_bin_width = range_max / N_bins
ax.bar(hist_bins, hist_data[:-1], width=0.9 * bar_bin_width, color=color, align='center')
if includeTitle:
ax.set_title(title + label, fontdict={'color': fontcolor})
ax.set_xlabel('Values', fontsize=fontsize)
ax.set_ylabel('Density', fontsize=fontsize)
ax.set_xlim(range_min - 0.5 * bar_bin_width, range_max + 0.5 * bar_bin_width)
fig.tight_layout()
saveFigure(fig, saveDir, dataName + '_' + label + '_histogram', extension, dpi)
return None
def makeLombScarglePeriodograms(df, saveDir, dataName, minNumberOfNonzeroPoints=5, oversamplingRate=100, figsize=(5,5), title1='TimeSeries Data', title2='Lomb-Scargle periodogram' , extension='.png', dpi=300):
"""Make a combined plot of the signal and its Lomb-Scargle periodogram
for each pandas Series (time point) in a Dataframe.
Parameters:
df: pandas.DataFrame
Data to visualize
saveDir: str
Path of directories to save the object to
dataName: str
Label to include in the file name
minNumberOfNonzeroPoints: int, Default 5
Minimum number of non-zero points in signal to use it
oversamplingRate: int, Default 100
Periodogram oversampling rate
        figsize: tuple, Default (5,5)
Size of the figure in inches
title1: str, Default 'TimeSeries Data'
Text of the upper title
title2: str, Default 'Lomb-Scargle periodogram'
Text of the lower title
        extension: str, Default '.png'
            Format (file extension) of the saved figures
dpi: int, Default 300
Figure resolution if rasterized
Returns:
None
Usage:
makeLombScarglePeriodograms(df, '/dir1', 'myData')
"""
if not os.path.exists(saveDir):
os.makedirs(saveDir)
for geneIndex in range(len(df.index[:])):
geneName = df.index[geneIndex].replace(':', '_')
subset = df.iloc[geneIndex]
subset = subset[subset > 0.]
setTimes, setValues, inputSetTimes = subset.index.values, subset.values, df.columns.values
if len(subset) < minNumberOfNonzeroPoints:
print(geneName, ' skipped (only %s non-zero point%s), ' % (len(subset), 's' if len(subset) != 1 else ''), end=' ', flush=True)
continue
pgram = coreFunctions.LombScargle(setTimes, setValues, inputSetTimes, OversamplingRate=oversamplingRate)
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=figsize)
ax1.plot(setTimes, setValues, 'bo', linewidth=3, markersize=5, markeredgecolor='k', markeredgewidth=2)
zero_points = np.array(list(set(inputSetTimes) - set(setTimes)))
ax1.plot(zero_points, np.zeros((len(zero_points),)), 'ro', linewidth=3, markersize=3, markeredgecolor='k', markeredgewidth=0)
ax1.set_aspect('auto')
minTime = np.min(inputSetTimes)
maxTime = np.max(inputSetTimes)
extraTime = (maxTime - minTime) / 10
ax1.set_xlim(minTime - extraTime, maxTime + extraTime)
ax1.set_title(title1)
ax2.plot(2 * np.pi * pgram[0], pgram[1], 'r-', linewidth=1)
ax2.set_aspect('auto')
ax2.set_title(title2)
fig.tight_layout()
saveFigure(fig, saveDir, dataName + '_' + geneName + '_Lomb_Scargle_periodogram', extension, dpi)
return None
def addVisibilityGraph(data, times, dataName='G1S1', coords=[0.05,0.95,0.05,0.95],
numberOfVGs=1, groups_ac_colors=['b'], fig=None, numberOfCommunities=6, printCommunities=False,
fontsize=None, nodesize=None, level=0.55, commLineWidth=0.5, lineWidth=1.0,
withLabel=True, withTitle=False, layout='circle', radius=0.07, noplot=False, horizontal=False, communities=None, minNumberOfCommunities=2, communitiesMethod='betweenness_centrality', direction='left', weight='distance'):
"""Draw a Visibility graph of data on a provided Matplotlib figure.
We represent each timepoint in a series as a node.
Temporal events are detected and indicated with solid blue
lines encompassing groups of points, or communities.
The shortest path identifies nodes (i.e. timepoints) that display high
intensity, and thus dominate the global signal profile, are robust
to noise, and are likely drivers of the global temporal behavior.
Parameters:
data: 2d numpy.array
Array of data to visualize
times: 1d numpy.array
Times corresponding to each data point, used for labels
dataName: str, Default 'G1S1'
label to include in file name
coords: list, Default [0.05,0.95,0.05,0.95]
Coordinates of location of the plot on the figure
numberOfVGs: int, Default 1
Number of plots to add to this figure
groups_ac_colors: list, Default ['b']
Colors corresponding to different groups of graphs
fig: matplotlib.figure, Default None
Figure object
numberOfCommunities: int, Default 6
Number of communities
printCommunities: boolean, Default False
Whether to print communities details to screen
fontsize: float, Default None
Size of labels
nodesize: float, Default None
Size of nodes
level: float, Default 0.55
Distance of the community lines to nodes
commLineWidth: float, Default 0.5
Width of the community lines
lineWidth: float, Default 1.0
Width of the edges between nodes
withLabel: boolean, Default True
Whether to include label on plot
withTitle: boolean, Default False
Whether to include title on plot
layout: str, Default 'circle'
Type of the layout. Other option is 'line'
radius: float, Default 0.07
Radius of the circle
noplot: boolean, Default False
Whether to make a plot or only calculate communities
horizontal: boolean, Default False
Whether to use horizontal or natural visibility graph.
communities: tuple, Default None
            A tuple containing the communities structure of the network, and the networkx Graph:
List of list, e.g. [[],[],...]
networkx.Graph
minNumberOfCommunities: int, Default 2
Number of communities to find depends on the number of splits.
This parameter is ignored in methods that automatically
estimate optimal number of communities.
communitiesMethod: str, Default 'betweenness_centrality'
            String defining the method to use for community detection:
'Girvan_Newman': edge betweenness centrality based approach
'betweenness_centrality': reflected graph node betweenness centrality based approach
'WDPVG': weighted dual perspective visibility graph method (note to also set weight variable)
direction:str, default 'left'
The direction that nodes aggregate to communities:
None: no specific direction, e.g. both sides.
'left': nodes can only aggregate to the left side hubs, e.g. early hubs
'right': nodes can only aggregate to the right side hubs, e.g. later hubs
weight: str, Default 'distance'
Type of weight for communitiesMethod='WDPVG':
None: no weighted
'time': weight = abs(times[i] - times[j])
'tan': weight = abs((data[i] - data[j])/(times[i] - times[j])) + 10**(-8)
'distance': weight = A[i, j] = A[j, i] = ((data[i] - data[j])**2 + (times[i] - times[j])**2)**0.5
Returns:
tuple
(graph_nx, data, communities)
Usage:
addVisibilityGraph(exampleData, exampleTimes, fig=fig, fontsize=16, nodesize=700,
level=0.85, commLineWidth=3.0, lineWidth=2.0, withLabel=False)
"""
if len(data.shape)>1:
data = extendedDataFrame.DataFrame(data=data).imputeMissingWithMedian().apply(lambda data: np.sum(data[data > 0.0]) / len(data), axis=0).values
if communities is None:
communities, graph_nx = clusteringFunctions.getCommunitiesOfTimeSeries(data, times, minNumberOfCommunities=minNumberOfCommunities, horizontal=horizontal, method=communitiesMethod, direction=direction, weight=weight)
else:
communities, graph_nx = communities
if printCommunities:
print('Communities:')
[print(community) for community in communities]
print('\n')
if noplot:
return graph_nx, data, communities
group = int(dataName[:dataName.find('S')].strip('G'))
if fontsize is None:
fontsize = 4. * (8. + 5.) / (numberOfVGs + 5.)
if nodesize is None:
nodesize = 30. * (8. + 5.) / (numberOfVGs + 5.)
(x1,x2,y1,y2) = coords
axisVG = fig.add_axes([x1,y1,x2 - x1,y2 - y1])
cmap = matplotlib.colors.LinearSegmentedColormap.from_list('GR', [(0, 1, 0), (1, 0, 0)], N=1000)
if layout=='line':
pos = {i:[float(i)/float(len(graph_nx)), 0.5] for i in range(len(graph_nx))}
else:
pos = nx.circular_layout(graph_nx)
keys = np.array(list(pos.keys())[::-1])
values = np.array(list(pos.values()))
values = (values - np.min(values, axis=0))/(np.max(values, axis=0)-np.min(values, axis=0))
keys = np.roll(keys, np.argmax(values.T[1]) - np.argmin(keys))
pos = dict(zip(keys, values))
keys = np.array(list(pos.keys()))
values = np.array(list(pos.values()))
shortest_path = nx.shortest_path(graph_nx, source=min(keys), target=max(keys))
shortest_path_edges = [(shortest_path[i],shortest_path[i + 1]) for i in range(len(shortest_path) - 1)]
if layout=='line':
for edge in graph_nx.edges:
l = np.array(pos[edge[0]])
r = np.array(pos[edge[1]])
if edge in shortest_path_edges:
axisVG.add_artist(matplotlib.patches.Wedge((l+r)/2., 0.5*np.sqrt((l-r)[0]*(l-r)[0]+(l-r)[1]*(l-r)[1]), 0, 180, fill=False, edgecolor='y', linewidth=0.5*3.*lineWidth, alpha=0.7, width=0.001))
axisVG.add_artist(matplotlib.patches.Wedge((l+r)/2., 0.5*np.sqrt((l-r)[0]*(l-r)[0]+(l-r)[1]*(l-r)[1]), 0, 180, fill=False, edgecolor='k', linewidth=0.5*lineWidth, alpha=0.7, width=0.001))
nx.draw_networkx(graph_nx, pos=pos, ax=axisVG, node_color='y', edge_color='y', node_size=nodesize * 1.7, width=0., nodelist=shortest_path, edgelist=shortest_path_edges, with_labels=False)
nx.draw_networkx(graph_nx, pos=pos, ax=axisVG, node_color=data, cmap=cmap, alpha=1.0, font_size=fontsize, width=0., font_color='k', node_size=nodesize)
else:
nx.draw_networkx(graph_nx, pos=pos, ax=axisVG, node_color='y', edge_color='y', node_size=nodesize * 1.7, width=3.0*lineWidth, nodelist=shortest_path, edgelist=shortest_path_edges, with_labels=False)
nx.draw_networkx(graph_nx, pos=pos, ax=axisVG, node_color=data, cmap=cmap, alpha=1.0, font_size=fontsize, width=lineWidth, font_color='k', node_size=nodesize)
if layout=='line':
xmin, xmax = (-1.,1.)
ymin, ymax = (-1.,1.)
else:
xmin, xmax = axisVG.get_xlim()
ymin, ymax = axisVG.get_ylim()
X, Y = np.meshgrid(np.arange(xmin, xmax, (xmax - xmin) / 300.), np.arange(ymin, ymax, (ymax - ymin) / 300.))
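    # smooth() below is a simple relaxation filter along axis 0; it is applied
    # as smooth(smooth(Z).T).T so the community mask is blurred in both
    # directions before contouring.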
def smooth(Z, N=7.):
for ix in range(1,Z.shape[0]-1,1):
Z[ix] = ((N-1.)*Z[ix] + (Z[ix-1] + Z[ix+1])/2.)/N
return Z
for icommunity, community in enumerate(communities):
Z = np.exp(X ** 2 - Y ** 2) * 0.
nX, nY = tuple(np.array([pos[node] for node in community]).T)
for i in range(len(community)-1):
p1, p2 = np.array([nX[i], nY[i]]), np.array([nX[i+1], nY[i+1]])
for j in range(-2, 32):
pm = p1 + (p2-p1)*float(j)/30.
Z[np.where((X-pm[0])**2+(Y-pm[1])**2<=radius**2)] = 1.
for _ in range(20):
Z = smooth(smooth(Z).T).T
CS = axisVG.contour(X, Y, Z, [level], linewidths=commLineWidth, alpha=0.8, colors=groups_ac_colors[group - 1])
#axisVG.clabel(CS, inline=True,fontsize=4,colors=group_colors[group-1], fmt ={level:'C%s'%icommunity})
if layout=='line':
axisVG.set_xlim(-0.1,1.)
axisVG.set_ylim(-0.1,1.)
axisVG.spines['left'].set_visible(False)
axisVG.spines['right'].set_visible(False)
axisVG.spines['top'].set_visible(False)
axisVG.spines['bottom'].set_visible(False)
axisVG.set_xticklabels([])
axisVG.set_yticklabels([])
axisVG.set_xticks([])
axisVG.set_yticks([])
if withLabel:
axisVG.text(axisVG.get_xlim()[1], (axisVG.get_ylim()[1] + axisVG.get_ylim()[0]) * 0.5, dataName, ha='left', va='center',
fontsize=8).set_path_effects([path_effects.Stroke(linewidth=0.4, foreground=groups_ac_colors[group - 1]),path_effects.Normal()])
if withTitle:
titleText = dataName + ' (size: ' + str(data.shape[0]) + ')' + ' min=%s max=%s' % (np.round(min(data),2), np.round(max(data),2))
axisVG.set_title(titleText, fontsize=10)
return graph_nx, data, communities
def makeVisibilityGraph(intensities, positions, saveDir, fileName, communities=None, minNumberOfCommunities=2, communitiesMethod='betweenness_centrality', direction='left', weight='distance', printCommunities=False,fontsize=16, nodesize=500, level=0.5, commLineWidth=3.0, lineWidth=2.0, layout='circle', horizontal=False, radius=0.03, figsize=(10,10), addColorbar=True, colorbarAxisCoordinates=[0.90,0.7,0.02,0.2], colorbarLabelsize=12, colorbarPrecision=2, extension='png', dpi=300):
'''Make either horizontal or normal visibility graph of a time series using function addVisibilityGraph.
We represent each timepoint in a series as a node.
Temporal events are detected and indicated with solid blue
lines encompassing groups of points, or communities.
The shortest path identifies nodes (i.e. timepoints) that display high
intensity, and thus dominate the global signal profile, are robust
to noise, and are likely drivers of the global temporal behavior.
Parameters:
intensities:
Data to plot
positions:
Time points corresponding to data
saveDir: str
Path of directories to save the object to
fileName: str
Label to include in the file name
horizontal: boolean, Default False
Whether to use horizontal or natural visibility graph. Note that if communitiesMethod 'WDPVG' is set, this setting has no effect.
communities: tuple, Default None
            A tuple containing the communities structure of the network, and the networkx Graph:
List of list, e.g. [[],[],...]
networkx.Graph
minNumberOfCommunities: int, Default 2
Number of communities to find depends on the number of splits.
This parameter is ignored in methods that automatically
estimate optimal number of communities.
communitiesMethod: str, Default 'betweenness_centrality'
            String defining the method to use for community detection:
'Girvan_Newman': edge betweenness centrality based approach
'betweenness_centrality': reflected graph node betweenness centrality based approach
'WDPVG': weighted dual perspective visibility graph method (note to also set weight variable)
direction:str, default 'left'
The direction that nodes aggregate to communities:
None: no specific direction, e.g. both sides.
'left': nodes can only aggregate to the left side hubs, e.g. early hubs
'right': nodes can only aggregate to the right side hubs, e.g. later hubs
weight: str, Default 'distance'
Type of weight for communitiesMethod='WDPVG':
None: no weighted
'time': weight = abs(times[i] - times[j])
'tan': weight = abs((data[i] - data[j])/(times[i] - times[j])) + 10**(-8)
'distance': weight = A[i, j] = A[j, i] = ((data[i] - data[j])**2 + (times[i] - times[j])**2)**0.5
printCommunities: boolean, Default False
Whether to print communities details to screen
fontsize: float, Default 16
Labels fontsize
nodesize: int, Default 500
Node size
level: float, Default 0.5
Level
commLineWidth: float, Default 3.0
Communities lines width
lineWidth: float, Default 2.0
Edge lines width
layout: str, Default 'circle'
Type of layout, 'circle' or 'line'
horizontal: boolean, Default False
Whether to make horizontal of normal visibility graph
radius: float, Default 0.03
Rounding of the lines
figsize: tuple, Default (10,10)
Figure size in inches
addColorbar: boolean, Default True
Whether to add colorbar
colorbarAxisCoordinates: list, Default [0.90,0.7,0.02,0.2]
colorbar axis coordinates
colorbarLabelsize: float, Default 12
Colorbar labels size
colorbarPrecision: int, Default 2
colorbar labels rounding
extension: str, Default '.png'
Figure extension
dpi: int, Default 300
Figure resolution
Returns:
None
Usage:
makeVisibilityGraph(data, times, 'dir1/', 'myData')
'''
fig = plt.figure(figsize=figsize)
addVisibilityGraph(intensities, positions, fig=fig, fontsize=fontsize, nodesize=nodesize, level=level,
commLineWidth=commLineWidth, lineWidth=lineWidth, withLabel=False, layout=layout,
printCommunities=printCommunities, radius=radius, horizontal=horizontal,communities=communities, minNumberOfCommunities=minNumberOfCommunities, communitiesMethod=communitiesMethod, direction=direction, weight=weight)
addColorbarToFigure(fig, intensities, axisCoordinates=colorbarAxisCoordinates, labelsize=colorbarLabelsize, precision=colorbarPrecision)
saveFigure(fig, saveDir, ('horizontal_' if horizontal else 'normal_') + fileName, extension, dpi)
return
def makeVisibilityBarGraph(data, times, saveDir, fileName, AdjacencyMatrix=None, horizontal=False, barWidth=0.2, dotColor='b', barColor='r', arrowColor='k', id='', extension='.png', figsize=(8,4), dpi=300):
"""Bar-plot style visibility graph.
Representing the intensities as bars, this is equivalent to connecting the top
of each bar to another top if there is a direct line-of-sight to that top.
The resulting visibility graph has characteristics that reflect the equivalent
time series temporal structure and can be used to identify trends.
Parameters:
data: 2d numpy.array
Numpy array of floats
times: 2d numpy.array
Numpy array of floats
        saveDir: str
            Path of directories to save the figure to
        fileName: str
            Name of the figure file to save
AdjacencyMatrix: 2d numpy.array, Default None
Adjacency matrix of network
horizontal: boolean, default False
Horizontal or normal visibility graph
        barWidth: float, default 0.2
            Width of the bars
dotColor: str, default 'b'
Color of the data points
barColor: str, default 'r'
Color of the bars
arrowColor: str, default 'k'
Color of lines
id: str or int, default ''
Label to add to the figure title
extension: str, Default '.png'
Figure format
figsize: tuple of int, Default (8,4)
Figure size in inches
dpi: int, Default 300
Figure resolution
Returns:
None
Usage:
        makeVisibilityBarGraph(data, times, 'dir1/', 'my_figure')
"""
fig, ax = plt.subplots(figsize=figsize)
if AdjacencyMatrix is None:
if horizontal:
A = visibilityGraphAuxiliaryFunctions.getAdjacencyMatrixOfHVG(data)
else:
A = visibilityGraphAuxiliaryFunctions.getAdjacencyMatrixOfNVG(data, times)
else:
A = AdjacencyMatrix
ax.bar(times, data, width=barWidth, color=barColor, align='center', zorder=-np.inf)
ax.scatter(times, data, color=dotColor)
for i in range(A.shape[0]):
for j in range(A.shape[1]):
if i>j and A[i,j] == 1:
if horizontal:
level1 = level2 = np.min([data[i],data[j]])
else:
level1 = data[i]
level2 = data[j]
ax.annotate(text='', xy=(times[i],level1), xytext=(times[j],level2),
arrowprops=dict(arrowstyle='<->', shrinkA=0, shrinkB=0, linestyle='--'))
ax.set_title('%s Time Series'%(id), fontdict={'color': arrowColor})
ax.set_xlabel('Times', fontsize=8)
ax.set_ylabel('Signal intensity', fontsize=8)
ax.set_xticks(times)
ax.set_xticklabels(times, fontsize=10, rotation=90)
ax.set_yticks([])
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(True)
fig.tight_layout()
saveFigure(fig, saveDir, ('horizontal_' if horizontal else 'normal_') + fileName, extension, dpi)
return None
def makePlotOfPeak(data_all, scores, selected_peak, selected_peak_value, plotID):
'''Plot peaks. Function used internally during certain data processing steps'''
fig, ax = plt.subplots()
ax.plot(data_all.T[0], data_all.T[1], 'g', lw=3)
ax.plot(scores.T[0], scores.T[1], 'ro', ms=5)
ax.plot(selected_peak, selected_peak_value, 'bo', alpha=0.5, ms=10)
# saveFigure(fig, saveDir, 'spline_%s' % ('' if plotID == None else str(plotID)), extension, dpi)
return
def addColorbarToFigure(fig, data, axisCoordinates=[0.90,0.7,0.02,0.2], cmap=None, norm=None, labelsize=12, precision=2):
'''Add colorbar to figure
Parameters:
        fig: matplotlib.figure
            Figure to which the colorbar is added
        data: array-like
            Values that determine the colorbar range
cmap: matplotlib.colors.LinearSegmentedColormap, Default None
Colormap to use
norm: matplotlib.colors.Normalize, Default None
Colormap normalization
axisCoordinates: list, Default [0.90,0.7,0.02,0.2]
colorbar axis coordinates
labelsize: float, Default 12
Colorbar labels size
precision: int, Default 2
Colorbar labels rounding
Returns:
None
Usage:
addColorbarToFigure(fig, data)
'''
data = np.array(data)
dataMin = np.min(data)
dataMax = np.max(data)
axisColor = fig.add_axes(axisCoordinates)
if cmap is None:
cmap=matplotlib.colors.LinearSegmentedColormap.from_list('GR', [(0, 1, 0), (1, 0, 0)], N=100)
if norm is None:
norm=matplotlib.colors.Normalize(vmin=dataMin, vmax=dataMax)
mapp=cm.ScalarMappable(norm=norm, cmap=cmap)
mapp.set_array(data)
fig.colorbar(mapp, cax=axisColor, ticks=[dataMax,dataMin])
axisColor.tick_params(labelsize=labelsize)
axisColor.set_yticklabels([np.round(dataMax,precision), np.round(dataMin,precision)])
return
def makeDendrogramHeatmapOfClusteringObject(ClusteringObject, saveDir, dataName, AutocorrNotPeriodogr=True, textScale=1.0, figsize=(12,8), extension='.png', dpi=300, xLabel='Time', plotLabel='Transformed Expression',horizontal=False, minNumberOfCommunities=2, communitiesMethod='WDPVG', direction='left', weight='distance'):
"""Make Dendrogram-Heatmap plot along with Visibility graphs.
Parameters:
        ClusteringObject: dictionary
            Clustering object
saveDir: str
Path of directories to save the object to
dataName: str
Label to include in the file name
AutocorrNotPeriodogr: boolean, Default True
Whether to use autocorrelation method instead of periodograms
textScale: float, Default 1.0
scaling of text size
figsize: tuple, Default (12,8)
Figure size in inches
extension: str, Default '.png'
Figure format extension
dpi: int, Default 300
Figure resolution
xLabel: str, Default 'Time'
Label for the x axis in the heatmap
plotLabel: str, Default 'Transformed Expression'
Label for the heatmap plot
horizontal: boolean, Default False
Whether to use horizontal or natural visibility graph. Note that if communitiesMethod 'WDPVG' is set, this setting has no effect.
minNumberOfCommunities: int, Default 2
Number of communities to find depends on the number of splits.
This parameter is ignored in methods that automatically
estimate optimal number of communities.
communitiesMethod: str, Default 'WDPVG'
            String defining the method to use for community detection:
'Girvan_Newman': edge betweenness centrality based approach
'betweenness_centrality': reflected graph node betweenness centrality based approach
'WDPVG': weighted dual perspective visibility graph method (note to also set weight variable)
        direction: str, Default 'left'
The direction that nodes aggregate to communities:
None: no specific direction, e.g. both sides.
'left': nodes can only aggregate to the left side hubs, e.g. early hubs
'right': nodes can only aggregate to the right side hubs, e.g. later hubs
weight: str, Default 'distance'
Type of weight for communitiesMethod='WDPVG':
            None: unweighted
'time': weight = abs(times[i] - times[j])
'tan': weight = abs((data[i] - data[j])/(times[i] - times[j])) + 10**(-8)
'distance': weight = A[i, j] = A[j, i] = ((data[i] - data[j])**2 + (times[i] - times[j])**2)**0.5
Returns:
None
Usage:
        makeDendrogramHeatmapOfClusteringObject(myObj, '/dir1', 'myData', AutocorrNotPeriodogr=True)
"""
def addAutocorrelationDendrogramAndHeatmap(ClusteringObject, groupColors, fig, AutocorrNotPeriodogr=AutocorrNotPeriodogr):
axisDendro = fig.add_axes([0.68,0.1,0.17,0.8], frame_on=False)
n_clusters = len(ClusteringObject.keys()) - 1
hierarchy.set_link_color_palette(groupColors[:n_clusters]) #gist_ncar #nipy_spectral #hsv
origLineWidth = matplotlib.rcParams['lines.linewidth']
matplotlib.rcParams['lines.linewidth'] = 0.5
Y_ac = ClusteringObject['linkage']
Z_ac = hierarchy.dendrogram(Y_ac, orientation='left',color_threshold=Y_ac[-n_clusters + 1][2])
hierarchy.set_link_color_palette(None)
matplotlib.rcParams['lines.linewidth'] = origLineWidth
axisDendro.set_xticks([])
axisDendro.set_yticks([])
posA = ((axisDendro.get_xlim()[0] if n_clusters == 1 else Y_ac[-n_clusters + 1][2]) + Y_ac[-n_clusters][2]) / 2
axisDendro.plot([posA, posA], [axisDendro.get_ylim()[0], axisDendro.get_ylim()[1]], 'k--', linewidth = 1)
clusters = []
order = []
tempData = None
for group in sorted([item for item in list(ClusteringObject.keys()) if not item=='linkage']):
for subgroup in sorted([item for item in list(ClusteringObject[group].keys()) if not item=='linkage']):
subgroupData = ClusteringObject[group][subgroup]['dataAutocorr'].values
tempData = subgroupData if tempData is None else np.vstack((tempData, subgroupData))
clusters.extend([group for _ in range(subgroupData.shape[0])])
order.extend(ClusteringObject[group][subgroup]['order'])
tempData = tempData[np.argsort(order),:][Z_ac['leaves'],:].T[1:].T
clusters = np.array(clusters)
cluster_line_positions = np.where(clusters - np.roll(clusters,1) != 0)[0]
for i in range(n_clusters - 1):
posB = cluster_line_positions[i + 1] * (axisDendro.get_ylim()[1] / len(Z_ac['leaves'])) - 5. * 0
axisDendro.plot([posA, axisDendro.get_xlim()[1]], [posB, posB], '--', color='black', linewidth = 1.0)
axisDendro.plot([posA, axisDendro.get_xlim()[1]], [0., 0.], '--', color='k', linewidth = 1.0)
axisDendro.plot([posA, axisDendro.get_xlim()[1]], [-0. + axisDendro.get_ylim()[1], -0. + axisDendro.get_ylim()[1]], '--', color='k', linewidth = 1.0)
axisMatrixAC = fig.add_axes([0.78 + 0.07,0.1,0.18 - 0.075,0.8])
imAC = axisMatrixAC.imshow(tempData, aspect='auto', vmin=np.min(tempData), vmax=np.max(tempData), origin='lower', cmap=plt.cm.bwr)
for i in range(n_clusters - 1):
axisMatrixAC.plot([-0.5, tempData.shape[1] - 0.5], [cluster_line_positions[i + 1] - 0.5, cluster_line_positions[i + 1] - 0.5], '--', color='black', linewidth = 1.0)
axisMatrixAC.set_xticks([i for i in range(tempData.shape[1] - 1)])
axisMatrixAC.set_xticklabels([i + 1 for i in range(tempData.shape[1] - 1)], fontsize=6*textScale)
axisMatrixAC.set_yticks([])
axisMatrixAC.set_xlabel('Lag' if AutocorrNotPeriodogr else 'Frequency', fontsize=axisMatrixAC.xaxis.label._fontproperties._size*textScale)
axisMatrixAC.set_title('Autocorrelation' if AutocorrNotPeriodogr else 'Periodogram', fontsize=axisMatrixAC.title._fontproperties._size*textScale)
axisColorAC = fig.add_axes([0.9 + 0.065,0.55,0.01,0.35])
axisColorAC.tick_params(labelsize=6*textScale)
plt.colorbar(imAC, cax=axisColorAC, ticks=[np.round(np.min(tempData),2),np.round(np.max(tempData),2)])
return
def addGroupDendrogramAndShowSubgroups(ClusteringObject, groupSize, bottom, top, group, groupColors, fig):
Y = ClusteringObject[group]['linkage']
n_clusters = len(sorted([item for item in list(ClusteringObject[group].keys()) if not item=='linkage']))
print("Number of subgroups:", n_clusters)
axisDendro = fig.add_axes([left, bottom, dx + 0.005, top - bottom], frame_on=False)
hierarchy.set_link_color_palette([matplotlib.colors.rgb2hex(rgb[:3]) for rgb in cm.nipy_spectral(np.linspace(0, 0.5, n_clusters + 1))]) #gist_ncar #nipy_spectral #hsv
origLineWidth = matplotlib.rcParams['lines.linewidth']
matplotlib.rcParams['lines.linewidth'] = 0.5
Z = hierarchy.dendrogram(Y, orientation='left',color_threshold=Y[-n_clusters + 1][2])
hierarchy.set_link_color_palette(None)
matplotlib.rcParams['lines.linewidth'] = origLineWidth
axisDendro.set_xticks([])
axisDendro.set_yticks([])
posA = axisDendro.get_xlim()[0]/2 if n_clusters == groupSize else (Y[-n_clusters + 1][2] + Y[-n_clusters][2])/2
axisDendro.plot([posA, posA], [axisDendro.get_ylim()[0], axisDendro.get_ylim()[1]], 'k--', linewidth = 1)
clusters = []
for subgroup in sorted([item for item in list(ClusteringObject[group].keys()) if not item=='linkage']):
clusters.extend([subgroup for _ in range(ClusteringObject[group][subgroup]['data'].values.shape[0])])
clusters = np.array(clusters)
cluster_line_positions = np.where(clusters - np.roll(clusters,1) != 0)[0]
for i in range(n_clusters - 1):
posB = cluster_line_positions[i + 1] * (axisDendro.get_ylim()[1] / len(Z['leaves'])) - 5. * 0
axisDendro.plot([posA, axisDendro.get_xlim()[1]], [posB, posB], '--', color='black', linewidth = 1.0)
axisDendro.plot([posA, axisDendro.get_xlim()[1]], [0., 0.], '--', color='k', linewidth = 1.0)
axisDendro.plot([posA, axisDendro.get_xlim()[1]], [-0. + axisDendro.get_ylim()[1], -0. + axisDendro.get_ylim()[1]], '--', color='k', linewidth = 1.0)
axisDendro.text(axisDendro.get_xlim()[0], 0.5 * axisDendro.get_ylim()[1],
'G%s:' % group + str(groupSize), fontsize=14*textScale).set_path_effects([path_effects.Stroke(linewidth=1, foreground=groupColors[group - 1]),path_effects.Normal()])
return n_clusters, clusters, cluster_line_positions
def addGroupHeatmapAndColorbar(data_loc, n_clusters, clusters, cluster_line_positions, bottom, top, group, groupColors, fig,xLabel = 'Time',plotLabel = 'Transformed Expression'):
axisMatrix = fig.add_axes([left + 0.205, bottom, dx + 0.025 + 0.075, top - bottom])
masked_array = np.ma.array(data_loc, mask=np.isnan(data_loc))
cmap = matplotlib.colors.LinearSegmentedColormap.from_list('GR', [(0, 1, 0), (1, 0, 0)], N=1000) #plt.cm.prism #plt.cm.hsv_r #plt.cm.RdYlGn_r
cmap.set_bad('grey')
im = axisMatrix.imshow(masked_array, aspect='auto', origin='lower', vmin=np.min(data_loc[np.isnan(data_loc) == False]), vmax=np.max(data_loc[np.isnan(data_loc) == False]), cmap=cmap)
for i in range(n_clusters - 1):
posB = cluster_line_positions[i + 1]
posBm = cluster_line_positions[i]
axisMatrix.plot([-0.5, data_loc.shape[1] - 0.5], [posB - 0.5, posB - 0.5], '--', color='black', linewidth = 1.0)
def add_label(pos, labelText):
return axisMatrix.text(-1., pos, labelText, ha='right', va='center', fontsize=12.*textScale).set_path_effects([path_effects.Stroke(linewidth=0.4, foreground=groupColors[group - 1]),path_effects.Normal()])
order = clusters[np.sort(np.unique(clusters,return_index=True)[1])] - 1
for i in range(n_clusters - 1):
if len(data_loc[clusters == i + 1]) >= 5.:
try:
add_label((cluster_line_positions[np.where(order == i)[0][0]] + cluster_line_positions[np.where(order == i)[0][0] + 1]) * 0.5, 'G%sS%s:%s' % (group,i + 1,len(data_loc[clusters == i + 1])))
except Exception as exception:
print(exception)
print('Label printing error!')
if len(data_loc[clusters == n_clusters]) >= 5.:
posC = axisMatrix.get_ylim()[0] if n_clusters == 1 else cluster_line_positions[n_clusters - 1]
add_label((posC + axisMatrix.get_ylim()[1]) * 0.5, 'G%sS%s:%s' % (group,n_clusters,len(data_loc[clusters == n_clusters])))
axisMatrix.set_xticks([])
axisMatrix.set_yticks([])
times = ClusteringObject[group][subgroup]['data'].columns.values
if group == 1:
axisMatrix.set_xticks(range(data_loc.shape[1]))
            axisMatrix.set_xticklabels([('' if (i%2==1 and textScale>1.3) else int(time)) for i, time in enumerate(np.round(times,1))], rotation=0, fontsize=6*textScale)  # int() replaces the np.int alias removed in recent NumPy
axisMatrix.set_xlabel(xLabel, fontsize=axisMatrix.xaxis.label._fontproperties._size*textScale)
if group == sorted([item for item in list(ClusteringObject.keys()) if not item=='linkage'])[-1]:
axisMatrix.set_title(plotLabel, fontsize=axisMatrix.title._fontproperties._size*textScale)
axisColor = fig.add_axes([0.635 - 0.075 - 0.1 + 0.075,current_bottom + 0.01,0.01, max(0.01,(current_top - current_bottom) - 0.02)])
plt.colorbar(im, cax=axisColor, ticks=[np.max(im._A),np.min(im._A)])
axisColor.tick_params(labelsize=6*textScale)
axisColor.set_yticklabels([np.round(np.max(im._A),2),np.round(np.min(im._A),2)])
return
signalsInClusteringObject = 0
for group in sorted([item for item in list(ClusteringObject.keys()) if not item=='linkage']):
for subgroup in sorted([item for item in list(ClusteringObject[group].keys()) if not item=='linkage']):
signalsInClusteringObject += ClusteringObject[group][subgroup]['data'].shape[0]
fig = plt.figure(figsize=(12,8))
left = 0.02
bottom = 0.1
current_top = bottom
dx = 0.2
dy = 0.8
groupColors = ['b','g','r','c','m','y','k']
[groupColors.extend(groupColors) for _ in range(10)]
addAutocorrelationDendrogramAndHeatmap(ClusteringObject, groupColors, fig)
for group in sorted([item for item in list(ClusteringObject.keys()) if not item=='linkage']):
tempData = None
for subgroup in sorted([item for item in list(ClusteringObject[group].keys()) if not item=='linkage']):
subgroupData = ClusteringObject[group][subgroup]['data'].values
tempData = subgroupData if tempData is None else np.vstack((tempData, subgroupData))
current_bottom = current_top
current_top += dy * float(len(tempData)) / float(signalsInClusteringObject)
if len(tempData)==1:
n_clusters, clusters, cluster_line_positions = 1, np.array([]), np.array([])
else:
n_clusters, clusters, cluster_line_positions = addGroupDendrogramAndShowSubgroups(ClusteringObject, len(tempData), current_bottom, current_top, group, groupColors, fig)
addGroupHeatmapAndColorbar(tempData, n_clusters, clusters, cluster_line_positions, current_bottom, current_top, group, groupColors, fig,xLabel=xLabel,plotLabel=plotLabel)
data_list = []
data_names_list = []
for group in sorted([item for item in list(ClusteringObject.keys()) if not item=='linkage']):
for subgroup in sorted([item for item in list(ClusteringObject[group].keys()) if not item=='linkage']):
if ClusteringObject[group][subgroup]['data'].shape[0] >= 5:
data_list.append(ClusteringObject[group][subgroup]['data'].values)
data_names_list.append('G%sS%s' % (group, subgroup))
times = ClusteringObject[group][subgroup]['data'].columns.values
for indVG, (dataVG, dataNameVG) in enumerate(zip(data_list, data_names_list)):
x_min = 0.57
x_max = 0.66
y_min = 0.1
y_max = 0.9
numberOfVGs = len(data_list)
height = min((y_max - y_min) / numberOfVGs, (x_max - x_min) * (12. / 8.))
x_displacement = (x_max - x_min - height / 1.5) * 0.5
y_displacement = (y_max - y_min - numberOfVGs * height) / numberOfVGs
coords = [x_min + x_displacement, x_min + x_displacement + height / (12. / 8.), y_min + indVG * height + (0.5 + indVG) * y_displacement, y_min + (indVG + 1) * height + (0.5 + indVG) * y_displacement]
addVisibilityGraph(dataVG, times, dataNameVG, coords, numberOfVGs, groupColors, fig,horizontal=horizontal, minNumberOfCommunities=minNumberOfCommunities, communitiesMethod=communitiesMethod, direction=direction, weight=weight)
saveFigure(fig, saveDir, dataName + '_DendrogramHeatmap', extension, dpi)
return None
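# A short illustrative sketch of the three WDPVG edge-weight conventions quoted in the
# docstring of makeDendrogramHeatmapOfClusteringObject above; the real weighting is assumed
# to happen inside the visibility-graph construction used by addVisibilityGraph.
def _wdpvgEdgeWeightSketch(data, times, i, j, weight='distance'):
    '''Return the weight of edge (i, j) for weight in {None, 'time', 'tan', 'distance'}'''
    if weight is None:
        return 1.
    if weight == 'time':
        return abs(times[i] - times[j])
    if weight == 'tan':
        return abs((data[i] - data[j]) / (times[i] - times[j])) + 10**(-8)
    if weight == 'distance':
        return ((data[i] - data[j])**2 + (times[i] - times[j])**2)**0.5
    raise ValueError('Unknown weight type: %s' % weight)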
def plotHVGBarGraphDual(A, data, times, fileName, title='', fontsize=8, barwidth=0.05, figsize=(8,4), dpi=600):
"""Bar-plot style horizontal visibility graph with different link colors for different perspectives
Parameters:
A: 2d numpy.array
Adjacency matrix
data: 2d numpy.array
Data used to make the visibility graph
times: 1d numpy.array
Times corresponding to data points
fileName: str
Name of the figure file to save
title: str, Default ''
Label to add to the figure title
figsize: tuple of int, Default (8,4)
Figure size in inches
        dpi: int, Default 600
Resolution of the image
barwidth: float, Default 0.05
The bar width
        fontsize: int, Default 8
The text font size
Returns:
None
Usage:
        plotHVGBarGraphDual(A, data, times, 'Figure.png', 'Test Data')
"""
fig, ax = plt.subplots(figsize=figsize)
ax.bar(times, data, width = barwidth, color='k', align='center', zorder=-np.inf)
ax.scatter(times, data, color='k')
for i in range(A.shape[0]):
for j in range(A.shape[1]):
if i>j and A[i,j] > 0:
if data[i] > 0 and data[j] >0:
level = np.min([data[i],data[j]])
elif data[i] < 0 and data[j] < 0:
level = np.max([data[i],data[j]])
else:
level = 0
ax.annotate(text='', xy=(times[i],level), xytext=(times[j],level),
arrowprops=dict(arrowstyle='->', shrinkA=0, shrinkB=0,linestyle='--', color='r'))
if i>j and A[i,j] < 0:
if data[i] > 0 and data[j] >0:
level = np.min([data[i],data[j]])
elif data[i] < 0 and data[j] < 0:
level = np.max([data[i],data[j]])
else:
level = 0
ax.annotate(text='', xy=(times[i],level), xytext=(times[j],level),
arrowprops=dict(arrowstyle='->', shrinkA=0, shrinkB=0,linestyle='--', color='b'))
    ax.set_title('%s'%(title), fontdict={'color': 'k'}, fontsize=fontsize)  # use the title argument rather than the id() builtin
ax.set_xlabel('Times', fontsize=fontsize)
ax.set_ylabel('Signal intensity', fontsize=fontsize)
ax.set_xticks(times)
ax.set_xticklabels([str(item) for item in np.round(times,2)],fontsize=fontsize, rotation=0)
ax.set_yticks([])
ax.axhline(y=0, color='k')
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(True)
fig.tight_layout()
fig.savefig(fileName, dpi=dpi)
plt.close(fig)
return None
def plotNVGBarGraphDual(A, data, times, fileName, title='', fontsize=8, barwidth=0.05, figsize=(8,4), dpi=600):
"""Bar-plot style natural visibility graph with different link colors for different perspectives
Parameters:
A: 2d numpy.array
Adjacency matrix
data: 2d numpy.array
Data used to make the visibility graph
times: 1d numpy.array
Times corresponding to data points
fileName: str
Name of the figure file to save
title: str, Default ''
Label to add to the figure title
figsize: tuple of int, Default (8,4)
Figure size in inches
        dpi: int, Default 600
Resolution of the image
barwidth: float, Default 0.05
The bar width
        fontsize: int, Default 8
The text font size
Returns:
None
Usage:
        plotNVGBarGraphDual(A, data, times, 'Figure.png', 'Test Data')
"""
fig, ax = plt.subplots(figsize=figsize)
ax.bar(times, data, width = barwidth, color='k', align='center', zorder=-np.inf)
ax.scatter(times, data, color='k')
for i in range(A.shape[0]):
for j in range(A.shape[1]):
if i>j and A[i,j] > 0:
ax.annotate(text='', xy=(times[i],data[i]), xytext=(times[j],data[j]),
arrowprops=dict(arrowstyle='->', shrinkA=0, shrinkB=0,linestyle='--',color='r'))
if i>j and A[i,j] < 0:
ax.annotate(text='', xy=(times[i],data[i]), xytext=(times[j],data[j]),
arrowprops=dict(arrowstyle='->', shrinkA=0, shrinkB=0,linestyle='--',color='b'))
ax.set_title('%s'%(title), fontdict={'color': 'k'},fontsize=fontsize)
ax.set_xlabel('Times', fontsize=fontsize)
ax.set_ylabel('Signal intensity', fontsize=fontsize)
ax.set_xticks(times)
ax.set_xticklabels([str(item) for item in np.round(times,2)],fontsize=fontsize, rotation=0)
ax.set_yticks([])
ax.axhline(y=0, color='k')
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(True)
fig.tight_layout()
fig.savefig(fileName, dpi=dpi)
plt.close(fig)
return None
|
# -*- coding: utf-8 -*-
"""
test_extract
============
Test extraction of chemical schematic diagrams
"""
import unittest
import os
import chemschematicresolver as csr
tests_dir = os.path.dirname(os.path.abspath(__file__))
img_dir = os.path.join(tests_dir, 'data')
# train_markush_small_dir = os.path.join(train_dir, 'train_markush_small')
# r_group_diag_dir = os.path.join(train_dir, 'r_group_diags')
# train_imgs_dir = os.path.join(train_dir, 'train_imgs')
class TestExtract(unittest.TestCase):
""" Tests the overall extraction case"""
def do_extract_small(self, filename, gold):
""" Extract images from the small markush directory"""
path = os.path.join(img_dir, filename)
result = csr.extract.extract_image(path, debug=True)
self.assertEqual(gold, result)
return result
    def do_extract_all_imgs(self, dir_name=img_dir):
        """ Run extraction on every image in dir_name (no assertions)"""
        test_imgs = os.listdir(dir_name)
        for img_path in test_imgs:
            full_path = os.path.join(dir_name, img_path)
            csr.extract.extract_image(full_path, debug=True)
def test_run_train_imgs(self):
""" Run all images in train_imgs directory"""
self.do_extract_all_imgs()
def test_r_group_extract(self):
gold = [(['EtNAPH'], 'c1c2n(c3c(c2ccc1)cc(cc3)/C=C/c1ccc(/C=C/c2ccc3n(c4ccccc4c3c2)CC)c2c1cccc2)CC'),
(['MeNAPH'], 'c1c(ccc(c1)N(c1ccc(/C=C/c2c3c(c(cc2)/C=C/c2ccc(N(c4ccc(C)cc4)c4ccc(cc4)C)cc2)cccc3)cc1)c1ccc(C)cc1)C'),
(['MeONAPH'], 'c1c(ccc(c1)N(c1ccc(/C=C/c2c3c(c(cc2)/C=C/c2ccc(N(c4ccc(OC)cc4)c4ccc(cc4)OC)cc2)cccc3)cc1)c1ccc(OC)cc1)OC')]
foo = img_dir
self.do_extract_small('S014372081630119X_gr1.jpg', gold)
def test_r_group_extract2(self):
# This example fails as the chemical names need to be resolved
gold = []
self.do_extract_small('S0143720816300286_gr1.jpg', gold)
def test_r_group_extract3(self):
        # NB: This test automatically filters out failed R-group resolutions, detected by the presence of wildcards
gold = [(['107'], 'c1c2ccc3cccc4c3c2c(cc4)cc1/C=N/CCSSCC/N=C/c1cc2ccc3cccc4ccc(c1)c2c34'),
(['106'], 'c1c2ccc3cc(/C=N/c4ccccc4O)c(O)c4c3c2c(cc1)cc4')]
self.do_extract_small('S0143720816301115_r75.jpg', gold)
def test_r_group_extract4(self):
        # One of the numeric labels is identified as an atom indicator (inside OSRA).
        # This could be solved by doing a numerical clean.
gold = [(['1'], 'N(CC)(CC)c1ccc2cc(C#N)c(=N)oc2c1'),
(['2'], 'N(CC)(CC)c1ccc2cc(C#N)/c(=N/C(=O)OCC)/oc2c1'),
(['3'], 'N(CC)(CC)c1ccc2cc(C#N)c(=O)oc2c1'),
(['4'], 'N(CC)(CC)c1ccc2ccc(=O)oc2c1')]
self.do_extract_small('S0143720816301681_gr1.jpg', gold)
def do_extract_r_group_diags(self, filename, gold):
""" Extract images from the small markush directory"""
path = os.path.join(img_dir, filename)
result = csr.extract.extract_image(path, debug=True)
self.assertEqual(gold, result)
def test_r_group_diag_1(self):
# Identified numerical points as labels
gold = []
self.do_extract_r_group_diags('S0143720816301565_gr1.jpg', gold)
def test_r_group_diag_2(self):
        # Octyl and Hectyl are resolved correctly. NB: the SMILES are still wrong due to OSRA failing.
        # The relative label detection also needs improvement: at the moment it simply returns all candidates.
gold = [(['A'],
'c1(ccc(s1)c1cc(F)c(c2cc3S4(c5c(c3s2)sc(c5)c2c([F]CC(C4)CCCC)cc(c3sc(c4ccc(CCCCCC)s4)cc3)c3nsnc23)CC(CC)CCCC)c2nsnc12)c1sc(CCCCCC)cc1'),
(['1'],
'N1(C(=O)c2c(C1=O)ccc(c1sc(c3cc(F)c(c4cc5S6(c7c(c5s4)sc(c7)c4c([F]CC(C6)CCCC)cc(c5sc(cc5)c5ccc6C(=O)N(C(=O)c6c5)CCCCCCCC)c5nsnc45)CC(CC)CCCC)c4nsnc34)cc1)c2)CCCCCCCC'),
(['2'],
'N1(C(=O)c2c(C1=O)ccc(c1sc(c3cc(F)c(c4cc5S6(c7c(c5s4)sc(c7)c4c([F]CC(C6)CCCC)cc(c5sc(cc5)c5ccc6C(=O)N(C(=O)c6c5)CCCCCC)c5nsnc45)CC(CC)CCCC)c4nsnc34)cc1)c2)CCCCCC')]
self.do_extract_r_group_diags('S0143720816302054_sc1.jpg', gold)
def test_r_group_diag_3(self):
        # The same label is assigned to both diagrams, leading to different outcomes.
        # Either some of the 4 R-values are not resolved, or something else goes wrong on the R-group diagram.
gold = []
self.do_extract_r_group_diags('S0143720816302108_gr1.jpg', gold)
def test_r_group_diag_4(self):
# Schematic flow diagram, including arrows etc
gold = []
self.do_extract_r_group_diags('S0143720816301826_sc1.jpg', gold)
def test_r_group_diag_5(self):
# Containing segmenting lines, and large R-groups
gold = []
self.do_extract_r_group_diags('S0143720816301401_gr5.jpg', gold)
def do_extract_train_imgs(self, filename, gold):
""" Extract images from the train_imgs directory"""
path = os.path.join(img_dir, filename)
result = csr.extract.extract_image(path, debug=False)
self.assertEqual(gold, result)
def test_train_imgs_1(self):
gold = []
self.do_extract_train_imgs('S0143720816301115_gr1.jpg', gold)
def test_extract_document(self):
path = os.path.join(img_dir, 'train_docs', '10.1039_C4TC01753F.html')
records = csr.extract.extract_document(path)
import pprint
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(records)
def test_substitute_labels(self):
records = [{'names': ['pyrenes']},
{'names': ['Godumala Mallesham']},
{'names': ['Cedex']},
{'names': ['diphenyl phosphine–oxide']},
{'names': ['diarylamino']},
{'names': ['anthracene']},
{'names': ['triphenylamine']},
{'names': ['diphenyl']},
{'names': ['phosphine']},
{'names': ['oxide']},
{'names': ['biphenyl']},
{'diag': ['csd']},
{'names': ['bromo pyrene 1']},
{'names': ['2,7-dibromo-9,9-dimethyl-9H-fluorene']},
{'names': ['aryl lithio']},
{'names': ['hydrogen peroxide']},
{'names': ['non-pyrene']},
{'names': ['amine']},
{'names': ['hydroxyl']},
{'names': ['C2–H2 ⋯ O1']},
{'diag': ['csd']},
{'names': ['calomel']},
{'names': ['ferrocenium']},
{'names': ['acetonitrile']},
{'names': ['methanol']},
{'names': ['diphenyl phospheneoxide']},
{'names': ['4,4′-bis[N-(1-naphthyl)-N-phenyl amino ] biphenyl']},
{'names': ['Cd']},
{'names': ['carbozole–dimesitylborane']},
{'names': ['triphenylamine–imidazole']},
{'names': ['Li']},
{'names': ['1 × 10 − 4 to 5 × 10 − 4 cm2 V−1 s−1']},
{'names': ['toluene']},
{'names': ['nitrogen gas']},
{'names': ['n-hexane']},
{'names': ['chloroform–hexane']},
{'names': ['dry methanol']},
{'names': ['sodium sulfate']},
{'names': ['H2O2']},
{'names': ['diethyl ether']},
{'names': ['C28H19OP']},
{'names': ['12H']},
{'names': ['C40H27OP']},
{'names': ['( hexane–ethylacetate 1 : 1 )']},
{'names': ['C43H31OP']},
{'names': ['phosphoric acid']},
{'names': ['CH']},
{'names': ['glassy carbon']},
{'names': ['Ag']},
{'names': ['AgCl']},
{'names': ['platinum']},
{'names': ['alumina']},
{'names': ['Ti']},
{'names': ['sapphire']},
{'names': ['CH3OH–CHCl3']},
{'names': ['trichloroethylene']},
{'names': ['acetone']},
{'names': ['isopropyl alcohol']},
{'names': ['ozone']},
{'names': ['quartz']},
{'names': ['N,N-Diphenyl-N′,N′-bis(1-naphthyl)-1,1′-biphenyl-4,4′-diamine']},
{'names': ['2,9-dimethyl-4,7-diphenyl-1,10-phenanthroline', 'BCP']},
{'names': ['tris(8-hydroxyquinoline)-aluminium']},
{'names': ['Hyderabad 500007']},
{'names': ['C–H ⋯ O']},
{'names': ['fluorene']},
{'names': ['methyl']},
{'labels': ['3'], 'names': ['dibromo'], 'roles': ['derivative']},
{'labels': ['4'], 'roles': ['compound']},
{'names': ['1,4-dibromobenzene', '1,4-Dibromobenzene']},
{'names': ['4,4′-dibromobiphenyl']},
{'names': ['1,3-dibromobenzene']},
{'names': ['1,4-dibromo-2,5-dimethylbenzene']},
{'names': ['H']},
{'names': ['hydrogen']},
{'names': ['O']},
{'names': ['ferrocene']},
{'names': ['aryl']},
{'names': ['quinine sulphate']},
{'names': ['Na2SO4']},
{'names': ['dichloromethane']},
{'names': ['THF']},
{'names': ['C–H']},
{'names': ['phenyl']},
{'names': ['diphenyl phosphine oxide', 'diphenylphosphine oxide']},
{'names': ['TCNQ', '2,3,5,6-tetrafluoro-7,7′,8,8′-tetracyanoquinodimethane']},
{'labels': ['6'],
'melting_points': [{'units': '°C', 'value': '217–220'}],
'names': ['1-(4′-bromobiphenyl-4-yl)pyrene'],
'nmr_spectra': [{'apparatus': 'Bruker Avance',
'frequency': '300',
'frequency_units': 'MHz',
'nucleus': '1H',
'peaks': [{'multiplicity': 'm',
'number': '4H',
'shift': '8.28–8.16'},
{'multiplicity': 'm',
'number': '2H',
'shift': '8.13–8.10'},
{'multiplicity': 'm',
'number': '3H',
'shift': '8.08–8.00'},
{'multiplicity': 'm',
'number': '4H',
'shift': '7.79–7.69'},
{'multiplicity': 'm',
'number': '4H',
'shift': '7.66–7.57'}],
'solvent': 'CDCl3'}],
'roles': ['product', 'compound']},
{'names': ['Pd(PPh3)4']},
{'names': ['K2CO3']},
{'labels': ['7'],
'melting_points': [{'units': '°C', 'value': '125–127'}],
'names': ['1-(3-bromophenyl)pyrene'],
'nmr_spectra': [{'apparatus': 'Bruker Avance',
'frequency': '300',
'frequency_units': 'MHz',
'nucleus': '1H',
'peaks': [{'coupling': '7.8',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '2H',
'shift': '8.21'},
{'coupling': '7.8',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '8.17'},
{'multiplicity': 'm',
'number': '3H',
'shift': '8.13–8.07'},
{'multiplicity': 'm',
'number': '2H',
'shift': '8.06–8.00'},
{'coupling': '7.9',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '7.93'},
{'coupling': '1.5',
'coupling_units': 'Hz',
'multiplicity': 't',
'number': '1H',
'shift': '7.79'},
{'coupling': '8.1',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '7.60'},
{'coupling': '7.6',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '7.55'},
{'coupling': '7.8',
'coupling_units': 'Hz',
'multiplicity': 't',
'number': '1H',
'shift': '7.42'}],
'solvent': 'CDCl3'},
{'apparatus': 'Bruker Avance',
'frequency': '75',
'frequency_units': 'MHz',
'nucleus': '13C',
'peaks': [{'shift': '143.26'},
{'shift': '135.85'},
{'shift': '133.33'},
{'shift': '131.35'},
{'shift': '130.85'},
{'shift': '130.81'},
{'shift': '130.23'},
{'shift': '129.80'},
{'shift': '129.21'},
{'shift': '128.33'},
{'shift': '127.80'},
{'shift': '127.64'},
{'shift': '127.38'},
{'shift': '127.27'},
{'shift': '126.05'},
{'shift': '125.28'},
{'shift': '124.99'},
{'shift': '124.82'},
{'shift': '124.72'},
{'shift': '124.67'},
{'shift': '124.57'},
{'shift': '122.44'}],
'solvent': 'CDCl3'}],
'roles': ['product', 'compound']},
{'labels': ['8'],
'melting_points': [{'units': '°C', 'value': '119–121'}],
'names': ['1-(4-bromo-2,5-dimethylphenyl)pyrene'],
'nmr_spectra': [{'apparatus': 'Bruker Avance',
'frequency': '300',
'frequency_units': 'MHz',
'nucleus': '1H',
'peaks': [{'coupling': '7.9',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '2H',
'shift': '8.17'},
{'multiplicity': 'm',
'number': '1H',
'shift': '8.16–8.11'},
{'multiplicity': 's',
'number': '2H',
'shift': '8.07'},
{'multiplicity': 'm',
'number': '2H',
'shift': '8.01–7.93'},
{'coupling': '7.7',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '7.80'},
{'coupling': '9.1',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '7.68'},
{'multiplicity': 's',
'number': '1H',
'shift': '7.56'},
{'multiplicity': 's',
'number': '1H',
'shift': '7.21'},
{'multiplicity': 's',
'number': '3H',
'shift': '2.43'},
{'multiplicity': 's',
'number': '3H',
'shift': '1.96'}],
'solvent': 'CDCl3'},
{'apparatus': 'Bruker Avance',
'frequency': '75',
'frequency_units': 'MHz',
'nucleus': '13C',
'peaks': [{'shift': '139.86'},
{'shift': '136.36'},
{'shift': '136.04'},
{'shift': '134.88'},
{'shift': '133.38'},
{'shift': '132.87'},
{'shift': '131.33'},
{'shift': '130.89'},
{'shift': '130.62'},
{'shift': '128.77'},
{'shift': '127.60'},
{'shift': '127.38'},
{'shift': '127.35'},
{'shift': '127.09'},
{'shift': '126.02'},
{'shift': '125.17'},
{'shift': '125.08'},
{'shift': '124.99'},
{'shift': '124.73'},
{'shift': '124.66'},
{'shift': '124.50'},
{'shift': '123.91'},
{'shift': '22.38'},
{'shift': '19.47'}],
'solvent': 'CDCl3'}],
'roles': ['product', 'compound']},
{'labels': ['2'], 'names': ['borolane'], 'roles': ['compound', 'derivative']},
{'labels': ['9'],
'melting_points': [{'units': '°C', 'value': '186–188'}],
'names': ['1-(7-bromo-9,9-dimethyl-9H-fluoren-2-yl)pyrene'],
'nmr_spectra': [{'apparatus': 'Bruker Avance',
'frequency': '300',
'frequency_units': 'MHz',
'nucleus': '1H',
'peaks': [{'coupling': '2.0',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '8.25'},
{'coupling': '3.4',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '8.24'},
{'coupling': '7.6 , 1.1',
'coupling_units': 'Hz',
'multiplicity': 'dd',
'number': '1H',
'shift': '8.21'},
{'coupling': '7.6',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '8.17'},
{'coupling': '0.6',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '2H',
'shift': '8.11'},
{'multiplicity': 'm',
'number': '3H',
'shift': '8.06–8.01'},
{'coupling': '7.8',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '7.86'},
{'multiplicity': 'm',
'number': '2H',
'shift': '7.70–7.66'},
{'multiplicity': 'm',
'number': '2H',
'shift': '7.64–7.61'},
{'coupling': '8.1 , 1.8',
'coupling_units': 'Hz',
'multiplicity': 'dd',
'number': '1H',
'shift': '7.52'},
{'multiplicity': 's',
'number': '6H',
'shift': '1.57'}],
'solvent': 'CDCl3'},
{'apparatus': 'Bruker Avance',
'frequency': '75',
'frequency_units': 'MHz',
'nucleus': '13C',
'peaks': [{'shift': '155.89'},
{'shift': '153.43'},
{'shift': '140.63'},
{'shift': '137.90'},
{'shift': '137.82'},
{'shift': '137.20'},
{'shift': '131.43'},
{'shift': '130.89'},
{'shift': '130.54'},
{'shift': '130.19'},
{'shift': '129.74'},
{'shift': '128.46'},
{'shift': '127.60'},
{'shift': '127.51'},
{'shift': '127.38'},
{'shift': '126.19'},
{'shift': '125.99'},
{'shift': '125.16'},
{'shift': '125.12'},
{'shift': '124.88'},
{'shift': '124.79'},
{'shift': '124.63'},
{'shift': '121.47'},
{'shift': '121.13'},
{'shift': '119.88'},
{'shift': '47.23'},
{'shift': '27.04'}],
'solvent': 'CDCl3'}],
'roles': ['product', 'compound']},
{'names': ['chloroform']},
{'names': ['chloroform–diethyl ether']},
{'names': ['cyclohexane']},
{'names': ['ethyl acetate']},
{'names': ['n-BuLi']},
{'names': ['chlorodiphenylphosphine']},
{'names': ['31P']},
{'names': ['nitrogen']},
{'names': ['LiF']},
{'names': ['Al']},
{'names': ['Phosphine oxide', 'phosphine oxide']},
{'names': ['ITO',
'indium tin oxide',
'indium – tin oxide',
'indium–tin oxide']},
{'names': ['CHCl3']},
{'names': ['α-NPD']},
{'labels': ['5', '1'],
'melting_points': [{'units': '°C', 'value': '142–144'}],
'names': ['1-(4-bromophenyl)pyrene', 'mono bromo', 'bromo'],
'nmr_spectra': [{'apparatus': 'Bruker Avance',
'frequency': '300',
'frequency_units': 'MHz',
'nucleus': '1H',
'peaks': [{'coupling': '7.9',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '2H',
'shift': '8.18'},
{'multiplicity': 'm',
'number': '2H',
'shift': '8.16–8.09'},
{'multiplicity': 's',
'number': '2H',
'shift': '8.07'},
{'multiplicity': 'm',
'number': '2H',
'shift': '8.03–7.96'},
{'coupling': '7.9',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '7.90'},
{'coupling': '8.3',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '2H',
'shift': '7.67'},
{'coupling': '8.3',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '2H',
'shift': '7.47'}],
'solvent': 'CDCl3'},
{'apparatus': 'Bruker Avance',
'frequency': '75',
'frequency_units': 'MHz',
'nucleus': '13C',
'peaks': [{'shift': '144.87'},
{'shift': '136.05'},
{'shift': '133.09'},
{'shift': '132.13'},
{'shift': '131.99'},
{'shift': '131.92'},
{'shift': '131.26'},
{'shift': '130.88'},
{'shift': '130.71'},
{'shift': '130.53'},
{'shift': '128.58'},
{'shift': '128.44'},
{'shift': '128.21'},
{'shift': '127.80'},
{'shift': '127.68'},
{'shift': '127.25'},
{'shift': '127.18'},
{'shift': '126.04'},
{'shift': '125.26'},
{'shift': '124.97'},
{'shift': '124.77'},
{'shift': '124.57'}],
'solvent': 'CDCl3'}],
'roles': ['product', 'compound', 'intermediates']},
{'names': ['hexane']},
{'names': ['2H']},
{'ir_spectra': [{'peaks': [{'units': 'cm−1', 'value': '3041'},
{'units': 'cm−1', 'value': '1589'},
{'units': 'cm−1', 'value': '1197'},
{'units': 'cm−1', 'value': '831'},
{'units': 'cm−1', 'value': '692'}],
'solvent': 'KBr'}],
'labels': ['11'],
'melting_points': [{'units': '°C', 'value': '208'}],
'names': ['diphenyl(4-(pyren-1-yl)phenyl)phosphine oxide'],
'nmr_spectra': [{'apparatus': 'Bruker Avance',
'frequency': '75',
'frequency_units': 'MHz',
'nucleus': '13C',
'peaks': [{'shift': '144.98'},
{'shift': '144.95'},
{'shift': '136.13'},
{'shift': '133.20'},
{'shift': '132.17'},
{'shift': '132.04'},
{'shift': '131.98'},
{'shift': '131.82'},
{'shift': '131.34'},
{'shift': '130.94'},
{'shift': '130.78'},
{'shift': '130.73'},
{'shift': '130.57'},
{'shift': '128.62'},
{'shift': '128.46'},
{'shift': '128.28'},
{'shift': '127.83'},
{'shift': '127.72'},
{'shift': '127.28'},
{'shift': '127.24'},
{'shift': '126.07'},
{'shift': '125.32'},
{'shift': '125.00'},
{'shift': '124.85'},
{'shift': '124.70'},
{'shift': '124.62'}],
'solvent': 'CDCl3'},
{'apparatus': 'Bruker Avance',
'frequency': '500',
'frequency_units': 'MHz',
'nucleus': '31P',
'peaks': [{'shift': '29.141'}],
'solvent': 'CDCl3'},
{'apparatus': 'Bruker Avance',
'frequency': '300',
'frequency_units': 'MHz',
'nucleus': '1H',
'peaks': [{'coupling': '7.8',
'coupling_units': 'Hz',
'multiplicity': 't',
'number': '2H',
'shift': '8.19'},
{'coupling': '7.5',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '8.15'},
{'coupling': '9.2',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '8.11'},
{'coupling': '1.4',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '2H',
'shift': '8.07'},
{'multiplicity': 'm',
'number': '2H',
'shift': '8.03–7.98'},
{'coupling': '7.8',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '7.93'},
{'multiplicity': 'm',
'number': '6H',
'shift': '7.88–7.78'},
{'coupling': '8.1 , 2.4',
'coupling_units': 'Hz',
'multiplicity': 'dd',
'number': '2H',
'shift': '7.72'},
{'multiplicity': 'm',
'number': '6H',
'shift': '7.60–7.50'}],
'solvent': 'CDCl3'}],
'quantum_yields': [{'type': 'Φf', 'value': '0.67'}],
'roles': ['product', 'compound'],
'uvvis_spectra': [{'apparatus': 'Jasco V-550 spectrophotometer',
'peaks': [{'units': 'nm', 'value': '270'},
{'units': 'nm', 'value': '281'},
{'units': 'nm', 'value': '330'},
{'units': 'nm', 'value': '346'},
{'units': 'nm', 'value': '355'}],
'solvent': 'chloroform'},
{'apparatus': 'Jasco V-550 spectrophotometer',
'peaks': [{'units': 'nm', 'value': '480'}],
'solvent': 'chloroform'}]},
{'ir_spectra': [{'peaks': [{'units': 'cm−1', 'value': '3047'},
{'units': 'cm−1', 'value': '1594'},
{'units': 'cm−1', 'value': '1180'},
{'units': 'cm−1', 'value': '849'},
{'units': 'cm−1', 'value': '696'}],
'solvent': 'KBr'}],
'labels': ['12'],
'melting_points': [{'units': '°C', 'value': '244'}],
'names': ['diphenyl(4′-(pyren-1-yl)-[1,1′-biphenyl]-4-yl)phosphine oxide'],
'nmr_spectra': [{'apparatus': 'Bruker Avance',
'frequency': '75',
'frequency_units': 'MHz',
'nucleus': '13C',
'peaks': [{'shift': '144.24'},
{'shift': '141.11'},
{'shift': '138.64'},
{'shift': '136.85'},
{'shift': '133.21'},
{'shift': '132.74'},
{'shift': '132.60'},
{'shift': '132.15'},
{'shift': '132.01'},
{'shift': '131.41'},
{'shift': '131.15'},
{'shift': '130.88'},
{'shift': '130.69'},
{'shift': '130.50'},
{'shift': '128.61'},
{'shift': '128.45'},
{'shift': '127.59'},
{'shift': '127.51'},
{'shift': '127.47'},
{'shift': '127.34'},
{'shift': '127.19'},
{'shift': '127.03'},
{'shift': '126.03'},
{'shift': '125.18'},
{'shift': '125.03'},
{'shift': '124.87'},
{'shift': '124.66'}],
'solvent': 'CDCl3'},
{'apparatus': 'Bruker Avance',
'frequency': '500',
'frequency_units': 'MHz',
'nucleus': '31P',
'peaks': [{'shift': '28.996'}],
'solvent': 'CDCl3'},
{'apparatus': 'Bruker Avance',
'frequency': '300',
'frequency_units': 'MHz',
'nucleus': '1H',
'peaks': [{'multiplicity': 'm',
'number': '4H',
'shift': '8.24–8.15'},
{'multiplicity': 's',
'number': '2H',
'shift': '8.09'},
{'multiplicity': 'm',
'number': '3H',
'shift': '8.05–7.98'},
{'multiplicity': 'm',
'number': '12H',
'shift': '7.81–7.70'},
{'multiplicity': 'm',
'number': '6H',
'shift': '7.60–7.47'}],
'solvent': 'CDCl3'}],
'quantum_yields': [{'type': 'Φf', 'value': '0.87'}],
'roles': ['product', 'compound'],
'uvvis_spectra': [{'apparatus': 'Jasco V-550 spectrophotometer',
'peaks': [{'units': 'nm', 'value': '269'},
{'units': 'nm', 'value': '281'},
{'units': 'nm', 'value': '330'},
{'units': 'nm', 'value': '347'},
{'units': 'nm', 'value': '362'}],
'solvent': 'chloroform'},
{'apparatus': 'Jasco V-550 spectrophotometer',
'peaks': [{'units': 'nm', 'value': '476'}],
'solvent': 'chloroform'}]},
{'names': ['13C']},
{'names': ['1H']},
{'ir_spectra': [{'peaks': [{'units': 'cm−1', 'value': '3048'},
{'units': 'cm−1', 'value': '2917'},
{'units': 'cm−1', 'value': '1598'},
{'units': 'cm−1', 'value': '1186'},
{'units': 'cm−1', 'value': '848'},
{'units': 'cm−1', 'value': '717'}],
'solvent': 'KBr'}],
'labels': ['14'],
'melting_points': [{'units': '°C', 'value': '188'}],
'names': ['(2,5-dimethyl-4-(pyren-1-yl)phenyl)diphenyl phosphine oxide'],
'nmr_spectra': [{'apparatus': 'Bruker Avance',
'frequency': '300',
'frequency_units': 'MHz',
'nucleus': '1H',
'peaks': [{'coupling': '7.9',
'coupling_units': 'Hz',
'multiplicity': 't',
'number': '2H',
'shift': '8.21'},
{'coupling': '7.2',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '8.17'},
{'multiplicity': 's',
'number': '2H',
'shift': '8.10'},
{'multiplicity': 'm',
'number': '2H',
'shift': '8.03–7.99'},
{'coupling': '7.9',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '7.84'},
{'multiplicity': 'm',
'number': '4H',
'shift': '7.83–7.77'},
{'coupling': '9.1',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '7.67'},
{'multiplicity': 'm',
'number': '6H',
'shift': '7.61–7.50'},
{'coupling': '4.3',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '7.31'},
{'coupling': '14.3',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '7.09'},
{'multiplicity': 's',
'number': '3H',
'shift': '2.47'},
{'multiplicity': 's',
'number': '3H',
'shift': '1.90'}],
'solvent': 'CDCl3'},
{'apparatus': 'Bruker Avance',
'frequency': '75',
'frequency_units': 'MHz',
'nucleus': '13C',
'peaks': [{'shift': '144.58'},
{'shift': '140.10'},
{'shift': '139.99'},
{'shift': '135.91'},
{'shift': '135.07'},
{'shift': '134.92'},
{'shift': '134.28'},
{'shift': '134.14'},
{'shift': '133.96'},
{'shift': '133.79'},
{'shift': '133.56'},
{'shift': '132.19'},
{'shift': '132.04'},
{'shift': '131.93'},
{'shift': '131.78'},
{'shift': '131.26'},
{'shift': '130.82'},
{'shift': '130.68'},
{'shift': '128.64'},
{'shift': '128.47'},
{'shift': '127.68'},
{'shift': '127.45'},
{'shift': '127.27'},
{'shift': '126.69'},
{'shift': '126.02'},
{'shift': '125.21'},
{'shift': '125.03'},
{'shift': '124.92'},
{'shift': '124.45'},
{'shift': '21.23'},
{'shift': '19.75'}],
'solvent': 'CDCl3'}],
'quantum_yields': [{'type': 'Φf', 'value': '0.51'}],
'roles': ['product', 'compound', 'compounds', 'data of'],
'uvvis_spectra': [{'apparatus': 'Jasco V-550 spectrophotometer',
'peaks': [{'units': 'nm', 'value': '269'},
{'units': 'nm', 'value': '279'},
{'units': 'nm', 'value': '315'},
{'units': 'nm', 'value': '330'},
{'units': 'nm', 'value': '345'},
{'units': 'nm', 'value': '341'}],
'solvent': 'chloroform'},
{'apparatus': 'Jasco V-550 spectrophotometer',
'peaks': [{'units': 'nm', 'value': '465'}],
'solvent': 'chloroform'}]},
{'names': ['CDCl3']},
{'ir_spectra': [{'peaks': [{'units': 'cm−1', 'value': '3038'},
{'units': 'cm−1', 'value': '2961'},
{'units': 'cm−1', 'value': '1598'},
{'units': 'cm−1', 'value': '1434'},
{'units': 'cm−1', 'value': '1192'},
{'units': 'cm−1', 'value': '844'},
{'units': 'cm−1', 'value': '696'}],
'solvent': 'KBr'}],
'labels': ['15'],
'melting_points': [{'units': '°C', 'value': '273'}],
'names': ['(9,9-dimethyl-7-(pyren-1-yl)-9H-fluoren-2-yl)diphenylphosphine '
'oxide'],
'nmr_spectra': [{'apparatus': 'Bruker Avance',
'frequency': '500',
'frequency_units': 'MHz',
'nucleus': '31P',
'peaks': [{'shift': '29.932'}],
'solvent': 'CDCl3'},
{'apparatus': 'Bruker Avance',
'frequency': '300',
'frequency_units': 'MHz',
'nucleus': '1H',
'peaks': [{'coupling': '9.1',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '2H',
'shift': '8.22'},
{'coupling': '7.6',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '8.18'},
{'coupling': '7.6',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '8.15'},
{'multiplicity': 's',
'number': '2H',
'shift': '8.08'},
{'multiplicity': 'm',
'number': '4H',
'shift': '8.05–7.97'},
{'coupling': '7.6',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '7.90'},
{'coupling': '7.8 , 2.3',
'coupling_units': 'Hz',
'multiplicity': 'dd',
'number': '1H',
'shift': '7.82'},
{'multiplicity': 'm',
'number': '5H',
'shift': '7.78–7.70'},
{'coupling': '7.8',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '7.62'},
{'multiplicity': 'm',
'number': '7H',
'shift': '7.58–7.46'},
{'multiplicity': 's',
'number': '6H',
'shift': '1.56'}],
'solvent': 'CDCl3'},
{'apparatus': 'Bruker Avance',
'frequency': '75',
'frequency_units': 'MHz',
'nucleus': '13C',
'peaks': [{'shift': '154.52'},
{'shift': '154.10'},
{'shift': '153.94'},
{'shift': '142.69'},
{'shift': '141.48'},
{'shift': '137.59'},
{'shift': '136.87'},
{'shift': '133.51'},
{'shift': '132.10'},
{'shift': '131.97'},
{'shift': '131.84'},
{'shift': '131.35'},
{'shift': '131.27'},
{'shift': '131.19'},
{'shift': '130.81'},
{'shift': '130.54'},
{'shift': '129.88'},
{'shift': '129.74'},
{'shift': '128.50'},
{'shift': '128.35'},
{'shift': '127.52'},
{'shift': '127.39'},
{'shift': '127.30'},
{'shift': '126.46'},
{'shift': '126.34'},
{'shift': '125.95'},
{'shift': '125.09'},
{'shift': '125.01'},
{'shift': '124.97'},
{'shift': '124.77'},
{'shift': '124.58'},
{'shift': '120.54'},
{'shift': '119.86'},
{'shift': '119.68'},
{'shift': '47.28'},
{'shift': '26.90'}],
'solvent': 'CDCl3'}],
'quantum_yields': [{'type': 'Φf', 'value': '0.88'}],
'roles': ['product', 'compound'],
'uvvis_spectra': [{'apparatus': 'Jasco V-550 spectrophotometer',
'peaks': [{'units': 'nm', 'value': '268'},
{'units': 'nm', 'value': '280'},
{'units': 'nm', 'value': '312'},
{'units': 'nm', 'value': '351'},
{'units': 'nm', 'value': '368'}],
'solvent': 'chloroform'},
{'apparatus': 'Jasco V-550 spectrophotometer',
'peaks': [{'units': 'nm', 'value': '473'}],
'solvent': 'chloroform'}]},
{'ir_spectra': [{'peaks': [{'units': 'cm−1', 'value': '3049'},
{'units': 'cm−1', 'value': '1585'},
{'units': 'cm−1', 'value': '1187'},
{'units': 'cm−1', 'value': '847'},
{'units': 'cm−1', 'value': '695'}],
'solvent': 'KBr'}],
'labels': ['10'],
'melting_points': [{'units': '°C', 'value': '243'}],
'names': ['diphenyl(pyren-1-yl)phosphine oxide'],
'nmr_spectra': [{'apparatus': 'Bruker Avance',
'frequency': '300',
'frequency_units': 'MHz',
'nucleus': '1H',
'peaks': [{'coupling': '9.1',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '8.93'},
{'multiplicity': 'm',
'number': '3H',
'shift': '8.27–8.15'},
{'multiplicity': 'm',
'number': '4H',
'shift': '8.11–8.00'},
{'multiplicity': 'm',
'number': '5H',
'shift': '7.80–7.67'},
{'multiplicity': 'm',
'number': '2H',
'shift': '7.60–7.52'},
{'multiplicity': 'm',
'number': '4H',
'shift': '7.51–7.42'}],
'solvent': 'CDCl3'},
{'apparatus': 'Bruker Avance',
'frequency': '75',
'frequency_units': 'MHz',
'nucleus': '13C',
'peaks': [{'shift': '134.16'},
{'shift': '133.89'},
{'shift': '132.50'},
{'shift': '132.22'},
{'shift': '132.09'},
{'shift': '131.85'},
{'shift': '131.22'},
{'shift': '131.06'},
{'shift': '130.94'},
{'shift': '130.34'},
{'shift': '129.82'},
{'shift': '128.87'},
{'shift': '128.66'},
{'shift': '128.50'},
{'shift': '127.06'},
{'shift': '126.44'},
{'shift': '126.28'},
{'shift': '126.15'},
{'shift': '123.59'},
{'shift': '123.41'}],
'solvent': 'CDCl3'},
{'apparatus': 'Bruker Avance',
'frequency': '500',
'frequency_units': 'MHz',
'nucleus': '31P',
'peaks': [{'shift': '32.886'}],
'solvent': 'CDCl3'}],
'quantum_yields': [{'type': 'Φf', 'value': '0.91'}],
'roles': ['data of',
'product',
'compound',
'compounds',
'data for',
'derivatives'],
'uvvis_spectra': [{'apparatus': 'Jasco V-550 spectrophotometer',
'peaks': [{'units': 'nm', 'value': '491'}],
'solvent': 'chloroform'},
{'apparatus': 'Jasco V-550 spectrophotometer',
'peaks': [{'units': 'nm', 'value': '271'},
{'units': 'nm', 'value': '282'},
{'units': 'nm', 'value': '325'},
{'units': 'nm', 'value': '339'},
{'units': 'nm', 'value': '353'},
{'units': 'nm', 'value': '350'}],
'solvent': 'chloroform'}]},
{'ir_spectra': [{'peaks': [{'units': 'cm−1', 'value': '3039'},
{'units': 'cm−1', 'value': '1582'},
{'units': 'cm−1', 'value': '1187'},
{'units': 'cm−1', 'value': '851'},
{'units': 'cm−1', 'value': '697'}],
'solvent': 'KBr'}],
'labels': ['13'],
'melting_points': [{'units': '°C', 'value': '196'}],
'names': ['pyrene',
'Pyrene',
'diphenyl(3-(pyren-1-yl)phenyl)phosphine oxide'],
'nmr_spectra': [{'apparatus': 'Bruker Avance',
'frequency': '300',
'frequency_units': 'MHz',
'nucleus': '1H',
'peaks': [{'coupling': '7.8',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '2H',
'shift': '8.17'},
{'coupling': '7.8',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '1H',
'shift': '8.14'},
{'coupling': '2.9',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '2H',
'shift': '8.05'},
{'multiplicity': 'm',
'number': '3H',
'shift': '8.03–7.93'},
{'coupling': '7.8',
'coupling_units': 'Hz',
'multiplicity': 'd',
'number': '2H',
'shift': '7.91'},
{'multiplicity': 'm',
'number': '6H',
'shift': '7.86–7.75'},
{'coupling': '7.6 , 2.9',
'coupling_units': 'Hz',
'multiplicity': 'td',
'number': '1H',
'shift': '7.66'},
{'multiplicity': 'm',
'number': '6H',
'shift': '7.56–7.45'}],
'solvent': 'CDCl3'},
{'apparatus': 'Bruker Avance',
'frequency': '75',
'frequency_units': 'MHz',
'nucleus': '13C',
'peaks': [{'shift': '141.45'},
{'shift': '141.36'},
{'shift': '136.08'},
{'shift': '134.07'},
{'shift': '134.05'},
{'shift': '133.92'},
{'shift': '133.84'},
{'shift': '133.37'},
{'shift': '132.80'},
{'shift': '132.55'},
{'shift': '132.09'},
{'shift': '132.01'},
{'shift': '131.97'},
{'shift': '131.95'},
{'shift': '131.31'},
{'shift': '130.93'},
{'shift': '130.85'},
{'shift': '130.80'},
{'shift': '130.73'},
{'shift': '128.61'},
{'shift': '128.56'},
{'shift': '128.52'},
{'shift': '128.46'},
{'shift': '128.25'},
{'shift': '127.73'},
{'shift': '127.62'},
{'shift': '127.47'},
{'shift': '127.23'},
{'shift': '126.02'},
{'shift': '125.25'},
{'shift': '124.91'},
{'shift': '124.79'},
{'shift': '124.67'},
{'shift': '124.59'},
{'shift': '124.51'}],
'solvent': 'CDCl3'},
{'apparatus': 'Bruker Avance',
'frequency': '500',
'frequency_units': 'MHz',
'nucleus': '31P',
'peaks': [{'shift': '29.01'}],
'solvent': 'CDCl3'}],
'quantum_yields': [{'type': 'Φf', 'value': '0.54'}],
'roles': ['product', 'compound', 'data for', 'data of', 'derivative'],
'uvvis_spectra': [{'apparatus': 'Jasco V-550 spectrophotometer',
'peaks': [{'units': 'nm', 'value': '272'},
{'units': 'nm', 'value': '281'},
{'units': 'nm', 'value': '330'},
{'units': 'nm', 'value': '348'},
{'units': 'nm', 'value': '354'}],
'solvent': 'chloroform'},
{'apparatus': 'Jasco V-550 spectrophotometer',
'peaks': [{'units': 'nm', 'value': '467'}],
'solvent': 'chloroform'}]}]
results = [ [ ( ['12'],
'c12cccc3ccc4c(ccc(cc1)c4c23)c1ccc(C2=CC=C(CC2)P(=O)(C2=CCCC=C2)C2CCCCC2)cc1'),
( ['15'],
'c1c2ccc3ccc(c4ccc5C6=C(C=C(CC6)[C](=O)(C6=CCCC=C6)C6C=CC=CC6)C(C)(C)c5c4)c4ccc(cc1)c2c34'),
( ['11'],
'c1c2ccc3CCC(=c4ccc(cc1)c2c34)c1ccc(P(C2CCCCC2)(O)C2C=CC=CC2)cc1'),
(['u'], ''),
( ['13'],
'C1C2=c3c(=CC1)ccc1c(ccc(CC2)c31)C1=CC(=CCC1)[C]1(=CC=CC=C1)(C1=CCCC=C1)(C1=CCCC=C1)O'),
(['10'], '')],
[(['znuum'], '')]]
csr.extract.substitute_labels(records, results)
|
"""Tests for :func:`nilearn.plotting.plot_roi`."""
import pytest
import numpy as np
import matplotlib.pyplot as plt
from nibabel import Nifti1Image
from nilearn.plotting import plot_roi
from nilearn.image.resampling import coord_transform
from .testing_utils import MNI_AFFINE
def demo_plot_roi(**kwargs):
"""Demo plotting an ROI."""
data = np.zeros((91, 109, 91))
    # Color an asymmetric rectangle around Broca's area.
x, y, z = -52, 10, 22
x_map, y_map, z_map = coord_transform(x, y, z,
np.linalg.inv(MNI_AFFINE))
data[int(x_map) - 5:int(x_map) + 5, int(y_map) - 3:int(y_map) + 3,
int(z_map) - 10:int(z_map) + 10] = 1
img = Nifti1Image(data, MNI_AFFINE)
return plot_roi(img, title="Broca's area", **kwargs)
@pytest.mark.parametrize('view_type', ['contours', 'continuous'])
@pytest.mark.parametrize('black_bg', [True, False])
@pytest.mark.parametrize('threshold', [.5, .2])
@pytest.mark.parametrize('alpha', [.7, .1])
def test_plot_roi_view_types(view_type, black_bg, threshold, alpha):
"""Smoke-test for plot_roi.
Tests different combinations of parameters `view_type`, `black_bg`,
`threshold`, and `alpha`.
"""
kwargs = dict()
if view_type == 'contours':
kwargs["linewidth"] = 2.
demo_plot_roi(view_type=view_type, black_bg=black_bg, threshold=threshold,
alpha=alpha, **kwargs)
plt.close()
def test_plot_roi_view_type_error():
"""Test error message for invalid view_type."""
with pytest.raises(ValueError,
match='Unknown view type:'):
demo_plot_roi(view_type='flled')
def test_demo_plot_roi_output_file(tmpdir):
"""Tests plot_roi file saving capabilities."""
filename = str(tmpdir.join('test.png'))
with open(filename, 'wb') as fp:
out = demo_plot_roi(output_file=fp)
assert out is None
|
# Generated by Django 3.1.7 on 2021-02-26 15:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Individual',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('gedcom_id', models.CharField(blank=True, max_length=200)),
('name_last', models.CharField(default='НЕИЗВЕСТНО', max_length=200)),
('name_first', models.CharField(blank=True, max_length=200)),
('patronym', models.CharField(blank=True, max_length=200)),
('name_maiden', models.CharField(blank=True, max_length=200)),
('gender', models.CharField(blank=True, choices=[('F', 'женский'), ('M', 'мужской'), ('U', 'неизвестно')], max_length=3)),
('date_birth', models.CharField(blank=True, max_length=200)),
('date_death', models.CharField(blank=True, max_length=200)),
('place_birth', models.CharField(blank=True, max_length=200)),
('place_death', models.CharField(blank=True, max_length=200)),
('individual_notes', models.TextField(blank=True, max_length=500)),
],
),
migrations.CreateModel(
name='Role',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('role_name', models.CharField(blank=True, max_length=200)),
],
),
migrations.CreateModel(
name='Relationship',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('relationship_type', models.CharField(blank=True, max_length=200)),
('individual_1_role', models.CharField(blank=True, max_length=200)),
('individual_2_role', models.CharField(blank=True, max_length=200)),
('marriage_date', models.CharField(blank=True, max_length=200)),
('individual_1_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Individual_1', to='familyroster.individual')),
('individual_2_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Individual_2', to='familyroster.individual')),
],
),
]
|
from ._resource import Resource
from ._resource_provider import ResourceProvider
from ._swagger_module import SwaggerModule, DataPlaneModule, MgmtPlaneModule
from ._swagger_specs import SwaggerSpecs
from ._swagger_loader import SwaggerLoader
|
import os
import torch as t
from utils.config import opt
from data.dataset import Dataset, TestDataset
from torch.utils import data as data_
from tqdm import tqdm
from model import FasterRCNNVGG16
from trainer import FasterRCNNTrainer
from data.util import read_image
from utils.vis_tool import vis_bbox
from utils import array_tool as at
from utils.eval_tool import calc_detection_voc_prec_rec
import numpy as np
def eval_detection_voc(
pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels,
gt_difficults=None,
iou_thresh=0.5):
prec, rec = calc_detection_voc_prec_rec(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults,
iou_thresh)
return prec, rec
def eval(dataloader, faster_rcnn, test_num=10000):
total = 0
pred_bboxes, pred_labels, pred_scores = list(), list(), list()
gt_bboxes, gt_labels, gt_difficults = list(), list(), list()
for ii, (imgs, sizes, gt_bboxes_, gt_labels_, gt_difficults_) in tqdm(enumerate(dataloader)):
sizes = [sizes[0][0].item(), sizes[1][0].item()]
pred_bboxes_, pred_labels_, pred_scores_ = faster_rcnn.predict(imgs, [sizes])
gt_bboxes += list(gt_bboxes_.numpy())
gt_labels += list(gt_labels_.numpy())
gt_difficults += list(gt_difficults_.numpy())
pred_bboxes += pred_bboxes_
pred_labels += pred_labels_
pred_scores += pred_scores_
if ii == test_num: break
# for target,predict in zip(gt_labels,pred_labels):
# print('target:',target,'predict',predict)
# total += len(i)
# print('labels:',len(total))
result = eval_detection_voc(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults)
return result
faster_rcnn = FasterRCNNVGG16()
trainer = FasterRCNNTrainer(faster_rcnn).cuda()
trainer.load('/home/kdd/Documents/simple-faster-rcnn-pytorch/checkpoints/fasterrcnn_11101036_0.5348237306784394')
opt.caffe_pretrain=True
# get dataloader
testset = TestDataset(opt)
test_dataloader = data_.DataLoader(testset,
batch_size=1,
num_workers=opt.test_num_workers,
shuffle=False, \
pin_memory=True
)
result = eval(test_dataloader,faster_rcnn)
prec, recall = result[0], result[1]
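# Hedged addition, not part of the original script: per-class average precision can be
# derived from the precision/recall arrays above using the standard (non-VOC2007) AP
# definition; the helper name below is an assumption and mirrors the usual chainercv-style
# computation that accompanies calc_detection_voc_prec_rec.
def average_precision_from_prec_rec(prec_list, rec_list):
    ap = np.empty(len(prec_list))
    for l in range(len(prec_list)):
        if prec_list[l] is None or rec_list[l] is None:
            ap[l] = np.nan
            continue
        mpre = np.concatenate(([0.], np.nan_to_num(prec_list[l]), [0.]))
        mrec = np.concatenate(([0.], rec_list[l], [1.]))
        # Make precision monotonically non-increasing, then sum the area under the PR curve
        mpre = np.maximum.accumulate(mpre[::-1])[::-1]
        i = np.where(mrec[1:] != mrec[:-1])[0]
        ap[l] = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap
ap = average_precision_from_prec_rec(prec, recall)
print('mAP (ignoring classes with no ground truth):', np.nanmean(ap))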
|
x = int(input())
v = 0
if 1 <= x <= 100:
    for _ in range(x):
        a = input().split(' ')
        z = []
        v += 1
        for i in a:
            # Convert first, then range-check: the original checked int(i) <= 1000 before the
            # try block, so a non-numeric token would crash instead of being skipped.
            try:
                value = int(i)
            except ValueError:
                continue
            if value <= 1000:
                z.append(value)
        if len(z) == 3:
            z.sort()
            print(f'Case {v}: {z[0]} {z[1]} {z[2]}')
|
import pyDes
import base64
from secret import security
key = security.key
iv = security.iv
def encrypt(data):
    # DES in CBC mode with PKCS5 padding; the ciphertext is base64-encoded for safe transport
    k = pyDes.des(key, pyDes.CBC, iv, pad=None, padmode=pyDes.PAD_PKCS5)
    return base64.b64encode(k.encrypt(data))
def decrypt(data):
    # Inverse of encrypt(): base64-decode, then DES-CBC decrypt with PKCS5 unpadding
    k = pyDes.des(key, pyDes.CBC, iv, pad=None, padmode=pyDes.PAD_PKCS5)
    return k.decrypt(base64.b64decode(data))
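# Hedged usage sketch (assumes secret.security supplies a valid 8-byte DES key and IV,
# which this module does not show): a simple encrypt/decrypt round trip.
if __name__ == '__main__':
    token = encrypt(b'some payload')
    print(token.decode('ascii'))
    assert decrypt(token) == b'some payload'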
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/multiscale_anchor_generator.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n9object_detection/protos/multiscale_anchor_generator.proto\x12\x17object_detection.protos\"\xba\x01\n\x19MultiscaleAnchorGenerator\x12\x14\n\tmin_level\x18\x01 \x01(\x05:\x01\x33\x12\x14\n\tmax_level\x18\x02 \x01(\x05:\x01\x37\x12\x17\n\x0c\x61nchor_scale\x18\x03 \x01(\x02:\x01\x34\x12\x15\n\raspect_ratios\x18\x04 \x03(\x02\x12\x1c\n\x11scales_per_octave\x18\x05 \x01(\x05:\x01\x32\x12#\n\x15normalize_coordinates\x18\x06 \x01(\x08:\x04true')
_MULTISCALEANCHORGENERATOR = DESCRIPTOR.message_types_by_name['MultiscaleAnchorGenerator']
MultiscaleAnchorGenerator = _reflection.GeneratedProtocolMessageType('MultiscaleAnchorGenerator', (_message.Message,), {
'DESCRIPTOR' : _MULTISCALEANCHORGENERATOR,
'__module__' : 'object_detection.protos.multiscale_anchor_generator_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.MultiscaleAnchorGenerator)
})
_sym_db.RegisterMessage(MultiscaleAnchorGenerator)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_MULTISCALEANCHORGENERATOR._serialized_start=87
_MULTISCALEANCHORGENERATOR._serialized_end=273
# @@protoc_insertion_point(module_scope)
|
##
#
# Logistic regression
#
# Y_{i} | \beta \sim \textrm{Bin}\left(n_{i},e^{x_{i}^{T}\beta}/(1+e^{x_{i}^{T}\beta})\right)
# \beta \sim N\left(\beta_{0},\Sigma_{0}\right)
#
##
import sys
import numpy as np
########################################################################################
## Handle batch job arguments:
nargs = len(sys.argv)
print('Command line arguments: ' + str(sys.argv))
####
# sim_start ==> Lowest simulation number to be analyzed by this particular batch job
###
#######################
sim_start = 1000
length_datasets = 200
#######################
# Note: this only sets the random seed for numpy, so if you intend
# on using other modules to generate random numbers, then be sure
# to set the appropriate RNG here
if nargs < 3:
sim_num = sim_start + 1
np.random.seed(1330931)
else:
# Decide on the job number, usually start at 1000:
sim_num = sim_start + int(sys.argv[2])
# Set a different random seed for every job number!!!
np.random.seed(762*sim_num + 1330931)
# Simulation datasets numbered 1001-1200
########################################################################################
########################################################################################
def bayes_logreg(n,y,X,beta_0,Sigma_0_inv,niter=10000,burnin=1000,print_every=1000,retune=100,verbose=False):
# Stuff...
return None
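# A minimal sketch (not the author's implementation) of the log-posterior implied by
# the model in the header comment: Y_i | beta ~ Bin(n_i, logit^{-1}(x_i^T beta)) with
# a N(beta_0, Sigma_0) prior; it could serve as the target density inside bayes_logreg.
def _log_posterior_sketch(beta, n, y, X, beta_0, Sigma_0_inv):
    eta = X.dot(beta)
    # Binomial log-likelihood up to an additive constant; logaddexp(0, eta) = log(1 + e^eta)
    log_lik = np.sum(y * eta - n * np.logaddexp(0.0, eta))
    # Gaussian prior up to an additive constant
    diff = beta - beta_0
    return log_lik - 0.5 * diff.dot(Sigma_0_inv).dot(diff)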
#################################################
p = 2  # dimension of beta
beta_0 = np.zeros(p)
Sigma_0_inv = np.diag(np.ones(p))
# More stuff...
#################################################
# Read data corresponding to appropriate sim_num:
# Extract X and y:
# Fit the Bayesian model:
# Extract posterior quantiles...
# Write results to a (99 x p) csv file...
# Go celebrate.
|
#!/usr/bin/env python3
from .ast import Ast
from .error_logger import ErrorLogger
from .token_types import Token, TokenType
class ParseError(RuntimeError):
pass
class Parser:
def __init__(self, tokens):
self.current = 0
self.tokens = tokens
def peek(self) -> Token:
return self.tokens[self.current]
def previous(self) -> Token:
return self.tokens[self.current - 1]
def advance(self) -> Token:
token = self.peek()
if not self.is_end():
self.current += 1
return token
def is_end(self) -> bool:
return self.peek().token_type == TokenType.EOF
def match(self, *token_types) -> bool:
for token_type in token_types:
if self.check(token_type):
self.advance()
return True
return False
def check(self, token_type: TokenType):
return (not self.is_end()) and self.peek().token_type == token_type
def consume(self, token_type: TokenType, message: str) -> Token:
if self.check(token_type):
return self.advance()
self.error(self.peek(), message)
def error(self, token: Token, message: str):
ErrorLogger.error(token=token, message=message)
raise ParseError()
def parse(self) -> Ast.Expr:
"""
program -> declaration* EOF
"""
statements = []
while not self.is_end():
statements.append(self.declaration())
return statements
def declaration(self):
"""
declaration -> funDecl | varDecl | statement
"""
try:
if self.match(TokenType.FUN):
return self.function_declaration("function")
if self.match(TokenType.VAR):
return self.variable_declaration()
return self.statement()
except ParseError:
self.synchronize()
def function_declaration(self, kind):
"""
funDecl -> IDENTIFIER "(" params? ")" block
params -> IDENTIFIER ("," IDENTIFIER)*
"""
name = self.consume(TokenType.IDENTIFIER, f"expected {kind} name.")
self.consume(TokenType.LEFT_PAREN, f"expected ( after {kind} name.")
params = []
if not self.check(TokenType.RIGHT_PAREN):
params.append(self.consume(TokenType.IDENTIFIER, "expected identifier."))
while self.match(TokenType.COMMA):
params.append(
self.consume(TokenType.IDENTIFIER, "expected identifier.")
)
self.consume(TokenType.RIGHT_PAREN, "expected ) after params")
self.consume(TokenType.LEFT_BRACE, f"expected {{ before {kind} body.")
body = self.block()
return Ast.Function(name, params, body)
def variable_declaration(self):
"""
varDecl -> "var" IDENTIFIER ( "=" expression )? ;
"""
name = self.consume(TokenType.IDENTIFIER, "expected identifier")
val = None
if self.match(TokenType.EQUAL):
val = self.expression()
self.consume(TokenType.SEMI_COLON, "expected ;")
return Ast.Var(name, val)
def statement(self):
"""
statement -> if_statement
| while_statement
| for_statement
| expr_statement
| print_statement
| return_statement
| block
"""
if self.match(TokenType.IF):
return self.if_statement()
if self.match(TokenType.WHILE):
return self.while_statement()
if self.match(TokenType.FOR):
return self.for_statement()
if self.match(TokenType.PRINT):
return self.print_statement()
if self.match(TokenType.RETURN):
return self.return_statement()
if self.match(TokenType.LEFT_BRACE):
return self.block()
return self.expression_statement()
def while_statement(self):
"""
while_statement => "while" "(" expression ")" statement
"""
self.consume(TokenType.LEFT_PAREN, "Expected (")
cond = self.expression()
self.consume(TokenType.RIGHT_PAREN, "Expected )")
body = self.statement()
return Ast.While(cond, body)
def for_statement(self):
"""
for_statement => "for" "(" (varDecl | expression_statement | ";")
(expression)? ;
(expression)?")" statement
"""
self.consume(TokenType.LEFT_PAREN, "Expected (")
if self.match(TokenType.SEMI_COLON):
init = None
if self.match(TokenType.VAR):
init = self.variable_declaration()
else:
init = self.expression_statement()
cond = Ast.Literal(True)
if not self.check(TokenType.SEMI_COLON):
cond = self.expression()
self.consume(TokenType.SEMI_COLON, "expected ;")
increment = None
if not self.check(TokenType.RIGHT_PAREN):
increment = self.expression()
self.consume(TokenType.RIGHT_PAREN, "expected )")
body = self.statement()
if increment is not None:
body = Ast.Block([body, Ast.Expression(increment)])
body = Ast.While(cond, body)
if init is not None:
body = Ast.Block([init, body])
return body
def if_statement(self):
"""
if_statement => "if" "(" expression ")" statement ("else" statement)?
"""
self.consume(TokenType.LEFT_PAREN, "Expected (")
cond = self.expression()
self.consume(TokenType.RIGHT_PAREN, "Expected )")
then_branch = self.statement()
else_branch = None
if self.match(TokenType.ELSE):
else_branch = self.statement()
return Ast.If(cond, then_branch, else_branch)
def print_statement(self):
expr = self.expression()
self.consume(TokenType.SEMI_COLON, "expected semicolon")
return Ast.Print(expr)
def return_statement(self):
expr = None
if not self.check(TokenType.SEMI_COLON):
expr = self.expression()
self.consume(TokenType.SEMI_COLON, "expected semicolon")
return Ast.Return(expr)
def expression_statement(self):
expr = self.expression()
self.consume(TokenType.SEMI_COLON, "expected semicolon")
return Ast.Expression(expr)
def block(self) -> Ast.Block:
"""
block = "{" (declaration)* "}"
"""
declarations = []
while not (self.check(TokenType.RIGHT_BRACE) or self.is_end()):
declaration = self.declaration()
declarations.append(declaration)
self.consume(TokenType.RIGHT_BRACE, "expected }.")
return Ast.Block(declarations)
def expression(self) -> Ast.Expr:
"""
expression -> assignment
"""
return self.assignment()
def assignment(self) -> Ast.Assign:
"""
assignment -> IDENTIFIER "=" assignment
| logic_or
"""
expr = self.logic_or()
if self.match(TokenType.EQUAL):
token = self.previous()
value = self.assignment()
if isinstance(expr, Ast.Variable):
return Ast.Assign(expr.name, value)
            self.error(token, "Invalid assignment target")
return expr
def logic_or(self):
"""
logic_or -> logic_and ("or" logic_and)*
"""
left = self.logic_and()
right = None
while self.match(TokenType.OR):
op = self.previous()
right = self.logic_and()
left = Ast.Logical(left, op, right)
return left
def logic_and(self):
"""
        logic_and -> equality ("and" equality)*
"""
left = self.equality()
while self.match(TokenType.AND):
op = self.previous()
right = self.equality()
left = Ast.Logical(left, op, right)
return left
def equality(self) -> Ast.Expr:
"""
equality -> comparison (('==' | '!=') equality)*
"""
expr = self.comparison()
while self.match(TokenType.EQUAL_EQUAL, TokenType.NOT_EQUAL):
op = self.previous()
right = self.equality()
expr = Ast.Binary(expr, op, right)
return expr
def comparison(self) -> Ast.Expr:
"""
comparison -> addition ((">" | ">=" | "<" | "<=") comparison)*
"""
expr = self.addition()
while self.match(
TokenType.LESSER,
TokenType.LESSER_EQUAL,
TokenType.GREATER,
TokenType.GREATER_EQUAL,
):
op = self.previous()
right = self.comparison()
expr = Ast.Binary(expr, op, right)
return expr
def addition(self) -> Ast.Expr:
"""
addition -> multiplication (("+" | "-") addition)*
"""
expr = self.multiplication()
while self.match(TokenType.PLUS, TokenType.MINUS):
op = self.previous()
right = self.addition()
expr = Ast.Binary(expr, op, right)
return expr
def multiplication(self) -> Ast.Expr:
"""
multiplication -> unary (("*" | "/") multiplication)*
"""
expr = self.unary()
while self.match(TokenType.STAR, TokenType.SLASH):
op = self.previous()
right = self.multiplication()
expr = Ast.Binary(expr, op, right)
return expr
def unary(self) -> Ast.Expr:
"""
unary -> ("!" | "-") unary | call"
"""
if self.match(TokenType.MINUS, TokenType.NOT):
op = self.previous()
right = self.unary()
expr = Ast.Unary(op, right)
return expr
return self.call()
def call(self) -> Ast.Expr:
"""
call -> primary ("(" arguments? ")")*
"""
expr = self.primary()
while self.match(TokenType.LEFT_PAREN):
args = self.arguments()
token = self.consume(TokenType.RIGHT_PAREN, "expected ).")
expr = Ast.Call(expr, token, args)
return expr
def arguments(self) -> list:
"""
arguments -> expression (", " expression)*
"""
args = []
while not self.check(TokenType.RIGHT_PAREN):
args.append(self.expression())
while self.match(TokenType.COMMA):
args.append(self.expression())
if len(args) > 255:
self.error(self.peek(), "cannot have more than 255 arguments")
return args
def primary(self) -> Ast.Expr:
"""
primary -> NUMBER | STRING | "false" | "true" | "nil"
| "(" expression ")" | IDENTIFIER
"""
if self.match(TokenType.FALSE):
return Ast.Literal(False)
if self.match(TokenType.TRUE):
return Ast.Literal(True)
if self.match(TokenType.NIL):
return Ast.Literal(None)
if self.match(TokenType.STRING, TokenType.NUMBER):
return Ast.Literal(self.previous().literal)
if self.match(TokenType.IDENTIFIER):
return Ast.Variable(self.previous())
if self.match(TokenType.LEFT_PAREN):
expr = self.expression()
self.consume(TokenType.RIGHT_PAREN, "Exprected ')' after expression")
return Ast.Grouping(expr)
        self.error(self.peek(), "Expected expression")
def synchronize(self):
while not self.is_end():
if self.match(TokenType.SEMI_COLON):
return
for token_type in [
TokenType.CLASS,
TokenType.FUN,
TokenType.VAR,
TokenType.FOR,
TokenType.IF,
TokenType.WHILE,
TokenType.PRINT,
TokenType.RETURN,
]:
if self.check(token_type):
return
self.advance()
|
# coding=utf-8
import chatcommunicate
import chatcommands
from globalvars import GlobalVars
from datahandling import _remove_pickle
import collections
import io
import os
import os.path
import pytest
import threading
import time
import yaml
from fake import Fake
from unittest.mock import Mock, patch
def test_validate_yaml():
with open("rooms.yml", "r") as f:
room_data = yaml.safe_load(f.read())
with open("users.yml", "r") as f:
user_data = yaml.safe_load(f.read())
flatten = lambda l: [item for sublist in l for item in sublist]
privileged_users = []
for site, site_rooms in room_data.items():
for room_id, room in site_rooms.items():
if "privileges" not in room:
continue
if "additional" in room["privileges"]:
privileged_users.append(room["privileges"]["additional"])
if "inherit" not in room["privileges"]:
privileged_users.append(room["privileges"])
privileged_users = set(flatten(privileged_users))
for uid in privileged_users:
if uid not in user_data:
pytest.fail("Privileged user {} does not have a corresponding entry in users.yml".format(uid))
def test_parse_room_config():
chatcommunicate.parse_room_config("test/test_rooms.yml")
assert ("stackexchange.com", 11540) in chatcommunicate._command_rooms
assert ("stackexchange.com", 30332) in chatcommunicate._command_rooms
assert ("stackoverflow.com", 111347) in chatcommunicate._command_rooms
assert ("stackexchange.com", 3) not in chatcommunicate._command_rooms
assert ("stackexchange.com", 54445) not in chatcommunicate._command_rooms
assert ("meta.stackexchange.com", 89) not in chatcommunicate._command_rooms
assert ("stackexchange.com", 11540) in chatcommunicate._watcher_rooms
assert ("stackexchange.com", 3) in chatcommunicate._watcher_rooms
assert ("meta.stackexchange.com", 89) in chatcommunicate._watcher_rooms
assert ("stackexchange.com", 30332) not in chatcommunicate._watcher_rooms
assert ("stackexchange.com", 54445) not in chatcommunicate._watcher_rooms
assert ("stackoverflow.com", 111347) not in chatcommunicate._watcher_rooms
assert chatcommunicate._privileges[("stackexchange.com", 11540)] == {121520, 10145}
assert chatcommunicate._privileges[("stackexchange.com", 30332)] == {121520, 10145}
assert chatcommunicate._privileges[("stackexchange.com", 3)] == set()
assert chatcommunicate._privileges[("stackexchange.com", 54445)] == set()
assert chatcommunicate._privileges[("meta.stackexchange.com", 89)] == {262823}
assert chatcommunicate._privileges[("stackoverflow.com", 111347)] == {3160466, 603346}
assert len(chatcommunicate._room_roles) == 5
assert chatcommunicate._room_roles["debug"] == {("stackexchange.com", 11540)}
assert chatcommunicate._room_roles["all"] == {("stackexchange.com", 11540),
("stackexchange.com", 54445),
("stackoverflow.com", 111347)}
assert chatcommunicate._room_roles["metatavern"] == {("meta.stackexchange.com", 89)}
assert chatcommunicate._room_roles["delay"] == {("meta.stackexchange.com", 89)}
assert chatcommunicate._room_roles["no-all-caps title"] == {("meta.stackexchange.com", 89)}
@patch("chatcommunicate.threading.Thread")
@patch("chatcommunicate.Client")
@patch("chatcommunicate.parse_room_config")
def test_init(room_config, client_constructor, thread):
client = Mock()
client_constructor.return_value = client
client.login.side_effect = Exception()
# https://stackoverflow.com/questions/23337471/
with pytest.raises(Exception) as e:
chatcommunicate.init("shoutouts", "to simpleflips", try_cookies=False)
assert str(e.value).endswith("Failed to log into {}, max retries exceeded".format(next(iter(chatcommunicate._clients))))
client.login.side_effect = None
client.login.reset_mock()
client_constructor.reset_mock()
room_config.side_effect = lambda _: room_config.get_original()("test/test_rooms.yml")
GlobalVars.standby_mode = True
# See GitHub Issue #2498, temporary workaround
try:
chatcommunicate.init("shoutouts", "to simpleflips", try_cookies=False)
except Exception:
return # This interferes with the following tests
assert len(chatcommunicate._rooms) == 0
assert client_constructor.call_count == 3
client_constructor.assert_any_call("stackexchange.com")
client_constructor.assert_any_call("stackoverflow.com")
client_constructor.assert_any_call("meta.stackexchange.com")
assert thread.call_count == 2
thread.assert_any_call(name="pickle ---rick--- runner", target=chatcommunicate.pickle_last_messages, daemon=True)
thread.assert_any_call(name="message sender", target=chatcommunicate.send_messages, daemon=True)
client.login.reset_mock()
client_constructor.reset_mock()
thread.reset_mock()
GlobalVars.standby_mode = False
counter = 0
def throw_every_other(*_):
nonlocal counter
counter += 1
if counter & 1:
raise Exception()
client.login.side_effect = throw_every_other
# See GitHub Issue #2498, temporary workaround
try:
chatcommunicate.init("shoutouts", "to simpleflips", try_cookies=False)
except Exception as e:
return # Because this causes the following checks to fail
assert client_constructor.call_count == 3
client_constructor.assert_any_call("stackexchange.com")
client_constructor.assert_any_call("stackoverflow.com")
client_constructor.assert_any_call("meta.stackexchange.com")
assert thread.call_count == 2
thread.assert_any_call(name="pickle ---rick--- runner", target=chatcommunicate.pickle_last_messages, daemon=True)
thread.assert_any_call(name="message sender", target=chatcommunicate.send_messages, daemon=True)
assert len(chatcommunicate._rooms) == 3
assert chatcommunicate._rooms[("stackexchange.com", 11540)].deletion_watcher is True
assert chatcommunicate._rooms[("stackexchange.com", 30332)].deletion_watcher is False
assert chatcommunicate._rooms[("stackoverflow.com", 111347)].deletion_watcher is False
@pytest.mark.skipif(os.path.isfile("messageData.p"), reason="shouldn't overwrite file")
@patch("chatcommunicate.pickle.dump")
def test_pickle_rick(dump):
try:
threading.Thread(target=chatcommunicate.pickle_last_messages, daemon=True).start()
chatcommunicate._pickle_run.set()
# Yield to the pickling thread until it acquires the lock again
while len(chatcommunicate._pickle_run._cond._waiters) == 0:
time.sleep(0)
assert dump.call_count == 1
call, _ = dump.call_args_list[0]
assert isinstance(call[0], chatcommunicate.LastMessages)
assert isinstance(call[1], io.IOBase) and call[1].name == "messageData.p"
finally:
_remove_pickle("messageData.p")
@patch("chatcommunicate._pickle_run")
def test_message_sender(pickle_rick):
chatcommunicate._last_messages = chatcommunicate.LastMessages({}, collections.OrderedDict())
threading.Thread(target=chatcommunicate.send_messages, daemon=True).start()
room = chatcommunicate.RoomData(Mock(), -1, False)
room.room.id = 11540
room.room._client.host = "stackexchange.com"
room.room._client._do_action_despite_throttling.return_value = Fake({"json": lambda: {"id": 1}})
chatcommunicate._msg_queue.put((room, "test", None))
while not chatcommunicate._msg_queue.empty():
time.sleep(0)
room.room._client._do_action_despite_throttling.assert_called_once_with(("send", 11540, "test"))
room.room.reset_mock()
assert chatcommunicate._last_messages.messages[("stackexchange.com", 11540)] == collections.deque((1,))
room.room.id = 30332
room.room._client._do_action_despite_throttling.return_value = Fake({"json": lambda: {"id": 2}})
chatcommunicate._msg_queue.put((room, "test", "did you hear about what happened to pluto"))
while not chatcommunicate._msg_queue.empty():
time.sleep(0)
room.room._client._do_action_despite_throttling.assert_called_once_with(("send", 30332, "test"))
assert chatcommunicate._last_messages.messages[("stackexchange.com", 11540)] == collections.deque((1,))
assert chatcommunicate._last_messages.reports == collections.OrderedDict({("stackexchange.com", 2): "did you hear about what happened to pluto"})
@patch("chatcommunicate._msg_queue.put")
@patch("chatcommunicate.get_last_messages")
def test_on_msg(get_last_messages, post_msg):
client = Fake({
"_br": {
"user_id": 1337
},
"host": "stackexchange.com"
})
room_data = chatcommunicate.RoomData(Mock(), -1, False)
chatcommunicate._rooms[("stackexchange.com", 11540)] = room_data
chatcommunicate.on_msg(Fake({}, spec=chatcommunicate.events.MessageStarred), None) # don't reply to events we don't care about
msg1 = Fake({
"message": {
"room": {
"id": 11540,
},
"owner": {
"id": 1,
},
"parent": None,
"content": "shoutouts to simpleflips"
}
}, spec=chatcommunicate.events.MessagePosted)
chatcommunicate.on_msg(msg1, client)
msg2 = Fake({
"message": {
"room": {
"id": 11540
},
"owner": {
"id": 1337
},
"id": 999,
"parent": None,
"content": "!!/not_actually_a_command"
}
}, spec=chatcommunicate.events.MessagePosted)
chatcommunicate.on_msg(msg2, client)
msg3 = Fake({
"message": {
"room": {
"id": 11540,
},
"owner": {
"id": 1
},
"id": 999,
"parent": None,
"content": "!!/a_command"
}
}, spec=chatcommunicate.events.MessagePosted)
mock_command = Mock(side_effect=lambda *_, **kwargs: "hi" if not kwargs["quiet_action"] else "")
chatcommunicate._prefix_commands["a_command"] = (mock_command, (0, 0))
chatcommunicate.on_msg(msg3, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":999 hi"
mock_command.assert_called_once_with(original_msg=msg3.message, alias_used="a_command", quiet_action=False)
post_msg.reset_mock()
mock_command.reset_mock()
msg3.message.content = "!!/a_command-"
chatcommunicate.on_msg(msg3, client)
post_msg.assert_not_called()
mock_command.assert_called_once_with(original_msg=msg3.message, alias_used="a_command", quiet_action=True)
post_msg.reset_mock()
mock_command.reset_mock()
chatcommunicate._prefix_commands["a_command"] = (mock_command, (0, 1))
chatcommunicate.on_msg(msg3, client)
post_msg.assert_not_called()
mock_command.assert_called_once_with(None, original_msg=msg3.message, alias_used="a_command", quiet_action=True)
post_msg.reset_mock()
mock_command.reset_mock()
msg3.message.content = "!!/a_command 1 2 3"
chatcommunicate.on_msg(msg3, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":999 hi"
mock_command.assert_called_once_with("1 2 3", original_msg=msg3.message, alias_used="a_command", quiet_action=False)
post_msg.reset_mock()
mock_command.reset_mock()
chatcommunicate._prefix_commands["a_command"] = (mock_command, (1, 2))
msg3.message.content = "!!/a_command"
chatcommunicate.on_msg(msg3, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":999 Too few arguments."
mock_command.assert_not_called()
post_msg.reset_mock()
mock_command.reset_mock()
msg3.message.content = "!!/a_command 1 2 oatmeal"
chatcommunicate.on_msg(msg3, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":999 Too many arguments."
mock_command.assert_not_called()
post_msg.reset_mock()
mock_command.reset_mock()
msg3.message.content = "!!/a_command- 1 2"
chatcommunicate.on_msg(msg3, client)
post_msg.assert_not_called()
mock_command.assert_called_once_with("1", "2", original_msg=msg3.message, alias_used="a_command", quiet_action=True)
post_msg.reset_mock()
mock_command.reset_mock()
msg3.message.content = "!!/a_command 3"
chatcommunicate.on_msg(msg3, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":999 hi"
mock_command.assert_called_once_with("3", None, original_msg=msg3.message, alias_used="a_command", quiet_action=False)
post_msg.reset_mock()
mock_command.reset_mock()
msg4 = Fake({
"message": {
"room": {
"id": 11540,
},
"owner": {
"id": 1
},
"parent": {
"owner": {
"id": 2
}
},
"id": 1000,
"content": "asdf"
}
}, spec=chatcommunicate.events.MessageEdited)
chatcommunicate.on_msg(msg4, client)
msg5 = Fake({
"message": {
"room": {
"id": 11540,
},
"owner": {
"id": 1
},
"parent": {
"owner": {
"id": 1337
}
},
"id": 1000,
"content": "@SmokeDetector why "
}
}, spec=chatcommunicate.events.MessageEdited)
chatcommunicate._reply_commands["why"] = (mock_command, (0, 0))
threw_exception = False
try:
chatcommunicate.on_msg(msg5, client)
except AssertionError:
threw_exception = True
assert threw_exception
mock_command.assert_not_called()
post_msg.assert_not_called()
chatcommunicate._reply_commands["why"] = (mock_command, (1, 1))
chatcommunicate.on_msg(msg5, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":1000 hi"
mock_command.assert_called_once_with(msg5.message.parent, original_msg=msg5.message, alias_used="why", quiet_action=False)
post_msg.reset_mock()
mock_command.reset_mock()
msg5.message.content = "@SmokeDetector why@!@#-"
chatcommunicate.on_msg(msg5, client)
post_msg.assert_not_called()
mock_command.assert_called_once_with(msg5.message.parent, original_msg=msg5.message, alias_used="why", quiet_action=True)
msg6 = Fake({
"message": {
"room": {
"id": 11540,
},
"owner": {
"id": 1
},
"id": 1000,
"parent": None,
"content": "sd why - 2why 2why- 2- why- "
}
}, spec=chatcommunicate.events.MessageEdited)
get_last_messages.side_effect = lambda _, num: (Fake({"id": i}) for i in range(num))
chatcommunicate.on_msg(msg6, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":1000 [:0] hi\n[:1] <skipped>\n[:2] hi\n[:3] hi\n[:4] <processed without return value>\n[:5] <processed without return value>\n[:6] <skipped>\n[:7] <skipped>\n[:8] <processed without return value>"
def test_message_type():
fake1 = Fake({}, spec=chatcommunicate.Message)
assert chatcommands.message(fake1) == fake1
fake2 = Fake({})
threw_exception = False
try:
chatcommands.message(fake2)
except AssertionError:
threw_exception = True
assert threw_exception
|
"""
A Python implementation of the blob detector algorithm described in
Shyam Madhusudhana, Anita Murray, and Christine Erbe. (2020). "Automatic
detectors for low-frequency vocalizations of Omura’s whales, Balaenoptera
omurai: A performance comparison." The Journal of the Acoustical Society
of America. 147(4). DOI: 10.1121/10.000108
for the detection of Omura's whale vocalizations.
If you use this software, we request that you please cite the above article.
Author: Shyam Madhusudhana <shyamm@cornell.edu>
"""
import numpy as np
from scipy.signal import butter, sosfiltfilt, spectrogram
from scipy.signal.windows import hann
import librosa
import argparse
from timeit import default_timer as timer
from detectsound import BlobExtractor
from matplotlib import pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
def detect_omuras(audio_file, output_file=None):
"""Detect Omura's whale vocalizations in the recording 'audio_file'.
If 'output_file' is None, the detections will be displayed on-screen.
Otherwise, 'output_file' must specify a path where detections will be
written in Raven Selection Table format."""
t_start = timer() # Measure data load time
# Load audio from file. Up/downsample (as necessary) to 200 Hz.
data, fs = librosa.load(audio_file, sr=200)
time_load = timer() - t_start
# Build a Butterworth filter to suppress energies outside the
# 5-68 Hz band.
    wn = np.array([5, 68]) / (fs / 2)  # normalize band edges to the Nyquist frequency
filter_sos = butter(64, wn, btype='bandpass', output='sos')
t_start = timer() # Measure data preparation time
# Filter the audio
data = sosfiltfilt(filter_sos, data).astype(np.float32)
# Compute spectrogram with 1.0 s Hann window (and NFFT) and 70% overlap
f, t, spec = spectrogram(data, fs=fs, window=hann(200),
nperseg=200, noverlap=140,
nfft=200, detrend=False, mode='psd')
# Find out the indices of where to clip the frequency axis
valid_f_idx_start = f.searchsorted(5, side='left')
valid_f_idx_end = f.searchsorted(68, side='right') - 1
# Clip frequency extents
spec = spec[valid_f_idx_start:(valid_f_idx_end + 1), :]
f = f[valid_f_idx_start:(valid_f_idx_end + 1)]
# Convert to decibels
spec = 10 * np.log10(spec + 1e-15)
time_prep = timer() - t_start
t_start = timer() # Measure detector time
# Create a detector instance with parameters tuned for detection
# of Omura's whale vocalizations.
be = BlobExtractor(len(f),
centroid_bounds=[17, 39], # Translates to 22-44 Hz
min_snr=0., # All non-negative SNRs
cost_std=[1.5, 2.5, 2.0],
min_frames=5, # Translates to 1.5 s
first_scale=1.3,
t_blur_sigma=3)
# Run the detector on the spectrogram
detections = be.extract_blobs(spec)
time_detect = timer() - t_start
print('Source : {:s}'.format(audio_file))
print('Audio duration: {:.6f} s'.format(len(data) / fs))
print('Time taken: ')
print(' Audio load: {:.6f} s'.format(time_load))
print(' Audio prep: {:.6f} s'.format(time_prep))
print(' Detection : {:.6f} s'.format(time_detect))
if output_file is not None:
# Write out detections into a Raven Selection Table file.
with open(output_file, 'w') as of:
# Write header
of.write('Selection\tBegin Time (s)\tEnd Time (s)\t' +
'Low Freq (Hz)\tHigh Freq (Hz)\tScore\n')
discarded = 0
for b_idx, blob in enumerate(detections):
# Discard very narrow blobs (median bandwidth of 5 Hz)
if np.median(f[blob.edges[:, 1]] - f[blob.edges[:, 0]]) < 5:
discarded += 1
continue
of.write(
'{:d}\t{:.4f}\t{:.4f}\t{:.3f}\t{:.3f}\t{:.4f}\n'.format(
b_idx + 1 - discarded,
t[blob.first_frame], t[blob.last_frame],
f[blob.edges[:, 0].min()], f[blob.edges[:, 1].max()],
np.median(blob.snrs)))
print('{:d} detections written to {:s}'.format(
len(detections) - discarded, output_file))
else:
# Display detections
fig, ax = plt.subplots()
# Show the spectrogram
ax.imshow(spec, cmap=plt.cm.gray_r,
extent=(t[0] - (t[1] - t[0]) / 2, t[-1] + (t[1] - t[0]) / 2,
f[0] - (f[1] - f[0]) / 2, f[-1] + (f[1] - f[0]) / 2),
interpolation='none', origin='lower', aspect='auto')
# Overlay detections
patches = []
snrs = []
discarded = 0
for blob in detections:
# Discard very narrow blobs (median bandwidth of 5 Hz)
if np.median(f[blob.edges[:, 1]] - f[blob.edges[:, 0]]) < 5:
discarded += 1
continue
# Build list of polygon points from reported blob edges
temp = np.arange(blob.first_frame, blob.last_frame + 1)
time_axis = t[np.concatenate([temp, np.flip(temp), [temp[0]]])]
freq_axis = f[
np.concatenate([blob.edges[:, 0],
np.flip(blob.edges[:, 1]),
blob.edges[0:1, 0]])]
patches.append(Polygon(np.stack([time_axis, freq_axis]).T, True))
snrs.append(np.median(blob.snrs))
p = PatchCollection(patches, cmap=plt.cm.Greens, alpha=0.4, ec='k')
p.set_array(np.asarray(snrs))
ax.add_collection(p)
ax.set_xlabel('Time (s)')
ax.set_ylabel('Frequency (Hz)')
ax.set_title('{:s}: {:d} detections'.format(
repr(audio_file), len(detections) - discarded))
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='omura_detector', allow_abbrev=False,
description='Detect Omura\'s whale vocalizations in recordings.')
parser.add_argument(
'src', metavar='<AUDIO SOURCE>',
help='Path to the audio file to process.')
parser.add_argument(
'out', metavar='<OUTPUT FILE>', nargs='?',
help='(Optional) Path to an output file into which detection ' +
'results (Raven selection tables) will be written. If not ' +
'specified, detection results will be displayed on screen.')
args = parser.parse_args()
detect_omuras(args.src, args.out)
|
# -*- coding: utf-8 -*-
"""
@author: J. Massey
@description: Script to check convergence and call kill if need be
@contact: masseyjmo@gmail.com
"""
# Imports
import numpy as np
import postproc.io as io
import postproc.frequency_spectra
import postproc.visualise.plotter
import subprocess
import sys
from pathlib import Path
import matplotlib.pyplot as plt
def remove(string):
string = string.replace(" ", "")
return string
def _plot_ts(ti, f):
"""
Just tidying up the above code by holding the plotting func
Args:
ti: time
f: force
Returns:
Time series plot
"""
fig1, ax1 = plt.subplots(figsize=(7, 5))
ax1.tick_params(bottom="on", top="on", right="on", which='both', direction='in', length=2)
# Edit frame, labels and legend
    ax1.set_xlabel(r"$t/length_scale$")
ax1.set_ylabel(f"${sys.argv[2]}$")
    ax1.plot_fill(ti, f, c='r')
plt.savefig(Path.cwd().joinpath(f"figures/time_series_{sys.argv[2]}.png"), bbox_inches='tight', dpi=600,
transparent=False)
plt.close()
def _plot_err(ti, e):
"""
Just tidying up the above code by holding the plotting func
Args:
ti: time
e: ensemble error
Returns:
Ensemble error against time
"""
fig, ax = plt.subplots(figsize=(7, 5))
ax.tick_params(bottom="on", top="on", right="on", which='both', direction='in', length=2)
# Edit frame, labels and legend
    ax.set_xlabel(r"$t/length_scale$")
ax.set_ylabel(r"$\int \sqrt(\overline{s_{0,n}} - \overline{s_{0,n+1}})^2 df/ \int \overline{s_{0,n+1}}$")
ax.plot_fill(ti, e, c='r')
plt.savefig(Path.cwd().joinpath(f"figures/ensemble_error_{sys.argv[2]}.png"), bbox_inches='tight', dpi=600,
transparent=False)
plt.close()
if __name__=="__main__":
# plt.style.use(['science', 'grid'])
print("Checking whether spectra have converged")
if float(sys.argv[1]) < 15:
        print(
            "This number of cycles is probably too "
            "small and will probably give a false positive, "
            "so check that the tail end of the ensemble error is reducing smoothly")
data_root = Path.cwd().joinpath('fort.9')
fos = io.unpack_flex_forces(data_root, remove(sys.argv[3]).split(','))
forces_dic = dict(zip(remove(sys.argv[3]).split(','), fos))
    t = forces_dic['t']
    assert (max(t) - min(t)) >= 2 * float(sys.argv[1]), "You need > two windows to be able to check convergence"
u = forces_dic[sys.argv[2]]
n = int(np.floor((np.max(t) - np.min(t)) / float(sys.argv[1])))
criteria = postproc.frequency_spectra.FreqConv(t, u, n=n, OL=0.5)
normed_error, window_t = criteria.f_conv(float(sys.argv[1]))
if normed_error[-1] < 0.1:
print("You've converged!")
subprocess.call('touch ../.kill', shell=True, cwd=Path(data_root).parent)
subprocess.call('mkdir -p figures', shell=True, cwd=Path(data_root).parent)
# Plot a couple of figs to show that the method behaves
_plot_ts(t, u)
_plot_err(window_t, normed_error)
else:
print("Hasn'torch yet, hold tight!")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-29 09:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0006_auto_20170828_1247'),
]
operations = [
migrations.AddField(
model_name='mahasiswa',
name='fakultas',
field=models.CharField(default='fakultasxx', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='mahasiswa',
name='program_studi',
field=models.CharField(default='prodixx', max_length=30),
preserve_default=False,
),
]
|
import os, glob
import requests
from io import BytesIO
from shutil import copyfile, copytree
from zipfile import ZipFile
import param
from .element import (_Element, Feature, Tiles, # noqa (API import)
WMTS, LineContours, FilledContours, Text, Image,
Points, Path, Polygons, Shape, Dataset, RGB,
Contours, Graph, TriMesh, Nodes, EdgePaths,
QuadMesh, VectorField)
from . import data # noqa (API import)
from . import operation # noqa (API import)
from . import plotting # noqa (API import)
from . import feature # noqa (API import)
__version__ = param.Version(release=(1,4,2), fpath=__file__,
commit="$Format:%h$", reponame='geoviews')
SAMPLE_DATA_URL = 'http://assets.holoviews.org/geoviews-sample-data.zip'
def examples(path='geoviews-examples', include_data=False, verbose=False):
"""
Copies the example notebooks to the supplied path. If
include_data is enabled the sample data is also downloaded.
"""
candidates = [os.path.join(__path__[0], '../doc/'),
os.path.join(__path__[0], '../../../../share/geoviews-examples')]
path = os.path.abspath(path)
asset_path = os.path.join(path, 'assets')
if not os.path.exists(path):
os.makedirs(path)
if verbose:
print('Created directory %s' % path)
for source in candidates:
if os.path.exists(source):
if not os.path.exists(asset_path):
copytree(os.path.join(source, 'assets'), asset_path)
if verbose:
print('Copied assets to %s' % asset_path)
for nb in glob.glob(os.path.join(source, '*.ipynb')):
nb_name = os.path.basename(nb)
nb_path = os.path.join(path, nb_name)
copyfile(nb, nb_path)
if verbose:
print("%s copied to %s" % (nb_name, path))
break
if include_data:
data_path = os.path.join(path, 'sample-data')
sample_data(data_path, verbose)
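# Hedged usage note (not part of the original module): for instance,
#   geoviews.examples('geoviews-examples', include_data=True, verbose=True)
# copies the bundled notebooks into ./geoviews-examples and pulls the sample data
# from SAMPLE_DATA_URL into ./geoviews-examples/sample-data.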
def sample_data(path='../doc/sample-data', verbose=False):
"""
Downloads and unzips the sample data to a particular location.
"""
    response = requests.get(SAMPLE_DATA_URL)
    zipfile = ZipFile(BytesIO(response.content))
zip_names = zipfile.namelist()
path = os.path.abspath(path)
if not os.path.exists(path):
os.makedirs(path)
if verbose:
print('Created directory %s' % path)
for nc_file in zip_names:
extracted_file = zipfile.open(nc_file).read()
save_path = os.path.join(path, nc_file)
with open(save_path, 'wb') as f:
f.write(extracted_file)
if verbose:
print("%s downloaded to %s" % (nc_file, path))
|
from modeling import *
d=0
n_train=1
seed = 5646
std=.3
n_shots = 8192
n_swap = 1
balanced = True
n = 200
test_size = .1
X, y = load_data(n=n, std=std)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=123, test_size=test_size)
Y_vector_train = label_to_array(y_train)
Y_vector_test = label_to_array(y_test)
predictions = []
np.random.seed(seed)
for x_test, y_ts in zip(X_test, Y_vector_test):
ix = np.random.choice(int(n*(1-test_size)), 1)[0]
x_train = X_train[ix]
x_tr = normalize_custom(x_train)
y_tr = Y_vector_train[ix]
x_ts = normalize_custom(x_test)
qc = cos_classifier(x_tr, x_ts, y_tr)
r = exec_simulator(qc, n_shots=n_shots)
if '0' not in r.keys():
r['0'] = 0
elif '1' not in r.keys():
r['1'] = 0
predictions.append(retrieve_proba(r))
a, b = evaluation_metrics(predictions, X_test, y_test, save=False)
print('seed:', seed, ' Accuracy:', a, ' Brier score:', b)
file = open("output/result_single_classifier.csv", 'a')
file.write("%d, %d, %d, %d, %s,%f, %f, %f, %f, %d\n" % (n, n_train, n_swap, d, balanced, test_size, std, a, b, seed))
file.close()
|
import sys
import os
import re
import curses
from atig.migration import Migration, MigrationCollection
class Atig():
def __init__(self):
self.valid_migration = re.compile(r'[0-9a-f]{12}\_.*\.py')
self.migrations = None
self.migration_collection = None
self.debug_message = 'Welcome to ATIG 8^)'
self.window_open = False
self.display_window_contents = []
self.close_atig = False
if self.is_in_alembic_directory() and self.alembic_is_installed():
# first run ls, get all self.migrations and all hashes
self.migrations = os.listdir('alembic/versions/')
self.migrations = map(
lambda x: Migration('alembic/versions/' + x),
filter(self.valid_migration.match, self.migrations))
self.migration_collection = MigrationCollection(
list(self.migrations))
self.migrations = list(self.migration_collection.list_migrations())
def init_screen(self, stdscr):
self.stdscr = stdscr
self.k = 0
self.cursor_x = 0
self.cursor_y = 0
# Clear and refresh the screen for a blank canvas
self.stdscr.clear()
self.stdscr.refresh()
# Start colors in curses
curses.start_color()
curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
self.event_loop()
def process_input(self):
if self.k == curses.KEY_DOWN:
self.cursor_y = self.cursor_y + 1
elif self.k == curses.KEY_UP:
self.cursor_y = self.cursor_y - 1
elif self.k == curses.KEY_RIGHT:
self.cursor_x = self.cursor_x + 1
elif self.k == curses.KEY_LEFT:
self.cursor_x = self.cursor_x - 1
elif self.k in [curses.KEY_ENTER, 10, 13]:
self.debug_message = ''
self.window_open = True
self.display_window = curses.newwin(0, self.width // 2, 0,
self.width // 2)
if self.cursor_y >= 1 and self.cursor_y <= len(self.migrations):
curr = self.migrations[self.cursor_y - 1]
self.debug_message = curr.hash
self.display_window_contents = curr.contents.split(
'\n')[:self.height - 2]
else:
self.debug_message = 'no migration selected'
elif self.k == ord('q'):
if self.window_open:
self.window_open = False
self.display_window.erase()
self.display_window.refresh()
self.debug_message = ''
self.display_window_contents = []
else:
self.close_atig = True
self.cursor_x = max(0, self.cursor_x)
self.cursor_x = min(self.width - 1, self.cursor_x)
self.cursor_y = max(0, self.cursor_y)
self.cursor_y = min(self.height - 1, self.cursor_y)
def render_status_bar(self):
statusbarstr = f"Press 'q' to exit | {self.debug_message} | Pos: {self.cursor_x}, {self.cursor_y}"
# Render status bar
self.stdscr.attron(curses.color_pair(3))
self.stdscr.addstr(self.height - 1, 0, statusbarstr)
self.stdscr.addstr(self.height - 1, len(statusbarstr),
" " * (self.width - len(statusbarstr) - 1))
self.stdscr.attroff(curses.color_pair(3))
def event_loop(self):
# Initialization
self.stdscr.clear()
self.height, self.width = self.stdscr.getmaxyx()
# Loop where k is the last character pressed
while (not self.close_atig):
self.render_status_bar()
if not self.alembic_is_installed():
self.render_alembic_install_error()
# check if we are in the right directory
elif self.is_in_alembic_directory():
self.fetch_migration_information()
else:
self.render_directory_error()
self.stdscr.move(self.cursor_y, self.cursor_x)
# Refresh the screen
self.stdscr.refresh()
if self.window_open:
for i, row in enumerate(self.display_window_contents):
self.display_window.addstr(i + 1, 1,
row[:(self.width // 2) - 2])
self.display_window.border()
self.display_window.refresh()
# Wait for next input
self.k = self.stdscr.getch()
self.process_input()
curses.endwin()
def fetch_migration_information(self):
title = "Alembic Migrations"[:self.width - 1]
start_x_title = int((self.width // 2) - (len(title) // 2) -
len(title) % 2)
# Turning on attributes for title
self.stdscr.attron(curses.color_pair(2))
self.stdscr.attron(curses.A_BOLD)
# Rendering title
self.stdscr.addstr(0, start_x_title, title)
# Turning off attributes for title
self.stdscr.attroff(curses.color_pair(2))
self.stdscr.attroff(curses.A_BOLD)
# finally we need to render them on the screen
for i, migration in enumerate(
self.migration_collection.list_migrations()):
self.stdscr.addstr(1 + i, 0, migration.date, curses.color_pair(0))
if self.migration_collection.current in migration.name:
self.stdscr.attron(curses.color_pair(2))
self.stdscr.attron(curses.A_BOLD)
self.stdscr.addstr(1 + i, 20,
migration.stringify() + ' (curr)',
curses.color_pair(2))
self.stdscr.attroff(curses.color_pair(2))
self.stdscr.attroff(curses.A_BOLD)
else:
self.stdscr.addstr(1 + i, 20, migration.stringify(),
curses.color_pair(1))
def render_alembic_install_error(self):
self.render_error("Alembic is not Installed")
def render_directory_error(self):
self.render_error("Not In Valid Alembic Directory")
def render_error(self, message):
# Declaration of strings
title = message[:self.width - 1]
keystr = "Press q to exit"[:self.width - 1]
# Centering calculations
start_x_title = int((self.width // 2) - (len(title) // 2) -
len(title) % 2)
start_x_keystr = int((self.width // 2) - (len(keystr) // 2) -
len(keystr) % 2)
start_y = int((self.height // 2) - 2)
# Turning on attributes for title
self.stdscr.attron(curses.color_pair(2))
self.stdscr.attron(curses.A_BOLD)
# Rendering title
self.stdscr.addstr(start_y, start_x_title, title)
# Turning off attributes for title
self.stdscr.attroff(curses.color_pair(2))
self.stdscr.attroff(curses.A_BOLD)
# Print rest of text
self.stdscr.addstr(start_y + 3, (self.width // 2) - 2, '-' * 4)
self.stdscr.addstr(start_y + 5, start_x_keystr, keystr)
def is_in_alembic_directory(self):
return os.path.exists('alembic') and \
os.path.exists('alembic/versions')
def alembic_is_installed(self):
return os.popen('which alembic').read() != ''
def main():
atig = Atig()
curses.wrapper(atig.init_screen)
if __name__ == "__main__":
main()
|
"""Async API
This module contains the API for parallelism in TorchScript, notably:
* torch.jit.fork
* torch.jit.wait
This is not intended to be imported directly; please use the exposed
functionalities in `torch.jit`.
"""
import torch
from torch.utils import set_module
from torch.jit._builtins import _register_builtin
from torch._jit_internal import Future
set_module(Future, "torch.jit")
def fork(func, *args, **kwargs):
"""
Creates an asynchronous task executing `func` and a reference to the value
of the result of this execution. `fork` will return immediately,
so the return value of `func` may not have been computed yet. To force completion
of the task and access the return value invoke `torch.jit.wait` on the Future. `fork` invoked
with a `func` which returns `T` is typed as `torch.jit.Future[T]`. `fork` calls can be arbitrarily
nested, and may be invoked with positional and keyword arguments.
Asynchronous execution will only occur when run in TorchScript. If run in pure python,
`fork` will not execute in parallel. `fork` will also not execute in parallel when invoked
while tracing, however the `fork` and `wait` calls will be captured in the exported IR Graph.
Warning:
        `fork` tasks will execute non-deterministically. We recommend only spawning
parallel fork tasks for pure functions that do not modify their inputs,
module attributes, or global state.
Arguments:
func (callable or torch.nn.Module): A Python function or `torch.nn.Module`
that will be invoked. If executed in TorchScript, it will execute asynchronously,
otherwise it will not. Traced invocations of fork will be captured in the IR.
``*args``, ``**kwargs``: arguments to invoke `func` with.
Returns:
`torch.jit.Future[T]`: a reference to the execution of `func`. The value `T`
can only be accessed by forcing completion of `func` through `torch.jit.wait`.
Example (fork a free function):
.. testcode::
import torch
from torch import Tensor
def foo(a : Tensor, b : int) -> Tensor:
return a + b
def bar(a):
fut : torch.jit.Future[Tensor] = torch.jit.fork(foo, a, b=2)
return torch.jit.wait(fut)
script_bar = torch.jit.script(bar)
input = torch.tensor(2)
# only the scripted version executes asynchronously
assert script_bar(input) == bar(input)
# trace is not run asynchronously, but fork is captured in IR
graph = torch.jit.trace(bar, (input,)).graph
assert "fork" in str(graph)
Example (fork a module method):
.. testcode::
import torch
from torch import Tensor
class SubMod(torch.nn.Module):
def forward(self, a: Tensor, b : int):
return a + b
class Mod(torch.nn.Module):
def __init__(self):
                super().__init__()
self.mod = SubMod()
def forward(self, input):
                fut = torch.jit.fork(self.mod, input, b=2)
return torch.jit.wait(fut)
input = torch.tensor(2)
mod = Mod()
assert mod(input) == torch.jit.script(mod).forward(input)
"""
return torch._C.fork(func, *args, **kwargs)
def wait(future):
"""
Forces completion of a `torch.jit.Future[T]` asynchronous task, returning the
result of the task. See :func:`~fork` for docs and examples.
Arguments:
func (torch.jit.Future[T]): an asynchronous task reference, created through `torch.jit.fork`
Returns:
        `T`: the return value of the completed task
"""
return torch._C.wait(future)
_register_builtin(wait, "aten::wait")
|
# Build/install the npcomp-torch package.
# This uses PyTorch's setuptools support and requires an existing installation
# of npcomp-core in order to access its headers/libraries.
from pathlib import Path
from setuptools import find_packages, setup, Extension
from torch.utils import cpp_extension
try:
from npcomp import build as npcomp_build
except ModuleNotFoundError:
raise ModuleNotFoundError(
f"Could not import npcomp.build "
f"(do you have the npcomp-core package installed)")
# Get our sources.
this_dir = Path(__file__).parent
extension_sources = [str(p) for p in this_dir.joinpath("csrc").rglob("*.cpp")]
# Npcomp bits.
include_dirs = npcomp_build.get_include_dirs()
lib_dirs = npcomp_build.get_lib_dirs()
npcomp_libs = [npcomp_build.get_capi_link_library_name()]
# TODO: Export this in some way from an npcomp config file include vs needing
# it loose here.
compile_args = ["-DMLIR_PYTHON_PACKAGE_PREFIX=npcomp."]
setup(
name="npcomp-torch",
ext_modules=[
cpp_extension.CppExtension(
name="_torch_mlir",
sources=extension_sources,
include_dirs=include_dirs,
library_dirs=lib_dirs,
libraries=npcomp_libs,
extra_compile_args=compile_args),
],
cmdclass={
"build_ext": cpp_extension.BuildExtension
},
package_dir={
"": "./python",
},
packages=find_packages("./python", include=[
"torch_mlir",
"torch_mlir.*",
"torch_mlir_torchscript",
"torch_mlir_torchscript.*",
"torch_mlir_torchscript_e2e_test_configs",
"torch_mlir_torchscript_e2e_test_configs.*",
]),
)
|
# -*- coding: utf-8 -*-
"""Main module."""
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
import argparse
def getinput():
input = argparse.ArgumentParser()
input.add_argument('--initial_position', type = float, default = 0, help = 'Initial position of the particle, default = 0' )
input.add_argument('--initial_velocity', type = float, default = 0, help = 'Initial velocity of the particle, default = 0' )
input.add_argument('--temperature', type = float, default = 300, help = 'Temperature of the molecule, default = 300' )
input.add_argument('--damping_coefficient', type = float, default = 0.1, help = 'Damping Coefficient of the molecule, default = 0.1' )
    input.add_argument('--time_step', type = float, default = 0.2, help = 'Time interval of the simulation, default = 0.2' )
input.add_argument('--wall_size', type = float, default = 5, help = 'Wall size of the simulation, default = 5' )
input.add_argument('--total_time', type = float, default = 1000, help = 'Total time of the simulation, default = 1000' )
inp = input.parse_args()
return inp
def acceleration(gamma=0.1,velocity=0,temperature=300,timestep=0.1,mass=1):
sigma=np.sqrt(2*temperature*gamma*1*timestep)
return (-gamma*velocity/mass + np.random.normal(0,sigma))*timestep
def checkwall(position, wallsize):
if position >= wallsize or position<=0:
return True
else:
return False
def lgmotion(velocity,timestep):
return velocity*timestep
def integrate(position=0,velocity=0,temperature=300,gamma=0.1,timestep=0.1,wallsize=5,totaltime=1000,mass=1):
timepass=0
indexnum=0
index=[]
while timepass < totaltime :
indexnum +=1
index.append([indexnum,timepass,position,velocity])
timepass+=timestep
velocity += acceleration(gamma, velocity, temperature, timestep)
position += lgmotion(velocity, timestep)
if checkwall(position,wallsize):
if position >= wallsize:
position = wallsize
index.append([indexnum+1,timepass,position,velocity])
else:
position= 0
index.append([indexnum+1,timepass,position,velocity])
break
return timepass,index
def filecreation(index):
indexf=np.array(index)
timef=indexf[:,1]
positionf=indexf[:,2]
velocityf=indexf[:,3]
with open('Langevin_Motion.txt','w+') as file:
file.write('Index Time Position Velocity \n')
for i in range(len(timef)):
file.write('{} {:.3f} {:.5f} {:.5f} \n'.format(i,timef[i],positionf[i],velocityf[i]))
def histogram(arr):
plt.figure(0)
plt.hist(arr,bins=20)
plt.title('100 runs of Langevin Motion')
plt.xlabel('Time passed')
plt.ylabel('Number of runs')
plt.savefig('histogram.png')
def trajectory(x,y):
plt.figure(1)
plt.plot(x,y)
plt.title('Position vs Time')
plt.xlabel('Time passed')
plt.ylabel('Position')
plt.savefig('trajectory.png')
def main():
#get input for simulation
inp=getinput()
    #run 100 times, collecting all the relevant data
t_arr=[] #time
for i in range(100):
t,idx=integrate(position=inp.initial_position,velocity=inp.initial_velocity,temperature=inp.temperature,gamma=inp.damping_coefficient,timestep=inp.time_step,wallsize=inp.wall_size,totaltime=inp.total_time,mass=1)
t_arr.append(t)
#plot the histogram of 100 runs
histogram(t_arr)
#plot the position vs time plot of the last run
trjdata=np.array(idx)
xdata=trjdata[:,1]
ydata=trjdata[:,2]
trajectory(xdata,ydata)
    #write the index of the last run to a txt file
filecreation(idx)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# @Time : 2020-06-03 11:34
# @Author : yingyuankai
# @Email : yingyuankai@aliyun.com
# @File : electra.py
import tensorflow as tf
from aispace.utils.hparams import Hparams
from aispace.layers.encoders.transformer import Transformer
from aispace.layers.pretrained.bert import BertPooler
from aispace.layers.embeddings import ElectraEmbeddings
from aispace.layers.base_layer import BaseLayer
from aispace.utils.tf_utils import get_shape
from aispace.layers.activations import ACT2FN
class TFElectraDiscriminatorPredictions(tf.keras.layers.Layer):
def __init__(self, hparams: Hparams, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(hparams.hidden_size, name="dense")
self.dense_prediction = tf.keras.layers.Dense(1, name="dense_prediction")
self._act_fn = ACT2FN[hparams.hidden_act]
def call(self, discriminator_hidden_states, training=False):
hidden_states = self.dense(discriminator_hidden_states)
hidden_states = self._act_fn(hidden_states)
logits = tf.squeeze(self.dense_prediction(hidden_states))
return logits
class TFElectraGeneratorPredictions(tf.keras.layers.Layer):
def __init__(self, hparams: Hparams, **kwargs):
super().__init__(**kwargs)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=hparams.layer_norm_eps, name="LayerNorm")
self.dense = tf.keras.layers.Dense(hparams.embedding_size, name="dense")
self._act_fn = ACT2FN[hparams.hidden_act]
def call(self, generator_hidden_states, training=False):
hidden_states = self.dense(generator_hidden_states)
hidden_states = self._act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
@BaseLayer.register("electra")
class Electra(BaseLayer):
def __init__(self, hparams: Hparams, **kwargs):
super(Electra, self).__init__(hparams, **kwargs)
self.embeddings = ElectraEmbeddings(hparams.config, name="embeddings")
if hparams.config.embedding_size != hparams.config.hidden_size:
self.embeddings_project = tf.keras.layers.Dense(hparams.config.hidden_size, name="embeddings_project")
self.encoder = Transformer(hparams.config, name="encoder")
def get_input_embeddings(self):
return self.embeddings
def get_extended_attention_mask(self, attention_mask, input_shape):
if attention_mask is None:
attention_mask = tf.fill(input_shape, 1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = tf.cast(extended_attention_mask, tf.float32)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def get_head_mask(self, head_mask):
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self._hparams.config.num_hidden_layers
return head_mask
def call(
self,
inputs,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
training=False,
):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids
position_ids = inputs[3] if len(inputs) > 3 else position_ids
head_mask = inputs[4] if len(inputs) > 4 else head_mask
inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds
assert len(inputs) <= 6, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get("input_ids")
attention_mask = inputs.get("attention_mask", attention_mask)
token_type_ids = inputs.get("token_type_ids", token_type_ids)
position_ids = inputs.get("position_ids", position_ids)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
assert len(inputs) <= 6, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = get_shape(input_ids)
elif inputs_embeds is not None:
input_shape = get_shape(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if attention_mask is None:
attention_mask = tf.fill(input_shape, 1)
if token_type_ids is None:
token_type_ids = tf.fill(input_shape, 0)
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
head_mask = self.get_head_mask(head_mask)
hidden_states = self.embeddings([input_ids, position_ids, token_type_ids, inputs_embeds], training=training)
if hasattr(self, "embeddings_project"):
hidden_states = self.embeddings_project(hidden_states, training=training)
encoder_outputs = self.encoder([hidden_states, extended_attention_mask, head_mask], training=training)
sequence_output = encoder_outputs[0]
pooled_output = sequence_output[:, 0]
outputs = (sequence_output, pooled_output,) + encoder_outputs[1:]
return outputs
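# Hypothetical sketch, not part of the original module: a tiny standalone check of
# the additive attention-mask trick in get_extended_attention_mask() above. It
# assumes `tf` is TensorFlow 2.x imported earlier in this module and running eagerly.
if __name__ == "__main__":
    demo_mask = tf.constant([[1, 1, 1, 0]])  # 1 = attend, 0 = padding
    extended = tf.cast(demo_mask[:, tf.newaxis, tf.newaxis, :], tf.float32)
    extended = (1.0 - extended) * -10000.0  # kept positions stay ~0.0, padding becomes -10000.0
    print(extended.numpy())  # broadcastable bias of shape (1, 1, 1, 4)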
|
from kubernetes import client
from kubernetes.watch import Watch
from loguru import logger
from .consts import CONTAINER_NAME, DEPLOYMENT_PREFIX, NAMESPACE
def create_deployment(v1, image, num_replicas):
container = client.V1Container(name=CONTAINER_NAME, image=image)
container_spec = client.V1PodSpec(containers=[container])
meta = client.V1ObjectMeta(labels=dict(app="kbench"))
template_spec = client.V1PodTemplateSpec(spec=container_spec,
metadata=meta)
selector = client.V1LabelSelector(match_labels=dict(app="kbench"))
deployment_spec = client.V1DeploymentSpec(template=template_spec,
replicas=num_replicas,
selector=selector)
meta = client.V1ObjectMeta(generate_name=DEPLOYMENT_PREFIX)
    deployment_body = client.V1Deployment(spec=deployment_spec, metadata=meta)
    deployment = v1.create_namespaced_deployment(body=deployment_body,
                                                 namespace=NAMESPACE)
return deployment.metadata.name
def delete_deployment(v1, name):
v1.delete_namespaced_deployment(name=name, namespace=NAMESPACE)
def wait_for_deployment_rescale(v1, name, target_replicas):
watch = Watch()
for event in watch.stream(v1.list_namespaced_deployment,
namespace=NAMESPACE):
deployment = event["object"]
if deployment.metadata.name != name:
continue
ready_replicas = deployment.status.ready_replicas
if ready_replicas is None:
ready_replicas = 0
logger.trace("Deployment {} has {} replicas", name, ready_replicas)
if ready_replicas == target_replicas:
return
def rescale_deployment(v1, name, num_replicas):
logger.info("Rescaling deployment {} to {} replicas", name, num_replicas)
scale = client.V1Scale(spec=client.V1ScaleSpec(replicas=num_replicas))
v1.patch_namespaced_deployment_scale(name=name, namespace=NAMESPACE,
body=scale)
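# Hypothetical usage sketch, not part of the original module: it wires the helpers
# together against whatever cluster the local kubeconfig points to. The image name
# and replica counts are arbitrary, and AppsV1Api is assumed to be the client type
# expected by the *_namespaced_deployment calls above.
if __name__ == "__main__":
    from kubernetes import config
    config.load_kube_config()
    apps_v1 = client.AppsV1Api()
    name = create_deployment(apps_v1, image="nginx:alpine", num_replicas=1)
    logger.info("Created deployment {}", name)
    rescale_deployment(apps_v1, name, num_replicas=3)
    wait_for_deployment_rescale(apps_v1, name, target_replicas=3)
    delete_deployment(apps_v1, name)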
|
#!/usr/bin/env python3
import sys
from phockup import main
from src.printer import Printer
if __name__ == '__main__':
try:
main(sys.argv[1:])
except KeyboardInterrupt:
Printer().empty().line('Exiting...')
sys.exit(0)
|
"""Command generator for running a script against a Redshift cluster.
Contains the method to compile the Redshift specific script execution command
based on generic arguments (sql script, output destination) and Redshift
specific arguments (flag values).
"""
__author__ = 'p3rf@google.com'
from absl import flags
flags.DEFINE_string('host', None, 'Redshift host.')
flags.DEFINE_string('database', None, 'Redshift Database.')
flags.DEFINE_string('user', None, 'Redshift User.')
flags.DEFINE_string('password', None, 'Redshift Password.')
flags.mark_flags_as_required(['host', 'database', 'user', 'password'])
FLAGS = flags.FLAGS
def generate_provider_specific_cmd_list(script, driver, output, error):
"""Method to compile the Redshift specific script execution command.
Arguments:
script: SQL script which contains the query.
driver: Driver that contains the Redshift specific script executor.
output: Output log file.
error: Error log file.
Returns:
Command list to execute the supplied script.
"""
return [driver, FLAGS.host, FLAGS.database, FLAGS.user, FLAGS.password,
script, output, error]
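# Hypothetical usage sketch, not part of the original module: the required
# --host/--database/--user/--password flags must be supplied on the command line
# before FLAGS is read, and the script/driver/log paths below are placeholders.
if __name__ == '__main__':
  from absl import app
  def _print_cmd(argv):
    del argv  # Unused.
    print(generate_provider_specific_cmd_list(script='script.sql',
                                               driver='./redshift_driver',
                                               output='out.log',
                                               error='err.log'))
  app.run(_print_cmd)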
|
# Class definition of planet
# Should include the keplerian elements:
# - Semi-major axis (a) in m, km, or AU
# - Eccentricity (e) no unit: 0 -> circle
# - Inclination (i) in degrees °
# - Longitude of ascending node (o) in degrees °
# - Argument of periapsis (w) in degrees °
# - True anomaly at t0 (v0) in degrees
#
# In addition the following are specified
# - Gravitational constant (mu)
# - Radius for plotting (r)
# - Color for plotting
import geometry_3d as geo
import datetime
# Epoch t0
T0 = datetime.datetime(2000, 1, 1, 12)
class Planet:
def __init__(self, master=None,
a: float = 0., e: float = 0., v0: float = 0.,
i: float = 0., o: float = 0., w: float = 0.,
r: float = 0., color: str = 'k', mu: float = 0.,
name: str = None):
"""
Class definition for a planet orbiting a central mass
:param master: Object of central mass (other planet)
:param a: Orbit-Parameter: Semi-Major axis (a)
:param e: Orbit-Parameter: Eccentricity (e)
:param v0: Orbit-Parameter: True anomaly at epoch t0 (nu_0)
:param i: Orbit-Parameter: Inclination (i)
:param o: Orbit-Parameter: Longitude of ascending node (Omega)
:param w: Orbit-Parameter: Argument of periapsis (omega)
:param r: Radius of planet
:param color: Color for painting
:param mu: Gravitational parameter of planet
:param name: Name of planet
All distances are specified in m (SI) and all angles in degrees
"""
self.name = name
# If master is None, then this planet is the most central element (e.g. sun)
self.master = master
self.mu = mu
if self.master is not None:
            # Calculate mean motion n and the orbital period derived from it
            self.n = (self.master.mu / a**3)**0.5
            self.orbit_period = 2 * geo.np.pi / self.n
else:
self.n = 0.
self.r = r
self.a = a
self.e = e
self.i = i
self.o = o
self.w = w
self.v0 = v0
# Mean anomaly at epoch t0
self.m0 = mean_from_true(v0, e)
self.b = (a**2. - a**2. * e**2.)**.5 # Semi-minor axis
# Vectors of orbit ellipse plane
self.ue = geo.rotate_z(geo.rotate_y(geo.rotate_z(
geo.Vector(1, 0, 0), self.w), self.i), self.o)
self.ve = geo.rotate_z(geo.rotate_y(geo.rotate_z(
geo.Vector(0, 1, 0), self.w), self.i), self.o)
self.ne = geo.rotate_z(geo.rotate_y(geo.Vector(0, 0, 1), self.i), self.o)
# Vectors of semi-major and semi-minor axis
self.a_vec = self.a * self.ue
self.b_vec = self.b * self.ve
# List of matplotlib objects associated with this planet
self.shapes = []
self.color = color
def get_v(self, t: datetime.datetime = None):
"""
Calculation of true anomaly (angle on ellipse) at a certain time
:param t: datetime.datetime object
:return: True anomaly at time t
"""
if self.master is None:
return
dt = t - T0
dt = dt.total_seconds()
# Mean anomaly m increases linearly along the orbit
m = self.m0 + (self.n * dt) * 180. / geo.np.pi
m = m % 360
v = true_from_mean(m, self.e)
return v
def get_position(self, t: datetime.datetime = None):
"""
Get xyz-position of planet at a certain time
:param t: datetime.datetime object
:return: Vector with xyz coordinates of planet
"""
if self.master is None:
# Most central planet is always in the center
return geo.Vector(0., 0., 0.)
# Get true anomaly
v = self.get_v(t)
# Calculate distance from central mass
r = self.a * (1 - self.e**2) / (1 + self.e * geo.np.cos(v * geo.np.pi / 180.))
        # Create vector and rotate it into the orbital ellipse plane
r_vec = r * geo.rotate_z(geo.Vector(1, 0, 0), v)
r_vec = geo.rotate_z(geo.rotate_y(geo.rotate_z(r_vec, self.w), self.i), self.o)
# Position of planet is relative to position of central mass
return self.master.get_position(t) - r_vec
def get_orbit_center(self, t: datetime.datetime = None):
"""
Get xyz-position of center of orbit ellipse
:param t: datetime.datetime object
:return: Vector with xyz coordinates of planet
"""
if self.master is None:
return None
return self.master.get_position(t) + (self.a_vec * self.e)
def remove_shapes(self):
"""
Delete all matplotlib shapes associated with planet
"""
for shape in self.shapes:
shape.remove()
self.shapes = []
def mean_from_true(v, e):
"""
Calculate mean anomaly from true anomaly
(Source: https://en.wikipedia.org/wiki/Mean_anomaly#Formulae)
:param v: True anomaly (nu)
:param e: Eccentricity (e)
:return: Mean anomaly (m)
"""
v = v * geo.np.pi / 180.
m = geo.np.arctan2((1 - e**2)**0.5 * geo.np.sin(v) / (1 + e * geo.np.cos(v)),
(e + geo.np.cos(v)) / (1 + e * geo.np.cos(v))) - \
e * (1 - e**2)**0.5 * geo.np.sin(v) / (1 + e * geo.np.cos(v))
return m * 180. / geo.np.pi
def true_from_mean(m, e):
"""
Calculate true anomaly from mean anomaly
(Source: https://en.wikipedia.org/wiki/True_anomaly#From_the_mean_anomaly)
:param m: Mean anomaly (m)
:param e: Eccentricity (e)
:return: True anomaly (nu)
"""
m = m * geo.np.pi / 180.
v = m + (2 * e - 1 / 4 * e**3) * geo.np.sin(m) + \
5 / 4 * e**2 * geo.np.sin(2 * m) + \
13 / 12 * e**3 * geo.np.sin(3 * m)
return v * 180. / geo.np.pi
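# Hypothetical sanity check, not part of the original module: for a modest
# eccentricity the series in true_from_mean() should approximately invert
# mean_from_true(). The eccentricity and angle below are arbitrary demo values.
if __name__ == '__main__':
    e_demo = 0.0934   # roughly Mars-like eccentricity
    v_demo = 123.4    # true anomaly in degrees
    m_demo = mean_from_true(v_demo, e_demo)
    print(v_demo, m_demo, true_from_mean(m_demo, e_demo))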
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module includes a bunch of library functions
relevant for dealing with the topology structure
"""
import sets
from heron.proto import topology_pb2
from heron.common.src.python import constants
def get_topology_config(topology, key):
"""
Helper function to get the value of key in topology config.
Return None if the key is not present.
"""
for kv in topology.topology_config.kvs:
if kv.key == key:
return kv.value
return None
def get_component_parallelism(topology):
"""
The argument is the proto object for topology.
Returns a map of components to their parallelism.
"""
cmap = {}
components = list(topology.spouts) + list(topology.bolts)
for component in components:
component_name = component.comp.name
for kv in component.comp.config.kvs:
if kv.key == constants.TOPOLOGY_COMPONENT_PARALLELISM:
cmap[component_name] = int(kv.value)
return cmap
def get_nstmgrs(topology):
"""
The argument is the proto object for topology.
Returns the number of stream managers for the topology.
This is equal to the number of containers.
If not present, return 1 as default.
"""
return int(get_topology_config(topology, constants.TOPOLOGY_STMGRS) or 1)
def get_instance_opts(topology):
"""
The argument is the proto object for topology.
Returns the topology worker child options.
If not present, return empty string.
"""
return str(get_topology_config(topology, constants.TOPOLOGY_WORKER_CHILDOPTS) or "")
def get_total_instances(topology):
"""
The argument is the proto object for topology.
Returns the total number of instances based on all parallelisms.
"""
cmap = get_component_parallelism(topology)
return sum(cmap.values())
def get_additional_classpath(topology):
"""
The argument is the proto object for topology.
  Returns an empty string if no additional classpath is specified.
"""
additional_classpath = str(get_topology_config(
topology, constants.TOPOLOGY_ADDITIONAL_CLASSPATH) or "")
return additional_classpath
def get_cpus_per_container(topology):
"""
The argument is the proto object for topology.
Calculate and return the CPUs per container.
It can be calculated in two ways:
1. It is passed in config.
  2. Allocate 1 CPU per instance in a container, and 1 extra for the stmgr.
"""
cpus = get_topology_config(topology, constants.TOPOLOGY_CONTAINER_CPU_REQUESTED)
if not cpus:
ninstances = get_total_instances(topology)
nstmgrs = get_nstmgrs(topology)
cpus = float(ninstances) / nstmgrs + 1 # plus one for stmgr
return float(cpus)
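# Illustrative note (not in the original source): with the fallback formula above,
# a topology with 6 instances spread over 2 containers gets 6/2 + 1 = 4.0 CPUs
# per container, the +1 covering the stream manager.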
def get_disk_per_container(topology):
"""
The argument is the proto object for topology.
Calculate and return the disk per container.
It can be calculated in two ways:
1. It is passed in config.
2. Allocate 1 GB per instance in a container, and 12 GB extra for the rest.
"""
disk = get_topology_config(topology, constants.TOPOLOGY_CONTAINER_DISK_REQUESTED)
if not disk:
ninstances = get_total_instances(topology)
nstmgrs = get_nstmgrs(topology)
# Round to the ceiling
maxInstanceInOneContainer = (ninstances + nstmgrs - 1) / nstmgrs
disk = maxInstanceInOneContainer * constants.GB + constants.DEFAULT_DISK_PADDING_PER_CONTAINER
return disk
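# Illustrative note (not in the original source): (ninstances + nstmgrs - 1) / nstmgrs
# is an integer ceiling, e.g. 7 instances over 2 containers gives ceil(7/2) = 4,
# so each container is sized for 4 instances worth of disk plus the padding constant.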
def get_ram_per_container(topology):
"""
The argument is the proto object for topology.
Calculate and return RAM per container for the topology based
container rammap. Since rammap takes into account all the
configs and cases, we just add some ram for stmgr and return it.
"""
component_distribution = get_component_distribution(topology)
rammap = get_component_rammap(topology)
maxsize = 0
for (_, container_items) in component_distribution.items():
ramsize = 0
for (component_name, _, _) in container_items:
ramsize += int(rammap[component_name])
if ramsize > maxsize:
maxsize = ramsize
return maxsize + constants.RAM_FOR_STMGR
def get_component_rammap(topology):
"""
The argument is the proto object for topology.
It is calculated based on the following priority:
1. Form the user specified component rammap. If the rammap
is complete, return it.
  2. If only some component RAMs are specified, assign the default RAM
     to the remaining components, and return the map.
3. If no component RAM is specified, take the container ram requested
and divide it equally among the max possible instances.
4. If container RAM was not requested, assign the default RAM
to all components.
Returns a map from component name to ram in bytes
"""
component_distribution = get_component_distribution(topology)
components = list(topology.spouts) + list(topology.bolts)
# first check if user has specified the rammap
rammap = get_user_rammap(topology) or {}
# Some or all of them are there, so assign default to those components
# that are not specified.
if len(rammap.keys()) > 0:
for component in components:
component_name = component.comp.name
if component_name not in rammap:
rammap[component_name] = constants.DEFAULT_RAM_FOR_INSTANCE
return rammap
max_instances_in_a_container = max(map(lambda x: len(x[1]), component_distribution.items()))
# User has not specified any kind of rammap
# We just find the ram needed for a container and divide
# memory equally
requested_ram_per_container = get_container_ram_requested(topology)
if requested_ram_per_container != None:
ram_for_jvms = requested_ram_per_container - constants.RAM_FOR_STMGR
ram_per_instance = int(ram_for_jvms / max_instances_in_a_container)
rammap = {}
for component in components:
component_name = component.comp.name
rammap[component_name] = ram_per_instance
return rammap
# Nothing was specified.
  # The default is to allocate 1 GB (the default RAM) per instance for all components
ram_per_instance = int(constants.DEFAULT_RAM_FOR_INSTANCE)
rammap = {}
for component in components:
component_name = component.comp.name
rammap[component_name] = ram_per_instance
return rammap
def strip_java_objects(topology):
"""
The argument is the proto object for topology.
The java objects are huge objects and topology
is stripped off these objects so that it can be stored
in zookeeper.
"""
stripped_topology = topology_pb2.Topology()
stripped_topology.CopyFrom(topology)
components = list(stripped_topology.spouts) + list(stripped_topology.bolts)
for component in components:
if component.comp.HasField("serialized_object"):
component.comp.ClearField("serialized_object")
return stripped_topology
def get_component_distribution(topology):
"""
The argument is the proto object for topology.
Distribute components and their instances evenly across containers in a round robin fashion
Return value is a map from container id to a list.
Each element of a list is a tuple (component_name, global_task_id, component_index)
This is essentially the physical plan of the topology.
"""
containers = {}
nstmgrs = get_nstmgrs(topology)
for i in range(1, nstmgrs + 1):
containers[i] = []
index = 1
global_task_id = 1
for (component_name, ninstances) in get_component_parallelism(topology).items():
component_index = 0
for i in range(ninstances):
containers[index].append((str(component_name), str(global_task_id), str(component_index)))
component_index = component_index + 1
global_task_id = global_task_id + 1
index = index + 1
if index == nstmgrs + 1:
index = 1
return containers
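# Illustrative note (not in the original source): assuming the parallelism map
# iterates as {"spout": 3, "bolt": 2} with 2 stream managers, the round robin
# above yields
#   container 1: [("spout", "1", "0"), ("spout", "3", "2"), ("bolt", "5", "1")]
#   container 2: [("spout", "2", "1"), ("bolt", "4", "0")]
# (dict iteration order is not guaranteed here, so the exact split may differ).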
def get_user_rammap(topology):
"""
The argument is the proto object for topology.
Returns a dict of component to ram as specified by user.
Expected user input is "comp1:42,comp2:24,..."
where 42 and 24 represents the ram requested in bytes.
Returns None if nothing is specified.
"""
rammap = get_topology_config(topology, constants.TOPOLOGY_COMPONENT_RAMMAP)
if rammap:
rmap = {}
for component_ram in rammap.split(","):
component, ram = component_ram.split(":")
rmap[component] = int(ram)
return rmap
return None
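# Illustrative note (not in the original source): a TOPOLOGY_COMPONENT_RAMMAP value
# of "spout:1073741824,bolt:536870912" parses above to
# {"spout": 1073741824, "bolt": 536870912}, with values in bytes.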
def get_container_ram_requested(topology):
"""
The argument is the proto object for topology.
Returns the container ram as requested by the user.
Returns None if not specified.
"""
ram = get_topology_config(topology, constants.TOPOLOGY_CONTAINER_RAM_REQUESTED)
return int(ram) if ram else None
def get_component_jvmopts(topology):
"""
The argument is the proto object for topology.
Returns the jvm options if specified by user.
"""
return str(get_topology_config(topology, constants.TOPOLOGY_COMPONENT_JVMOPTS) or "")
def get_topology_state_string(topology):
"""
The argument is the proto object for topology.
Returns the topology state as one of:
1. RUNNING
2. PAUSED
3. KILLED
"""
return topology_pb2.TopologyState.Name(topology.state)
# pylint: disable=too-many-return-statements, too-many-branches
def sane(topology):
""" First check if topology name is ok """
if topology.name == "":
print "Topology name cannot be empty"
return False
if '.' in topology.name or '/' in topology.name:
print "Topology name cannot contain . or /"
return False
component_names = set()
for spout in topology.spouts:
component_name = spout.comp.name
if component_name in component_names:
      print component_name + " is defined more than once"
return False
component_names.add(component_name)
for bolt in topology.bolts:
component_name = bolt.comp.name
if component_name in component_names:
      print component_name + " is defined more than once"
return False
component_names.add(component_name)
ninstances = get_total_instances(topology)
nstmgrs = get_nstmgrs(topology)
if nstmgrs > ninstances:
print "Number of containers are greater than number of instances"
return False
for kv in topology.topology_config.kvs:
if kv.key == constants.TOPOLOGY_COMPONENT_RAMMAP:
rammap = str(kv.value).split(',')
for component_ram in rammap:
component_and_ram = component_ram.split(':')
if len(component_and_ram) != 2:
print "TOPOLOGY_COMPONENT_RAMMAP is set incorrectly"
return False
if not component_and_ram[0] in component_names:
print "TOPOLOGY_COMPONENT_RAMMAP is set incorrectly"
return False
# Now check if bolts are reading streams that some component emits
all_outputs = sets.Set()
for spout in topology.spouts:
for outputstream in spout.outputs:
all_outputs.add((outputstream.stream.id, outputstream.stream.component_name))
for bolt in topology.bolts:
for outputstream in bolt.outputs:
all_outputs.add((outputstream.stream.id, outputstream.stream.component_name))
for bolt in topology.bolts:
for inputstream in bolt.inputs:
if (inputstream.stream.id, inputstream.stream.component_name) not in all_outputs:
print "Bolt " + bolt.comp.name + " has an input stream that no one emits"
return False
return True
|
# Generated by Django 3.1.2 on 2020-11-16 12:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('feedback', '0001_initial'),
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='home',
name='doctor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.doctor'),
),
migrations.AddField(
model_name='feedback',
name='doctor',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.doctor'),
),
migrations.AddField(
model_name='feedback',
name='patient',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.patient'),
),
]
|
from clases.Edificio_LA import Edificio_LA
from clases.Edificio_NY import Edificio_NY
from clases.Empleado_LA import Empleados_LA
from clases.Edificio_NY import Empleados_NY
from clases.LosAngeles import LosAngeles
from clases.NewYork import NewYork
from clases.inmortal import Yin
from clases.inmortal import Yang
from clases.interfaz import Interfaz_cristal
from clases.interfaz import Pared
from clases.interfaz import Ventana
from clases.interfaz import Casa
from clases.interfaz import ParedCortina
if __name__ == '__main__':
Edificio_LA()
Edificio_NY()
Empleados_LA()
Empleados_NY()
LosAngeles()
NewYork()
    ciudad = str(input('Which city do you want to destroy, New York or Los Ángeles?'))
if ciudad == 'New York':
NY = NewYork()
del NY
elif ciudad == 'Los Ángeles':
LA = LosAngeles()
del LA
else:
        print('The selected option is not valid')
if __name__ == '__main__':
Yin()
Yang()
if __name__ == '__main__':
Interfaz_cristal()
Pared()
Ventana()
Casa()
ParedCortina()
|
#!/usr/bin/env python
import sys
import math, pdb, random
import list_funcs
import array
#import pysam
class BED_Track:
"""A BED track, following the format defined in the UCSC browser website."""
def __init__(self, trackInput, peaksTrack=False, refGenomeFilename=None):
# Each chromosome of BED items will be held in an array, referenced
# by a dictionary. This allows separate chromosome's data to be
# easily retrieved:
self.chroms = {}
# Determine the type of input, and initialise this BED_Track according
# to that type:
if (isinstance(trackInput, file)):
self.initFromFile(trackInput, peaksTrack=peaksTrack)
else:
assert (isinstance(trackInput, list))
self.initFromList(trackInput)
# The name of a file containing the reference genome corresponding
# to the assembly for this BED_Track object.
self.refGenomeFilename = refGenomeFilename
def getRefGenomeFilename(self):
return self.refGenomeFilename
def setRefGenomeFilename(self, filename):
self.refGenomeFilename = filename
def initFromList(self, inList):
"""Private method for initialising this object from an input list."""
for inputBED in inList:
assert(isinstance(inputBED, BED_line))
bed_chrom = inputBED.getChrom()
# Add the bed data item to the correct chromosome:
if self.chroms.has_key(bed_chrom):
self.chroms[bed_chrom].append(inputBED)
else:
self.chroms[bed_chrom] = [inputBED]
# Sort the bed_items according to chromosomal location for each
# chromosome:
for key in self.chroms.keys():
bed_items = self.chroms[key]
bed_items.sort(chrom_compare)
tuple_bed_items = tuple(bed_items)
self.chroms[key] = tuple_bed_items
def initFromFile(self, inFile, peaksTrack=False):
"""Private method for initialising this object from an input file."""
# Parse the BED track from the specified file, creating a BED_Track
# object:
try:
for line in inFile:
elems = line.split()
# Only add the line if it is a chromosomal location:
#if ((len(elems) > 0) and (elems[0][0:3] == "chr")):
# FIXME: The above condition was broken, so I changed it
# to only filter out comment lines:
if ((len(elems) > 0) and (elems[0][0] != "#")):
# Generate either a regular BED_line or a peaks_BED_line,
# depending on the value of the optional peaksTrack
# parameter. This is done to allow this class to optionally
# represent a track of ChIP-seq peak data, with extra
# attributes on each line:
curr_bed = None
if (peaksTrack):
curr_bed = peak_BED_line(line, track=self)
else:
curr_bed = BED_line(line, track=self)
bed_chrom = curr_bed.getChrom()
# Add the bed data item to the correct chromosome:
if self.chroms.has_key(bed_chrom):
self.chroms[bed_chrom].append(curr_bed)
else:
self.chroms[bed_chrom] = [curr_bed]
# Sort the bed_items according to chromosomal location, if
# necessary, for each chromosome:
for key in self.chroms.keys():
bed_items = self.chroms[key]
bed_items.sort(chrom_compare)
tuple_bed_items = tuple(bed_items)
self.chroms[key] = tuple_bed_items
except IOError:
# If the file does not open, throw this exception upwards, rather
# than dealing with it here:
raise
except ValueError:
# A value error here means that a BED_line was invalid.
pdb.set_trace()
dummy = 1
raise
# Added on 10th November 2009:
def getAllBEDs(self):
allBEDs = []
for chrom in (self.get_chroms()):
allBEDs = allBEDs + list(self.get_BEDs(chrom))
return allBEDs
# Function to retrieve all bed items for a specified chromosome, from
# this object. If there are no bed items for that chromosome (for this
# track), then return an empty list:
def get_BEDs(self, chrom):
if (self.chroms.has_key(chrom)):
return self.chroms[chrom]
else:
return []
# Added on July 15th 2013:
def getBEDsForRegion(self, region):
"""Returns a list of BED_line items whose start and end positions are
located inside the specified region (specified as a BED_line object)."""
chrom = region.getChrom()
allBedItems = self.get_BEDs(chrom)
# NOTE/FIXME: This assumes that the bed items in this track are of equal
# size, and will return misleading output otherwise.
# To fix this, I need to have separate indexes of the beds according
# to chrom start and chrom end.
# Get the index of the first bed item starting immediately inside the
# specified region...
idxOfBedClosestToStart = \
list_funcs.find_closest_idx(region, allBedItems,
get_disp_chrStartChrStart)
bedClosestToStart = allBedItems[idxOfBedClosestToStart]
if bedClosestToStart.getChromStart() > region.getChromStart():
# That bed item is to the right of the start of the region.
bedToGet_startIdx = idxOfBedClosestToStart
else:
# Will start from the next bed item over:
bedToGet_startIdx = idxOfBedClosestToStart + 1
# Get the index of the bed item ending just before the end of the
# specified region...
idxOfBedClosestToEnd = \
list_funcs.find_closest_idx(region, allBedItems,
get_disp_chrEndChrEnd )
bedClosestToEnd = allBedItems[idxOfBedClosestToEnd]
if bedClosestToEnd.getChromEnd() < region.getChromEnd():
            # That bed item ends inside the region.
bedToGet_endIdx = idxOfBedClosestToEnd
else:
# Will end at the previous bed item:
bedToGet_endIdx = idxOfBedClosestToEnd - 1
return allBedItems[bedToGet_startIdx:bedToGet_endIdx+1]
def get_chroms(self):
"""Returns a list of the the chromosomes included in this bed object."""
return self.chroms.keys()
# Function to retrieve the item closest to the chromStart of the specified
# BED line position, in log(N) time.
def get_closest_item(self, query_item):
# Retrieve the bed items for the specified chromosome:
bed_items = self.get_BEDs(query_item.getChrom())
# If no item is located on the same chromosome, then return None:
if (len(bed_items) == 0):
return None
# Locate the item in the (sorted) list of bed items that is located
# closest to the specified item:
closest = list_funcs.find_closest(query_item, bed_items, get_disp)
return closest
# Function to retrieve the item whose chromEnd is closest to the
# chromStart of the specified BED line position, in log(N) time.
def get_closest_chromEnd(self, query_item):
# Retrieve the bed items for the specified chromosome:
bed_items = self.get_BEDs(query_item.getChrom())
# If no item is located on the same chromosome, then return None:
if (len(bed_items) == 0):
return None
# Locate the item in the (sorted) list of bed items that is located
# closest to the specified item:
closest = list_funcs.find_closest(query_item, bed_items, get_disp2)
return closest
# This function finds the closest bed item that occurs {upstream of and on
# the same strand as} the specified query bed item:
def get_closest_upstreamSameStrand(self, query_item):
# Retrieve the bed items for the specified chromosome:
bed_items = self.get_BEDs(query_item.getChrom())
# If no item is located on the same chromosome, then return None:
if (len(bed_items) == 0):
return None
# Iterate through the bed items from the *downstream* end of the
# chromosome's beds, until the first {*upstream* bed on the same strand}
# is encountered:
if (query_item.getStrand() == "+"):
# The query item is on the positive strand => Start scanning
# from the *end* of the chromosome...
bed_idx = len(bed_items) - 1
# Logically, there must be at least one item in list, so the
# following array indexing should not yield an error:
curr_bed = bed_items[bed_idx]
while ((bed_idx >= 0) and ((curr_bed.getStrand() == "-") or
(curr_bed.getChromStart() > query_item.getChromStart()))):
curr_bed = bed_items[bed_idx]
bed_idx = bed_idx - 1
else:
            # The query item is on the negative strand => Start scanning
            # from the *start* of the chromosome...
bed_idx = 0
# Logically, there must be at least one item in list, so the
# following array indexing should not yield an error:
curr_bed = bed_items[bed_idx]
while ((bed_idx < len(bed_items)) and
((curr_bed.getStrand() == "+") or
(curr_bed.getChromEnd() < query_item.getChromEnd()))):
curr_bed = bed_items[bed_idx]
bed_idx = bed_idx + 1
if ((bed_idx < 0) or (bed_idx >= len(bed_items))):
return None
else:
return curr_bed
def writeToFile(self, outFile):
"""Writes a bed file for this track."""
for chrom in self.get_chroms():
for bedItem in self.get_BEDs(chrom):
outFile.write(bedItem.to_string() + "\n")
outFile.flush()
# FIXME: Currently, explicitly printing just the first 4 fields.
# It would be nice to have a more flexible and general printing
# mechanism.
# outStr = \
# bedItem.getChrom() + " " + \
# bedItem.getChromStart() + " " + \
# bedItem.getChromEnd() + " " + \
# bedItem.getName() + "\n"
# file.write(outStr)
def toArray(self):
"""Returns a flattened version of this data structure; i.e. a list of
BED_line objects."""
arr = []
for chrom in self.get_chroms():
for bedItem in self.get_BEDs(chrom):
arr.append(bedItem)
return arr
def sampleBEDs(self, nBEDs, sample="random"):
# Sample the required number of bed items from all those in this
# track...
allBEDs = self.getAllBEDs()
if (sample == "random"):
# Sample items randomly:
random.shuffle(allBEDs)
return allBEDs[:nBEDs]
elif (sample == "highScoreFilt"):
# Select the highest scoring items after first ignoring the
# top 5% of items:
allBEDs.sort(compareScores, reverse=True)
index5percent = int(len(allBEDs)/20.0)
return allBEDs[:(index5percent+nBEDs)][-nBEDs:]
else:
# Select the highest scoring items:
assert (sample == "highScore")
allBEDs.sort(compareScores, reverse=True)
return allBEDs[:nBEDs]
def gffPeaks2bed(gffFilename, outFilename):
"""Converts a specified gff file of ChIP-seq peaks into chip-seq peak-
specific bed format."""
# Introduced on October 1, 2010, for my chipseqQC analysis software:
gffFile = open(gffFilename)
outFile = open(outFilename, 'w')
# FIXME: Should probably improve the parsing here in the future, to
# introduce some error checking.
# Generate an output line from each output line, and write it out:
for line in gffFile:
elems = line.split()
# Minor error checking here. Just check that the line has the required
# number of fields in it:
if (len(elems) < 9):
print >> sys.stderr, "Invalid input line in peaks gff file " + \
gffFilename + ":", line
sys.exit(1)
chrom = "chr" + elems[0]
chrStart = elems[1]
chrEnd = elems[2]
summit = elems[3]
peakHeight = elems[4]
peakVolume = elems[5]
negCntrl = elems[6]
fcRatio = elems[7]
pVal = elems[8]
extraInfo = peakVolume + "_" + \
negCntrl + "_" + fcRatio + "_" + pVal
# FIXME: Currently, I am excluding the extra info (above) for the peak,
# rather than including it in the bed output line. Improve on this
# later.
outLine = chrom + " " + chrStart + " " + chrEnd + " NoName " + \
peakHeight + " # " + pVal + " " + fcRatio + " " + summit + "\n"
outFile.write(outLine)
outFile.flush()
outFile.close()
# This function returns the displacement between the chromStart of the first
# element, and the chromEnd of the second, assuming that they are located on
# the same chromosome as each other:
def get_disp2(bed1, bed2):
assert(bed1.getChrom() == bed2.getChrom())
return (bed1.getChromStart() - bed2.getChromEnd())
# This function returns the displacement from the chromStart of the second
# element to the chromStart of the first, assuming that they are located on
# the same chromosome as each other:
def get_disp_chrStartChrStart(bed1, bed2):
assert(bed1.getChrom() == bed2.getChrom())
return (bed1.getChromStart() - bed2.getChromStart())
# This function returns the displacement from the chromEnd of the second
# element to the chromEnd of the first, assuming that they are located on
# the same chromosome as each other:
def get_disp_chrEndChrEnd(bed1, bed2):
assert(bed1.getChrom() == bed2.getChrom())
return (bed1.getChromEnd() - bed2.getChromEnd())
# This function returns the displacement between the two elements, assuming
# that they are located on the same chromosome as each other:
def get_disp(bed1, bed2):
if (bed1.getChromStart() > bed2.getChromEnd()):
# Bed1 is "to the right" of bed2:
return (bed1.getChromStart() - bed2.getChromEnd())
elif (bed1.getChromEnd() < bed2.getChromStart()):
# Bed1 is "to the left" of bed2 (so negative displacement):
return -(bed2.getChromStart() - bed1.getChromEnd())
else:
# The two items must overlap => Report a displacement of zero:
return 0
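# Illustrative note (not in the original source), using get_disp's convention:
# for bed1 = chr1:200-300 and bed2 = chr1:100-150 the displacement is
# 200 - 150 = 50 (bed1 lies to the right), while overlapping items return 0.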
# This function chooses the bed line with the lowest score, out of the two
# options:
def get_lowest(line1, line2):
if (line1.getScore() < line2.getScore()):
return line1
elif (line1.getScore() == line2.getScore()):
print >> sys.stderr, "Lines have equal scores:\n", line1.to_string(), \
"\n", line2.to_string()
return line2
else:
return line2
# A comparator function. This function compares two bed lines objects according
# to their score values, and reports -1, 0, or 1, according to the outcome
# of the comparison.
def compareScores(line1, line2):
if (line1.getScore() < line2.getScore()):
return -1
elif (line1.getScore() == line2.getScore()):
return 0
else:
return 1
# A comparator function. Compares two chromosomal locations.
def chrom_compare(line1, line2):
# Sort lexicographically according to chromosome first:
if (line1.chrom > line2.chrom):
return 1
elif (line1.chrom < line2.chrom):
return -1
# If chromosomes are then same, then sort according to chromStart:
elif (line1.chromStart > line2.chromStart):
return 1
elif (line1.chromStart == line2.chromStart):
return 0
else:
return -1
class BED_line(object):
"""A line of data in BED format. Modified on 6th of October 2010: Introduced
a comments field. Now, any non-compulsory field will be regarded as a
comment if it occurs after a "#" character. I did this to allow me
to extend the class and to include extra information in the comments field
for the extending class peak_BED_line."""
def __init__(self, line, track=None):
items = line.split()
# Introduced on 29th October 2010 to allow storage of wiggle track data
# for an individual BED item:
# August 1st 2013:
# Removing reference to parent track; I suspect it could create a
# "reference cycle", which would make garbage collection not happen:
#self.track=track # The track to which this BED line belongs (if any)
self.wigVals = {}
try:
# Parse the compulsory fields (chrom, start, end):
self.chrom = items[0]
self.chromStart = int(items[1])
self.chromEnd = int(items[2])
# Parse the remaining fields on the line...
self.comment = None
self.name = None
self.score = None
self.strand = None
# For each remaining item from the 4th onwards, check if it
# starts with "#". If so, store the remaining sections of the string
# as a comment and stop parsing the line. Otherwise, store the
# field as the appropriate attribute in the class:
item_idx = 3
while ((self.comment == None) and (len(items) > item_idx)):
currItem = items[item_idx]
if (currItem[0] == "#"):
# The remaining fields on the line comprise a comment.
# Reconstitute the string from those fields, and store
# the resulting comment:
commentStr = reduce(lambda tok1, tok2: tok1 + " " + tok2,
items[item_idx:])[1:]
self.comment = commentStr
else:
# The field is a regular optional bed line field. Parse
# it as such:
if (item_idx == 3):
self.name = currItem
if (item_idx == 4):
self.score = float(currItem)
if (item_idx == 5):
if (currItem == "None"):
# Added on 15th October 2010: Allow "None" as
# a valid strand string:
self.strand = None
else:
if (currItem != '-' and currItem != '+'):
raise ValueError("Invalid strand string: " +
currItem)
self.strand = currItem
item_idx = item_idx + 1
except ValueError:
# If the BED line is not formatted correctly, then raise the
# exception to the calling code.
raise
# FIXME: The following fields of BED objects are not being
# used in my current applications. => Comment them out (ie don't
# make them attributes of the class) for the time being.
# if (len(items) > 6):
# self.thickStart = items[6]
# else:
# self.thickStart = None
# if (len(items) > 7):
# self.thickEnd = items[7]
# else:
# self.thickEnd = None
# if (len(items) > 8):
# self.itemRgb = items[8]
# else:
# self.itemRgb = None
# if (len(items) > 9):
# self.blockCount = items[9]
# else:
# self.blockCount = None
# if (len(items) > 10):
# self.blockSizes = items[10]
# else:
# self.blockSizes = None
# if (len(items) > 11):
# self.blockStarts = items[11]
# else:
# self.blockStarts = None
# Converts this bed line to a string:
def to_string(self):
str_rep = self.chrom + " " + str(self.chromStart) + " " + \
str(self.chromEnd) + " " + str(self.name) + " " + str(self.score) \
+ " " + str(self.strand)
return str_rep
# August 1st 2013:
# Removing reference to parent track; I suspect it could create a
# "reference cycle", which would make garbage collection not happen:
#def getTrack(self):
# return self.track
def getChrom(self):
return self.chrom
def getChromStart(self):
return self.chromStart
def setChromStart(self, newStart):
self.chromStart = newStart
def setChromEnd(self, newEnd):
self.chromEnd = newEnd
def getChromEnd(self):
return self.chromEnd
def getLen(self):
"""Return the length of this genomic interval."""
return self.getChromEnd() - self.getChromStart() + 1
def getScore(self):
return self.score
def setScore(self, score):
self.score = score
def setName(self, name):
self.name = name
def getStrand(self):
return self.strand
def getName(self):
return self.name
# Returns true if this bed line overlaps the specified item's chromStart.
# Returns false otherwise:
def overlaps(self, bed_item):
if (self.getChrom() != bed_item.getChrom()):
return False
start = self.getChromStart()
end = self.getChromEnd()
if ((start <= bed_item.getChromStart()) and \
(end >= bed_item.getChromStart())):
return True
else:
return False
def overlaps2(self, bed_item):
"""Returns true if this bed line overlaps any of the specified item's
region. Returns false otherwise:"""
if (self.getChrom() != bed_item.getChrom()):
return False
start = self.getChromStart()
end = self.getChromEnd()
if ((start <= bed_item.getChromStart()) and
(end >= bed_item.getChromStart())):
return True
elif ((start <= bed_item.getChromEnd()) and
(end >= bed_item.getChromEnd())):
return True
elif ((start >= bed_item.getChromStart()) and
(end <= bed_item.getChromEnd())):
return True
else:
return False
def setWig(self, wigKey, wigArray):
"""Sets wiggle data for this BED object. Stores the specified array
under the specified object key, which will be used as a dictionary
key."""
self.wigVals[wigKey] = wigArray
def getWig(self, wigKey):
"""Returns the wiggle data for the specified key."""
return self.wigVals[wigKey]
def spansBED(self, bedTrack):
""""Returns true if there is at least one {bed item in the specified
bedTrack} whose end occurs within the bounds of this bed item."""
regStart = self.getChromStart()
regEnd = self.getChromEnd()
regMiddle = int((regStart + regEnd)/2.0)
tmpBedLineStr = self.getChrom() + " " + str(regMiddle) + " " + \
str(regMiddle)
halfRegSize = (regEnd - regStart)/2.0
regMiddle_BED = BED_line(tmpBedLineStr)
bedMiddleClosestPeak = bedTrack.get_closest_item(regMiddle_BED)
if ((bedMiddleClosestPeak != None) and \
(abs(get_disp(bedMiddleClosestPeak, regMiddle_BED)) < \
halfRegSize)):
return True
else:
return False
def spanningBED(self, bedTrack):
""""Returns the closest item in bedTrack if there is at least one {bed
item in the specified bedTrack} that spans the middle of this bed
item. Otherwise returns None"""
regStart = self.getChromStart()
regEnd = self.getChromEnd()
regMiddle = int((regStart + regEnd)/2.0)
tmpBedLineStr = self.getChrom() + " " + str(regMiddle) + " " + \
str(regMiddle)
regMiddle_BED = BED_line(tmpBedLineStr)
bedMiddleClosestItem = bedTrack.get_closest_item(regMiddle_BED)
if ((bedMiddleClosestItem != None) and
(bedMiddleClosestItem.getChromStart() < regMiddle) and
(bedMiddleClosestItem.getChromEnd() > regMiddle)):
return bedMiddleClosestItem
else:
return None
def getBedCoordStr(self):
"""Returns a single-word representation of the coordinates of this
bed object."""
return str(self.getChrom()) + ":" + str(self.getChromStart()) + "-" + \
str(self.getChromEnd())
# NOTE: Added this functionality on 1st October 2010 in order to support
# the chipseq QC analysis code:
# DEPRECATED ON 14TH OCTOBER 2010, as I have refactored my code =>
# Commenting this out.
# def seq2fasta(self, genomeFilename, outFilename, noZero=False):
# """This method retrieves the genomic sequence corresponding to the
# BED line's coordinates, and writes that sequence out to the specified
# file, in fasta format."""
# # noZero is a boolean specifying whether a zero-length sequence is
# # to be output.
# # FIXME: Currently, I am assuming the input reference genome chromosome
# # sequences are named just by number, without a preceding "chr". This
# # assumption is very volatile, and I should improve the mechanism
# # here to make it robust in the future - e.g. by parsing part of
# # the genome fasta file to ensure it follows that format.
# chrLocStr = self.getChrom()[3:] + ":" + str(self.getChromStart()) + \
# "-" + str(self.getChromEnd())
# # print >> sys.stderr, \
# # "seq2fasta called. Attempting to run pysam.faidx for the inputs", \
# # genomeFilename, "and", chrLocStr
# seqGetter = getSeqFa.seqRetriever(genomeFilename)
# seq = seqGetter.getSequence(chrLocStr)
# # Output the sequence in fasta format...
# seqNameLine = ">" + chrLocStr + "\n"
# outFile = open(outFilename, 'a')
# if (noZero):
# if (len(seq) > 0):
# # Sequence is non-zero length => can print it:
# outFile.write(seqNameLine)
# outFile.write(seq + "\n")
# else:
# # Output the sequence, irrespective of it's length:
# outFile.write(seq + "\n")
# outFile.write(line)
# outFile.flush()
# outFile.close()
def peaksFcCmp(peakBED1, peakBED2):
"""Comparator function that compares two peak beds on fold-change."""
if (peakBED1.foldChange < peakBED2.foldChange):
return -1
elif (peakBED1.foldChange == peakBED2.foldChange):
return 0
else:
return 1
class peak_BED_line(BED_line):
"""A class representing a special kind of BED line, which represents a
chip-seq peak. This line has three extra attributes (pValue, foldChange,
and summit), which are stored as space-delimited items in that order in
the comment string of the input line. I have done it this way because
the standard bed line does not include these attributes as defined by
UCSC."""
def __init__(self, line, track=None):
# Instantiate all the fields on the line:
super(peak_BED_line, self).__init__(line, track=track)
# Parse the comment string of the parent class, and extract the 3
# required fields from it. Report an error if the comment string
# formatting is invalid...
try:
# Check that the comments field has been instantiated and that
# it has three items. If it doesn't, report this input error as
# an exception:
if ((self.comment == None) or (len(self.comment.split()) != 3)):
raise ValueError(\
"Invalid input line for peak_BED_line creation:" + \
str(self.comment))
toks = self.comment.split()
self.pVal = float(toks[0])
self.foldChange = float(toks[1])
self.summit = int(toks[2])
# Parse the comment line, storing
except ValueError:
# If the BED line is not formatted correctly, then raise the
# exception to the calling code.
raise
# The peak sequence is also introduced as an attribute of this
# BED_line subclass:
        self.peakSequence = None
def get_pVal(self):
return self.pVal
def get_fc(self):
return self.foldChange
def get_summit(self):
return self.summit
def to_string(self):
"""Returns a string representation of this peak bed object. Simply
appends the additional variables onto the end of the string generated
by the superclass."""
superStrRep = super(peak_BED_line, self).to_string()
str_rep = superStrRep + " # " + str(self.get_pVal()) + " " + \
str(self.get_fc()) + " " + str(self.get_summit())
return str_rep
# August 1st 2013:
# Removing reference to parent track; I suspect it could create a
# "reference cycle", which would make garbage collection not happen:
# def getRefGenomeFilename(self):
# return self.getTrack().getRefGenomeFilename()
# September 18th 2013:
# Re-adding the getPeakSequence method, but this time the user has
# to pass in a seqGetter object:
def getPeakSequence(self, flankWidth, seqGetter):
"""Returns the peak's sequence, corresponding to the sequence
surrounding the summit with the specified flanking sequence width."""
# Generate a new chromosomal loc string, centred on the peak's
# summit, and with the specified width:
startPos = str(self.get_summit() - flankWidth)
endPos = str(self.get_summit() + flankWidth)
if (int(startPos) < 0):
print >> sys.stderr, "WARNING: Ignoring peak as extended start " + \
"is below 0:", self.to_string()
return None
centredPeakLoc = self.getChrom() + ":" + startPos + "-" + endPos
# Retrieve the genomic sequence corresponding to that location,
# and set this peak's sequence accordingly...
# March 4th 2013: Trim the start of the chromosome string name
# if needed. Figure out whether to trim based on the genome
# filename contents. I don't like this solution but it is the
# best solution I can come up with in the current design, since
# option passing is not flexible enough in chipseq_analysis.py:
firstGenomeLine = open(seqGetter.seqFilename).readline()
chrLocQueryStr = None
if firstGenomeLine[:4] == ">chr":
chrLocQueryStr = centredPeakLoc
else:
chrLocQueryStr = centredPeakLoc[3:]
try:
peakSequence = seqGetter.getSequence(chrLocQueryStr)
# Ensure that the peak sequence is the correct size given
# the flanking width specified. If it is not, then set the
# peakSequence to null and report this as a warning:
requiredSeqSize = (flankWidth*2)+1
if (len(peakSequence) != requiredSeqSize):
print >> sys.stderr, "WARNING!: Genomic sequence for " + \
"peak " + self.to_string() + " is not the required size" + \
" of " + str(requiredSeqSize) + ". Setting sequence to null" + \
" instead."
return None
else:
return peakSequence
except ValueError, e:
# If the peak's sequence could not be retrieved, report this
# error (a possible user input error) but do not crash:
print >> sys.stderr, "WARNING!: Could not retrieve sequence" + \
" for peak " + self.to_string() + "."
# Sequence retrieval failed for this peak for some reason =>
# Make sure that the peakSequence is returned as null
# to flag this fact:
return None
def writeFasta(self, outFile, flankWidth, seqGetter):
"""Write the sequence for this peak to the specified output fasta
file. The sequence name written is exactly the same as the string
returned by the parent class's method getBedCoordStr()."""
seqName = ">" + self.getBedCoordStr()
# NOTE: Potentially confusing point: The sequence returned probably
# does *not* correspond to the printed sequence name; This is because
# "getPeakSequence" retrieves the region flanking the peak summit,
# whereas the sequence name specifies the start and end of the peak
# itself.
seq = self.getPeakSequence(flankWidth, seqGetter)
outFile.write(seqName + "\n")
outFile.write(seq + "\n")
def copy(self):
"""Introduced on January 21st 2011 to facilitate spamo analysis.
Returns a copy of this peak_BED_line object."""
# Generate a string representing the line, which is required by
# the constructor of this class:
strRep = self.to_string()
# Generate the copy and return it:
copy = peak_BED_line(strRep)
        # NOTE: The parent-track reference was removed from this class (see the
        # August 1st 2013 comments above), so the copy no longer carries one.
return copy
def setStart(self, newStart):
"""Introduced on January 21st 2011 to facilitate spamo analysis.
Sets the start coordinates of this BED_line object. NOTE: This
is a bit dodgy as I had originally planned for the start and end
of a given BED_line object to be immutable."""
self.chromStart = newStart
def setEnd(self, newEnd):
"""Introduced on January 21st 2011 to facilitate spamo analysis.
Sets the end coordinates of this BED_line object. NOTE: This
is a bit dodgy as I had originally planned for the start and end
of a given BED_line object to be immutable."""
self.chromEnd = newEnd
class FixedStep_Track_From_BED:
"""A fixedStep wiggle track class that is created by reading in a bed-
formatted file."""
def __init__(self, file, bg_score):
# A hashtable with chromosomal locs as keys and scores from the
# bed items as values. Supports O(1) retrieval of values on the basis
# of chromosomal location, assuming that no collisions occur.
self.values = {}
# The background score:
self.bg_score = bg_score
# Parse the contents of the specified file to produce the wiggle
# track object:
try:
for line in file:
elems = line.split()
# Will just ignore the line if it is empty; empty lines are
# valid:
if (len(elems) > 0):
# Only add values for the line if it is a chromosomal
# location:
first_elem = elems[0]
if (first_elem[0:3] == "chr"):
# Each bed entry must have a score entry if the bed
# object is going to be converted into a fixedStep
# wiggle track object:
if (len(elems) < 5):
raise ValueError(
"Line invalid for FixedStep_Track_From_BED:\n" \
+ line)
chrom = elems[0]
start = int(elems[1])
end = int(elems[2])
score = float(elems[4])
# Generate a list of chromosomal locations, spanning the
# specified start to the specified end:
for loc in (range(start,(end - 1))):
curr_pos = (chrom, loc)
# Add each resulting chromosomal location string to
# the values hashtable, with the items score as the
# value:
self.values[curr_pos] = score
except IOError:
# If the file does not open, throw this exception upwards, rather
# than dealing with it here:
raise
except ValueError:
# A value error here means that a BED_line was invalid.
raise
# Retrieves the wiggle track's value at the specified chromosomal position.
def get_value(self, chrom_name, pos):
if (self.values.has_key((chrom_name,pos))):
return self.values[(chrom_name,pos)]
else:
return self.bg_score # No explicit value => Return background value.
class FixedStep_Track:
"""A fixedStep wiggle track."""
# Added max_chrom_size on 24th July 2008. It specifies the maximum size
# a chromosome can be. If it is specified, then the array of float for
# each chromosome is pre-allocated and filled in. If left as null, the
# array is grown using append, instead.
def __init__(self, file, max_chrom_size=None):
print >> sys.stderr, "Reading file: ", file.name # Trace
# # Insist that the first line must define the fixedStep interval. Read
# # in that info:
# first_line = file.readline()
# elems = first_line.split()
# if (elems[0] != "Track"):
# raise ValueError("First line of file " + file.name + \
# " must start with \"track\"." + items[5])
# The arrays of data, corresponding to the different chromosomes, will
# be stored in a dictionary for easy retrieval:
self.chroms = {}
# Discard the lines at the start of the file until a "fixedStep" line
# is found:
curr_line = file.readline()
while ((curr_line != "") and (len(curr_line.split()) > 0) and \
(curr_line.split()[0] != "fixedStep")):
curr_line = file.readline()
# Keep reading data and placing it in "Chromosome" objects until no
# more lines are available in the file:
while (curr_line != ""):
# Current line is a "fixedStep" definition line. Retrieve the
# chrom, start, and step values from it:
elems = curr_line.split()
chrom_str = elems[1]
start_str = elems[2]
step_str = elems[3]
chrom = chrom_str.split("=")[1]
start = int(start_str.split("=")[1])
step = int(step_str.split("=")[1])
# Read in data until the end of the file is reached or a new
# chromosome is reached:
if (max_chrom_size == None):
chrom_data = array.array('f', [])
print >> sys.stderr, "STARTING WITH ARRAY OF SIZE ZERO."
else:
# 25th July 2008: Forced to use python arrays, to reduce
# memory usage.
# The maximum size for the chromosome has been specified:
# Default value; -100 value seems OK...
default_val = -100
chrom_data = array.array('f', [default_val]*max_chrom_size)
print >> sys.stderr, "STARTING WITH EMPTY ARRAY OF SIZE", \
max_chrom_size
curr_chrom_idx = 0
curr_line = file.readline()
while ((curr_line != "") and (len(curr_line.split()) > 0) and \
(curr_line.split()[0] != "fixedStep")):
if (max_chrom_size == None):
chrom_data.append(float(curr_line))
else:
chrom_data[curr_chrom_idx] = float(curr_line)
curr_chrom_idx = curr_chrom_idx + 1
curr_line = file.readline()
print >> sys.stderr, "FINISHED PROCESSING FIXEDSTEP BLOCK."
# Create a new chromosome, containing the values. These wiggle
# tracks are only allowed to have one segment of data per
# chromosome, for simplicity. Note that hence this class
# does not fully implement a wiggle track. Check this here:
if (self.chroms.has_key(chrom)):
raise ValueError( \
"More than one segment of data for a single chromosome: " \
+ chrom + " . File: " + file.name)
self.chroms[chrom] = Chromosome(chrom, start, step, chrom_data)
# Retrieves the wiggle track's value at the specified chromosomal position.
# Returns None if the wiggle track does not record a value at that location:
def get_value(self, chrom_name, pos):
# Retrieve the specified chromosome:
curr_chrom = self.chroms[chrom_name]
return curr_chrom.get_value(pos)
# Prints the data for the specified chromosome, for this fixedStep Wiggle
# track:
def print_chrom(self, outfile, chrom_name, chrom_len):
assert(self.chroms.has_key(chrom_name))
# The length of the chromosome must be specified, as this is not
# specified when the track is first created. FIXME: Could improve that.
print >> outfile, self.chroms[chrom_name].to_string(chrom_len)
class Chromosome:
def __init__(self, chrom, start, step, chrom_data):
self.chrom = chrom
self.start = start # Start of the data values in this chromosome
self.step = step
self.data = chrom_data
def get_value(self, pos):
# Calculate index of specified position into the array:
arr_idx = int(math.floor(float(pos - self.start)/self.step))
# int(math.ceil(float(pos - self.start)/self.step) - 1)
# If there is an item of data at that location, return it. Otherwise
# return None to indicate that the location did not have a value
# in this wiggle track:
if ((arr_idx >= 0) and (arr_idx < len(self.data))):
return self.data[arr_idx]
else:
return None
def to_string(self, chrom_len):
out_str = "CHROM: " + str(self.chrom) + "\n"
out_str = out_str + "START: " + str(self.start) + "\n"
out_str = out_str + "STEP: " + str(self.step) + "\n"
for pos in range(1, chrom_len + 1):
out_str = out_str + str(self.get_value(pos)) + "\n"
return out_str
def file_is_fixedStep(filename):
"""Scans the file, and returns if it is in fixedStep format, false
otherwise"""
file = open(filename)
for line in file:
elems = line.split()
        if ((len(elems) > 0) and (elems[0] == "fixedStep")):
return True
return False
def file_is_bed(filename):
"""Scans the file, and returns if it is in BED format, false
otherwise"""
file = open(filename)
for line in file:
elems = line.split()
        if ((len(elems) > 0) and (elems[0][0:3] == "chr")):
return True
return False
def getChromSizes(infoFile):
"""Returns a hashtable containing the lengths of the chromosomes, as
recorded in the specified file."""
sizeHash = {}
for line in infoFile:
elems = line.split()
sizeHash[elems[0]] = int(elems[1])
return sizeHash
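# Hypothetical usage sketch, not part of the original module (Python 2, like the
# rest of this file): parse one BED-format line and query it. The coordinates and
# name are arbitrary demo values.
if __name__ == "__main__":
    demo_bed = BED_line("chr1 100 200 demo_region 7.5 +")
    print demo_bed.to_string()
    print demo_bed.overlaps(BED_line("chr1 150 300"))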
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import base64
import time
from azure.core.exceptions import ServiceRequestError
from azure.core.pipeline.policies import HTTPPolicy
class AAZBearerTokenCredentialPolicy(HTTPPolicy):
"""
    Follows the implementation of azure.core.pipeline.policies.BearerTokenCredentialPolicy.
"""
def __init__(self, credential, *scopes, **kwargs): # pylint:disable=unused-argument
super().__init__()
self._scopes = scopes
self._credential = credential
self._token = None
self._aux_tokens = None
@staticmethod
def _enforce_https(request):
# move 'enforce_https' from options to context so it persists
# across retries but isn't passed to a transport implementation
option = request.context.options.pop("enforce_https", None)
# True is the default setting; we needn't preserve an explicit opt in to the default behavior
if option is False:
request.context["enforce_https"] = option
enforce_https = request.context.get("enforce_https", True)
if enforce_https and not request.http_request.url.lower().startswith("https"):
raise ServiceRequestError(
"Bearer token authentication is not permitted for non-TLS protected (non-https) URLs."
)
def _update_headers(self, headers):
"""Updates the Authorization header with the bearer token.
:param dict headers: The HTTP Request headers
"""
headers["Authorization"] = "Bearer {}".format(self._token.token)
if self._aux_tokens:
headers["x-ms-authorization-auxiliary"] = ', '.join(
"Bearer {}".format(token.token) for token in self._aux_tokens)
@property
def _need_new_token(self):
if not self._token:
return True
return self._token.expires_on - time.time() < 300
@property
def _need_new_aux_tokens(self):
if not self._aux_tokens:
return True
for token in self._aux_tokens:
if token.expires_on - time.time() < 300:
return True
return False
def on_request(self, request):
"""Called before the policy sends a request.
The base implementation authorizes the request with a bearer token.
:param ~azure.core.pipeline.PipelineRequest request: the request
"""
self._enforce_https(request)
if self._need_new_token:
self._token = self._credential.get_token(*self._scopes)
if self._need_new_aux_tokens:
self._aux_tokens = self._credential.get_auxiliary_tokens(*self._scopes)
self._update_headers(request.http_request.headers)
def authorize_request(self, request, *scopes, **kwargs):
"""Acquire a token from the credential and authorize the request with it.
Keyword arguments are passed to the credential's get_token method. The token will be cached and used to
authorize future requests.
:param ~azure.core.pipeline.PipelineRequest request: the request
:param str scopes: required scopes of authentication
"""
self._token = self._credential.get_token(*scopes, **kwargs)
self._aux_tokens = self._credential.get_auxiliary_tokens(*self._scopes)
self._update_headers(request.http_request.headers)
def send(self, request):
"""Authorize request with a bearer token and send it to the next policy
:param request: The pipeline request object
:type request: ~azure.core.pipeline.PipelineRequest
"""
self.on_request(request)
try:
response = self.next.send(request)
self.on_response(request, response)
except Exception: # pylint:disable=broad-except
handled = self.on_exception(request)
if not handled:
raise
else:
if response.http_response.status_code == 401:
self._token = None # any cached token is invalid
if "WWW-Authenticate" in response.http_response.headers:
request_authorized = self.on_challenge(request, response)
if request_authorized:
try:
response = self.next.send(request)
self.on_response(request, response)
except Exception: # pylint:disable=broad-except
handled = self.on_exception(request)
if not handled:
raise
return response
def on_challenge(self, request, response):
"""Authorize request according to an authentication challenge
This method is called when the resource provider responds 401 with a WWW-Authenticate header.
:param ~azure.core.pipeline.PipelineRequest request: the request which elicited an authentication challenge
:param ~azure.core.pipeline.PipelineResponse response: the resource provider's response
:returns: a bool indicating whether the policy should send the request
"""
# pylint:disable=unused-argument,no-self-use
return False
def on_response(self, request, response):
"""Executed after the request comes back from the next policy.
:param request: Request to be modified after returning from the policy.
:type request: ~azure.core.pipeline.PipelineRequest
:param response: Pipeline response object
:type response: ~azure.core.pipeline.PipelineResponse
"""
def on_exception(self, request):
"""Executed when an exception is raised while executing the next policy.
This method is executed inside the exception handler.
:param request: The Pipeline request object
:type request: ~azure.core.pipeline.PipelineRequest
:return: False by default, override with True to stop the exception.
:rtype: bool
"""
# pylint: disable=no-self-use,unused-argument
return False
class AAZARMChallengeAuthenticationPolicy(AAZBearerTokenCredentialPolicy):
"""Adds a bearer token Authorization header to requests.
This policy internally handles Continuous Access Evaluation (CAE) challenges. When it can't complete a challenge,
it will return the 401 (unauthorized) response from ARM.
:param ~azure.core.credentials.TokenCredential credential: credential for authorizing requests
:param str scopes: required authentication scopes
"""
@staticmethod
def _parse_claims_challenge(challenge):
"""Parse the "claims" parameter from an authentication challenge
Example challenge with claims:
Bearer authorization_uri="https://login.windows-ppe.net/", error="invalid_token",
error_description="User session has been revoked",
claims="eyJhY2Nlc3NfdG9rZW4iOnsibmJmIjp7ImVzc2VudGlhbCI6dHJ1ZSwgInZhbHVlIjoiMTYwMzc0MjgwMCJ9fX0="
:return: the challenge's "claims" parameter or None, if it doesn't contain that parameter
"""
encoded_claims = None
for parameter in challenge.split(","):
if "claims=" in parameter:
if encoded_claims:
# multiple claims challenges, e.g. for cross-tenant auth, would require special handling
return None
encoded_claims = parameter[parameter.index("=") + 1:].strip(" \"'")
if not encoded_claims:
return None
padding_needed = -len(encoded_claims) % 4
try:
decoded_claims = base64.urlsafe_b64decode(encoded_claims + "=" * padding_needed).decode()
return decoded_claims
except Exception: # pylint:disable=broad-except
return None
def on_challenge(self, request, response): # pylint:disable=unused-argument
"""Authorize request according to an ARM authentication challenge
:param ~azure.core.pipeline.PipelineRequest request: the request which elicited an authentication challenge
:param ~azure.core.pipeline.PipelineResponse response: ARM's response
:returns: a bool indicating whether the policy should send the request
"""
challenge = response.http_response.headers.get("WWW-Authenticate")
if challenge:
claims = self._parse_claims_challenge(challenge)
if claims:
self.authorize_request(request, *self._scopes, claims=claims)
return True
return False
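# Illustrative sketch (added for clarity, defined but not executed): running the
# sample WWW-Authenticate value quoted in the docstring above through
# _parse_claims_challenge. The decoded result should be a small JSON document,
# roughly {"access_token":{"nbf":{"essential":true, "value":"1603742800"}}}.
def _example_parse_claims_challenge():
    challenge = (
        'Bearer authorization_uri="https://login.windows-ppe.net/", error="invalid_token", '
        'error_description="User session has been revoked", '
        'claims="eyJhY2Nlc3NfdG9rZW4iOnsibmJmIjp7ImVzc2VudGlhbCI6dHJ1ZSwgInZhbHVlIjoiMTYwMzc0MjgwMCJ9fX0="'
    )
    return AAZARMChallengeAuthenticationPolicy._parse_claims_challenge(challenge)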
|
import random
import uuid
from math import gcd
import numpy as np
from ._population import Population
from pychemia import Composition, Structure, pcm_log
from pychemia.analysis import StructureAnalysis, StructureChanger, StructureMatch
from pychemia.analysis.splitting import SplitMatch
from pychemia.utils.mathematics import unit_vector
from pychemia.utils.periodic import atomic_number, covalent_radius
from pymongo import ASCENDING
from pychemia.db import get_database
from pychemia.crystal import CrystalSymmetry
class RelaxStructures(Population):
def evaluate_entry(self, entry_id):
pass
def __init__(self, name, composition=None, tag='global', target_forces=1E-3, value_tol=1E-2,
distance_tolerance=0.3, min_comp_mult=2, max_comp_mult=8, pcdb_source=None, pressure=0.0,
target_stress=None, target_diag_stress=None, target_nondiag_stress=None):
"""
        Defines a population of PyChemia Structures.
        The 'name' of the database is used to create the MongoDB database and the structures are
        uniform in composition. A specific 'tag' can be attached to differentiate
        instances of this population running concurrently.
        :param name: The name of the population, i.e. the name of the database
        :param composition: The composition shared by all the members
        :param tag: A tag to differentiate different instances running concurrently
        :return: A new RelaxStructures object
"""
if composition is not None:
self.composition = Composition(composition)
else:
self.composition = None
self.tag = tag
self.target_forces = target_forces
self.value_tol = value_tol
self.min_comp_mult = min_comp_mult
self.max_comp_mult = max_comp_mult
self.pcdb_source = pcdb_source
self.pressure = pressure
if target_stress is None:
self.target_stress = target_forces
else:
self.target_stress = target_stress
if target_diag_stress is None:
self.target_diag_stress = self.target_stress
else:
self.target_diag_stress = target_diag_stress
        if target_nondiag_stress is None:
self.target_nondiag_stress = self.target_stress
else:
self.target_nondiag_stress = target_nondiag_stress
self.name = name
Population.__init__(self, name, tag, distance_tolerance=distance_tolerance)
if self.pcdb_source is not None:
self.sources = {}
for i in range(min_comp_mult, max_comp_mult+1):
self.sources[i] = []
for entry in self.pcdb_source.entries.find({'structure.natom': i*self.composition.natom,
'structure.nspecies': self.composition.nspecies},
{'_id': 1}):
self.sources[i].append(entry['_id'])
def recover(self):
data = self.get_population_info()
if data is not None:
self.distance_tolerance = data['distance_tol']
self.value_tol = data['value_tol']
self.name = data['name']
self.target_forces = data['target_forces']
def get_structure(self, entry_id):
entry = self.get_entry(entry_id)
return Structure.from_dict(entry['structure'])
@staticmethod
def new_identifier():
return str(uuid.uuid4())[-12:]
def new_entry(self, structure, active=True):
properties = {'forces': None, 'stress': None, 'energy': None}
status = {self.tag: active, 'tag': self.tag}
entry = {'structure': structure.to_dict, 'properties': properties, 'status': status}
entry_id = self.insert_entry(entry)
pcm_log.debug('Added new entry: %s with tag=%s: %s' % (str(entry_id), self.tag, str(active)))
return entry_id
def get_max_force_stress(self, entry_id):
entry = self.get_entry(entry_id, projection={'properties': 1})
max_force = None
max_diag_stress = None
max_nondiag_stress = None
if entry is not None and entry['properties'] is not None:
properties = entry['properties']
if 'forces' in properties and 'stress' in properties:
if properties['forces'] is not None and properties['stress'] is not None:
forces = np.array(entry['properties']['forces'])
stress = np.array(entry['properties']['stress'])
max_force = np.max(np.apply_along_axis(np.linalg.norm, 1, forces))
max_diag_stress = np.max(np.abs(stress[:3]))
max_nondiag_stress = np.max(np.abs(stress[4:]))
return max_force, max_diag_stress, max_nondiag_stress
def is_evaluated(self, entry_id):
max_force, max_diag_stress, max_nondiag_stress = self.get_max_force_stress(entry_id)
if max_force is None or max_diag_stress is None or max_nondiag_stress is None:
return False
elif max_force < self.target_forces and max_diag_stress < self.target_diag_stress + self.pressure:
if max_nondiag_stress < self.target_nondiag_stress:
return True
else:
return False
else:
return False
def add_random(self, random_probability=0.3):
"""
Add one random structure to the population
"""
entry_id = None
structure = Structure()
if self.composition is None:
raise ValueError('No composition associated to this population')
factor = np.random.randint(self.min_comp_mult, self.max_comp_mult + 1)
comp = self.composition.composition.copy()
# print("Initial composition: %s" % comp)
# print(Composition(comp))
# print(Composition(comp).symbols)
for i in comp:
comp[i] *= factor
new_comp = Composition(comp)
while True:
rnd = random.random()
condition = {'structure.nspecies': new_comp.nspecies,
'structure.natom': new_comp.natom}
if self.pcdb_source is None:
rnd = 0
elif len(self.sources[factor]) == 0:
rnd = 0
if self.pcdb_source is None or rnd < random_probability:
pcm_log.debug('Random Structure')
structure = Structure.random_cell(new_comp, method='stretching', stabilization_number=5, nparal=5,
periodic=True)
break
else:
pcm_log.debug('From source')
entry_id = self.sources[factor][np.random.randint(0, len(self.sources[factor]))]
structure = self.pcdb_source.get_structure(entry_id)
print("chosen structure from database =", structure)
sym = CrystalSymmetry(structure)
scale_factor = float(np.max(covalent_radius(new_comp.species)) /
np.max(covalent_radius(structure.species)))
reduce_scale = scale_factor ** (1. / 3) # WIH
msg = 'Mult: %d natom: %d From source: %s Spacegroup: %d Scaling: %7.3f'
print(msg % (factor, structure.natom, structure.formula, sym.number(), scale_factor))
# structure.set_cell(np.dot(scale_factor * np.eye(3), structure.cell)) # WIH
structure.set_cell(np.dot(reduce_scale * np.eye(3), structure.cell)) # WIH
print("symbols before change = ", structure.symbols)
structure.symbols = new_comp.symbols
print("symbols after change = ", structure.symbols)
self.sources[factor].remove(entry_id)
break
return self.new_entry(structure), entry_id
def check_duplicates(self, ids):
"""
        Finds duplicate structures by measuring their distance whenever the difference between
        their values is smaller than value_tol. If that distance is lower than 'distance_tolerance'
        the structures are considered duplicates.
        :param ids: Entry ids to be checked for duplicates
        :return: (dict) Dictionary of duplicates; the keys are the ids of the duplicated entries and the values
                 are the ids of the entries they duplicate. In general the energy of the 'value' is lower than
                 that of the 'key'
"""
ret = {}
selection = self.ids_sorted(ids)
values = np.array([self.value(i) for i in selection])
if len(values) == 0:
return ret
diffs = np.ediff1d(values)
for i in range(len(diffs)):
idiff = diffs[i]
if idiff < self.value_tol:
ident1 = selection[i]
ident2 = selection[i + 1]
pcm_log.debug('Testing distances between %s and %s' % (str(ident1), str(ident2)))
distance = self.distance(ident1, ident2)
# print 'Distance = ', distance
if distance < self.distance_tolerance:
pcm_log.debug('Distance %7.3f < %7.3f' % (distance, self.distance_tolerance))
ret[ident2] = ident1
if len(ret) > 0:
pcm_log.debug('Number of duplicates %d' % len(ret))
return ret
def get_duplicates(self, ids, tolerance, fast=False):
dupes_dict = {}
dupes_list = []
values = {}
for i in ids:
values[i] = self.value(i)
selection = self.ids_sorted(ids)
print('Searching duplicates in %d structures' % len(selection))
for i in range(len(selection) - 1):
entry_id = selection[i]
value_i = values[entry_id]
for j in range(i + 1, len(selection)):
entry_jd = selection[j]
if fast and entry_jd in dupes_list:
continue
value_j = values[entry_jd]
if abs(value_i - value_j) < self.value_tol:
distance = self.distance(entry_id, entry_jd)
if distance < tolerance:
if entry_id in dupes_dict:
dupes_dict[entry_id].append(entry_jd)
else:
dupes_dict[entry_id] = [entry_jd]
dupes_list.append(entry_jd)
return dupes_dict, [x for x in selection if x in dupes_list]
def cleaned_from_duplicates(self, ids):
selection = self.ids_sorted(ids)
duplicates_dict = self.check_duplicates(selection)
return [x for x in selection if x not in duplicates_dict.keys()]
def diff_values_matrix(self):
members = self.members
ret = np.zeros((len(members), len(members)))
for i in range(len(members)):
for j in range(i, len(members)):
if self.value(members[i]) is not None and self.value(members[j]) is not None:
ret[i, j] = np.abs(self.value(members[i]) - self.value(members[j]))
else:
ret[i, j] = float('nan')
ret[j, i] = ret[i, j]
return ret
def distance(self, entry_id, entry_jd, rcut=50):
ids_pair = [entry_id, entry_jd]
ids_pair.sort()
distance_entry = self.pcdb.db.distances.find_one({'pair': ids_pair}, {'distance': 1})
self.pcdb.db.distances.create_index([("pair", ASCENDING)])
if distance_entry is None:
print('Distance not in DB')
fingerprints = {}
for entry_ijd in [entry_id, entry_jd]:
if self.pcdb.db.fingerprints.find_one({'_id': entry_ijd}) is None:
structure = self.get_structure(entry_ijd)
analysis = StructureAnalysis(structure, radius=rcut)
x, ys = analysis.fp_oganov()
fingerprint = {'_id': entry_ijd}
for k in ys:
atomic_number1 = atomic_number(structure.species[k[0]])
atomic_number2 = atomic_number(structure.species[k[1]])
pair = '%06d' % min(atomic_number1 * 1000 + atomic_number2,
atomic_number2 * 1000 + atomic_number1)
fingerprint[pair] = list(ys[k])
if self.pcdb.db.fingerprints.find_one({'_id': entry_ijd}) is None:
self.pcdb.db.fingerprints.insert(fingerprint)
else:
self.pcdb.db.fingerprints.update({'_id': entry_ijd}, fingerprint)
fingerprints[entry_ijd] = fingerprint
else:
fingerprints[entry_ijd] = self.pcdb.db.fingerprints.find_one({'_id': entry_ijd})
dij = []
for pair in fingerprints[entry_id]:
if pair in fingerprints[entry_jd] and pair != '_id':
uvect1 = unit_vector(fingerprints[entry_id][pair])
uvect2 = unit_vector(fingerprints[entry_jd][pair])
dij.append(0.5 * (1.0 - np.dot(uvect1, uvect2)))
distance = float(np.mean(dij))
self.pcdb.db.distances.insert({'pair': ids_pair, 'distance': distance})
else:
distance = distance_entry['distance']
return distance
def add_from_db(self, db_settings, sizemax=1):
if self.composition is None:
raise ValueError('No composition associated to this population')
comp = Composition(self.composition)
readdb = get_database(db_settings)
index = 0
        for entry in readdb.entries.find({'structure.formula': comp.formula,
                                          'structure.natom': {'$gte': self.min_comp_mult * comp.natom,
                                                              '$lte': self.max_comp_mult * comp.natom}}):
if index < sizemax:
print('Adding entry ' + str(entry['_id']) + ' from ' + readdb.name)
self.new_entry(readdb.get_structure(entry['_id']))
index += 1
def move_random(self, entry_id, factor=0.2, in_place=False, kind='move'):
structure = self.get_structure(entry_id)
changer = StructureChanger(structure=structure)
if kind == 'move':
changer.random_move_many_atoms(epsilon=factor)
else: # change
changer.random_change(factor)
if in_place:
return self.set_structure(entry_id, changer.new_structure)
else:
return self.new_entry(changer.new_structure, active=False)
def move(self, entry_id, entry_jd, factor=0.2, in_place=False):
"""
        Moves entry_id in the direction of entry_jd.
        If in_place is True the movement overwrites the
        entry stored under entry_id.
:param factor:
:param entry_id:
:param entry_jd:
:param in_place:
:return:
"""
structure_mobile = self.get_structure(entry_id)
structure_target = self.get_structure(entry_jd)
if structure_mobile.natom != structure_target.natom:
            # Moving structures with a different number of atoms is only implemented for smaller structures moving
            # towards bigger ones by making a super-cell and only if the resulting size is smaller than 'max_comp_mult'
mult1 = structure_mobile.get_composition().gcd
mult2 = structure_target.get_composition().gcd
            lcd = mult1 * mult2 // gcd(mult1, mult2)
if lcd > self.max_comp_mult:
# The resulting structure is bigger than the limit
# cannot move
if not in_place:
return self.new_entry(structure_mobile)
else:
return entry_id
# We will move structure1 in the direction of structure2
match = StructureMatch(structure_target, structure_mobile)
match.match_size()
match.match_shape()
match.match_atoms()
displacements = match.reduced_displacement()
new_reduced = match.structure2.reduced + factor * displacements
new_cell = match.structure2.cell
new_symbols = match.structure2.symbols
new_structure = Structure(reduced=new_reduced, symbols=new_symbols, cell=new_cell)
if in_place:
return self.set_structure(entry_id, new_structure)
else:
return self.new_entry(new_structure, active=False)
def __str__(self):
ret = '\n'
ret += '[%s] Population type: %s\n' % (self.tag, 'Relax Structures')
ret += '[%s] Database: %s\n' % (self.tag, self.name)
ret += '[%s] Tag: %s\n' % (self.tag, self.tag)
ret += '[%s] Target-Forces: %7.2E\n' % (self.tag, self.target_forces)
ret += '[%s] Value tolerance: %7.2E\n' % (self.tag, self.value_tol)
ret += '[%s] Distance tolerance: %7.2E\n\n' % (self.tag, self.distance_tolerance)
if self.composition is not None:
ret += '[%s] Composition: %s\n' % (self.tag, self.composition.formula)
ret += '[%s] Minimal composition multiplier: %d\n' % (self.tag, self.min_comp_mult)
ret += '[%s] Maximal composition multiplier: %d\n' % (self.tag, self.max_comp_mult)
ret += '[%s] Members: %d\n' % (self.tag, len(self.members))
ret += '[%s] Actives: %d\n' % (self.tag, len(self.actives))
ret += '[%s] Evaluated: %d\n' % (self.tag, len(self.evaluated))
ret += '\n'
return ret
def value(self, entry_id):
entry = self.get_entry(entry_id)
structure = self.get_structure(entry_id)
if 'properties' not in entry:
pcm_log.debug('This entry has no properties %s' % str(entry['_id']))
return None
elif entry['properties'] is None:
return None
elif 'energy' not in entry['properties']:
pcm_log.debug('This entry has no energy in properties %s' % str(entry['_id']))
return None
else:
return entry['properties']['energy'] / structure.get_composition().gcd
@property
def to_dict(self):
return {'name': self.name,
'tag': self.tag,
'target_forces': self.target_forces,
'value_tol': self.value_tol,
'distance_tolerance': self.distance_tolerance}
def from_dict(self, population_dict):
return RelaxStructures(name=population_dict['name'],
tag=population_dict['tag'],
target_forces=population_dict['target_forces'],
value_tol=population_dict['value_tol'],
distance_tolerance=population_dict['distance_tolerance'])
def cross(self, ids):
assert len(ids) == 2
structure1 = self.get_structure(ids[0])
structure2 = self.get_structure(ids[1])
split_match = SplitMatch(structure1, structure2)
st1, st2 = split_match.get_simple_match()
entry_id = self.new_entry(st1, active=True)
entry_jd = self.new_entry(st2, active=True)
return entry_id, entry_jd
def str_entry(self, entry_id):
struct = self.get_structure(entry_id)
return str(struct)
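# Illustrative sketch (added for clarity, not used by the class above): the core
# of the fingerprint distance computed in RelaxStructures.distance(). For each
# species pair present in both fingerprints the vectors are normalised and
# compared with 0.5*(1 - cos), and the per-pair values are averaged. The input
# dicts mirror the documents stored in the 'fingerprints' collection.
def _example_fingerprint_distance(fp1, fp2):
    dij = []
    for pair in fp1:
        if pair in fp2 and pair != '_id':
            uvect1 = unit_vector(fp1[pair])
            uvect2 = unit_vector(fp2[pair])
            dij.append(0.5 * (1.0 - np.dot(uvect1, uvect2)))
    return float(np.mean(dij)) if dij else None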
|
#!/usr/bin/python
""" PN DCI """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.module_utils.basic import AnsibleModule
import shlex
import time
DOCUMENTATION = """
---
module: pn_dci
author: 'Pluribus Networks (devops@pluribusnetworks.com)'
short_description: Module to implement a DCI.
description:
    This module plans and implements a DCI (Data Center Interconnect) architecture.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
type: str
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
type: str
pn_fabric_name:
description:
- Specify name of the fabric.
required: False
type: str
pn_current_switch:
description:
- Specify name of the current switch.
required: True
type: str
pn_run_initial_setup:
description:
- Flag to accept eula
required: True
type: bool
pn_spine_list:
description:
- Specify list of third party spine switches.
required: True
type: list
pn_leaf_list:
description:
- Specify list of all DC leaf switches.
required: True
type: list
pn_inband_ip:
description:
- Specify the inband ip address.
required: False
type: str
pn_loopback_ip:
description:
- Specify the loopback interface ip.
required: False
type: str
pn_bgp_ip:
description:
- Specify the bgp ip for creating neighbor.
required: False
type: str
pn_bgp_redistribute:
description:
- Specify bgp_redistribute value to be added to vrouter.
required: False
type: str
choices: ['none', 'static', 'connected', 'rip', 'ospf']
default: 'connected'
pn_bgp_maxpath:
description:
- Specify bgp_maxpath value to be added to vrouter.
required: False
type: str
default: '16'
pn_ibgp_vlan:
description:
- Specify vlan for iBGP configuration.
required: False
type: str
default: '4040'
pn_ibgp_ip_range:
description:
- Specify iBGP ip range to assigned during iBGP configuration.
required: False
type: str
default: '75.75.75.0/30'
pn_csv_data:
description:
- Specify VRRP and vxlan config data in the form of csv.
required: False
type: str
pn_third_party_bgp_data:
description:
- Specify third party bgp config data in the form of csv.
required: False
type: str
"""
EXAMPLES = """
- name: Accept EULA, Join Fabric, Configure eBGP
pn_dci:
pn_cliusername: "{{ USERNAME }}"
pn_clipassword: "{{ PASSWORD }}"
pn_fabric_name: 'ansible_dci_fabric'
pn_current_switch: "{{ inventory_hostname }}"
pn_run_initial_setup: True
pn_spine_list: "{{ groups['spine'] }}"
pn_leaf_list: "{{ groups['leaf'] }}"
pn_inband_ip: '172.18.0.0/24'
pn_loopback_ip: '108.108.108.0/24'
pn_bgp_ip: '100.1.1.0/30'
pn_csv_data: "{{ lookup('file', '{{ dci_file }}') }}"
pn_third_party_bgp_data: "{{ lookup('file', '{{ third_party_file }}') }}"
- name: Configure iBGP, VRRP and Vxlan
pn_dci:
pn_cliusername: "{{ USERNAME }}"
pn_clipassword: "{{ PASSWORD }}"
pn_fabric_name: 'ansible_dci_fabric'
pn_current_switch: "{{ inventory_hostname }}"
pn_run_initial_setup: False
pn_spine_list: "{{ groups['spine'] }}"
pn_leaf_list: "{{ groups['leaf'] }}"
pn_inband_ip: '172.18.0.0/24'
pn_loopback_ip: '108.108.108.0/24'
pn_bgp_ip: '100.1.1.0/30'
pn_csv_data: "{{ lookup('file', '{{ dci_file }}') }}"
pn_third_party_bgp_data: "{{ lookup('file', '{{ third_party_file }}') }}"
"""
RETURN = """
stdout:
description: The set of responses for each command.
returned: always
type: str
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
failed:
description: Indicates whether or not the execution failed on the target.
returned: always
type: bool
"""
CHANGED_FLAG = []
def pn_cli(module):
"""
Method to generate the cli portion to launch the Netvisor cli.
:param module: The Ansible module to fetch username and password.
:return: The cli string for further processing.
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
return cli
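# Illustrative example of the prefix produced by pn_cli(): with
# pn_cliusername='network-admin' and pn_clipassword='admin' (placeholder
# credentials) the returned string is
#   '/usr/bin/cli --quiet --user network-admin:admin '
# and without credentials simply '/usr/bin/cli --quiet '.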
def run_cli(module, cli):
"""
    Method to execute the cli command on the target node(s) and return the
    output.
:param module: The Ansible module to fetch input parameters.
:param cli: The complete cli string to be executed on the target node(s).
:return: Output/Error or Success msg depending upon the response from cli.
"""
cli = shlex.split(cli)
rc, out, err = module.run_command(cli)
if out:
return out
if err:
module.exit_json(
error='1',
failed=True,
stderr=err.strip(),
msg='Operation Failed: ' + str(cli),
changed=False
)
else:
return 'Success'
def auto_accept_eula(module):
"""
Method to accept the EULA when we first login to a new switch.
:param module: The Ansible module to fetch input parameters.
:return: The output of run_cli() method.
"""
password = module.params['pn_clipassword']
cli = ' /usr/bin/cli --quiet --skip-setup eula-show '
cli = shlex.split(cli)
rc, out, err = module.run_command(cli)
if err:
cli = '/usr/bin/cli --quiet'
cli += ' --skip-setup --script-password '
cli += ' switch-setup-modify password ' + password
cli += ' eula-accepted true '
return run_cli(module, cli)
elif out:
return ' EULA has been accepted already '
def update_switch_names(module, switch_name):
"""
Method to update switch names.
:param module: The Ansible module to fetch input parameters.
:param switch_name: Name to assign to the switch.
:return: String describing switch name got modified or not.
"""
cli = pn_cli(module)
cli += ' switch-setup-show format switch-name '
if switch_name in run_cli(module, cli).split()[1]:
return ' Switch name is same as hostname! '
else:
cli = pn_cli(module)
cli += ' switch-setup-modify switch-name ' + switch_name
run_cli(module, cli)
return ' Updated switch name to match hostname! '
def assign_inband_ip(module):
"""
Method to assign in-band ips to switches.
:param module: The Ansible module to fetch input parameters.
:return: String describing if in-band ip got assigned or not.
"""
global CHANGED_FLAG
inband_ip = module.params['pn_inband_ip']
leaf_list = module.params['pn_leaf_list']
current_switch = module.params['pn_current_switch']
switch_position = leaf_list.index(current_switch) + 1
address = inband_ip.split('.')
static_part = str(address[0]) + '.' + str(address[1]) + '.'
last_octet = str(address[3]).split('/')
subnet = last_octet[1]
ip = static_part + str(switch_position) + '.'
ip += str(switch_position) + '/' + subnet
cli = pn_cli(module)
cli += 'switch-setup-modify in-band-ip %s ' % ip
if 'Setup completed successfully' in run_cli(module, cli):
CHANGED_FLAG.append(True)
return ' %s: In-band ip assigned with ip %s \n' % (current_switch, ip)
return ''
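# Illustrative sketch (added for clarity, not called by the module): the in-band
# address derivation used above. With pn_inband_ip='172.18.0.0/24' (the value
# from the EXAMPLES section) the switch at position N in pn_leaf_list is
# assigned 172.18.N.N/24, e.g. _example_inband_ip('172.18.0.0/24', 3) returns
# '172.18.3.3/24'.
def _example_inband_ip(inband_ip, switch_position):
    address = inband_ip.split('.')
    static_part = str(address[0]) + '.' + str(address[1]) + '.'
    subnet = str(address[3]).split('/')[1]
    return static_part + str(switch_position) + '.' + str(switch_position) + '/' + subnet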
def create_switch_routes(module, inband_ip):
"""
    Method to create switch routes to the in-band subnets of all leaf switches.
:param module: The Ansible module to fetch input parameters.
:param inband_ip: in-band ip of the switch.
"""
inband_address = inband_ip.split(':')[1].split('.')
static_part = str(inband_address[0]) + '.' + str(inband_address[1]) + '.'
gateway_static_part = static_part + str(inband_address[2]) + '.'
last_octet = str(inband_address[3]).split('/')
subnet = last_octet[1]
gateway_ip = gateway_static_part + str(int(last_octet[0]) + 1)
switch_count = 1
while switch_count <= len(module.params['pn_leaf_list']):
network_ip = static_part + str(switch_count) + '.' + str(0)
network_ip += '/' + subnet
cli = pn_cli(module)
cli += ' switch-route-create network %s gateway-ip %s ' % (
network_ip, gateway_ip
)
switch_count += 1
cli = shlex.split(cli)
module.run_command(cli)
def configure_fabric(module, switch):
"""
Method to configure (create/join) fabric.
:param module: The Ansible module to fetch input parameters.
:param switch: Name of the current switch.
    :return: String describing fabric creation/join details.
"""
global CHANGED_FLAG
output = ''
fabric_name = module.params['pn_fabric_name']
switch_index = module.params['pn_leaf_list'].index(switch)
address = module.params['pn_inband_ip'].split('.')
inband_static_part = str(address[0]) + '.' + str(address[1]) + '.'
last_octet = str(address[3]).split('/')
subnet = last_octet[1]
if switch_index == 0:
cli = pn_cli(module)
clicopy = cli
cli += 'fabric-show format name no-show-headers'
existing_fabric = run_cli(module, cli).split()
# Create fabric if not already created.
if fabric_name not in existing_fabric:
cli = clicopy
cli += 'fabric-create name %s ' % fabric_name
cli += ' fabric-network in-band control-network in-band '
run_cli(module, cli)
output += ' %s: Fabric %s created \n' % (switch, fabric_name)
CHANGED_FLAG.append(True)
else:
output += ' %s: Fabric %s already exists \n' % (switch, fabric_name)
# Indicate all subnets of the in-band interfaces of switches,
# that will be part of the fabric.
output += fabric_inband_net_create(module, inband_static_part,
subnet, switch)
else:
switch_ip = inband_static_part + str(1) + '.' + str(1)
# Join existing fabric.
if 'Already' in join_fabric(module, switch_ip):
output += ' %s: Already part of fabric %s \n' % (switch,
fabric_name)
else:
output += ' %s: Joined fabric %s \n' % (switch, fabric_name)
CHANGED_FLAG.append(True)
return output
def find_clustered_switches(module):
"""
Method to find clustered switches from the input csv file.
:param module: The Ansible module to fetch input parameters.
    :return: A dict whose first value is the list of cluster pairs and whose
    second value is the list of switches that are part of a cluster.
"""
csv_data = module.params['pn_csv_data']
csv_data = csv_data.replace(' ', '')
csv_data = csv_data.split('\n')
cluster_dict_info = {}
cluster_list = []
cluster_switches = []
for line in csv_data:
if line != '':
line = line.split(',')
cluster_node_1 = line[2]
cluster_node_2 = line[3]
if not cluster_node_2.isdigit():
temp_list = [cluster_node_1, cluster_node_2]
if temp_list not in cluster_list:
cluster_switches.append(cluster_node_1)
cluster_switches.append(cluster_node_2)
cluster_list.append(temp_list)
cluster_dict_info[0] = cluster_list
cluster_dict_info[1] = cluster_switches
return cluster_dict_info
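# Inferred CSV layout for pn_csv_data (based on the parsing in this module, not
# on external documentation): each row is either
#   vlan_id, vrrp_ip, node1, node2, vrrp_id, active_switch, vxlan_id   (clustered pair)
# or
#   vlan_id, vrrp_ip, switch, vxlan_id                                 (single switch)
# For example, with the hypothetical rows
#   '101,10.10.1.0/24,leaf1,leaf2,18,leaf1,22000'
#   '102,10.10.2.0/24,leaf3,22001'
# find_clustered_switches() would return
#   {0: [['leaf1', 'leaf2']], 1: ['leaf1', 'leaf2']}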
def fabric_inband_net_create(module, inband_static_part, subnet, switch):
"""
Method to create fabric in-band network.
:param module: The Ansible module to fetch input parameters.
:param inband_static_part: In-band ip address till second octet.
:param subnet: Subnet mask of in-band ip address.
:param switch: Name of the 1st switch in the DC.
:return: String describing if fabric in-band network got created or not.
"""
global CHANGED_FLAG
output = ''
leaf_list = module.params['pn_leaf_list']
cli = pn_cli(module)
clicopy = cli
for leaf in leaf_list:
switch_position = leaf_list.index(leaf) + 1
inband_network_ip = inband_static_part + str(switch_position) + '.'
inband_network_ip += str(switch_position)
cli = clicopy
cli += 'fabric-in-band-network-show format network no-show-headers'
existing_networks = run_cli(module, cli).split()
if inband_network_ip not in existing_networks:
cli = clicopy
cli += 'fabric-in-band-network-create network ' + inband_network_ip
cli += ' netmask ' + subnet
if 'Success' in run_cli(module, cli):
output += ' %s: Fabric in-band network created for %s \n' % (
switch, inband_network_ip
)
CHANGED_FLAG.append(True)
else:
output += ' %s: Fabric in-band network %s already exists \n' % (
switch, inband_network_ip
)
return output
def join_fabric(module, switch_ip):
"""
Method to join existing fabric by pointing to in-band ip of first switch.
:param module: The Ansible module to fetch input parameters.
:param switch_ip: In-band ip of the first/originator switch.
:return: The output of run_cli() method.
"""
cli = pn_cli(module)
clicopy = cli
cli += ' fabric-info format name no-show-headers'
cli = shlex.split(cli)
rc, out, err = module.run_command(cli)
if err:
cli = clicopy
cli += ' fabric-join switch-ip %s ' % switch_ip
elif out:
return ' Already in a fabric \n'
return run_cli(module, cli)
def create_vrouter(module, switch, bgp_as, router_id, bgp_nic_ip,
bgp_nic_l3_port, neighbor_ip, remote_as, in_band_nic_ip,
in_band_nic_netmask, fabric_network_address):
"""
Method to create vrouter.
:param module: The Ansible module to fetch input parameters.
:param switch: Name of the current switch.
:param bgp_as: BGP AS value of this switch.
:param router_id: Router id to be assigned to this vrouter.
:param bgp_nic_ip: Bgp_nic_ip for the vrouter creation.
:param bgp_nic_l3_port: L3 port number connected to neighbor switch.
:param neighbor_ip: Ip of the BGP neighbor node.
:param remote_as: BGP AS value of the neighbor node.
:param in_band_nic_ip: In-band nic ip of this switch.
:param in_band_nic_netmask: Netmask of in-band nic ip.
:param fabric_network_address: Fabric network address of the existing fabric
:return: String describing if vrouter got created or if it already exists.
"""
global CHANGED_FLAG
vrouter_name = switch + '-vrouter'
cli = pn_cli(module)
clicopy = cli
cli += ' vrouter-show format name no-show-headers '
existing_vrouter_names = run_cli(module, cli).split()
if vrouter_name not in existing_vrouter_names:
cli = clicopy
cli += ' fabric-comm-vrouter-bgp-create name %s ' % vrouter_name
cli += ' bgp-as %s router-id %s ' % (bgp_as, router_id)
cli += ' bgp-nic-ip %s ' % bgp_nic_ip
cli += ' bgp-nic-l3-port %s ' % bgp_nic_l3_port
cli += ' neighbor %s remote-as %s ' % (neighbor_ip, remote_as)
cli += ' bfd in-band-nic-ip %s ' % in_band_nic_ip
cli += ' in-band-nic-netmask %s ' % in_band_nic_netmask
cli += ' fabric-network %s ' % fabric_network_address
cli += ' allowas-in '
run_cli(module, cli)
CHANGED_FLAG.append(True)
return ' %s: Created %s \n' % (switch, vrouter_name)
else:
return ' %s: %s already exists \n' % (switch, vrouter_name)
def get_l3_port(module, neighbor_name):
"""
Method to get l3 port number which is connected to given neighbor.
:param module: The Ansible module to fetch input parameters.
:param neighbor_name: Name of the bgp neighbor host.
:return: l3 port number.
"""
cli = pn_cli(module)
cli += 'port-show hostname %s format port no-show-headers' % (
neighbor_name
)
ports = run_cli(module, cli).split()
cli = pn_cli(module)
cli += 'trunk-show ports %s format trunk-id no-show-headers ' % (
ports[0]
)
trunk_id = run_cli(module, cli)
if len(trunk_id) == 0 or trunk_id == 'Success':
return ports[0]
else:
return trunk_id
def configure_loopback_interface(module, switch, router_id):
"""
    Method to configure a loopback interface on a vrouter.
:param module: The Ansible module to fetch input parameters.
:param switch: Name of the switch.
:param router_id: Router id which is same as loopback ip.
:return: String describing if loopback interface got configured or not
"""
global CHANGED_FLAG
vrouter_name = switch + '-vrouter'
cli = pn_cli(module)
clicopy = cli
cli += ' vrouter-loopback-interface-show vrouter-name %s ' % vrouter_name
cli += ' format ip no-show-headers '
existing_ip = run_cli(module, cli)
if router_id not in existing_ip:
cli = clicopy
cli += ' vrouter-loopback-interface-add vrouter-name %s ' % vrouter_name
cli += ' ip %s ' % router_id
run_cli(module, cli)
cli = clicopy
cli += ' vrouter-bgp-network-add vrouter-name %s ' % vrouter_name
cli += ' network %s netmask 255.255.255.255 ' % router_id
run_cli(module, cli)
CHANGED_FLAG.append(True)
return ' %s: Configured loopback interface with ip %s \n' % (switch,
router_id)
else:
        return ' %s: Loopback interface %s has already been configured \n' % (
switch, router_id
)
def configure_ebgp_connections(module, switch, third_party_data, bgp_nic_ip):
"""
Method to configure eBGP connection to remaining third party neighbors.
:param module: The Ansible module to fetch input parameters.
:param switch: Name of the switch.
:param third_party_data: Third party BGP data in csv format.
:param bgp_nic_ip: Ip of first bgp neighbor added.
:return: String describing eBGP configuration.
"""
global CHANGED_FLAG
output = ''
vrouter_name = switch + '-vrouter'
skip_flag = False
address = bgp_nic_ip.split('.')
bgp_static_part = str(address[0]) + '.' + str(address[1]) + '.'
bgp_static_part += str(address[2]) + '.'
bgp_last_octet = str(address[3]).split('/')
bgp_subnet = bgp_last_octet[1]
for row in third_party_data:
row = row.split(',')
if not skip_flag and row[4] == switch:
skip_flag = True
continue
if skip_flag and row[4] == switch:
neighbor_name = row[0]
neighbor_ip = row[1]
remote_as = row[2]
address = neighbor_ip.split('.')
static_part = str(address[0]) + '.' + str(address[1]) + '.'
static_part += str(address[2]) + '.'
last_octet = str(int(address[3]) - 1)
ip = static_part + last_octet + '/' + bgp_subnet
l3_port = get_l3_port(module, neighbor_name)
cli = pn_cli(module)
clicopy = cli
cli += ' vrouter-interface-show vrouter-name %s ' % vrouter_name
cli += ' format ip no-show-headers '
            existing_ip = run_cli(module, cli).split()
            if ip not in existing_ip:
cli = clicopy
cli += ' vrouter-interface-add vrouter-name %s ' % vrouter_name
cli += ' l3-port %s ip %s ' % (l3_port, ip)
run_cli(module, cli)
output += ' %s: Added vrouter interface %s \n' % (switch, ip)
else:
output += ' %s: Vrouter interface %s already added \n' % (
switch, ip
)
cli = clicopy
cli += ' vrouter-bgp-show vrouter-name %s ' % vrouter_name
cli += ' format neighbor no-show-headers '
            existing_neighbor = run_cli(module, cli).split()
            if neighbor_ip not in existing_neighbor:
cli = clicopy
cli += ' vrouter-bgp-add vrouter-name %s ' % vrouter_name
cli += ' neighbor %s remote-as %s bfd ' % (neighbor_ip,
remote_as)
cli += ' allowas-in '
run_cli(module, cli)
output += ' %s: Added eBGP neighbor %s \n' % (switch,
neighbor_ip)
cli = clicopy
cli += ' vrouter-modify name %s ' % vrouter_name
cli += ' bgp-max-paths %s ' % module.params['pn_bgp_max_path']
cli += ' bgp-bestpath-as-path multipath-relax '
run_cli(module, cli)
else:
output += ' %s: eBGP neighbor %s already added \n' % (
switch, neighbor_ip
)
return output
def create_vlan(module, vlan_id, switch, scope):
"""
Method to create local vlan.
:param module: The Ansible module to fetch input parameters.
:param vlan_id: vlan id to create.
:param switch: Name of the switch.
:param scope: Scope of the vlan to create.
:return: String describing vlan creation details.
"""
global CHANGED_FLAG
cli = pn_cli(module)
clicopy = cli
cli += ' switch %s vlan-show format id no-show-headers ' % switch
existing_vlan_ids = run_cli(module, cli).split()
existing_vlan_ids = list(set(existing_vlan_ids))
if vlan_id not in existing_vlan_ids:
cli = clicopy
cli += ' switch %s vlan-create id %s scope %s ' % (switch, vlan_id,
scope)
run_cli(module, cli)
CHANGED_FLAG.append(True)
return ' %s: Vlan id %s with scope %s created \n' % (
switch, vlan_id, scope
)
else:
return ' %s: Vlan id %s with scope %s already exists \n' % (
switch, vlan_id, scope
)
def configure_ibgp_connection(module, switch, local_ip, remote_ip, remote_as):
"""
Method to configure iBGP connection between cluster members.
:param module: The Ansible module to fetch input parameters.
:param switch: Name of the switch.
:param local_ip: Vrouter interface ip of local switch.
:param remote_ip: Vrouter interface ip of remote switch.
:param remote_as: Remote-as value of cluster.
:return: String describing details of iBGP configuration made.
"""
global CHANGED_FLAG
output = ''
vrouter_name = switch + '-vrouter'
vlan_id = module.params['pn_ibgp_vlan']
cli = pn_cli(module)
clicopy = cli
cli += ' switch %s vrouter-interface-show vrouter-name %s ' % (switch,
vrouter_name)
cli += ' format ip no-show-headers '
    existing_ip = run_cli(module, cli).split()
    if local_ip not in existing_ip:
cli = clicopy
cli += ' switch %s vrouter-interface-add vrouter-name %s ' % (
switch, vrouter_name
)
cli += ' ip %s vlan %s ' % (local_ip, vlan_id)
run_cli(module, cli)
output += ' %s: Added vrouter interface with ip %s on %s \n' % (
switch, local_ip, vrouter_name
)
CHANGED_FLAG.append(True)
else:
output += ' %s: iBGP interface %s already added \n' % (switch, local_ip)
remote_ip = remote_ip.split('/')[0]
cli = clicopy
cli += ' switch %s vrouter-bgp-show vrouter-name %s ' % (switch,
vrouter_name)
cli += ' format neighbor no-show-headers '
    existing_neighbor = run_cli(module, cli).split()
    if remote_ip not in existing_neighbor:
cli = clicopy
cli += ' switch %s vrouter-bgp-add vrouter-name %s ' % (switch,
vrouter_name)
cli += ' neighbor %s remote-as %s next-hop-self bfd ' % (remote_ip,
remote_as)
run_cli(module, cli)
output += ' %s: Added iBGP neighbor %s for %s \n' % (switch, remote_ip,
vrouter_name)
CHANGED_FLAG.append(True)
else:
output += ' %s: iBGP neighbor %s already added \n' % (switch, remote_ip)
return output
def create_cluster(module, name, cluster_list):
"""
Method to create a cluster between two switches.
:param module: The Ansible module to fetch input parameters.
:param name: The name of the cluster to create.
:param cluster_list: List of cluster switches.
:return: String describing if cluster got created or if it already exists.
"""
global CHANGED_FLAG
cluster_node1 = cluster_list[0]
cluster_node2 = cluster_list[1]
cli = pn_cli(module)
clicopy = cli
cli += ' switch %s cluster-show format name no-show-headers ' % (
cluster_node1)
existing_clusters = run_cli(module, cli).split()
if name not in existing_clusters:
cli = clicopy
cli += ' switch %s cluster-create name %s ' % (cluster_node1, name)
cli += ' cluster-node-1 %s cluster-node-2 %s ' % (cluster_node1,
cluster_node2)
if 'Success' in run_cli(module, cli):
CHANGED_FLAG.append(True)
return ' %s: %s created successfully \n' % (cluster_node1, name)
else:
return ' %s: %s already exists \n' % (cluster_node1, name)
def modify_vrouter(module, switch, vrrp_id):
"""
Method to add vrrp id to a vrouter.
:param module: The Ansible module to fetch input parameters.
:param switch: Name of the switch.
:param vrrp_id: vrrp id to be assigned to vrouter.
:return: String describing if vrrp id got assigned or not.
"""
global CHANGED_FLAG
vrouter_name = switch + '-vrouter'
cli = pn_cli(module)
clicopy = cli
cli += ' vrouter-show name %s format hw-vrrp-id ' % vrouter_name
cli += ' no-show-headers '
existing_vrrp_id = run_cli(module, cli).split()
if vrrp_id not in existing_vrrp_id:
cli = clicopy
cli += ' switch %s vrouter-modify name %s ' % (switch, vrouter_name)
cli += ' hw-vrrp-id %s ' % vrrp_id
run_cli(module, cli)
return ' %s: Assigned vrrp_id %s to %s \n' % (switch, vrrp_id,
vrouter_name)
else:
return ' %s: Vrrp-id %s already assigned to %s \n' % (switch, vrrp_id,
vrouter_name)
def create_vrouter_interface(module, switch, vrrp_ip, vlan_id, vrrp_id,
ip_count, vrrp_priority):
"""
Method to add vrouter interface and assign IP to it along with
vrrp_id and vrrp_priority.
:param module: The Ansible module to fetch input parameters.
:param switch: The switch name on which interfaces will be created.
:param vrrp_ip: IP address to be assigned to vrouter interface.
:param vlan_id: vlan_id to be assigned.
:param vrrp_id: vrrp_id to be assigned.
    :param vrrp_priority: priority to be given (110 for the active switch).
:param ip_count: The value of fourth octet in the ip.
:return: String describing if vrouter interface got added or not.
"""
global CHANGED_FLAG
vrouter_name = switch + '-vrouter'
ip_addr = vrrp_ip.split('.')
fourth_octet = ip_addr[3].split('/')
subnet = fourth_octet[1]
static_ip = ip_addr[0] + '.' + ip_addr[1] + '.' + ip_addr[2] + '.'
vip = static_ip + '1' + '/' + subnet
interface_ip = static_ip + ip_count + '/' + subnet
cli = pn_cli(module)
clicopy = cli
cli += ' vrouter-interface-show vlan %s ip %s ' % (vlan_id, interface_ip)
cli += ' format switch no-show-headers '
existing_vrouter = run_cli(module, cli).split()
existing_vrouter = list(set(existing_vrouter))
if vrouter_name not in existing_vrouter:
cli = clicopy
cli += ' switch %s vrouter-interface-add vrouter-name %s ' % (
switch, vrouter_name
)
cli += ' ip ' + interface_ip
cli += ' vlan %s if data ' % vlan_id
run_cli(module, cli)
output = ' %s: Added vrouter interface with ip %s to %s \n' % (
switch, interface_ip, vrouter_name
)
CHANGED_FLAG.append(True)
else:
output = ' %s: Vrouter interface %s already exists for %s \n' % (
switch, interface_ip, vrouter_name
)
cli = clicopy
cli += ' vrouter-interface-show vrouter-name %s ip %s vlan %s ' % (
vrouter_name, interface_ip, vlan_id
)
cli += ' format nic no-show-headers '
eth_port = run_cli(module, cli).split()
eth_port.remove(vrouter_name)
cli = clicopy
cli += ' vrouter-interface-show vlan %s ip %s vrrp-primary %s ' % (
vlan_id, vip, eth_port[0]
)
cli += ' format switch no-show-headers '
existing_vrouter = run_cli(module, cli).split()
existing_vrouter = list(set(existing_vrouter))
if vrouter_name not in existing_vrouter:
cli = clicopy
cli += ' switch %s vrouter-interface-add vrouter-name %s ' % (
switch, vrouter_name
)
cli += ' ip ' + vip
cli += ' vlan %s if data vrrp-id %s ' % (vlan_id, vrrp_id)
cli += ' vrrp-primary %s vrrp-priority %s ' % (eth_port[0],
vrrp_priority)
run_cli(module, cli)
output += ' %s: Added vrouter interface with ip %s to %s \n' % (
switch, vip, vrouter_name
)
CHANGED_FLAG.append(True)
else:
output += ' %s: Vrouter interface %s already exists for %s \n' % (
switch, vip, vrouter_name
)
return output
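# Illustrative sketch (added for clarity, not called by the module): the address
# split performed in create_vrouter_interface(). The VRRP VIP always takes the
# .1 host address of the given subnet while each cluster member gets .<ip_count>,
# e.g. _example_vrrp_ips('10.10.1.0/24', '2') returns
# ('10.10.1.1/24', '10.10.1.2/24'); the subnet here is a hypothetical example.
def _example_vrrp_ips(vrrp_ip, ip_count):
    ip_addr = vrrp_ip.split('.')
    subnet = ip_addr[3].split('/')[1]
    static_ip = ip_addr[0] + '.' + ip_addr[1] + '.' + ip_addr[2] + '.'
    vip = static_ip + '1' + '/' + subnet
    interface_ip = static_ip + ip_count + '/' + subnet
    return vip, interface_ip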
def add_vrouter_interface_for_non_cluster_switch(module, vrrp_ip, switch,
vlan_id):
"""
Method to add vrouter interface for non clustered switches.
:param module: The Ansible module to fetch input parameters.
:param vrrp_ip: Vrouter interface ip to be added.
:param switch: Name of non clustered switch.
:param vlan_id: vlan id.
:return: String describing if vrouter interface got added or not.
"""
global CHANGED_FLAG
vrouter_name = switch + '-vrouter'
ip_addr = vrrp_ip.split('.')
fourth_octet = ip_addr[3].split('/')
subnet = fourth_octet[1]
static_ip = ip_addr[0] + '.' + ip_addr[1] + '.' + ip_addr[2] + '.'
gateway_ip = static_ip + '1' + '/' + subnet
cli = pn_cli(module)
clicopy = cli
cli += ' vrouter-interface-show ip %s vlan %s ' % (gateway_ip, vlan_id)
cli += ' format switch no-show-headers '
existing_vrouter = run_cli(module, cli).split()
existing_vrouter = list(set(existing_vrouter))
if vrouter_name not in existing_vrouter:
cli = clicopy
cli += ' switch %s vrouter-interface-add vrouter-name %s ' % (
switch, vrouter_name
)
cli += ' vlan ' + vlan_id
cli += ' ip ' + gateway_ip
run_cli(module, cli)
CHANGED_FLAG.append(True)
return ' %s: Added vrouter interface with ip %s on %s \n' % (
switch, gateway_ip, vrouter_name
)
else:
return ' %s: Vrouter interface %s already exists on %s \n' % (
switch, gateway_ip, vrouter_name
)
def configure_vrrp(module):
"""
Method to configure VRRP L3.
:param module: The Ansible module to fetch input parameters.
:return: Output string of configuration.
"""
output = ''
csv_data = module.params['pn_csv_data']
csv_data = csv_data.replace(" ", "")
csv_data_list = csv_data.split('\n')
# Parse csv file data and configure VRRP.
for row in csv_data_list:
elements = row.split(',')
cluster_list = []
vlan_id = elements[0]
vrrp_ip = elements[1]
cluster_node1 = str(elements[2])
if len(elements) > 5:
# Configure VRRP for clustered switches
cluster_node2 = str(elements[3])
vrrp_id = elements[4]
active_switch = str(elements[5])
cluster_list.append(cluster_node1)
cluster_list.append(cluster_node2)
cluster_name = cluster_node1 + '-to-' + cluster_node2 + '-cluster'
host_count = 1
# Create a cluster and vlan with scope cluster
output += create_cluster(module, cluster_name, cluster_list)
output += create_vlan(module, vlan_id, cluster_node1, 'cluster')
for switch in cluster_list:
output += modify_vrouter(module, switch, vrrp_id)
host_count += 1
vrrp_priority = '110' if switch == active_switch else '100'
output += create_vrouter_interface(module, switch, vrrp_ip,
vlan_id, vrrp_id,
str(host_count),
vrrp_priority)
else:
# Configure VRRP for non clustered switches.
output += create_vlan(module, vlan_id, cluster_node1, 'local')
output += add_vrouter_interface_for_non_cluster_switch(
module, vrrp_ip, cluster_node1, vlan_id)
return output
def add_vxlan_to_vlan(module, vlan_id, vxlan, switch):
"""
Method to add vxlan mapping to vlan.
:param module: The Ansible module to fetch input parameters.
:param vlan_id: vlan id to be modified.
:param vxlan: vxlan id to be assigned to vlan.
:param switch: Name of the switch on which vlan is present.
    :return: String describing if vxlan got added to the vlan or not.
"""
global CHANGED_FLAG
cli = pn_cli(module)
cli += ' switch %s vlan-show id %s format vxlan ' % (switch, vlan_id)
cli += ' no-show-headers '
existing_vxlan = run_cli(module, cli).split()
if vxlan not in existing_vxlan:
cli = pn_cli(module)
cli += ' switch %s vlan-modify id %s vxlan %s ' % (switch, vlan_id,
vxlan)
run_cli(module, cli)
return ' %s: Added vxlan %s to vlan %s \n' % (switch, vxlan, vlan_id)
else:
        return ' %s: Vxlan %s already added to vlan %s \n' % (switch, vxlan,
                                                              vlan_id)
def get_vrouter_interface_ip(module, switch, vlan):
"""
Method to get vrouter interface ip to be used as local ip.
:param module: The Ansible module to fetch input parameters.
:param switch: Name of the local switch.
:param vlan: Vlan id for which to find vrouter interface ip.
:return: Vrouter interface ip.
"""
vrouter_name = switch + '-vrouter'
cli = pn_cli(module)
cli += ' switch %s vrouter-interface-show vrouter-name %s ' % (switch,
vrouter_name)
cli += ' vlan %s format ip no-show-headers ' % vlan
output = run_cli(module, cli).split()
output = list(set(output))
output.remove(vrouter_name)
address = output[0].split('.')
ip = address[0] + '.' + address[1] + '.'
ip += address[2] + '.' + str(1)
return ip
def create_tunnel(module, local_switch, tunnel_name, scope, local_ip, remote_ip,
peer_switch=None):
"""
Method to create tunnel to carry vxlan traffic.
:param module: The Ansible module to fetch input parameters.
:param local_switch: Name of the switch on which tunnel will be created.
:param tunnel_name: Name of the tunnel to create.
:param scope: Scope of the tunnel to create.
:param local_ip: Local vrouter interface ip.
:param remote_ip: Remote vrouter interface ip.
:param peer_switch: Name of the peer clustered switch. In case of
unclustered switch, this will be None.
:return: String describing if tunnel got created or if it already exists.
"""
global CHANGED_FLAG
cli = pn_cli(module)
cli += ' switch %s tunnel-show format name no-show-headers ' % local_switch
existing_tunnels = run_cli(module, cli).split()
if tunnel_name not in existing_tunnels:
cli = pn_cli(module)
cli += ' switch %s tunnel-create name %s scope %s ' % (local_switch,
tunnel_name,
scope)
cli += ' local-ip %s remote-ip %s ' % (local_ip, remote_ip)
cli += ' vrouter-name %s ' % (local_switch + '-vrouter')
if peer_switch is not None:
cli += ' peer-vrouter-name %s ' % (peer_switch + '-vrouter')
if 'Success' in run_cli(module, cli):
CHANGED_FLAG.append(True)
return ' %s: %s created successfully \n ' % (local_switch,
tunnel_name)
else:
return ' %s: Could not create %s \n' % (local_switch, tunnel_name)
else:
return ' %s: %s already exists \n' % (local_switch, tunnel_name)
def add_vxlan_to_tunnel(module, vxlan, tunnel_name, switch):
"""
Method to add vxlan to created tunnel so that it can carry vxlan traffic.
:param module: The Ansible module to fetch input parameters.
:param vxlan: vxlan id to add to tunnel.
:param tunnel_name: Name of the tunnel on which vxlan will be added.
:param switch: Name of the switch on which tunnel exists.
:return: String describing if vxlan got added to tunnel or not.
"""
global CHANGED_FLAG
cli = pn_cli(module)
cli += ' switch %s tunnel-vxlan-show format switch no-show-headers ' % (
switch)
existing_tunnel_vxlans = run_cli(module, cli).split()
if tunnel_name not in existing_tunnel_vxlans:
cli = pn_cli(module)
cli += ' switch %s tunnel-vxlan-add name %s vxlan %s ' % (switch,
tunnel_name,
vxlan)
if 'Success' in run_cli(module, cli):
CHANGED_FLAG.append(True)
return ' %s: Added vxlan %s to %s \n' % (switch, vxlan, tunnel_name)
else:
return ' %s: Could not add vxlan %s to %s \n' % (switch, vxlan,
tunnel_name)
else:
return ' %s: vxlan %s already added to %s \n' % (switch, vxlan,
tunnel_name)
def configure_vxlan(module):
"""
Method to configure vxlan.
:param module: The Ansible module to fetch input parameters.
:return: Output string of configuration.
"""
output = ''
vxlan_switches_list = []
csv_data = module.params['pn_csv_data']
csv_data = csv_data.replace(" ", "")
csv_data_list = csv_data.split('\n')
for row in csv_data_list:
elements = row.split(',')
vlan_id = elements[0]
leaf_switch_1 = elements[2]
if len(elements) == 7:
leaf_switch_2 = elements[3]
vxlan_id = elements[6]
vxlan_switches_list.append([leaf_switch_1, leaf_switch_2, vlan_id])
output += add_vxlan_to_vlan(module, vlan_id, vxlan_id,
leaf_switch_1)
output += add_vxlan_to_vlan(module, vlan_id, vxlan_id,
leaf_switch_2)
elif len(elements) == 4:
vxlan_id = elements[3]
vxlan_switches_list.append([leaf_switch_1, vlan_id])
output += add_vxlan_to_vlan(module, vlan_id, vxlan_id,
leaf_switch_1)
for row in csv_data_list:
elements = row.split(',')
vlan_id = elements[0]
leaf_switch_1 = elements[2]
if len(elements) == 7 or len(elements) == 4:
if len(elements) == 7:
leaf_switch_2 = elements[3]
vxlan_id = elements[6]
cluster = [leaf_switch_1, leaf_switch_2, vlan_id]
scope = 'cluster'
else:
leaf_switch_2 = None
vxlan_id = elements[3]
cluster = [leaf_switch_1, vlan_id]
scope = 'local'
for switches in vxlan_switches_list:
if switches == cluster:
continue
else:
tunnel_name = leaf_switch_1 + '-' + switches[0] + '-tunnel'
local_ip = get_vrouter_interface_ip(module, leaf_switch_1,
vlan_id)
if len(switches) == 3:
remote_vlan = switches[2]
else:
remote_vlan = switches[1]
remote_ip = get_vrouter_interface_ip(module, switches[0],
remote_vlan)
output += create_tunnel(module, leaf_switch_1, tunnel_name,
scope, local_ip, remote_ip,
leaf_switch_2)
output += add_vxlan_to_tunnel(module, vxlan_id, tunnel_name,
leaf_switch_1)
return output
def configure_ibgp_vrrp_vxlan(module):
"""
Method to configure iBGP, VRRP and Vxlan for DCI.
:param module: The Ansible module to fetch input parameters.
:return: String describing details of all configurations.
"""
global CHANGED_FLAG
output = ''
cluster_dict_info = find_clustered_switches(module)
cluster_list = cluster_dict_info[0]
# Configure iBGP connection between clusters
for cluster in cluster_list:
cluster_node1 = cluster[0]
cluster_node2 = cluster[1]
vlan_id = module.params['pn_ibgp_vlan']
vlan_scope = 'local'
ibgp_ip_range = module.params['pn_ibgp_ip_range']
subnet_count = 0
# Create local vlans on both cluster nodes.
output += create_vlan(module, vlan_id, cluster_node1, vlan_scope)
output += create_vlan(module, vlan_id, cluster_node2, vlan_scope)
address = ibgp_ip_range.split('.')
static_part = str(address[0]) + '.' + str(address[1]) + '.'
static_part += str(address[2]) + '.'
last_octet = str(address[3]).split('/')
subnet = last_octet[1]
ip_count = subnet_count * 4
node1_ip = static_part + str(ip_count + 1) + '/' + subnet
node2_ip = static_part + str(ip_count + 2) + '/' + subnet
subnet_count += 1
# Get the bgp-as values of cluster nodes.
third_party_data = module.params['pn_third_party_bgp_data'].replace(' ',
'')
third_party_data = third_party_data.split('\n')
for row in third_party_data:
row = row.split(',')
if row[4] == cluster_node1 or row[4] == cluster_node2:
bgp_as = row[3]
break
# Configure iBGP connection.
output += configure_ibgp_connection(module, cluster_node1, node1_ip,
node2_ip, bgp_as)
output += configure_ibgp_connection(module, cluster_node2, node2_ip,
node1_ip, bgp_as)
# Configure VRRP to be used for VTEP HA
output += configure_vrrp(module)
# Configure vxlan tunnels
output += configure_vxlan(module)
return output
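# Illustrative sketch (added for clarity, not called by the module): the iBGP
# address pair derived above. With the documented default pn_ibgp_ip_range of
# '75.75.75.0/30' and subnet_count=0 the two cluster members get 75.75.75.1/30
# and 75.75.75.2/30 on the iBGP vlan.
def _example_ibgp_pair(ibgp_ip_range, subnet_count):
    address = ibgp_ip_range.split('.')
    static_part = str(address[0]) + '.' + str(address[1]) + '.' + str(address[2]) + '.'
    subnet = str(address[3]).split('/')[1]
    ip_count = subnet_count * 4
    node1_ip = static_part + str(ip_count + 1) + '/' + subnet
    node2_ip = static_part + str(ip_count + 2) + '/' + subnet
    return node1_ip, node2_ip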
def implement_dci(module):
"""
Method to implement initial DCI setup: fabric creation/join and eBGP.
:param module: The Ansible module to fetch input parameters.
:return: String describing details of DCI implementation.
"""
global CHANGED_FLAG
output = ''
current_switch = module.params['pn_current_switch']
bgp_ip = module.params['pn_bgp_ip']
leaf_list = module.params['pn_leaf_list']
loopback_ip = module.params['pn_loopback_ip']
third_party_data = module.params['pn_third_party_bgp_data'].replace(' ', '')
third_party_data = third_party_data.split('\n')
address = bgp_ip.split('.')
bgp_static_part = str(address[0]) + '.' + str(address[1]) + '.'
bgp_static_part += str(address[2]) + '.'
bgp_last_octet = str(address[3]).split('/')
bgp_subnet = bgp_last_octet[1]
address = loopback_ip.split('.')
loopback_static_part = str(address[0]) + '.' + str(address[1]) + '.'
# Calculate router-id to be assigned to vrouter.
switch_index = leaf_list.index(current_switch)
router_count = switch_index + 1
router_id = loopback_static_part + str(router_count) + '.'
router_id += str(router_count)
# Calculate in-band-nic-ip and in-band-nic-netmask for vrouter creation.
cli = pn_cli(module)
cli += ' switch-setup-show format in-band-ip no-show-headers '
inband_ip = run_cli(module, cli)
address = inband_ip.split(':')[1]
address = address.replace(' ', '')
address = address.split('.')
inband_static_part = str(address[0]) + '.' + str(address[1]) + '.'
inband_static_part += str(address[2]) + '.'
inband_last_octet = str(address[3]).split('/')
inband_nic_ip = inband_static_part + str(int(inband_last_octet[0]) + 1)
inband_nic_netmask = inband_last_octet[1]
# Get the neighbor ip and remote-as value of the first neighbor
# Third party csv format: (third party switch name, neighbor-ip,
# bgp-as of third party switch, remote-as, neighbor switch name)
neighbor_name, neighbor_ip, remote_as, bgp_as = None, None, None, None
for row in third_party_data:
row = row.split(',')
if row[4] == current_switch:
neighbor_name = row[0]
neighbor_ip = row[1]
remote_as = row[2]
bgp_as = row[3]
break
if neighbor_name is None or neighbor_ip is None or remote_as is None:
return ' %s: Could not find remote bgp data \n' % current_switch
# Calculate bgp-nic-l3-port number connected to first neighbor
bgp_nic_l3_port = get_l3_port(module, neighbor_name)
# Calculate fabric-network address
fabric_network_address = str(address[0]) + '.' + str(address[1]) + '.'
fabric_network_address += str(1) + '.' + str(0) + '/'
fabric_network_address += inband_nic_netmask
address = neighbor_ip.split('.')
n_static_part = str(address[0]) + '.' + str(address[1]) + '.'
n_static_part += str(address[2]) + '.'
n_last_octet = str(int(address[3]) - 1)
bgp_nic_ip = n_static_part + n_last_octet + '/' + bgp_subnet
# Create and configure vrouter on this switch.
output += create_vrouter(module, current_switch, bgp_as, router_id,
bgp_nic_ip, bgp_nic_l3_port, neighbor_ip,
remote_as, inband_nic_ip, inband_nic_netmask,
fabric_network_address)
# Configure other eBGP connection to third party switches
output += configure_ebgp_connections(module, current_switch,
third_party_data, bgp_nic_ip)
# Configure loopback interface for debugging purpose.
output += configure_loopback_interface(module, current_switch, router_id)
# Create switch routes to all other switches
if switch_index != 0:
create_switch_routes(module, inband_ip)
time.sleep(10)
# Configure fabric
output += configure_fabric(module, current_switch)
return output
def toggle_40g_local(module):
"""
Method to toggle 40g ports to 10g ports.
:param module: The Ansible module to fetch input parameters.
:return: The output messages for assignment.
"""
output = ''
cli = pn_cli(module)
clicopy = cli
cli += ' lldp-show format local-port no-show-headers '
local_ports = run_cli(module, cli).split()
cli = clicopy
cli += ' port-config-show speed 40g '
cli += ' format port no-show-headers '
ports_40g = run_cli(module, cli)
if len(ports_40g) > 0 and ports_40g != 'Success':
ports_40g = ports_40g.split()
ports_to_modify = list(set(ports_40g) - set(local_ports))
for port in ports_to_modify:
next_port = str(int(port) + 1)
cli = clicopy
cli += ' port-show port %s format bezel-port' % next_port
cli += ' no-show-headers'
bezel_port = run_cli(module, cli).split()[0]
if '.2' in bezel_port:
end_port = int(port) + 3
range_port = port + '-' + str(end_port)
cli = clicopy
cli += ' port-config-modify port %s ' % port
cli += ' disable '
output += 'port ' + port + ' disabled'
output += run_cli(module, cli)
cli = clicopy
cli += ' port-config-modify port %s ' % port
cli += ' speed 10g '
output += 'port ' + port + ' converted to 10g'
output += run_cli(module, cli)
cli = clicopy
cli += ' port-config-modify port %s ' % range_port
cli += ' enable '
output += 'port range ' + range_port + ' enabled'
output += run_cli(module, cli)
time.sleep(10)
return output
def main():
""" This section is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_fabric_name=dict(required=False, type='str'),
pn_run_initial_setup=dict(required=True, type='bool'),
pn_current_switch=dict(required=False, type='str'),
pn_spine_list=dict(required=True, type='list'),
pn_leaf_list=dict(required=True, type='list'),
pn_inband_ip=dict(required=False, type='str',
default='172.16.0.0/24'),
pn_loopback_ip=dict(required=False, type='str',
default='109.109.109.0/24'),
pn_bgp_ip=dict(required=False, type='str', default='100.1.1.0/24'),
pn_bgp_redistribute=dict(required=False, type='str',
default='connected'),
pn_bgp_max_path=dict(required=False, type='str', default='16'),
pn_ibgp_vlan=dict(required=False, type='str', default='4040'),
pn_ibgp_ip_range=dict(required=False, type='str',
default='75.75.75.0/30'),
pn_csv_data=dict(required=False, type='str'),
pn_third_party_bgp_data=dict(required=False, type='str'),
)
)
current_switch = module.params['pn_current_switch']
message = ''
global CHANGED_FLAG
if module.params['pn_run_initial_setup']:
# Auto accept EULA
if 'Setup completed successfully' in auto_accept_eula(module):
message += ' %s: EULA accepted \n' % current_switch
CHANGED_FLAG.append(True)
else:
message += ' %s: EULA has already been accepted \n' % current_switch
# Update switch names to match host names from hosts file
if 'Updated' in update_switch_names(module, current_switch):
CHANGED_FLAG.append(True)
# Toggle 40g ports to 10g
if toggle_40g_local(module):
message += ' %s: Toggled 40G ports to 10G \n' % current_switch
CHANGED_FLAG.append(True)
# Assign in-band ip
message += assign_inband_ip(module)
# Implement Data Center Interconnect
message += implement_dci(module)
else:
# Configure iBGP, VRRP and vxlan
message += configure_ibgp_vrrp_vxlan(module)
# Exit the module and return the required JSON
module.exit_json(
stdout=message,
error='0',
failed=False,
changed=True if True in CHANGED_FLAG else False
)
if __name__ == '__main__':
main()
|
0:'manicTimeDur'
1:'alpiskidur'
2:'climbdur'
3:'downskidur'
4:'mtbikedur'
5:'roadbikedur'
6:'swimdur'
7:'viadur'
8:'walkdur'
9:'nbkeys'
10:'nbclicks'
11:'totcal'
12:'totsteps'
13:'alpiskical'
14:'alpiskisteps'
15:'climbcal'
16:'climbsteps'
17:'downskical'
18:'downskisteps'
19:'mtbikecal'
20:'mtbikesteps'
21:'roadbikecal'
22:'roadbikesteps'
23:'swimcal'
24:'swimsteps'
25:'viacal'
26:'viasteps'
27:'walkcal'
28:'walksteps'
29:'stress'
30:'mood'
31:'socquant'
32:'weight'
33:'paper'
34:'ubuntu'
35:'driving'
36:'store'
37:'effortInt' climbOrViaMaxInt
38:'ridingcar'
39:'screenSaver'
0:'Knees'
1:'Head'
2:'Hands'
3:'meanIntensity'
4:'maxIntensity'
|
import copy
from distutils.version import LooseVersion
import pickle
import numpy as np
from numpy.linalg import LinAlgError
from numpy.testing import (
assert_allclose,
assert_array_almost_equal,
assert_equal,
assert_no_warnings,
assert_raises,
assert_warns,
suppress_warnings,
)
import pytest
from randomgen import MT19937, PCG64, ExtendedGenerator
try:
from numpy.random import Generator
except ImportError:
from randomgen import Generator # type: ignore[misc]
try:
from scipy import linalg # noqa: F401
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
SEED = 1234567890
MV_SEED = 123456789
@pytest.fixture(scope="module")
def seed():
return SEED
@pytest.fixture(scope="module")
def mv_seed():
return MV_SEED
@pytest.fixture(scope="function")
def extended_gen():
pcg = PCG64(0, mode="sequence")
return ExtendedGenerator(pcg)
@pytest.fixture(scope="function")
def extended_gen_legacy():
return ExtendedGenerator(MT19937(mode="legacy"))
_mt19937 = MT19937(SEED, mode="legacy")
random = ExtendedGenerator(_mt19937)
NP_LT_118 = LooseVersion(np.__version__) < LooseVersion("1.18.0")
@pytest.mark.skipif(NP_LT_118, reason="Can only test with NumPy >= 1.18")
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal_method(seed, method):
from numpy.random import MT19937 as NPMT19937
random = ExtendedGenerator(NPMT19937(seed))
mean = (0.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size, method=method)
desired = np.array(
[
[
[-1.747478062846581, 11.25613495182354],
[-0.9967333370066214, 10.342002097029821],
],
[
[0.7850019631242964, 11.181113712443013],
[0.8901349653255224, 8.873825399642492],
],
[
[0.7130260107430003, 9.551628690083056],
[0.7127098726541128, 11.991709234143173],
],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = random.multivariate_normal(mean, cov, method=method)
desired = np.array([0.233278563284287, 9.424140804347195])
assert_array_almost_equal(actual, desired, decimal=15)
# Check path with scalar size works correctly
scalar = random.multivariate_normal(mean, cov, 3, method=method)
tuple1d = random.multivariate_normal(mean, cov, (3,), method=method)
assert scalar.shape == tuple1d.shape == (3, 2)
# Check that non-symmetric covariance input raises an exception when
# check_valid='raise' is used with the default svd method.
mean = [0, 0]
cov = [[1, 2], [1, 2]]
assert_raises(
ValueError, random.multivariate_normal, mean, cov, check_valid="raise"
)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov, method="eigh")
assert_raises(LinAlgError, random.multivariate_normal, mean, cov, method="cholesky")
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov, check_valid="ignore")
# and that it raises ValueError when check_valid='raise'
assert_raises(
ValueError, random.multivariate_normal, mean, cov, check_valid="raise"
)
assert_raises(
ValueError,
random.multivariate_normal,
mean,
cov,
check_valid="raise",
method="eigh",
)
# check degenerate samples from singular covariance matrix
cov = [[1, 1], [1, 1]]
if method in ("svd", "eigh"):
samples = random.multivariate_normal(mean, cov, size=(3, 2), method=method)
assert_array_almost_equal(samples[..., 0], samples[..., 1], decimal=6)
else:
assert_raises(
LinAlgError, random.multivariate_normal, mean, cov, method="cholesky"
)
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov, method=method)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert random.multivariate_normal(mu, cov, size=3).shape == (3, 2)
assert_raises(
ValueError, random.multivariate_normal, mean, cov, check_valid="other"
)
assert_raises(ValueError, random.multivariate_normal, np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal, mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal, mu, np.eye(3))
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal_basic_stats(seed, method):
random = ExtendedGenerator(MT19937(seed, mode="sequence"))
n_s = 1000
mean = np.array([1, 2])
cov = np.array([[2, 1], [1, 2]])
s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)
s_center = s - mean
cov_emp = (s_center.T @ s_center) / (n_s - 1)
# these are pretty loose and are only designed to detect major errors
assert np.all(np.abs(s_center.mean(-2)) < 0.1)
assert np.all(np.abs(cov_emp - cov) < 0.2)
@pytest.mark.parametrize("size", [(4, 3, 2), (5, 4, 3, 2)])
@pytest.mark.parametrize("mean", [np.zeros(2), np.zeros((3, 3))])
def test_multivariate_normal_bad_size(mean, size):
cov = np.eye(4)
with pytest.raises(ValueError):
random.multivariate_normal(mean, cov)
mean = np.zeros((2, 3, 4))
with pytest.raises(ValueError):
random.multivariate_normal(mean, cov, size=size)
with pytest.raises(ValueError):
random.multivariate_normal(0, [[1]], size=size)
with pytest.raises(ValueError):
random.multivariate_normal([0], [1], size=size)
def test_multivariate_normal(seed):
random.bit_generator.seed(seed)
mean = (0.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size)
desired = np.array(
[
[
[-3.34929721161096100, 9.891061435770858],
[-0.12250896439641100, 9.295898449738300],
],
[
[0.48355927611635563, 10.127832101772366],
[3.11093021424924300, 10.283109168794352],
],
[
[-0.20332082341774727, 9.868532121697195],
[-1.33806889550667330, 9.813657233804179],
],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = random.multivariate_normal(mean, cov)
desired = np.array([-1.097443117192574, 10.535787051184261])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning when check_valid="ignore"
assert_no_warnings(random.multivariate_normal, mean, cov, check_valid="ignore")
# and that it raises ValueError when check_valid="raise"
assert_raises(
ValueError, random.multivariate_normal, mean, cov, check_valid="raise"
)
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert_raises(
ValueError, random.multivariate_normal, mean, cov, check_valid="other"
)
assert_raises(ValueError, random.multivariate_normal, np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal, mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal, mu, np.eye(3))
def test_complex_normal(seed):
random.bit_generator.seed(seed)
actual = random.complex_normal(loc=1.0, gamma=1.0, relation=0.5, size=(3, 2))
desired = np.array(
[
[
-2.007493185623132 - 0.05446928211457126j,
0.7869874090977291 - 0.35205077513085050j,
],
[
1.3118579018087224 + 0.06391605088618339j,
3.5872278793967554 + 0.14155458439717636j,
],
[
0.7170022862582056 - 0.06573393915140235j,
-0.26571837106621987 - 0.0931713830979103j,
],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
random.bit_generator.seed(seed)
actual = random.complex_normal(loc=0, gamma=1.0, relation=0.5, size=3)
assert_array_almost_equal(actual, desired.flat[:3] - 1.0, decimal=15)
random.bit_generator.seed(seed)
actual = random.complex_normal(loc=2.0, gamma=1.0, relation=0.5)
assert_array_almost_equal(actual, 1.0 + desired[0, 0], decimal=15)
def test_complex_normal_invalid():
assert_raises(ValueError, random.complex_normal, gamma=1 + 0.5j)
assert_raises(ValueError, random.complex_normal, relation=2)
assert_raises(ValueError, random.complex_normal, relation=-3)
assert_raises(ValueError, random.complex_normal, relation=10j)
assert_raises(ValueError, random.complex_normal, gamma=[1 + 0.5j])
assert_raises(ValueError, random.complex_normal, relation=[2])
assert_raises(ValueError, random.complex_normal, relation=[-3])
assert_raises(ValueError, random.complex_normal, relation=[10j])
def test_random_uintegers():
assert len(random.uintegers(10)) == 10
assert len(random.uintegers(10, bits=32)) == 10
assert isinstance(random.uintegers(), int)
assert isinstance(random.uintegers(bits=32), int)
with pytest.raises(ValueError):
with pytest.deprecated_call():
random.uintegers(bits=128)
def test_str_repr():
assert "ExtendedGenerator" in str(random)
assert "ExtendedGenerator" in repr(random)
assert "MT19937" in str(random)
def test_pickle_and_copy(seed):
gen = ExtendedGenerator(MT19937(seed, mode="legacy"))
reloaded = pickle.loads(pickle.dumps(gen))
assert isinstance(reloaded, ExtendedGenerator)
copied = copy.deepcopy(gen)
gen_rv = gen.uintegers(10, bits=64)
reloaded_rv = reloaded.uintegers(10, bits=64)
copied_rv = copied.uintegers(10, bits=64)
assert_equal(gen_rv, reloaded_rv)
assert_equal(gen_rv, copied_rv)
def test_set_get_state(seed):
state = _mt19937.state
gen = ExtendedGenerator(MT19937(seed, mode="legacy"))
gen.state = state
assert_equal(gen.state["state"]["key"], state["state"]["key"])
assert_equal(gen.state["state"]["pos"], state["state"]["pos"])
def test_complex_normal_size(mv_seed):
random = ExtendedGenerator(MT19937(mv_seed, mode="legacy"))
state = random.state
loc = np.ones((1, 2))
gamma = np.ones((3, 1))
relation = 0.5 * np.ones((3, 2))
actual = random.complex_normal(loc=loc, gamma=gamma, relation=relation)
desired = np.array(
[
[
1.393937478212015 - 0.31374589731830593j,
0.9474905694736895 - 0.16424530802218726j,
],
[
1.119247463119766 + 0.023956373851168843j,
0.8776366291514774 + 0.2865220655803411j,
],
[
0.5515508326417458 - 0.15986016780453596j,
-0.6803993941303332 + 1.1782711493556892j,
],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
random.state = state
actual = random.complex_normal(loc=loc, gamma=1.0, relation=0.5, size=(3, 2))
assert_array_almost_equal(actual, desired, decimal=15)
def test_invalid_capsule():
class fake:
capsule = "capsule"
with pytest.raises(ValueError):
ExtendedGenerator(fake())
def test_default_pcg64():
eg = ExtendedGenerator()
assert isinstance(eg.bit_generator, PCG64)
assert eg.bit_generator.variant == "dxsm"
@pytest.mark.parametrize("df", [2, 5, 10])
@pytest.mark.parametrize("dim", [2, 5, 10])
@pytest.mark.parametrize("size", [None, 5, (3, 7)])
def test_standard_wishart_reproduce(df, dim, size):
pcg = PCG64(0, mode="sequence")
eg = ExtendedGenerator(pcg)
w = eg.standard_wishart(df, dim, size)
if size is not None:
sz = size if isinstance(size, tuple) else (size,)
assert w.shape[:-2] == sz
else:
assert w.ndim == 2
assert w.shape[-2:] == (dim, dim)
pcg = PCG64(0, mode="sequence")
eg = ExtendedGenerator(pcg)
w2 = eg.standard_wishart(df, dim, size)
assert_allclose(w, w2)
@pytest.mark.parametrize("scale_dim", [0, 1, 2])
@pytest.mark.parametrize("df", [8, [10], [[5], [6]]])
def test_wishart_broadcast(df, scale_dim):
dim = 5
pcg = PCG64(0, mode="sequence")
eg = ExtendedGenerator(pcg)
scale = np.eye(dim)
for i in range(scale_dim):
scale = np.array([scale, scale])
w = eg.wishart(df, scale)
assert w.shape[-2:] == (dim, dim)
if np.isscalar(df) and scale_dim == 0:
assert w.ndim == 2
elif scale_dim > 0:
z = np.zeros(scale.shape[:-2])
assert w.shape[:-2] == np.broadcast(df, z).shape
size = w.shape[:-2]
pcg = PCG64(0, mode="sequence")
eg = ExtendedGenerator(pcg)
w2 = eg.wishart(df, scale, size=size)
assert_allclose(w, w2)
METHODS = ["svd", "eigh"]
if HAS_SCIPY:
METHODS += ["cholesky"]
@pytest.mark.parametrize("method", METHODS)
def test_wishart_reduced_rank(method):
scale = np.eye(3)
scale[0, 1] = scale[1, 0] = 1.0
pcg = PCG64(0, mode="sequence")
eg = ExtendedGenerator(pcg)
w = eg.wishart(10, scale, method=method, rank=2)
assert w.shape == (3, 3)
assert np.linalg.matrix_rank(w) == 2
@pytest.mark.skipif(HAS_SCIPY, reason="Cannot test with SciPy")
def test_missing_scipy_exception():
scale = np.eye(3)
scale[0, 1] = scale[1, 0] = 1.0
pcg = PCG64(0, mode="sequence")
eg = ExtendedGenerator(pcg)
with pytest.raises(ImportError):
eg.wishart(10, scale, method="cholesky", rank=2)
def test_wishart_exceptions():
eg = ExtendedGenerator()
with pytest.raises(ValueError, match="scale must be non-empty"):
eg.wishart(10, [10])
with pytest.raises(ValueError, match="scale must be non-empty"):
eg.wishart(10, 10)
with pytest.raises(ValueError, match="scale must be non-empty"):
eg.wishart(10, np.array([[1, 2]]))
with pytest.raises(ValueError, match="At least one"):
eg.wishart([], np.eye(2))
with pytest.raises(ValueError, match="df must contain strictly"):
eg.wishart(-1, np.eye(2))
with pytest.raises(ValueError, match="df must contain strictly"):
df = np.ones((3, 4, 5))
df[-1, -1, -1] = -1
eg.wishart(df, np.eye(2))
with pytest.raises(ValueError, match="cannot convert float"):
eg.wishart(np.nan, np.eye(2))
with pytest.raises(ValueError, match=r"size \(3,\) is not compatible"):
eg.wishart([[10, 9, 8], [10, 9, 8]], np.eye(2), size=3)
@pytest.mark.parametrize("size", [None, 10, (10,), (10, 10)])
@pytest.mark.parametrize("df", [100, [100] * 10, [[100] * 10] * 10])
@pytest.mark.parametrize("tile", [None, (10,), (10, 10)])
def test_wishart_size(size, df, tile):
eg = ExtendedGenerator()
scale = np.eye(3)
if tile:
scale = np.tile(scale, tile + (1, 1))
expected_shape = base_shape = (3, 3)
sz = (size,) if isinstance(size, int) else size
if np.asarray(df).ndim or tile:
tile_shape = () if tile is None else tile
shape = np.broadcast(np.asarray(df), np.ones(tile_shape)).shape
expected_shape = shape + base_shape
if size:
if len(sz) < len(shape):
with pytest.raises(ValueError, match=""):
eg.wishart(df, scale, size=size)
return
if size:
expected_shape = sz + base_shape
w = eg.wishart(df, scale, size=size)
assert w.shape == expected_shape
def test_broadcast_both_paths():
eg = ExtendedGenerator()
w = eg.wishart([3, 5], np.eye(4), size=(100, 2))
assert w.shape == (100, 2, 4, 4)
def test_factor_wishart():
pcg = PCG64(0, mode="sequence")
eg = ExtendedGenerator(pcg)
w = eg.wishart([3, 5], 2 * np.eye(4), size=(10000, 2), method="factor")
assert_allclose(np.diag((w[:, 0] / 3).mean(0)).mean(), 4, rtol=1e-2)
assert_allclose(np.diag((w[:, 1] / 5).mean(0)).mean(), 4, rtol=1e-2)
def test_wishart_chi2(extended_gen):
state = extended_gen.state
w = extended_gen.standard_wishart(10, 1, 10, rescale=False)
extended_gen.state = state
bg = extended_gen.bit_generator
c = Generator(bg).chisquare(10, size=10)
assert_allclose(np.squeeze(w), c)
@pytest.mark.parametrize("df", [3, 5])
def test_standard_vs_full_wishart(extended_gen, df):
state = extended_gen.state
sw = extended_gen.standard_wishart(df, 4, size=(3, 4, 5), rescale=False)
extended_gen.state = state
w = extended_gen.wishart(df, np.eye(4), size=(3, 4, 5))
assert_allclose(sw, w)
def test_standard_wishart_direct_small(extended_gen):
bg = extended_gen.bit_generator
state = bg.state
w = extended_gen.standard_wishart(3, 4, 2, rescale=False)
bg.state = state
gen = Generator(bg)
for i in range(2):
n = gen.standard_normal((3, 4))
direct = n.T @ n
assert_allclose(direct, w[i])
def test_standard_wishart_direct_large(extended_gen):
bg = extended_gen.bit_generator
state = bg.state
w = extended_gen.standard_wishart(3, 2, 2, rescale=False)
bg.state = state
gen = Generator(bg)
direct = np.zeros((2, 2))
for i in range(2):
v11 = gen.chisquare(3)
n = gen.standard_normal()
v22 = gen.chisquare(2)
direct[0, 0] = v11
direct[1, 1] = n ** 2 + v22
direct[1, 0] = direct[0, 1] = n * np.sqrt(v11)
assert_allclose(direct, w[i])
upper = np.zeros((2, 2))
upper[0, 0] = np.sqrt(v11)
upper[0, 1] = n
upper[1, 1] = np.sqrt(v22)
direct2 = upper.T @ upper
assert_allclose(direct2, w[i])
@pytest.mark.parametrize("ex_gamma", [True, False])
@pytest.mark.parametrize("ex_rel", [True, False])
@pytest.mark.parametrize("ex_loc", [True, False])
@pytest.mark.parametrize("size", [None, (5, 4, 3, 2)])
def test_mv_complex_normal(extended_gen, ex_gamma, ex_rel, ex_loc, size):
gamma = np.array([[2, 0 + 1.0j], [-0 - 1.0j, 2]])
if ex_gamma:
gamma = np.tile(gamma, (2, 1, 1))
rel = np.array([[0.22, 0 + 0.1j], [+0 + 0.1j, 0.22]])
if ex_rel:
rel = np.tile(rel, (3, 1, 1, 1))
loc = np.zeros(2)
if ex_loc:
loc = np.tile(loc, (4, 1, 1, 1))
extended_gen.multivariate_complex_normal(np.zeros(2), size=size)
extended_gen.multivariate_complex_normal(np.zeros(2), size=size)
extended_gen.multivariate_complex_normal(np.zeros(2), gamma, size=size)
extended_gen.multivariate_complex_normal(np.zeros(2), gamma, rel, size=size)
extended_gen.multivariate_complex_normal(np.zeros(2), gamma, rel, size=size)
def test_mv_complex_normal_exceptions(extended_gen):
with pytest.raises(ValueError, match="loc"):
extended_gen.multivariate_complex_normal(0.0)
with pytest.raises(ValueError, match="gamma"):
extended_gen.multivariate_complex_normal([0.0], 1.0)
with pytest.raises(ValueError, match="gamma"):
extended_gen.multivariate_complex_normal([0.0], [1.0])
with pytest.raises(ValueError, match="gamma"):
extended_gen.multivariate_complex_normal([0.0], np.ones((2, 3, 2)))
with pytest.raises(ValueError, match="relation"):
extended_gen.multivariate_complex_normal([0.0, 0.0], np.eye(2), 0.0)
with pytest.raises(ValueError, match="relation"):
extended_gen.multivariate_complex_normal([0.0, 0.0], np.eye(2), 0.0)
with pytest.raises(ValueError, match="relation"):
extended_gen.multivariate_complex_normal([0.0, 0.0], relation=0.0)
with pytest.raises(ValueError, match="The covariance matrix implied"):
extended_gen.multivariate_complex_normal(
[0.0, 0.0], np.array([[1.0, 0 + 1.0j], [0 + 1.0j, 1]])
)
with pytest.raises(ValueError, match="The leading dimensions"):
extended_gen.multivariate_complex_normal(
[0.0, 0.0], np.ones((4, 1, 3, 2, 2)), np.ones((1, 1, 2, 2, 2))
)
def test_wishart_edge(extended_gen):
with pytest.raises(ValueError, match="scale must be non-empty"):
extended_gen.wishart(5, np.empty((0, 0)))
with pytest.raises(ValueError, match="scale must be non-empty"):
extended_gen.wishart(5, np.empty((0, 2, 2)))
with pytest.raises(ValueError, match="scale must be non-empty"):
extended_gen.wishart(5, [[]])
with pytest.raises(ValueError, match="At least one value is required"):
extended_gen.wishart(np.empty((0, 2, 3)), np.eye(2))
def test_mv_complex_normal_edge(extended_gen):
with pytest.raises(ValueError, match="loc must be non-empty and at least"):
extended_gen.multivariate_complex_normal(np.empty((0, 2)))
with pytest.raises(ValueError, match="gamma must be non-empty"):
extended_gen.multivariate_complex_normal([0, 0], np.empty((0, 2, 2)))
with pytest.raises(ValueError, match="relation must be non-empty"):
extended_gen.multivariate_complex_normal([0, 0], np.eye(2), np.empty((0, 2, 2)))
def test_random_long_double(extended_gen):
out = extended_gen.random(dtype="longdouble")
types_equiv = np.empty(1, np.longdouble).dtype == np.empty(1, np.double).dtype
expected_type = float if types_equiv else np.longdouble
assert isinstance(out, expected_type)
out = extended_gen.random(dtype=np.longdouble)
assert isinstance(out, expected_type)
out = extended_gen.random(size=10, dtype=np.longdouble)
expected_type = np.double if types_equiv else np.longdouble
assert out.dtype == expected_type
def test_random_long_double_out(extended_gen):
state = extended_gen.state
out = np.empty((10, 10), dtype=np.longdouble)
ret = extended_gen.random(out=out, dtype=np.longdouble)
assert ret is out
extended_gen.state = state
alt = extended_gen.random(size=(10, 10), dtype=np.longdouble)
assert_allclose(out, alt)
def test_random_other_type(extended_gen):
with pytest.raises(TypeError, match="Unsupported dtype"):
extended_gen.random(dtype=int)
f16 = getattr(np, "float16", np.uint32)
with pytest.raises(TypeError, match="Unsupported dtype"):
extended_gen.random(dtype=f16)
def test_random(extended_gen_legacy):
extended_gen_legacy.bit_generator.seed(SEED)
actual = extended_gen_legacy.random((3, 2))
desired = np.array(
[
[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
extended_gen_legacy.bit_generator.seed(SEED)
actual = extended_gen_legacy.random()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_random_float(extended_gen_legacy):
extended_gen_legacy.bit_generator.seed(SEED)
actual = extended_gen_legacy.random((3, 2))
desired = np.array(
[[0.6187948, 0.5916236], [0.8886836, 0.8916548], [0.4575675, 0.7781881]]
)
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_float_scalar(extended_gen_legacy):
extended_gen_legacy.bit_generator.seed(SEED)
actual = extended_gen_legacy.random(dtype=np.float32)
desired = 0.6187948
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_long_double_direct(extended_gen):
state = extended_gen.state
actual = extended_gen.random(10, dtype=np.longdouble)
extended_gen.state = state
nmant = np.finfo(np.longdouble).nmant
denom = np.longdouble(str(2 ** (nmant + 1)))
c = np.longdouble(1.0) / denom
def twopart(u1, u2, a, b, c):
v = int(u1) >> a
if b:
v = (v << (64 - b)) + (int(u2) >> b)
return np.longdouble(str(v)) * c
if nmant == 52:
a, b = 11, 0
elif nmant == 63:
a, b = 0, 0
elif nmant == 105:
a, b = 11, 11
elif nmant == 112:
a, b = 7, 8
else:
raise NotImplementedError(f"No test for {nmant} bits")
direct = np.empty(10, dtype=np.longdouble)
u = extended_gen.bit_generator.random_raw(10 * (1 + (b > 0)))
for i in range(10):
if b:
u1, u2 = u[2 * i : 2 * i + 2]
else:
u1, u2 = u[i], None
direct[i] = twopart(u1, u2, a, b, c)
assert_allclose(actual, direct)
|
from torch.utils.data import random_split, DataLoader
from pytorch_lightning.core.datamodule import LightningDataModule
from tests.base.datasets import TrialMNIST
class TrialMNISTDataModule(LightningDataModule):
def __init__(self, data_dir: str = './'):
super().__init__()
self.data_dir = data_dir
def prepare_data(self):
TrialMNIST(self.data_dir, train=True, download=True)
TrialMNIST(self.data_dir, train=False, download=True)
def setup(self):
mnist_full = TrialMNIST(root=self.data_dir, train=True, num_samples=64, download=True)
self.mnist_train, self.mnist_val = random_split(mnist_full, [128, 64])
self.dims = tuple(self.mnist_train[0][0].shape)
self.mnist_test = TrialMNIST(root=self.data_dir, train=False, num_samples=32, download=True)
def train_dataloader(self):
return DataLoader(self.mnist_train, batch_size=32)
def val_dataloader(self):
return DataLoader(self.mnist_val, batch_size=32)
def test_dataloader(self):
return DataLoader(self.mnist_test, batch_size=32)
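# Minimal usage sketch (illustrative, not part of the original module); it
# assumes TrialMNIST can be downloaded into the working directory.
if __name__ == "__main__":
    dm = TrialMNISTDataModule('./')
    dm.prepare_data()
    dm.setup()
    print(len(dm.mnist_train), len(dm.mnist_val))  # 128 64, per the split above
    batch = next(iter(dm.train_dataloader()))
    print([x.shape for x in batch])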
|
from django.shortcuts import render
from django.http import HttpResponse
from .forms import TestForm
import tweepy
consumer_key = '1ajVPYtNS09p27YPq8SLo54y7'
consumer_secret = 'DP7VOUXFd7sLWRWsJnhMIthxHac4MgCETUA2KSvxzVXAOfw4xl'
access_token = '142843393-tITe8g8Z3DsqZKXFoqRHGXjJwvwui1GG0pRYrLRk'
access_token_secret = 'yufRfuVQ05VYEBqsPqUbXsai9a2QF2q6Lk1cDX2U1g2ZS'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# Create an API instance
api = tweepy.API(auth)
def index(request):
Message = {
'words': request.GET.get('words'),
}
msg = Message['words']
#api.update_status(msg)
return render(request, 'tweets/index.html', Message)
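# Illustrative request (assumed URL wiring): GET /?words=hello renders
# tweets/index.html with {'words': 'hello'}; uncommenting api.update_status(msg)
# above would also post that text to the authenticated account.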
|
#!/usr/bin/env python
__doc__ = """
Script resizes all images in the folder. Parameters which can be specified by the user.
scale (default = 0.5) <= how much we downscale images, e.g. 0.5 makes images 1/4 of the area of the original.
modify (default = True) <= are images changed in place (origs are overwritten)
path (default = '.') <= where to look for images (with subdirectories)
Requires:
* xvfb to not block the screen
Python:
* requires python-3.2
:bug:
None known.
:organization:
ETH
"""
__authors__="""Szymon Stoma"""
__contact__="<Your contact>"
__license__="Cecill-C"
__date__="17-11-01"
__version__="0.1"
__docformat__= "restructuredtext en"
# ----------------------------------------------------------- imports
import os
from fnmatch import fnmatch
import argparse
import subprocess
# ----------------------------------------------------------- conf
conf_fiji_args = ['--headless', '-batch']
# ----------------------------------------------------------- parsing args
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--scale',
type=float,
help='how much we downscale images, e.g. 0.5 makes images 1/4 of the area of the original.',
default=0.5
)
parser.add_argument('--modify',
help='are images changed in place (origs are overwritten)?',
default=True
)
parser.add_argument('--path',
help='where to look for images (with subdirectories)',
default='.'
)
parser.add_argument('--file_regexp',
help='what files to include?',
default='*'
)
parser.add_argument('--fiji_path',
help='path to executable fiji',
default='fiji'
)
parser.add_argument('--version', action='version', version='%(prog)s '+str(__version__))
args = parser.parse_args()
# ----------------------------------------------------------- getting info about files
"""
root = args.path
pattern = args.file_regexp
to_process = []
for path, subdirs, files in os.walk(root):
for name in files:
if fnmatch(name, pattern):
to_process.append(os.path.join(path, name))
"""
# print(to_process)
# ----------------------------------------------------------- converting files
subprocess.run([args.fiji_path, *conf_fiji_args])
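# Example invocation (illustrative; the script filename is hypothetical). Note
# that as written only the headless/batch flags are passed to Fiji, with no
# macro or file list yet:
#   python resize_images.py --path ./images --scale 0.5 --fiji_path /opt/Fiji.app/ImageJ-linux64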
|
# Generated by Django 3.1 on 2020-08-30 02:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('somms', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='somm',
name='updated',
field=models.DateTimeField(auto_now=True),
),
]
|
import json
from django.dispatch import receiver
from django.http import HttpRequest, HttpResponse
from django.template.loader import get_template
from django.urls import resolve
from django.utils.translation import gettext_lazy as _
from pretix.base.middleware import _merge_csp, _parse_csp, _render_csp
from pretix.base.signals import logentry_display, register_payment_providers
from pretix.presale.signals import html_head, process_response
from pretix_adyen.payment import AdyenSettingsHolder
@receiver(register_payment_providers, dispatch_uid="payment_adyen")
def register_payment_provider(sender, **kwargs):
from .payment import (
AdyenSettingsHolder, AdyenScheme, AdyenGiropay, AdyenMBWay
)
return [AdyenSettingsHolder, AdyenScheme, AdyenGiropay, AdyenMBWay]
@receiver(html_head, dispatch_uid="payment_adyen_html_head")
def html_head_presale(sender, request=None, **kwargs):
from .payment import AdyenMethod
provider = AdyenMethod(sender)
url = resolve(request.path_info)
if provider.settings.get('_enabled', as_type=bool) and ("checkout" in url.url_name or "order.pay" in url.url_name):
template = get_template('pretix_adyen/presale_head.html')
ctx = {
'locale': request.LANGUAGE_CODE,
'environment': 'test' if sender.testmode else provider.settings.prod_env,
'originKey': provider._get_originKey('test' if provider.event.testmode else provider.settings.prod_env),
}
return template.render(ctx)
else:
return ""
@receiver(signal=process_response, dispatch_uid="payment_adyen_middleware_resp")
def signal_process_response(sender, request: HttpRequest, response: HttpResponse, **kwargs):
provider = AdyenSettingsHolder(sender)
url = resolve(request.path_info)
if provider.settings.get('_enabled', as_type=bool) and ("checkout" in url.url_name or "order.pay" in url.url_name):
if 'Content-Security-Policy' in response:
h = _parse_csp(response['Content-Security-Policy'])
else:
h = {}
sources = ['frame-src', 'style-src', 'script-src', 'img-src', 'connect-src']
env = 'test' if sender.testmode else provider.settings.prod_env
csps = {src: ['https://checkoutshopper-{}.adyen.com'.format(env)] for src in sources}
# Adyen unfortunately applies styles through their script-src
# Also, the unsafe-inline needs to be specified within single quotes!
csps['style-src'].append("'unsafe-inline'")
_merge_csp(h, csps)
if h:
response['Content-Security-Policy'] = _render_csp(h)
return response
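# Illustrative effect (assumed, derived from the sources list above): for a
# test-mode event the merged Content-Security-Policy gains entries such as
#   frame-src https://checkoutshopper-test.adyen.com;
#   style-src https://checkoutshopper-test.adyen.com 'unsafe-inline'; ...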
@receiver(signal=logentry_display, dispatch_uid="adyen_logentry_display")
def pretixcontrol_logentry_display(sender, logentry, **kwargs):
if logentry.action_type != 'pretix_adyen.adyen.event':
return
data = json.loads(logentry.data)
outcome = data.get('success', None)
reason = data.get('reason', None)
if outcome == 'true':
outcome = 'successful.'
else:
outcome = 'unsuccessful.'
if reason:
outcome = '{} ({})'.format(outcome, reason)
return _('Adyen reported an event: {} {}').format(data.get('eventCode'), outcome)
|
try :
from tools.assistant import ask_question
from tools.AI.data import data , youtube , wiki , google , youtube_play , goto_keys
from tools.AI.data import install_keys , calc_keys , should_not_learn , version_keys
from tools.wiki_search import wiki_search
from settings.logs import *
from tools.browser.search import *
from tools.browser.goto import find_goto_address
from system.install import install , command
from system.screen_text import command_sep
from tools.string_processing import is_matched
from tools.json_manager import JsonManager
from tools.calculation import google_calculation
from tools.run_program import if_run_type
from system.notifications import notify
from settings.settings import bot , DEBUG , LEARN
from tools.shell import if_shell_type
from tools.OJ.cp import if_cp_type
from termcolor import cprint
from tools.downloader import wget_downloader
from settings.config import if_config_type
from tools.credits import if_credit_type
from system.path import getpath
from settings.config import train_path
import os
import time
except Exception as e:
print(e)
def check(msg,mp,need = 90):
logger.debug('check->' + msg)
for word in mp :
if is_matched(word,msg,need):
return True
return False
def version_type(msg) :
try :
if msg in version_keys :
from system.__about__ import __version__
x = "Current version is : "+ __version__
return (True,x)
return (False,'')
except:
return (False,'')
def rep(msg,mp):
msg = msg.lower()
# for key in mp :
# for word in key:
# for w in msg:
# if w == word:
# w = word
# return msg.strip().capitalize()
for word in mp :
if word in msg:
return msg.replace(word,'',1).strip()
return msg.strip()
def ai(msg,orginal_path) :
""" Little ai for reacting to the msg.
Written by Saurav-Paul"""
logger.debug('Processing with ai')
msg = msg.replace(' ',' ')
msg = msg.replace(bot['name'],'')
msg = msg.replace(bot['name'].lower(),'')
msg = msg.replace(bot['name'].capitalize(),'')
reply = "I don't know what to do, sir ."
# print('you said ' , msg, bot['name'])
is_type = version_type(msg)
if is_type[0]:
return is_type[1]
if if_cp_type(msg):
return 'Good luck sir.'
if if_config_type(msg):
return 'Good luck sir.'
if if_shell_type(msg):
return 'Good luck sir.'
if if_run_type(msg):
return 'Good luck sir.'
if if_credit_type(msg):
return 'Good luck sir.'
else :
try :
msg = msg.strip().lower()
for line in data :
if is_matched(msg,line,95):
reply = data[line]
return reply
# logger.info('Not found in common data')
# from history
if(check(msg,['change dir','change directory','chdir','-cd'],100)):
# print(os.getcwd())
cprint('Enter the path: ','cyan',end='')
path = input()
os.chdir(path)
return 'Directory changed.'
if check(msg,youtube_play):
msg = rep(msg,youtube_play)
logger.info(msg)
find_goto_address(msg)
reply = 'Enjoy sir. :D'
elif check(msg,goto_keys):
msg = rep(msg,goto_keys)
find_goto_address(msg)
reply = 'check browser'
notify('Check Browser',':D',t=5)
elif check(msg,youtube):
msg = rep(msg,youtube)
search_youtube(msg)
reply = 'check browser.'
notify('Check Browser',':D',t=5)
elif check(msg,wiki):
msg = rep(msg,wiki)
# search_wiki(msg)
msg = 'en.wikipedia.org '+msg
find_goto_address(msg)
reply = 'check browser.'
notify('Check Browser',':D',t=5)
elif msg == 'download':
cprint('Enter the url : ','cyan',end='')
url = input()
wget_downloader(url)
reply = 'Done.'
elif check(msg,google):
msg = rep(msg,google)
# print('here = ',msg)
search_google(msg)
reply = 'check browser.'
notify('Check Browser',':D',t=5)
elif check(msg,install_keys):
msg = rep(msg,install_keys)
reply = install(msg)
elif check(msg,calc_keys):
msg = rep(msg,calc_keys)
reply = google_calculation(msg)
if reply == "sorry":
search_google(msg)
reply = "check browser"
notify('Check Browser',':D',t=5)
else :
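# Note: due to operator precedence the next condition parses as
# (0 and 'cmd:' in msg) or ('-s' in msg), so only the '-s' check is active.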
if 0 and 'cmd:' in msg or '-s' in msg:
msg = rep(msg,{'cmd:'})
msg = rep(msg,{'-s'})
command_sep()
command(msg.lower())
command_sep()
reply = 'done sir'
else :
try :
f = getpath(__file__)+'.learnt'
history = JsonManager.json_read(f)
for line in history:
if is_matched(msg,line,95):
logging.info('Learnt this before')
return history[line]
except :
logging.error("Can't read history file")
try :
ft = train_path
history = JsonManager.json_read(ft)
for line in history:
if is_matched(msg,line,95):
logging.info('You have trained this before.')
return history[line]
except :
logger.info("Can't read trained data")
t = time.time()
reply = ask_question(msg)
t = time.time()-t
logger.info(str(t)+' sec')
# cprint(t,'red')
ok = True
for word in should_not_learn:
if word in msg.lower() or word in reply.lower():
ok = False
break
if ok:
logger.info('reply -> ' + reply)
if LEARN == False:
cprint("(Automatically LEARN MODE is disable)Enter y to learn : ",'red',attrs=['bold'],end='')
learn = input('')
else :
learn = 'y'
if learn.lower() == 'y':
try :
history.update({msg:reply})
JsonManager.json_write(f,history)
logger.info('Learnt')
except Exception as e:
logger.info("Exception while writing learnt : "+e)
return reply
except Exception as e:
logger.info('Getting some error in ai')
logger.info(e)
return reply
|
#!/usr/bin/env python3
import pandas as pd
import sys
import matplotlib.pyplot as plt
def readExcel(url):
data= pd.ExcelFile(url)
print(data.sheet_names)
data_gender = data.parse('Paciente', skiprows=0)  # skipfooter=0, names=[...]
data_gender.set_index('idPais',inplace=True)
data_gender.plot()
plt.show()
print(data_gender)
print(type(data))
print(type(data_gender))
if __name__=="__main__":
if len(sys.argv)<2:
print("Ingrese una <PATH> relativa")
exit(0)
try:
readExcel(sys.argv[1])
except Exception as e:
print(e)
|
from datetime import date
import pandas as pd
import lxml
from nsepy import get_history
print('Enter symbol---')
cols=['Date','Open','High','Low','Close','Volume']
df_new=pd.DataFrame(columns=cols)
#sym=input()
print(df_new)
stock = input("Enter stock name(ex:GOOGL, AAPL): ")
df = get_history(symbol=stock,
start=date(2015,1,1),
end=date(2015,1,10))
print(df.head())
df.reset_index(drop = False, inplace = True)
print(df.head())
#df.reset_index()
#df_new=df[['Date','Open','High','Low','Close']].copy()
#df_new.set_index('Date')
#df_new.set_index('Date')
df_new['Date']=df['Date']
df_new['Open']=df['Open']
df_new['High']=df['High']
df_new['Low']=df['Low']
df_new['Close']=df['Close']
df_new['Volume']=df['Volume']
df_new.set_index('Date',inplace=True)
print(df_new.head())
|
# -*- coding: utf-8 -*-
"""
fix_self_assert - lib2to3 fix for replacing assertXXX() method calls
by their larky assertpy (assertion library for larky equivalent).
"""
#
# Mostly inspired by Hartmut Goebel <h.goebel@crazy-compilers.com>
# and the amazing project of unittest2pytest.
#
# Obligatory license...
# unittest2pytest is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import binascii
import re
import unittest
import uuid
from functools import partial
from lib2to3.fixer_base import BaseFix
from lib2to3.fixer_util import (
Attr,
Call,
Comma,
Dot,
LParen,
Leaf,
Name,
Newline,
Node,
Number,
RParen,
String,
does_tree_import,
find_indentation,
find_root,
parenthesize,
syms,
token,
)
from typing import List
from py2star import utils
TEMPLATE_PATTERN = re.compile("[\1\2]|[^\1\2]+")
def _insert_import(import_stmt, nearest_parent_node, file_input):
"""This inserts an import in a very similar way as
lib2to3.fixer_util.touch_import, but try to maintain encoding and shebang
prefixes on top of the file when there is no import
nearest_parent_node here is like the enclosing testcase
"""
import_nodes = utils.get_import_nodes(file_input)
if import_nodes:
last_import_stmt = import_nodes[-1].parent
i = file_input.children.index(last_import_stmt) + 1
# no import found, so add right before the test case
else:
i = file_input.children.index(nearest_parent_node)
import_stmt.prefix = nearest_parent_node.prefix
nearest_parent_node.prefix = ""
file_input.insert_child(i, import_stmt)
def add_import(import_name, node, ns="stdlib"):
suite = utils.get_parent_of_type(node, syms.suite)
test_case = suite
while test_case.parent.type != syms.file_input:
test_case = test_case.parent
file_input = test_case.parent
if does_tree_import(None, import_name, node):
return
n = Call(
Name("load"),
args=[
String(f'"@{ns}//{import_name}"'),
Comma(),
String(f'"{import_name}"'),
],
)
import_stmt = Node(syms.simple_stmt, [n, Newline()])
# Check to see if we have already added this import.
for c in file_input.children:
for x in c.children:
if (
c.type == syms.simple_stmt
and x.type == syms.power
and x.parent == import_stmt
):
# We have already added this import statement, so
# we do not need to add it again.
return
_insert_import(import_stmt, test_case, file_input)
def CompOp(op, left, right, kws):
left = parenthesize_expression(left)
right = parenthesize_expression(right)
left.prefix = ""
if "\n" not in right.prefix:
right.prefix = ""
# trailer< '.' 'assert_that' > trailer< '(' '1' ')' >
_left_asserts = Call(Name("assert_that"), args=[left])
new = Node(
syms.power,
# trailer< '.' 'is_equal_to' > trailer< '(' '2' ')' > >
Attr(_left_asserts, Name(op))
+ [Node(syms.trailer, [LParen(), right, RParen()])],
)
return new
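# Illustrative rendering (assumed typical lib2to3 output, ignoring prefixes):
#   CompOp("is_equal_to", a, b, {})  ->  assert_that(a).is_equal_to(b)
# transform() below prepends "asserts." to form asserts.assert_that(a).is_equal_to(b)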
def UnaryOp(prefix, postfix, value, kws):
"""
Converts a method like: ``self.failUnless(True)`` to
asserts.assert_that(value).is_true()
Example:
unittest: ``self.failUnless(v4)``
pattern:
``UnaryOp('is_true', '', Leaf(1, 'v4'),
OrderedDict([('expr', Leaf(1, 'v4')), ('msg', None)]))``
Translates to ``assert_that(v4).is_true()``
"""
if postfix:
value = parenthesize_expression(value)
kids = []
left = Call(Name(prefix), args=[value])
kids.append(left)
if postfix:
_obj, node = Attr(left, Name(postfix))
kids.append(node)
kids.append(Node(syms.trailer, [LParen(), RParen()]))
return Node(syms.power, kids)
# These symbols have lower precedence than the CompOps we use and thus
# need to be parenthesized. For details see
# https://docs.python.org/3/reference/expressions.html#operator-precedence
_NEEDS_PARENTHESIS = [
syms.test, # if – else
syms.or_test,
syms.and_test,
syms.not_test,
syms.comparison,
]
def parenthesize_expression(value):
if value.type in _NEEDS_PARENTHESIS:
parenthesized = parenthesize(value.clone())
parenthesized.prefix = parenthesized.children[1].prefix
parenthesized.children[1].prefix = ""
value = parenthesized
return value
def fill_template(template, *args):
parts = TEMPLATE_PATTERN.findall(template)
kids = []
for p in parts:
if p == "":
continue
elif p in "\1\2\3\4\5":
p = args[ord(p) - 1]
p.prefix = ""
else:
p = Name(p)
kids.append(p.clone())
return kids
def DualOp(template, first, second, kws):
kids = fill_template(template, first, second)
return Node(syms.test, kids, prefix=" ")
def SequenceEqual(left, right, kws):
if "seq_type" in kws:
# :todo: implement `assert isinstance(xx, seq_type)`
pass
return CompOp("==", left, right, kws)
def AlmostOp(places_op, delta_op, first, second, kws):
first.prefix = ""
second.prefix = ""
first = parenthesize_expression(first)
second = parenthesize_expression(second)
abs_op = Call(Name("abs"), [Node(syms.factor, [first, Name("-"), second])])
if kws.get("delta", None) is not None:
# delta
return CompOp(delta_op, abs_op, kws["delta"], {})
else:
# `7` is the default in unittest.TestCase.assertAlmostEqual
places = kws["places"] or Number(7)
places.prefix = " "
round_op = Call(Name("round"), (abs_op, Comma(), places))
return CompOp(places_op, round_op, Number(0), {})
def indent(level) -> List[Leaf]:
return [Leaf(token.INDENT, " ") for i in range(level)]
def dedent(level) -> List[Leaf]:
return [Leaf(token.DEDENT, "") for i in range(level)]
def _ellipsis() -> Node:
return Node(syms.atom, [Dot(), Dot(), Dot()])
def parameters(arg_names: List[str]) -> Node:
# children = [Leaf(token.LPAR, '(')]
children = []
for name in arg_names:
prefix = ""
if (
children
and children[-1].type == token.COMMA
and children[-2].type == token.NAME
):
prefix = " "
children.extend(
[Leaf(token.NAME, name, prefix=prefix), Leaf(token.COMMA, ",")]
)
return Node(
syms.parameters,
[
Leaf(token.LPAR, "("),
Node(syms.typedargslist, children),
Leaf(token.RPAR, ")"),
],
)
def get_funcdef_node(
funcname: str,
args: List[str],
body: List,
decorators: List[str],
indentation_level=1,
) -> Node:
to_prepend = []
for decorator in decorators:
decorator_node = Node(
syms.decorator,
[
Leaf(token.INDENT, " " * indentation_level),
Leaf(token.AT, "@"),
Leaf(token.NAME, decorator),
Newline(),
],
)
to_prepend.append(decorator_node)
return Node(
syms.suite,
[
Newline(),
*to_prepend,
Leaf(token.INDENT, " " * indentation_level),
Node(
syms.funcdef,
[
Leaf(token.NAME, "def"),
Leaf(token.NAME, funcname, prefix=" "),
parameters(args),
# Leaf(token.RARROW, '->', prefix=' '),
# Leaf(token.NAME, return_type, prefix=' '),
Leaf(token.COLON, ":"),
Node(
syms.suite,
[
Newline(),
*indent(indentation_level + 1),
*body,
Newline(),
*dedent(indentation_level + 1),
],
),
],
),
],
)
def _rand():
return f"_larky_{binascii.crc32(uuid.uuid4().bytes)}"
def get_lambdef_node(args, name=None):
# name = 'lambda_'+str(id)
# lambda:
if name is None:
name = ""
return Node(
syms.lambdef,
[Name("lambda"), Name(name, prefix=" "), Leaf(token.COLON, ":")] + args,
)
def RaisesOp(context, exceptionClass, indent, kws, arglist, node):
# asserts.assert_fails(lambda: -1 in b("abc"), "int in bytes: -1 out of range")
exceptionClass.prefix = ""
args = [
String('"', prefix=" "), # unquoted on purpose
String(".*?"), # match anything until the exception we are looking for
String(f"{str(exceptionClass)}"),
]
# this would be the second parameter
# Add match keyword arg to with statement if an expected regex was provided.
if "expected_regex" in kws:
expected_regex = kws.get("expected_regex").clone()
expected_regex.prefix = ""
args.append(String(".*"))
args.append(expected_regex)
args.append(String('"')) # close quote
# # assertRaises(exception, callable, *args, **kwds)
# # assertRaises(exception, *, msg=None)
# # Test that an exception is raised when callable is called with any
# # positional or keyword arguments that are also passed to
# # assertRaises().
# # To catch any of a group of exceptions, a tuple containing the
# # exception classes may be passed as exception.
# with_item = Call(Name(context), args) # pytest.raises(TypeError)
# with_item.prefix = " "
# args = []
arglist = [a.clone() for a in arglist.children[4:]]
if arglist:
arglist[0].prefix = ""
func = None
# :fixme: this uses hardcoded parameter names, which may change
if "callableObj" in kws:
func = kws["callableObj"]
elif "callable_obj" in kws:
func = kws["callable_obj"]
elif kws["args"]: # any arguments assigned to `*args`
func = kws["args"][0]
else:
func = None
if func and func.type == syms.lambdef:
suite = func.children[-1].clone()
else:
if func is None:
# Context manager, so let us convert it to a function definition
# let us create a function first.
func = get_funcdef_node(
funcname=_rand(),
args=[],
body=arglist,
decorators=[],
indentation_level=1,
)
# append it as a child to the root node
find_root(node).append_child(func)
# return Node(syms.with_stmt, [with_item])
# TODO: Newlines within arguments are not handled yet.
# If argment prefix contains a newline, all whitespace around this
# ought to be replaced by indent plus 4+1+len(func) spaces.
suite = get_lambdef_node([Call(Name(str(func)), arglist)])
# suite.prefix = indent + (4 * " ")
# new = Node(
# syms.power,
# # trailer< '.' 'is_equal_to' > trailer< '(' '2' ')' > >
# Attr(_left_asserts, Name(op))
# + [Node(syms.trailer, [LParen(), right, RParen()])],
# )
return Call(
Name("assert_fails"),
args=[suite, Comma()] + args,
)
# return Node(
# syms.with_stmt, [Name("with"), with_item, Name(":"), Newline(), suite]
# )
def RaisesRegexOp(
context,
designator,
exceptionClass,
expected_regex,
indent,
kws,
arglist,
node,
):
arglist = [a.clone() for a in arglist.children]
pattern = arglist[2]
del arglist[2:4] # remove pattern and comma
arglist = Node(syms.arglist, arglist)
with_stmt = RaisesOp(context, exceptionClass, indent, kws, arglist, node)
# if this is already part of a with statement we need to insert re.search
# after the last leaf with content
if node.parent.type == syms.with_stmt:
parent_with = node.parent
for leaf in reversed(list(parent_with.leaves())):
if leaf.value.strip():
break
i = leaf.parent.children.index(leaf)
return with_stmt
else:
return Node(syms.suite, [with_stmt])
_method_map = {
# simple equals
# asserts.eq(A, B) || asserts.assert_that(A).is_equal_to(B)
"assertDictEqual": partial(CompOp, "is_equal_to"),
"assertListEqual": partial(CompOp, "is_equal_to"),
"assertMultiLineEqual": partial(CompOp, "is_equal_to"),
"assertSetEqual": partial(CompOp, "is_equal_to"),
"assertTupleEqual": partial(CompOp, "is_equal_to"),
"assertSequenceEqual": partial(CompOp, "is_equal_to"),
"assertEqual": partial(CompOp, "is_equal_to"),
"assertNotEqual": partial(CompOp, "is_not_equal_to"),
"assertIs": partial(CompOp, "is_equal_to"),
"assertGreater": partial(CompOp, "is_greater_than"),
"assertLessEqual": partial(CompOp, "is_lte_to"),
"assertLess": partial(CompOp, "is_less_than"),
"assertGreaterEqual": partial(CompOp, "is_gte_to"),
"assertIn": partial(CompOp, "is_in"),
"assertIsNot": partial(CompOp, "is_not_equal_to"),
"assertNotIn": partial(CompOp, "is_not_in"),
"assertIsInstance": partial(CompOp, "is_instance_of"),
"assertNotIsInstance": partial(CompOp, "is_not_instance_of"),
# unary operations
"assertIsNone": partial(UnaryOp, "assert_that", "is_none"),
"assertIsNotNone": partial(UnaryOp, "assert_that", "is_not_none"),
"assertFalse": partial(UnaryOp, "assert_that", "is_false"),
"failIf": partial(UnaryOp, "assert_that", "is_false"),
"assertTrue": partial(UnaryOp, "assert_that", "is_true"),
"failUnless": partial(UnaryOp, "assert_that", "is_true"),
"assert_": partial(UnaryOp, "assert_that", "is_true"),
# "exceptions" in larky do not exist but we have asserts.assert_fails...
"assertRaises": partial(RaisesOp, "asserts"),
"assertWarns": partial(RaisesOp, "asserts"),
# types ones
"assertDictContainsSubset": partial(DualOp, "dict(\2, **\1) == \2"),
"assertItemsEqual": partial(DualOp, "sorted(\1) == sorted(\2)"),
"assertRegex": partial(DualOp, "re.search(\2, \1)"),
"assertNotRegex": partial(DualOp, "not re.search(\2, \1)"), # new Py 3.2
"assertAlmostEqual": partial(AlmostOp, "==", "<"),
"assertNotAlmostEqual": partial(AlmostOp, "!=", ">"),
# "assertRaisesRegex": partial(RaisesRegexOp, "pytest.raises", "excinfo"),
# "assertWarnsRegex": partial(RaisesRegexOp, "pytest.warns", "record"),
# 'assertLogs': -- not to be handled here, is an context handler only
}
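# Illustrative rewrites produced via this mapping (assumed typical output;
# exact whitespace depends on lib2to3 prefixes):
#   self.assertEqual(a, b)   -> asserts.assert_that(a).is_equal_to(b)
#   self.assertTrue(flag)    -> asserts.assert_that(flag).is_true()
#   self.assertRaises(KeyError, d.pop, "k")
#                            -> asserts.assert_fails(lambda: d.pop("k"), ".*?KeyError")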
for newname, oldname in (
("assertRaisesRegex", "assertRaisesRegexp"),
("assertRegex", "assertRegexpMatches"),
):
if not hasattr(unittest.TestCase, newname):
# use old name
_method_map[oldname] = _method_map[newname]
del _method_map[newname]
for m in list(_method_map.keys()):
if not hasattr(unittest.TestCase, m):
del _method_map[m]
# (Deprecated) Aliases
_method_aliases = {
"assertEquals": "assertEqual",
"assertNotEquals": "assertNotEqual",
"assert_": "assertTrue",
"assertAlmostEquals": "assertAlmostEqual",
"assertNotAlmostEquals": "assertNotAlmostEqual",
"assertRegexpMatches": "assertRegex",
"assertRaisesRegexp": "assertRaisesRegex",
"failUnlessEqual": "assertEqual",
"failIfEqual": "assertNotEqual",
"failUnless": "assertTrue",
"failIf": "assertFalse",
"failUnlessRaises": "assertRaises",
"failUnlessAlmostEqual": "assertAlmostEqual",
"failIfAlmostEqual": "assertNotAlmostEqual",
}
for a, o in list(_method_aliases.items()):
if o not in _method_map:
# if the original name is not a TestCase method, remove the alias
del _method_aliases[a]
class FixAsserts(BaseFix):
order = "pre"
run_order = 2
PATTERN = """
power< 'self'
trailer< '.' method=( %s ) >
trailer< '(' arglist=any ')' >
>
""" % " | ".join(
map(repr, (set(_method_map.keys()) | set(_method_aliases.keys())))
)
def transform(self, node, results):
def process_arg(arg):
if isinstance(arg, Leaf) and arg.type == token.COMMA:
return
elif (
isinstance(arg, Node)
and arg.type == syms.argument
and arg.children[1].type == token.EQUAL
):
# keyword argument
name, equal, value = arg.children
assert name.type == token.NAME
assert equal.type == token.EQUAL
value = value.clone()
kwargs[name.value] = value
if "\n" in arg.prefix:
value.prefix = arg.prefix
else:
value.prefix = arg.prefix.strip() + " "
else:
if (
isinstance(arg, Node)
and arg.type == syms.argument
and arg.children[0].type == 36
and arg.children[0].value == "**"
):
return
assert (
not kwargs
), "all positional args are assumed to come first"
if (
isinstance(arg, Node)
and arg.type == syms.argument
and arg.children[1].type == syms.comp_for
):
                    # argument is a generator expression without
                    # parentheses, so add them
value = arg.clone()
value.children.insert(0, Leaf(token.LPAR, "("))
value.children.append(Leaf(token.RPAR, ")"))
posargs.append(value)
else:
posargs.append(arg.clone())
method = results["method"][0].value
        # map (deprecated) aliases to the original name so that only the
        # canonical method has to be analysed
method = _method_aliases.get(method, method)
posargs = []
kwargs = {}
        # This is either an "arglist" or a single argument
if results["arglist"].type == syms.arglist:
for arg in results["arglist"].children:
process_arg(arg)
else:
process_arg(results["arglist"])
try:
test_func = getattr(unittest.TestCase, method)
except AttributeError:
raise RuntimeError(
"Your unittest package does not support '%s'. "
"consider updating the package" % method
)
required_args, argsdict = utils.resolve_func_args(
test_func, posargs, kwargs
)
if method.startswith(("assertRaises", "assertWarns")):
n_stmt = Node(
syms.simple_stmt,
[
Name("asserts."),
_method_map[method](
*required_args,
indent=find_indentation(node),
kws=argsdict,
arglist=results["arglist"],
node=node,
),
],
)
else:
n_stmt = Node(
syms.simple_stmt,
[
Name("asserts."),
_method_map[method](*required_args, kws=argsdict),
],
)
if argsdict.get("msg", None) is not None:
n_stmt.children.extend((Name(","), argsdict["msg"]))
def fix_line_wrapping(x):
for c in x.children:
# no need to worry about wrapping of "[", "{" and "("
if c.type in [token.LSQB, token.LBRACE, token.LPAR]:
break
if c.prefix.startswith("\n"):
c.prefix = c.prefix.replace("\n", " \\\n")
fix_line_wrapping(c)
fix_line_wrapping(n_stmt)
# the prefix should be set only after fixing line wrapping
# because it can contain a '\n'
n_stmt.prefix = node.prefix
# add necessary imports
if "Raises" in method or "Warns" in method:
add_import("pytest", node)
if (
"Regex" in method
and not "Raises" in method
and not "Warns" in method
):
add_import("re", node)
add_import("asserts", node, ns="vendor")
return n_stmt
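# Illustrative sketch only (not part of the fixer itself): assuming the CompOp
# helper defined earlier in this module produces the usual larky/assertpy-style
# call chain, a statement such as
#
#     self.assertEqual(result, 42)
#
# would be rewritten by FixAsserts.transform into roughly
#
#     asserts.assert_that(result).is_equal_to(42)
#
# with assertRaises/assertWarns handled by RaisesOp instead. The exact output
# depends on the CompOp, UnaryOp, DualOp, and RaisesOp helpers defined earlier.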
|
from python_structure.data_structures.linked_list.node import Node
class LinkedList:
def __init__(self):
self.head = None
def append_node(self, data):
"""
        Add a Node containing 'data' at the end of the Linked List
        :param data: value to store in the new Node
        :return: None
"""
if not self.head:
self.head = Node(data)
return
else:
current = self.head
while current.next:
current = current.next
current.next = Node(data)
def print_list(self):
"""
        Print the data of every Node in the list
        :return: None; the values are written with the built-in 'print()'
"""
node = self.head
while node is not None:
            print(node.data, end=" ")
node = node.next
print("\n")
def return_list(self):
"""
Save all the Nodes of the linked list to a list and return it
:return: Nodes' list
"""
result = []
node = self.head
while node is not None:
result.append(node.data)
node = node.next
return result
def search(self, target):
"""
        Search for 'target' in the linked list.
        :param target: value to look for
        :return: True if a Node with 'target' is found, False otherwise
"""
current = self.head
while current is not None:
if current.data == target:
print("'{}' found!".format(target))
return True
else:
current = current.next
print("'{}' NOT found.".format(target))
return False
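# Minimal usage sketch (not part of the original module). It assumes the
# 'python_structure' package is importable when run, and that Node exposes the
# 'data' and 'next' attributes the methods above already rely on.
if __name__ == "__main__":
    linked = LinkedList()
    for value in (1, 2, 3):
        linked.append_node(value)
    # return_list() collects the node payloads in insertion order
    assert linked.return_list() == [1, 2, 3]
    # search() prints a found/not-found message and returns a boolean
    assert linked.search(2) is True
    assert linked.search(99) is False
    linked.print_list()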
|
from ..factory import Type
class publicMessageLink(Type):
link = None # type: "string"
html = None # type: "string"
|
'''OpenGL extension SGIX.blend_alpha_minmax
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_SGIX_blend_alpha_minmax'
_DEPRECATED = False
GL_ALPHA_MIN_SGIX = constant.Constant( 'GL_ALPHA_MIN_SGIX', 0x8320 )
GL_ALPHA_MAX_SGIX = constant.Constant( 'GL_ALPHA_MAX_SGIX', 0x8321 )
def glInitBlendAlphaMinmaxSGIX():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
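# Minimal usage sketch (not part of the generated module). It assumes an
# OpenGL context is already current when the function below is called;
# glBlendEquation comes from OpenGL.GL, and the GL_*_SGIX constants defined
# above are used as blend-equation modes as described by the extension spec.
def _example_enable_alpha_min_blend():
    from OpenGL.GL import glBlendEquation
    if glInitBlendAlphaMinmaxSGIX():
        # Select the extension's "minimum alpha" blend equation.
        glBlendEquation(GL_ALPHA_MIN_SGIX)
        return True
    return False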
|