# Copyright (c) 2021 by Don Deel. All rights reserved.
"""
RedfishODataService API Definitions.
Defines REST API behaviors for RedfishODataService.
Allows initial data for instances of this API object to be set.
Based upon Singleton template version 0.9.0
"""
# Standard library module imports
# None
# Third party module imports
from flask_restful import Resource # REST operations
from flask import request # JSON input from REST
# Local module imports
from fish_data import fish # Fish data
import fishem_httpcodes as HTTP # HTTP status codes
# Constants
# None
# Capabilities Restrictions from unversioned schema
# (These items are read-only within the classes below)
#
# None
# The Redfish OData service document is stored in the fish object
# dictionary. The key '/redfish/v1/odata' should only be defined
# when a valid odata service document is present. It can be defined
# here (see below) or by loading fishem from a mockup or a fish file.
# Mockups and fish files loaded into fishem can overwrite any
# initial object defined here.
#
# An initial odata document can be defined like this:
# fish['/redfish/v1/odata'] = {
# << JSON odata service document contents>>
# }
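#
# For illustration only (not part of fishem itself), a minimal OData
# service document per the DMTF Redfish specification typically has
# this shape; the actual entries depend on the loaded mockup:
#
# fish['/redfish/v1/odata'] = {
#     "@odata.context": "/redfish/v1/$metadata",
#     "value": [
#         {"name": "Service", "kind": "Singleton", "url": "/redfish/v1/"},
#         {"name": "Systems", "kind": "Singleton", "url": "/redfish/v1/Systems"}
#     ]
# }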
class RedfishODataService(Resource):
"""Defines API behaviors for the Redfish OData Service
document.
Defined: GET.
flask_restful provides default support for HEAD and OPTIONS.
It also provides a 405 (Method Not Allowed) response for any
RESTful service requests that are not defined.
"""
def __init__(self):
"""Handles class setup. Called by flask_restful prior to
each and every REST operation handled by this class."""
# End of __init__()
def get(self):
"""Defines GET behavior. Called by flask_restful."""
# TODO: Generate an OData service document on demand, if possible
# Handle GET request
inst_key = request.path
# fish keys do not have trailing slashes
if inst_key.endswith('/'):
inst_key += '/'
inst_key = inst_key.replace('//', '')
# Ensure object is in the fish object dictionary
if inst_key not in fish:
return 'Object not found', HTTP.NOT_FOUND
# Return the requested object
return fish[inst_key], HTTP.OK
# End of get()
# Activate the API module
def activate(rest_api):
# Register the URIs that this API module responds to
rest_api.add_resource(RedfishODataService,
'/redfish/v1/odata',
'/redfish/v1/odata/')
return
# End of activate()
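# A minimal sketch (hypothetical wiring, not taken from fishem itself) of how
# activate() might be attached to a Flask application through flask_restful:
#
#   from flask import Flask
#   from flask_restful import Api
#
#   app = Flask(__name__)
#   activate(Api(app))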
|
class FindMin():
def __init__(self,num1,num2,num3):
self.num1=num1
self.num2=num2
self.num3=num3
def find_min(self):
smallest=self.num1
if smallest > self.num2:
smallest=self.num2
if smallest > self.num3:
smallest=self.num3
return smallest
# Main Program
x = FindMin(2, -5, 25)
print(x.find_min())
|
# This code is all CDS code
# Author: David Wu (dwu@broadinstitute.org)
import collections
import pandas as pd
import pickle
import random
import sys
from scipy.special import comb
from scipy.stats import pearsonr
from sklearn.ensemble import RandomForestRegressor
# Generate 10 random splits of cell lines for a tissue to fewshot over
def get_10_splits(cell_lines_in_tissue, num_cell_lines_in_training_set):
set_of_combs = set()
while len(set_of_combs) < min(comb(len(cell_lines_in_tissue), num_cell_lines_in_training_set, exact=True), 10):
random_training_set = tuple(random.sample(cell_lines_in_tissue, num_cell_lines_in_training_set))
set_of_combs.add(random_training_set)
return set_of_combs
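# Example (hypothetical cell line IDs): get_10_splits(['ACH-001', 'ACH-002', 'ACH-003'], 2)
# returns a set of min(C(3, 2), 10) = 3 randomly sampled training-set tuples, e.g.
# {('ACH-001', 'ACH-002'), ('ACH-003', 'ACH-001'), ('ACH-002', 'ACH-003')}
# (order within a tuple is not normalized).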
# Convert defaultdict to regular for pickling
def default_to_regular(d):
if isinstance(d, collections.defaultdict):
d = {k: default_to_regular(v) for k, v in d.items()}
return d
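# Example: default_to_regular(collections.defaultdict(list, {'a': [1]})) -> {'a': [1]}
# Needed because defaultdicts built with lambda factories (as in main() below) cannot be pickled.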
def main():
logfile = open("log.txt", "w+")
feature_matrix = pd.read_csv("achilles_data/feature_matrix.csv", header=0, index_col=0)
labels_matrix = pd.read_csv("achilles_data/labels_matrix.csv", header=0, index_col=0)
# K shot learning to perform
FEWSHOT_START = 0
FEWSHOT_END = 10
# get gene to predict over
argv = sys.argv[1:]
    if len(argv) != 1:
        raise ValueError('Expected exactly one command-line argument: the gene to predict.')
gene = argv[0]
# Read in necessary files
with open('achilles_data/lineages_to_fewshot_over.pkl', 'rb') as f:
lineages_to_fewshot_over = pickle.load(f)
with open('achilles_data/gene_to_features.pkl', 'rb') as f:
gene_to_features = pickle.load(f)
# Get feature set
final_features_to_use = gene_to_features[gene]
logfile.write(f"Number of features used: {len(final_features_to_use)}\n")
# Quit if gene has no features
print(f"Gene: {gene} has {len(final_features_to_use)} features!")
if len(final_features_to_use) == 0:
quit()
# prepare feature matrix
final_features_to_use = list(final_features_to_use)
feature_matrix = feature_matrix[final_features_to_use]
all_cell_lines = feature_matrix.index.tolist()
# Dictionary mapping tissue to gene to list of k shot performances
tissue_to_gene_to_corrlation_list = collections.defaultdict(lambda : collections.defaultdict(lambda : collections.defaultdict(list)))
# Replicate TCRP fewshot learning process on random forest
for tissue in lineages_to_fewshot_over:
sys.stderr.write(f"Tissue: {tissue}")
cell_lines_in_tissue = lineages_to_fewshot_over[tissue]
for num_cell_lines_in_training_set in range(FEWSHOT_START, FEWSHOT_END + 1):
cell_lines_to_include_in_training = get_10_splits(cell_lines_in_tissue, num_cell_lines_in_training_set)
for cell_line_set in cell_lines_to_include_in_training:
test_set_cell_lines = [element for element in cell_lines_in_tissue if element not in cell_line_set]
training_set_cell_lines = [element for element in all_cell_lines if element not in test_set_cell_lines]
                if len(set(test_set_cell_lines) & set(training_set_cell_lines)) != 0:
                    raise ValueError('Training and test cell line sets overlap.')
train_features = feature_matrix.loc[training_set_cell_lines]
test_features = feature_matrix.loc[test_set_cell_lines]
train_labels = labels_matrix.loc[training_set_cell_lines][gene]
test_labels = labels_matrix.loc[test_set_cell_lines][gene]
model = RandomForestRegressor(n_estimators=100, max_depth=8, min_samples_leaf=5, n_jobs=-1, random_state=0)
model.fit(train_features, train_labels)
predictions = model.predict(test_features)
correlation = pearsonr(test_labels.tolist(), list(predictions))[0]
tissue_to_gene_to_corrlation_list[tissue][gene][num_cell_lines_in_training_set].append(correlation)
tissue_to_gene_to_corrlation_list = default_to_regular(tissue_to_gene_to_corrlation_list)
    # output results
with open(f'{gene}_results_dict.pkl', 'wb') as handle:
pickle.dump(tissue_to_gene_to_corrlation_list, handle)
if __name__ == '__main__':
    main()
|
# coding: utf-8
# Author: 雪山凌狐
# website: http://www.xueshanlinghu.com
# version: 1.1
# update_date: 2020-01-28
# A self-contained SMTP mail-sending helper, typically used to send mail via the QQ Mail SMTP service
import smtplib
import email.utils
from email.message import EmailMessage
import os
try:
    # Import style used when this module is imported as part of the package
    from .xs_sendmail_setting import sender, receiver, username, password
except ImportError:
    # Import style used when running this python file directly for testing
    from xs_sendmail_setting import sender, receiver, username, password
def sendmail(sender = sender,
             receiver = receiver,
             username = username,
             password = password,
             subject = 'Python3 mail sending test',
             content_text = 'This is a test email. Receiving it means the server can send mail normally.',
             cc_receiver = '',
             bcc_receiver = '',
             content_type = 'plain',
             host = 'smtp.qq.com',
             port = 465,
             img_list = [],
             attach_list = [],
             debug = 0):
    '''
    Help for the mail sending function:
    ==============================
    Parameters:
    sender        Sender, string. Only one address is allowed, e.g. 'xxx<xxx@xxx.com>'. A display name is
                  optional. Note that the mailbox must match the authenticated login account.
    receiver      Recipients, string. Multiple addresses may be given, separated by ASCII commas, e.g.
                  'xxx<xxx@xxx.com>,xxx@xxx.com'. Display names are optional.
    username      Login user name, string
    password      Login password, string
    subject       Subject, string. Defaults to 'Python3 mail sending test'
    content_text  Mail body, string. May be plain text or HTML content. Defaults to 'This is a test email.
                  Receiving it means the server can send mail normally.'
    cc_receiver   Cc recipients, string. Multiple addresses may be given, separated by ASCII commas,
                  e.g. 'xxx<xxx@xxx.com>,xxx@xxx.com'. Display names are optional. Defaults to ''
    bcc_receiver  Bcc recipients, who receive the mail but are not shown in the recipient list. string.
                  Multiple addresses may be given, separated by ASCII commas,
                  e.g. 'xxx<xxx@xxx.com>,xxx@xxx.com'. Display names are optional. Defaults to ''
    content_type  Mail body type, string. Use 'plain' for plain text and 'html' for HTML content.
                  Defaults to 'plain'
    host          SMTP server host, string. Defaults to 'smtp.qq.com'
    port          Port, int. Defaults to 465
    img_list      Image list (referenced in the body), list. Each element is a dict with keys:
                  'imgpath' image path, relative or absolute (on Windows use double backslashes);
                  'imgID' image ID referenced in the HTML body, which can be generated automatically.
                  If an image is not referenced in the body and is meant as an attachment, pass it
                  via the attach_list parameter instead.
                  Defaults to []
                  Example:
                      import email.utils
                      image_id = email.utils.make_msgid()
                      # a full msgid may be too long and cause problems, so keep only part of it
                      image_id = image_id[1:13]
                      content_text = '<img src="cid:' + image_id + '" />'
                      content_type = 'html'
                      img_list = [{"imgpath": "邮件发送\\图片\\star.jpg", "imgID": image_id}]
    attach_list   Attachment list, list. Each element is a string with an attachment path, relative or
                  absolute (on Windows use double backslashes).
                  Defaults to []
                  Example:
                      attach_list = ["邮件发送\\附件\\附件一张.jpg", "邮件发送\\附件\\小人走路.gif"]
    debug         Debug mode. A non-zero value prints some debug information. Defaults to 0
    ==============================
    Other notes:
    1. The function can be tested without passing any arguments
    2. This file can be run directly for testing
    '''
    # Create the mail object
    msg = EmailMessage()
    # Set the mail body content
    msg.set_content(content_text, content_type, 'utf-8')
    # Set the subject
    msg['Subject'] = subject
    # Set the sender
    msg['From'] = sender
    # Set the recipients. Multiple addresses are passed as one comma-separated string and handled
    # automatically; the "nickname<address>" form is supported
    msg['To'] = receiver.split(",")
    receiverlist = receiver
    # Set the Cc recipients. Multiple addresses are passed as one comma-separated string and handled
    # automatically; the "nickname<address>" form is supported
    if cc_receiver != '':
        msg['Cc'] = cc_receiver.split(",")
        receiverlist = receiverlist + "," + cc_receiver
    # Set the Bcc recipients. Multiple addresses are passed as one comma-separated string and handled
    # automatically; the "nickname<address>" form is supported
    if bcc_receiver != '':
        receiverlist = receiverlist + "," + bcc_receiver
    # Attach the images referenced in the body
    for imginfo in img_list:
        if imginfo.get("imgpath") and imginfo.get("imgID"):
            if os.path.exists(imginfo["imgpath"]):
                with open(imginfo["imgpath"], 'rb') as f:
                    msg.add_attachment(f.read(),
                                       maintype = 'image',
                                       subtype = os.path.splitext(imginfo["imgpath"])[1].replace(".", ""),
                                       filename = os.path.split(imginfo["imgpath"])[1],
                                       cid = imginfo["imgID"])
    # Load the attachments:
    for attach_file in attach_list:
        if os.path.exists(attach_file):
            with open(attach_file, 'rb') as f:
                # Plain attachments are not referenced in the body, so no cid is needed
                msg.add_attachment(f.read(),
                                   maintype = 'application',
                                   subtype = os.path.splitext(attach_file)[1].replace(".", ""),
                                   filename = os.path.split(attach_file)[1])
    # Actually send the mail
    try:
        # Create the SMTP connection; an SSL connection must be used!
        conn = smtplib.SMTP_SSL(host, port = port)
        # Set the debug level: 0 prints no debug output, 1 prints debug output
        if debug != 0:
            conn.set_debuglevel(1)
        # Log in to the server
        conn.login(username, password)
        # Send the mail
        conn.sendmail(sender, receiverlist.split(","), msg.as_string())
        # Close the connection
        conn.quit()
        if debug != 0:
            print("\nRaw mail content:\n" + msg.as_string())
        return True
    except Exception as e:
        print('Error while sending mail: %s' % (str(e)))
        return False
# Quickly wrap the given content in <p> tags
def text_html(text):
    '''
    Quickly wrap the given content in <p> tags
    '''
    return "<p>%s</p>" % text
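# For example, text_html('hello') returns '<p>hello</p>'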
if __name__ == '__main__':
    # Mail sending test
    sender = sender
    receiver = receiver
    subject = 'Test mail subject'
    # Plain text body test
    # content_text = 'If you can see this, the test succeeded!'
    # Plain text body test
    # Quick HTML text generation test
    # content_text = 'If you can see this, the test succeeded!\nThis is the second line\nThis is the third line'
    # final_content_text = ''
    # for i in content_text.split("\n"):
    #     print(i, text_html(i))
    #     final_content_text = final_content_text + text_html(i)
    # print(final_content_text)
    # content_text = final_content_text
    # content_type = 'html'
    # Quick HTML text generation test
    # Body with image test
    # image_id = email.utils.make_msgid()
    # # a full msgid may be too long and cause problems, so keep only part of it
    # image_id = image_id[1:13]
    # # print(image_id)
    # content_text = 'If you can see this, the test succeeded!\nHere is an image:\n<img src="cid:' + image_id + '" />'
    # final_content_text = ''
    # for i in content_text.split("\n"):
    #     # print(i, text_html(i))
    #     final_content_text = final_content_text + text_html(i)
    # # print(final_content_text)
    # content_text = final_content_text
    # content_type = 'html'
    # img_list = [{"imgpath": "邮件发送\\图片\\star.jpg", "imgID": image_id}]
    # Body with image test
    # Body with image and attachments test
    image_id = email.utils.make_msgid()
    # a full msgid may be too long and cause problems, so keep only part of it
    image_id = image_id[1:13]
    # print(image_id)
    content_text = 'If you can see this, the test succeeded!\nHere is an image:\n<img src="cid:' + image_id + '" />'
    final_content_text = ''
    for i in content_text.split("\n"):
        print(i, text_html(i))
        final_content_text = final_content_text + text_html(i)
    print(final_content_text)
    content_text = final_content_text
    content_type = 'html'
    img_list = [{"imgpath": "邮件发送\\图片\\star.jpg", "imgID": image_id}]
    attach_list = ["邮件发送\\附件\\附件一张.jpg", "邮件发送\\附件\\小人走路.gif"]
    # Body with image and attachments test
    cc_receiver = ''
    bcc_receiver = ''
    res = sendmail(sender = sender,
                   receiver = receiver,
                   subject = subject,
                   content_text = content_text,
                   debug = 1,
                   cc_receiver = cc_receiver,
                   bcc_receiver = bcc_receiver,
                   content_type = content_type,
                   img_list = img_list,
                   attach_list = attach_list)
    if res:
        print("Mail sent successfully!")
    else:
        print("Failed to send mail!")
|
from django.urls import path
from django.conf.urls import url
from . import views
urlpatterns = [
#path('signup/', views.SignUp.as_view(), name='signup'),
path('camera/cholesterol_login/', views.cholesterol_login, name='cholesterol_login'),
path('camera/bilirubin_login/', views.bilirubin_login, name='bilirubin_login'),
path('camera/cataract_login/', views.cataract_login, name='cataract_login'),
path('camera/cholesterol/', views.cholesterol_, name='cholesterol_'),
path('camera/bilirubin/', views.bilirubin_, name='bilirubin_'),
path('camera/cataract/', views.cataract_, name='cataract_'),
path('logout/', views.logout, name='logout'),
path('history/', views.history, name='history'),
path('home/', views.search_form),
url(r'^signup/$', views.signup),
path('login/', views.login),
url(r'^aadhar/$', views.aadhar),
url(r'^aadhar2/$', views.aadhar2),
path('about/', views.about),
path('contact/', views.contact),
path('login_only_redirect/', views.login_only_redirect),
path('direct_test/', views.direct_test),
path('diagnosis_registered/', views.diagnosis_registered),
path('notifyform/', views.notifyform),
path('notifyformexec/', views.notifyformexec),
path('genform/', views.genform),
path('gen_diagnosis/', views.gen_diagnosis),
path('finddoctors/', views.finddoctors),
path('diagnosis_option/', views.diagnosis_option),
path('camera/cholesterol_login_module/', views.cholesterol_login_module),
path('camera/bilirubin_login_module/', views.bilirubin_login_module),
path('camera/cataract_login_module/', views.cataract_login_module),
]
|
# -*- coding: utf-8 -*-
from .util.pyutil import ChemPyDeprecationWarning
import warnings
from .electrolytes import (
A,
B,
limiting_log_gamma,
extended_log_gamma,
davies_log_gamma,
limiting_activity_product,
extended_activity_product,
davies_activity_product,
)
warnings.warn("use .electrolytes instead of .debye_huckel", ChemPyDeprecationWarning)
|
from empire import *
from empire.enums.base_enum import BaseEnum
from typing import Final  # used for the annotations below, in case the star import does not provide it
class PythonTokenTypes(BaseEnum):
ENDMARKER: Final[int] = 0
NAME: Final[int] = 1
NUMBER: Final[int] = 2
STRING: Final[int] = 3
NEWLINE: Final[int] = 4
INDENT: Final[int] = 5
DEDENT: Final[int] = 6
LPAR: Final[int] = 7
RPAR: Final[int] = 8
LSQB: Final[int] = 9
RSQB: Final[int] = 10
COLON: Final[int] = 11
COMMA: Final[int] = 12
SEMI: Final[int] = 13
PLUS: Final[int] = 14
MINUS: Final[int] = 15
STAR: Final[int] = 16
SLASH: Final[int] = 17
VBAR: Final[int] = 18
AMPER: Final[int] = 19
LESS: Final[int] = 20
GREATER: Final[int] = 21
EQUAL: Final[int] = 22
DOT: Final[int] = 23
PERCENT: Final[int] = 24
LBRACE: Final[int] = 25
RBRACE: Final[int] = 26
EQEQUAL: Final[int] = 27
NOTEQUAL: Final[int] = 28
LESSEQUAL: Final[int] = 29
GREATEREQUAL: Final[int] = 30
TILDE: Final[int] = 31
CIRCUMFLEX: Final[int] = 32
LEFTSHIFT: Final[int] = 33
RIGHTSHIFT: Final[int] = 34
DOUBLESTAR: Final[int] = 35
PLUSEQUAL: Final[int] = 36
MINEQUAL: Final[int] = 37
STAREQUAL: Final[int] = 38
SLASHEQUAL: Final[int] = 39
PERCENTEQUAL: Final[int] = 40
AMPEREQUAL: Final[int] = 41
VBAREQUAL: Final[int] = 42
CIRCUMFLEXEQUAL: Final[int] = 43
LEFTSHIFTEQUAL: Final[int] = 44
RIGHTSHIFTEQUAL: Final[int] = 45
DOUBLESTAREQUAL: Final[int] = 46
DOUBLESLASH: Final[int] = 47
DOUBLESLASHEQUAL: Final[int] = 48
AT: Final[int] = 49
ATEQUAL: Final[int] = 50
RARROW: Final[int] = 51
ELLIPSIS: Final[int] = 52
COLONEQUAL: Final[int] = 53
OP: Final[int] = 54
AWAIT: Final[int] = 55
ASYNC: Final[int] = 56
TYPE_IGNORE: Final[int] = 57
TYPE_COMMENT: Final[int] = 58
# These aren't used by the C tokenizer but are needed for tokenize.py
ERRORTOKEN: Final[int] = 59
COMMENT: Final[int] = 60
NL: Final[int] = 61
ENCODING: Final[int] = 62
N_TOKENS: Final[int] = 63
# Special definitions for cooperation with parser
NT_OFFSET: Final[int] = 256
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
from services import CCTVService
class CCTV_Api(object):
@staticmethod
def main():
while 1:
service = CCTVService()
menu = input('0-Exit, 1-read_csv 2-read_xls 3-read_json')
if menu == '0':
break
elif menu == '1':
service.csv({'context':'./data/', 'fname':'cctv_in_seoul'})
elif menu == '2':
service.xls({'context':'./data/', 'fname':'pop_in_seoul'})
elif menu == '3':
service.json({'context':'./data/', 'fname':'geo_simple'})
else:
continue
CCTV_Api.main()
|
import unittest
import numpy
import pytest
import chainer
from chainer import backend
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer import functions
from chainer import testing
import chainerx
def _to_gpu(x, device_id):
if device_id >= 0:
return cuda.to_gpu(x, device_id)
else:
return x
_nonchainerx_backend_configs = (
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
])
_chainerx_backend_configs = (
[
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
_numpy_device = chainer.get_device('@numpy')
class CopyTestBase(object):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (10, 5)).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, (10, 5)).astype(self.dtype)
self.ggx = numpy.random.uniform(-1, 1, (10, 5)).astype(self.dtype)
self.check_double_backward_options = {}
if self.dtype == numpy.float16:
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
def check_forward(self, dst_device_spec, src_device, dst_device):
x = src_device.send(self.x)
x_var = chainer.Variable(x)
y = functions.copy(x_var, dst_device_spec)
assert y.device == dst_device
assert backend.get_device_from_array(y.array) == dst_device
assert y.dtype == self.dtype
numpy.testing.assert_array_equal(_numpy_device.send(y.array), self.x)
def test_forward(self, src_backend_config, dst_backend_config):
self.check_forward(
dst_backend_config.device,
src_backend_config.device,
dst_backend_config.device)
def test_backward(self, src_backend_config, dst_backend_config):
x = src_backend_config.get_array(self.x)
gy = dst_backend_config.get_array(self.gy)
src_device = src_backend_config.device
dst_device = dst_backend_config.device
x_var = chainer.Variable(x, requires_grad=True)
y_var = functions.copy(x_var, dst_device)
y_var.grad = gy
y_var.backward()
x_grad = x_var.grad
assert x_var.grad_var.device == src_device
assert backend.get_device_from_array(x_grad) == src_device
numpy.testing.assert_array_equal(_numpy_device.send(x_grad), self.gy)
def test_double_backward(self, src_backend_config, dst_backend_config):
x = src_backend_config.get_array(self.x)
gy = dst_backend_config.get_array(self.gy)
ggx = src_backend_config.get_array(self.ggx)
dst_device = dst_backend_config.device
x_var = chainer.Variable(x, requires_grad=True)
y_var = functions.copy(x_var, dst_device)
y_var.grad = gy
gy_var = y_var.grad_var
y_var.backward(enable_double_backprop=True)
assert x_var.grad_var.requires_grad is True
x_var.grad_var.grad = ggx
x_var.grad_var.backward()
assert gy_var.grad_var.device == dst_device
assert (
backend.get_device_from_array(gy_var.grad_var.array)
== dst_device)
numpy.testing.assert_array_equal(
_numpy_device.send(gy_var.grad_var.array), self.ggx)
@testing.inject_backend_tests(None, _nonchainerx_backend_configs)
@testing.inject_backend_tests(None, _nonchainerx_backend_configs)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestCopyNonChainerx(CopyTestBase, unittest.TestCase):
def test_forward_int(self, src_backend_config, dst_backend_config):
src_device = src_backend_config.device
dst_device = dst_backend_config.device
if dst_device.xp is numpy:
dst_device_spec = -1
elif dst_device.xp is chainer.backends.cuda.cupy:
dst_device_spec = dst_device.device.id
else:
assert False, dst_device
self.check_forward(
dst_device_spec,
src_device,
dst_device)
def test_forward_str(self, src_backend_config, dst_backend_config):
src_device = src_backend_config.device
dst_device = dst_backend_config.device
if dst_device.xp is numpy:
dst_device_spec = '@numpy'
elif dst_device.xp is chainer.backends.cuda.cupy:
dst_device_spec = '@cupy:{}'.format(dst_device.device.id)
else:
assert False, dst_device
self.check_forward(
dst_device_spec,
src_device,
dst_device)
@testing.inject_backend_tests(None, _chainerx_backend_configs)
@testing.inject_backend_tests(None, _chainerx_backend_configs)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestCopyChainerx(CopyTestBase, unittest.TestCase):
def test_forward_str(self, src_backend_config, dst_backend_config):
src_device = src_backend_config.device
dst_device = dst_backend_config.device
dst_device_spec = dst_device.device.name
self.check_forward(
dst_device_spec,
src_device,
dst_device)
@testing.inject_backend_tests(None, _chainerx_backend_configs)
@testing.inject_backend_tests(None, _nonchainerx_backend_configs)
class TestCopyBetweenChainerxAndNonChainerx(unittest.TestCase):
# Copy between non-ChainerX and ChainerX devices are not supported.
dtype = numpy.float32
def check_invalid(self, src_device, dst_device_spec):
x = src_device.send(
numpy.random.uniform(-1, 1, (10, 5)).astype(self.dtype))
x_var = chainer.Variable(x)
with pytest.raises(RuntimeError):
functions.copy(x_var, dst_device_spec)
def test_invalid(self, nonchx_backend_config, chx_backend_config):
assert nonchx_backend_config.xp is not chainerx
assert chx_backend_config.xp is chainerx
self.check_invalid(
nonchx_backend_config.device, chx_backend_config.device)
self.check_invalid(
chx_backend_config.device, nonchx_backend_config.device)
# cuda.DummyDevice is not supported either.
self.check_invalid(
chx_backend_config.device, cuda.DummyDevice)
@testing.inject_backend_tests(None, _nonchainerx_backend_configs)
@testing.inject_backend_tests(None, _nonchainerx_backend_configs)
class TestCopyCudaDummyDevice(unittest.TestCase):
def test_dummy_device(self, src_backend_config, current_backend_config):
x_arr = src_backend_config.get_array(numpy.zeros((2, 3)))
with current_backend_config:
y = functions.copy(x_arr, cuda.DummyDevice)
# Always transferred to NumPy device, regardless of the current CUDA
# device.
assert isinstance(y.device, _cpu.CpuDevice)
testing.run_module(__name__, __file__)
|
# -*- coding: utf-8 -*-
# Copyright 2020 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from typing import Iterable
from unittest.mock import Mock
import pytest
from iconservice.base.block import Block
from iconservice.base.block import NULL_BLOCK
from iconservice.precommit_data_manager import PrecommitDataManager
class TestPrecommitDataManager(object):
@pytest.fixture(scope="function")
def manager(self, create_precommit_data_manager):
return create_precommit_data_manager(NULL_BLOCK)
@pytest.fixture(scope="class")
def create_precommit_data_manager(self):
def func(block: "Block"):
manager = PrecommitDataManager()
manager.init(block)
return manager
return func
@pytest.fixture(scope="function")
def create_dummy_precommit_data(self):
def func(block: 'Block'):
data = Mock()
data.block_batch.block = block
return data
return func
@staticmethod
def tx_hash() -> bytes:
"""Returns 32 length random tx_hash in bytes
:return: tx_hash in bytes
"""
print("tx_hash()")
return os.urandom(32)
@staticmethod
def block_hash() -> bytes:
"""Returns 32 length random block_hash in bytes
:return: block_hash in bytes
"""
print("block_hash()")
return os.urandom(32)
@staticmethod
def timestamp() -> int:
return int(time.time() * 1_000_000)
def test_init_with_null_block(self, manager: 'PrecommitDataManager'):
precommit_data = manager.get(bytes(32))
assert precommit_data is None
block: 'Block' = manager.last_block
assert block == NULL_BLOCK
assert manager.get(block.hash) is None
assert len(manager) == 1
def test_with_genesis_block(self, manager: 'PrecommitDataManager', create_dummy_precommit_data):
block_hash = bytes(32)
block = Block(
block_height=0,
timestamp=1234,
block_hash=block_hash,
prev_hash=None,
)
precommit_data = create_dummy_precommit_data(block)
# TEST: push
# null_block -> genesis_block
manager.push(precommit_data)
assert manager.last_block == NULL_BLOCK
assert len(manager) == 2
# TEST: get
# null_block -> genesis_block
assert manager.get(block_hash) == precommit_data
assert len(manager) == 2
# TEST: commit
# genesis_block only
manager.commit(block)
assert manager.last_block == block
assert len(manager) == 1
# TEST clear
manager.clear()
assert manager.last_block == block
assert len(manager) == 1
def test_get(self, create_precommit_data_manager, create_dummy_precommit_data):
"""
parent0 - child0
/
root - parent1 - child1
\
parent2 - child2
:param create_precommit_data_manager:
:return:
"""
block_height = 100
root = Block(
block_height=block_height,
timestamp=self.timestamp(),
block_hash=self.block_hash(),
prev_hash=self.block_hash()
)
parents = []
for i in range(3):
block = Block(
block_height=root.height + 1,
timestamp=self.timestamp(),
block_hash=self.block_hash(),
prev_hash=root.hash
)
parents.append(block)
children = []
for i in range(3):
parent = parents[i]
block = Block(
block_height=parent.height + 1,
prev_hash=parent.hash,
timestamp=self.timestamp(),
block_hash=self.block_hash(),
)
children.append(block)
manager = create_precommit_data_manager(root)
# Push parent blocks
for block in parents:
precommit_data = create_dummy_precommit_data(block)
manager.push(precommit_data)
assert len(manager) == 4
# Push child blocks
for block in children:
precommit_data = create_dummy_precommit_data(block)
manager.push(precommit_data)
assert len(manager) == 7
# There is no precommit_data for root, because root block has been already committed.
assert manager.get(root.hash) is None
for block in parents:
precommit_data = manager.get(block.hash)
assert precommit_data.block_batch.block == block
for block in children:
precommit_data = manager.get(block.hash)
assert precommit_data.block_batch.block == block
def test_push(self, manager):
pass
def test_commit(self, manager):
pass
def validate_block_to_invoke(self, manager):
pass
def validate_block_to_commit(self, manager):
pass
def clear(self):
pass
|
import datetime as dt
import logging
import random
import sqlite3
logger = logging.getLogger(__file__)
with open('data/first_names.txt') as f:
first_names = [n.strip() for n in f]
with open("data/last_names.txt") as f:
last_names = [n.strip() for n in f]
CLASS_NAMES = [
"Mineral Psychology",
"Underwater Basket Weaving",
"The History of Mud",
"Algorithms and Data Structures",
"Programming Languages",
"Natural Language Processing",
"Computational Linguistics",
"Machine Learning",
"Deep Learning",
"The Literature of Science Fiction",
"Creative Writing",
"Statistics",
"Writing Creative Non-fiction",
"Jazz Composition",
"Jazz Piano",
"Classical Piano",
"Music Theory",
"Intro to Biology",
"Computational Biology",
"The Molecular Biology of the Cell",
"Cancer Biology",
"Number Theory",
"The History of Technology",
]
def insert_students(db, n):
sql = """
INSERT INTO students
(first_name, last_name, dob)
VALUES
(?,?,?)
"""
for i in range(1, n+1):
first_name = random.choice(first_names)
last_name = random.choice(last_names)
dob = dt.date.today() - dt.timedelta(days=
(random.normalvariate(19,2) + random.paretovariate(3))
* 365.2421897)
db.execute(sql, (first_name, last_name, dob))
if i % 100 == 0:
db.commit()
db.commit()
sql = """
SELECT id FROM students
"""
cursor = db.execute(sql)
return [row[0] for row in cursor]
def insert_classes(db):
sql = """
INSERT INTO classes
(name)
VALUES
(?)
"""
for name in CLASS_NAMES:
db.execute(sql, (name,))
db.commit()
sql = """
SELECT id FROM classes
"""
cursor = db.execute(sql)
return [row[0] for row in cursor]
def enroll(db, student_id, class_id):
sql = """
INSERT INTO enrollments
(student_id, class_id)
VALUES
(?,?)
"""
db.execute(sql, (student_id, class_id,))
def enrollments(db, student_ids, class_ids):
for student_id in student_ids:
for class_id in random.sample(class_ids, k=random.randint(0,3)):
logger.debug("enrolling %s in %s", student_id, class_id)
# nobody signs up for this class
if class_id == 2: continue
enroll(db, student_id, class_id)
db.commit()
with sqlite3.connect("data/class_enrollments.sqlite.db") as db:
student_ids = insert_students(db, n=1000)
class_ids = insert_classes(db)
enrollments(db, student_ids, class_ids)
|
ano = int(input("Enter the year: "))
# A year is a leap year if it is divisible by 400, or divisible by 4 but not by 100
if ano % 400 == 0 or (ano % 4 == 0 and ano % 100 != 0):
    print('Leap year!!')
else:
    print('Not a leap year!')
|
#!/usr/bin/env python3
#coding=utf-8
import os
import VBREG as REG
import CorpusReader as cr
os.system('clear')
print('Training starts - Tuna-Furniture')
ts,ds,ats = cr.LoadAlignedRegCorpus('Corpora/ETunaF-Aligned2.json')
reg = REG.VBREG(ts,ds,ats)
print('Training completes')
# "targets": [
# "colour=grey;orientation=front;type=desk;size=large"
# ],
# "distractors": [
# "colour=blue;orientation=front;type=desk;size=large",
# "colour=red;orientation=back;type=desk;size=large",
# "colour=green;orientation=left;type=desk;size=small",
# "colour=blue;orientation=front;type=fan;size=large",
# "colour=red;orientation=back;type=fan;size=large",
# "colour=green;orientation=left;type=fan;size=small"
# ],
txt1 = reg.Generate([
('colour','grey',1),('orientation','front',1),('type','desk',1),('size','large',1)
],[
[('colour','blue',1),('orientation','front',1),('type','desk',1),('size','large',1)],
[('colour','red',1),('orientation','back',1),('type','desk',1),('size','large',1)],
[('colour','green',1),('orientation','left',1),('type','desk',1),('size','small',1)],
[('colour','blue',1),('orientation','front',1),('type','fan',1),('size','large',1)],
[('colour','red',1),('orientation','back',1),('type','fan',1),('size','large',1)],
[('colour','green',1),('orientation','left',1),('type','fan',1),('size','small',1)],
])
print('task1:',txt1)
print('corpus text: grey frontal table')
print('')
# "targets": [
# "colour=red;orientation=right;type=chair;size=small"
# ],
# "distractors": [
# "colour=red;orientation=left;type=chair;size=small",
# "colour=blue;orientation=back;type=chair;size=small",
# "colour=grey;orientation=front;type=chair;size=large",
# "colour=red;orientation=left;type=sofa;size=small",
# "colour=blue;orientation=back;type=sofa;size=small",
# "colour=grey;orientation=front;type=sofa;size=large"
# ],
txt2 = reg.Generate([
('colour','red',1),('orientation','right',1),('type','chair',1),('size','small',1)
],[
[('colour','red',1),('orientation','left',1),('type','chair',1),('size','small',1)],
[('colour','blue',1),('orientation','back',1),('type','chair',1),('size','small',1)],
[('colour','grey',1),('orientation','front',1),('type','chair',1),('size','large',1)],
[('colour','red',1),('orientation','left',1),('type','sofa',1),('size','small',1)],
[('colour','blue',1),('orientation','back',1),('type','sofa',1),('size','small',1)],
[('colour','grey',1),('orientation','front',1),('type','sofa',1),('size','large',1)],
])
print('task2:',txt2)
print('corpus text: red large chair')
print('')
print('Training starts - Tuna-People')
ts,ds,ats = cr.LoadAlignedRegCorpus('Corpora/ETunaP-Aligned2.json')
reg = REG.VBREG(ts,ds,ats)
print('Training completes')
# "targets": [
# "age=old;orientation=front;hairColour=light;hasSuit=1;hasShirt=1;hasTie=1;hasBeard=1;hasGlasses=1;hasHair=1"
# ],
# "distractors": [
# "age=old;orientation=left;hairColour=light;hasSuit=0;hasShirt=1;hasTie=0;hasBeard=0;hasGlasses=1;hasHair=1",
# "age=young;orientation=front;hairColour=dark;hasSuit=1;hasShirt=0;hasTie=1;hasBeard=0;hasGlasses=1;hasHair=1",
# "age=old;orientation=front;hairColour=light;hasSuit=1;hasShirt=0;hasTie=1;hasBeard=0;hasGlasses=1;hasHair=1",
# "age=young;orientation=front;hairColour=dark;hasSuit=0;hasShirt=0;hasTie=0;hasBeard=0;hasGlasses=0;hasHair=1",
# "age=young;orientation=front;hairColour=dark;hasSuit=0;hasShirt=1;hasTie=0;hasBeard=0;hasGlasses=0;hasHair=1",
# "age=old;orientation=front;hairColour=light;hasSuit=1;hasShirt=0;hasTie=1;hasBeard=0;hasGlasses=0;hasHair=1"
# ],
txt1 = reg.Generate([
('age','old',1),('orientation','front',1),('hairColour','light',1),('hasSuit','1',1),('hasShirt','1',1),('hasTie','1',1),('hasBeard','1',1),('hasGlasses','1',1),('hasHair','1',1)
],[
[('age','old',1),('orientation','left',1),('hairColour','light',1),('hasSuit','0',1),('hasShirt','1',1),('hasTie','0',1),('hasBeard','0',1),('hasGlasses','1',1),('hasHair','1',1)],
[('age','young',1),('orientation','front',1),('hairColour','dark',1),('hasSuit','1',1),('hasShirt','0',1),('hasTie','1',1),('hasBeard','0',1),('hasGlasses','1',1),('hasHair','1',1)],
[('age','old',1),('orientation','front',1),('hairColour','light',1),('hasSuit','1',1),('hasShirt','0',1),('hasTie','1',1),('hasBeard','0',1),('hasGlasses','1',1),('hasHair','1',1)],
[('age','young',1),('orientation','front',1),('hairColour','dark',1),('hasSuit','0',1),('hasShirt','0',1),('hasTie','0',1),('hasBeard','0',1),('hasGlasses','0',1),('hasHair','1',1)],
[('age','young',1),('orientation','front',1),('hairColour','dark',1),('hasSuit','0',1),('hasShirt','1',1),('hasTie','0',1),('hasBeard','0',1),('hasGlasses','0',1),('hasHair','1',1)],
[('age','old',1),('orientation','front',1),('hairColour','light',1),('hasSuit','1',1),('hasShirt','0',1),('hasTie','1',1),('hasBeard','0',1),('hasGlasses','0',1),('hasHair','1',1)],
])
print('task1:',txt1)
print('corpus text: beard, glasses, old')
print('')
# "targets": [
# "age=young;orientation=front;hairColour=dark;hasSuit=0;hasShirt=1;hasTie=0;hasBeard=1;hasGlasses=0;hasHair=1"
# ],
# "distractors": [
# "age=young;orientation=front;hairColour=light;hasSuit=1;hasShirt=0;hasTie=1;hasBeard=0;hasGlasses=0;hasHair=1",
# "age=young;orientation=front;hairColour=dark;hasSuit=1;hasShirt=0;hasTie=1;hasBeard=0;hasGlasses=0;hasHair=1",
# "age=young;orientation=right;hairColour=dark;hasSuit=0;hasShirt=1;hasTie=0;hasBeard=0;hasGlasses=0;hasHair=1",
# "age=old;orientation=front;hairColour=light;hasSuit=1;hasShirt=0;hasTie=1;hasBeard=0;hasGlasses=0;hasHair=1",
# "age=old;orientation=front;hairColour=light;hasSuit=1;hasShirt=0;hasTie=1;hasBeard=0;hasGlasses=0;hasHair=1",
# "age=old;orientation=right;hairColour=light;hasSuit=1;hasShirt=0;hasTie=1;hasBeard=0;hasGlasses=0;hasHair=1"
# ],
txt2 = reg.Generate([
('age','young',1),('orientation','front',1),('hairColour','dark',1),('hasSuit','0',1),('hasShirt','1',1),('hasTie','0',1),('hasBeard','1',1),('hasGlasses','0',1),('hasHair','1',1)
],[
[('age','young',1),('orientation','front',1),('hairColour','light',1),('hasSuit','1',1),('hasShirt','0',1),('hasTie','1',1),('hasBeard','0',1),('hasGlasses','0',1),('hasHair','1',1)],
[('age','young',1),('orientation','front',1),('hairColour','dark',1),('hasSuit','1',1),('hasShirt','0',1),('hasTie','1',1),('hasBeard','0',1),('hasGlasses','0',1),('hasHair','1',1)],
[('age','young',1),('orientation','right',1),('hairColour','dark',1),('hasSuit','0',1),('hasShirt','1',1),('hasTie','0',1),('hasBeard','0',1),('hasGlasses','0',1),('hasHair','1',1)],
[('age','old',1),('orientation','front',1),('hairColour','light',1),('hasSuit','1',1),('hasShirt','0',1),('hasTie','1',1),('hasBeard','0',1),('hasGlasses','0',1),('hasHair','1',1)],
[('age','old',1),('orientation','front',1),('hairColour','light',1),('hasSuit','1',1),('hasShirt','0',1),('hasTie','1',1),('hasBeard','0',1),('hasGlasses','0',1),('hasHair','1',1)],
[('age','old',1),('orientation','right',1),('hairColour','light',1),('hasSuit','1',1),('hasShirt','0',1),('hasTie','1',1),('hasBeard','0',1),('hasGlasses','0',1),('hasHair','1',1)],
])
print('task2:', txt2)
print('corpus text: guy with barb')
print('')
|
import os
from math import cos, sin, pi
def rotate_vector(theta, vector):
    # Rotation by the anti-clockwise rotation matrix
    R = [[cos(theta), -sin(theta)], [sin(theta), cos(theta)]]
    new_vector = [0, 0]
    for i in range(2):
        for j in range(2):
            new_vector[i] += R[i][j] * vector[j]
return new_vector
def ConvertToRadians(angle):
return angle*pi/180
def ConvertToAngles(angle):
return angle*180/pi
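
if __name__ == '__main__':
    # A minimal sanity check added for illustration (not part of the original module):
    # rotating the unit vector (1, 0) by 90 degrees should give approximately (0, 1).
    print(rotate_vector(ConvertToRadians(90), [1, 0]))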
|
import os
SAMPLES_DIR = '/home/pi/ArchSound/ArchSound/samples/'
SAMPLES_CONFIG = [
{'zone': '1', 'space': SAMPLES_DIR + '01_molenstraat/space.wav', 'steps': SAMPLES_DIR + '01_molenstraat/steps.wav'},
{'zone': '2', 'space': SAMPLES_DIR + '02_pocket_park/space_steps.wav', 'steps': ''},
{'zone': '3', 'space': SAMPLES_DIR + '03_pocket_park_2/space.wav', 'steps': SAMPLES_DIR + '03_pocket_park_2/steps.wav'},
{'zone': '4', 'space': SAMPLES_DIR + '04_atrium/space.wav', 'steps': SAMPLES_DIR + '04_atrium/steps.wav'},
    {'zone': '5', 'space': SAMPLES_DIR + '05_playground/space_steps.wav', 'steps': ''},
{'zone': '6', 'space': SAMPLES_DIR + '06_skatepark/space.wav', 'steps': ''},
{'zone': '7', 'space': SAMPLES_DIR + '07_green_belt/space.wav', 'steps': SAMPLES_DIR + '07_green_belt/steps.wav'},
{'zone': '8', 'space': SAMPLES_DIR + '08_green_belt_2/space.wav', 'steps': SAMPLES_DIR + '08_green_belt_2/steps.wav'},
{'zone': '9', 'space': SAMPLES_DIR + '09_tws/space.wav', 'steps': SAMPLES_DIR + '09_tws/steps.wav'},
{'zone': '10', 'space': SAMPLES_DIR + '10_ziekerstraat/space.wav', 'steps': SAMPLES_DIR + '10_ziekerstraat/steps.wav'},
{'zone': '11', 'space': SAMPLES_DIR + '11_meditation_garden/space.wav', 'steps': SAMPLES_DIR + '11_meditation_garden/steps.wav'}
]
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import os
import traceback
import requests
import zipfile
import io
from datetime import datetime as dt
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
# Remove trailing slash to prevent wrong URL path to service
API_URL = demisto.params()['url'].rstrip('/')
# Should we use SSL
USE_SSL = not demisto.params().get('insecure', False)
# Remove proxy if not set to true in params
if not demisto.params().get('proxy'):
os.environ.pop('HTTP_PROXY', None)
os.environ.pop('HTTPS_PROXY', None)
os.environ.pop('http_proxy', None)
os.environ.pop('https_proxy', None)
THRESHOLD = int(demisto.params().get('threshold', 1))
# disable-secrets-detection-start
# Whether compromised websites are considered malicious or not. See the blacklists output in
# https://urlhaus-api.abuse.ch/
# disable-secrets-detection-end
COMPROMISED_IS_MALICIOUS = demisto.params().get('compromised_is_malicious', False)
# Headers to be sent in requests
HEADERS = {
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'application/json'
}
''' HELPER FUNCTIONS '''
def http_request(method, command, data=None):
retry = int(demisto.params().get('retry', 3))
try_num = 0
while try_num < retry:
try_num += 1
url = f'{API_URL}/{command}/'
res = requests.request(method,
url,
verify=USE_SSL,
data=data,
headers=HEADERS)
if res.status_code == 200:
return res
raise Exception(f'Error in API call {url} [{res.status_code}] - {res.reason}')
def reformat_date(date):
try:
return dt.strptime(date.rstrip(' UTC'), '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%dT%H:%M:%S')
except Exception:
return 'Unknown'
def extract_zipped_buffer(buffer):
with io.BytesIO() as bio:
bio.write(buffer)
with zipfile.ZipFile(bio) as z:
return z.read(z.namelist()[0])
def query_url_information(url):
return http_request('POST',
'url',
f'url={url}')
def query_host_information(host):
return http_request('POST',
'host',
f'host={host}')
def query_payload_information(hash_type, hash):
return http_request('POST',
'payload',
f'{hash_type}_hash={hash}')
def download_malware_sample(sha256):
return http_request('GET',
f'download/{sha256}')
''' COMMANDS + REQUESTS FUNCTIONS '''
def test_module():
"""
Performs basic get request to get item samples
"""
http_request('POST', 'url')
def calculate_dbot_score(blacklists, threshold, compromised_is_malicious):
dbot_score = 0
description = 'Not listed in any blacklist'
blacklist_appearances = []
for blacklist, status in blacklists.items():
if blacklist == 'spamhaus_dbl':
if status.endswith('domain') or (status.startswith('abused') and compromised_is_malicious):
blacklist_appearances.append((blacklist, status))
elif status == 'listed':
blacklist_appearances.append((blacklist, None))
if len(blacklist_appearances) >= threshold:
description = ''
for appearance in blacklist_appearances:
            if appearance[1] is not None:
                description += f'Listed as {appearance[1]} in {appearance[0]}. '
            else:
                description += f'Listed in {appearance[0]}. '
dbot_score = 3
elif len(blacklist_appearances) > 0:
dbot_score = 2
else:
dbot_score = 1
return dbot_score, description
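# For example (hypothetical input), calculate_dbot_score({'surbl': 'listed', 'spamhaus_dbl': 'not listed'}, 1, False)
# returns (3, 'Listed in surbl. ') because a single blacklist listing meets the threshold of 1.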
def url_command():
url = demisto.args().get('url')
try:
url_information = query_url_information(url).json()
ec = {
'URL': {
'Data': url
},
'DBotScore': {
'Type': 'url',
'Vendor': 'URLhaus',
'Indicator': url
}
}
if url_information['query_status'] == 'ok':
# URLhaus output
blacklist_information = []
blacklists = url_information.get('blacklists', {})
for bl_name, bl_status in blacklists.items():
blacklist_information.append({'Name': bl_name,
'Status': bl_status})
date_added = reformat_date(url_information.get('date_added'))
urlhaus_data = {
'ID': url_information.get('id', ''),
'Status': url_information.get('url_status', ''),
'Host': url_information.get('host', ''),
'DateAdded': date_added,
'Threat': url_information.get('threat', ''),
'Blacklist': blacklist_information,
'Tags': url_information.get('tags', [])
}
payloads = []
for payload in url_information.get('payloads', []):
vt_data = payload.get('virustotal', None)
vt_information = None
if vt_data:
vt_information = {
'Result': float(vt_data.get('percent', 0)),
'Link': vt_data.get('link', '')
}
payloads.append({
'Name': payload.get('filename', 'unknown'),
'Type': payload.get('file_type', ''),
'MD5': payload.get('response_md5', ''),
'VT': vt_information
})
urlhaus_data['Payload'] = payloads
# DBot score calculation
dbot_score, description = calculate_dbot_score(url_information.get('blacklists', {}), THRESHOLD,
COMPROMISED_IS_MALICIOUS)
ec['DBotScore']['Score'] = dbot_score
if dbot_score == 3:
ec['URL']['Malicious'] = {
'Vendor': 'URLhaus',
'Description': description
}
ec['URLhaus.URL(val.ID && val.ID === obj.ID)'] = urlhaus_data
human_readable = tableToMarkdown(f'URLhaus reputation for {url}',
{
'URLhaus link': url_information.get("urlhaus_reference", "None"),
'Description': description,
'URLhaus ID': urlhaus_data['ID'],
'Status': urlhaus_data['Status'],
'Threat': url_information.get("threat", ""),
'Date added': date_added
})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': url_information,
'HumanReadable': human_readable,
'HumanReadableFormat': formats['markdown'],
'EntryContext': ec
})
elif url_information['query_status'] == 'no_results':
ec['DBotScore']['Score'] = 0
human_readable = f'## URLhaus reputation for {url}\n' \
f'No results!'
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': url_information,
'HumanReadable': human_readable,
'HumanReadableFormat': formats['markdown'],
'EntryContext': ec
})
elif url_information['query_status'] == 'invalid_url':
human_readable = f'## URLhaus reputation for {url}\n' \
f'Invalid URL!'
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': url_information,
'HumanReadable': human_readable,
'HumanReadableFormat': formats['markdown'],
'EntryContext': ec
})
else:
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': f'Query results = {url_information["query_status"]}'
})
except Exception:
demisto.debug(traceback.format_exc())
return_error('Failed getting url data, please verify the arguments and parameters')
def domain_command():
domain = demisto.args()['domain']
try:
domain_information = query_host_information(domain).json()
ec = {
'Domain': {
'Name': domain
},
'DBotScore': {
'Type': 'domain',
'Vendor': 'URLhaus',
'Indicator': domain
}
}
if domain_information['query_status'] == 'ok':
# URLHaus output
blacklist_information = []
blacklists = domain_information.get('blacklists', {})
for bl_name, bl_status in blacklists.items():
blacklist_information.append({'Name': bl_name,
'Status': bl_status})
first_seen = reformat_date(domain_information.get('firstseen'))
urlhaus_data = {
'FirstSeen': first_seen,
'Blacklist': blacklists,
'URL': domain_information.get('urls', [])
}
# DBot score calculation
dbot_score, description = calculate_dbot_score(domain_information.get('blacklists', {}), THRESHOLD,
COMPROMISED_IS_MALICIOUS)
ec['DBotScore']['Score'] = dbot_score
if dbot_score == 3:
                ec['Domain']['Malicious'] = {
'Vendor': 'URLhaus',
'Description': description
}
ec['URLhaus.Domain(val.Name && val.Name === obj.Name)'] = urlhaus_data
human_readable = tableToMarkdown(f'URLhaus reputation for {domain}',
{
'URLhaus link': domain_information.get('urlhaus_reference', 'None'),
'Description': description,
'First seen': first_seen,
})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': domain_information,
'HumanReadable': human_readable,
'HumanReadableFormat': formats['markdown'],
'EntryContext': ec
})
elif domain_information['query_status'] == 'no_results':
ec['DBotScore']['Score'] = 0
human_readable = f'## URLhaus reputation for {domain}\n' \
f'No results!'
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': domain_information,
'HumanReadable': human_readable,
'HumanReadableFormat': formats['markdown'],
'EntryContext': ec
})
elif domain_information['query_status'] == 'invalid_host':
human_readable = f'## URLhaus reputation for {domain}\n' \
f'Invalid domain!'
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': domain_information,
'HumanReadable': human_readable,
'HumanReadableFormat': formats['markdown'],
'EntryContext': ec
})
else:
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': f'Query results = {domain_information["query_status"]}'
})
except Exception:
demisto.debug(traceback.format_exc())
return_error('Failed getting domain data, please verify the arguments and parameters')
def file_command():
hash = demisto.args()['file']
if len(hash) == 32:
hash_type = 'md5'
elif len(hash) == 64:
hash_type = 'sha256'
else:
        return_error('Only accepting MD5 (32 hex characters) or SHA256 (64 hex characters) hash types')
try:
file_information = query_payload_information(hash_type, hash).json()
if file_information['query_status'] == 'ok' and file_information['md5_hash']:
# URLhaus output
first_seen = reformat_date(file_information.get('firstseen'))
last_seen = reformat_date(file_information.get('lastseen'))
urlhaus_data = {
'MD5': file_information.get('md5_hash', ''),
'SHA256': file_information.get('sha256_hash', ''),
'Type': file_information.get('file_type', ''),
'Size': int(file_information.get('file_size', '')),
'Signature': file_information.get('signature', ''),
'FirstSeen': first_seen,
'LastSeen': last_seen,
'DownloadLink': file_information.get('urlhaus_download', ''),
'URL': file_information.get('urls', [])
}
virus_total_data = file_information.get('virustotal')
if virus_total_data:
urlhaus_data['VirusTotal'] = {
'Percent': float(file_information.get('virustotal', {'percent': 0})['percent']),
'Link': file_information.get('virustotal', {'link': ''})['link']
}
ec = {
'File': {
'Size': urlhaus_data.get('Size', 0),
'MD5': urlhaus_data.get('MD5', ''),
'SHA256': urlhaus_data.get('SHA256')
},
'URLhaus.File(val.MD5 && val.MD5 === obj.MD5)': urlhaus_data
}
human_readable = tableToMarkdown(f'URLhaus reputation for {hash_type.upper()} : {hash}',
{
'URLhaus link': urlhaus_data.get('DownloadLink', ''),
'Signature': urlhaus_data.get('Signature', ''),
'MD5': urlhaus_data.get('MD5', ''),
'SHA256': urlhaus_data.get('SHA256', ''),
'First seen': first_seen,
'Last seen': last_seen
})
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': file_information,
'HumanReadable': human_readable,
'HumanReadableFormat': formats['markdown'],
'EntryContext': ec
})
elif (file_information['query_status'] == 'ok' and not file_information['md5_hash']) or \
file_information['query_status'] == 'no_results':
human_readable = f'## URLhaus reputation for {hash_type.upper()} : {hash}\n' \
f'No results!'
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': file_information,
'HumanReadable': human_readable,
'HumanReadableFormat': formats['markdown'],
})
elif file_information['query_status'] in ['invalid_md5', 'invalid_sha256']:
human_readable = f'## URLhaus reputation for {hash_type.upper()} : {hash}\n' \
f'Invalid {file_information["query_status"].lstrip("invalid_").upper()}!'
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': file_information,
'HumanReadable': human_readable,
'HumanReadableFormat': formats['markdown'],
})
else:
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': f'Query results = {file_information["query_status"]}'
})
except Exception:
print(traceback.format_exc())
demisto.debug(traceback.format_exc())
return_error('Failed getting file data, please verify the arguments and parameters')
def urlhaus_download_sample_command():
"""
The response can be either the zipped sample (content-type = application/zip), or JSON (content-type = text/html)
containing the query status.
"""
file_sha256 = demisto.args()['file']
res = download_malware_sample(file_sha256)
try:
if len(res.content) == 0:
demisto.results({
'Type': entryTypes['note'],
'HumanReadable': f'No results for SHA256: {file_sha256}',
'HumanReadableFormat': formats['markdown']
})
elif res.headers['content-type'] in ['text/html', 'application/json'] and \
res.json()['query_status'] == 'not_found':
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': res.json(),
'HumanReadable': f'No results for SHA256: {file_sha256}',
'HumanReadableFormat': formats['markdown']
})
elif res.headers['content-type'] == 'application/zip':
demisto.results(fileResult(file_sha256, extract_zipped_buffer(res.content)))
else:
raise Exception
# Handle like an exception
except Exception:
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': str(res.content)
})
''' COMMANDS MANAGER / SWITCH PANEL '''
LOG('Command being called is %s' % (demisto.command()))
try:
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
test_module()
demisto.results('ok')
elif demisto.command() == 'url':
url_command()
elif demisto.command() == 'domain':
domain_command()
elif demisto.command() == 'file':
file_command()
elif demisto.command() == 'urlhaus-download-sample':
urlhaus_download_sample_command()
# Log exceptions
except Exception as e:
LOG(str(e))
LOG.print_log()
raise
|
import qcore
from qcore.asserts import AssertRaises
class Foo(metaclass=qcore.DisallowInheritance):
pass
def test_disallow_inheritance():
with AssertRaises(TypeError):
class Bar(Foo):
pass
|
#!/usr/bin/env python
from __future__ import print_function
import jinja2
import argparse
import os
import fnmatch
import numpy as np
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('filename')
parser.add_argument('env_dir')
args = parser.parse_args()
env = jinja2.Environment(loader=jinja2.FileSystemLoader(args.env_dir))
template = env.get_template(os.path.relpath(args.filename, args.env_dir))
# create dictionary with useful modules etc.
try:
import rospkg
rospack = rospkg.RosPack()
except ImportError:
pass
rospack = None
d = {'np': np, 'rospack': rospack}
result = template.render(d)
filename_out = args.filename.replace('.sdf.jinja','-gen.sdf')
with open(filename_out, 'w') as f_out:
print('{:s} -> {:s}'.format(args.filename, filename_out))
f_out.write(result)
|
from django.urls import path
from . import views
urlpatterns = [
path('textCheck', views.textCheck, name='textCheck'),
path('check', views.check, name='check'),
path('check_result', views.check_result, name='check_result'),
path('globalUpload', views.globalUpload, name='globalUpload'),
path('fileMerge', views.fileMerge, name='fileMerge'),
path('file_download', views.file_download, name='file_download'),
]
|
from math import *
inp = input().split(" ")
n = int(inp[0])
r = int(inp[1])
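# Accumulate the least common multiple of the n values, using lcm(a, b) = a * b // gcd(a, b)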
fives = 1
for i in range(n):
a = int(input())
fives = (fives*a)//gcd(fives,a)
print((fives+r) % (10**9+7))
|
import unittest
from xappt.models.callback import Callback
class CallbackHost:
def __init__(self):
self.call_info = {
'a': [],
'b': [],
'c': [],
}
def callback_method_a(self, *args, **kwargs):
self.call_info['a'].append((args, kwargs))
def callback_method_b(self, *args, **kwargs):
self.call_info['b'].append((args, kwargs))
def callback_method_c(self, *args, **kwargs):
self.call_info['c'].append((args, kwargs))
class TestCallback(unittest.TestCase):
def test_add(self):
cb_host = CallbackHost()
cb = Callback()
cb.add(cb_host.callback_method_a)
self.assertEqual(0, len(cb._callback_functions))
cb._run_deferred_ops()
self.assertEqual(1, len(cb._callback_functions))
def test_add_auto_remove(self):
cb_host = CallbackHost()
cb = Callback()
cb.add(cb_host.callback_method_a)
cb._run_deferred_ops()
self.assertEqual(1, len(cb._callback_functions))
del cb_host
cb._run_deferred_ops()
self.assertEqual(0, len(cb._callback_functions))
def test_weakref(self):
cb_host = CallbackHost()
cb = Callback()
cb.add(cb_host.callback_method_a)
cb._run_deferred_ops()
self.assertEqual(1, len(cb._callback_functions))
cb.remove(cb_host.callback_method_a)
cb._run_deferred_ops()
self.assertEqual(0, len(cb._callback_functions))
def test_clear(self):
cb_host = CallbackHost()
cb = Callback()
cb.add(cb_host.callback_method_a)
cb.add(cb_host.callback_method_b)
cb.add(cb_host.callback_method_c)
cb._run_deferred_ops()
self.assertEqual(3, len(cb._callback_functions))
cb.clear()
cb._run_deferred_ops()
self.assertEqual(0, len(cb._callback_functions))
def test_invoke(self):
cb_host = CallbackHost()
cb = Callback()
cb.add(cb_host.callback_method_a)
cb.invoke("arg1", "arg2")
call_info = cb_host.call_info['a']
self.assertEqual(1, len(call_info))
self.assertIn("arg1", call_info[0][0])
self.assertIn("arg2", call_info[0][0])
def test_invoke_paused(self):
cb_host = CallbackHost()
cb = Callback()
cb.add(cb_host.callback_method_a)
self.assertFalse(cb.paused)
cb.paused = True
self.assertTrue(cb.paused)
cb.invoke("arg1", "arg2")
self.assertEqual(1, len(cb._callback_functions))
self.assertEqual(0, len(cb_host.call_info['a']))
|
"""Test the ht.pyfilter.operations.primaryimage module."""
# =============================================================================
# IMPORTS
# =============================================================================
# Standard Library
import argparse
# Third Party
import pytest
# Houdini Toolbox
from ht.pyfilter.manager import PyFilterManager
from ht.pyfilter.operations import primaryimage
# =============================================================================
# FIXTURES
# =============================================================================
@pytest.fixture
def init_operation(mocker):
"""Fixture to initialize an operation."""
mocker.patch.object(primaryimage.SetPrimaryImage, "__init__", lambda x, y: None)
def _create():
return primaryimage.SetPrimaryImage(None)
return _create
# =============================================================================
# TESTS
# =============================================================================
class Test_SetPrimaryImage:
"""Test the ht.pyfilter.operations.primaryimage.SetPrimaryImage object."""
def test___init__(self, mocker):
"""Test object initialization."""
mock_super_init = mocker.patch.object(
primaryimage.PyFilterOperation, "__init__"
)
mock_manager = mocker.MagicMock(spec=PyFilterManager)
op = primaryimage.SetPrimaryImage(mock_manager)
mock_super_init.assert_called_with(mock_manager)
assert not op._disable_primary_image
assert op._primary_image_path is None
# Properties
def test_disable_primary_image(self, init_operation, mocker):
"""Test the 'disable_primary_image' property."""
mock_value = mocker.MagicMock(spec=bool)
op = init_operation()
op._disable_primary_image = mock_value
assert op.disable_primary_image == mock_value
def test_primary_image_path(self, init_operation, mocker):
"""Test the 'primary_image_path' property."""
mock_value = mocker.MagicMock(spec=str)
op = init_operation()
op._primary_image_path = mock_value
assert op.primary_image_path == mock_value
# Static Methods
def test_build_arg_string(self, mocker):
"""Test arg string construction."""
result = primaryimage.SetPrimaryImage.build_arg_string()
assert result == ""
mock_value = mocker.MagicMock(spec=str)
result = primaryimage.SetPrimaryImage.build_arg_string(
primary_image_path=mock_value
)
assert result == "--primary-image-path={}".format(mock_value)
result = primaryimage.SetPrimaryImage.build_arg_string(
disable_primary_image=True
)
assert result == "--disable-primary-image"
def test_register_parser_args(self, mocker):
"""Test registering all the argument parser args."""
mock_parser = mocker.MagicMock(spec=argparse.ArgumentParser)
primaryimage.SetPrimaryImage.register_parser_args(mock_parser)
calls = [
mocker.call("--primary-image-path", dest="primary_image_path"),
mocker.call(
"--disable-primary-image",
action="store_true",
dest="disable_primary_image",
),
]
mock_parser.add_argument.assert_has_calls(calls)
# Methods
# filter_camera
def test_filter_camera__no_op(self, init_operation, patch_operation_logger, mocker):
"""Test 'filter_camera' when doing nothing."""
mock_set = mocker.patch("ht.pyfilter.operations.primaryimage.set_property")
op = init_operation()
op._disable_primary_image = False
op._primary_image_path = None
op.filter_camera()
mock_set.assert_not_called()
def test_filter_camera__disable(
self, init_operation, patch_operation_logger, mocker
):
"""Test 'filter_camera' when disabling the image."""
mock_set = mocker.patch("ht.pyfilter.operations.primaryimage.set_property")
mock_logger = mocker.patch(
"ht.pyfilter.operations.primaryimage._logger", autospec=True
)
op = init_operation()
op._disable_primary_image = True
op.filter_camera()
mock_logger.info.assert_called()
mock_set.assert_called_with("image:filename", "null:")
def test_filter_camera__path(self, init_operation, patch_operation_logger, mocker):
"""Test 'filter_camera' when setting the image path."""
mock_set = mocker.patch("ht.pyfilter.operations.primaryimage.set_property")
mock_path = mocker.MagicMock(spec=str)
op = init_operation()
op._disable_primary_image = False
op._primary_image_path = mock_path
op.filter_camera()
mock_set.assert_called_with("image:filename", mock_path)
# process_parsed_args
def test_process_parsed_args__noop(self, init_operation, mocker):
"""Test processing parsed args when no args are set."""
mock_namespace = mocker.MagicMock(spec=argparse.Namespace)
mock_namespace.disable_primary_image = False
mock_namespace.primary_image_path = None
op = init_operation()
op._disable_primary_image = False
op._primary_image_path = None
op.process_parsed_args(mock_namespace)
assert not op.disable_primary_image
assert op.primary_image_path is None
def test_process_parsed_args__all(self, init_operation, mocker):
"""Test processing parsed args when all args are set."""
mock_path = mocker.MagicMock(spec=str)
mock_namespace = mocker.MagicMock(spec=argparse.Namespace)
mock_namespace.disable_primary_image = True
mock_namespace.primary_image_path = mock_path
op = init_operation()
op._disable_primary_image = False
op._primary_image_path = None
op.process_parsed_args(mock_namespace)
assert op.disable_primary_image
assert op.primary_image_path == mock_path
# should_run
def test_should_run__no_op(self, init_operation):
"""Test if the operation should run with no args set."""
op = init_operation()
op._disable_primary_image = False
op._primary_image_path = None
assert not op.should_run()
def test_should_run__disable(self, init_operation):
"""Test if the operation should run when disable image is set."""
op = init_operation()
op._disable_primary_image = True
op._primary_image_path = None
assert op.should_run()
def test_should_run__set_path(self, init_operation, mocker):
"""Test if the operation should run when setting an image path."""
op = init_operation()
op._disable_primary_image = False
op._primary_image_path = mocker.MagicMock(spec=str)
assert op.should_run()
|
import os
import cv2
import torch
import numpy as np
class SAVE_ATTEN(object):
def __init__(self, save_dir='../save_bins'):
"""
save_dir: the path for saving target
"""
self.save_dir = save_dir
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
def save_masked_img_batch(self, path_batch, atten_batch, label_batch):
"""
process batch_size images; the path_batch and label_batch obtained from dataloader,and the atten_batch obtained directly from the modle(without any changes)
atten_batch:get the attention_map after a layer in the forwad function of the model.py
for example:
def forward(self,x):
x = self.layer1(x)
x = self.layer2(x)
return x
def get_attention_map(self):
return self.layer1(x)
the procedure of obtaining attention_map
: external input ----> batch selection ----> channel selection ----> normalize ----> resize ----> denormalize ----> save
"""
img_num = atten_batch.size()[0]
for idx in range(img_num):
atten = atten_batch[idx]
atten = atten.cpu().data.numpy()
label = label_batch[idx]
label_list = self.get_label(label) # Label_list may be multiple labels, so it may correspond to multiple maps
self._save_masked_img(path_batch[idx], atten, label_list)
def _save_masked_img(self, img_path, atten, label_list):
"""
Process each image in turn
label: the target class which will be used as the index to get correspoing channel
"""
if not os.path.isfile(img_path):
raise 'Image not exist:%s'%(img_path)
for each_label in label_list:
label = each_label[0]
attention_map = atten[label,:,:] # now is [width, height]
atten_norm = attention_map
img = cv2.imread(img_path)
org_size = np.shape(img)  # (height, width, channels)
h, w = org_size[0], org_size[1]
# normalize each attention map and resize it to the original image size
atten_norm = self.normalize_map(atten_norm)
atten_norm = cv2.resize(atten_norm, dsize=(w, h))  # cv2 expects (width, height)
atten_norm = atten_norm* 255
heat_map = cv2.applyColorMap(atten_norm.astype(np.uint8), cv2.COLORMAP_JET)
img = cv2.addWeighted(img.astype(np.uint8), 0.5, heat_map.astype(np.uint8), 0.5, 0)
img_id = img_path.strip().split('/')[-1]
img_id = img_id.strip().split('.')[0]
save_dir = os.path.join(self.save_dir, img_id + '_' + str(label) +'.png')
cv2.imwrite(save_dir, img)
def normalize_map(self, atten_map):
min_val = np.min(atten_map)
max_val = np.max(atten_map)
atten_norm = (atten_map - min_val)/(max_val - min_val)
return atten_norm
def get_label(self, gt_label):
labels_idx = []
labels_idx = torch.nonzero(gt_label.squeeze()).cpu().numpy()
# labels_idx is a nested list like [[12], [22]], so use labels_idx[i][0] to get the corresponding label class
return labels_idx
def _merge_multi_class(self, atten, label_list):
atten_norm = torch.zeros_like(atten)
for each_label in label_list:
label = each_label[0]
atten_norm += atten[label,:,:]
# atten_norm can be processed outside of the for loop
return atten_norm
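# A minimal usage sketch (hedged: 'model' and 'loader' below are hypothetical names;
# it is assumed the model exposes get_attention_map() as described in the docstring
# of save_masked_img_batch, returning a [batch, num_classes, h, w] tensor):
#
# saver = SAVE_ATTEN(save_dir='../save_bins')
# for paths, images, labels in loader:
#     _ = model(images)
#     atten = model.get_attention_map()
#     saver.save_masked_img_batch(paths, atten, labels)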
|
from django.apps import AppConfig
class ContactUpdateConfig(AppConfig):
name = 'contact_update'
|
import os
from dotenv import load_dotenv, find_dotenv
class Config(object):
# Environment config
SECRET_KEY = os.environ.get('SECRET_KEY', 'dev')
BASE_PATH = os.environ.get('BASE_PATH', '')
DB_HOST = os.environ.get('DB_HOST', 'localhost')
DB_USER = os.environ.get('DB_USER', 'user')
DB_PASS = os.environ.get('DB_PASS', 'pass')
DB_PORT = os.environ.get('DB_PORT', 5432)
DB_NAME = os.environ.get('DB_NAME', 'db_name')
DB_URL = f"postgres://{DB_USER}:{DB_PASS}@{DB_HOST}:{DB_PORT}/{DB_NAME}"
ENVIRONMENT_NAME = os.environ.get('ENVIRONMENT_NAME', 'dev')
# SqlAlchemy config
SQLALCHEMY_DATABASE_URI = DB_URL
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ENGINE_OPTIONS = {"pool_pre_ping": True}
JWT_OIDC_WELL_KNOWN_CONFIG = os.environ.get(
'JWT_OIDC_WELL_KNOWN_CONFIG',
'https://localhost:8080/auth/realms/mds/.well-known/openid-configuration')
JWT_OIDC_AUDIENCE = os.environ.get('JWT_OIDC_AUDIENCE', 'mds')
JWT_OIDC_ALGORITHMS = os.environ.get('JWT_OIDC_ALGORITHMS', 'RS256')
NRIS_DB_USER = os.environ.get('NRIS_DB_USER', 'localhost')
NRIS_DB_PASSWORD = os.environ.get('NRIS_DB_PASSWORD', 'localhost')
NRIS_DB_PORT = os.environ.get('NRIS_DB_PORT', 'localhost')
NRIS_DB_SERVICENAME = os.environ.get('NRIS_DB_SERVICENAME', 'localhost')
NRIS_DB_HOSTNAME = os.environ.get('NRIS_DB_HOSTNAME', 'localhost')
NRIS_SERVER_CERT_DN = os.environ.get('NRIS_SERVER_CERT_DN', 'localhost')
# Cache settings
CACHE_TYPE = os.environ.get('CACHE_TYPE', 'redis')
CACHE_REDIS_HOST = os.environ.get('CACHE_REDIS_HOST', 'redis')
CACHE_REDIS_PORT = os.environ.get('CACHE_REDIS_PORT', 6379)
CACHE_REDIS_PASS = os.environ.get('CACHE_REDIS_PASS', 'redis-password')
CACHE_REDIS_URL = 'redis://:{0}@{1}:{2}'.format(CACHE_REDIS_PASS, CACHE_REDIS_HOST,
CACHE_REDIS_PORT)
def JWT_ROLE_CALLBACK(jwt_dict):
return (jwt_dict['realm_access']['roles'])
class TestConfig(Config):
TESTING = os.environ.get('TESTING', True)
DB_NAME = os.environ.get('DB_NAME_TEST', 'db_name_test')
DB_URL = f"postgres://{Config.DB_USER}:{Config.DB_PASS}@{Config.DB_HOST}:{Config.DB_PORT}/{DB_NAME}"
SQLALCHEMY_DATABASE_URI = DB_URL
NRIS_DB_USER = os.environ.get('NRIS_DB_USER', 'localhost')
NRIS_DB_PASSWORD = os.environ.get('NRIS_DB_PASSWORD', 'localhost')
NRIS_DB_PORT = os.environ.get('NRIS_DB_PORT', 'localhost')
NRIS_DB_SERVICENAME = os.environ.get('NRIS_DB_SERVICENAME', 'localhost')
NRIS_DB_HOSTNAME = os.environ.get('NRIS_DB_HOSTNAME', 'localhost')
NRIS_SERVER_CERT_DN = os.environ.get('NRIS_SERVER_CERT_DN', 'localhost')
JWT_OIDC_TEST_MODE = True
JWT_OIDC_TEST_AUDIENCE = "test_audience"
JWT_OIDC_TEST_CLIENT_SECRET = "test_secret"
JWT_OIDC_TEST_ISSUER = "test_issuer"
# Dummy public key set (JWKS) for testing purposes; these can be replaced with any other generated key.
JWT_OIDC_TEST_KEYS = {
"keys": [{
"kid": "flask-jwt-oidc-test-client",
"kty": "RSA",
"alg": "RS256",
"use": "sig",
"n":
"AN-fWcpCyE5KPzHDjigLaSUVZI0uYrcGcc40InVtl-rQRDmAh-C2W8H4_Hxhr5VLc6crsJ2LiJTV_E72S03pzpOOaaYV6-TzAjCou2GYJIXev7f6Hh512PuG5wyxda_TlBSsI-gvphRTPsKCnPutrbiukCYrnPuWxX5_cES9eStR",
"e": "AQAB"
}]
}
# Dummy Private Keys for testing purposes.
JWT_OIDC_TEST_PRIVATE_KEY_JWKS = {
"keys": [{
"kid": "flask-jwt-oidc-test-client",
"kty": "RSA",
"alg": "RS256",
"use": "sig",
"n": "AN-fWcpCyE5KPzHDjigLaSUVZI0uYrcGcc40InVtl-rQRDmAh-C2W8H4_Hxhr5VLc6crsJ2LiJTV_E72S03pzpOOaaYV6-TzAjCou2GYJIXev7f6Hh512PuG5wyxda_TlBSsI-gvphRTPsKCnPutrbiukCYrnPuWxX5_cES9eStR",
"e": "AQAB",
"d": "C0G3QGI6OQ6tvbCNYGCqq043YI_8MiBl7C5dqbGZmx1ewdJBhMNJPStuckhskURaDwk4-8VBW9SlvcfSJJrnZhgFMjOYSSsBtPGBIMIdM5eSKbenCCjO8Tg0BUh_xa3CHST1W4RQ5rFXadZ9AeNtaGcWj2acmXNO3DVETXAX3x0",
"p": "APXcusFMQNHjh6KVD_hOUIw87lvK13WkDEeeuqAydai9Ig9JKEAAfV94W6Aftka7tGgE7ulg1vo3eJoLWJ1zvKM",
"q": "AOjX3OnPJnk0ZFUQBwhduCweRi37I6DAdLTnhDvcPTrrNWuKPg9uGwHjzFCJgKd8KBaDQ0X1rZTZLTqi3peT43s",
"dp": "AN9kBoA5o6_Rl9zeqdsIdWFmv4DB5lEqlEnC7HlAP-3oo3jWFO9KQqArQL1V8w2D4aCd0uJULiC9pCP7aTHvBhc",
"dq": "ANtbSY6njfpPploQsF9sU26U0s7MsuLljM1E8uml8bVJE1mNsiu9MgpUvg39jEu9BtM2tDD7Y51AAIEmIQex1nM",
"qi": "XLE5O360x-MhsdFXx8Vwz4304-MJg-oGSJXCK_ZWYOB_FGXFRTfebxCsSYi0YwJo-oNu96bvZCuMplzRI1liZw"
}]
}
# Dummy Private Key, for testing purposes.
JWT_OIDC_TEST_PRIVATE_KEY_PEM = """
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDfn1nKQshOSj8xw44oC2klFWSNLmK3BnHONCJ1bZfq0EQ5gIfg
tlvB+Px8Ya+VS3OnK7Cdi4iU1fxO9ktN6c6TjmmmFevk8wIwqLthmCSF3r+3+h4e
ddj7hucMsXWv05QUrCPoL6YUUz7Cgpz7ra24rpAmK5z7lsV+f3BEvXkrUQIDAQAB
AoGAC0G3QGI6OQ6tvbCNYGCqq043YI/8MiBl7C5dqbGZmx1ewdJBhMNJPStuckhs
kURaDwk4+8VBW9SlvcfSJJrnZhgFMjOYSSsBtPGBIMIdM5eSKbenCCjO8Tg0BUh/
xa3CHST1W4RQ5rFXadZ9AeNtaGcWj2acmXNO3DVETXAX3x0CQQD13LrBTEDR44ei
lQ/4TlCMPO5bytd1pAxHnrqgMnWovSIPSShAAH1feFugH7ZGu7RoBO7pYNb6N3ia
C1idc7yjAkEA6Nfc6c8meTRkVRAHCF24LB5GLfsjoMB0tOeEO9w9Ous1a4o+D24b
AePMUImAp3woFoNDRfWtlNktOqLel5PjewJBAN9kBoA5o6/Rl9zeqdsIdWFmv4DB
5lEqlEnC7HlAP+3oo3jWFO9KQqArQL1V8w2D4aCd0uJULiC9pCP7aTHvBhcCQQDb
W0mOp436T6ZaELBfbFNulNLOzLLi5YzNRPLppfG1SRNZjbIrvTIKVL4N/YxLvQbT
NrQw+2OdQACBJiEHsdZzAkBcsTk7frTH4yGx0VfHxXDPjfTj4wmD6gZIlcIr9lZg
4H8UZcVFN95vEKxJiLRjAmj6g273pu9kK4ymXNEjWWJn
-----END RSA PRIVATE KEY-----"""
|
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright 2021- QuOCS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from qtpy import QtWidgets
from quocspyside2interface.gui.settings.AllInOneCommForm import AllInOneCommForm
from quocspyside2interface.gui.settings.FilesUpdateForm import FilesUpdateForm
from quocspyside2interface.gui.settings.LocalCommForm import LocalCommForm
from quocspyside2interface.gui.settings.PythonClassForm import PythonClassForm
from quocspyside2interface.gui.settings.RemoteCommForm import RemoteCommForm
from quocspyside2interface.gui.uiclasses.CommFomSettingsUI import Ui_Form
class CommFom(QtWidgets.QWidget, Ui_Form):
"""Widget for Communication and Figure of merit evaluation"""
def __init__(self, parent=None, loaded_dictionary=None, plugin_name=None):
super().__init__(parent)
# TODO Think about the best behaviour for the buttons in the case of a new fom mode
self.setupUi(self)
# Communication QButtonGroup
self.comm_button_group = QtWidgets.QButtonGroup()
# self.comm_button_group.addButton(self.remote_radio_button)
# self.comm_button_group.addButton(self.local_radio_button)
self.comm_button_group.addButton(self.allinone_radio_button)
# Not available buttons
self.no_available_button_group = QtWidgets.QButtonGroup()
self.no_available_button_group.addButton(self.remote_radio_button)
self.no_available_button_group.addButton(self.local_radio_button)
self.no_available_button_group.addButton(self.files_exchange_radio_button)
self.no_available_button_group.setExclusive(False)
# self.test_radio_button.setChecked(True)
# Fom QButtonGroup
self.fom_button_group = QtWidgets.QButtonGroup()
self.fom_button_group.addButton(self.python_class_radio_button)
# self.fom_button_group.addButton(self.files_exchange_radio_button)
# Create the widget object and set it
communication_dictionary, figure_of_merit_dictionary = None, None
if loaded_dictionary is not None:
communication_dictionary = loaded_dictionary[0]
figure_of_merit_dictionary = loaded_dictionary[1]
# Comm
self.remote_comm_form = RemoteCommForm()
self.local_comm_form = LocalCommForm()
self.all_in_one_comm_form = AllInOneCommForm(loaded_dictionary=communication_dictionary)
# Fom
self.python_class_form = PythonClassForm(loaded_dictionary=figure_of_merit_dictionary)
self.files_update_form = FilesUpdateForm()
# Comm
# self.remote_radio_button.pressed.connect(self.set_remote_widget)
# self.local_radio_button.pressed.connect(self.set_local_widget)
self.allinone_radio_button.pressed.connect(self.set_all_in_one_widget)
# Fom
self.python_class_radio_button.pressed.connect(self.set_python_class_widget)
# self.files_exchange_radio_button.pressed.connect(self.set_files_update_widget)
self.no_available_button_group.buttonReleased.connect(self.no_available_button_unchecked)
# Initialization
self._initialization()
def _initialization(self):
# Set initial widgets
self.allinone_radio_button.setChecked(True)
self.comm_scroll_area.setWidget(self.all_in_one_comm_form)
#
self.python_class_radio_button.setChecked(True)
self.fom_scroll_area.setWidget(self.python_class_form)
#
self.comm_scroll_area.setWidgetResizable(True)
@staticmethod
def no_available_button_unchecked(no_available_button):
"""Just a module to disabled button action"""
if no_available_button is not None:
no_available_button.setChecked(False)
def get_dictionary(self):
communication_dict = self.comm_scroll_area.widget().get_dictionary()
figure_of_merit_dict = self.fom_scroll_area.widget().get_dictionary()
return {"communication": communication_dict, "figure_of_merit": figure_of_merit_dict}
def set_remote_widget(self):
self.comm_scroll_area.takeWidget()
self.comm_scroll_area.setWidget(self.remote_comm_form)
self.remote_comm_form = self.comm_scroll_area.widget()
def set_local_widget(self):
self.comm_scroll_area.takeWidget()
self.comm_scroll_area.setWidget(self.local_comm_form)
self.local_comm_form = self.comm_scroll_area.widget()
def set_all_in_one_widget(self):
self.comm_scroll_area.takeWidget()
self.comm_scroll_area.setWidget(self.all_in_one_comm_form)
self.all_in_one_comm_form = self.comm_scroll_area.widget()
def set_python_class_widget(self):
self.fom_scroll_area.takeWidget()
self.fom_scroll_area.setWidget(self.python_class_form)
self.python_class_form = self.fom_scroll_area.widget()
def set_files_update_widget(self):
self.fom_scroll_area.takeWidget()
self.fom_scroll_area.setWidget(self.files_update_form)
self.files_update_form = self.fom_scroll_area.widget()
|
from .utils.packetCreator import *
"""
Format of an SSL record
Byte 0 = SSL record type
Bytes 1-2 = SSL version (major/minor)
Bytes 3-4 = Length of data in the record (excluding the header itself). The maximum SSL supports is 16384 (16K).
Byte 0 can have following values:
SSL3_RT_CHANGE_CIPHER_SPEC 20 (x'14')
SSL3_RT_ALERT 21 (x'15')
SSL3_RT_HANDSHAKE 22 (x'16')
SSL3_RT_APPLICATION_DATA 23 (x'17')
Bytes 1-2 in the record have the following version values:
SSL3_VERSION x'0300'
TLS1_VERSION x'0301'
"""
class RecordHeader3(object):
def __init__(self):
self.type = 0
self.version = (0,0)
self.length = 0
self.ssl2 = False
def create(self,version, type, length):
self.type = type
self.version = version
self.length = length
return self
def write(self):
w = Writer()
w.add(self.type, 1)
w.add(self.version[0], 1)
w.add(self.version[1], 1)
w.add(self.length, 2)
return w.bytes
#TODO parse function not included
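# Example (a sketch, assuming Writer.add() appends a big-endian integer of the given
# byte length and ContentType.handshake == 22, as per the record-type table above):
# a TLS 1.0 handshake record header carrying 512 bytes of payload serializes to the
# five bytes 16 03 01 02 00.
#
# hdr = RecordHeader3().create(version=(3, 1), type=ContentType.handshake, length=512)
# hdr.write() # -> b'\x16\x03\x01\x02\x00'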
class RecordHeader2(object):
def __init__(self):
self.type = 0
self.version = (0,0)
self.length = 0
self.ssl2 = True
#TODO parse function not included
"""
FORMAT OF AN SSL HANDSHAKE RECORD
Byte 0 = SSL record type = 22 (SSL3_RT_HANDSHAKE)
Bytes 1-2 = SSL version (major/minor)
Bytes 3-4 = Length of data in the record (excluding the header itself).
Byte 5 = Handshake type
Bytes 6-8 = Length of data to follow in this record
Bytes 9-n = Command-specific data
"""
class HandshakeMsg(object):
def __init__(self, handshakeType):
self.contentType = ContentType.handshake
self.handshakeType = handshakeType
def postWrite(self,w):
headerWriter = Writer()
headerWriter.add(self.handshakeType, 1)
headerWriter.add(len(w.bytes), 3)
return headerWriter.bytes + w.bytes
"""
Client hello as per RFC
struct {
ProtocolVersion client_version;
Random random;
SessionID session_id;
CipherSuite cipher_suites<2..2^16-1>;
CompressionMethod compression_methods<1..2^8-1>;
Extension extensions<0..2^16-1>;
} ClientHello;
"""
class ClientHello(HandshakeMsg):
def __init__(self, ssl2=False):
HandshakeMsg.__init__(self, HandshakeType.client_hello)
self.ssl2 = ssl2
self.client_version = (0,0)
self.random = bytearray(32)
self.session_id = bytearray(0)
self.cipher_suites = [] # list of 16-bit values
self.certificate_types = [CertificateType.x509]
self.compression_methods = [] # list of 8-bit values
self.srp_username = None # string
self.tack = False # TLS key pinning for everyone http://lwn.net/Articles/499134/
self.supports_npn = False
self.server_name = bytearray(0) # for Server Name Indication (SNI)
def create(self, version, random, session_id, cipher_suites, certificate_types = None, srpUsername=None,
tack=False, supports_npn=False, serverName=None):
self.client_version = version
self.random = random
self.session_id = session_id # This field should be empty if no session_id is available or the client wishes to generate new security parameters
self.cipher_suites = cipher_suites
self.certificate_types = certificate_types
self.compression_methods = [0]
if srpUsername:
self.srp_username = bytearray(srpUsername, "utf-8")
self.tack = tack
self.supports_npn = supports_npn
if serverName:
self.server_name = bytearray(serverName, "utf-8")
return self
#TODO parse not included
def write(self):
w = Writer()
w.add(self.client_version[0], 1)
w.add(self.client_version[1], 1)
w.addFixSeq(self.random, 1)
w.addVarSeq(self.session_id, 1, 1)
w.addVarSeq(self.cipher_suites, 2, 2)
w.addVarSeq(self.compression_methods, 1, 1)
#TODO read about extensions
w2 = Writer() # for extensions
if self.certificate_types and self.certificate_types != [CertificateType.x509]:
w2.add(ExtensionType.cert_type, 2)
w2.add(len(self.certificate_types)+1,2)
w2.addVarSeq(self.certificate_types, 1, 1)
if self.srp_username:
w2.add(ExtensionType.srp, 2)
w2.add(len(self.srp_username)+1, 2)
w2.addVarSeq(self.srp_username, 1, 1)
if self.supports_npn:
w2.add(ExtensionType.supports_npn, 2)
w2.add(0, 2)
if self.server_name:
w2.add(ExtensionType.server_name, 2)
w2.add(len(self.server_name)+5, 2)
w2.add(len(self.server_name)+3, 2)
w2.add(NameType.host_name, 1)
w2.addVarSeq(self.server_name, 1, 2)
if self.tack:
w2.add(ExtensionType.tack, 2)
w2.add(0, 2)
if len(w2.bytes):
w.add(len(w2.bytes), 2)
w.bytes += w2.bytes
return self.postWrite(w)
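# Putting the pieces together (a sketch only; Writer, ContentType and HandshakeType
# are assumed to come from .utils.packetCreator, and os.urandom merely stands in
# for a properly generated client random):
#
# import os
# hello = ClientHello().create(version=(3, 1), random=bytearray(os.urandom(32)),
#                              session_id=bytearray(0), cipher_suites=[0x002f, 0x0035])
# body = hello.write() # 4-byte handshake header + ClientHello body
# record = RecordHeader3().create((3, 1), hello.contentType, len(body)).write() + body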
|
from collections import defaultdict
def sock_merchant(sock_colors):
sock_counter = defaultdict(int)
result = 0
for sock_color in sock_colors:
sock_counter[sock_color] += 1
for sock_type_count in sock_counter.values():
result += sock_type_count // 2
return result
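# Worked example: for sock_colors = [10, 20, 20, 10, 10, 30, 50, 10, 20] the counts
# are {10: 4, 20: 3, 30: 1, 50: 1}, so the number of pairs is 4//2 + 3//2 + 1//2 + 1//2 = 3.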
n = int(input().strip())
sock_array = list(map(int, input().strip().split(' ')))
print(sock_merchant(sock_array))
|
import os
from flask import Flask, Response, request
import requests
import requests_ftp
import xml.etree.ElementTree as ET
from pprint import pprint
import json
import datetime
# Set up FTP
requests_ftp.monkeypatch_session()
cache = dict()
def get_cached_result(url: str) -> ET.Element:
cached_res = cache.get(url, None)
if cached_res is None or (datetime.datetime.now().timestamp() - cached_res[1]) > 600:
cache[url] = (get_BOM_xml(url), datetime.datetime.now().timestamp())
return cache[url][0]
return cached_res[0]
def get_BOM_xml(url: str) -> ET.Element:
s = requests.Session()
resp = s.retr(url)
xmltree = ET.fromstring(resp.text)
return xmltree
def make_xml_element_dict(xml_element: ET.Element) -> dict:
d = dict()
d[xml_element.tag] = xml_element.attrib
d[xml_element.tag]['text'] = xml_element.text
d[xml_element.tag]['children'] = [make_xml_element_dict(x) for x in xml_element.getchildren()]
return d
def get_bom_obs() -> dict:
xmltree = get_cached_result('ftp://ftp.bom.gov.au/anon/gen/fwo/IDD60920.xml')
obs = xmltree.find('observations')
station = ([x for x in obs.findall('station') if x.get('stn-name') == 'DARWIN AIRPORT'])[0]
outdict = dict()
outdict['observation_time'] = station[0].get('time-local')
outdict['apparent_temp'] = [x for x in station[0][0].findall('element') if x.get('type') == 'apparent_temp'][0].text
outdict['air_temperature'] = [x for x in station[0][0].findall('element') if x.get('type') == 'air_temperature'][0].text
return outdict
def get_bom_mini_forecast() -> dict:
xmltree = get_cached_result('ftp://ftp.bom.gov.au/anon/gen/fwo/IDD10207.xml')
current_forecast = [i for i in xmltree[1] if i.attrib['description'] == 'Darwin Airport'][0].getchildren()[0]
#return current_forecast
outdict = dict()
outdict['forecast_icon_code'] = [x for x in current_forecast.getchildren() if x.attrib['type'] == 'forecast_icon_code'][0].text
outdict['short_forecast'] = [x for x in current_forecast.getchildren() if x.attrib['type'] == 'precis'][0].text
outdict['probability_of_precipitation'] = [x for x in current_forecast.getchildren() if x.attrib['type'] == 'probability_of_precipitation'][0].text
return outdict
def get_bom_long_forecast() -> dict:
xmltree = get_cached_result('ftp://ftp.bom.gov.au/anon/gen/fwo/IDD10198.xml')
all_forecasts = [x for x in xmltree[1].getchildren() if x.attrib['description'] == "Darwin City and Outer Darwin"][0]
forecastzero = [x for x in all_forecasts.getchildren() if x.attrib['index'] == '0'][0]
forecast = [x for x in forecastzero.getchildren() if x.attrib['type'] == 'forecast'][0].text
outdict = dict()
outdict['long_forecast'] = forecast
return outdict
def get_full_data() -> dict:
obs = get_bom_obs()
minis = get_bom_mini_forecast()
longf = get_bom_long_forecast()
outdict = dict()
outdict['observation'] = obs
outdict['mini_forecast'] = minis
outdict['long_forecast'] = longf
return outdict
"""Create and configure an instance of the Flask application."""
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
@app.route('/bom_data')
def bom_observations():
outputdict = get_full_data()
return Response(json.dumps(outputdict), mimetype='application/json', headers=[('Access-Control-Allow-Origin', '*')])
@app.route('/nearby_search')
def nearby_search():
pprint(request.headers)
r = requests.get('https://maps.googleapis.com/maps/api/place/nearbysearch/json', params=request.args )
pprint(r.url)
return Response(r.text, mimetype='application/json', headers=[('Access-Control-Allow-Origin', '*')])
@app.route('/geocode')
def geocode():
r = requests.get('https://maps.googleapis.com/maps/api/geocode/json', params=request.args )
return Response(r.text, mimetype='application/json', headers=[('Access-Control-Allow-Origin', '*')])
|
from setuptools import setup, find_packages
import robosync
setup(
name='robosync',
version=robosync.__version__,
url='https://github.com/rbn920/robosync/',
license='MIT',
author='Robert Nelson',
tests_require=['unittest'],
author_email='robertb.nelson@gmail.com',
description='Sync with Python and robocopy',
long_description='',
packages=['robosync'],
include_package_data=True,
platforms='windows',
test_suite='robosync.test.test_robosync',
classifiers=['Programming Language :: Python',
'Development Status :: 3 - Alpha',
'Natural Language :: English',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Environment :: Win32 (MS Windows)']
)
|
# WARPnet Client<->Server Architecture
# WARPnet Parameter Definitions
#
# Author: Siddharth Gupta
import struct, time
from warpnet_framework.warpnet_common_params import *
from warpnet_framework.warpnet_client_definitions import *
from twisted.internet import reactor
import binascii
RETRANSMIT_COUNT = 10 # Max retries to send the data struct
WAIT_TIME = 10 # Time to wait before resending data structs
FILE_REFRESH_TIME = 60 # In seconds, time to wait between file reopens
# Struct IDs
STRUCTID_CONTROL = 0x13
STRUCTID_CONTROL_ACK = 0x14
STRUCTID_OBSERVE_REQUEST = 0x15
STRUCTID_OBSERVE = 0x16
STRUCTID_COMMAND = 0x17
STRUCTID_COMMAND_ACK = 0x18
STRUCTID_RTOBSERVE_REQUEST = 0x1A
STRUCTID_RTOBSERVE = 0x1B
STRUCTID_CFO = 0x20
STRUCTID_PHYCTRL = 0x22
STRUCTID_PHYCTRL_ACK = 0x23
STRUCTID_OBSERVE_BER = 0x24
STRUCTID_OBSERVE_BER_REQ = 0x25
STRUCTID_OBSERVE_COOPBER = 0x28
STRUCTID_OBSERVE_COOPBER_REQ = 0x29
STRUCTID_OBSERVE_PER = 0x26
STRUCTID_OBSERVE_PER_REQ = 0x27
STRUCTID_RAW_PKT = 0x30
STRUCTID_LOGPARAMS = 0x32
STRUCTID_LOGPARAMS_ACK = 0x33
# Command IDs
COMMANDID_STARTTRIAL = 0x40
COMMANDID_STOPTRIAL = 0x41
COMMANDID_RELAYSTATE = 0x42
COMMANDID_PKTGEN = 0x46
COMMANDID_RESET_PER = 0x50
# Command Params
COMMANDPARAM_RELAYOFF = 0x43
COMMANDPARAM_RELAYAF = 0x44
COMMANDPARAM_RELAYDF = 0x45
COMMANDPARAM_PKTGEN_ENABLE = 0x47
COMMANDPARAM_PKTGEN_DISABLE = 0x48
PKTTYPE_NCDATA = 0x00
PKTTYPE_NCMHOPDATA = 0xA2
PKTTYPE_DFDATA = 0xEE
PKTTYPE_AFDATA = 0x55
PKTTYPE_AFGHDATA = 0xC3
PKTTYPE_DFGHDATA = 0x3C
PKTTYPE_INVALID = 0x88
PHYCTRL_BER_EN = 0x1
PHYCTRL_CFO_EN = 0x2
PHYCTRL_PHYDUMP_EN = 0x4
PHYTRCL_EXTPKTDET_EN = 0x8
PHYCTRL_COOP_EN = 0x10
PHYCTRL_CFO_CORR_EN = 0x20
PHYCTRL_SWAP_ANT = 0x40
PHYCTRL_TX_NC = 0x01
PHYCTRL_TX_DF = 0x02
PHYCTRL_TX_AF = 0x04
PHYCTRL_TX_AFGH = 0x08
PHYCTRL_TX_DFGH = 0x10
PHYCTRL_TX_NCMHOP = 0x20
class MyDataLogger(DataCollector):
def __init__(self, filename):
self.filename = filename
self.logFile = open(self.filename, 'w')
def log(self, dataToLog):
self.logFile.write(dataToLog)
self.logFile.flush()
def closeFile(self):
self.logFile.close()
# Struct Definitions
# ControlStruct is a ClientStruct that stores some basic parameters to pass to the WARP board. Its fields can be accessed
# globally as class attributes, e.g. ControlStruct.txPower. The struct must also define the conversion from integer values
# to binary via the prepToSend function, which is given the nodeID.
class ControlStruct(ClientStruct):
txPower = -1
channel = -1
modOrderHeader = -1
modOrderPayload = -1
reserved = 0
packetGeneratorPeriod = 0
packetGeneratorLength = 0
def __init__(self):
self.structID = STRUCTID_CONTROL
self.txPower = 63
self.channel = 4
self.modOrderHeader = 0
self.modOrderPayload = 2
self.packetGeneratorPeriod = 0
self.packetGeneratorLength = 1300
self.expectedReturnStructID = STRUCTID_CONTROL_ACK
def prepToSend(self, nodeID):
self.updateDone = False
return struct.pack('!6BHII', self.structID, nodeID, self.txPower, self.channel, self.modOrderHeader, self.modOrderPayload, self.reserved, self.packetGeneratorPeriod, self.packetGeneratorLength)
def updateFromNode(self, rawData, pcapts):
dataTuple = struct.unpack('!BBH', rawData[0:4])
#print "Control struct successfully applied at node %d" % dataTuple[1]
# CommandStruct is a Client struct to send commands or request data from the WARP board. The cmdIDs are defined in warpnet_params.py
class CommandStruct(ClientStruct):
cmdID = -1
cmdParam = -1
def __init__(self, cmdID, cmdParam):
self.structID = STRUCTID_COMMAND
self.expectedReturnStructID = STRUCTID_COMMAND_ACK
self.cmdID = cmdID
self.cmdParam = cmdParam
def prepToSend(self, nodeID):
self.updateDone = False
return struct.pack('!4B', self.structID, nodeID, self.cmdID, self.cmdParam)
def updateFromNode(self, rawData, pcapts):
pass
#print "Successfully executed command %d" % self.cmdID
# BER struct that is filled in by the node
class ObserveStruct(ClientStruct):
sourceNode = -1
numDataTx = -1
numNACKTx = -1
numDataRx = -1
numNACKRx = -1
numBadHeaderRx = -1
sumGain = -1
sumRSSI = -1
packetCountRx = -1
def __init__(self, sourceNode, logger=None):
ClientStruct.__init__(self, logger)
self.structID = STRUCTID_OBSERVE_REQUEST
self.expectedReturnStructID = STRUCTID_OBSERVE
self.sourceNode = sourceNode
self.logData("structID=%d, nodeID, cmdID, numDataTx, numNACKTx, numDataRx, numNACKRx, numBadHeaderRx, sumGain, sumRSSI, packetCountRx, trialDuration, time\r\n" % STRUCTID_OBSERVE_REQUEST)
def prepToSend(self, nodeID):
self.updateDone = False
return struct.pack('!BBH', self.structID, nodeID, self.sourceNode)
def updateFromNode(self, rawData, pcapts):
dataTuple = struct.unpack('!2BH9I', rawData[0:40])
self.logData("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d\r\n" % (dataTuple[0], dataTuple[1], dataTuple[2], dataTuple[3], dataTuple[4], dataTuple[5], dataTuple[6], dataTuple[7], dataTuple[8], dataTuple[9], dataTuple[10], dataTuple[11], time.time()))
class RTObserveStruct(ClientStruct):
sequenceNumber = -1
pktType = -1
srcNode = -1
dstNode = -1
relayNode = -1
state = -1
rssi = -1
gain = -1
timestamp = -1
def __init__(self, logger=None):
ClientStruct.__init__(self, logger)
self.structID = STRUCTID_RTOBSERVE_REQUEST
self.expectedReturnStructID = STRUCTID_RTOBSERVE
self.logData("sequenceNumber, pktType, srcNode, dstNode, relayNode, state, RSSI, gain, timestamp\r\n")
def prepToSend(self, nodeID):
self.updateDone = False
return struct.pack('!BBH', self.structID, nodeID, 0)
def updateFromNode(self, rawData, pcapts):
dataTuple = struct.unpack('!2BH6B2HI', rawData[0:18])
self.logData("%d, %d, %d, %d, %d, %d, %d, %d, %d\r\n" % (dataTuple[2], dataTuple[3], dataTuple[4], dataTuple[5], dataTuple[6], dataTuple[7], dataTuple[9], dataTuple[10], dataTuple[11]))
class CFOStruct(ClientStruct):
atten = 0
pktLen = 0
minMag = 0
def __init__(self, logger=None):
ClientStruct.__init__(self, logger)
self.structID = STRUCTID_CFO # Reusing the structID as the expected return ID is only dangerous if another controller is present
self.expectedReturnStructID = STRUCTID_CFO
self.logData("nodeID, seqNum, cfo_c, cfo_p, cfo_b, txCFO, pktStatus, atten, pktLen, minMag, pcap timestamp\r\n")
self.atten = 0
self.pktLen = 0
self.minMag = 0
def prepToSend(self, nodeID):
self.updateDone = False
return struct.pack('!BBH', self.structID, nodeID, 0)
def updateFromNode(self, rawData, pcapts):
dataTuple = struct.unpack('!2B H 5i', rawData[0:24])
self.logData("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %s\r\n" % (dataTuple[1], dataTuple[2], dataTuple[3], dataTuple[4], dataTuple[5], dataTuple[6], dataTuple[7], self.atten, self.pktLen, self.minMag, pcapts))
class PHYctrlStruct(ClientStruct):
param0 = 0
param1 = 0
param2 = 0
param3 = 0
param4 = 0
param5 = 0
param6 = 0
param7 = 0
param8 = 0
param9 = 0
def __init__(self, logger=None):
ClientStruct.__init__(self, logger)
self.structID = STRUCTID_PHYCTRL
self.expectedReturnStructID = STRUCTID_PHYCTRL_ACK
self.param0 = 0 #short - Enable CFO correction
self.param1 = 0 #int - Tx CFO Freq
self.param2 = 0 #int
self.param3 = 0 #int
self.param4 = 0 #int
self.param5 = 0 #int
self.param6 = 0 #int
self.param7 = 0 #int
self.param8 = 0 #int
self.param9 = 0 #int
def prepToSend(self, nodeID):
self.updateDone = False
#was type "i", but Python complained when MSB of argument was 1...
return struct.pack('!BBH9I', self.structID, nodeID, self.param0, self.param1, self.param2, self.param3, self.param4, self.param5, self.param6, self.param7, self.param8, self.param9)
class ObserveBERStruct(ClientStruct):
#0 unsigned char structID;
#1 unsigned char nodeID;
#2 unsigned short sequenceNumber;
#3 unsigned char nodeID_tx;
#4 unsigned char nodeID_rx;
#5 unsigned short mac_seqNum;
#6 unsigned int bits_rx;
#7 unsigned int bits_errors;
totalBitsReceived = 0
totalBitErrors = 0
nodeID_tx = -1
nodeID_rx = -1
def __init__(self, logger=None):
ClientStruct.__init__(self, logger)
self.structID = STRUCTID_OBSERVE_BER_REQ
self.expectedReturnStructID = STRUCTID_OBSERVE_BER
self.totalBitsReceived = 0
self.totalBitErrors = 0
def prepToSend(self, nodeID):
self.updateDone = False
return struct.pack('!BBH', self.structID, nodeID, 0)
def updateFromNode(self, rawData, pcapts):
dataTuple = struct.unpack('!2B H 2B H 2I', rawData[0:16])
self.nodeID_tx = dataTuple[3]
self.nodeID_rx = dataTuple[4]
self.totalBitsReceived += dataTuple[6]
self.totalBitErrors += dataTuple[7]
def clearBitCounts(self):
self.totalBitsReceived = 0
self.totalBitErrors = 0
class ObserveCoopBERStruct(ClientStruct):
#0 unsigned char structID;
#1 unsigned char nodeID;
#2 unsigned short sequenceNumber;
#3 unsigned char nodeID_tx;
#4 unsigned char nodeID_rx;
#5 unsigned short mac_seqNum;
#6 unsigned char mac_pktType;
#7 unsigned char reserved0;
#8 unsigned char reserved1;
#9 unsigned char reserved2;
#10 unsigned int bits_rx;
#11 unsigned int bits_errors;
nodeID_tx = -1
nodeID_rx = -1
totalBitsReceived_NC = 0
totalBitsReceived_AF = 0
totalBitsReceived_AFGH = 0
totalBitsReceived_DF = 0
totalBitsReceived_DFGH = 0
totalBitsReceived_NCMHOP = 0
totalBitErrors_NC = 0
totalBitErrors_AF = 0
totalBitErrors_AFGH = 0
totalBitErrors_DF = 0
totalBitErrors_DFGH = 0
totalBitErrors_NCMHOP = 0
totalBitsReceived_AF_noNC = 0
totalBitsReceived_AFGH_noNC = 0
totalBitsReceived_DF_noNC = 0
totalBitsReceived_DFGH_noNC = 0
totalBitErrors_AF_noNC = 0
totalBitErrors_AFGH_noNC = 0
totalBitErrors_DF_noNC = 0
totalBitErrors_DFGH_noNC = 0
lastSeqNum_NC = -1
def __init__(self, logger=None):
ClientStruct.__init__(self, logger)
self.structID = STRUCTID_OBSERVE_COOPBER_REQ
self.expectedReturnStructID = STRUCTID_OBSERVE_COOPBER
self.totalBitsReceived_NC = 0
self.totalBitsReceived_AF = 0
self.totalBitsReceived_AFGH = 0
self.totalBitsReceived_DF = 0
self.totalBitsReceived_DFGH = 0
self.totalBitsReceived_NCMHOP = 0
self.totalBitErrors_NC = 0
self.totalBitErrors_AF = 0
self.totalBitErrors_AFGH = 0
self.totalBitErrors_DF = 0
self.totalBitErrors_DFGH = 0
self.totalBitErrors_NCMHOP = 0
self.totalBitsReceived_AF_noNC = 0
self.totalBitsReceived_AFGH_noNC = 0
self.totalBitsReceived_DF_noNC = 0
self.totalBitsReceived_DFGH_noNC = 0
self.totalBitErrors_AF_noNC = 0
self.totalBitErrors_AFGH_noNC = 0
self.totalBitErrors_DF_noNC = 0
self.totalBitErrors_DFGH_noNC = 0
self.lastSeqNum_NC = -1
def prepToSend(self, nodeID):
self.updateDone = False
return struct.pack('!BBH', self.structID, nodeID, 0)
def updateFromNode(self, rawData, pcapts):
dataTuple = struct.unpack('!2B H 2B H 4B 2I', rawData[0:20])
self.nodeID_tx = dataTuple[3]
self.nodeID_rx = dataTuple[4]
self.coopPktType = dataTuple[6]
if(dataTuple[6] == PKTTYPE_NCDATA):
self.totalBitsReceived_NC += dataTuple[10]
self.totalBitErrors_NC += dataTuple[11]
self.lastSeqNum_NC = dataTuple[5]
elif(dataTuple[6] == PKTTYPE_AFDATA):
self.totalBitsReceived_AF += dataTuple[10]
self.totalBitErrors_AF += dataTuple[11]
if(self.lastSeqNum_NC != dataTuple[5]):
self.totalBitsReceived_AF_noNC += dataTuple[10]
self.totalBitErrors_AF_noNC += dataTuple[11]
elif(dataTuple[6] == PKTTYPE_AFGHDATA):
self.totalBitsReceived_AFGH += dataTuple[10]
self.totalBitErrors_AFGH += dataTuple[11]
if(self.lastSeqNum_NC != dataTuple[5]):
self.totalBitsReceived_AFGH_noNC += dataTuple[10]
self.totalBitErrors_AFGH_noNC += dataTuple[11]
elif(dataTuple[6] == PKTTYPE_DFDATA):
self.totalBitsReceived_DF += dataTuple[10]
self.totalBitErrors_DF += dataTuple[11]
if(self.lastSeqNum_NC != dataTuple[5]):
self.totalBitsReceived_DF_noNC += dataTuple[10]
self.totalBitErrors_DF_noNC += dataTuple[11]
elif(dataTuple[6] == PKTTYPE_DFGHDATA):
self.totalBitsReceived_DFGH += dataTuple[10]
self.totalBitErrors_DFGH += dataTuple[11]
if(self.lastSeqNum_NC != dataTuple[5]):
self.totalBitsReceived_DFGH_noNC += dataTuple[10]
self.totalBitErrors_DFGH_noNC += dataTuple[11]
elif(dataTuple[6] == PKTTYPE_NCMHOPDATA):
self.totalBitsReceived_NCMHOP += dataTuple[10]
self.totalBitErrors_NCMHOP += dataTuple[11]
else:
print("ObserveCoopBERStruct::updateFromNode: Unknown pktType=%d" % dataTuple[6])
def clearBitCounts(self):
self.totalBitsReceived_NC = 0
self.totalBitsReceived_AF = 0
self.totalBitsReceived_AFGH = 0
self.totalBitsReceived_DF = 0
self.totalBitsReceived_DFGH = 0
self.totalBitsReceived_NCMHOP = 0
self.totalBitErrors_NC = 0
self.totalBitErrors_AF = 0
self.totalBitErrors_AFGH = 0
self.totalBitErrors_DF = 0
self.totalBitErrors_DFGH = 0
self.totalBitErrors_NCMHOP = 0
self.totalBitsReceived_AF_noNC = 0
self.totalBitsReceived_AFGH_noNC = 0
self.totalBitsReceived_DF_noNC = 0
self.totalBitsReceived_DFGH_noNC = 0
self.totalBitErrors_AF_noNC = 0
self.totalBitErrors_AFGH_noNC = 0
self.totalBitErrors_DF_noNC = 0
self.totalBitErrors_DFGH_noNC = 0
self.lastSeqNum_NC = -1
class ObservePERStruct(ClientStruct):
#typedef struct {
# unsigned char structID;
# unsigned char nodeID;
# unsigned char reqNum;
# unsigned char reqType;
# unsigned int numPkts_tx;
# unsigned int numPkts_rx_good;
# unsigned int numPkts_rx_goodHdrBadPyld;
# unsigned int numPkts_rx_badHdr;
#} warpnetObservePER;
numPkts_tx = -1
numPkts_rx_good = -1
numPkts_rx_goodHdrBadPyld = -1
numPkts_rx_badHdr = -1
reqNum = -1
reqType = -1
def __init__(self, logger=None):
ClientStruct.__init__(self, logger)
self.structID = STRUCTID_OBSERVE_PER_REQ
self.expectedReturnStructID = STRUCTID_OBSERVE_PER
self.numPkts_tx = 0
self.numPkts_rx_good = 0
self.numPkts_rx_goodHdrBadPyld = 0
self.numPkts_rx_badHdr = 0
self.reqNum = 0
self.reqType = 0
def prepToSend(self, nodeID):
self.updateDone = False
return struct.pack('!4B', self.structID, nodeID, self.reqNum, self.reqType)
def updateFromNode(self, rawData, pcapts):
dataTuple = struct.unpack('!2B 2B 4I', rawData[0:20])
self.reqNum = dataTuple[2]
self.reqType = dataTuple[3]
self.numPkts_tx = dataTuple[4]
self.numPkts_rx_good = dataTuple[5]
self.numPkts_rx_goodHdrBadPyld = dataTuple[6]
self.numPkts_rx_badHdr = dataTuple[7]
class LogParams(ClientStruct):
#typedef struct {
# unsigned char structID;
# unsigned char nodeID;
#
# unsigned short fileSuffix;
#
# unsigned int param0;
# unsigned int param1;
# unsigned int param2;
# unsigned int param3;
#} warpnetLogParams;
fileSuffix = 0
param0 = 0
param1 = 0
param2 = 0
param3 = 0
def __init__(self, logger=None):
ClientStruct.__init__(self, logger)
self.structID = STRUCTID_LOGPARAMS
self.expectedReturnStructID = STRUCTID_LOGPARAMS_ACK
self.fileSuffix = 0
self.param0 = 0 #int
self.param1 = 0 #int
self.param2 = 0 #int
self.param3 = 0 #int
def prepToSend(self, nodeID):
self.updateDone = False
#was type "i", but Python complained when MSB of argument was 1...
return struct.pack('!BBH4I', self.structID, nodeID, self.fileSuffix, self.param0, self.param1, self.param2, self.param3)
def updateFromNode(self, rawData, pcapts):
pass
|
##########################################################################
#
# Copyright (c) 2013-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import traceback
import functools
import warnings
import IECore
import Gaffer
import GafferUI
import GafferRenderMan
##########################################################################
# Access to shaders and annotations from the shader cache
##########################################################################
def _shader( shaderNode ) :
if isinstance( shaderNode, GafferRenderMan.RenderManShader ) :
shaderName = shaderNode["name"].getValue()
else :
shaderName = shaderNode["__shaderName"].getValue()
try :
return GafferRenderMan.RenderManShader.shaderLoader().read( shaderName + ".sdl" )
except Exception, e :
return None
def _shaderAnnotations( shaderNode ) :
shader = _shader( shaderNode )
return shader.blindData().get( "ri:annotations", {} ) if shader is not None else {}
##########################################################################
# Nodules
##########################################################################
def __parameterNoduleType( plug ) :
# only coshader parameters should be connectable in the node
# graph.
if plug.typeId() == Gaffer.Plug.staticTypeId() :
return "GafferUI::StandardNodule"
elif plug.typeId() == Gaffer.ArrayPlug.staticTypeId() :
return "GafferUI::CompoundNodule"
return ""
GafferUI.Metadata.registerPlugValue( GafferRenderMan.RenderManShader, "parameters.*", "nodule:type", __parameterNoduleType )
GafferUI.Metadata.registerPlugValue( GafferRenderMan.RenderManShader, "parameters.*", "compoundNodule:orientation", "y" )
# coshader arrays tend to be used for layering, so we prefer to present the
# last entry at the top, hence the increasing direction.
GafferUI.Metadata.registerPlugValue( GafferRenderMan.RenderManShader, "parameters.*", "compoundNodule:direction", "increasing" )
##########################################################################
# NodeUI - this exists only for backwards compatibility, and will be
# removed.
##########################################################################
class RenderManShaderUI( GafferUI.StandardNodeUI ) :
def __init__( self, node, displayMode = None, **kw ) :
GafferUI.StandardNodeUI.__init__( self, node, displayMode, **kw )
warnings.warn( "RenderManShaderUI is deprecated, use either StandardNodeUI or LayoutPlugValueWidget.", DeprecationWarning, 2 )
##########################################################################
# PlugValueWidget creator for the parameters plug itself.
##########################################################################
def __parametersPlugValueWidgetCreator( plug ) :
# Because we don't know the names of sections in advance,
# we must use this opportunity to do a just-in-time registration of
# metadata values for the collapsed status of each section. An
# alternative approach would perhaps allow Metadata to be registered
# with wildcards in the name, and with an associated method to return all
# matching names (so registeredPlugValues() could continue to work).
collapsedRe = re.compile( "^page\.(.+)\.collapsed" )
annotations = _shaderAnnotations( plug.node() )
for name, value in annotations.items() :
m = collapsedRe.match( name )
if m :
Gaffer.Metadata.registerPlugValue(
plug,
"layout:section:" + m.group( 1 ) + ":collapsed",
value in ( "True", "true", "1" ),
persistent = False,
)
shader = _shader( plug.node() )
if shader is not None :
# when shaders are reloaded after having new parameters added,
# the order of the plugs and the parameters don't match, so we
# use the parameter ordering to define the ui order via metadata.
## \todo Ideally we'd get the plug ordering to match in
# RenderManShader::loadShader(), and then the ordering of
# connections in the node graph would be correct too.
orderedParameterNames = shader.blindData()["ri:orderedParameterNames"]
index = 0
for name in orderedParameterNames :
if name.endswith( "Values" ) and name[:-6] + "Positions" in shader.parameters :
name = name[:-6]
elif name.endswith( "Positions" ) and name[:-9] + "Values" in shader.parameters :
continue
if name in plug :
Gaffer.Metadata.registerPlugValue( plug[name], "layout:index", index, persistent = False )
index += 1
# Now we've created the appropriate metadata, we can just defer to a standard LayoutPlugValueWidget
return GafferUI.LayoutPlugValueWidget( plug )
GafferUI.PlugValueWidget.registerCreator( GafferRenderMan.RenderManShader, "parameters", __parametersPlugValueWidgetCreator )
GafferUI.PlugValueWidget.registerCreator( GafferRenderMan.RenderManLight, "parameters", __parametersPlugValueWidgetCreator )
##########################################################################
# PlugValueWidgets for the individual parameter plugs. We use annotations
# stored in the shader to provide hints as to how we should build the UI.
# We use the OSL specification for shader metadata in the hope that one day
# we'll get to use OSL in Gaffer and then we'll have a consistent metadata
# convention across both shader types.
##########################################################################
def __optionValue( plug, stringValue ) :
if isinstance( plug, Gaffer.StringPlug ) :
return stringValue
elif isinstance( plug, Gaffer.IntPlug ) :
return int( stringValue )
elif isinstance( plug, Gaffer.FloatPlug ) :
return float( stringValue )
else :
raise Exception( "Unsupported parameter type." )
def __numberCreator( plug, annotations ) :
if len( plug ) :
return GafferUI.CompoundNumericPlugValueWidget( plug )
else :
return GafferUI.NumericPlugValueWidget( plug )
def __stringCreator( plug, annotations ) :
return GafferUI.StringPlugValueWidget( plug )
def __booleanCreator( plug, annotations ) :
return GafferUI.BoolPlugValueWidget( plug )
def __popupCreator( plug, annotations ) :
options = annotations.get( plug.getName() + ".options", None )
if options is None :
raise Exception( "No \"options\" annotation." )
options = options.value.split( "|" )
labelsAndValues = [ ( x, __optionValue( plug, x ) ) for x in options ]
return GafferUI.EnumPlugValueWidget( plug, labelsAndValues )
def __mapperCreator( plug, annotations ) :
options = annotations.get( plug.getName() + ".options", None )
if options is None :
raise Exception( "No \"options\" annotation." )
options = options.value.split( "|" )
labelsAndValues = []
for option in options :
tokens = option.split( ":" )
if len( tokens ) != 2 :
raise Exception( "Option \"%s\" is not of form name:value" % option )
labelsAndValues.append( ( tokens[0], __optionValue( plug, tokens[1] ) ) )
return GafferUI.EnumPlugValueWidget( plug, labelsAndValues )
def __fileNameCreator( plug, annotations ) :
extensions = annotations.get( plug.getName() + ".extensions", None )
if extensions is not None :
extensions = extensions.value.split( "|" )
else :
extensions = []
bookmarksCategory = annotations.get( plug.getName() + ".bookmarksCategory", None )
if bookmarksCategory is not None :
bookmarksCategory = bookmarksCategory.value
else :
# seems like a reasonable guess, and it's preferable to have a category
# rather than to have any bookmarks made here pollute the bookmarks for
# other browsers.
bookmarksCategory = "texture"
return GafferUI.PathPlugValueWidget(
plug,
path = Gaffer.FileSystemPath(
"/",
filter = Gaffer.FileSystemPath.createStandardFilter(
extensions = extensions,
extensionsLabel = "Show only supported files",
),
),
pathChooserDialogueKeywords = {
"bookmarks" : GafferUI.Bookmarks.acquire(
plug,
pathType = Gaffer.FileSystemPath,
category = bookmarksCategory,
),
},
)
def __nullCreator( plug, annotations ) :
return None
__creators = {
"number" : __numberCreator,
"vector2" : __numberCreator,
"string" : __stringCreator,
"boolean" : __booleanCreator,
"checkBox" : __booleanCreator,
"popup" : __popupCreator,
"mapper" : __mapperCreator,
"filename" : __fileNameCreator,
"null" : __nullCreator,
}
def __plugValueWidgetCreator( plug ) :
global __creators
annotations = _shaderAnnotations( plug.node() )
parameterName = plug.getName()
widgetType = annotations.get( parameterName + ".widget", None )
widgetCreator = None
if widgetType is not None :
widgetCreator = __creators.get( widgetType.value, None )
if widgetCreator is None :
IECore.msg(
IECore.Msg.Level.Warning,
"RenderManShaderUI",
"Shader parameter \"%s.%s\" has unsupported widget type \"%s\"" %
( plug.node()["name"].getValue(), parameterName, widgetType )
)
if widgetCreator is not None :
try :
return widgetCreator( plug, annotations )
except Exception, e :
IECore.msg(
IECore.Msg.Level.Warning,
"RenderManShaderUI",
"Error creating UI for parameter \"%s.%s\" : \"%s\"" %
( plug.node()["name"].getValue(), parameterName, str( e ) )
)
if plug.typeId() == Gaffer.ArrayPlug.staticTypeId() :
# coshader array
return None
result = GafferUI.PlugValueWidget.create( plug, useTypeOnly=True )
if isinstance( result, GafferUI.VectorDataPlugValueWidget ) :
result.vectorDataWidget().setSizeEditable( plug.defaultValue().size() == 0 )
return result
GafferUI.PlugValueWidget.registerCreator( GafferRenderMan.RenderManShader, "parameters.*", __plugValueWidgetCreator )
GafferUI.PlugValueWidget.registerCreator( GafferRenderMan.RenderManLight, "parameters.*", __plugValueWidgetCreator )
##########################################################################
# Metadata registrations
##########################################################################
def __nodeDescription( node ) :
__defaultNodeDescription = """Loads shaders for use in RenderMan renderers. Use the ShaderAssignment node to assign shaders to objects in the scene."""
description = _shaderAnnotations( node ).get( "help", None )
return description.value if description is not None else __defaultNodeDescription
def __nodeColor( node ) :
try:
annotations = _shaderAnnotations( node )
if annotations.has_key( "nodeColor" ) :
match = re.search( "color\((.+),(.+),(.+)\)", annotations["nodeColor"].value )
if match:
return IECore.Color3f( float( match.group(1) ), float( match.group(2) ), float( match.group(3) ) )
else:
raise Exception, "Error parsing \"nodeColor\" annotation: " + annotations["nodeColor"].value
except Exception, e:
IECore.msg( IECore.Msg.Level.Warning, "RenderManShaderUI", str( e ) )
return None
def __nodeActivators( node ) :
class ExpressionVariables :
def connected( self, key ) :
return node["parameters"][key].getInput() is not None
def __getitem__( self, key ) :
if key == "connected" :
return self.connected
else :
return node["parameters"][key].getValue()
result = IECore.CompoundData()
for name, value in _shaderAnnotations( node ).items() :
if not name.startswith( "activator." ) or not name.endswith( ".expression" ) :
continue
active = False
try :
active = eval( value.value, globals(), ExpressionVariables() )
except Exception, e :
IECore.msg( IECore.Msg.Level.Error, "Parameter activator", "".join( traceback.format_exception_only( type( e ), e ) ) )
result[name.split( "." )[1]] = bool( active )
return result
def __plugDescription( plug ) :
annotations = _shaderAnnotations( plug.node() )
d = annotations.get( plug.getName() + ".help", None )
return d.value if d is not None else ""
def __plugLabel( plug ) :
annotations = _shaderAnnotations( plug.node() )
d = annotations.get( plug.getName() + ".label", None )
return d.value if d is not None else None
def __plugDivider( plug ) :
annotations = _shaderAnnotations( plug.node() )
d = annotations.get( plug.getName() + ".divider", None )
if d is None :
return False
return d.value.lower() in ( "True", "true", "1" )
def __plugVisibleDimensions( plug ) :
annotations = _shaderAnnotations( plug.node() )
d = annotations.get( plug.getName() + ".widget", None )
if d is not None and d.value == "vector2" :
return 2
else :
return None
def __plugSection( plug ) :
annotations = _shaderAnnotations( plug.node() )
return annotations.get( plug.getName() + ".page", None )
def __plugActivator( plug ) :
annotations = _shaderAnnotations( plug.node() )
return annotations.get( plug.getName() + ".activator", None )
Gaffer.Metadata.registerNodeDescription( GafferRenderMan.RenderManShader, __nodeDescription )
Gaffer.Metadata.registerNodeValue( GafferRenderMan.RenderManShader, "nodeGadget:color", __nodeColor )
for nodeType in( GafferRenderMan.RenderManShader, GafferRenderMan.RenderManLight ) :
Gaffer.Metadata.registerNodeValue( nodeType, "layout:activators", __nodeActivators )
Gaffer.Metadata.registerPlugDescription( nodeType, "parameters.*", __plugDescription )
Gaffer.Metadata.registerPlugValue( nodeType, "parameters.*", "label", __plugLabel )
Gaffer.Metadata.registerPlugValue( nodeType, "parameters.*", "divider", __plugDivider )
Gaffer.Metadata.registerPlugValue( nodeType, "parameters.*", "ui:visibleDimensions", __plugVisibleDimensions )
Gaffer.Metadata.registerPlugValue( nodeType, "parameters.*", "layout:section", __plugSection )
Gaffer.Metadata.registerPlugValue( nodeType, "parameters.*", "layout:activator", __plugActivator )
|
from .linked_list import LinkedList
from .hashmap import HashMap
|
import pytest
import os
@pytest.fixture(autouse=True)
def is_skinny():
if "MLFLOW_SKINNY" not in os.environ:
pytest.skip("This test is only valid for the skinny client")
def test_fails_import_flask():
import mlflow
assert mlflow is not None
with pytest.raises(ImportError):
import flask
assert flask is not None
def test_fails_import_pandas():
import mlflow
assert mlflow is not None
with pytest.raises(ImportError):
import pandas
assert pandas is not None
def test_fails_import_numpy():
import mlflow
assert mlflow is not None
with pytest.raises(ImportError):
import numpy
assert numpy is not None
|
from abc import ABC, abstractmethod
from typing import List
class Collision(ABC):
"""Abstract interface for a game collision."""
@abstractmethod
def snake(self, location: List) -> bool:
pass
@abstractmethod
def apple(self, location: List) -> bool:
pass
class GameCollision(Collision):
"""SnakeEntity game collision interface."""
def __init__(self, position: List, size: int) -> None:
self._position: List = position
self._size: int = size
self._times: int = 11
self._diff: int = 10
self._shift: int = 1
def snake(self, location: List) -> bool:
return (location[0] + location[0] + self._diff) > self._position[0] > location[0] and \
(location[1] + location[1] + self._diff) > self._position[1] > location[1] or \
(location[0] + location[0] + self._diff) > self._position[0] + self._diff > location[0] and \
(location[1] + location[1] + self._diff) > self._position[1] + self._diff > location[1]
def apple(self, location: List) -> bool:
return (location[0] - self._shift +
(self._times * self._size)) > self._position[0] > location[0] - self._shift and \
(location[1] - self._shift +
(self._times * self._size)) > self._position[1] > location[1] - self._shift or \
(location[0] - self._shift +
(self._times * self._size)) > self._position[0] + self._diff > location[0] - self._shift and \
(location[1] - self._shift +
(self._times * self._size)) > self._position[1] + self._diff > location[1] - self._shift
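# Illustrative usage sketch (not part of the original module; the coordinates are
# made up): the helpers compare a fixed entity position against a box around a
# moving location, here roughly a 10x10 cell.
if __name__ == "__main__":
    collision = GameCollision(position=[105, 105], size=10)
    print(collision.snake([100, 100]))  # True: the snake's head box overlaps the entity
    print(collision.apple([100, 100]))  # True: the larger apple box also overlaps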
|
import unittest
import uuid
import mock
def fake_id(prefix):
entropy = ''.join([a for a in str(uuid.uuid4()) if a.isalnum()])
return '{}_{}'.format(prefix, entropy)
class APITestCase(unittest.TestCase):
def setUp(self):
super(APITestCase, self).setUp()
self.requestor_patcher = mock.patch('gym.scoreboard.client.api_requestor.APIRequestor')
requestor_class_mock = self.requestor_patcher.start()
self.requestor_mock = requestor_class_mock.return_value
def mock_response(self, res):
self.requestor_mock.request = mock.Mock(return_value=(res, 'reskey'))
class TestData(object):
@classmethod
def file_upload_response(cls):
return {
'id': fake_id('file'),
'object': 'file',
}
@classmethod
def evaluation_response(cls):
return {
'id': fake_id('file'),
'object': 'evaluation',
}
|
import os
import fcntl
import os.path
import pytest
import simplejson
from .conftest import requires_questionnaire, requires_mongomock
from happi.backends.json_db import JSONBackend
from happi.errors import DuplicateError, SearchError
from happi import Client
from happi.containers import Motor
@pytest.fixture(scope='function')
def mockmongo(mockmongoclient):
return mockmongoclient.backend
@pytest.fixture(scope='function')
def mockjson(device_info, valve_info):
# Write underlying database
with open('testing.json', 'w+') as handle:
simplejson.dump({device_info['prefix']: device_info},
handle)
    # Yield the JSON backend
yield JSONBackend('testing.json')
# Delete file
os.remove('testing.json')
@requires_mongomock
def test_mongo_find(valve_info, device_info, mockmongo):
mm = mockmongo
mm._collection.insert_one(valve_info)
# No single device expected
assert mm.find(beamline='BLERG', multiples=False) == []
# Single device by id
assert device_info == mm.find(_id=device_info['_id'],
multiples=False)
# Single device by kwarg
assert valve_info == mm.find(prefix=valve_info['prefix'],
multiples=False)
    # No multiple devices expected
    assert mm.find(beamline='BLERG', multiples=True) == []
# Multiple devices by id
assert [device_info] == mm.find(_id=device_info['_id'],
multiples=True)
# Multiple devices by kwarg
assert [device_info] == mm.find(prefix=device_info['prefix'],
multiples=True)
# Multiple devices expected
result = mm.find(beamline='LCLS', multiples=True)
assert all([info in result for info in (device_info, valve_info)])
@requires_mongomock
def test_mongo_save(mockmongo, device_info, valve_info):
# Duplicate device
with pytest.raises(DuplicateError):
mockmongo.save(device_info['prefix'], device_info, insert=True)
# Device not found
with pytest.raises(SearchError):
mockmongo.save(valve_info['prefix'], valve_info, insert=False)
# Add to database
mockmongo.save(valve_info['prefix'], valve_info, insert=True)
assert mockmongo._collection.find_one(valve_info) == valve_info
@requires_mongomock
def test_mongo_delete(mockmongo, device_info):
mockmongo.delete(device_info['prefix'])
assert mockmongo._collection.find_one(device_info) is None
def test_json_find(valve_info, device_info, mockjson):
mm = mockjson
# Write underlying database
with open(mm.path, 'w+') as handle:
simplejson.dump({valve_info['prefix']: valve_info,
device_info['prefix']: device_info},
handle)
# No single device expected
assert mm.find(beamline='BLERG', multiples=False) == []
# Single device by id
assert device_info == mm.find(_id=device_info['_id'],
multiples=False)
# Single device by kwarg
assert valve_info == mm.find(prefix=valve_info['prefix'],
multiples=False)
    # No multiple devices expected
    assert mm.find(beamline='BLERG', multiples=True) == []
# Multiple devices by id
assert [device_info] == mm.find(_id=device_info['_id'],
multiples=True)
# Multiple devices by kwarg
assert [device_info] == mm.find(prefix=device_info['prefix'],
multiples=True)
# Multiple devices expected
result = mm.find(beamline='LCLS', multiples=True)
assert all([info in result for info in (device_info, valve_info)])
def test_json_delete(mockjson, device_info):
mockjson.delete(device_info['prefix'])
assert device_info not in mockjson.all_devices
def test_json_save(mockjson, device_info, valve_info):
# Duplicate device
with pytest.raises(DuplicateError):
mockjson.save(device_info['prefix'], device_info, insert=True)
# Device not found
with pytest.raises(SearchError):
mockjson.save(valve_info['prefix'], valve_info, insert=False)
# Add to database
mockjson.save(valve_info['prefix'], valve_info, insert=True)
assert valve_info in mockjson.all_devices
def test_json_locking(mockjson):
# Place lock on file
handle = open(mockjson.path, 'w')
fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
# Attempt to save
with pytest.raises(IOError):
mockjson.store({"_ID": "ID"})
def test_json_initialize():
jb = JSONBackend("testing.json", initialize=True)
# Check that the file was made
assert os.path.exists("testing.json")
# Check it is a valid json file
assert jb.load() == {}
# Check that we can not overwrite the database
with pytest.raises(PermissionError):
JSONBackend("testing.json", initialize=True)
# Cleanup
os.remove("testing.json")
@requires_questionnaire
def test_qs_find(mockqsbackend):
assert len(mockqsbackend.find(beamline='TST', multiples=True)) == 6
assert len(mockqsbackend.find(name='sam_r', multiples=True)) == 1
@requires_questionnaire
def test_qsbackend_with_client(mockqsbackend):
c = Client(database=mockqsbackend)
assert len(c.all_devices) == 6
assert all([isinstance(d, Motor) for d in c.all_devices])
|
try:
import uwsgi # noqa: F401
except ModuleNotFoundError:
class postfork:
"""Simple non-uwsgi stub that just calls the postfork function"""
def __init__(self, f):
f()
def __call__(self, f):
pass
else:
import uwsgidecorators
postfork = uwsgidecorators.postfork
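# Illustrative usage (assumption, not part of the original module): under uwsgi
# the decorated function runs in every worker after fork; with the stub above it
# simply runs once at import time, so per-process setup still happens.
@postfork
def _init_worker_state():
    print("initialising per-process state")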
|
from django.contrib import admin
from .models import Board, Post, Topic
# Register your models here.
admin.site.register(Board)
admin.site.register(Post)
admin.site.register(Topic)
|
class Triangle:
EQUILATERAL = "equilateral"
ISOSCELES = "isosceles"
SCALENE = "scalene"
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
if self.error():
raise TriangleError
def kind(self):
if self.equilateral():
return self.EQUILATERAL
if self.isosceles():
return self.ISOSCELES
return self.SCALENE
def equilateral(self):
return self.a == self.b == self.c
def isosceles(self):
return self.a == self.b or self.b == self.c or self.a == self.c
def error(self):
return self.negative_sides() or self.triangle_inequality()
def negative_sides(self):
return self.a <= 0 or self.b <= 0 or self.c <= 0
def triangle_inequality(self):
return (self.a + self.b <= self.c or
self.b + self.c <= self.a or
self.a + self.c <= self.b)
class TriangleError(Exception):
pass
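# Quick usage sketch (illustrative only): classify a few triangles and show that
# invalid side lengths raise TriangleError.
if __name__ == "__main__":
    print(Triangle(2, 2, 2).kind())  # equilateral
    print(Triangle(3, 4, 5).kind())  # scalene
    try:
        Triangle(1, 1, 3)
    except TriangleError:
        print("1, 1, 3 violates the triangle inequality")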
|
from .visible_object import VisibleObject
from ..aabb import AABB
class Sphere(VisibleObject):
TYPE = 'sphere'
def __init__(self, **kwargs):
self._location = [0, 0, 0]
self._size = 2
attribute_mappings = {
'location': {
'attribute': '_location'
},
'size': {
'attribute': '_size'
}
}
super().__init__(Sphere.TYPE, attribute_mappings, **kwargs)
def aabb(self):
(cx, cy, cz) = self._location
r = self._size
return AABB(cx - r, cx + r, cy - r, cy + r, cz - r, cz + r)
def to_bpy(self, bpy, name_prefix):
bpy.ops.mesh.primitive_ico_sphere_add(
location=self._location, size=self._size
)
obj = bpy.context.object
super().apply_common_bpy_properties(bpy, obj, name_prefix)
return obj
|
"""
Utilities for Django Models
"""
# Python
from typing import Any
# Django
from django.forms.widgets import ChoiceWidget
from django import forms
# Django filters
import django_filters as filters
def set_placeholder(field: Any, text: str) -> Any:
"""
Pass a Django form field and set a placeholder widget attribute,
HTML Input with placeholder attribute with value as text passed
Args:
field:
Django form field (usually a CharField),
this need to be compatible with HTML placeholder attribute.
text:
Text to set in placeholder
Returns:
The same field with the placehorder attr assigned
"""
field.widget.attrs["placeholder"] = text
return field
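# Illustrative usage (assumed field and text, not part of the original module):
# attach a placeholder to a plain CharField before using it in a form definition.
def _placeholder_example() -> forms.CharField:
    name_field = set_placeholder(forms.CharField(), "Search by name")
    # The widget now renders as <input ... placeholder="Search by name">.
    return name_field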
class PaginationForm(forms.Form):
pagination_choices = (
(10, "10"),
(25, "25"),
(50, "50"),
(100, "100"),
(150, "150"),
(150, "250"),
)
pagination = forms.ChoiceField(
label="Paginate by",
choices=pagination_choices,
widget=forms.Select(
attrs={
"onchange": "this.form.submit()"
}
)
)
class NumberInFilter(filters.BaseInFilter, filters.CharFilter):
pass
class CharInFilter(filters.BaseInFilter, filters.CharFilter):
pass
class DateInput(forms.DateInput):
input_type = "date"
|
# Merry Christmas
dia = 25
if dia == 24:
print('Bora comer panetone')
print('Beber um refri')
print('comer doce')
elif dia == 25:
print('Feliz Natal!!!')
|
import pytest
@pytest.mark.django_db
def test_signed_out_homepage(client):
response = client.get("/")
assert response.status_code == 200
assert b'<a href="/login/auth0">Sign in</a>' in response.content
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: lipeijie
import os
import sys
sys.path.insert(0, os.getcwd() + "/.")
import json
from pathlib import Path
import time
import re
import fire
import psutil
import numpy as np
import torch
from google.protobuf import text_format
import torchplus
from second.builder import target_assigner_builder, voxel_builder
from second.data.preprocess import merge_second_batch, merge_second_batch_multigpu
from second.protos import pipeline_pb2
from second.pytorch.builder import (box_coder_builder, input_reader_builder,
lr_scheduler_builder, optimizer_builder,
losses_builder)
from second.utils.log_tool import SimpleModelLog
from second.utils.progress_bar import ProgressBar
from second.pytorch.models.my_pointpillars import PointPillarsNet
from second.pytorch.models.pointpillars_loss import PointPillarsLoss
def example_convert_to_torch(example, dtype=torch.float32,
device=None) -> dict:
device = device or torch.device("cuda:0")
example_torch = {}
float_names = [
"voxels", "anchors", "reg_targets", "reg_weights", "bev_map", "importance"
]
for k, v in example.items():
if k in float_names:
            # conversion is slow when fp32 data is provided directly with dtype=torch.half
example_torch[k] = torch.tensor(
v, dtype=torch.float32, device=device).to(dtype)
elif k in ["coordinates", "labels", "num_points"]:
example_torch[k] = torch.tensor(
v, dtype=torch.int32, device=device)
elif k in ["anchors_mask"]:
example_torch[k] = torch.tensor(
v, dtype=torch.uint8, device=device)
elif k == "calib":
calib = {}
for k1, v1 in v.items():
calib[k1] = torch.tensor(
v1, dtype=dtype, device=device).to(dtype)
example_torch[k] = calib
elif k == "num_voxels":
example_torch[k] = torch.tensor(v)
else:
example_torch[k] = v
return example_torch
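# Minimal usage sketch (synthetic data; assumes a CUDA device, as the default
# argument does): float-named arrays become float tensors, coordinate-like
# arrays become int32 tensors, and unrecognised keys are passed through.
def _example_convert_demo() -> dict:
    example = {
        "voxels": np.zeros((2, 100, 4), dtype=np.float32),
        "coordinates": np.zeros((2, 4), dtype=np.int32),
        "metadata": ["kept as-is"],
    }
    return example_convert_to_torch(example)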
def _worker_init_fn(worker_id):
time_seed = np.array(time.time(), dtype=np.int32)
np.random.seed(time_seed + worker_id)
print(f"WORKER {worker_id} seed:", np.random.get_state()[1][0])
def freeze_params_v2(params: dict, include: str=None, exclude: str=None):
assert isinstance(params, dict)
include_re = None
if include is not None:
include_re = re.compile(include)
exclude_re = None
if exclude is not None:
exclude_re = re.compile(exclude)
for k, p in params.items():
if include_re is not None:
if include_re.match(k) is not None:
p.requires_grad = False
if exclude_re is not None:
if exclude_re.match(k) is None:
p.requires_grad = False
def filter_param_dict(state_dict: dict, include: str=None, exclude: str=None):
assert isinstance(state_dict, dict)
include_re = None
if include is not None:
include_re = re.compile(include)
exclude_re = None
if exclude is not None:
exclude_re = re.compile(exclude)
res_dict = {}
for k, p in state_dict.items():
if include_re is not None:
if include_re.match(k) is None:
continue
if exclude_re is not None:
if exclude_re.match(k) is not None:
continue
res_dict[k] = p
return res_dict
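# Illustrative sketch (the parameter name patterns are made up, not from the
# original configs): keep only backbone weights from a checkpoint, load them,
# then freeze the matching parameters of the live network.
def _load_and_freeze_backbone(net: torch.nn.Module, checkpoint: dict) -> None:
    backbone_weights = filter_param_dict(checkpoint, include=r"^backbone\.")
    net.load_state_dict(backbone_weights, strict=False)
    freeze_params_v2(dict(net.named_parameters()), include=r"^backbone\.")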
def load_config(model_dir, config_path):
config_file_bkp = "pipeline.config"
    if isinstance(config_path, str):
        # Read the config from a protobuf text file.
        config = pipeline_pb2.TrainEvalPipelineConfig()
        with open(config_path, "r") as f:
            proto_str = f.read()
            text_format.Merge(proto_str, config)
    else:
        # A config object was provided directly. This is usually used when you
        # want to train with several different parameter sets in one script.
        config = config_path
proto_str = text_format.MessageToString(config, indent=2)
with (model_dir / config_file_bkp).open("w") as f:
f.write(proto_str)
return config, proto_str
def load_target_assigner_param(model_cfg):
classes_cfg = model_cfg.target_assigner.class_settings
num_class = len(classes_cfg)
use_mcnms = [c.use_multi_class_nms for c in classes_cfg]
use_rotate_nms = [c.use_rotate_nms for c in classes_cfg]
if len(model_cfg.target_assigner.nms_pre_max_sizes) != 0:
nms_pre_max_sizes = list(model_cfg.target_assigner.nms_pre_max_sizes)
assert len(nms_pre_max_sizes) == num_class
else:
nms_pre_max_sizes = [c.nms_pre_max_size for c in classes_cfg]
if len(model_cfg.target_assigner.nms_post_max_sizes) != 0:
nms_post_max_sizes = list(model_cfg.target_assigner.nms_post_max_sizes)
assert len(nms_post_max_sizes) == num_class
else:
nms_post_max_sizes = [c.nms_post_max_size for c in classes_cfg]
if len(model_cfg.target_assigner.nms_score_thresholds) != 0:
nms_score_thresholds = list(model_cfg.target_assigner.nms_score_thresholds)
assert len(nms_score_thresholds) == num_class
else:
nms_score_thresholds = [c.nms_score_threshold for c in classes_cfg]
if len(model_cfg.target_assigner.nms_iou_thresholds) != 0:
nms_iou_thresholds = list(model_cfg.target_assigner.nms_iou_thresholds)
assert len(nms_iou_thresholds) == num_class
else:
nms_iou_thresholds = [c.nms_iou_threshold for c in classes_cfg]
assert all(use_mcnms) or all([not b for b in use_mcnms]), "not implemented"
assert all(use_rotate_nms) or all([not b for b in use_rotate_nms]), "not implemented"
if all([not b for b in use_mcnms]):
assert all([e == nms_pre_max_sizes[0] for e in nms_pre_max_sizes])
assert all([e == nms_post_max_sizes[0] for e in nms_post_max_sizes])
assert all([e == nms_score_thresholds[0] for e in nms_score_thresholds])
assert all([e == nms_iou_thresholds[0] for e in nms_iou_thresholds])
return num_class, use_mcnms, use_rotate_nms, nms_pre_max_sizes, nms_post_max_sizes, \
nms_score_thresholds, nms_iou_thresholds
def build_net_loss(model_cfg, target_assigner):
num_class, use_mcnms, use_rotate_nms, nms_pre_max_sizes, nms_post_max_sizes, \
nms_score_thresholds, nms_iou_thresholds = load_target_assigner_param(model_cfg)
losses = losses_builder.build(model_cfg.loss)
cls_loss_ftor, loc_loss_ftor, cls_weight, loc_weight, _ = losses
net_loss = PointPillarsLoss(target_assigner,
nms_score_thresholds=nms_score_thresholds,
nms_iou_thresholds=nms_iou_thresholds,
nms_pre_max_sizes=nms_pre_max_sizes,
nms_post_max_sizes=nms_post_max_sizes,
cls_loss_ftor=cls_loss_ftor,
loc_loss_ftor=loc_loss_ftor,
cls_loss_weight=cls_weight,
loc_loss_weight=loc_weight)
return net_loss
def load_pretrained_model(net, pretrained_path,
pretrained_include, pretrained_exclude,
freeze_include, freeze_exclude):
if pretrained_path is not None:
model_dict = net.state_dict()
pretrained_dict = torch.load(pretrained_path)
pretrained_dict = filter_param_dict(pretrained_dict, pretrained_include, pretrained_exclude)
new_pretrained_dict = {}
for k, v in pretrained_dict.items():
if k in model_dict and v.shape == model_dict[k].shape:
new_pretrained_dict[k] = v
print("Load pretrained parameters:")
for k, v in new_pretrained_dict.items():
print(k, v.shape)
model_dict.update(new_pretrained_dict)
net.load_state_dict(model_dict)
freeze_params_v2(dict(net.named_parameters()), freeze_include, freeze_exclude)
def create_optimizer(model_dir, train_cfg, net):
optimizer_cfg = train_cfg.optimizer
loss_scale = train_cfg.loss_scale_factor
fastai_optimizer = optimizer_builder.build(
optimizer_cfg,
net,
mixed=False,
loss_scale=loss_scale)
amp_optimizer = fastai_optimizer
torchplus.train.try_restore_latest_checkpoints(model_dir,
[fastai_optimizer])
lr_scheduler = lr_scheduler_builder.build(optimizer_cfg, amp_optimizer,
train_cfg.steps)
return amp_optimizer, lr_scheduler
def get_paddings_indicator(actual_num, max_num, axis=0):
"""Create boolean mask by actually number of a padded tensor.
Args:
actual_num ([type]): [description]
max_num ([type]): [description]
Returns:
[type]: [description]
"""
actual_num = torch.unsqueeze(actual_num, axis + 1)
# tiled_actual_num: [N, M, 1]
max_num_shape = [1] * len(actual_num.shape)
max_num_shape[axis + 1] = -1
max_num = torch.arange(
max_num, dtype=torch.int, device=actual_num.device).view(max_num_shape)
# tiled_actual_num: [[3,3,3,3,3], [4,4,4,4,4], [2,2,2,2,2]]
# tiled_max_num: [[0,1,2,3,4], [0,1,2,3,4], [0,1,2,3,4]]
paddings_indicator = actual_num.int() > max_num
# paddings_indicator shape: [batch_size, max_num]
return paddings_indicator
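# Small usage sketch mirroring the comments above: three pillars containing 3, 4
# and 2 valid points respectively, padded out to 5 slots each.
def _paddings_indicator_demo() -> torch.Tensor:
    actual_num = torch.tensor([3, 4, 2])
    mask = get_paddings_indicator(actual_num, max_num=5)
    # mask[0] == [True, True, True, False, False], and so on per pillar.
    return mask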
def reshape_input(batch_size, input_features, coords, grid_size):
# input_features: [num_voxels, max_num_points_per_voxel, 9]
# coors: [num_voxels, 4]
# print("grid_size:", grid_size) # x, y, z
nx = grid_size[0]
ny = grid_size[1]
pillar_x = input_features[:, :, 0].squeeze()
pillar_y = input_features[:, :, 1].squeeze()
pillar_z = input_features[:, :, 2].squeeze()
pillar_i = input_features[:, :, 3].squeeze()
pillar_c_x = input_features[:, :, 4].squeeze()
pillar_c_y = input_features[:, :, 5].squeeze()
pillar_c_z = input_features[:, :, 6].squeeze()
pillar_p_x = input_features[:, :, 7].squeeze()
pillar_p_y = input_features[:, :, 8].squeeze()
batch_canvas = []
for batch_itt in range(batch_size):
# Create the canvas for this sample
all_canvas = []
# Only include non-empty pillars
batch_mask = coords[:, 0] == batch_itt
this_coords = coords[batch_mask, :]
indices = this_coords[:, 2] * nx + this_coords[:, 3]
indices = indices.type(torch.long)
voxels_x = pillar_x[batch_mask, :]
canvas_x = torch.zeros(
nx * ny,
100,
dtype=input_features.dtype,
device=input_features.device)
canvas_x[indices, :] = voxels_x
all_canvas.append(canvas_x)
voxels_y = pillar_y[batch_mask, :]
canvas_y = torch.zeros(
nx * ny,
100,
dtype=input_features.dtype,
device=input_features.device)
canvas_y[indices, :] = voxels_y
all_canvas.append(canvas_y)
voxels_z = pillar_z[batch_mask, :]
canvas_z = torch.zeros(
nx * ny,
100,
dtype=input_features.dtype,
device=input_features.device)
canvas_z[indices, :] = voxels_z
all_canvas.append(canvas_z)
voxels_i = pillar_i[batch_mask, :]
canvas_i = torch.zeros(
nx * ny,
100,
dtype=input_features.dtype,
device=input_features.device)
canvas_i[indices, :] = voxels_i
all_canvas.append(canvas_i)
voxels_c_x = pillar_c_x[batch_mask, :]
canvas_c_x = torch.zeros(
nx * ny,
100,
dtype=input_features.dtype,
device=input_features.device)
canvas_c_x[indices, :] = voxels_c_x
all_canvas.append(canvas_c_x)
voxels_c_y = pillar_c_y[batch_mask, :]
canvas_c_y = torch.zeros(
nx * ny,
100,
dtype=input_features.dtype,
device=input_features.device)
canvas_c_y[indices, :] = voxels_c_y
all_canvas.append(canvas_c_y)
voxels_c_z = pillar_c_z[batch_mask, :]
canvas_c_z = torch.zeros(
nx * ny,
100,
dtype=input_features.dtype,
device=input_features.device)
canvas_c_z[indices, :] = voxels_c_z
all_canvas.append(canvas_c_z)
voxels_p_x = pillar_p_x[batch_mask, :]
canvas_p_x = torch.zeros(
nx * ny,
100,
dtype=input_features.dtype,
device=input_features.device)
canvas_p_x[indices, :] = voxels_p_x
all_canvas.append(canvas_p_x)
voxels_p_y = pillar_p_y[batch_mask, :]
canvas_p_y = torch.zeros(
nx * ny,
100,
dtype=input_features.dtype,
device=input_features.device)
canvas_p_y[indices, :] = voxels_p_y
all_canvas.append(canvas_p_y)
all_data = torch.stack(all_canvas, 0)
# Append to a list for later stacking.
batch_canvas.append(all_data)
# Stack to 3-dim tensor (batch-size, nchannels, nrows*ncols, 100)
batch_canvas = torch.stack(batch_canvas, 0)
# print("batch_canvas", batch_canvas.shape)
return batch_canvas
def reshape_input1(input_features):
# input_features: [num_voxels, max_num_points_per_voxel, 9]
pillar_x = input_features[:, :, 0].unsqueeze(0).unsqueeze(0)
pillar_y = input_features[:, :, 1].unsqueeze(0).unsqueeze(0)
pillar_z = input_features[:, :, 2].unsqueeze(0).unsqueeze(0)
pillar_i = input_features[:, :, 3].unsqueeze(0).unsqueeze(0)
pillar_c_x = input_features[:, :, 4].unsqueeze(0).unsqueeze(0)
pillar_c_y = input_features[:, :, 5].unsqueeze(0).unsqueeze(0)
pillar_c_z = input_features[:, :, 6].unsqueeze(0).unsqueeze(0)
pillar_p_x = input_features[:, :, 7].unsqueeze(0).unsqueeze(0)
pillar_p_y = input_features[:, :, 8].unsqueeze(0).unsqueeze(0)
batch_canvas = [pillar_x, pillar_y, pillar_z, pillar_i, pillar_c_x,
pillar_c_y, pillar_c_z, pillar_p_x, pillar_p_y]
return torch.cat(batch_canvas, 1)
def compute_model_input(voxel_size, pc_range, with_distance,
voxels, num_voxels, coors):
# num_voxels: [num_voxels]
# Need pillar (voxel) size and x/y offset in order to calculate pillar offset
vx = voxel_size[0]
vy = voxel_size[1]
x_offset = vx / 2 + pc_range[0]
y_offset = vy / 2 + pc_range[1]
device = voxels.device
dtype = voxels.dtype
# Find distance of x, y, and z from cluster center
points_mean = voxels[:, :, :3].sum(
dim=1, keepdim=True) / num_voxels.type_as(voxels).view(-1, 1, 1)
f_cluster = voxels[:, :, :3] - points_mean
# Find distance of x, y, and z from pillar center
f_center = torch.zeros_like(voxels[:, :, :2])
f_center[:, :, 0] = voxels[:, :, 0] - (
coors[:, 3].to(dtype).unsqueeze(1) * vx + x_offset)
f_center[:, :, 1] = voxels[:, :, 1] - (
coors[:, 2].to(dtype).unsqueeze(1) * vy + y_offset)
# Combine together feature decorations
features_ls = [voxels, f_cluster, f_center]
if with_distance:
points_dist = torch.norm(voxels[:, :, :3], 2, 2, keepdim=True)
features_ls.append(points_dist)
features = torch.cat(features_ls, dim=-1)
# The feature decorations were calculated without regard to whether pillar was empty. Need to ensure that
# empty pillars remain set to zeros.
voxel_count = features.shape[1]
mask = get_paddings_indicator(num_voxels, voxel_count, axis=0)
mask = torch.unsqueeze(mask, -1).type_as(features)
features *= mask
# print("features", features.shape)
return features
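# Synthetic usage sketch (the voxel grid parameters are made up): four pillars
# with up to 100 points each and (x, y, z, intensity) per point yield a
# 9-channel decorated feature tensor of shape [4, 100, 9].
def _compute_model_input_demo() -> torch.Tensor:
    voxels = torch.rand(4, 100, 4)
    num_voxels = torch.tensor([10, 3, 100, 1])
    coors = torch.zeros(4, 4, dtype=torch.int32)
    return compute_model_input(voxel_size=[0.16, 0.16, 4.0],
                               pc_range=[0.0, -39.68, -3.0, 69.12, 39.68, 1.0],
                               with_distance=False,
                               voxels=voxels, num_voxels=num_voxels, coors=coors)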
def kaiming_init(model, scale=0.1):
for m in model.modules():
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in', nonlinearity='relu')
m.weight.data *= scale # for residual block
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, torch.nn.Linear):
torch.nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
# nn.init.normal_(m.weight, 0, 0.01)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.ones_(m.weight)
torch.nn.init.zeros_(m.bias)
elif isinstance(m, torch.nn.BatchNorm1d):
torch.nn.init.ones_(m.weight)
torch.nn.init.zeros_(m.bias)
float_dtype = torch.float32
def evaluate(net, net_loss, best_mAP,
voxel_generator, target_assigner,
config, model_logging,
model_dir, result_path=None):
torch.cuda.empty_cache()
global_step = net_loss.get_global_step()
eval_input_cfg = config.eval_input_reader
model_cfg = config.model.second
eval_dataset = input_reader_builder.build(
eval_input_cfg,
model_cfg,
training=False,
voxel_generator=voxel_generator,
target_assigner=target_assigner)
eval_dataloader = torch.utils.data.DataLoader(
eval_dataset,
        batch_size=eval_input_cfg.batch_size,
shuffle=False,
num_workers=eval_input_cfg.preprocess.num_workers,
pin_memory=False,
collate_fn=merge_second_batch)
result_path_step = result_path / f"step_{global_step}"
# result_path_step.mkdir(parents=True, exist_ok=True)
model_logging.log_text("#################################",
global_step)
model_logging.log_text("# EVAL", global_step)
model_logging.log_text("#################################",
global_step)
model_logging.log_text("Generate output labels...", global_step)
t = time.time()
detections = []
prog_bar = ProgressBar()
prog_bar.start((len(eval_dataset) + eval_input_cfg.batch_size - 1)
// eval_input_cfg.batch_size)
for example in iter(eval_dataloader):
example = example_convert_to_torch(example, float_dtype)
batch_size = example["anchors"].shape[0]
coors = example["coordinates"]
input_features = compute_model_input(voxel_generator.voxel_size,
voxel_generator.point_cloud_range,
with_distance=False,
voxels=example['voxels'],
num_voxels=example['num_points'],
coors=coors)
# input_features = reshape_input(batch_size, input_features, coors, voxel_generator.grid_size)
input_features = reshape_input1(input_features)
net.batch_size = batch_size
preds_list = net(input_features, coors)
detections += net_loss(example, preds_list)
prog_bar.print_bar()
    examples_per_sec = len(eval_dataset) / (time.time() - t)
    model_logging.log_text(
        f'generate label finished({examples_per_sec:.2f} examples/s). start eval:',
global_step)
result_dict = eval_dataset.dataset.evaluation(
detections, str(result_path_step))
if result_dict['mAp'] > best_mAP:
best_mAP = result_dict['mAp']
ckpt_path = Path(model_dir) / "best_pointpillars.pth"
torch.save(net.state_dict(), ckpt_path)
for k, v in result_dict["results"].items():
model_logging.log_text("Evaluation {}".format(k), global_step)
model_logging.log_text(v, global_step)
model_logging.log_text("mAP {}".format(result_dict['mAp']), global_step)
model_logging.log_text("best_mAP {}".format(best_mAP), global_step)
model_logging.log_metrics(result_dict["detail"], global_step)
# with open(result_path_step / "result.pkl", 'wb') as f:
# pickle.dump(detections, f)
return best_mAP
def train(config_path,
model_dir,
result_path=None,
create_folder=False,
display_step=50,
pretrained_path=None,
pretrained_include=None,
pretrained_exclude=None,
freeze_include=None,
freeze_exclude=None,
multi_gpu=False,
measure_time=False,
resume=False):
"""train a PointPillars model specified by a config file.
"""
torch.cuda.empty_cache()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_dir = str(Path(model_dir).resolve())
if create_folder:
if Path(model_dir).exists():
model_dir = torchplus.train.create_folder(model_dir)
model_dir = Path(model_dir)
if not resume and model_dir.exists():
raise ValueError("model dir exists and you don't specify resume.")
model_dir.mkdir(parents=True, exist_ok=True)
if result_path is None:
result_path = model_dir / 'results'
config, proto_str = load_config(model_dir, config_path)
input_cfg = config.train_input_reader
model_cfg = config.model.second
train_cfg = config.train_config
target_assigner_cfg = model_cfg.target_assigner
voxel_generator = voxel_builder.build(model_cfg.voxel_generator)
bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
box_coder = box_coder_builder.build(model_cfg.box_coder)
target_assigner = target_assigner_builder.build(target_assigner_cfg,
bv_range, box_coder)
box_coder.custom_ndim = target_assigner._anchor_generators[0].custom_ndim
net = PointPillarsNet(1, voxel_generator.grid_size,
target_assigner.num_anchors_per_location,
target_assigner.box_coder.code_size,
with_distance=False).to(device)
kaiming_init(net, 1.0)
net_loss = build_net_loss(model_cfg, target_assigner).to(device)
net_loss.clear_global_step()
net_loss.clear_metrics()
# print("num parameters:", len(list(net.parameters())))
load_pretrained_model(net, pretrained_path,
pretrained_include, pretrained_exclude,
freeze_include, freeze_exclude)
if resume:
torchplus.train.try_restore_latest_checkpoints(model_dir, [net])
amp_optimizer, lr_scheduler = create_optimizer(model_dir, train_cfg, net)
collate_fn = merge_second_batch
num_gpu = 1
######################
# PREPARE INPUT
######################
dataset = input_reader_builder.build(
input_cfg,
model_cfg,
training=True,
voxel_generator=voxel_generator,
target_assigner=target_assigner,
multi_gpu=multi_gpu)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=input_cfg.batch_size * num_gpu,
shuffle=True,
num_workers=input_cfg.preprocess.num_workers * num_gpu,
pin_memory=False,
collate_fn=collate_fn,
worker_init_fn=_worker_init_fn,
drop_last=not multi_gpu)
######################
# TRAINING
######################
model_logging = SimpleModelLog(model_dir)
model_logging.open()
model_logging.log_text(proto_str + "\n", 0, tag="config")
start_step = net_loss.get_global_step()
total_step = train_cfg.steps
t = time.time()
steps_per_eval = train_cfg.steps_per_eval
clear_metrics_every_epoch = train_cfg.clear_metrics_every_epoch
amp_optimizer.zero_grad()
step_times = []
step = start_step
best_mAP = 0
epoch = 0
net.train()
net_loss.train()
try:
while True:
if clear_metrics_every_epoch:
net_loss.clear_metrics()
for example in dataloader:
lr_scheduler.step(net_loss.get_global_step())
time_metrics = example["metrics"]
example.pop("metrics")
example_torch = example_convert_to_torch(example, float_dtype)
batch_size = example_torch["anchors"].shape[0]
coors = example_torch["coordinates"]
input_features = compute_model_input(voxel_generator.voxel_size,
voxel_generator.point_cloud_range,
with_distance=False,
voxels=example_torch['voxels'],
num_voxels=example_torch['num_points'],
coors=coors)
# input_features = reshape_input(batch_size, input_features, coors, voxel_generator.grid_size)
input_features = reshape_input1(input_features)
net.batch_size = batch_size
preds_list = net(input_features, coors)
ret_dict = net_loss(example_torch, preds_list)
cls_preds = ret_dict["cls_preds"]
loss = ret_dict["loss"].mean()
cls_loss_reduced = ret_dict["cls_loss_reduced"].mean()
loc_loss_reduced = ret_dict["loc_loss_reduced"].mean()
cls_pos_loss = ret_dict["cls_pos_loss"].mean()
cls_neg_loss = ret_dict["cls_neg_loss"].mean()
loc_loss = ret_dict["loc_loss"]
cls_loss = ret_dict["cls_loss"]
cared = ret_dict["cared"]
labels = example_torch["labels"]
loss.backward()
torch.nn.utils.clip_grad_norm_(net.parameters(), 10.0)
amp_optimizer.step()
amp_optimizer.zero_grad()
net_loss.update_global_step()
net_metrics = net_loss.update_metrics(cls_loss_reduced,
loc_loss_reduced, cls_preds,
labels, cared)
step_time = (time.time() - t)
step_times.append(step_time)
t = time.time()
metrics = {}
num_pos = int((labels > 0)[0].float().sum().cpu().numpy())
num_neg = int((labels == 0)[0].float().sum().cpu().numpy())
if 'anchors_mask' not in example_torch:
num_anchors = example_torch['anchors'].shape[1]
else:
num_anchors = int(example_torch['anchors_mask'][0].sum())
global_step = net_loss.get_global_step()
if global_step % display_step == 0:
loc_loss_elem = [
float(loc_loss[:, :, i].sum().detach().cpu().numpy() /
batch_size) for i in range(loc_loss.shape[-1])
]
metrics["runtime"] = {
"step": global_step,
"steptime": np.mean(step_times),
}
metrics["runtime"].update(time_metrics[0])
step_times = []
metrics.update(net_metrics)
metrics["loss"]["loc_elem"] = loc_loss_elem
metrics["loss"]["cls_pos_rt"] = float(
cls_pos_loss.detach().cpu().numpy())
metrics["loss"]["cls_neg_rt"] = float(
cls_neg_loss.detach().cpu().numpy())
if model_cfg.use_direction_classifier:
dir_loss_reduced = ret_dict["dir_loss_reduced"].mean()
metrics["loss"]["dir_rt"] = float(
dir_loss_reduced.detach().cpu().numpy())
metrics["misc"] = {
"num_vox": int(example_torch["voxels"].shape[0]),
"num_pos": int(num_pos),
"num_neg": int(num_neg),
"num_anchors": int(num_anchors),
"lr": float(amp_optimizer.lr),
"mem_usage": psutil.virtual_memory().percent,
}
model_logging.log_metrics(metrics, global_step)
step += 1
epoch += 1
if epoch % 2 == 0:
global_step = net_loss.get_global_step()
torchplus.train.save_models(model_dir, [net, amp_optimizer], global_step)
net.eval()
net_loss.eval()
best_mAP = evaluate(net, net_loss, best_mAP,
voxel_generator, target_assigner,
config, model_logging,
model_dir, result_path)
net.train()
net_loss.train()
if epoch > 100:
break
if epoch > 100:
break
except Exception as e:
print(json.dumps(example["metadata"], indent=2))
model_logging.log_text(str(e), step)
model_logging.log_text(json.dumps(example["metadata"], indent=2), step)
torchplus.train.save_models(model_dir, [net, amp_optimizer],
step)
raise e
finally:
model_logging.close()
torchplus.train.save_models(model_dir, [net, amp_optimizer],
net_loss.get_global_step())
if __name__ == '__main__':
fire.Fire()
|
def test():
assert (
"from spacy.tokens import Doc" in __solution__
), "Importes-tu correctement la classe Doc ?"
assert (
len(spaces) == 5
), "Il semble que le nombre d'espaces ne correspond pas au nombre de mots."
assert all(isinstance(s, bool) for s in spaces), "Les espaces doivent être des booléens."
assert [int(s) for s in spaces] == [0, 1, 1, 1, 0], "Les espaces sont-ils corrects ?"
assert doc.text == "Allez, on commence !", "Es-tu certain d'avoir correctement créé le Doc ?"
__msg__.good("Bien !")
|
import yaml
from definitions import CONFIG_PATH, DEFAULT_SETTINGS
config = yaml.safe_load(open(CONFIG_PATH, encoding="utf8"))
for setting, default_value in DEFAULT_SETTINGS.items():
if setting not in config:
config[setting] = default_value
|
from database import database, manage, models
|
from django.apps import AppConfig
class portfolioConfig(AppConfig):
name = 'portfolio'
|
import sublime
import sublime_plugin
from Default.exec import ExecCommand
# Related reading/viewing;
# https://stackoverflow.com/questions/56934013/sublimetext-run-the-exec-with-current-file-as-arg-tab-context-menu
# https://youtu.be/WxiMlhOX_Ng
# This plugin is a combination of an exec variant written for a stack overflow
# answer and one from my YouTube video on common custom build targets.
#
# The internal exec command can execute external programs, but unless it is
# invoked via the build command as a part of a build system, variables like
# $file and the like are not expanded. This makes key bindings or menu entries
# that want to execute specific tasks in context harder or impossible to do.
#
# In addition the exec command uses the global show_panel_on_build setting to
# open the build output, which may also not be desirable if you're executing
# ad-hoc programs.
#
# This variant of the command expands variables the same way as they would be
# expanded in a build system, and also supports a custom argument named
# show_panel that controls if the output panel should be displayed or not. A
# value of None honors the show_panel_on_build setting; set it to True or False
# to explicitly show or not show the panel.
class MenuExecCommand(ExecCommand):
"""
A simple wrapper around the internal exec command that expands all of the
normal build variables prior to the build, while also being able to
temporarily suppress the build output panel if desired.
"""
def run(self, show_panel=None, **kwargs):
variables = self.window.extract_variables()
for key in ("cmd", "shell_cmd", "working_dir"):
if key in kwargs:
kwargs[key] = sublime.expand_variables(kwargs[key], variables)
settings = sublime.load_settings("Preferences.sublime-settings")
pref_var = settings.get("show_panel_on_build")
show_panel = pref_var if show_panel is None else show_panel
if show_panel != pref_var:
settings.set("show_panel_on_build", show_panel)
super().run(**kwargs)
if show_panel != pref_var:
settings.set("show_panel_on_build", pref_var)
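# Example binding (illustrative; the command name "menu_exec" follows Sublime's
# usual CamelCase-to-snake_case convention, and the key and arguments below are
# assumptions, not part of this plugin):
#
#     { "keys": ["f7"],
#       "command": "menu_exec",
#       "args": { "shell_cmd": "python -u \"$file\"", "show_panel": false } }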
|
bl_info = {
"name": "Window Generator",
"description": "Generate Window Arrays",
"author": "Austin Jacob",
"version": (1, 0, 0),
"blender": (2, 79, 0),
"location": "View3D > Add > Mesh",
"warning": "", # used for warning icon and text in addons panel
"wiki_url": "",
"tracker_url": "",
"category": "Add Mesh"}
import bpy
import bmesh
from bpy.props import (
BoolProperty,
BoolVectorProperty,
FloatProperty,
FloatVectorProperty,
IntProperty,
EnumProperty,
)
class AddWindows(bpy.types.Operator):
"""Add an array of windows to a mesh"""
bl_idname = "mesh.windows_add"
bl_label = "Add Windows"
bl_options = {'REGISTER', 'UNDO'}
length = FloatProperty(
name="Length",
description="Window Length",
min=0.01,
default=2,
)
width = FloatProperty(
name="Width",
description="Window Width",
min=0.01,
default=0.325,
)
height = FloatProperty(
name="Height",
description="Window Height",
min=0.01,
default=1.5,
)
thick = FloatProperty(
name="Thickness",
description="How thick each window pane is",
min=0.01,
default=0.05,
)
    #No two boxes can touch, because boolean operations fail on touching geometry,
    #so the shift minimums can never be exactly 0.
x_shift = FloatProperty(
name="Distance Apart (X)",
description="How far apart each window is on the x-axis",
min=0.0001,
default=0.0001,
)
y_shift = FloatProperty(
name="Distance Apart (Y)",
description="How far apart each window is on the y-axis",
min=0.0001,
default=0.0001,
)
x_win = IntProperty(
name="Windows Per Row",
description="How many windows per row",
min=1,
default=1,
)
y_win = IntProperty(
name="Windows Per Column",
description="How many windows per column",
min=1,
default=1,
)
layers = BoolVectorProperty(
name="Layers",
description="Object Layers",
size=20,
options={'HIDDEN', 'SKIP_SAVE'},
)
# generic transform props
view_align = BoolProperty(
name="Align to View",
default=False,
)
location = FloatVectorProperty(
name="Location",
subtype='TRANSLATION',
)
rotation = FloatVectorProperty(
name="Rotation",
subtype='EULER',
)
def execute(self, context):
#Rename the variables to be easier to work with
x_win = self.x_win
y_win = self.y_win
l = self.length
w = self.width
h = self.height
t = self.thick
x_shift = self.x_shift
y_shift = self.y_shift
x_loc = 0
y_loc = 0
loc = self.location
rot = self.rotation
#Make the window object
mesh1 = bpy.data.meshes.new("windows")
windows_obj = bpy.data.objects.new("Windows_Obj", mesh1)
scene = bpy.context.scene
scene.objects.link(windows_obj)
bm1 = bmesh.new()
verts = [(-(l / 2.0), +(w / 2.0), +(h / 2.0)),
(-((l / 2.0) - t), +(w / 2.0), +((h / 2.0) - t)),
(+((l / 2.0) - t), +(w / 2.0), +((h / 2.0) - t)),
(+(l / 2.0), +(w / 2.0), +(h / 2.0)),
(+(l / 2.0), +(w / 2.0), -(h / 2.0)),
(+((l / 2.0) - t), +(w / 2.0), -((h / 2.0) - t)),
(-((l / 2.0) - t), +(w / 2.0), -((h / 2.0) - t)),
(-(l / 2.0), +(w / 2.0), -(h / 2.0)),
(-(l / 2.0), -(w / 2.0), +(h / 2.0)),
(-((l / 2.0) - t), -(w / 2.0), +((h / 2.0) - t)),
(+((l / 2.0) - t), -(w / 2.0), +((h / 2.0) - t)),
(+(l / 2.0), -(w / 2.0), +(h / 2.0)),
(+(l / 2.0), -(w / 2.0), -(h / 2.0)),
(+((l / 2.0) - t), -(w / 2.0), -((h / 2.0) - t)),
(-((l / 2.0) - t), -(w / 2.0), -((h / 2.0) - t)),
(-(l / 2.0), -(w / 2.0), -(h / 2.0)),
]
faces = [(3, 2, 1, 0),
(4, 5, 2, 3),
(7, 6, 5, 4),
(0, 1, 6, 7),
(8, 9, 10, 11),
(11, 10, 13, 12),
(12, 13, 14, 15),
(15, 14, 9, 8),
(0, 7, 15, 8),
(3, 0, 8, 11),
(4, 3, 11, 12),
(7, 4, 12, 15),
(9, 14, 6, 1),
(1, 2, 10, 9),
(2, 5, 13, 10),
(5, 6, 14, 13),
]
for v_co in verts:
bm1.verts.new(v_co)
bm1.verts.ensure_lookup_table()
for f_idx in faces:
bm1.faces.new([bm1.verts[i] for i in f_idx])
bm1.to_mesh(mesh1)
mesh1.update()
#Apply array modifier twice on the window mesh to make a grid of windows
x_array = bpy.data.objects[windows_obj.name].modifiers.new(name='x_window_array', type='ARRAY')
x_array.count = x_win
x_array.use_relative_offset = False
x_array.use_constant_offset = True
x_array.constant_offset_displace = (x_shift + l, 0.0, 0.0)
y_array = bpy.data.objects[windows_obj.name].modifiers.new(name='y_window_array', type='ARRAY')
y_array.count = y_win
y_array.use_relative_offset = False
y_array.use_constant_offset = True
y_array.constant_offset_displace = (0.0, 0.0, y_shift + h)
#Now make the boolean cutout boxes
#These boxes are used to cut holes
#in the wall where the windows fit
mesh2 = bpy.data.meshes.new("window_boolean")
window_boolean_obj = bpy.data.objects.new("Window_Boolean_Obj", mesh2)
scene = bpy.context.scene
scene.objects.link(window_boolean_obj)
bm2 = bmesh.new()
verts = [(+(l / 2.0), +w / 2.0, -(h / 2.0)),
(+(l / 2.0), -w / 2.0, -(h / 2.0)),
(-(l / 2.0), -w / 2.0, -(h / 2.0)),
(-(l / 2.0), +w / 2.0, -(h / 2.0)),
(+(l / 2.0), +w / 2.0, +(h / 2.0)),
(+(l / 2.0), -w / 2.0, +(h / 2.0)),
(-(l / 2.0), -w / 2.0, +(h / 2.0)),
(-(l / 2.0), +w / 2.0, +(h / 2.0)),
]
faces = [(0, 1, 2, 3),
(4, 7, 6, 5),
(0, 4, 5, 1),
(1, 5, 6, 2),
(2, 6, 7, 3),
(4, 0, 3, 7),
]
for v_co in verts:
bm2.verts.new(v_co)
bm2.verts.ensure_lookup_table()
for f_idx in faces:
bm2.faces.new([bm2.verts[i] for i in f_idx])
bm2.to_mesh(mesh2)
mesh2.update()
#Apply array modifier twice to make a grid of window cutouts
x_array2 = bpy.data.objects[window_boolean_obj.name].modifiers.new(name='x_window_array2', type='ARRAY')
x_array2.count = x_win
x_array2.use_relative_offset = False
x_array2.use_constant_offset = True
x_array2.constant_offset_displace = (x_shift + l, 0.0, 0.0)
y_array2 = bpy.data.objects[window_boolean_obj.name].modifiers.new(name='y_window_array2', type='ARRAY')
y_array2.count = y_win
y_array2.use_relative_offset = False
y_array2.use_constant_offset = True
y_array2.constant_offset_displace = (0.0, 0.0, y_shift + h)
#Transform both objects (translate, rotate)
windows_obj.location = loc
windows_obj.rotation_euler = rot
window_boolean_obj.location = loc
window_boolean_obj.rotation_euler = rot
#Hide the cutout objects to only see the windows
window_boolean_obj.hide = True
#Apply all array modifiers on both objects
#Also apply location, rotation, and scale.
bpy.context.scene.objects.active = windows_obj
bpy.ops.object.modifier_apply(apply_as='DATA', modifier='x_window_array')
bpy.ops.object.modifier_apply(apply_as='DATA', modifier='y_window_array')
bpy.ops.object.transform_apply(location = True, scale = True, rotation = True)
bpy.context.scene.objects.active = window_boolean_obj
bpy.ops.object.modifier_apply(apply_as='DATA', modifier='x_window_array2')
bpy.ops.object.modifier_apply(apply_as='DATA', modifier='y_window_array2')
bpy.ops.object.transform_apply(location = True, scale = True, rotation = True)
return {'FINISHED'}
def menu_func(self, context):
self.layout.operator(AddWindows.bl_idname, icon='MOD_LATTICE')
def register():
bpy.utils.register_class(AddWindows)
bpy.types.INFO_MT_mesh_add.append(menu_func)
def unregister():
bpy.utils.unregister_class(AddWindows)
bpy.types.INFO_MT_mesh_add.remove(menu_func)
if __name__ == "__main__":
register()
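# Illustrative follow-up (assumption, not part of the add-on): to actually cut
# the window openings into a wall mesh, the hidden "Window_Boolean_Obj" can be
# used as the target of a DIFFERENCE boolean modifier on the wall, e.g.:
#
#     wall = bpy.data.objects["Wall"]
#     cut = wall.modifiers.new(name="window_cut", type='BOOLEAN')
#     cut.operation = 'DIFFERENCE'
#     cut.object = bpy.data.objects["Window_Boolean_Obj"]
#     bpy.context.scene.objects.active = wall
#     bpy.ops.object.modifier_apply(apply_as='DATA', modifier="window_cut")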
|
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
{
'includes' : [
'../ion/common.gypi',
],
'variables': {
# Used by make_into_app.gypi. We can define this once per target, or
# globally here.
'target_app_location_param': '<(PRODUCT_DIR)/test',
},
'targets': [
{
'target_name': 'Snapshot_assets',
'type': 'static_library',
'includes': [
'../ion/dev/zipasset_generator.gypi',
],
'dependencies': [
'<(ion_dir)/port/port.gyp:ionport',
],
'sources': [
'Snapshot_assets.iad',
],
}, # target: Snapshot_assets
{
'target_name': 'Snapshot',
'includes': [ 'demobase.gypi', ],
'variables': {
'demo_class_name': 'Snapshot'
},
'sources': [
'Camera.cpp',
'Camera.hpp',
'FileManager.cpp',
'FileManager.hpp',
'FinalAction.hpp',
'Hud.cpp',
'Hud.hpp',
'IonFwd.h',
'KeyboardHandler.cpp',
'KeyboardHandler.hpp',
'Macros.h',
'main.cpp',
'resource.h',
'Scene.cpp',
'Scene.hpp',
'SceneBase.cpp',
'SceneBase.hpp',
'stdafx.cpp',
'stdafx.h',
'targetver.h',
'Window.cpp',
'Window.hpp',
],
'dependencies': [
':Snapshot_assets',
'<(ion_dir)/external/freetype2.gyp:ionfreetype2',
'<(ion_dir)/external/glfw.gyp:glfw',
],
'msvs_settings': {
'VCLinkerTool': {
'AdditionalDependencies': [
'%(AdditionalDependencies)',
],
},
'VCCLCompilerTool': {
'OpenMP': 'true',
'EnableEnhancedInstructionSet': '3', # AdvancedVectorExtensions
}
},
}, # target: Snapshot
{
'variables': {
'make_this_target_into_an_app_param': 'Snapshot',
'apk_class_name_param': 'Snapshot',
},
'includes': [
'demo_apk_variables.gypi',
],
},
{
'target_name': 'SnapshotInstaller',
'type': 'wix_installer',
'dependencies': [
':Snapshot',
],
'sources': [
'./InstallSnapshot.wxs',
],
}, # target: SnapshotInstaller
],
}
|
import torch
import torch.nn.functional as F
from torch import nn
from .pooler import RoIAlign
from .utils import Matcher, BalancedPositiveNegativeSampler, roi_align
from .box_ops import BoxCoder, box_iou, process_box, nms
def fastrcnn_loss(class_logit, box_regression, label, regression_target):
classifier_loss = F.cross_entropy(class_logit, label)
N, num_pos = class_logit.shape[0], regression_target.shape[0]
box_regression = box_regression.reshape(N, -1, 4)
box_regression, label = box_regression[:num_pos], label[:num_pos]
box_idx = torch.arange(num_pos, device=label.device)
box_reg_loss = F.smooth_l1_loss(box_regression[box_idx, label], regression_target, reduction='sum') / N
return classifier_loss, box_reg_loss
def maskrcnn_loss(mask_logit, proposal, matched_idx, label, gt_mask):
matched_idx = matched_idx[:, None].to(proposal)
roi = torch.cat((matched_idx, proposal), dim=1)
M = mask_logit.shape[-1]
gt_mask = gt_mask[:, None].to(roi)
mask_target = roi_align(gt_mask, roi, 1., M, M, -1)[:, 0]
idx = torch.arange(label.shape[0], device=label.device)
mask_loss = F.binary_cross_entropy_with_logits(mask_logit[idx, label], mask_target)
return mask_loss
class RoIHeads(nn.Module):
def __init__(self, box_roi_pool, box_predictor,
fg_iou_thresh, bg_iou_thresh,
num_samples, positive_fraction,
reg_weights,
score_thresh, nms_thresh, num_detections):
super().__init__()
self.box_roi_pool = box_roi_pool
self.box_predictor = box_predictor
self.mask_roi_pool = None
self.mask_predictor = None
self.proposal_matcher = Matcher(fg_iou_thresh, bg_iou_thresh, allow_low_quality_matches=False)
self.fg_bg_sampler = BalancedPositiveNegativeSampler(num_samples, positive_fraction)
self.box_coder = BoxCoder(reg_weights)
self.score_thresh = score_thresh
self.nms_thresh = nms_thresh
self.num_detections = num_detections
self.min_size = 1
def has_mask(self):
if self.mask_roi_pool is None:
return False
if self.mask_predictor is None:
return False
return True
def select_training_samples(self, proposal, target):
gt_box = target['boxes']
gt_label = target['labels']
proposal = torch.cat((proposal, gt_box))
iou = box_iou(gt_box, proposal)
pos_neg_label, matched_idx = self.proposal_matcher(iou)
pos_idx, neg_idx = self.fg_bg_sampler(pos_neg_label)
idx = torch.cat((pos_idx, neg_idx))
regression_target = self.box_coder.encode(gt_box[matched_idx[pos_idx]], proposal[pos_idx])
proposal = proposal[idx]
matched_idx = matched_idx[idx]
label = gt_label[matched_idx]
num_pos = pos_idx.shape[0]
label[num_pos:] = 0
return proposal, matched_idx, label, regression_target
def fastrcnn_inference(self, class_logit, box_regression, proposal, image_shape):
N, num_classes = class_logit.shape
device = class_logit.device
pred_score = F.softmax(class_logit, dim=-1)
box_regression = box_regression.reshape(N, -1, 4)
boxes = []
labels = []
scores = []
for l in range(1, num_classes):
score, box_delta = pred_score[:, l], box_regression[:, l]
keep = score >= self.score_thresh
box, score, box_delta = proposal[keep], score[keep], box_delta[keep]
box = self.box_coder.decode(box_delta, box)
box, score = process_box(box, score, image_shape, self.min_size)
keep = nms(box, score, self.nms_thresh)[:self.num_detections]
box, score = box[keep], score[keep]
label = torch.full((len(keep),), l, dtype=keep.dtype, device=device)
boxes.append(box)
labels.append(label)
scores.append(score)
results = dict(boxes=torch.cat(boxes), labels=torch.cat(labels), scores=torch.cat(scores))
return results
def forward(self, feature, proposal, image_shape, target):
if self.training:
proposal, matched_idx, label, regression_target = self.select_training_samples(proposal, target)
box_feature = self.box_roi_pool(feature, proposal, image_shape) # roi pooling
class_logit, box_regression = self.box_predictor(box_feature) # estimate final bbox
result, losses = {}, {}
if self.training:
classifier_loss, box_reg_loss = fastrcnn_loss(class_logit, box_regression, label, regression_target)
losses = dict(roi_classifier_loss=classifier_loss, roi_box_loss=box_reg_loss)
else:
result = self.fastrcnn_inference(class_logit, box_regression, proposal, image_shape)
if self.has_mask():
if self.training:
num_pos = regression_target.shape[0]
mask_proposal = proposal[:num_pos]
pos_matched_idx = matched_idx[:num_pos]
mask_label = label[:num_pos]
'''
# -------------- critial ----------------
box_regression = box_regression[:num_pos].reshape(num_pos, -1, 4)
idx = torch.arange(num_pos, device=mask_label.device)
mask_proposal = self.box_coder.decode(box_regression[idx, mask_label], mask_proposal)
# ---------------------------------------
'''
if mask_proposal.shape[0] == 0:
losses.update(dict(roi_mask_loss=torch.tensor(0)))
return result, losses
else:
mask_proposal = result['boxes']
if mask_proposal.shape[0] == 0:
result.update(dict(masks=torch.empty((0, 28, 28))))
return result, losses
mask_feature = self.mask_roi_pool(feature, mask_proposal, image_shape)
mask_logit = self.mask_predictor(mask_feature)
if self.training:
gt_mask = target['masks']
mask_loss = maskrcnn_loss(mask_logit, mask_proposal, pos_matched_idx, mask_label, gt_mask)
losses.update(dict(roi_mask_loss=mask_loss))
else:
label = result['labels']
idx = torch.arange(label.shape[0], device=label.device)
mask_logit = mask_logit[idx, label]
mask_prob = mask_logit.sigmoid()
result.update(dict(masks=mask_prob))
return result, losses
|
import time
import loggedmethods
class BaseInstrument(object):
logged_methods_on = False
capabilities = (
{'name': 'SystemTime', 'type': 'property'},
)
def __init__(self):
pass
def getSystemTime(self):
return time.time()
def getCapabilities(self):
implemented = []
for cap in self.capabilities:
found = {'name': cap['name'], 'implemented': []}
if cap['type'] == 'property':
for op in ('set','get'):
attr = op + cap['name']
if hasattr(self, attr):
found['implemented'].append(op)
elif cap['type'] == 'method':
if hasattr(self, cap['name']):
found['implemented'].append('call')
if found['implemented']:
implemented.append(found)
return implemented
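# Illustrative subclass (assumption, not part of the original module): an
# instrument that also implements a setter reports both operations for the
# SystemTime property.
class SettableClockInstrument(BaseInstrument):
    def setSystemTime(self, value):
        self._time = value
# SettableClockInstrument().getCapabilities()
# -> [{'name': 'SystemTime', 'implemented': ['set', 'get']}]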
|
# charter.renderers.pdf.title
#
# Renderer for drawing a title onto the chart in PDF format.
from ...constants import *
#############################################################################
def draw(chart, canvas):
text_width = canvas.stringWidth(chart['title'], "Helvetica", 24)
text_height = 24 * 1.2
left = CHART_WIDTH/2 - text_width/2
bottom = CHART_HEIGHT - TITLE_HEIGHT/2 + text_height/2
canvas.setFont("Helvetica", 24)
canvas.setFillColorRGB(0.25, 0.25, 0.625)
canvas.drawString(left, bottom, chart['title'])
|
from typing import Optional, Tuple, Dict
from dataset_tools import QuestionCase
from filenames import SlotFillingFiles
from neural_sparql_machine.fairseq_wrapper import FairseqTranslator
from entity_linking import BaseEntityLinkingSystem
from query_generation import SparqlQueryGenerator, QueryTemplateGenerator, \
FairseqQueryTemplateGenerator
from query_tools import Query, WikidataQuery, WikidataTokenizer
from question_answering.base_question_answering_system import BaseQuestionAnsweringSystem
from slot_filling import StandardQueryGenerationSlotFillingHelper, \
BaseQueryGenerationSlotFillingHelper, SlotFillingMethodEnum
from slot_filling.slot_filler import BaseSlotFiller, FlairNerSlotFiller
from templates.wikidata_template import WikidataTemplate
class QueryGeneratorNotFound(Exception):
"""
Exception when a Query Generator instance is not found.
"""
pass
class NeuralQuestionAnsweringSystem(BaseQuestionAnsweringSystem):
"""
Question Answering system based on Neural Machine Translation for generating SPARQL queries.
"""
def __init__(self, query_generator: SparqlQueryGenerator):
"""
Neural Question Answering constructor.
:param query_generator: Neural Machine Translation query generator.
"""
self.query_generator = query_generator
def get_query(self, question_case: QuestionCase, num_entities_expected: Optional[int] = None) -> Optional[WikidataQuery]:
"""
Given a question string, obtain the query that should retrieve the answers.
:param question_case: question string.
:param num_entities_expected: maximum number of entities expected.
:return: Wikidata Query, if exists.
"""
sparql_query = self.query_generator.generate_one(question_case)
return sparql_query if str(sparql_query) else None
def get_query_debug(self, question_case: QuestionCase, num_entities_expected: Optional[int] = None) -> Tuple[Optional[WikidataQuery], Dict]:
"""
Given a question string, obtain the query that should retrieve the answers.
Includes a debug dict with entities identified and the query template, for analysis purposes.
:param question_case: question string.
:param num_entities_expected: maximum number of entities expected.
:return: tuple with Wikidata Query (if exists), and its debug dict.
"""
# query_prediction = self.get_query(question_case)
sparql_query_candidates = self.query_generator.generate_one_n_candidates(question_case)
if not sparql_query_candidates:
query_prediction = None
else:
query_prediction = WikidataQuery(str(sparql_query_candidates[0]))
debug = {
'entities': None,
'slots': None,
'query_templates': [
WikidataTemplate(query).get_empty_query(ignore_type=True) for query in sparql_query_candidates
],
'sparql_queries': sparql_query_candidates
}
return query_prediction, debug
@classmethod
def load_model(cls, question_answering_opt: Dict, dataset_opt: Optional[Dict]):
print("Loading SPARQL Query Generator system...")
return cls(SparqlQueryGenerator.load_model(question_answering_opt['query_generator_opt'], dataset_opt))
class EntityLinkingQuestionAnsweringSystem(BaseQuestionAnsweringSystem):
"""
Question Answering system based on Neural Machine Translation and Entity Linking for generating SPARQL queries.
"""
def __init__(self, query_template_generator: QueryTemplateGenerator, entity_linker: BaseEntityLinkingSystem,
slot_filler: BaseSlotFiller, slot_filling_method: Optional[BaseQueryGenerationSlotFillingHelper] = None):
"""
Entity Linking Question Answering constructor.
:param query_template_generator: Neural Machine Translation query template generator.
:param entity_linker: Entity linking system for entity recognition and disambiguation.
:param slot_filler: Slot Filling system for named entity labeling.
"""
self.query_template_generator = query_template_generator
self.entity_linker = entity_linker
self.slot_filler = slot_filler
self.filler_method = slot_filling_method if slot_filling_method else StandardQueryGenerationSlotFillingHelper()
def get_query(self, question_case: QuestionCase, num_entities_expected: Optional[int] = None) -> Optional[
WikidataQuery]:
"""
Given a question string, obtain the query that should retrieve the answers.
:param question_case: question string.
:param num_entities_expected: maximum number of entities expected.
:return: Wikidata Query, if exists.
"""
entities = self.entity_linker.get_entity_extracted(question_case, num_entities_expected)
slots = self.slot_filler.evaluate(question_case)
query_template_candidates = self.query_template_generator.generate_one_n_candidates(question_case)
for query_template in query_template_candidates:
slots_dict = {case['label']: case['slot'] for case in slots}
query_string, _ = self.filler_method.fill_template(query_template, slots_dict, entities)
sparql_query = WikidataQuery(query_string)
if sparql_query.is_valid():
return sparql_query
return None
def get_query_debug(self, question_case: QuestionCase, num_entities_expected: Optional[int] = None) -> Tuple[
Query, Dict]:
"""
Given a question string, obtain the query that should retrieve the answers.
Includes a debug dict with entities identified and the query template, for analysis purposes.
The uid parameter is used for an offline Entity Linking process over known datasets.
:param question_case: question string.
:param num_entities_expected: maximum number of entities expected.
:return: tuple with Wikidata Query (if exists), and its debug dict.
"""
entities = self.entity_linker.get_entity_extracted(question_case, num_entities_expected)
slots = self.slot_filler.evaluate(question_case)
query_template_candidates = self.query_template_generator.generate_one_n_candidates(question_case)
# Create sparql candidates
sparql_query_candidates = list()
slot_entity_map_candidates = list()
for query_template in query_template_candidates:
slots_dict = {case['label']: case['slot'] for case in slots}
query_string, slot_entity_map = self.filler_method.fill_template(str(query_template), slots_dict, entities)
sparql_query = WikidataQuery(query_string)
sparql_query_candidates.append(sparql_query)
slot_entity_map_candidates.append(slot_entity_map)
return sparql_query_candidates[0] if sparql_query_candidates else None, {'entities': entities, 'slots': slots,
'query_templates': query_template_candidates,
'sparql_queries': sparql_query_candidates,
'slot_entity_map': slot_entity_map_candidates}
@classmethod
def create_fairseq_model(cls, vocab: str, checkpoints_folder: str, entity_linker: BaseEntityLinkingSystem):
"""
        Construct an Entity Linking Neural Question Answering system based
on the FairseqTranslator class to generate SPARQL queries and the Flair Slot Filler.
:param vocab: vocab path file of the Fairseq Translator
:param checkpoints_folder: model folder of the Fairseq Translator
:param entity_linker: Entity linking system for entity recognition and disambiguation.
        :return: EntityLinkingQuestionAnsweringSystem instance.
"""
# Initialize query template generator
translator = FairseqTranslator(vocab, checkpoints_folder)
query_template_generator = FairseqQueryTemplateGenerator(translator, WikidataTokenizer())
# Initialize slot filler
file_manager = SlotFillingFiles(dataset_variant='plus')
slot_filler = FlairNerSlotFiller(model_folder=file_manager.model_folder())
return cls(query_template_generator, entity_linker, slot_filler)
@classmethod
def load_model(cls, question_answering_opt: Dict, dataset_opt: Dict):
# Initialize Entity Linking system
entity_linking_opt = question_answering_opt['entity_linking_opt']
print("Building Entity Linking system...")
entity_linker = BaseEntityLinkingSystem.load_model(entity_linking_opt, dataset_opt)
# Initialize Slot Filler system
print("Loading Slot Filling system...")
slot_filler_opt = question_answering_opt['slot_filler_opt']
slot_filler = BaseSlotFiller.load_model(slot_filler_opt, dataset_opt)
filler_helper = SlotFillingMethodEnum[slot_filler_opt['filling_method']].value()
print(f"{slot_filler_opt['filling_method']} filling method...")
# Initialize Query Template generator
print("Loading Query Template Generator system...")
query_template_generator_opt = question_answering_opt['query_template_generator_opt']
query_template_generator = QueryTemplateGenerator.load_model(query_template_generator_opt, dataset_opt)
return cls(query_template_generator, entity_linker, slot_filler, filler_helper)
|
class Trie:
class Node:
def __init__(self):
self.endmark = False
self.next = {}
def __init__(self):
self.root = self.Node()
    def insert(self, word):
        """Insert a word into the trie (case-insensitive)."""
        word = word.lower()
        curr = self.root
        for c in word:
            if c not in curr.next:
                curr.next[c] = self.Node()
            curr = curr.next[c]
        curr.endmark = True
    def search(self, word):
        """Return True if the exact word (case-insensitive) has been inserted."""
        word = word.lower()
        curr = self.root
        for c in word:
            if c not in curr.next:
                return False
            curr = curr.next[c]
        return curr.endmark
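# Minimal usage sketch for the Trie above (guarded so it only runs when this
# module is executed directly); the example words are illustrative.
if __name__ == "__main__":
    trie = Trie()
    trie.insert("Hello")
    assert trie.search("hello")        # lookups are case-insensitive
    assert not trie.search("hell")     # prefixes of inserted words are not matches
    assert not trie.search("world")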
|
#!/usr/bin/env python3
"""
Created on 15 Oct 2020
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
DESCRIPTION
The disk_volume utility is used to determine whether a volume is mounted and, if so, the free and used space on
the volume. Space is given in blocks. The volume is identified by its mount point.
If the "is-available" field in the report is false, this indicates that an OS error occurred when
an attempt was made to access the volume. This error can occur if a removable medium failed, or
was disconnected without being unmounted.
The disk_volume utility is normally included in the commands accepted by the control_receiver utility.
SYNOPSIS
disk_volume.py [-v] MOUNTED_ON
EXAMPLES
./disk_volume.py -v /srv/SCS_logging
DOCUMENT EXAMPLE
{"filesystem": "/dev/mmcblk0p1", "size": 15384184, "used": 319296, "free": 14892092,
"mounted-on": "/srv/SCS_logging", "is-available": false}
SEE ALSO
scs_dev/disk_usage
"""
import sys
from scs_core.data.json import JSONify
from scs_dev.cmd.cmd_disk_volume import CmdDiskVolume
from scs_host.sys.host import Host
# --------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
# ----------------------------------------------------------------------------------------------------------------
# cmd...
cmd = CmdDiskVolume()
if not cmd.is_valid():
cmd.print_help(sys.stderr)
exit(2)
if cmd.verbose:
print("disk_volume: %s" % cmd, file=sys.stderr)
# ----------------------------------------------------------------------------------------------------------------
# run...
volume = Host.disk_volume(cmd.mounted_on)
print(JSONify.dumps(volume))
# ----------------------------------------------------------------------------------------------------------------
# end...
if cmd.verbose and volume:
print("disk_volume: percent used: %s" % volume.percent_used(), file=sys.stderr)
|
from dataclasses import dataclass, field
from typing import Dict, Any
@dataclass
class RecconSpanExtractionArguments:
model_name: str = field(
default="mrm8488/spanbert-finetuned-squadv2",
metadata={"help": "Pretrained model to use for training"},
)
train_data_path: str = field(
default="data/subtask1/fold1/dailydialog_qa_train_with_context.json",
metadata={"help": "Path of training data"},
)
val_data_path: str = field(
default="data/subtask1/fold1/dailydialog_qa_valid_with_context.json",
metadata={"help": "Path of validation data"},
)
test_data_path: str = field(
default="data/subtask1/fold1/dailydialog_qa_test_with_context.json",
metadata={"help": "Path of validation data"},
)
max_seq_length: int = field(
default=512,
metadata={"help": "Maximum sequence length"},
)
doc_stride: int = field(
default=512,
metadata={"help": "Document stride"},
)
max_query_length: int = field(
default=512,
metadata={"help": "Maximum query length"},
)
train_args: Dict[str, Any] = field(
default_factory=lambda: {
"output_dir": "output/",
"overwrite_output_dir": True,
"evaluation_strategy": "steps",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"gradient_accumulation_steps": 1,
"learning_rate": 1e-5,
"weight_decay": 0,
"adam_epsilon": 1e-8,
"max_grad_norm": 1,
"num_train_epochs": 12,
"warmup_ratio": 0.06,
"no_cuda": False,
"seed": 0,
"fp16": False,
"load_best_model_at_end": True,
"label_names": ["start_positions", "end_positions"],
"report_to": "none",
},
metadata={"help": "Arguments for training Reccon Span Extraction models."},
)
eval_args: Dict[str, Any] = field(
default_factory=lambda: {
"trained_model_dir": "output/",
"results_path": "result/",
"batch_size": 16,
"n_best_size": 20,
"null_score_diff_threshold": 0.0,
"sliding_window": False,
"no_cuda": False,
"max_answer_length": 200,
},
metadata={"help": "Arguments for evaluating Reccon Span Extraction models."},
)
def __post_init__(self):
# Model
assert self.model_name in [
"mrm8488/spanbert-finetuned-squadv2",
"roberta-base",
], "Invalid model type!"
# Training
assert self.max_seq_length > 0, "max_seq_length must be positive."
assert self.doc_stride > 0, "doc_stride must be positive."
assert self.max_query_length > 0, "max_query_length must be positive."
assert isinstance(
self.train_args, Dict
), "train_args must be represented as a Dictionary."
assert self.train_args["seed"] >= 0, "Random seed must be at least 0."
assert (
self.train_args["num_train_epochs"] > 0
), "num_train_epochs must be at least 1."
assert (
self.train_args["per_device_train_batch_size"] > 0
), "per_device_train_batch_size must be at least 1."
assert (
self.train_args["per_device_eval_batch_size"] > 0
), "per_device_eval_batch_size must be at least 1."
assert (
self.train_args["gradient_accumulation_steps"] > 0
), "gradient_accumulation_steps must be positive."
assert self.train_args["learning_rate"] > 0, "learning_rate must be positive."
assert self.train_args["warmup_ratio"] >= 0, "warmup_ratio must be at least 0."
assert self.train_args["weight_decay"] >= 0, "weight_decay must be at least 0."
assert self.train_args["max_grad_norm"] > 0, "max_grad_norm must be positive."
assert self.train_args["adam_epsilon"] >= 0, "adam_epsilon must be at least 0."
# Eval
assert isinstance(
self.eval_args, Dict
), "eval_args must be represented as a Dictionary."
assert self.eval_args["n_best_size"] >= 1, "n_best_size must be at least 1."
assert (
self.eval_args["null_score_diff_threshold"] >= 0
), "null_score_diff_threshold must be at least 0."
assert (
self.eval_args["max_answer_length"] >= 1
), "max_answer_length must be at least 1."
|
from user_manager.manager.app import app as manager_app
from user_manager.oauth.app import app
app.mount('/api/v1/manager', manager_app)
def print_routes(container, prefix=''):
if hasattr(container, 'routes'):
for route in getattr(container, 'routes'):
print_routes(route, prefix + getattr(container, 'path', ''))
elif hasattr(container, 'path'):
        print(
            f"Route {', '.join(getattr(container, 'methods', None) or [])} {prefix}{getattr(container, 'path', None)}: "
            f"{getattr(container, 'name', None)}"
        )
else:
print(f"Route {repr(container)}")
print_routes(app)
|
shadowed = False
def property(f):
global shadowed
shadowed = True
return f
class C(object):
@property
def meth(self):
pass
C()
___assertTrue(shadowed)
|
#!/usr/bin/python
# Get the Commonly Used Abbreviation for ML Estimators/Algorithms
classification_estimators = {
"Logistic Regression": "lr",
"K Nearest Neighbour": "knn",
"Naives Bayes": "nb",
"Decision Tree": "dt",
"SVM (Linear)": "svm",
"SVM (RBF)": "rbfsvm",
"Gaussian Process": "gpc",
"Multi Level Perceptron": "mlp",
"Ridge Classifier": "ridge",
"Random Forest": "rf",
"Quadratic Discriminant Analysis": "qda",
"AdaBoost": "ada",
"Gradient Boosting Classifier": "gbc",
"Linear Discriminant Analysis": "lda",
"Extra Trees Classifier": "et",
"Extreme Gradient Boosting": "xgboost",
"Light Gradient Boosting": "lightgbm",
"Cat Boost Classifier": "catboost",
}
regression_estimators = {
"Linear Regression": "lr",
"Lasso Regression": "lasso",
"Ridge Regression": "ridge",
"Elastic Net": "en",
"Least Angle Regression": "lar",
"Lasso Least Angle Regression": "llar",
"Orthogonal Matching Pursuit": "omp",
"Bayesian Ridge": "br",
"Automatic Relevance Determination": "ard",
"Passive Aggressive Regressor": "par",
"Random Sample Consensus": "ransac",
"TheilSen Regressor": "tr",
"Huber Regressor": "huber",
"Kernel Ridge": "kr",
"Support Vector Machine": "svm",
"K Neighbors Regressor": "knn",
"Decision Tree": "dt",
"Random Forest": "rf",
"Extra Trees Regressor": "et",
"AdaBoost Regressor": "ada",
"Gradient Boosting Regressor": "gbr",
"Multi Level Perceptron": "mlp",
"Extreme Gradient Boosting": "xgboost",
"Light Gradient Boosting": "lightgbm",
"CatBoost Regressor": "catboost",
}
anomaly_detection_estimators = {
"Angle-base Outlier Detection": "abod",
"Isolation Forest": "iforest",
"Clustering-Based Local Outlier": "cluster",
"Connectivity-Based Outlier Factor": "cof",
"Histogram-based Outlier Detection": "histogram",
"k-Nearest Neighbors Detector": "knn",
"Local Outlier Factor": "lof",
"One-class SVM detector": "svm",
"Principal Component Analysis": "pca",
"Minimum Covariance Determinant": "mcd",
"Subspace Outlier Detection": "sod",
"Stochastic Outlier Selection": "sos",
}
nlp_estimators = {
"Latent Dirichlet Allocation": "lda",
"Latent Semantic Indexing": "lsi",
"Hierarchical Dirichlet Process": "hdp",
"Random Projections": "rp",
"Non-Negative Matrix Factorization": "nmf",
}
clustering_estimators = {
"K-Means Clustering": "kmeans",
"Affinity Propagation": "ap",
"Mean shift Clustering": "meanshift",
"Spectral Clustering": "sc",
"Agglomerative Clustering": "hclust",
"Density-Based Spatial Clustering": "dbscan",
"OPTICS Clustering": "optics",
"Birch Clustering": "birch",
"K-Modes Clustering": "kmodes",
}
all_estimators = {
"Logistic Regression": "lr",
"K Nearest Neighbour": "knn",
"Naives Bayes": "nb",
"Decision Tree": "dt",
"SVM (Linear)": "svm",
"SVM (RBF)": "rbfsvm",
"Gaussian Process": "gpc",
"Multi Level Perceptron": "mlp",
"Ridge Classifier": "ridge",
"Random Forest": "rf",
"Quadratic Discriminant Analysis": "qda",
"AdaBoost": "ada",
"Gradient Boosting Classifier": "gbc",
"Linear Discriminant Analysis": "lda",
"Extra Trees Classifier": "et",
"Extreme Gradient Boosting": "xgboost",
"Light Gradient Boosting": "lightgbm",
"Cat Boost Classifier": "catboost",
"Linear Regression": "lr",
"Lasso Regression": "lasso",
"Ridge Regression": "ridge",
"Elastic Net": "en",
"Least Angle Regression": "lar",
"Lasso Least Angle Regression": "llar",
"Orthogonal Matching Pursuit": "omp",
"Bayesian Ridge": "br",
"Automatic Relevance Determination": "ard",
"Passive Aggressive Regressor": "par",
"Random Sample Consensus": "ransac",
"TheilSen Regressor": "tr",
"Huber Regressor": "huber",
"Kernel Ridge": "kr",
"Support Vector Machine": "svm",
"K Neighbors Regressor": "knn",
"Decision Tree": "dt",
"Random Forest": "rf",
"Extra Trees Regressor": "et",
"AdaBoost Regressor": "ada",
"Gradient Boosting Regressor": "gbr",
"Multi Level Perceptron": "mlp",
"Extreme Gradient Boosting": "xgboost",
"Light Gradient Boosting": "lightgbm",
"CatBoost Regressor": "catboost",
"Angle-base Outlier Detection": "abod",
"Isolation Forest": "iforest",
"Clustering-Based Local Outlier": "cluster",
"Connectivity-Based Outlier Factor": "cof",
"Histogram-based Outlier Detection": "histogram",
"k-Nearest Neighbors Detector": "knn",
"Local Outlier Factor": "lof",
"One-class SVM detector": "svm",
"Principal Component Analysis": "pca",
"Minimum Covariance Determinant": "mcd",
"Subspace Outlier Detection": "sod",
"Stochastic Outlier Selection": "sos",
"Latent Dirichlet Allocation": "lda",
"Latent Semantic Indexing": "lsi",
"Hierarchical Dirichlet Process": "hdp",
"Random Projections": "rp",
"Non-Negative Matrix Factorization": "nmf",
"K-Means Clustering": "kmeans",
"Affinity Propagation": "ap",
"Mean shift Clustering": "meanshift",
"Spectral Clustering": "sc",
"Agglomerative Clustering": "hclust",
"Density-Based Spatial Clustering": "dbscan",
"OPTICS Clustering": "optics",
"Birch Clustering": "birch",
"K-Modes Clustering": "kmodes",
"Validation Curve": "vc",
"Confusion Matrix": "cm/confusion_matrix",
}
def get_abbrev(estimator_name, estimator_type="all"):
"""Return the Abbreviation/Short Form for an ML Estimator/Algorithm
# Example
>>> get_abbrev('Logistic Regression')
'lr'
>>> get_abbrev('Logistic Regression','classification')
'lr'
"""
if estimator_type == "classification":
for key, value in classification_estimators.items():
if estimator_name.title() == key:
return value
return False
elif estimator_type == "regression":
for key, value in regression_estimators.items():
if estimator_name.title() == key:
return value
return False
elif estimator_type == "clustering":
for key, value in clustering_estimators.items():
if estimator_name.title() == key:
return value
return False
elif estimator_type == "nlp":
for key, value in nlp_estimators.items():
if estimator_name.title() == key:
return value
return False
elif estimator_type == "anomaly":
for key, value in anomaly_detection_estimators.items():
if estimator_name.title() == key:
return value
return False
else:
for key, value in all_estimators.items():
if estimator_name.title() == key:
return value
return False
def get_fullname(estimator_abbrev, estimator_type="all"):
"""Return the Full Name for an Abbreviated Estimator/Algorithm
# Example
>>> get_fullname('lr')
'Logistic Regression'
'Linear Regression'
>>> get_fullname('dt')
'Decision Tree'
"""
for key, value in all_estimators.items():
if estimator_abbrev.lower() == value:
return key
return False
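# Small usage sketch of the lookup helpers above; unknown names fall through to
# the False return value.
if __name__ == "__main__":
    print(get_abbrev("Logistic Regression"))          # lr
    print(get_abbrev("Random Forest", "regression"))  # rf
    print(get_fullname("dt"))                         # Decision Tree
    print(get_abbrev("Not An Estimator"))             # False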
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Miroslav Bauer, CESNET.
#
# oarepo-references is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Pytest configuration.
See https://pytest-invenio.readthedocs.io/ for documentation on which test
fixtures are available.
"""
import os
import uuid
import pytest
from flask import url_for
from invenio_app.factory import create_api
from invenio_db import db as _db
from invenio_pidstore.providers.recordid import RecordIdProvider
from sqlalchemy_utils import create_database, database_exists
from tests.test_utils import TestRecord
from invenio_search import RecordsSearch
from oarepo_references.api import RecordReferenceAPI
from oarepo_references.models import ClassName
@pytest.fixture(scope="module")
def create_app():
"""Return API app."""
return create_api
@pytest.fixture(scope="module")
def references_api():
"""Returns an instance of RecordReferenceAPI."""
return RecordReferenceAPI()
@pytest.fixture(scope='session')
def celery_config():
"""Celery app test configuration."""
return {
'broker_url': 'memory://localhost/',
'result_backend': 'rpc'
}
@pytest.fixture(scope="module")
def app_config(app_config):
"""Flask application fixture."""
app_config = dict(
TESTING=True,
JSON_AS_ASCII=True,
SQLALCHEMY_TRACK_MODIFICATIONS=True,
SQLALCHEMY_DATABASE_URI=os.environ.get(
'SQLALCHEMY_DATABASE_URI',
'sqlite:///:memory:'),
SERVER_NAME='localhost',
CELERY_ALWAYS_EAGER=True,
CELERY_BROKER_URL='memory://localhost/',
CELERY_RESULT_BACKEND='rpc'
)
app_config['PIDSTORE_RECID_FIELD'] = 'pid'
app_config['RECORDS_REST_ENDPOINTS'] = dict(
recid=dict(
pid_type='recid',
pid_minter='recid',
pid_fetcher='recid',
search_class=RecordsSearch,
search_index=None,
search_type=None,
record_serializers={
'application/json': ('invenio_records_rest.serializers'
':json_v1_response'),
},
search_serializers={
'application/json': ('invenio_records_rest.serializers'
':json_v1_search'),
},
list_route='/records/',
item_route='/records/<pid(recid):pid_value>'
)
)
return app_config
@pytest.fixture
def db(app):
"""Returns fresh db."""
with app.app_context():
if not database_exists(str(_db.engine.url)) and \
app.config['SQLALCHEMY_DATABASE_URI'] != 'sqlite://':
create_database(_db.engine.url)
_db.create_all()
yield _db
# Explicitly close DB connection
_db.session.close()
_db.drop_all()
def get_pid():
"""Generates a new PID for a record."""
record_uuid = uuid.uuid4()
provider = RecordIdProvider.create(
object_type='rec',
object_uuid=record_uuid,
)
return record_uuid, provider.pid.pid_value
@pytest.fixture
def referenced_records(db):
"""Create a list of records to be referenced by other records."""
rrdata = [{'title': 'a'}, {'title': 'b'}]
referenced_records = []
for rr in rrdata:
ruuid, pid = get_pid()
rr['pid'] = pid
referenced_records.append(TestRecord.create(rr, id_=ruuid))
db.session.commit()
return referenced_records
def get_ref_url(pid):
"""Returns canonical_url for a record by its PID."""
return url_for('invenio_records_rest.recid_item',
pid_value=pid, _external=True)
@pytest.fixture
def class_names(db):
"""Test Class names fixture."""
class_names = [
ClassName.create(name=str(TestRecord.__class__))
]
db.session.commit()
return class_names
@pytest.fixture
def referencing_records(db, referenced_records):
"""Create sample records with references to others."""
referencing_records = [
TestRecord.create({
'title': 'c',
'pid': get_pid()[1],
'$ref': get_ref_url(referenced_records[0]['pid'])
}),
TestRecord.create({
'title': 'd',
'pid': get_pid()[1],
'$ref': get_ref_url(referenced_records[1]['pid'])
}),
TestRecord.create({'title': 'e',
'pid': get_pid()[1],
'reflist': [
{'$ref': get_ref_url(referenced_records[1]['pid'])},
{'$ref': get_ref_url(referenced_records[0]['pid'])}
]}),
TestRecord.create({'title': 'f',
'pid': get_pid()[1],
'reflist': [
{'title': 'f', '$ref': get_ref_url(referenced_records[0]['pid'])},
]})
]
db.session.commit()
return referencing_records
@pytest.fixture
def test_record_data():
"""Returns a data for a test record."""
return {
'pid': 999,
'title': 'rec1',
'taxo1': {
'links': {
'self': 'http://localhost/api/taxonomies/requestors/a/b/',
},
'slug': 'b'
},
'sub': {
'taxo2': {
'links': {
'self': 'http://localhost/api/taxonomies/requestors/a/c/',
},
'slug': 'c'
}
}
}
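# Illustrative sketch of a test that would consume the fixtures above; pytest
# does not collect tests from conftest.py, so this function is inert here and
# only documents the intended fixture usage.
def _example_referencing_records_usage(referenced_records, referencing_records):
    """Each referencing record points at the canonical URL of a referenced record."""
    assert referencing_records[0]['$ref'] == get_ref_url(referenced_records[0]['pid'])
    assert referencing_records[1]['$ref'] == get_ref_url(referenced_records[1]['pid'])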
|
import json
import requests
import os
import psycopg2
import subprocess
import csv
import datetime
import utilities
import config
def main():
stocks = config.stocks
get_data_csv_name = config.get_data_csv_name
db_csv_name = config.db_csv_name
subprocess.call( ["rm", get_data_csv_name] )
subprocess.call( ["rm", db_csv_name] )
number = 1
for stock_index in range(len(stocks) - 1):
qiita_api = os.environ['QIITA_API']
url = "https://qiita.com/api/v2/items"
h = {"Authorization": "Bearer " + qiita_api}
p = {
'per_page': 100,
'query': 'stocks:<{} stocks:>{}'.format(str(int(stocks[stock_index]) + 1), stocks[stock_index + 1])
}
response = requests.get(url, params=p, headers=h)
response_list = json.loads(response.text)
for index, item in enumerate(response_list):
created_at = response_list[index]["created_at"]
article_id = response_list[index]["id"]
likes_count = response_list[index]["likes_count"]
            tags = []
for tag_index in range(5):
try:
tags.append(response_list[index]["tags"][tag_index]["name"])
except IndexError:
tags.append(None)
title = response_list[index]["title"]
updated_at = response_list[index]["updated_at"]
url = response_list[index]["url"]
user_id = response_list[index]["user"]["id"]
number = utilities.write_csv(get_data_csv_name, number, article_id, user_id, title, likes_count, url, tags, created_at, updated_at)
utilities.delete_csv_row(get_data_csv_name, db_csv_name)
dt_now = datetime.datetime.now()
file = open('update_log.txt', 'a')
file.write(str(dt_now) + "\n")
file.close()
conn = utilities.get_connection()
cur = conn.cursor()
cur.execute('DELETE FROM update_time')
cur.execute('INSERT INTO update_time VALUES (' + str(dt_now.year) + ',' + str(dt_now.month) + ',' + str(dt_now.day) + ')')
cur.execute('DELETE FROM articles')
    f = open(db_csv_name, 'r')
    cur.copy_from(f, 'articles', sep=',', null='\\N')
    f.close()
    conn.commit()
    cur.close()
    conn.close()
if __name__ == "__main__":
# try:
main()
# except Exception as e:
# file = open('update_log.txt', 'a')
# file.write(str(e) + ">>>\n")
# file.close()
|
import pytest
from xdl.errors import (
XDLUndeclaredAlwaysWriteError,
XDLUndeclaredDefaultPropError,
XDLUndeclaredPropLimitError,
XDLUndeclaredInternalPropError
)
from xdl.steps import AbstractStep
from xdl.utils.prop_limits import ROTATION_SPEED_PROP_LIMIT
class TestUndeclaredDefaultProp(AbstractStep):
__test__ = False
PROP_TYPES = {
'volume': float
}
DEFAULT_PROPS = {
'volume': '15 mL',
'stir': True
}
def __init__(self):
super().__init__(locals())
def get_steps(self):
return []
class TestUndeclaredInternalProp(AbstractStep):
__test__ = False
PROP_TYPES = {
'volume': float
}
DEFAULT_PROPS = {
'volume': '15 mL'
}
INTERNAL_PROPS = [
'stir'
]
def __init__(self):
super().__init__(locals())
def get_steps(self):
return []
class TestUndeclaredAlwaysWrite(AbstractStep):
__test__ = False
PROP_TYPES = {
'volume': float
}
DEFAULT_PROPS = {
'volume': '15 mL'
}
ALWAYS_WRITE = [
'stir'
]
def __init__(self):
super().__init__(locals())
def get_steps(self):
return []
class TestUndeclaredPropLimit(AbstractStep):
__test__ = False
PROP_TYPES = {
'volume': float
}
DEFAULT_PROPS = {
'volume': '15 mL'
}
PROP_LIMITS = {
'stir_speed': ROTATION_SPEED_PROP_LIMIT
}
def __init__(self):
super().__init__(locals())
def get_steps(self):
return []
@pytest.mark.unit
def test_undeclared_default_props():
"""Test error raised if default prop specified that isn't in PROP_TYPES."""
with pytest.raises(XDLUndeclaredDefaultPropError):
TestUndeclaredDefaultProp()
@pytest.mark.unit
def test_undeclared_prop_limits():
"""Test error raised if prop limit specified that isn't in PROP_TYPES."""
with pytest.raises(XDLUndeclaredPropLimitError):
TestUndeclaredPropLimit()
@pytest.mark.unit
def test_undeclared_internal_props():
"""Test error raised if internal prop specified that isn't in PROP_TYPES."""
with pytest.raises(XDLUndeclaredInternalPropError):
TestUndeclaredInternalProp()
@pytest.mark.unit
def test_undeclared_always_write():
"""Test error raised if always write specified that isn't in PROP_TYPES."""
with pytest.raises(XDLUndeclaredAlwaysWriteError):
TestUndeclaredAlwaysWrite()
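# Counterpart sketch: when 'stir' is declared in PROP_TYPES, the same default
# value is accepted and none of the XDLUndeclared*Error exceptions above should
# be raised. This assumes, as in the classes above, that the step can be
# constructed from DEFAULT_PROPS alone.
class DeclaredStirStep(AbstractStep):
    __test__ = False
    PROP_TYPES = {
        'volume': float,
        'stir': bool
    }
    DEFAULT_PROPS = {
        'volume': '15 mL',
        'stir': True
    }
    def __init__(self):
        super().__init__(locals())
    def get_steps(self):
        return []
@pytest.mark.unit
def test_declared_props_raise_no_error():
    """Sanity check: props declared in PROP_TYPES do not trigger the errors above."""
    DeclaredStirStep()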
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
UL call demonstrated: TmrDevice.pulse_out_start()
Purpose: Generate an output pulse using the
specified timer
Demonstration: Outputs user defined pulse on the
specified timer
Steps:
1. Call get_daq_device_inventory() to get the list of available DAQ devices
2. Call DaqDevice() to create a DaqDevice object
3. Call DaqDevice.get_tmr_device() to get the TmrDevice object for the timer
subsystem
4. Verify the TmrDevice object is valid
5. Call DaqDevice.connect() to connect to the device
6. Call TmrDevice.pulse_out_start() to start the output pulse for the specified
timer
7. Call TmrDevice.get_pulse_out_status() to get the output status and display
the status
8. Call TmrDevice.scan_stop() to stop the scan
9. Call DaqDevice.disconnect() and DaqDevice.release() before exiting the
process
"""
from __future__ import print_function
from time import sleep
from sys import stdout
from os import system
from uldaq import (get_daq_device_inventory, DaqDevice, InterfaceType,
TmrIdleState, PulseOutOption, TmrStatus)
# Constants
ERASE_LINE = '\x1b[2K'
def main():
"""Timer pulse output example."""
timer_number = 0
frequency = 1000.0 # Hz
duty_cycle = 0.5 # 50 percent
pulse_count = 0 # Continuous
initial_delay = 0.0
idle_state = TmrIdleState.LOW
options = PulseOutOption.DEFAULT
interface_type = InterfaceType.ANY
daq_device = None
tmr_device = None
try:
# Get descriptors for all of the available DAQ devices.
devices = get_daq_device_inventory(interface_type)
number_of_devices = len(devices)
# Verify at least one DAQ device is detected.
if number_of_devices == 0:
raise RuntimeError('Error: No DAQ devices found')
print('Found', number_of_devices, 'DAQ device(s):')
for i in range(number_of_devices):
print(' [', i, '] ', devices[i].product_name, ' (',
devices[i].unique_id, ')', sep='')
descriptor_index = input('\nPlease select a DAQ device, enter a number'
+ ' between 0 and '
+ str(number_of_devices - 1) + ': ')
descriptor_index = int(descriptor_index)
if descriptor_index not in range(number_of_devices):
raise RuntimeError('Error: Invalid descriptor index')
# Create the DAQ device from the descriptor at the specified index.
daq_device = DaqDevice(devices[descriptor_index])
tmr_device = daq_device.get_tmr_device()
# Verify the specified DAQ device supports timers.
if tmr_device is None:
raise RuntimeError('Error: The DAQ device does not support timers')
# Establish a connection to the device.
descriptor = daq_device.get_descriptor()
print('\nConnecting to', descriptor.dev_string, '- please wait...')
# For Ethernet devices using a connection_code other than the default
# value of zero, change the line below to enter the desired code.
daq_device.connect(connection_code=0)
print('\n', descriptor.dev_string, 'ready')
print(' Function demonstrated: TmrDevice.pulse_out_start')
print(' Timer:', timer_number)
print(' Frequency:', frequency, 'Hz')
print(' Duty cycle:', duty_cycle)
print(' Initial delay:', initial_delay)
try:
input('\nHit ENTER to continue')
except (NameError, SyntaxError):
pass
# Start the timer pulse output.
(frequency,
duty_cycle,
initial_delay) = tmr_device.pulse_out_start(timer_number, frequency,
duty_cycle, pulse_count,
initial_delay, idle_state,
options)
system('clear')
print('Please enter CTRL + C to terminate the process\n')
print('Active DAQ device: ', descriptor.dev_string, ' (',
descriptor.unique_id, ')\n', sep='')
print(' Actual frequency:', frequency, 'Hz')
        print('    Actual duty cycle:', duty_cycle)
        print('    Actual initial delay:', initial_delay)
try:
print('\n Outputting {0:.6f} Hz pulse with duty cycle {1:.3f} '
'for timer {2:d}'.format(frequency, duty_cycle, timer_number))
status = tmr_device.get_pulse_out_status(timer_number)
count = 0
if status == TmrStatus.RUNNING:
# If the status is RUNNING, then this timer does support the
# get_pulse_out_status() function so the status is checked to
# determine if the pulse output is stopped due to an error.
while status == TmrStatus.RUNNING:
status = tmr_device.get_pulse_out_status(timer_number)
print_status_dots(count)
count += 1
else:
# If the status is IDLE, then this timer does not support the
# get_pulse_out_status() function so we will wait for user
# input to stop the pulse output.
while True:
print_status_dots(count)
count += 1
except KeyboardInterrupt:
pass
except RuntimeError as error:
print('\n', error)
finally:
if daq_device:
# Stop the scan.
if tmr_device:
tmr_device.pulse_out_stop(timer_number)
stdout.write(ERASE_LINE)
print('\r Status:', TmrStatus.IDLE)
# Disconnect from the DAQ device.
if daq_device.is_connected():
daq_device.disconnect()
# Release the DAQ device resource.
daq_device.release()
def print_status_dots(count):
"""Display incrementing dots to indicate a status of running."""
if count % 6 == 0:
stdout.write(ERASE_LINE)
print('\r ', TmrStatus.RUNNING, end='')
else:
print('.', end='')
stdout.flush()
sleep(0.5)
if __name__ == '__main__':
main()
|
__author__ = "Swas.py"
__title__ = "vscode"
__license__ = "MIT"
__copyright__ = "Copyright 2021 Swas.py"
__version__ = "1.4.5"
from . import window
from .compiler import build
from .extension import Extension
from .envMethods import env
from . import _types as ext
from ._types import *
|
# Generated by Django 3.2.5 on 2021-07-27 17:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('movielist_app', '0002_auto_20210727_1527'),
]
operations = [
migrations.AddField(
model_name='watchlist',
name='platform',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='watchlist', to='movielist_app.streamplatform'),
preserve_default=False,
),
]
|
from flask_restful import Resource, current_app, request
from schematics.exceptions import DataError
from server.models.postgis.task import Task
from server.models.postgis.task_annotation import TaskAnnotation
from server.services.project_service import ProjectService, NotFound
from server.services.task_annotations_service import TaskAnnotationsService
from server.services.application_service import ApplicationService
class AnnotationsRestAPI(Resource):
def get(self, project_id: int, annotation_type: str = None):
"""
Get all task annotations for a project
---
tags:
- annotations
produces:
- application/json
parameters:
- name: project_id
in: path
description: The ID of the project
required: true
type: integer
- name: annotation_type
in: path
description: The type of annotation to fetch
required: false
type: string
responses:
200:
description: Project Annotations
404:
description: Project or annotations not found
500:
description: Internal Server Error
"""
try:
ProjectService.get_project_by_id(project_id)
except NotFound as e:
current_app.logger.error(f"Error validating project: {str(e)}")
return {"Error": "Project not found"}, 404
try:
if annotation_type:
annotations = TaskAnnotation.get_task_annotations_by_project_id_type(
project_id, annotation_type
)
else:
annotations = TaskAnnotation.get_task_annotations_by_project_id(
project_id
)
return annotations.to_primitive(), 200
except NotFound:
return {"Error": "Annotations not found"}, 404
def post(self, project_id: int, annotation_type: str):
"""
Store new task annotations for tasks of a project
---
tags:
- annotations
produces:
- application/json
parameters:
- in: header
name: Content-Type
description: Content type for post body
required: true
type: string
default: application/json
- name: project_id
in: path
description: The unique project ID
required: true
type: integer
- name: annotation_type
in: path
description: Annotation type
required: true
type: string
- name: Application-Token
in: header
description: Application token registered with TM
required: true
type: string
- in: body
name: body
required: true
description: JSON object for creating draft project
schema:
projectId:
type: integer
required: true
annotationType:
type: string
required: true
tasks:
type: array
required: true
items:
schema:
taskId:
type: integer
required: true
annotationSource:
type: string
annotationMarkdown:
type: string
properties:
description: JSON object with properties
responses:
200:
description: Project updated
400:
description: Client Error - Invalid Request
404:
description: Project or task not found
500:
description: Internal Server Error
"""
if "Application-Token" in request.headers:
application_token = request.headers["Application-Token"]
try:
is_valid_token = ApplicationService.check_token( # noqa
application_token
)
except NotFound:
current_app.logger.error(f"Invalid token")
return {"Error": "Invalid token"}, 500
else:
current_app.logger.error(f"No token supplied")
return {"Error": "No token supplied"}, 500
        try:
            annotations = request.get_json() or {}
        except DataError as e:
            current_app.logger.error(f"Error validating request: {str(e)}")
            return {"Error": "Unable to parse request body"}, 400
        try:
            ProjectService.get_project_by_id(project_id)
        except NotFound as e:
            current_app.logger.error(f"Error validating project: {str(e)}")
            return {"Error": "Project not found"}, 404
task_ids = [t["taskId"] for t in annotations["tasks"]]
# check if task ids are valid
tasks = Task.get_tasks(project_id, task_ids)
tasks_ids_db = [t.id for t in tasks]
if len(task_ids) != len(tasks_ids_db):
return {"Error": "Invalid task id"}, 500
for annotation in annotations["tasks"]:
try:
TaskAnnotationsService.add_or_update_annotation(
annotation, project_id, annotation_type
)
except DataError as e:
current_app.logger.error(f"Error creating annotations: {str(e)}")
return {"Error": "Error creating annotations"}, 500
return project_id, 200
def put(self, project_id: int, task_id: int):
"""
Update a single task's annotations
"""
pass
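# Hedged wiring sketch (not part of the original module): shows how this
# resource could be registered with a Flask-RESTful Api. The URL rules below
# are illustrative; the real routes are defined in the application factory.
if __name__ == "__main__":
    from flask import Flask
    from flask_restful import Api
    app = Flask(__name__)
    api = Api(app)
    api.add_resource(
        AnnotationsRestAPI,
        "/projects/<int:project_id>/annotations/",
        "/projects/<int:project_id>/annotations/<string:annotation_type>/",
    )
    print(sorted(str(rule) for rule in app.url_map.iter_rules()))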
|
from enum import Enum
from typing import Literal as _Literal, Sequence
from .lexeme import Lexeme
__all__ = ["Token"]
# backwards / forwards paradox again...
# TODO: improve on this
Type = type
Object = object # TODO: make this precise
def construct(name, bases: Sequence[Type]):
...
def create(name, bases: Sequence[Type], methods) -> Object:
...
class _Token(Enum):
LEFT_PAREN = "("
RIGHT_PAREN = ")"
class Line(int):
...
class Literal(Enum, _Literal):
...
class TokenType:
...
class Token(
type,
Lexeme,
Literal,
Line,
):
...
def scan():
...
|
#!/usr/bin/env python3
"""
Computes embeddings on a set of tasks
"""
import json
import os
import shutil
import time
from pathlib import Path
import click
import tensorflow as tf
import torch
from slugify import slugify
from tqdm import tqdm
import heareval.gpu_max_mem as gpu_max_mem
from heareval.embeddings.task_embeddings import Embedding, task_embeddings
if torch.cuda.is_available() and not tf.test.is_gpu_available(
cuda_only=False, min_cuda_compute_capability=None
):
raise ValueError("GPUs not available in tensorflow, but found by pytorch")
@click.command()
@click.argument("module", type=str)
@click.option(
"--model",
default=None,
help="Location of model weights file",
type=click.Path(exists=True),
)
@click.option(
"--tasks-dir",
default="tasks",
help="Location of tasks to compute embeddings on",
type=str,
)
@click.option(
"--task",
default="all",
help="Task to run. (Default: all)",
type=str,
)
@click.option(
"--embeddings-dir", default="embeddings", help="Location to save task embeddings"
)
@click.option(
"--model-options", default="{}", help="A JSON dict of kwargs to pass to load_model"
)
def runner(
module: str,
model: str = None,
tasks_dir: str = "tasks",
task: str = "tasks",
embeddings_dir: str = "embeddings",
model_options: str = "{}",
) -> None:
model_options_dict = json.loads(model_options)
if isinstance(model_options_dict, dict):
if model_options_dict:
options_str = "-" + "-".join(
[
"%s=%s" % (slugify(k), slugify(str(v)))
for k, v in model_options_dict.items()
]
)
else:
options_str = ""
else:
raise ValueError("model_options should be a JSON dict")
# Check for directory containing the tasks
tasks_dir_path = Path(tasks_dir)
embeddings_dir_path = Path(embeddings_dir)
print(embeddings_dir_path)
if not tasks_dir_path.is_dir():
raise ValueError(
"Cannot locate directory containing tasks. "
f"Ensure that directory named {tasks_dir_path} exists or specify a folder "
f"containing HEAR tasks using the argument --tasks-dir"
)
# Load the embedding model
embedding = Embedding(module, model, model_options_dict)
if task == "all":
tasks = list(tasks_dir_path.iterdir())
else:
tasks = [tasks_dir_path.joinpath(task)]
assert os.path.exists(tasks[0]), f"{tasks[0]} does not exist"
for task_path in tqdm(tasks):
# TODO: Would be good to include the version here
# https://github.com/neuralaudio/hear2021-eval-kit/issues/37
embed_dir = embeddings_dir_path.joinpath(embedding.name + options_str)
task_name = task_path.name
embed_task_dir = embed_dir.joinpath(task_name)
done_embeddings = embed_task_dir.joinpath(".done.embeddings")
if os.path.exists(done_embeddings):
continue
if os.path.exists(embed_task_dir):
shutil.rmtree(embed_task_dir)
start = time.time()
gpu_max_mem.reset()
task_embeddings(embedding, task_path, embed_task_dir)
time_elapsed = time.time() - start
gpu_max_mem_used = gpu_max_mem.measure()
print(
f"...computed embeddings in {time_elapsed} sec "
f"(GPU max mem {gpu_max_mem_used}) "
f"for {task_path.name} using {module} {model_options}"
)
open(embed_task_dir.joinpath("profile.embeddings.json"), "wt").write(
json.dumps(
{
"time_elapsed": time_elapsed,
"gpu_max_mem": gpu_max_mem_used,
"gpu_device_name": gpu_max_mem.device_name(),
},
indent=4,
)
)
# Touch this file to indicate that processing completed successfully
open(done_embeddings, "wt")
if __name__ == "__main__":
runner()
|
"""XPO logistics LTL rate quote response Datatype definition module."""
import attr
from typing import Any, List, Union, Optional
from jstruct import JList, JStruct
@attr.s(auto_attribs=True)
class rateQuote:
    # The original definition listed these field names without type annotations
    # or defaults; the `Any = None` placeholders below are assumptions added so
    # the class can be imported. Refine the types as the XPO response schema
    # dictates.
    confirmationNbr: Any = None
    shipmentInfo: Any = None
    accessorialTariffName: Any = None
    actlDiscountPct: Any = None
    amcAmt: Any = None
    aMCInd: Any = None
    commodity: Any = None
    deficitRatingInfo: Any = None
    dscntSrcCd: Any = None
    fscTariffName: Any = None
    inboundZoneCd: Any = None
    lnhChargeAmt: Any = None
    nhChargeAmt: Any = None
    offShrSIC: Any = None
    offShrTariff: Any = None
    offShrZoneCd: Any = None
    ratingTariffName: Any = None
    serviced: Any = None
    bill2Party: Any = None
    shipperToConsigneeMiles: Any = None
    consignee: Any = None
    totAccessorialAmt: Any = None
    totCharge: Any = None
    exchangeRate: Any = None
    totDiscountAmt: Any = None
    totFSCAmt: Any = None
    totOffShrAccChargeAmt: Any = None
    totOffShrAmt: Any = None
    totOffShrFscCharge: Any = None
    totOffShrLnhChargeAmt: Any = None
    totTaxAmt: Any = None
    trailerCnt: Any = None
    vspApplied: Any = None
@attr.s(auto_attribs=True)
class transitTime:
destStateCd: Optional[str] = None
destPostalCd: Optional[str] = None
destSicCd: Optional[str] = None
estDlvrDate: Optional[str] = None
garntInd: Optional[bool] = None
latestPkupDate: Optional[str] = None
origPostalCd: Optional[str] = None
origStateCd: Optional[str] = None
origSicCd: Optional[str] = None
requestedDlvrDate: Optional[str] = None
requestedPkupDate: Optional[str] = None
transitDays: Optional[int] = None
earliestPkupDate: Optional[str] = None
note: Optional[str] = None
isPkupDateHoliday: Optional[bool] = None
isrqstdDeliveryDateHoliday: Optional[bool] = None
@attr.s(auto_attribs=True)
class msgs:
errorCd: Optional[str] = None
message: Optional[str] = None
fieldName: Optional[str] = None
fieldValue: Optional[str] = None
@attr.s(auto_attribs=True)
class RateResponse:
rateQuote: rateQuote = JStruct[rateQuote]
transitTime: transitTime = JStruct[transitTime]
    msgs: msgs = JStruct[msgs]
|
"""
usage: $ deephyper-analytics plot csv -p results.csv --xy elapsed_sec objective
"""
import sys
import matplotlib.pyplot as plt
import pandas as pd
def add_subparser(subparsers):
subparser_name = "plot"
function_to_call = main
parser = subparsers.add_parser(
subparser_name, help="Tool to generate a quick 2D plot from file."
)
subparsers = parser.add_subparsers(help="Kind of analytics.")
# best search_spaces
subparser = subparsers.add_parser("csv", help="Plot for CSV files.")
subparser.add_argument(
"--path", "-p", type=str, default="results.csv", help="Path to CSV file."
)
subparser.add_argument(
"--xy",
metavar="xy",
type=str,
nargs=2,
default=["elapsed_sec", "objective"],
help="name of x y variables in the CSV file.",
)
return subparser_name, function_to_call
def main(path, xy, *args, **kwargs):
if sys.argv[2] == "csv":
df = pd.read_csv(path)
plt.figure()
plt.scatter(df[xy[0]], df[xy[1]])
plt.xlabel(xy[0])
plt.ylabel(xy[1])
plt.show()
|
"""
Exceptions and warnings
"""
class ArimWarning(UserWarning):
pass
class InvalidDimension(ValueError):
"""
Raised when an array has an invalid dimension.
"""
@classmethod
def message_auto(cls, array_name, expected_dimension, current_dimension=None):
current = (
" (current: {})".format(current_dimension)
if current_dimension is not None
else ""
)
message = "Dimension of array '{}' must be {}{}.".format(
array_name, expected_dimension, current
)
return cls(message)
class InvalidShape(ValueError):
"""
Raised when an array has an invalid shape.
"""
@classmethod
def message_auto(cls, array_name, expected_shape, current_shape=None):
        current = (
            " (current: {})".format(current_shape) if current_shape is not None else ""
        )
        message = "Array '{}' must have shape {}{}.".format(
            array_name, expected_shape, current
        )
return cls(message)
class NotAnArray(TypeError):
def __init__(self, array_name, message=None):
if message is None:
message = " '{}' must be an array. Try to convert to numpy.array first.".format(
array_name
)
super().__init__(message)
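# Small usage sketch of the message_auto helpers above (values are illustrative).
if __name__ == "__main__":
    try:
        raise InvalidDimension.message_auto("points", 1, current_dimension=2)
    except InvalidDimension as exc:
        print(exc)  # Dimension of array 'points' must be 1 (current: 2).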
|
import unittest
import sys
import os
sys.path.insert(0,"../src/")
import SVN
import shutil
class TestSVNbackend(unittest.TestCase):
def setUp(self):
self.workerc=4
self.workdir="testdir/workdir"
self.repodir="testdir/repo"
self.text="""
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut metus massa,
sagittis consequat tincidunt non, sodales at quam. Cras bibendum, mauris eu
placerat condimentum, magna nisi laoreet massa, eget venenatis ligula velit eu
nisi. Aenean nec turpis vel nunc porta ornare. Donec dolor dolor, imperdiet vel
ultricies interdum, eleifend at lorem. Aliquam vitae nunc lacus. Suspendisse
vitae leo sed risus tempor fermentum quis ut odio. Nunc eu faucibus nunc.
Integer accumsan tempus eros, vitae placerat risus pulvinar ut. Quisque eu
congue ipsum. Fusce ultrices sapien erat, sed pulvinar erat faucibus ac. Nullam
sit amet lectus mauris. Donec et tincidunt justo. Fusce porttitor augue et
libero varius pretium. Sed aliquet metus nec quam bibendum commodo. Morbi
venenatis sagittis semper. Integer venenatis accumsan magna vel bibendum. Aenean
elementum lorem lacus, nec imperdiet velit sagittis quis. Praesent lorem metus,
consectetur et consequat sit amet, suscipit in velit. Etiam ornare augue enim.
Phasellus egestas nunc vitae nisi imperdiet, sed lacinia ante sollicitudin.
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Ut ut quam fringilla
est elementum fringilla et ut ligula. Nullam augue ipsum, porta ut turpis id,
facilisis lacinia eros. Nullam euismod fringilla massa, non lobortis tortor
placerat vitae. Cras risus mi, pulvinar quis augue at, convallis dignissim est.
Curabitur malesuada, massa a lacinia fermentum, ligula lorem molestie erat, in
consectetur risus purus ut justo. Aliquam lobortis laoreet enim, condimentum
consectetur felis. Aenean id scelerisque lectus, a placerat ex. Mauris felis
diam, interdum vitae augue sit amet, faucibus euismod velit. Vestibulum
malesuada augue at quam pharetra gravida. Vestibulum ante ipsum primis in
faucibus orci luctus et ultrices posuere cubilia Curae; Etiam tempus faucibus
justo vel vestibulum. Nulla ipsum lorem, blandit nec scelerisque ut, blandit at
"""
def _prepareRepo(self):
shutil.rmtree(self.workdir, ignore_errors=True)
shutil.rmtree(self.repodir, ignore_errors=True)
self.repo=SVN.SVNBackend(self.workdir,self.repodir,self.workerc)
def _testCleanUp(self):
for a in range(self.workerc):
for f in os.listdir(os.path.join(self.repo.workdir,"wd%d"%a)):
self.assertTrue(f.startswith('.'))
self.assertTrue(self.repo.workers.qsize()==self.workerc)
def test_add_get(self):
self._prepareRepo()
self.repo.addFile("test1","file1",self.text)
res=self.repo.getFile("test1","file1")
self.assertTrue(self.text==res)
self._testCleanUp()
def test_changefile(self):
self._prepareRepo()
self.repo.addFile("test1","file1",self.text)
res=self.repo.getFile("test1","file1")
self.assertTrue(self.text==res)
self.repo.addFile("test1","file1",self.text[10:])
res=self.repo.getFile("test1","file1")
self.assertTrue(self.text[10:]==res)
self._testCleanUp()
def test_loadCollections(self):
self._prepareRepo()
self.repo.addFile("test1","file1",self.text)
self.repo.addFile("test2","file1",self.text)
wd=self.repo.getWorkdir()
res=wd.loadCollections()
self.assertTrue(len(res)==2)
self.assertTrue("test1" in res)
self.assertTrue("test2" in res)
self.repo.freeWorkdir(wd)
self._testCleanUp()
def test_loadObjects(self):
self._prepareRepo()
self.repo.addFile("test2","file1",self.text)
self.repo.addFile("test2","file2",self.text)
wd=self.repo.getWorkdir()
res=wd.loadObjects("test2")
self.assertTrue(len(res)==2)
self.assertTrue("file2" in res)
self.assertTrue("file1" in res)
self.repo.freeWorkdir(wd)
self._testCleanUp()
def test_getFileInfo(self):
self._prepareRepo()
self.repo.addFile("test2","file1",self.text)
wd=self.repo.getWorkdir()
fp=wd.openFile("test2","file1",'r')
res=wd.getFileInfo(fp)
fp.close()
self.assertTrue("changed" in res)
self.repo.freeWorkdir(wd)
self._testCleanUp()
def test_reopen(self):
self._prepareRepo()
self.repo.addFile("test3","file1",self.text)
repo2=SVN.SVNBackend(self.workdir+"2",self.repodir,1)
res=repo2.getFile("test3","file1")
self.assertTrue(self.text==res)
self._testCleanUp()
if __name__ == '__main__':
unittest.main()
|
# blog/admin.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import xadmin
from xadmin.layout import Fieldset, Row
# from django.contrib import admin
from django.utils.html import format_html
from django.urls import reverse
from .models import Category, Tag, Post
# from typeidea.custom_site import custom_site
from .adminforms import PostAdminForm
from typeidea.adminx import BaseOwnerAdmin
# class PostAdmin(admin.ModelAdmin):
class PostAdmin(BaseOwnerAdmin):
form = PostAdminForm
list_display = [
'title',
'category',
'status_show',
'pv',
'uv',
'owner',
'created_time',
'operator']
    # Make the category and status columns clickable links
# list_display_links = ['category', 'status_show', 'title']
search_fields = ['title', 'category__name', 'owner__username']
list_filter = ['owner']
    # Show action buttons at the top
    actions_on_top = True
    # Show action buttons at the bottom
    actions_on_bottom = False
    # Date hierarchy based on creation time
    date_hierarchy = 'created_time'
    # Fields that can be edited directly in the list view
# list_editable = ['title']
    '''
    Edit page settings
    '''
    # Pin the save/delete buttons to the top of the edit page
    # save_on_top = True
    # Fields to display on the edit page
# fields = (
# 'title', 'category',
# 'tag',
# 'desc',
# 'status',
# 'content', 'is_markdown',
# )
exclude = (
'html', 'owner', 'html', 'pv', 'uv',
)
form_layout = (
Fieldset(
"基础信息",
'title',
'desc',
Row('category', 'tag', 'status'),
'is_markdown',
'content',
),
)
#
    # Do not display the status field
    # exclude = ('status',)
    # fieldsets = (  # mutually exclusive with fields
    #     ('Basic settings', {
    #         'fields': (('category', 'title'),
    #                    'desc',
    #                    'status',  # TODO(Treehl): field added later
    #                    'content')
    #     }),
    #     ('Advanced settings', {
    #         'classes': ('collapse', 'addon'),
    #         'fields': ('tag',),
    #     }),
    # )
    # Horizontal layout
    # filter_horizontal = ('tag', )
    # Vertical layout
    # filter_vertical = ('tag',)
def operator(self, obj):
return format_html(
'<a href="{}">编辑</a>',
reverse('cus_admin:blog_post_change', args=(obj.id,))
)
operator.short_description = '操作'
xadmin.site.register(Post, PostAdmin)
# xadmin.site.register(Post, PostAdmin)
# def save_model(self, request, obj, form, change):
# print self, request, obj, form, change
# obj.owner = request.user
# super(PostAdmin, self).save_model(request, obj, form, change)
# class PostInline(admin.TabularInline):
# fields = ('title', 'desc', 'status')
# extra = 2 # 控制额外多几个
# # 指定模型类
# model = Post
# class CategoryAdmin(admin.ModelAdmin):
class CategoryAdmin(BaseOwnerAdmin):
list_display = ['name', 'status', 'is_nav', 'created_time', 'operator']
fields = ('name', 'status', 'is_nav')
def operator(self, obj):
return format_html(
'<a href="{}">编辑</a>',
reverse('cus_admin:blog_category_change', args=(obj.id,))
)
operator.short_description = '操作'
xadmin.site.register(Category, CategoryAdmin)
# class TagAdmin(admin.ModelAdmin):
class TagAdmin(BaseOwnerAdmin):
list_display = ['name', 'status', 'created_time', 'operator']
fields = ('name', 'status')
def operator(self, obj):
return format_html(
'<a href="{}">编辑</a>',
reverse('cus_admin:blog_tag_change', args=(obj.id,))
)
operator.short_description = '操作'
xadmin.site.register(Tag, TagAdmin)
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.item import Item, Field
class Home(scrapy.Item):
location = Field()
rent_price = Field()
available_at = Field()
allows_pets = Field()
furnished = Field()
min_time_to_stay = Field()
deposit = Field()
source_id = Field()
description = Field()
url = Field()
title = Field()
dimensions = Field()
source = Field()
updated_at = Field()
address = Field()
geolocation = Field()
rooms = Field()
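# Minimal usage sketch of the Home item above (field values are illustrative).
if __name__ == '__main__':
    home = Home(title='2-bedroom flat', rent_price='950', rooms=2)
    home['furnished'] = True
    print(dict(home))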
|
import unittest
from solution.linked_list import LinkedList
from solution.linked_list_helpers import LinkedListHelpers
class TestCasesLinkedListHelpers(unittest.TestCase):
def execute_tests_both_linked_lists_none(self: object) -> None:
# Arrange
linked_list_helpers: LinkedListHelpers = LinkedListHelpers()
linked_list_1: LinkedList = None
linked_list_2: LinkedList = None
# Act
result_union: LinkedList = linked_list_helpers.union(
linked_list_1, linked_list_2)
result_intersection: LinkedList = linked_list_helpers.intersection(
linked_list_1, linked_list_2)
# Assert
self.assertIsNone(result_union)
self.assertIsNone(result_intersection)
def execute_tests_both_linked_lists_first_is_none(self: object) -> None:
# Arrange
linked_list_helpers: LinkedListHelpers = LinkedListHelpers()
linked_list_1: LinkedList = None
linked_list_2: LinkedList = LinkedList()
element_2 = [6, 32, 4, 9, 6, 1, 11, 21, 1]
for i in element_2:
linked_list_2.append(i)
# Act
result_union: LinkedList = linked_list_helpers.union(
linked_list_1, linked_list_2)
result_intersection: LinkedList = linked_list_helpers.intersection(
linked_list_1, linked_list_2)
# Assert
self.assertIsNone(result_union)
self.assertIsNone(result_intersection)
def execute_tests_both_linked_lists_second_is_none(self: object) -> None:
# Arrange
linked_list_helpers: LinkedListHelpers = LinkedListHelpers()
linked_list_1: LinkedList = LinkedList()
element_1 = [6, 32, 4, 9, 6, 1, 11, 21, 1]
for i in element_1:
linked_list_1.append(i)
linked_list_2: LinkedList = None
# Act
result_union: LinkedList = linked_list_helpers.union(
linked_list_1, linked_list_2)
result_intersection: LinkedList = linked_list_helpers.intersection(
linked_list_1, linked_list_2)
# Assert
self.assertIsNone(result_union)
self.assertIsNone(result_intersection)
def execute_tests_both_linked_lists_both_empty_lists(self: object) -> None:
# Arrange
linked_list_helpers: LinkedListHelpers = LinkedListHelpers()
linked_list_1: LinkedList = LinkedList()
linked_list_2: LinkedList = LinkedList()
# Act
result_union: LinkedList = linked_list_helpers.union(
linked_list_1, linked_list_2)
result_intersection: LinkedList = linked_list_helpers.intersection(
linked_list_1, linked_list_2)
# Assert
self.assertEqual(result_union.size(), 0)
self.assertEqual(result_intersection.size(), 0)
def execute_tests_both_linked_lists_first_empty_list(self: object) -> None:
# Arrange
linked_list_helpers: LinkedListHelpers = LinkedListHelpers()
linked_list_1: LinkedList = LinkedList()
linked_list_2: LinkedList = LinkedList()
element_2 = [6, 32, 4, 9, 6, 1, 11, 21, 1]
for i in element_2:
linked_list_2.append(i)
# Act
result_union: LinkedList = linked_list_helpers.union(
linked_list_1, linked_list_2)
result_intersection: LinkedList = linked_list_helpers.intersection(
linked_list_1, linked_list_2)
# Assert
result_union_list: list = sorted(result_union.to_list())
result_intersection_list: list = sorted(result_intersection.to_list())
self.assertEqual(result_union.size(), 7)
self.assertListEqual(result_union_list, [1, 4, 6, 9, 11, 21, 32])
self.assertEqual(result_intersection.size(), 0)
self.assertListEqual(result_intersection_list, [])
def execute_tests_both_linked_lists_second_empty_list(self: object) -> None:
# Arrange
linked_list_helpers: LinkedListHelpers = LinkedListHelpers()
linked_list_1: LinkedList = LinkedList()
element_1 = [6, 32, 4, 9, 6, 1, 11, 21, 1]
for i in element_1:
linked_list_1.append(i)
linked_list_2: LinkedList = LinkedList()
# Act
result_union: LinkedList = linked_list_helpers.union(
linked_list_1, linked_list_2)
result_intersection: LinkedList = linked_list_helpers.intersection(
linked_list_1, linked_list_2)
# Assert
result_union_list: list = sorted(result_union.to_list())
result_intersection_list: list = sorted(result_intersection.to_list())
self.assertEqual(result_union.size(), 7)
self.assertListEqual(result_union_list, [1, 4, 6, 9, 11, 21, 32])
self.assertEqual(result_intersection.size(), 0)
self.assertListEqual(result_intersection_list, [])
def execute_tests_both_lists_with_elements_and_overlap(self: object) -> None:
# Arrange
linked_list_helpers: LinkedListHelpers = LinkedListHelpers()
linked_list_1: LinkedList = LinkedList()
element_1 = [3, 2, 4, 35, 6, 65, 6, 4, 3, 21]
for i in element_1:
linked_list_1.append(i)
linked_list_2: LinkedList = LinkedList()
element_2 = [6, 32, 4, 9, 6, 1, 11, 21, 1]
for i in element_2:
linked_list_2.append(i)
# Act
result_union: LinkedList = linked_list_helpers.union(
linked_list_1, linked_list_2)
result_intersection: LinkedList = linked_list_helpers.intersection(
linked_list_1, linked_list_2)
# Assert
result_union_list: list = result_union.to_list()
result_union_list_sorted: list = sorted(result_union_list)
result_intersection_list: list = result_intersection.to_list()
result_intersection_list_sorted: list = sorted(
result_intersection_list)
self.assertEqual(result_union.size(), 11)
self.assertListEqual(result_union_list_sorted, [1, 2, 3, 4, 6, 9, 11, 21, 32, 35, 65])
self.assertEqual(result_intersection.size(), 3)
self.assertListEqual(result_intersection_list_sorted, [4, 6, 21])
def execute_tests_both_lists_with_elements_and_no_overlap(self: object) -> None:
# Arrange
linked_list_helpers: LinkedListHelpers = LinkedListHelpers()
linked_list_1: LinkedList = LinkedList()
element_1 = [3, 2, 4, 35, 6, 65, 6, 4, 3, 21]
for i in element_1:
linked_list_1.append(i)
linked_list_2: LinkedList = LinkedList()
element_2 = [32, 9, 1, 11, 1]
for i in element_2:
linked_list_2.append(i)
# Act
result_union: LinkedList = linked_list_helpers.union(
linked_list_1, linked_list_2)
result_intersection: LinkedList = linked_list_helpers.intersection(
linked_list_1, linked_list_2)
# Assert
result_union_list: list = result_union.to_list()
result_union_list_sorted: list = sorted(result_union_list)
result_intersection_list: list = result_intersection.to_list()
result_intersection_list_sorted: list = sorted(
result_intersection_list)
self.assertEqual(result_union.size(), 11)
self.assertListEqual(result_union_list_sorted, [1, 2, 3, 4, 6, 9, 11, 21, 32, 35, 65])
self.assertEqual(result_intersection.size(), 0)
self.assertListEqual(result_intersection_list_sorted, [])
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import get_block, get_norm
from .unet_utils import inconv, down_block, up_block
from .dual_attention_utils import DAHead
class DAUNet(nn.Module):
def __init__(self, in_ch, num_classes, base_ch=32, block='BasicBlock', pool=True):
super().__init__()
block = get_block(block)
nb = 2 # num_block
self.inc = inconv(in_ch, base_ch, block=block)
self.down1 = down_block(base_ch, 2*base_ch, num_block=nb, block=block, pool=pool)
self.down2 = down_block(2*base_ch, 4*base_ch, num_block=nb, block=block, pool=pool)
self.down3 = down_block(4*base_ch, 8*base_ch, num_block=nb, block=block, pool=pool)
self.down4 = down_block(8*base_ch, 16*base_ch, num_block=nb, block=block, pool=pool)
self.DAModule = DAHead(16*base_ch, num_classes)
self.up1 = up_block(16*base_ch, 8*base_ch, num_block=nb, block=block)
self.up2 = up_block(8*base_ch, 4*base_ch, num_block=nb, block=block)
self.up3 = up_block(4*base_ch, 2*base_ch, num_block=nb, block=block)
self.up4 = up_block(2*base_ch, base_ch, num_block=nb, block=block)
self.outc = nn.Conv2d(base_ch, num_classes, kernel_size=1)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
feat_fuse, sasc_pred, sa_pred, sc_pred = self.DAModule(x5)
out = self.up1(feat_fuse, x4)
out = self.up2(out, x3)
out = self.up3(out, x2)
out = self.up4(out, x1)
out = self.outc(out)
return out
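# Minimal smoke-test sketch for DAUNet; the input size is illustrative and only
# needs to be divisible by 16 so the four down/up stages line up. Assumes the
# helper blocks behave like a standard UNet (halve/double spatial resolution).
if __name__ == "__main__":
    model = DAUNet(in_ch=3, num_classes=2)
    dummy = torch.randn(1, 3, 128, 128)
    with torch.no_grad():
        logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([1, 2, 128, 128])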
|
def multiprocess_state_generator(video_frame_generator, stream_sha256):
"""Returns a packaged dict object for use in frame_process"""
for frame in video_frame_generator:
yield {'mode': 'video', 'main_sequence': True}
|
from unittest import TestCase
from anybadge import Badge, parse_args, main
class TestAnybadge(TestCase):
"""Test case class for anybadge package."""
def test_badge_equal_label_value_width(self):
"""Test that label and value widths are equal when text is the same."""
badge = Badge(label='a', value='a', num_padding_chars=0)
self.assertEqual(badge.label_width, badge.value_width)
def test_badge_equal_split(self):
"""Test that the color split is in the middle when label and value are equal width."""
badge = Badge(label='a', value='a')
self.assertEqual(int(badge.badge_width / 2), badge.color_split_position)
def test_badge_equal_split_no_padding(self):
"""Test that the color split is in the middle when label and value are equal width."""
badge = Badge(label='a', value='a', num_padding_chars=0)
self.assertEqual(int(badge.badge_width / 2), badge.color_split_position)
def test_badge_width_with_long_value_text(self):
"""Test the width of a badge generated with a long text value."""
badge = Badge(label='CppCheck',
value='err: 2 | warn: 9 | info: 99 | style: 365',
default_color='red')
badge.write_badge('test_badge_1.svg', overwrite=True)
self.assertLessEqual(badge.badge_width, 326)
def test_badge_width_with_long_value_text_zero_padding(self):
"""Test the width of a badge generated with a long text value."""
badge = Badge(label='CppCheck',
value='err: 2 | warn: 9 | info: 99 | style: 365',
default_color='red',
num_padding_chars=0)
badge.write_badge('test_badge_2.svg', overwrite=True)
self.assertLessEqual(badge.badge_width, 306)
def test_badge_width_with_medium_value_text(self):
"""Test the width of a badge generated with a medium text value."""
badge = Badge(label='medium',
value='89.67%',
default_color='green')
badge.write_badge('test_badge_medium.svg', overwrite=True)
self.assertLessEqual(badge.badge_width, 138)
def test_badge_width_with_medium_value_text_zero_pad(self):
"""Test the width of a badge generated with a medium text value."""
badge = Badge(label='medium no padding',
value='89.67%',
default_color='green',
num_padding_chars=0)
badge.write_badge('test_badge_medium_no_padding.svg', overwrite=True)
self.assertLessEqual(badge.badge_width, 156)
def test_badge_width_with_short_value_text(self):
"""Test the width of a badge generated with a short text value."""
badge = Badge(label='short',
value='1',
default_color='green')
badge.write_badge('test_badge_short.svg', overwrite=True)
self.assertLessEqual(badge.badge_width, 101)
def test_badge_width_with_short_value_text_zero_pad(self):
"""Test the width of a badge generated with a short text value."""
badge = Badge(label='short value no padding',
value='1',
default_color='green',
num_padding_chars=0)
badge.write_badge('test_badge_short_no_padding.svg', overwrite=True)
self.assertLessEqual(badge.badge_width, 143)
def test_badge_width_with_tiny_value_text(self):
"""Test the width of a badge generated with a short text value."""
badge = Badge(label='a',
value='1',
default_color='green')
badge.write_badge('test_badge_tiny_text_value.svg', overwrite=True)
self.assertLessEqual(badge.badge_width, 76)
def test_badge_width_with_tiny_value_text_no_padding(self):
"""Test the width of a badge generated with a short text value."""
badge = Badge(label='a',
value='1',
default_color='green',
num_padding_chars=0)
badge.write_badge('test_badge_tiny_text_value_no_padding.svg', overwrite=True)
self.assertLessEqual(badge.badge_width, 76)
def test_badge_with_thresholds(self):
"""Test generating a badge using thresholds."""
thresholds = {
2: 'red', 4: 'orange', 6: 'green', 8: 'brightgreen'
}
badge = Badge('thresholds', '2.22', value_suffix='%',
thresholds=thresholds)
badge.write_badge('test_badge_thresholds.svg', overwrite=True)
def test_badge_with_text_color(self):
"""Test generating a badge with alternate text_color."""
badge = Badge('text color', '2.22', value_suffix='%',
text_color='#010101,#101010')
badge.write_badge('test_badge_text_color.svg', overwrite=True)
def test_multiple_badges_in_one_session(self):
badges = [
Badge('multiple 1', value='100', value_suffix='%', num_padding_chars=0),
Badge('multiple 2', value='1234567890'),
]
self.assertNotEqual(badges[0].badge_width, badges[1].badge_width)
def test_multiple_badges_get_different_mask_id(self):
badges = [
Badge('multiple 1', value='100', value_suffix='%', num_padding_chars=0),
Badge('multiple 2', value='1234567890'),
]
self.assertNotEqual(badges[0].mask_id, badges[1].mask_id)
def test_integer_str_value_is_handled_as_integer(self):
badge = Badge('integer', value='1234')
self.assertTrue(badge.value_is_int)
self.assertFalse(badge.value_is_float)
badge.write_badge('test_badge_int_str.svg', overwrite=True)
def test_integer_int_value_is_handled_as_integer(self):
badge = Badge('integer', value=1234)
self.assertTrue(badge.value_is_int)
self.assertFalse(badge.value_is_float)
badge.write_badge('test_badge_int.svg', overwrite=True)
def test_float_str_value_is_handled_as_float(self):
badge = Badge('float str', value='1234.1')
self.assertFalse(badge.value_is_int)
self.assertTrue(badge.value_is_float)
badge.write_badge('test_badge_float_str.svg', overwrite=True)
def test_float_value_is_handled_as_float(self):
badge = Badge('float int', value=1234.1)
self.assertFalse(badge.value_is_int)
self.assertTrue(badge.value_is_float)
badge.write_badge('test_badge_float.svg', overwrite=True)
def test_float_value_with_zero_decimal(self):
badge = Badge('float with zeros', value='10.00')
self.assertFalse(badge.value_is_int)
self.assertTrue(badge.value_is_float)
badge.write_badge('test_badge_float_zeros.svg', overwrite=True)
def test_float_value_with_non_zero_decimal(self):
badge = Badge('float str no decimal', value='10.01')
self.assertFalse(badge.value_is_int)
self.assertTrue(badge.value_is_float)
badge.write_badge('test_badge_float-str-no-decimal.svg', overwrite=True)
def test_padding_label(self):
badge = Badge('label padding', value='10.01', num_label_padding_chars=2)
badge.write_badge('test_badge_padding_label.svg', overwrite=True)
def test_padding_value(self):
badge = Badge('value padding', value='10.01', num_value_padding_chars=2)
badge.write_badge('test_badge_padding_value.svg', overwrite=True)
def test_value_formatting(self):
badge = Badge('value formatting', value="10", value_format="%s hits/sec")
self.assertEqual("10 hits/sec", badge.value_text)
def test_font_name(self):
font = 'Arial, Helvetica, sans-serif'
badge = Badge('font', value=font, font_name=font)
badge.write_badge('test_badge_font.svg', overwrite=True)
badge_repr = repr(badge)
self.assertTrue("font_name='Arial, Helvetica, sans-serif'" in badge_repr)
def test_invalid_font_name(self):
font = 'Invalid font'
with self.assertRaises(ValueError):
_ = Badge('font', value=font, font_name=font)
def test_font_size(self):
for size in [10, 11, 12]:
badge = Badge('font size', value=size, font_size=size)
badge.write_badge('test_badge_font_size_%s.svg' % size, overwrite=True)
def test_font_size_repr(self):
badge = Badge('font size', value=10, font_size=10)
badge_repr = repr(badge)
self.assertTrue("font_size=10" in badge_repr)
def test_template_from_file(self):
file = "tests/template.svg"
badge = Badge('template from file', value=file, template=file)
_ = badge.badge_svg_text
def test_repr_svg(self):
badge = Badge('label', 'value')
self.assertEqual(badge.badge_svg_text, badge._repr_svg_())
def test_str_value_with_threshold_and_default(self):
badge = Badge('label', value='fred', thresholds={'pass': 'green', 'fail': 'red'}, default_color='orange')
        self.assertEqual('orange', badge.badge_color)
def test_invalid_color(self):
with self.assertRaises(ValueError):
badge = Badge('label', value='fred', default_color='floberry')
_ = badge.badge_color_code
def test_invalid_write_path(self):
badge = Badge('label', 'value')
with self.assertRaisesRegexp(ValueError, r'File location may not be a directory\.'):
badge.write_badge('tests/')
with self.assertRaisesRegexp(RuntimeError, r'File ".*tests\/exists\.svg" already exists\.'):
badge.write_badge('tests/exists')
badge.write_badge('tests/exists')
def test_arg_parsing(self):
args = parse_args(['-l', 'label', '-v', 'value'])
self.assertEqual('label', args.label)
self.assertEqual('value', args.value)
def test_main_print(self):
main(['--label', 'label', '--value', 'value'])
def test_main_write_to_file(self):
main(['--label', 'label', '--value', 'value', '--file', 'test_badge_main.svg', '--overwrite'])
def test_main_thresholds(self):
main([
'--label', 'label',
'--value', 'value',
'--file', 'test_badge_main_threshold.svg',
'--overwrite',
'2=red', '4=orange'])
def test_named_threshold(self):
main([
'--value', 'value',
'--file', 'test_badge_main_named_threshold.svg',
'--overwrite',
'coverage'])
def test_main_missing_value(self):
with self.assertRaisesRegexp(ValueError, r'Label has not been set\. Please use --label argument\.'):
main(['--value', '123', '--file', 'test_badge_main.svg', '--overwrite'])
|
import re
import json
import socket
import asyncio
import logging
from time import time
import websockets
from .util import Queue, get as default_get, current_task
from .error import (
SocketIOError, ConnectionFailed,
ConnectionClosed, PingTimeout
)
from .proxy import ProxyError
class SocketIOResponse:
"""socket.io event response.
Attributes
----------
id : `int`
match : `function`(`str`, `object`)
future : `asyncio.Future`
"""
MAX_ID = 2 ** 32
last_id = 0
def __init__(self, match):
self.id = (self.last_id + 1) % self.MAX_ID
self.__class__.last_id = self.id
self.match = match
self.future = asyncio.Future()
def __eq__(self, res):
if isinstance(res, SocketIOResponse):
return self is res
return self.id == res
def __str__(self):
return '<SocketIOResponse #%d>' % self.id
__repr__ = __str__
def set(self, value):
self.future.set_result(value)
def cancel(self, ex=None):
if not self.future.done():
if ex is None:
self.future.cancel()
else:
self.future.set_exception(ex)
@staticmethod
def match_event(ev=None, data=None):
def match(ev_, data_):
if not re.match(ev, ev_):
return False
if data is not None:
if isinstance(data, dict):
if not isinstance(data_, dict):
return False
for key, value in data.items():
if value != data_.get(key):
return False
else:
raise NotImplementedError('match_event !isinstance(data, dict)')
return True
return match
class SocketIO:
"""Asynchronous socket.io connection.
Attributes
----------
websocket : `websockets.client.WebSocketClientProtocol`
Websocket connection.
ping_interval : `float`
Ping interval in seconds.
ping_timeout : `float`
Ping timeout in seconds.
error : `None` or `Exception`
events : `asyncio.Queue` of ((`str`, `object`) or `None`)
Event queue.
response : `list` of `cytube_bot.socket_io.SocketIOResponse`
response_lock : `asyncio.Lock`
ping_task : `asyncio.tasks.Task`
recv_task : `asyncio.tasks.Task`
close_task : `asyncio.tasks.Task`
closing : `asyncio.Event`
closed : `asyncio.Event`
ping_response : `asyncio.Event`
loop : `asyncio.events.AbstractEventLoop`
Event loop.
"""
logger = logging.getLogger(__name__)
def __init__(self, websocket, config, qsize, loop):
"""
Parameters
----------
websocket : `websockets.client.WebSocketClientProtocol`
Websocket connection.
config : `dict`
Websocket configuration.
qsize : `int`
Event queue size.
loop : `asyncio.events.AbstractEventLoop`
Event loop.
"""
self.websocket = websocket
self.loop = loop
self._error = None
self.closing = asyncio.Event(loop=self.loop)
self.closed = asyncio.Event(loop=self.loop)
self.ping_response = asyncio.Event(loop=self.loop)
self.events = Queue(maxsize=qsize, loop=self.loop)
self.response = []
self.response_lock = asyncio.Lock()
self.ping_interval = max(1, config.get('pingInterval', 10000) / 1000)
self.ping_timeout = max(1, config.get('pingTimeout', 10000) / 1000)
self.ping_task = self.loop.create_task(self._ping())
self.recv_task = self.loop.create_task(self._recv())
self.close_task = None
@property
def error(self):
return self._error
@error.setter
def error(self, ex):
if self._error is not None:
self.logger.info('error already set: %r', self._error)
return
self.logger.info('set error %r', ex)
self._error = ex
if ex is not None:
self.logger.info('create close task')
self.close_task = self.loop.create_task(self.close())
@asyncio.coroutine
def close(self):
"""Close the connection.
"""
self.logger.info('close')
if self.close_task is not None:
if self.close_task is current_task(self.loop):
self.logger.info('current task is close task')
else:
self.logger.info('wait for close task')
yield from asyncio.wait_for(self.close_task,
None, loop=self.loop)
if self.closed.is_set():
self.logger.info('already closed')
return
if self.closing.is_set():
self.logger.info('already closing, wait')
yield from self.closed.wait()
return
self.closing.set()
try:
if self._error is None:
self.logger.info('set error')
self._error = ConnectionClosed()
else:
self.logger.info('error already set: %r', self._error)
self.logger.info('queue null event')
try:
self.events.put_nowait(None)
except asyncio.QueueFull:
pass
self.logger.info('set response future exception')
for res in self.response:
res.cancel(self.error)
self.response = []
self.logger.info('cancel ping task')
self.ping_task.cancel()
self.logger.info('cancel recv task')
self.recv_task.cancel()
self.logger.info('wait for tasks')
yield from asyncio.wait_for(
asyncio.gather(self.ping_task, self.recv_task),
None, loop=self.loop
)
self.ping_response.clear()
self.logger.info('close websocket')
yield from self.websocket.close()
self.logger.info('clear event queue')
while not self.events.empty():
ev = yield from self.events.get()
self.events.task_done()
if isinstance(ev, Exception):
self.error = ev
#yield from self.events.join()
finally:
self.ping_task = None
self.recv_task = None
self.websocket = None
self.closed.set()
@asyncio.coroutine
def recv(self):
"""Receive an event.
Returns
-------
(`str`, `object`)
Event name and data.
Raises
------
`ConnectionClosed`
"""
if self.error is not None:
raise self.error # pylint:disable=raising-bad-type
ev = yield from self.events.get()
self.events.task_done()
if ev is None:
raise self.error # pylint:disable=raising-bad-type
return ev
@asyncio.coroutine
def emit(self, event, data, match_response=False, response_timeout=None):
"""Send an event.
Parameters
----------
event : `str`
Event name.
data : `object`
Event data.
match_response : `function` or `None`, optional
Response match function.
response_timeout : `float` or `None`, optional
Response timeout in seconds.
Returns
-------
`object`
Response data if `get_response` is `True`.
Raises
------
`asyncio.CancelledError`
`SocketIOError`
"""
if self.error is not None:
raise self.error # pylint:disable=raising-bad-type
data = '42%s' % json.dumps((event, data))
self.logger.info('emit %s', data)
release = False
response = None
try:
if match_response is not None:
yield from self.response_lock.acquire()
release = True
response = SocketIOResponse(match_response)
self.logger.info('get response %s', response)
self.response.append(response)
yield from self.websocket.send(data)
if match_response is not None:
self.response_lock.release()
release = False
if response_timeout is not None:
res = asyncio.wait_for(response.future,
response_timeout,
loop=self.loop)
else:
res = response.future
try:
res = yield from res
self.logger.info('%s', res)
except asyncio.CancelledError:
self.logger.info('response cancelled %s', event)
raise
except asyncio.TimeoutError as ex:
self.logger.info('response timeout %s', event)
response.cancel()
res = None
finally:
yield from self.response_lock.acquire()
try:
self.response.remove(response)
except ValueError:
pass
finally:
self.response_lock.release()
self.logger.info('response %s %r', event, res)
return res
except asyncio.CancelledError:
self.logger.error('emit cancelled')
raise
except Exception as ex:
self.logger.error('emit error: %r', ex)
if not isinstance(ex, SocketIOError):
ex = SocketIOError(ex)
raise ex
finally:
if release:
self.response_lock.release()
@asyncio.coroutine
def _ping(self):
"""Ping task."""
try:
dt = 0
while self.error is None:
yield from asyncio.sleep(max(self.ping_interval - dt, 0))
self.logger.debug('ping')
self.ping_response.clear()
dt = time()
yield from self.websocket.send('2')
yield from asyncio.wait_for(
self.ping_response.wait(),
self.ping_timeout,
loop=self.loop
)
dt = max(time() - dt, 0)
except asyncio.CancelledError:
self.logger.info('ping cancelled')
except asyncio.TimeoutError:
self.logger.error('ping timeout')
self.error = PingTimeout()
except (socket.error,
ProxyError,
websockets.exceptions.ConnectionClosed,
websockets.exceptions.InvalidState,
websockets.exceptions.PayloadTooBig,
websockets.exceptions.WebSocketProtocolError
) as ex:
self.logger.error('ping error: %r', ex)
self.error = ConnectionClosed(ex)
@asyncio.coroutine
def _recv(self):
"""Read task."""
try:
while self.error is None:
data = yield from self.websocket.recv()
self.logger.debug('recv %s', data)
if data.startswith('2'):
data = data[1:]
self.logger.debug('ping %s', data)
yield from self.websocket.send('3' + data)
elif data.startswith('3'):
self.logger.debug('pong %s', data[1:])
self.ping_response.set()
elif data.startswith('4'):
try:
if data[1] == '0':
event = ''
data = None
elif data[1] == '1':
event = data[2:]
data = None
else:
data = json.loads(data[2:])
if not isinstance(data, list):
raise ValueError('not an array')
if len(data) == 0:
raise ValueError('empty array')
if len(data) == 1:
event, data = data[0], None
elif len(data) == 2:
event, data = data
else:
event = data[0]
data = data[1:]
except ValueError as ex:
self.logger.error('invalid event %s: %r', data, ex)
else:
self.logger.debug('event %s %s', event, data)
yield from self.events.put((event, data))
for response in self.response:
if response.match(event, data):
self.logger.debug('response %s %s', event, data)
response.set((event, data))
break
else:
self.logger.warning('unknown event: "%s"', data)
except asyncio.CancelledError:
self.logger.info('recv cancelled')
self.error = ConnectionClosed()
except (socket.error,
ProxyError,
websockets.exceptions.ConnectionClosed,
websockets.exceptions.InvalidState,
websockets.exceptions.PayloadTooBig,
websockets.exceptions.WebSocketProtocolError
) as ex:
self.logger.error('recv error: %r', ex)
self.error = ConnectionClosed(ex)
except Exception as ex:
self.error = ConnectionClosed(ex)
raise
@classmethod
def _get_config(cls, url, loop, get):
"""Get socket configuration.
Parameters
----------
url : `str`
get : `function`
Returns
-------
`dict`
Socket id, ping timeout, ping interval.
"""
url = url + '?EID=2&transport=polling'
cls.logger.info('get %s', url)
data = yield from get(url, loop=loop)
try:
data = json.loads(data[data.index('{'):])
if 'sid' not in data:
raise ValueError('no sid in %s' % data)
except ValueError:
raise websockets.exceptions.InvalidHandshake(data)
return data
@classmethod
@asyncio.coroutine
def _connect(cls, url, qsize, loop, get, connect):
"""Create a connection.
Parameters
----------
url : `str`
qsize : `int`
loop : `asyncio.events.AbstractEventLoop`
get : `function`
connect : `function`
Returns
-------
`SocketIO`
"""
conf = yield from cls._get_config(url, loop, get)
sid = conf['sid']
cls.logger.info('sid=%s', sid)
url = '%s?EID=3&transport=websocket&sid=%s' % (
url.replace('http', 'ws', 1), sid
)
cls.logger.info('connect %s', url)
websocket = yield from connect(url, loop=loop)
try:
cls.logger.info('2probe')
yield from websocket.send('2probe')
res = yield from websocket.recv()
cls.logger.info('3probe')
if res != '3probe':
                raise websockets.exceptions.InvalidHandshake(
                    'invalid response: "%s" != "3probe"' % res
                )
cls.logger.info('upgrade')
yield from websocket.send('5')
return SocketIO(websocket, conf, qsize, loop)
except:
yield from websocket.close()
raise
@classmethod
@asyncio.coroutine
def connect(cls,
url,
retry=-1,
retry_delay=1,
qsize=0,
loop=None,
get=default_get,
connect=websockets.connect):
"""Create a connection.
Parameters
----------
url : `str`
socket.io URL.
retry : `int`
Maximum number of tries.
retry_delay : `float`
Delay between tries in seconds.
qsize : `int`
Event queue size.
loop : `None` or `asyncio.events.AbstractEventLoop`
Event loop.
get : `function`
HTTP GET request coroutine.
connect : `function`
Websocket connect coroutine.
Returns
-------
`SocketIO`
Raises
------
`ConnectionFailed`
`asyncio.CancelledError`
"""
loop = loop or asyncio.get_event_loop()
i = 0
while True:
try:
io = yield from cls._connect(url, qsize, loop, get, connect)
return io
except asyncio.CancelledError:
cls.logger.error(
'connect(%s) (try %d / %d): cancelled',
url, i + 1, retry + 1
)
raise
except Exception as ex:
cls.logger.error(
'connect(%s) (try %d / %d): %r',
url, i + 1, retry + 1, ex
)
if i == retry:
raise ConnectionFailed(ex)
i += 1
yield from asyncio.sleep(retry_delay)
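# Rough usage sketch in the same generator-coroutine style as the code above
# (the URL is hypothetical; a reachable socket.io endpoint is assumed):
#
#   @asyncio.coroutine
#   def demo(loop):
#       io = yield from SocketIO.connect('http://example.com/socket.io/', loop=loop)
#       try:
#           yield from io.emit('login', {'name': 'guest'})
#           event, data = yield from io.recv()
#       finally:
#           yield from io.close()
#
#   loop = asyncio.get_event_loop()
#   loop.run_until_complete(demo(loop))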
|
import os
from dotenv import load_dotenv
if os.path.isfile('./.env'):
load_dotenv()
JWT_SECRET = os.getenv('JWT_SECRET', 'pass')
PORT = os.getenv('PORT', '8000')
DEBUG = os.getenv('MODE', 'production') == 'development'
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-03-29 05:03
from __future__ import unicode_literals
from django.db import migrations
import tagging.fields
class Migration(migrations.Migration):
dependencies = [
('blog', '0006_auto_20180328_2107'),
]
operations = [
migrations.AddField(
model_name='link',
name='tags',
field=tagging.fields.TagField(blank=True, max_length=255),
),
]
|
from __future__ import print_function
from .logger import logger
from .kinto2yaml import introspect_server
async def initialize_server(async_client, config, bucket=None, collection=None,
force=False, delete_missing_records=False):
logger.debug("Converting YAML config into a server batch.")
bid = bucket
cid = collection
# 1. Introspect current server state.
if not force or delete_missing_records:
current_server_status = await introspect_server(
async_client,
bucket=bucket,
collection=collection,
records=True
)
current_server_buckets = current_server_status["buckets"]
else:
# We don't need to load it because we will override it nevertheless.
current_server_buckets = {}
# 2. For each bucket
if 'buckets' in config:
buckets = config.get("buckets", {})
else: # pragma: no cover
# Legacy for file before kinto-wizard 4.0
logger.warning("Your file seems to be in legacy format. "
"Please add a `buckets:` root level.")
buckets = config
with async_client.batch() as batch:
for bucket_id, bucket in buckets.items():
# Skip buckets that we don't want to import.
if bid and bucket_id != bid:
logger.debug("Skip bucket {}".format(bucket_id))
continue
bucket_exists = bucket_id in current_server_buckets
bucket_data = bucket.get('data', {})
bucket_permissions = bucket.get('permissions', {})
bucket_groups = bucket.get('groups', {})
bucket_collections = bucket.get('collections', {})
# Skip bucket if we don't have a collection in them
if cid and cid not in bucket_collections:
logger.debug("Skip bucket {}".format(bucket_id))
continue
if not bucket_exists:
bucket_current_groups = {}
bucket_current_collections = {}
# Create the bucket if not present in the introspection
batch.create_bucket(id=bucket_id,
data=bucket_data,
permissions=bucket_permissions,
safe=(not force))
else:
current_bucket = current_server_buckets[bucket_id]
bucket_current_groups = {}
bucket_current_collections = {}
current_bucket_data = {}
current_bucket_permissions = {}
if current_bucket:
bucket_current_groups = current_bucket.get('groups', {})
bucket_current_collections = current_bucket.get('collections', {})
# Patch the bucket if mandatory
current_bucket_data = current_bucket.get('data', {})
current_bucket_permissions = current_bucket.get('permissions', {})
if (current_bucket_data != bucket_data or
current_bucket_permissions != bucket_permissions):
batch.patch_bucket(id=bucket_id,
data=bucket_data,
permissions=bucket_permissions)
# 2.1 For each group, patch it if needed
for group_id, group_info in bucket_groups.items():
group_exists = bucket_exists and group_id in bucket_current_groups
group_data = group_info.get('data', {})
group_permissions = group_info.get('permissions', {})
if not group_exists:
batch.create_group(id=group_id,
bucket=bucket_id,
data=group_data,
permissions=group_permissions,
safe=(not force))
else:
current_group = bucket_current_groups[group_id]
current_group_data = current_group.get('data', {})
current_group_permissions = current_group.get('permissions', {})
if (current_group_data != group_data or
current_group_permissions != group_permissions):
batch.patch_group(id=group_id,
bucket=bucket_id,
data=group_data,
permissions=group_permissions)
# 2.2 For each collection patch it if mandatory
for collection_id, collection in bucket_collections.items():
# Skip collections that we don't want to import.
if cid and collection_id != cid:
logger.debug("Skip collection {}/{}".format(bucket_id, collection_id))
continue
collection_exists = bucket_exists and collection_id in bucket_current_collections
collection_data = collection.get('data', {})
collection_permissions = collection.get('permissions', {})
if not collection_exists:
batch.create_collection(id=collection_id,
bucket=bucket_id,
data=collection_data,
permissions=collection_permissions,
safe=(not force))
else:
current_collection = bucket_current_collections[collection_id]
current_collection_data = current_collection.get('data', {})
current_collection_permissions = current_collection.get('permissions', {})
if (current_collection_data != collection_data or
current_collection_permissions != collection_permissions):
batch.patch_collection(id=collection_id,
bucket=bucket_id,
data=collection_data,
permissions=collection_permissions)
# 2.2.1 For each collection, create its records.
collection_records = collection.get('records', {})
for record_id, record in collection_records.items():
record_exists = (collection_exists and
record_id in current_collection.get('records', {}))
record_data = record.get('data', {})
record_permissions = record.get('permissions', None)
if not record_exists:
batch.create_record(id=record_id,
bucket=bucket_id,
collection=collection_id,
data=record_data,
permissions=record_permissions,
safe=(not force))
else:
current_record = current_collection['records'][record_id]
current_record_data = current_record.get('data', {})
current_record_permissions = current_record.get('permissions', {})
if (current_record_data != record_data or
current_record_permissions != record_permissions):
batch.update_record(id=record_id,
bucket=bucket_id,
collection=collection_id,
data=record_data,
permissions=record_permissions)
if delete_missing_records and collection_exists and collection_records:
# Fetch all records IDs
file_records_ids = set(collection_records.keys())
server_records_ids = set(current_collection['records'].keys())
to_delete = server_records_ids - file_records_ids
if not force:
message = ("Are you sure that you want to delete the "
"following {} records?".format(len(list(to_delete))))
value = input(message)
if value.lower() not in ['y', 'yes']:
print("Exiting")
exit(1)
for record_id in to_delete:
batch.delete_record(id=record_id,
bucket=bucket_id,
collection=collection_id)
logger.debug('Sending batch:\n\n%s' % batch.session.requests)
logger.info("Batch uploaded")
|
dict = dict()
lista = list()
dict['nome'] = str(input('Player name: ')).capitalize()
partidas = int(input(f'How many matches did {dict["nome"]} play? '))
for c in range(0, partidas):
    lista.append(int(input(f'    How many goals in match {c}? ')))
dict['gols'] = lista[:]
dict['total'] = sum(lista)
print('-='*30)
print(dict)
print('-='*30)
for k, v in dict.items():
    print(f'The field {k} has the value {v}')
print('-='*30)
print(f'The player {dict["nome"]} played {partidas} matches.')
for i, v in enumerate(lista):
    print(f'   => In match {i}, scored {v} goals.')
print(dict['total'])
|
#source: https://medium.com/@joel.barmettler/how-to-upload-your-python-package-to-pypi-65edc5fe9c56
#README.md: https://dillinger.io/
from distutils.core import setup
setup(
name = 'ChoateStudentHelp', # How you named your package folder (MyLib)
  packages = ['ChoateStudentHelp'],   # Choose the same as "name"
version = '0.1', # Start with a small number and increase it with every change you make
  license='MIT',        # Choose a license from here: https://help.github.com/articles/licensing-a-repository
description = 'This module includes 6 distinct functions that I thought would be useful in my life as a student of Choate Rosemary Hall.', # Give a short description about your library
author = 'Brian Harder', # Type in your name
author_email = 'bharder23@choate.edu', # Type in your E-Mail
url = 'https://github.com/brianHarder/ChoateStudentHelp', # Provide either the link to your github or to your website
download_url = 'https://github.com/brianHarder/ChoateStudentHelp/archive/v_01.tar.gz', # I explain this later on
keywords = ['useful', 'unique'], # Keywords that define your package best
install_requires=[ # I get to this in a second
'yfinance',
'stockquotes',
'sympy',
'Pillow',
'geopy',
'matplotlib',
'bs4',
],
classifiers=[
    'Development Status :: 3 - Alpha',      # Choose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
    'Programming Language :: Python :: 3',      # Specify which Python versions you want to support
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.9',
],
)
|
import unittest
from translator import english_to_french, french_to_english
class Testetof1(unittest.TestCase):
def test1(self):
self.assertEqual(english_to_french("Hello"),"Bonjour")
class Testetof2(unittest.TestCase):
def test1(self):
self.assertEqual(english_to_french(" ")," ")
class Testftoe1(unittest.TestCase):
def test1(self):
self.assertEqual(french_to_english("Bonjour"),"Hello")
class Testftoe2(unittest.TestCase):
def test1(self):
self.assertEqual(french_to_english(" ")," ")
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: Komal Thareja (kthare10@renci.org)
from fabric_mb.message_bus.message_bus_exception import MessageBusException
from fabric_mb.message_bus.messages.term_avro import TermAvro
class ResourceTicketAvro:
def __init__(self):
self.guid = None
self.term = None
self.units = 0
self.properties = None
self.type = None
self.issuer = None
self.holder = None
def from_dict(self, value: dict):
"""
The Avro Python library does not support code generation.
For this reason we must provide conversion from dict to our class for de-serialization
:param value: incoming message dictionary
"""
self.guid = value.get('guid', None)
temp = value.get('term', None)
if temp is not None:
self.term = TermAvro()
self.term.from_dict(value=temp)
self.units = value.get('units', 0)
self.properties = value.get('properties', None)
self.type = value.get('type', None)
self.issuer = value.get('issuer', None)
self.holder = value.get('holder', None)
def to_dict(self) -> dict:
"""
The Avro Python library does not support code generation.
For this reason we must provide a dict representation of our class for serialization.
:return dict representing the class
"""
if not self.validate():
raise MessageBusException("Invalid arguments")
result = {"guid": self.guid, "units": self.units}
if self.term is not None:
result["term"] = self.term.to_dict()
if self.properties is not None:
result["properties"] = self.properties
if self.type is not None:
result["type"] = self.type
if self.issuer is not None:
result["issuer"] = self.issuer
if self.holder is not None:
result["holder"] = self.holder
return result
def get_guid(self) -> str:
return self.guid
def get_units(self) -> int:
return self.units
def get_term(self) -> TermAvro:
return self.term
def get_properties(self) -> dict:
return self.properties
def get_type(self) -> str:
return self.type
def get_issuer(self) -> str:
return self.issuer
def get_holder(self) -> str:
return self.holder
def set_guid(self, guid: str):
self.guid = guid
def set_units(self, units: int):
self.units = units
def set_term(self, term: TermAvro):
self.term = term
def set_properties(self, properties: dict):
self.properties = properties
def set_type(self, rtype: str):
self.type = rtype
def set_issuer(self, issuer: str):
self.issuer = issuer
def set_holder(self, holder: str):
self.holder = holder
def __str__(self):
return f"guid: {self.guid} units: {self.units} term: {self.term} properties: {self.properties} " \
f"type: {self.type} issuer: {self.issuer} holder: {self.holder}"
def validate(self) -> bool:
"""
Check if the object is valid and contains all mandatory fields
:return True on success; False on failure
"""
ret_val = True
if self.guid is None or self.units is None or self.type is None:
ret_val = False
return ret_val
def __eq__(self, other):
if not isinstance(other, ResourceTicketAvro):
return False
return self.guid == other.guid and self.units == other.units and self.term == other.term and \
self.properties == other.properties and self.type == other.type and self.issuer == other.issuer and \
self.holder == other.holder
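# Hypothetical usage sketch (illustrative values, not part of the message bus API):
def _resource_ticket_round_trip_example():
    """to_dict() requires guid, units and type; from_dict() rebuilds an equal object."""
    ticket = ResourceTicketAvro()
    ticket.set_guid("guid-1")
    ticket.set_units(4)
    ticket.set_type("VM")
    restored = ResourceTicketAvro()
    restored.from_dict(ticket.to_dict())
    assert restored == ticket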
|
#!/usr/bin/env python
import curses
import os
from box import Box
from utils import load_yaml
def main(screen):
"""
Draws and redraws the screen.
"""
# Hide the cursor.
curses.curs_set(0)
# Load config from file.
config = load_yaml(os.path.expanduser('~/.suave/config.yml'))
# Create boxes from config.
boxes = []
for box in config:
boxes.append(
Box(
screen=screen,
rows=box['rows'],
columns=box['columns'],
rows_offset=box['rows-offset'],
columns_offset=box['columns-offset'],
command=box['command'],
interval=box['interval'],
)
)
while True:
# Redraw the screen only when it changes.
if screen.is_wintouched():
screen.clear()
screen.refresh()
# Give every box an opportunity to redraw if it has changed.
[box.redraw_if_changed() for box in boxes]
# Wait before redrawing again.
curses.napms(1000)
curses.wrapper(main)
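# Example ~/.suave/config.yml (keys mirror those read in main() above; the
# values are only illustrative):
#
#   - rows: 10
#     columns: 40
#     rows-offset: 0
#     columns-offset: 0
#     command: date
#     interval: 5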
|
# -*- coding: utf-8 -*-
"""
filtering exceptions module.
"""
from pyrin.core.exceptions import CoreException, CoreBusinessException
class FilteringException(CoreException):
"""
filtering exception.
"""
pass
class FilteringBusinessException(CoreBusinessException, FilteringException):
"""
filtering business exception.
"""
pass
|
import timeit
from itertools import product
import pickle
import os
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from sphere.distribution import fb8, FB8Distribution, fb8_mle, spa
plt.style.use('paper.mplstyle')
def grid(npts):
return [_.flatten() for _ in np.meshgrid(np.linspace(0, np.pi, npts), np.linspace(0,2*np.pi, npts))]
def make_title(fb8, kbdec=0):
def FBname(n):
return r'\rm{{FB}}_{}'.format(n)
def FBtitle(n, ps):
return r'${}({})$'.format(FBname(n), ps)
kapbet = r'\kappa = {:.'+str(kbdec)+r'f}, \beta = {:.'+str(kbdec)+r'f}'
kapbet = kapbet.format(fb8.kappa, fb8.beta)
if fb8.nu[0] == 1.:
if fb8.eta == 1.:
return FBtitle(5, kapbet)
if fb8.eta == -1.:
return FBtitle(4, kapbet)
return FBtitle(6, kapbet+r', \eta={:.1g}'.format(fb8.eta))
return FBtitle(8, kapbet+r', \eta={:.1g}, \vec{{\nu}}=({:.3g},{:.3g},{:.3g})'.format(
fb8.eta, np.round(fb8.nu[0],3), np.round(fb8.nu[1],3), np.round(fb8.nu[2],3)))
def plot_fb8(fb8, npts):
"""
Plot fb8 on 3D sphere
"""
xs = fb8.spherical_coordinates_to_nu(*grid(npts))
pdfs = fb8.pdf(xs)
z,x,y = xs.T
fig = plt.figure(figsize=plt.figaspect(1.))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x.reshape(npts, npts),
y.reshape(npts, npts),
z.reshape(npts, npts),
alpha=0.5,
rstride=1, cstride=1,
facecolors=cm.gray(pdfs.reshape(npts, npts)/pdfs.max()))
# ax.set_xticks([])
# ax.set_yticks([])
# ax.set_zticks([])
ax.set_axis_off()
ax.set_title(make_title(fb8), fontsize=12, y=0.18)
plt.tight_layout(-5)
def hp_plot_fb8(fb8, nside):
import healpy as hp
npix = hp.nside2npix(nside)
fb8_map = fb8.pdf(fb8.spherical_coordinates_to_nu(
*hp.pix2ang(nside, np.arange(npix))))
plt.figure(figsize=(9,6))
vmap = cm.gray
vmap.set_under('w')
vmap.set_bad('w')
hp.mollview(fb8_map,
title=make_title(fb8, 1),
min=0,
max=np.round(np.nanmax(fb8_map),2),
cmap=vmap, hold=True,
cbar=True,
xsize=1600)
hp.graticule()
def build_args(kappa, beta, eta, alpha=0., rho=0.):
if kappa is None:
xvals = np.arange(beta/10., 0.8*beta)
idx = 3
xlabel='kappa'
text = rf'$\beta={beta}, \eta={eta:.1g}$'
textx = 0.03
elif beta is None:
xvals = np.arange(kappa/10., 0.8*kappa)
idx = 4
xlabel = 'beta'
text = rf'$\kappa={kappa}, \eta={eta:.1g}$'
textx = 0.03
elif eta is None:
xvals = np.arange(-1., 1.02, 0.02)
idx = 5
xlabel = 'eta'
text = rf'$\kappa={kappa}, \beta={beta}$'
textx = 0.5
args = []
for x in xvals:
arg = [0.,0.,0.,kappa,beta,eta,alpha,rho]
arg[idx] = x
args.append(arg)
return xvals, xlabel, text, textx, args
def approx_norm(kappa, beta, eta):
"""
Compare log-c6 vs approx log-c6
"""
xvals, xlabel, text, textx, args = build_args(kappa, beta, eta)
plt.figure()
plt.plot(xvals, [np.log(fb8(*_).normalize()) for _ in args], label='Series', color='k', linewidth=3.5)
plt.plot(xvals, [fb8(*_)._approx_log_normalize() for _ in args],
linestyle='--',
color='gray',
label='Approximate')
plt.plot(xvals, [spa(fb8(*_)).log_c3() for _ in args],
linestyle=':',
color='gray',
label='Saddlepoint')
plt.xlabel(rf'$\{xlabel}$')
plt.ylabel(rf'$\ln c_6(\{xlabel})$')
plt.legend()
plt.text(textx,0.7,text,
transform=plt.gca().transAxes, fontsize=14)
plt.tight_layout(0.1)
def numerical_norm(kappa, beta, eta, alpha, rho):
"""
Compare log-c8 (series) vs numerical integration log-c8
"""
xvals, xlabel, text, textx, args = build_args(kappa, beta, eta, alpha, rho)
plt.figure()
plt.plot(xvals, [np.log(fb8(*_).normalize()) for _ in args],
label='Series', color='k', linewidth=3.5)
plt.plot(xvals, [np.log(fb8(*_)._nnormalize()) for _ in args],
linestyle='--',
color='gray',
label='Numerical integration')
plt.plot(xvals, [spa(fb8(*_[:-2])).log_c3() for _ in args],
linestyle=':',
color='gray',
label='Saddlepoint')
plt.xlabel(rf'$\{xlabel}$')
plt.ylabel(rf'$\ln c_8(\{xlabel})$')
plt.legend()
plt.text(textx,0.7,text,
transform=plt.gca().transAxes, fontsize=14)
_ = fb8(0.,0.,0.,100.,10.,-0.5,alpha,rho)
textnu = rf'$\vec{{\nu}}=({_.nu[0]:.3g},{_.nu[1]:.3g},{_.nu[2]:.3g})$'
plt.text(textx,0.6,textnu,
transform=plt.gca().transAxes, fontsize=14)
plt.tight_layout(0.1)
def time_norm(kappa, beta, eta, alpha, rho):
""" Plot execution time of .normalize to ._nnormalize
"""
xvals, xlabel, text, textx, args = build_args(kappa, beta, eta, alpha, rho)
tfile = os.path.join('figs', 'time', 'timec8.pkl')
if os.path.isfile(tfile) and str(args) in pickle.load(open(tfile, 'rb')):
times_normalize, times_nnormalize = pickle.load(open(tfile, 'rb'))[str(args)]
else:
times_normalize = []
times_nnormalize = []
setup = 'from sphere.distribution import fb8'
for _ in args:
times_normalize.append(
min(timeit.repeat(stmt=('fb8('+','.join(['{}']*8)+').normalize(cache=dict())').format(*_),
setup=setup, repeat=3, number=1)))
times_nnormalize.append(
min(timeit.repeat(stmt=('fb8('+','.join(['{}']*8)+')._nnormalize()').format(*_),
setup=setup, repeat=3, number=1)))
if os.path.isfile(tfile):
ddd = pickle.load(open(tfile, 'rb'))
else:
ddd = {}
ddd[str(args)] = [times_normalize, times_nnormalize]
with open(tfile, 'wb') as f:
pickle.dump(ddd, f)
plt.figure()
plt.plot(xvals, times_normalize,
label='Series', color='k', linewidth=3.5)
plt.plot(xvals, times_nnormalize,
linestyle='--',
color='gray',
label='Numerical integration')
plt.xlabel(rf'$\{xlabel}$')
plt.ylabel(rf'Runtime [s]')
# plt.yscale('log')
plt.legend()
plt.text(0.42,0.38,text,
transform=plt.gca().transAxes, fontsize=14)
_ = fb8(0.,0.,0.,100.,10.,-0.5,alpha,rho)
textnu = rf'$\vec{{\nu}}=({_.nu[0]:.3g},{_.nu[1]:.3g},{_.nu[2]:.3g})$'
plt.text(0.42,0.28,textnu,
transform=plt.gca().transAxes, fontsize=14)
plt.tight_layout(0.1)
def do_fits(ths, phs):
from matplotlib.patches import Circle
from mpl_toolkits.mplot3d import art3d
xs = FB8Distribution.spherical_coordinates_to_nu(ths, phs)
z,x,y = xs.T
fit5 = fb8_mle(xs, True, fb5_only=True)
plot_fb8(fit5, 200)
ax = plt.gca()
# ax.scatter(x*1.05, y*1.05, z*1.05, color='k', depthshade=False, edgecolors='k', linewidth=0.5)
for (_x, _y, _z) in zip(x,y,z):
p = Circle((_x, _y), 0.01, ec='k', fc="none")
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=_z)
fit8 = fb8_mle(xs, True)
plot_fb8(fit8, 200)
ax = plt.gca()
for (_x, _y, _z) in zip(x,y,z):
p = Circle((_x, _y), 0.01, ec='k', fc="none")
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=_z)
def hp_fits(ths, phs, nside=64):
import healpy as hp
xs = FB8Distribution.spherical_coordinates_to_nu(ths, phs)
z,x,y = xs.T
fit5 = fb8_mle(xs, True, fb5_only=True)
hp_plot_fb8(fit5, nside)
hp.projscatter(ths, phs, marker='.', linewidths=0, s=5, c='k')
ax = plt.gca()
ax.annotate(r"$\bf{-180^\circ}$", xy=(1.7, 0.625), size="medium")
ax.annotate(r"$\bf{180^\circ}$", xy=(-1.95, 0.625), size="medium")
ax.annotate("Galactic", xy=(0.8, -0.05),
size="medium", xycoords="axes fraction")
plt.savefig('figs/Fig5_fb5.png')
fit8 = fb8_mle(xs, True)
hp_plot_fb8(fit8, nside)
hp.projscatter(ths, phs, marker='.', linewidths=0, s=5, c='k')
ax = plt.gca()
ax.annotate(r"$\bf{-180^\circ}$", xy=(1.7, 0.625), size="medium")
ax.annotate(r"$\bf{180^\circ}$", xy=(-1.95, 0.625), size="medium")
ax.annotate("Galactic", xy=(0.8, -0.05),
size="medium", xycoords="axes fraction")
plt.savefig('figs/Fig5_fb8.png')
def yukspor():
phs, ths = np.radians(np.loadtxt('yukspor.txt'))
do_fits(ths, phs)
def bsc5(mag_low=6):
dat = np.loadtxt('bsc5.dat', comments='#', skiprows=43)
_ = dat[dat[:,-1]<=mag_low]
phs, ths = np.radians([_[:,1], 90.-_[:,2]])
hp_fits(ths, phs)
def toy(seed=92518):
from matplotlib.patches import Circle
from mpl_toolkits.mplot3d import art3d
np.random.seed(seed)
toyf8 = fb8(np.pi/16, -np.pi/3,0,55,60,-1.,0.07,0.3)
xs = toyf8.rvs(100)
fit5 = fb8_mle(xs, fb5_only=True)
print(fit5, -fit5.log_likelihood(xs))
plot_fb8(fit5, 200)
ax = plt.gca()
for (_z, _x, _y) in xs:
p = Circle((_x, _y), 0.01, ec='w', fc="none")
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=_z, zdir='z')
plt.savefig('figs/Fig4_toyfb5.png')
fit8 = fb8_mle(xs)
print(fit8, -fit8.log_likelihood(xs))
plot_fb8(fit8, 200)
ax = plt.gca()
for (_z, _x, _y) in xs:
p = Circle((_x, _y), 0.01, ec='w', fc="none")
ax.add_patch(p)
art3d.pathpatch_2d_to_3d(p, z=_z)
plt.savefig('figs/Fig4_toyfb8.png')
def time(eta=1, alpha=0, rho=0, step=10):
""" Plot ratio of time spent on .normalize to ._nnormalize
"""
times_normalize = []
times_nnormalize = []
kappas = range(1, 200, step)
betas = range(1, 200, step)
setup = 'from sphere.distribution import fb8'
for x in product([0], [0], [0],
kappas, betas,
[eta], [alpha], [rho]):
print(x)
        times_normalize.append(
            min(timeit.repeat(stmt=('fb8('+','.join(['{}']*8)+').normalize(dict())').format(*x),
                              setup=setup, repeat=3, number=1)))
        times_nnormalize.append(
            min(timeit.repeat(stmt=('fb8('+','.join(['{}']*8)+')._nnormalize()').format(*x),
                              setup=setup, repeat=3, number=1)))
    times_normalize = np.reshape(times_normalize, (len(kappas), len(betas)))
    times_nnormalize = np.reshape(times_nnormalize, (len(kappas), len(betas)))
return times_normalize, times_nnormalize
def appendix(th, ph, ps):
for x in product([th], [ph], [ps],
[10,], [1,10],
[-1, -0.8, 1], [0, np.pi/2], [0]):
plot_fb8(fb8(*x), 200)
plt.savefig('figs/appendix/fb8_k{:.0f}_b{:.0f}_e{:.1f}_a{:.2f}.png'.format(*x[3:-1]))
def __main__():
th,ph,ps = (np.pi/16, -np.pi/3, 0)
# FB4
plot_fb8(fb8(th,ph,ps,10,10,-1,0,0), 200)
plt.savefig('figs/Fig1_fb4.png')
# FB5
plot_fb8(fb8(th,ph,ps,10,4,1,0,0), 200)
plt.savefig('figs/Fig1_fb5.png')
# FB6
plot_fb8(fb8(th+np.pi/6,ph,ps,10,10,-0.5,0,0), 200)
plt.savefig('figs/Fig2_fb6.png')
# FB8
plot_fb8(fb8(th,ph,ps,10,10,-1,0.5,0.3), 200)
plt.savefig('figs/Fig2_fb8.png')
# approx_c6
approx_norm(None, 100., -0.5)
plt.savefig('figs/Fig3_approxc6_kappa.pdf')
approx_norm(100., None, -0.5)
plt.savefig('figs/Fig3_approxc6_beta.pdf')
approx_norm(100., 100., None)
plt.savefig('figs/Fig3_approxc6_eta.pdf')
# ln_c8
numerical_norm(None, 100., -0.5, 0.5, 0.3)
plt.savefig('figs/Fig3_lnc8_kappa.pdf')
numerical_norm(100., None, -0.5, 0.5, 0.3)
plt.savefig('figs/Fig3_lnc8_beta.pdf')
numerical_norm(100., 100., None, 0.5, 0.3)
plt.savefig('figs/Fig3_lnc8_eta.pdf')
# time_c8
time_norm(None, 100., -0.5, 0.5, 0.3)
plt.savefig('figs/Fig3_timec8_kappa.pdf')
time_norm(100., None, -0.5, 0.5, 0.3)
plt.savefig('figs/Fig3_timec8_beta.pdf')
time_norm(100., 100., None, 0.5, 0.3)
plt.savefig('figs/Fig3_timec8_eta.pdf')
# toy application
toy()
# bright stars catalog
bsc5()
# appendixfb8s
appendix(0,0,0)
if __name__=='__main__':
__main__()
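# To reproduce a single sphere plot interactively (parameters copied from the
# FB5 example in __main__ above):
#
#   plot_fb8(fb8(np.pi/16, -np.pi/3, 0, 10, 4, 1, 0, 0), npts=200)
#   plt.show()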
|
import os, re, sys
import subprocess as sp
import random, string
import numpy as np
from .utils import *
from .pfunc import pfunc
DEBUG=False
# load package locations from yaml file, watch! global dict
package_locs = load_package_locations()
def bpps(sequence, package='vienna', constraint=None, pseudo=False,
T=37, coaxial=True, linear=False, dna=False,
motif=None, dangles=True,param_file=None,reweight=None, beam_size=100, DEBUG=False, threshknot=False):
''' Compute base pairing probability matrix for RNA sequence.
Args:
sequence (str): nucleic acid sequence
T (float): temperature (Celsius)
linear (bool): call LinearPartition to estimate Z in Vienna or Contrafold
constraint (str): structure constraint (functional in vienna, contrafold, rnastructure)
motif (str): argument to vienna motif
pseudo (bool): (NUPACK only) include pseudoknot calculation
dangles (bool): dangles or not, specifiable for vienna, nupack
dna (bool): (NUPACK only) use SantaLucia 1998 parameters for DNA
coaxial (bool): coaxial stacking or not, specifiable for rnastructure, vfold
noncanonical(bool): include noncanonical pairs or not (for contrafold, RNAstructure (Cyclefold))
beam size (int): Beam size for LinearPartition base pair calculation.
DEBUG (bool): Output command-line calls to packages.
threshknot (bool): calls threshknot to predict pseudoknots (for contrafold with LinearPartition)
Possible packages: 'vienna_2', 'vienna_1','contrafold_1','contrafold_2',
'nupack_95','nupack_99','rnasoft_2007','rnasoft_1999','rnastructure','vfold_0','vfold_1'
Returns
array: NxN matrix of base pair probabilities
'''
try:
pkg, version = package.lower().split('_')
except:
pkg, version = package, None
if motif is not None and pkg != 'vienna':
raise ValueError('motif option can only be used with Vienna.')
if pseudo and pkg != 'nupack':
raise ValueError('pseudoknot option only implemented with Nupack.')
if not dangles and pkg not in ['vienna','nupack']:
print('Warning: %s does not support dangles options' % pkg)
if not coaxial and pkg not in ['rnastructure','vfold']:
print('Warning: %s does not support coaxial options' % pkg)
if linear and pkg not in ['vienna','contrafold','eternafold']:
print('Warning: LinearPartition only implemented for vienna, contrafold, eternafold.')
if pkg=='nupack':
return bpps_nupack_(sequence, version = version, dangles = dangles, T = T, pseudo=pseudo, dna=dna)
elif pkg=='vfold':
return bpps_vfold_(sequence, version = version, T = T, coaxial = coaxial)
else:
_, tmp_file = pfunc(sequence, package=package, bpps=True, linear=linear,
motif=motif, constraint=constraint, T=T, coaxial=coaxial,
dangles=dangles, param_file=param_file,reweight=reweight, beam_size=beam_size, DEBUG=DEBUG, threshknot=threshknot)
if linear:
#parse linearpartition output
return bpps_linearpartition_(sequence, tmp_file)
else:
if 'contrafold' in package:
return bpps_contrafold_(sequence, tmp_file)
if package=='eternafold':
return bpps_contrafold_(sequence, tmp_file)
elif 'vienna' in package:
return bpps_vienna_(sequence, tmp_file)
elif 'rnasoft' in package:
return bpps_rnasoft_(sequence, tmp_file)
elif 'rnastructure' in package:
return bpps_rnastructure_(sequence, tmp_file, coaxial=coaxial)
else:
raise RuntimeError('package not yet implemented')
def bpps_vienna_(sequence, tmp_file):
dot_fname = tmp_file
probs=np.zeros([len(sequence), len(sequence)])
with open(dot_fname,'r') as f:
for line in f.readlines():
if 'ubox' in line:
try:
i, j, p, _ = line.split()
i, j, p = int(i)-1, int(j)-1, float(p)**2
probs[i,j] = p
probs[j,i] = p
except:
pass
os.remove(dot_fname)
return probs
def bpps_contrafold_(sequence, tmp_file):
fname = tmp_file
probs=np.zeros([len(sequence), len(sequence)])
for line in open(fname).readlines():
if len(line.split(':')) > 1:
first_ind = int(line.split()[0])-1
for x in line.split()[2:]:
second_ind = int(x.split(':')[0])-1
p = float(x.split(':')[1])
probs[first_ind, second_ind] = p
probs[second_ind, first_ind] = p
os.remove(fname)
return probs
def bpps_rnasoft_(sequence, tmp_file):
fname = tmp_file
probs=np.zeros([len(sequence), len(sequence)])
for line in open(fname).readlines():
i,j,p = int(line.split()[0]), int(line.split()[1]), float(line.split()[2])
probs[i,j] = p
probs[j,i] = p
os.remove(fname)
return probs
def bpps_nupack_(sequence, version='95', T=37, dangles=True, pseudo=False,dna=False):
if not version: version='95'
nupack_materials={'95': 'rna1995', '99': 'rna1999'}
if dna:
material='dna1998'
else:
material=nupack_materials[version]
DIR = package_locs['nupack']
if dangles:
dangle_option='some'
else:
dangle_option='none'
seqfile = write([sequence])
command=['%s/pairs' % DIR, '%s' % seqfile.replace('.in',''),
'-T', str(T), '-material', material, '-dangles', dangle_option, '-cutoff', '0.0000000001']
if pseudo:
command.append('--pseudo')
p = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = p.communicate()
if p.returncode:
raise Exception('Nupack pfunc failed: on %s\n%s' % (sequence, stderr))
ppairs_file = '%s.ppairs' % seqfile.replace('.in','')
os.remove(seqfile)
probs=np.zeros([len(sequence), len(sequence)])
with open(ppairs_file, 'r') as f:
for line in f.readlines():
if not line.startswith('%'):
fields = line.split()
if len(fields) > 1:
if int(fields[1]) <= len(sequence):
i, j, p = int(fields[0])-1, int(fields[1])-1, float(fields[2])
probs[i,j] = p
probs[j,i] = p
return probs
def bpps_rnastructure_(sequence, tmp_file, coaxial=True):
DIR = package_locs['rnastructure']
pfsfile = tmp_file #'%s/rnastructtmp.pfs' % package_locs['TMP']
outfile = '%s.probs' % (tmp_file.replace('.pfs',''))
command = ['%s/ProbabilityPlot' % DIR, pfsfile, outfile, '-t', '-min', '0.0000000001']
probs=np.zeros([len(sequence), len(sequence)])
if DEBUG: print(' '.join(command))
p = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = p.communicate()
if DEBUG:
print('stdout')
print(stdout)
print('stderr')
print(stderr)
if p.returncode:
        raise Exception('RNAstructure ProbabilityPlot failed: on %s\n%s' % (sequence, stderr))
with open(outfile, 'r') as f:
for line in f.readlines()[2:]:
fields = line.split()
i, j, p = int(fields[0])-1, int(fields[1])-1, 10**(-1*float(fields[2]))
probs[i,j] = p
probs[j,i] = p
os.remove(outfile)
os.remove(pfsfile)
return probs
def bpps_vfold_(sequence, version='0',T=37, coaxial=True):
#available versions: 0 for Turner 04 params, 1 for Mfold 2.3 params
DIR = package_locs["vfold"]
cwd = os.getcwd()
os.chdir(DIR) #vfold precompiled binaries don't work being called from elsewhere
if DEBUG: print(os.getcwd())
seqfile = write([sequence])
outfile = filename()+'.pij'
if sys.platform=="linux":
platform='linux'
elif sys.platform=="darwin":
platform='mac'
elif sys.platform=="win32":
platform='win'
else:
raise RuntimeError('Vfold has binaries for linux, macOS, and win')
command = ['./Vfold2d_npk_%s.o %d %d %s %s %d' % (platform, int(coaxial), T, seqfile, outfile, int(version))]
if DEBUG: print(' '.join(command))
p = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
stdout, stderr = p.communicate()
os.chdir(cwd)
if DEBUG:
print('stdout')
print(stdout)
print('stderr')
print(stderr)
if p.returncode:
raise Exception('Vfold2d_npk failed: on %s\n%s' % (sequence, stderr))
os.remove(seqfile)
probs = np.zeros([len(sequence),len(sequence)])
p_ij_output = np.loadtxt(outfile,usecols=(0,2,3)) #col 0: set of inds 1, col 1: set of inds 2, col 2: bpp
for i,j,p in p_ij_output:
probs[int(i-1),int(j-1)] = p
probs[int(j-1),int(i-1)] = p
os.remove(outfile)
return probs
#output: take second field of last line for Z
def bpps_linearpartition_(sequence, tmp_file):
fname = tmp_file
probs=np.zeros([len(sequence), len(sequence)])
for line in open(fname,'r').readlines():
if len(line.strip())>0:
first_ind, second_ind, p = line.strip().split(' ')
first_ind = int(first_ind)-1
second_ind = int(second_ind)-1
p = float(p)
probs[first_ind, second_ind] = p
probs[second_ind, first_ind] = p
os.remove(fname)
return probs
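# Minimal usage sketch (the chosen package binary must be installed and listed
# in the package-locations YAML; the sequence is illustrative):
#
#   p = bpps('GGGGAAAACCCC', package='vienna_2')
#   # p[i, j] is the probability that bases i and j are paired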
|
"""
Module: 'mlx90640' on M5 FlowUI v1.4.0-beta
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.11.0', version='v1.11-284-g5d8e1c867 on 2019-08-30', machine='ESP32 module with ESP32')
# Stubber: 1.3.1 - updated
from typing import Any
def deinit():
pass
def getCenterTmp():
pass
def getMaxTmp():
pass
def getMinTmp():
pass
def getTmp():
pass
def init():
pass
def setColorMaxTmp():
pass
def setColorMinTmp():
pass
def update():
pass
|
# Generated by Django 3.2.12 on 2022-03-21 17:33
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MLAlgorithm',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('description', models.CharField(max_length=1000)),
('code', models.CharField(max_length=50000)),
('version', models.CharField(max_length=128)),
('owner', models.CharField(max_length=128)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='MLEndpoint',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('owner', models.CharField(max_length=128)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='MLRequest',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('input_data', models.CharField(max_length=10000)),
('full_response', models.CharField(max_length=10000)),
('response', models.CharField(max_length=10000)),
('feedback', models.CharField(blank=True, max_length=10000, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('parent_mlalgorithm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='endpoints.mlalgorithm')),
],
),
migrations.CreateModel(
name='MLAlgorithmStatus',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(max_length=128)),
('active', models.BooleanField()),
('created_by', models.CharField(max_length=128)),
('created_at', models.DateTimeField(auto_now_add=True)),
('parent_mlalgorithm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='status', to='endpoints.mlalgorithm')),
],
),
migrations.AddField(
model_name='mlalgorithm',
name='parent_endpoint',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='endpoints.mlendpoint'),
),
migrations.CreateModel(
name='ABTest',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=10000)),
('created_by', models.CharField(max_length=128)),
('created_at', models.DateTimeField(auto_now_add=True)),
('ended_at', models.DateTimeField(blank=True, null=True)),
('summary', models.CharField(blank=True, max_length=10000, null=True)),
('parent_mlalgorithm_1', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parent_mlalgorithm_1', to='endpoints.mlalgorithm')),
('parent_mlalgorithm_2', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parent_mlalgorithm_2', to='endpoints.mlalgorithm')),
],
),
]
|
import os
files = os.listdir('.')
best_files = []
for f in files:
if 'best.' in f and 'py' not in f:
best_files.append(f)
best_data = []
for f in best_files:
with open(f) as ff:
best_data.append(ff.readlines())
best_dict = {}
for b in best_data:
#print(b)
b = b[0]
tab_split = b.split("\t")
print(tab_split)
peak = tab_split[0]
best = tab_split[1]
if float(peak) not in best_dict:
best_dict[float(peak)] = [float(best)]
else:
best_dict[float(peak)].append(float(best))
print(best_dict)
import numpy as np
for k in sorted(best_dict.keys()):
print(str(k) + '\t' + str(np.mean(best_dict[k])) + '\t' + str(np.std(best_dict[k])/np.sqrt(len(best_dict[k]))))
|
""""
Samuro Bot
Автор: *fennr*
github: https://github.com/fennr/Samuro-HotsBot
Бот для сообществ по игре Heroes of the Storm
"""
import os
import discord
from discord import Embed
from discord.ext import commands
from utils.library import files
from utils.classes.Const import config
from utils import library
guild_ids = [845658540341592096] # Put your server ID in this array.
class general(commands.Cog, name="General"):
def __init__(self, bot):
self.bot = bot
@commands.command(name="info", aliases=["botinfo"])
async def info(self, context):
"""
        - Get information about the bot
"""
embed = discord.Embed(
description="Русскоязычный бот по игре Heroes of the Storm",
color=config.success
)
embed.set_author(
name="Samuro"
)
embed.add_field(
name="Автор:",
value="fenrir#5455",
inline=True
)
embed.add_field(
name="Префикс:",
value=f"{config.bot_prefix}",
inline=False
)
embed.set_footer(
text=f"Информация для {context.author}"
)
await context.send(embed=embed)
@commands.command(name="invite")
async def invite(self, context):
"""
        - Get a link to invite the bot to your server
"""
        # os.environ.get() never raises, so use an explicit fallback instead of try/except
        APP_ID = os.environ.get('app_id_prod') or os.environ.get('APP_ID')
embed = discord.Embed(
title="Приглашение на сервер",
description=f"Для подключения Самуро перейдите по [ссылке](https://discordapp.com/oauth2/authorize?&client_id={APP_ID}&permissions=270416&scope=bot)\n"
f"По багам/вопросам писать: __fenrir#5455__",
color=config.info
)
await context.send(embed=embed)
await context.author.send(embed=embed)
@commands.command(name="ping")
async def ping(self, context):
"""
        - Check whether the bot is alive
"""
embed = discord.Embed(
color=config.success
)
embed.add_field(
name="Pong!",
value=":ping_pong:",
inline=True
)
embed.set_footer(
text=f"Pong request by {context.author}"
)
await context.send(embed=embed)
@commands.command(name="poll")
async def poll(self, context, *args):
"""
        Create a poll
"""
poll_title = " ".join(args)
embed = discord.Embed(
title=f"{poll_title}",
color=config.success
)
embed.set_footer(
text=f"Опрос создан: {context.message.author} • Проголосовать!"
)
embed_message = await context.send(embed=embed)
await embed_message.add_reaction("👍")
await embed_message.add_reaction("👎")
await embed_message.add_reaction("🤷")
@commands.command(name="avatar")
    async def avatar(self, ctx, member: discord.Member):
        """
        - Show a member's avatar
        """
        user_avatar = library.avatar(ctx, member)
embed = Embed(
title=f"{member.name}",
color=config.info
)
embed.set_image(
url=user_avatar
)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(general(bot))
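# --- Usage sketch (added for illustration, not part of the original cog) ---
# How a bot entry point might load this extension; the module path "cogs.general" and
# the token environment variable name are assumptions, not part of the original project.
# from discord.ext import commands
# bot = commands.Bot(command_prefix=config.bot_prefix)
# bot.load_extension("cogs.general")   # imports this module and calls setup(bot) above
# bot.run(os.environ["DISCORD_TOKEN"])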
|
import torch
import torch.nn as nn
import torch.nn.functional as nnf
class SpatialTransformer(nn.Module):
"""
N-D Spatial Transformer
"""
def __init__(self, size, mode='bilinear'):
super().__init__()
self.mode = mode
# create sampling grid
vectors = [torch.arange(0, s) for s in size]
grids = torch.meshgrid(vectors)
grid = torch.stack(grids)
grid = torch.unsqueeze(grid, 0)
grid = grid.type(torch.FloatTensor)
# registering the grid as a buffer cleanly moves it to the GPU, but it also
# adds it to the state dict. this is annoying since everything in the state dict
# is included when saving weights to disk, so the model files are way bigger
# than they need to be. so far, there does not appear to be an elegant solution.
# see: https://discuss.pytorch.org/t/how-to-register-buffer-without-polluting-state-dict
self.register_buffer('grid', grid)
def forward(self, src, flow):
# new locations
new_locs = self.grid + flow
shape = flow.shape[2:]
# need to normalize grid values to [-1, 1] for resampler
for i in range(len(shape)):
new_locs[:, i, ...] = 2 * (new_locs[:, i, ...] / (shape[i] - 1) - 0.5)
# move channels dim to last position
# also not sure why, but the channels need to be reversed
if len(shape) == 2:
new_locs = new_locs.permute(0, 2, 3, 1)
new_locs = new_locs[..., [1, 0]]
elif len(shape) == 3:
new_locs = new_locs.permute(0, 2, 3, 4, 1)
new_locs = new_locs[..., [2, 1, 0]]
return nnf.grid_sample(src, new_locs, align_corners=True, mode=self.mode)
class VecInt(nn.Module):
"""
Integrates a vector field via scaling and squaring.
"""
def __init__(self, inshape, nsteps):
super().__init__()
assert nsteps >= 0, 'nsteps should be >= 0, found: %d' % nsteps
self.nsteps = nsteps
self.scale = 1.0 / (2 ** self.nsteps)
self.transformer = SpatialTransformer(inshape)
def forward(self, vec):
vec = vec * self.scale
for _ in range(self.nsteps):
vec = vec + self.transformer(vec, vec)
return vec
class ResizeTransform(nn.Module):
"""
Resize a transform, which involves resizing the vector field *and* rescaling it.
"""
def __init__(self, vel_resize, ndims):
super().__init__()
self.factor = 1.0 / vel_resize
self.mode = 'linear'
if ndims == 2:
self.mode = 'bi' + self.mode
elif ndims == 3:
self.mode = 'tri' + self.mode
def forward(self, x):
if self.factor < 1:
# resize first to save memory
x = nnf.interpolate(x, align_corners=True, scale_factor=self.factor, mode=self.mode)
x = self.factor * x
elif self.factor > 1:
# multiply first to save memory
x = self.factor * x
x = nnf.interpolate(x, align_corners=True, scale_factor=self.factor, mode=self.mode)
# don't do anything if resize is 1
return x
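# --- Usage sketch (added for illustration, not part of the original module) ---
# Shows how the three layers compose for a 2-D image: integrate a half-resolution
# velocity field with VecInt, upsample it with ResizeTransform, then warp the image
# with SpatialTransformer. The shapes below are illustrative assumptions.
if __name__ == "__main__":
    vol_shape = (64, 64)                                    # full-resolution image size
    half_shape = (32, 32)                                   # velocity field at half resolution
    src = torch.rand(1, 1, *vol_shape)                      # moving image, N x C x H x W
    vel = torch.rand(1, 2, *half_shape)                     # 2-channel stationary velocity field
    flow = VecInt(half_shape, nsteps=7)(vel)                # scaling-and-squaring integration
    flow = ResizeTransform(vel_resize=0.5, ndims=2)(flow)   # upsample and rescale to full size
    warped = SpatialTransformer(vol_shape)(src, flow)       # resample src along the dense flow
    print(warped.shape)                                     # torch.Size([1, 1, 64, 64])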
|
import inspect
from flask_admin.form import BaseForm
def converts(*args):
def _inner(func):
func._converter_for = frozenset(args)
return func
return _inner
class InlineFormAdmin(object):
"""
Settings for inline form administration.
You can use this class to customize displayed form.
For example::
class MyUserInfoForm(InlineFormAdmin):
form_columns = ('name', 'email')
"""
_defaults = ['form_columns', 'excluded_form_columns', 'form_args']
def __init__(self, model, **kwargs):
"""
Constructor
:param model:
Target model class
:param kwargs:
Additional options
"""
self.model = model
for k in self._defaults:
if not hasattr(self, k):
setattr(self, k, None)
        for k, v in kwargs.items():
setattr(self, k, v)
class ModelConverterBase(object):
def __init__(self, converters=None, use_mro=True):
self.use_mro = use_mro
if not converters:
converters = {}
for name in dir(self):
obj = getattr(self, name)
if hasattr(obj, '_converter_for'):
for classname in obj._converter_for:
converters[classname] = obj
self.converters = converters
def get_converter(self, column):
if self.use_mro:
types = inspect.getmro(type(column.type))
else:
types = [type(column.type)]
# Search by module + name
for col_type in types:
type_string = '%s.%s' % (col_type.__module__, col_type.__name__)
if type_string in self.converters:
return self.converters[type_string]
# Search by name
for col_type in types:
if col_type.__name__ in self.converters:
return self.converters[col_type.__name__]
return None
def get_form(self, model, base_class=BaseForm,
only=None, exclude=None,
field_args=None):
        raise NotImplementedError()
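# --- Usage sketch (added for illustration, not part of the original module) ---
# A concrete converter registers handlers with @converts; ModelConverterBase.__init__
# collects every method carrying _converter_for into self.converters, and get_converter()
# then matches a SQLAlchemy column type by "module.ClassName" or by bare class name.
# The handler body and the SQLAlchemy model below are illustrative assumptions.
# class MyModelConverter(ModelConverterBase):
#     @converts('String', 'Unicode')                  # registered under both type names
#     def conv_string(self, column, field_args, **extra):
#         ...  # return a WTForms field built from field_args
#
# converter = MyModelConverter()
# handler = converter.get_converter(MyModel.__table__.columns['name'])  # -> conv_string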
|
#
# Flask
#
from . import auth_blueprint
from flask import Flask, request, url_for, redirect, Response, make_response
from flask_cors import CORS, cross_origin
#
# Configuration Object
#
from config import CONFIG as conf
#
# Python Standard Library
#
import logging
import os
from pprint import pprint
import json
from time import gmtime, strftime
import datetime
#
# Password Hashing
#
import bcrypt
#
# JWT
#
import jwt
#
# GraphQL
#
from application.gql import Query, Mutation
from application.gql.mutations import CREATE_USER
from application.gql.queries import GET_USER_BY_EMAIL
#
# User Helper Functions
#
from application.auth.user import get_user_by_email, get_user_by_uuid
CONFIG = conf()
def generate_auth_token(user):
'''Generates a JWT access token.
'''
payload = {
"email": user['email'],
"aud": CONFIG.JWT_AUDIENCE,
"exp": (datetime.datetime.utcnow() + datetime.timedelta(seconds=CONFIG.ACCESS_TOKEN_EXPIRE)),
"alg": "RS256",
"expires_at": "",
"https://hasura.io/jwt/claims": {
"x-hasura-allowed-roles": ["user"],
"x-hasura-default-role": "anonymous",
"x-hasura-user-id": user['uuid'],
"x-hasura-org-id": "123",
"x-hasura-role": user['role'],
"x-hasura-custom": "custom-value"
}
}
token = jwt.encode(payload, CONFIG.JWT_SECRET, algorithm='RS256')
# print(token)
# print(token.decode('UTF-8'))
return token.decode('UTF-8')
def generate_refresh_token(user):
'''Generates a JWT refresh token.
'''
payload = {
"email": user['email'],
"aud": CONFIG.JWT_AUDIENCE,
"exp": (datetime.datetime.utcnow() + datetime.timedelta(seconds=CONFIG.REFRESH_TOKEN_EXPIRE)),
"expires_at": "",
"alg": "RS256",
"token_version": 0
}
token = jwt.encode(payload, CONFIG.JWT_SECRET, algorithm='RS256')
# print(token)
# print(token.decode('UTF-8'))
return token.decode('UTF-8')
@auth_blueprint.route('/login', methods=['POST'])
def auth_login():
    '''Login: the client posts the user's credentials in the body of the request.
    If the email and password match the DB record, a token is generated and sent back in the response.
    The token expires after CONFIG.ACCESS_TOKEN_EXPIRE seconds.
    If the user does not exist, or the email/password combination does not match, an error with an appropriate message is returned.
'''
# Get request body as json(dict)
data = request.get_json()
email = data.get('email')
password = data.get('password')
#
# Check the credentials (Try and get a user with that email)
#
user = get_user_by_email(email)
if user is None: # user does not exist based on that email
return {
"STATUS": "ERROR",
"MESSAGE": "User not found"
}
else:
if bcrypt.checkpw(password.encode('utf8'), user['password_hash'].encode('utf8')):
#
# User credentials are correct!
#
if not user['has_confirmed']:
#
# User has not confirmed their email yet
#
return {
"STATUS": "ERROR",
"MESSAGE": "Email not confirmed"
}
token = generate_auth_token(user)
refresh_token = generate_refresh_token(user)
res = make_response(
{
"STATUS": "OK",
"token": token,
"tokenExpiry": CONFIG.ACCESS_TOKEN_EXPIRE,
"refreshToken": refresh_token,
"callback": f"http://127.0.0.1:8000/auth/callback?uuid={user['uuid']}",
"user": {
"uuid": user['uuid'],
"email": user['email'],
"name": user['name'],
"initials": user['initials'],
"hasCompletedOnboarding": user['has_completed_onboarding']
}
}
)
# res.headers['Access-Control-Allow-Credentials'] = True
# res.set_cookie(key='refreshToken', value=refresh_token,
# domain='127.0.0.1:3000', httponly=True) # max_age=CONFIG.REFRESH_TOKEN_EXPIRE,
return res
else:
return {"STATUS": "ERROR", "MESSAGE": "Wrong password"}
@auth_blueprint.route('/callback')
def login_callback():
"""
This should set a cookie on the client containing a valid jwt
"""
user = get_user_by_uuid(request.args.get('uuid'))
# print('\n\n\nCALLBACK USER: ', user)
if user:
refresh_token = generate_refresh_token(user)
res = make_response(
{"STATUS": "OK", "refreshToken": refresh_token})
res.headers['Access-Control-Allow-Credentials'] = True
res.set_cookie(key='refreshToken', value=refresh_token,
domain='127.0.0.1', httponly=True) # max_age=CONFIG.REFRESH_TOKEN_EXPIRE,
        return res
    # Fall back to an explicit error so the view never returns None for an unknown uuid
    return {"STATUS": "ERROR", "MESSAGE": "User not found"}
@auth_blueprint.route('/refresh', methods=['POST'])
def auth_refresh():
# TODO: Refresh token here
# print('Request Cookies: ', request.cookies)
body = request.get_json()
# print('Body: ', body)
#
# TODO: Decode refresh token and get user
#
try:
rt = body.get('refreshToken').encode('utf-8')
# print('\n\nRT: ', rt)
decoded_result = decode_token(rt, verify=True)
# print('Refresh Token Result: ', decoded_result)
# user = get_user_by_email(refresh['email'])
# print('User from RF Token: ', user)
# new_token = generate_auth_token(user)
if decoded_result[0] == 'ERROR' and decoded_result[1] == "EXPIRED":
return {"STATUS": "ERROR", 'MESSAGE': "refresh token expired", "newToken": None}
elif decoded_result[0] == 'SUCCESS' and decoded_result[1] == 'VALID':
user = get_user_by_email(decoded_result[2]['email'])
returnUser = {
"uuid": user['uuid'],
"email": user['email'],
"name": user['name'],
"initials": user['initials'],
"hasCompletedOnboarding": user['has_completed_onboarding']
}
return {"STATUS": "OK", "newToken": generate_auth_token(user), "user": returnUser, "newRefreshToken": generate_refresh_token(user)}
except Exception as e:
print('\n\n\n\nError: ', str(e))
return {'STATUS': "ERROR"}
#
# TODO: Generate new access token
#
def decode_token(raw_token, verify=False):
try:
decoded = jwt.decode(raw_token, CONFIG.JWT_PUBLIC_KEY, algorithms=[
'RS256'], audience=CONFIG.JWT_AUDIENCE, verify=verify)
return ('SUCCESS', 'VALID', decoded)
except jwt.ExpiredSignatureError as e:
print('INVALID TOKEN: EXPIRED!')
return ('ERROR', 'EXPIRED', None)
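# --- Usage sketch (added for illustration, not part of the original module) ---
# Rough client-side flow against these endpoints. The /auth prefix and host/port are
# inferred from the callback URL above and are assumptions, as are the credentials.
# import requests
# login = requests.post("http://127.0.0.1:8000/auth/login",
#                       json={"email": "user@example.com", "password": "secret"}).json()
# if login["STATUS"] == "OK":
#     access_token = login["token"]            # short-lived, sent with authenticated requests
#     refresh_token = login["refreshToken"]    # long-lived, exchanged for new access tokens
#     refreshed = requests.post("http://127.0.0.1:8000/auth/refresh",
#                               json={"refreshToken": refresh_token}).json()
#     access_token = refreshed.get("newToken") or access_token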
|