content stringlengths 5 1.05M |
|---|
import sys
sys.path.append('icons')
sys.path.append('data')
sys.path.append('calc')
import matplotlib.pyplot as plt
import numpy as np
from sklearn import metrics
import os
from PIL import Image
import pandas as pd
import htool as ht
import input_processing as inp
import streamlit as st
from datetime import datetime
EMOJI_ICON = "icons/HTool.ico"
EMOJI_PNG = "icons/HTool.png"
st.set_page_config(page_title="HTool", layout='wide')
col1, col2 = st.columns([2.5, 4])
with col2:
st.image(EMOJI_PNG, width=200)
col1, col2 = st.columns([1,3])
with col2:
st.title('HTool - 1D heat transfer tool')
# Sidebar
st.sidebar.header('Defining the calculation input')
bc = st.sidebar.radio('Select boundary condition type:', ['TRSYS measurement', 'Input vector'], key=None)
if bc == 'TRSYS measurement':
uploaded_file = st.sidebar.file_uploader(
'Upload a file to input data', type='dat')
if uploaded_file is not None:
with open('data/raw/' + uploaded_file.name, "wb") as f:
os.path.relpath('/data/raw')
f.write(uploaded_file.getbuffer())
filenames = os.listdir('data/raw/')
file_name = st.sidebar.selectbox('Select input file', filenames)
start_date = st.sidebar.date_input('Select start date: ')
start_time = st.sidebar.text_input('Write down start time (hh:mm):')
end_date = st.sidebar.date_input('Select end date:')
end_time = st.sidebar.text_input('Write down end time (hh:mm):')
dt = 60
elif bc == 'Input vector':
uploaded_file = st.sidebar.file_uploader(
'Upload a file to input data', type='csv')
if uploaded_file is not None:
with open('data/vector/' + uploaded_file.name, "wb") as f:
os.path.relpath('/data/vector')
f.write(uploaded_file.getbuffer())
filenames = os.listdir('data/vector/')
file_name = st.sidebar.selectbox('Select input file', filenames)
dt = st.sidebar.number_input('Write down delta_t in minutes:', min_value=1, max_value=120)
dt = dt * 30
indoor_t = st.sidebar.number_input('Specify indoor constant BC:')
initial = st.sidebar.radio('Please select initial temperature type:', ['Constant temperature', 'Steady-state transfer'])
if initial == 'Constant temperature':
ctemp = st.sidebar.number_input('Write down constant initial temperature')
elif initial == 'Steady-state transfer':
initial_indoor = st.sidebar.number_input('Write down indoor temperature')
initial_outdoor = st.sidebar.number_input('Write down outdoor temperature')
v1 = st.sidebar.text_input('Specify transient q vector name', value='v1')
v2 = st.sidebar.text_input('Specify steady-state q vector name', value='v2')
# Main part
# Per-layer material properties entered in a five-column grid.
n_layers = st.number_input('Define number of layers', min_value=0, max_value=10, value=0)
name = []   # layer names
cond = []   # thermal conductivity per layer
rho = []    # density per layer
c = []      # heat capacity per layer
l = []      # layer thickness [m]
if n_layers != 0:
    col1, col2, col3, col4, col5 = st.columns([1, 1, 1, 1, 1])
    for i in range(n_layers):
        # BUG FIX: the original reused key=f'Question {i}' for all five
        # widgets of row i — Streamlit raises DuplicateWidgetID for
        # duplicate keys, so the form could never render. Each widget now
        # gets a unique key.
        with col1:
            name.append(st.text_input(label=f'Layer {i+1} name', key=f'name_{i}'))
        with col2:
            cond.append(st.number_input(label=f'Layer {i+1} conductivity', key=f'cond_{i}', step=1e-3, format="%.3f"))
        with col3:
            rho.append(st.number_input(label=f'Layer {i+1} density', key=f'rho_{i}'))
        with col4:
            c.append(st.number_input(label=f'Layer {i+1} capacity', key=f'c_{i}'))
        with col5:
            l.append(st.number_input(label=f'Layer {i+1} length [m]', key=f'l_{i}', step=1e-3, format="%.3f"))
col1, col2 = st.columns([2.5, 3])
with col1:
    plot_true = st.checkbox('Include calculation plot?')
with col2:
    initial_b = st.button('Calculate')
# Run the full calculation once the user presses "Calculate".
if initial_b:
    # Build the layered wall model from the per-layer inputs.
    materials = ht.Material()
    for i in range(len(name)):
        materials.new_material(name[i], cond[i], rho[i], c[i], l[i])
    resistance = ht.Resistance(materials.layers, delta_x=0.005, delta_t = dt)
    R_mat, tau, R_bound, mesh = resistance.resistance_tau()
    if bc == 'TRSYS measurement':
        # Extract indoor/outdoor temperature series from the measurement file.
        filename = 'data/raw/' + file_name
        start = str(start_date) + ' ' + str(start_time)
        end = str(end_date) + ' ' + str(end_time)
        index = inp.RawData_index(filename, start, end)
        first, last = index.dfinit()
        columns = index.cols()
        series = inp.RawData_series(filename, first, last, columns)
        vectors = series.ex_vect()
        indoor = vectors[0]
        outdoor = vectors[1]
        # `initial` is rebound from the radio-button string to the actual
        # initial temperature vector.
        if initial == 'Constant temperature':
            initial = np.array([ctemp for i in range(len(tau))])
        elif initial == 'Steady-state transfer':
            # Relax a uniform mid-temperature field against constant
            # boundaries to approximate the steady-state profile.
            middle = (initial_indoor + initial_outdoor) / 2
            help = np.array([middle for i in range(len(tau))])
            outdoor_init = np.array([initial_outdoor for i in range(60*60*3)])
            indoor_init = np.array([initial_indoor for i in range(60*60*3)])
            res_init = resistance.solve_he(R_mat, tau, R_bound, help, indoor_init, outdoor_init)
            # Drop the boundary nodes, keep the interior profile.
            initial = res_init[-1][1:len(res_init[-1])-1]
        # Best-effort preview plots; failures (e.g. missing results dirs)
        # are deliberately ignored so the calculation still runs.
        try:
            initial = initial  # no-op, kept as-is
            fig = plt.figure()
            fig.set_size_inches(8,5)
            plt.plot(initial) #TODO insert legend
            plt.savefig('results/first_plot/initial', dpi=100)
            plt.clf()
            plt.plot(indoor, label='indoor temperature') #TODO insert legend
            plt.plot(outdoor, label='outdoor temperature') #TODO insert legend
            plt.savefig('results/first_plot/bc', dpi=100)
            plt.clf()
            plt.close()
            col1, col2 = st.columns([3,3])
            with col1:
                initial_f = Image.open('results/first_plot/initial.png')
                st.image(initial_f, caption='Initial condition')
            with col2:
                bc_f = Image.open('results/first_plot/bc.png')
                st.image(bc_f, caption='Boundary conditions')
        except:
            pass
    elif bc == 'Input vector':
        # CSV branch: outdoor series from file, indoor held constant.
        filename = 'data/vector/' + file_name
        df_ = pd.read_csv(filename, sep=',',header=None)
        outdoor = df_.values.tolist()
        outdoor = sum(outdoor, [])  # flatten list-of-rows to a flat list
        indoor = [indoor_t for i in range(len(outdoor))]
        if initial == 'Constant temperature':
            initial = np.array([ctemp for i in range(len(tau))])
        elif initial == 'Steady-state transfer':
            middle = (initial_indoor + initial_outdoor) / 2
            help = np.array([middle for i in range(len(tau))])
            outdoor_init = np.array([initial_outdoor for i in range(60*60*3)])
            indoor_init = np.array([initial_indoor for i in range(60*60*3)])
            res_init = resistance.solve_he(R_mat, tau, R_bound, help, indoor_init, outdoor_init)
            initial = res_init[-1][1:len(res_init[-1])-1]
        # Same best-effort preview as the TRSYS branch.
        try:
            initial = initial  # no-op, kept as-is
            fig = plt.figure()
            fig.set_size_inches(8,5)
            plt.plot(initial) #TODO insert legend
            plt.savefig('results/first_plot/initial', dpi=100)
            plt.clf()
            plt.plot(indoor, label='indoor temperature') #TODO insert legend
            plt.plot(outdoor, label='outdoor temperature') #TODO insert legend
            plt.savefig('results/first_plot/bc', dpi=100)
            plt.clf()
            plt.close()
            col1, col2 = st.columns([3,3])
            with col1:
                initial_f = Image.open('results/first_plot/initial.png')
                st.image(initial_f, caption='Initial condition')
            with col2:
                bc_f = Image.open('results/first_plot/bc.png')
                st.image(bc_f, caption='Boundary conditions')
        except:
            pass
    # Transient solve and heat-flux extraction.
    results = resistance.solve_he(R_mat, tau, R_bound, initial, indoor, outdoor)
    q_calc, Q_calc = resistance.q_Q(results, mesh)
    fig = plt.figure()
    fig.set_size_inches(27.5,18.)
    plt.xticks(fontsize=23)
    plt.yticks(fontsize=23)
    plt.plot(q_calc, label="q_transient")
    # Steady-state (U-value) reference calculation for comparison.
    U_cls = ht.U_heat_flux(materials.layers)
    U_val = U_cls.uval()
    U_results, points = U_cls.q_U(U_val, indoor, outdoor)
    print(U_results)
    q_U, Q_U = U_cls.q_Q(U_val, indoor, outdoor)
    plt.plot(q_U, label='q_steady-state')
    plt.savefig('results/q.png')
    plt.clf()
    plt.close()
    # Cumulative x-coordinates of the mesh, padded on both ends for plotting.
    mesh2 = np.array([-0.02])
    for i in range(len(mesh)):
        if i == 0:
            mesh2 = np.append(mesh2, 0)
            continue
        mesh2 = np.append(mesh2, mesh2[i] + mesh[i])
    mesh2 = np.append(mesh2, mesh2[-1]+0.025)
    # Common y-axis limits across both boundary-condition series.
    min_bc = np.amin(indoor)
    min2 = np.amin(outdoor)
    if min2 < min_bc:
        min_bc = min2
    max_bc = np.amax(indoor)
    max2 = np.amax(outdoor)
    if max2 > max_bc:
        max_bc = max2
    if plot_true != False:
        # Animated side-by-side comparison, re-rendered every 60th step.
        col1, col2 = st.columns([3,3])
        with col1:
            imageLocation1 = st.empty()
        with col2:
            imageLocation2 = st.empty()
        for i in range(len(outdoor)):
            if i % 60 == 0:
                fig = plt.figure()
                fig.set_size_inches(8,5)
                plt.ylim(min_bc, max_bc)
                plt.plot(mesh2, results[i])
                plt.savefig('results/transient/last', dpi=100)
                plt.clf()
                transient = Image.open('results/transient/last.png')
                imageLocation1.image(transient, caption='Transient')
                plt.ylim(min_bc, max_bc)
                plt.plot(points, U_results[i])
                plt.savefig('results/stationary/last', dpi=100)
                plt.clf()
                plt.close()
                stationary = Image.open('results/stationary/last.png')
                imageLocation2.image(stationary, caption='Steady-state')
    col1, col2 = st.columns([0.01,5])
    with col2:
        q_fig = Image.open('results/q.png')
        st.image(q_fig, caption='Comparison between q_transient and q_steady state')
    # Export both heat-flux vectors under the user-chosen names.
    np.savetxt('results/vectors/' + v1 + '.csv', q_calc, delimiter=",")
    np.savetxt('results/vectors/' + v2 + '.csv', q_U, delimiter=",")
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 19 19:39:26 2021
@author: maurol
"""
import json
import os
class CategoryMapper(object):
    """
    Map the integer codes of a categorical variable back to the original
    strings, using a saved JSON file (one ``<feature>.json`` per feature).
    """

    def __init__(self, path, feature):
        # The mapping file is expected at <path>/<feature>.json.
        self.path_json = os.path.join(path, f"{feature}.json")
        self.get_map_index_dict()

    def load_json(self):
        """Load and return the raw mapping dict from the JSON file.

        Returns:
            dict: JSON content, keys are stringified integers.
        """
        with open(self.path_json, "r", encoding="utf-8") as json_file:
            return json.load(json_file)

    @staticmethod
    def key_to_int(mapper):
        """Convert the string keys of a loaded JSON dict to integers.

        Args:
            mapper (dict): mapping with stringified-integer keys.

        Returns:
            dict: same mapping with ``int`` keys.
        """
        return {int(k): v for k, v in mapper.items()}

    def get_map_index_dict(self):
        """Build ``self.map_index_dict``: {int index: original string}."""
        self.map_index_dict = self.key_to_int(self.load_json())

    def __getitem__(self, index):
        # isinstance is the idiomatic check; `type(x) == int` rejects int
        # subclasses and is flagged by linters. AssertionError is preserved.
        assert isinstance(index, int), "index is not an integer"
        return self.map_index_dict[index]

    def __call__(self, index):
        assert isinstance(index, int), "index is not an integer"
        return self.map_index_dict[index]
if __name__ == "__main__":
path = r"C:\Users\maurol\OneDrive\Dokumente\Python_Scripts\model_explanation_study\dataset\training"
feature = "State of residence"
mapper = CategoryMapper(path, feature)
print(mapper[3])
feature = "Favorite subjects in school"
mapper = CategoryMapper(path, feature)
print(mapper[3])
|
from tests.support.asserts import assert_error, assert_success, assert_dialog_handled
from tests.support.fixtures import create_dialog
from tests.support.inline import inline
def read_global(session, name):
    """Evaluate the page-global *name* and return its value."""
    script = "return %s;" % name
    return session.execute_script(script)
def get_title(session):
    """Send the Get Title command over the session's transport."""
    endpoint = "session/{session_id}/title".format(**vars(session))
    return session.transport.send("GET", endpoint)
def test_title_handle_prompt_dismiss(new_session, add_browser_capabilites):
    # With unhandledPromptBehavior=dismiss, Get Title must implicitly
    # dismiss an open alert/confirm/prompt and then succeed.
    _, session = new_session({"capabilities": {
        "alwaysMatch": add_browser_capabilites({"unhandledPromptBehavior": "dismiss"})}})
    session.url = inline("<title>WD doc title</title>")
    expected_title = read_global(session, "document.title")
    create_dialog(session)("alert", text="dismiss #1", result_var="dismiss1")
    result = get_title(session)
    assert_success(result, expected_title)
    assert_dialog_handled(session, "dismiss #1")
    # A dismissed alert leaves its result variable unset.
    assert read_global(session, "dismiss1") is None
    expected_title = read_global(session, "document.title")
    create_dialog(session)("confirm", text="dismiss #2", result_var="dismiss2")
    result = get_title(session)
    assert_success(result, expected_title)
    assert_dialog_handled(session, "dismiss #2")
    # A dismissed confirm resolves to False.
    assert read_global(session, "dismiss2") is False
    expected_title = read_global(session, "document.title")
    create_dialog(session)("prompt", text="dismiss #3", result_var="dismiss3")
    result = get_title(session)
    assert_success(result, expected_title)
    assert_dialog_handled(session, "dismiss #3")
    # A dismissed prompt resolves to null.
    assert read_global(session, "dismiss3") is None
def test_title_handle_prompt_accept(new_session, add_browser_capabilites):
    # With unhandledPromptBehavior=accept, Get Title must implicitly
    # accept an open alert/confirm/prompt and then succeed.
    _, session = new_session({"capabilities": {
        "alwaysMatch": add_browser_capabilites({"unhandledPromptBehavior": "accept"})}})
    session.url = inline("<title>WD doc title</title>")
    create_dialog(session)("alert", text="accept #1", result_var="accept1")
    expected_title = read_global(session, "document.title")
    result = get_title(session)
    assert_success(result, expected_title)
    assert_dialog_handled(session, "accept #1")
    # An accepted alert leaves its result variable unset.
    assert read_global(session, "accept1") is None
    expected_title = read_global(session, "document.title")
    create_dialog(session)("confirm", text="accept #2", result_var="accept2")
    result = get_title(session)
    assert_success(result, expected_title)
    assert_dialog_handled(session, "accept #2")
    # An accepted confirm resolves to True.
    assert read_global(session, "accept2") is True
    expected_title = read_global(session, "document.title")
    create_dialog(session)("prompt", text="accept #3", result_var="accept3")
    result = get_title(session)
    assert_success(result, expected_title)
    assert_dialog_handled(session, "accept #3")
    # An accepted empty prompt resolves to "" (some drivers report "undefined").
    assert read_global(session, "accept3") == "" or read_global(session, "accept3") == "undefined"
def test_title_handle_prompt_missing_value(session, create_dialog):
    # Without an unhandledPromptBehavior capability, an open dialog makes
    # Get Title fail with "unexpected alert open" while still dismissing it.
    session.url = inline("<title>WD doc title</title>")
    create_dialog("alert", text="dismiss #1", result_var="dismiss1")
    result = get_title(session)
    assert_error(result, "unexpected alert open")
    assert_dialog_handled(session, "dismiss #1")
    assert read_global(session, "dismiss1") is None
    create_dialog("confirm", text="dismiss #2", result_var="dismiss2")
    result = get_title(session)
    assert_error(result, "unexpected alert open")
    assert_dialog_handled(session, "dismiss #2")
    assert read_global(session, "dismiss2") is False
    create_dialog("prompt", text="dismiss #3", result_var="dismiss3")
    result = get_title(session)
    assert_error(result, "unexpected alert open")
    assert_dialog_handled(session, "dismiss #3")
    assert read_global(session, "dismiss3") is None
# The behavior of the `window.print` function is platform-dependent and may not
# trigger the creation of a dialog at all. Therefore, this test should only be
# run in contexts that support the dialog (a condition that may not be
# determined automatically).
# def test_title_with_non_simple_dialog(session):
# document = "<title>With non-simple dialog</title><h2>Hello</h2>"
# spawn = """
# var done = arguments[0];
# setTimeout(function() {
# done();
# }, 0);
# setTimeout(function() {
# window['print']();
# }, 0);
# """
# session.url = inline(document)
# session.execute_async_script(spawn)
#
# result = get_title(session)
# assert_error(result, "unexpected alert open")
|
import json
from django.urls import reverse
from django.contrib.auth import get_user_model
from django.test.utils import override_settings
from rest_framework.test import APIClient
from rest_framework import status
from rest_framework.test import APITestCase
from ..models import TemporaryToken
from ..factories import UserFactory
# Active user model; not referenced in the visible tests (UserFactory is
# used instead) but kept for module-level access.
User = get_user_model()
class TemporaryTokenAuthenticationTests(APITestCase):
    """End-to-end tests for TemporaryToken authentication via the API."""

    def setUp(self):
        # Fresh client and a user with a known password for each test.
        self.client = APIClient()
        self.user = UserFactory()
        self.user.set_password('Test123!')
        self.user.save()

    def test_authenticate(self):
        """
        Ensure we can authenticate on the platform by providing a valid
        TemporaryToken.
        """
        data = {
            'username': self.user.username,
            'password': 'Test123!'
        }
        response = self.client.post(reverse('token_api'), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        token_key = (json.loads(response.content)['token'])
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token_key)
        # This could be any url and any method. It is only used to test the
        # token authentication.
        response = self.client.delete(
            reverse(
                'authentication-detail',
                kwargs={'pk': token_key},
            ),
        )
        # The DELETE consumed the token, so it must no longer exist.
        self.assertFalse(TemporaryToken.objects.filter(key=token_key))
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

    @override_settings(
        REST_FRAMEWORK_TEMPORARY_TOKENS={
            'MINUTES': 30,
            'RENEW_ON_SUCCESS': False,
            'USE_AUTHENTICATION_BACKENDS': False,
        }
    )
    def test_authenticate_no_renew(self):
        """
        Ensure that a token is not renewed when the authenticated user calls
        the API.
        """
        data = {
            'username': self.user.username,
            'password': 'Test123!'
        }
        response = self.client.post(reverse('token_api'), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        token_key = (json.loads(response.content)['token'])
        token = TemporaryToken.objects.get(key=token_key)
        expiration_date = token.expires
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token_key)
        # This could be any url and any method. It is only used to test the
        # token authentication.
        response = self.client.get(
            reverse(
                'authentication-detail',
                kwargs={'pk': token_key},
            ),
        )
        token.refresh_from_db()
        # With RENEW_ON_SUCCESS disabled, the expiry must be unchanged.
        self.assertEqual(expiration_date, token.expires)
        self.assertEqual(
            response.status_code,
            status.HTTP_405_METHOD_NOT_ALLOWED,
        )

    def test_authenticate_invalid_token(self):
        """
        Ensure we can't authenticate on the platform by providing an invalid
        TemporaryToken.
        """
        self.client.credentials(HTTP_AUTHORIZATION='Token invalid_token')
        # This could be any url and any method. It is only used to test the
        # token authentication.
        response = self.client.delete(
            reverse(
                'authentication-detail',
                kwargs={'pk': 'invalid_token'},
            ),
        )
        content = {"detail": "Invalid token"}
        self.assertEqual(json.loads(response.content), content)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_authenticate_expired_token(self):
        """
        Ensure we can't authenticate on the platform by providing an expired
        TemporaryToken.
        """
        data = {
            'username': self.user.username,
            'password': 'Test123!'
        }
        response = self.client.post(reverse('token_api'), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        token = TemporaryToken.objects.get(
            user__username=self.user.username,
        )
        # Force the token past its expiry before using it.
        token.expire()
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
        # This could be any url and any method. It is only used to test the
        # token authentication.
        # NOTE(review): the pk here is 'invalid_token' (likely copy-paste);
        # authentication fails before the pk is resolved, so the assertions
        # still hold — confirm intent.
        response = self.client.delete(
            reverse(
                'authentication-detail',
                kwargs={'pk': 'invalid_token'},
            ),
        )
        content = {'detail': 'Token has expired'}
        self.assertEqual(json.loads(response.content), content)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_authenticate_inactive_user(self):
        """
        Ensure we can't authenticate on the platform by providing a valid
        TemporaryToken if user is inactive.
        """
        data = {
            'username': self.user.username,
            'password': 'Test123!'
        }
        response = self.client.post(reverse('token_api'), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        token = TemporaryToken.objects.get(
            user__username=self.user.username,
        )
        # Deactivate the user after the token was issued.
        setattr(self.user, 'is_active', False)
        self.user.save()
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
        # This could be any url and any method. It is only used to test the
        # token authentication.
        response = self.client.delete(
            reverse(
                'authentication-detail',
                kwargs={'pk': token.key},
            ),
        )
        content = {'detail': 'User inactive or deleted'}
        self.assertEqual(json.loads(response.content), content)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
|
import yaml
from flask import Blueprint, request, jsonify
from base64 import b64decode, b64encode
import sys
sys.path.append("..")
import pandas as pd
from tasks.tasks import Task
from data.data import Data
from models.model import Model
import argparse
import yaml
import time
from pathlib import Path
import glob
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
sys.path.append("src/yolo")
from detect import detect
# Flask blueprint for the inference endpoint; configuration is loaded once
# at import time from the project config file.
infer_api = Blueprint('infer_api', __name__)
config_path = "config/config.yaml"
with open(config_path, "r") as fp:
    config = yaml.load(fp, Loader=yaml.FullLoader)
@infer_api.route("/infer", methods=['GET'])
def infer():
    """Run inference on an uploaded file.

    Expects a multipart upload under the key 'file'. ``.xlsx`` files are
    parsed into a DataFrame and fed to the saved forecasting model; ``.jpg``
    files are routed to the YOLO detector. Returns the stringified result
    (``"None"`` when nothing matched).

    NOTE(review): file uploads are conventionally sent with POST; serving
    this on GET relies on client-specific behaviour — consider adding
    'POST' to ``methods``.
    """
    request_method = request.method
    res = None
    if request_method == "GET":
        file = request.files['file']
        # task_id = request.files['task_id']
        upload_name = file.filename
        print("File Name : ", upload_name)
        ext = upload_name.split(".")[-1]
        if ext == "xlsx":
            # BUG FIX: the original first read the stream into an unused
            # variable, leaving the upload cursor at EOF so pandas saw an
            # empty stream. Parse the upload directly instead.
            dframe = pd.read_excel(file, index_col="date")
            model = Model()
            location = config["model"]["save_location"]
            res = model.infer(location, "okP0KEPL", dframe)
        elif ext.lower() == "jpg":
            with torch.no_grad():
                # NOTE(review): detect() receives only the client-side file
                # name, not a saved server path — verify the detector can
                # resolve it.
                res, text = detect(upload_name)
    return str(res)
# -*- codeing = utf-8 -*-
# @Time: 2022/2/2 9:42
# @Author: Coisini
# @File: testXwlt.py
# @Software: PyCharm
import xlwt
workbook = xlwt.Workbook(encoding="utf-8") # 创建workbook对象
worksheet = workbook.add_sheet("sheet1") # 创建工作表
worksheet.write(0, 0, "hello") # 写入数据,第一个参数是"行", 第二个参数是"列", 第三个参数内容
workbook.save("student.xls") # 保存数据表
|
class DatasetSetup:
    """Abstract base describing the interface a dataset setup must provide;
    all hooks are stubs to be overridden by concrete dataset classes."""

    def __init__(self):
        # Number of target classes; set by subclasses.
        self.num_classes = None
        # Output size of the bottom model; set by subclasses.
        self.size_bottom_out = None

    def set_datasets_for_ssl(self, file_path, n_labeled, party_num):
        """Prepare datasets for semi-supervised learning (stub)."""
        pass

    def get_transforms(self):
        """Return the training/eval transforms (stub)."""
        pass

    def get_normalize_transform(self):
        """Return the normalization transform (stub)."""
        pass

    def get_transformed_dataset(self, file_path, party_num, train):
        """Load and transform the dataset for one party (stub)."""
        pass

    def clip_one_party_data(self, x, half):
        """
        :param x:
        :param half: how many features the adversary has. For the IDC dataset, it means how many pics
        the adversary has.
        :return:
        """
        pass
|
import struct
from network.bytebuffer import ByteBuffer
from network.bytebuffer import TypeSize
class CommandType(object):
    """Opcode values for the game's binary protocol.

    Each constant is the one-byte command type written at offset 3 of every
    packet; commented-out entries are opcodes that are reserved or no longer
    used.
    """
    MOVE = 0x00
    PLAYER_LIST = 0x01
    NEW_PLAYER = 0x02
    LEAVE = 0x03
    LOGIN_REQUEST = 0x04
    LOGIN_RESULT = 0x05
    JOIN = 0x06
    INIT_PLAYER_INFO = 0x07
    PLAYER_LIST_RESULT = 0x08
    SHOOT = 0x09
    SHOOT_RESULT = 0x0a
    BULLET_MOVE = 0x0b
    BULLET_DESTROY = 0x0c
    PLAYER_ROTATE = 0x0d
    GENERATE_ENEMY = 0x0e
    # ENEMY_SHOOT = 0x0f
    BULLET_HIT = 0x10
    # ENEMY_BULLET_HIT = 0x11
    # ENEMY_BULLET_DESTROY = 0x12
    GENERATE_ITEM = 0x13
    PICK_UP_ITEM = 0x14
    ENEMY_DIE = 0x15
    PLAYER_DIE = 0x16
    ENEMY_MOVE = 0x17
    ENEMY_LIST = 0x18
    ENEMY_LIST_RESULT = 0x19
    BULLET_HIT_RESULT = 0x1a
    RECHARGE = 0x1b
    RECHARGE_RESULT = 0x1c
    DEBUFF_ATTACH = 0x1d
    DEBUFF_REMOVE = 0x1e
    ENMEY_ATTACK = 0x1f
    ENMEY_ATTACK_RESULT = 0x20
    PICK_UP_ITEM_RESULT = 0x21
    RECOVER_HP = 0x22
    SUPPLY_BULLETS = 0x23
    RUNNING_SHOOTING = 0x24
    FAN_SHOOTING = 0x25
    ITEM_LIST = 0x26
    ITEM_LIST_RESULT = 0x27
    UPDATE_PLAYER_KILL_COUNT = 0x28
    ENMEY_BULLET_HIT_RESULT = 0x29
    PLAYER_REBORN = 0x2a
    PLAYER_REBORN_RESULT = 0x2b
    REGISTER = 0x2c
    REGISTER_RESULT = 0x2d
    LEAVE_RESULT = 0x2e
    # new commands
    GUN_SHOOT = 0x2f
    GUN_SHOOT_RESULT = 0x30
    ENEMY_GUN_SHOOT = 0x31
    ENEMY_GUN_SHOOT_RESULT = 0x32
    GAME_RESULT = 0x33
    SELF_DESTRUCT = 0x34
    SELF_DESTRUCT_RESULT = 0x35
    EXPLODE = 0x36
    EXPLODE_RESULT = 0x37
    PUT_TRAP = 0x38
    PUT_TRAP_RESULT = 0x39
    ENMEY_DEBUFF_DAMAGED = 0x3a
    ENMEY_DEBUFF_DAMAGED_RESULT = 0x3b
    DEBUFF_ATTACH_RESULT = 0x3c
    DEBUFF_REMOVE_RESULT = 0x3d
    TRAP_LIST = 0x3e
    TRAP_LIST_RESULT = 0x3f
    TANK_MOVE = 0x40
    TANK_BODY_ROTATE = 0x41
    TANK_TURRET_ROTATE = 0x42
    # TANK_SHOOT = 0x43
    # TANK_SHOOT_RESULT = 0x44
    DRIVE_TANK = 0x45
    REQUIRE_TANK_INFO = 0x46
    REQUIRE_TANK_INFO_RESULT = 0x47
    TANK_DESTROY = 0x48
    DRIVE_TANK_RESULT = 0x49
    WAVE_OVER = 0x4a
    NEW_WAVE = 0x4b
    NEW_GAME = 0x4c
    NEW_GAME_RESULT = 0x4d
    REQUIRE_STRONG_POINT_INFO = 0x4e
    REQUIRE_STRONG_POINT_INFO_RESULT = 0x4f
    STRONG_POINT_ATTACKED = 0x50
    STRONG_POINT_ATTACKED_RESULT = 0x51
    BUY_TRAP = 0x52
    BUY_TRAP_RESULT = 0x53
    UPDATE_GOLD = 0x54
    BUY_GRENATE = 0x55
    BUY_GRENATE_RESULT = 0x56
class CommandUsage(object):
    """Direction of a command relative to this server.

    Note: 'RecieveOnly' is a historical misspelling kept for compatibility.
    """
    DualWay = 0x00
    SendOnly = 0x01
    RecieveOnly = 0x02
# Registry mapping receivable command-type opcodes to their command classes;
# populated by the @command_mapping decorator.
COMMAND_MAP = {}
def command_mapping(command_type, command_usage):
    """Class decorator for NetworkCommand subclasses.

    Registers receivable commands (DualWay / RecieveOnly) in COMMAND_MAP,
    stamps the class with its usage, and wraps ``__init__`` so every
    instance records its command type.
    """
    def wrapper(cls):
        receivable = command_usage in (CommandUsage.DualWay,
                                       CommandUsage.RecieveOnly)
        if receivable:
            COMMAND_MAP[command_type] = cls

        original_init = cls.__init__

        def patched_init(self, *args, **kwds):
            original_init(self, *args, **kwds)
            self.cmd_type = command_type

        cls.__init__ = patched_init
        cls.cmd_usage = command_usage
        return cls
    return wrapper
class NetworkCommand(object):
    """Base class for all protocol commands.

    Wire layout: [int16 length][byte user_id][byte cmd_type] followed by a
    command-specific payload.
    """
    # Default direction; overwritten per subclass by @command_mapping.
    cmd_usage = CommandUsage.DualWay

    @staticmethod
    def create_command_from_bytearray(byte_array):
        """Look up the registered class for an incoming packet, parse it,
        and return the command object."""
        cmd_type = byte_array[3]
        cmd_cls = COMMAND_MAP[cmd_type]
        cmd_obj = cmd_cls()
        cmd_obj.parse(byte_array)
        return cmd_obj

    def parse(self, byte_array):
        """Decode the common 4-byte header; subclasses add payload parsing."""
        # self.cmd_len = struct.unpack("<h", byte_array[0:2])[0]
        self.cmd_len, _ = ByteBuffer.get_int16(byte_array, 0)
        self.user_id = byte_array[2]
        self.cmd_type = byte_array[3]

    def format(self):
        """Serialize: size the buffer via calc_buffer_size, write the
        header fields, then the payload via fill_buffer."""
        self.buffer = ByteBuffer()
        self.calc_buffer_size(self.buffer)
        self.buffer.generate_buffer()
        self.buffer.put_int16(self.buffer.max_size)
        self.buffer.put_byte(self.user_id)
        self.buffer.put_byte(self.cmd_type)
        self.fill_buffer(self.buffer)
        return self.buffer.get_buffer()

    def __init__(self):
        self.cmd_len = 0
        # Default sender id 15 — presumably the server itself; TODO confirm.
        self.user_id = 15
        self.cmd_type = None
        self.buffer = None

    def calc_buffer_size(self, byte_buffer):
        """Reserve space for the payload; required for sendable commands."""
        raise NotImplementedError

    def fill_buffer(self, byte_buffer):
        """Write the payload bytes; required for sendable commands."""
        raise NotImplementedError
@command_mapping(CommandType.MOVE, CommandUsage.DualWay)
class MoveCommand(NetworkCommand):
    """Player movement update; relayed verbatim, so the raw packet is kept."""

    def __init__(self):
        super(MoveCommand, self).__init__()
        self.position = None
        self.rotation = None
        self.byte_array = None

    def parse(self, byte_array):
        self.byte_array = byte_array
        # position
        # Payload starts at offset 5: three floats (x, y, z).
        start = 5
        position_x, start = ByteBuffer.get_float(byte_array, start)
        position_y, start = ByteBuffer.get_float(byte_array, start)
        position_z, start = ByteBuffer.get_float(byte_array, start)
        # start += 4
        self.position = [position_x, position_y, position_z]
        # rotation
        self.rotation, start = ByteBuffer.get_float(byte_array, start)

    def format(self):
        # Echo the original packet back unchanged.
        return self.byte_array
@command_mapping(CommandType.LOGIN_REQUEST, CommandUsage.RecieveOnly)
class LoginRequestCommand(NetworkCommand):
    """Client credentials: [int16 len][username][int16 len][password]."""

    def __init__(self):
        super(LoginRequestCommand, self).__init__()
        self.username = None
        self.password = None

    @staticmethod
    def _read_string(byte_array, offset):
        # Each field is a little-endian int16 length followed by UTF-8 bytes;
        # returns the decoded string and the offset just past it.
        (length,) = struct.unpack("<h", byte_array[offset:offset + 2])
        start = offset + 2
        end = start + length
        return byte_array[start:end].decode("utf-8"), end

    def parse(self, byte_array):
        # Strings start right after the 4-byte header (offset 4).
        self.username, offset = self._read_string(byte_array, 4)
        self.password, _ = self._read_string(byte_array, offset)
@command_mapping(CommandType.LOGIN_RESULT, CommandUsage.SendOnly)
class LoginResultCommand(NetworkCommand):
    """Tells a client whether its login succeeded and which user id it got."""

    def __init__(self, result, userID):
        super(LoginResultCommand, self).__init__()
        self.result = result
        self.userID = userID

    def calc_buffer_size(self, byte_buffer):
        # One byte for the success flag and one for the user id.
        for _ in range(2):
            byte_buffer.extends(TypeSize.Byte)

    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(int(bool(self.result)))
        byte_buffer.put_byte(self.userID)
@command_mapping(CommandType.JOIN, CommandUsage.RecieveOnly)
class JoinCommand(NetworkCommand):
    """Join request; carries no payload beyond the common header."""

    def __init__(self):
        super(JoinCommand, self).__init__()
@command_mapping(CommandType.INIT_PLAYER_INFO, CommandUsage.SendOnly)
class InitPlayerInfoCommand(NetworkCommand):
    """Full snapshot of a joining player's own state (ammo, pose, hp, gold,
    traps, grenades) plus a host flag.

    fill_buffer must write fields in exactly the order calc_buffer_size
    reserves them.
    """

    def __init__(self, player_info, is_host):
        super(InitPlayerInfoCommand, self).__init__()
        self.player_info = player_info
        self.position = player_info.position
        self.rotation = player_info.rotation
        self.hp = player_info.hp
        self.shooting_type = player_info.shooting_type
        self.is_host = is_host
        self.gold = player_info.gold
        self.damage_trap_count = player_info.damage_trap_count
        self.slow_trap_count = player_info.slow_trap_count
        self.grenade_count = player_info.grenade_count

    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Int16)   # cur_bullet
        byte_buffer.extends(TypeSize.Int16)   # max_bullet
        byte_buffer.extends(TypeSize.Byte)    # player_type
        byte_buffer.extends(TypeSize.Float)   # position x
        byte_buffer.extends(TypeSize.Float)   # position y
        byte_buffer.extends(TypeSize.Float)   # position z
        byte_buffer.extends(TypeSize.Float)   # rotation
        byte_buffer.extends(TypeSize.Int32)   # hp
        byte_buffer.extends(TypeSize.Byte)    # shooting type
        byte_buffer.extends(TypeSize.Int32)   # kills
        byte_buffer.extends(TypeSize.Byte)    # (see fill order below)
        byte_buffer.extends(TypeSize.Int32)   # gold
        byte_buffer.extends(TypeSize.Byte)    # damage traps
        byte_buffer.extends(TypeSize.Byte)    # slow traps
        byte_buffer.extends(TypeSize.Byte)    # grenades / host flag

    def fill_buffer(self, byte_buffer):
        # body
        byte_buffer.put_int16(self.player_info.cur_bullet)
        byte_buffer.put_int16(self.player_info.max_bullet)
        byte_buffer.put_byte(self.player_info.player_type)
        # position
        byte_buffer.put_float(self.position[0])
        byte_buffer.put_float(self.position[1])
        byte_buffer.put_float(self.position[2])
        # rotation
        byte_buffer.put_float(self.rotation)
        # hp
        byte_buffer.put_int32(self.hp)
        # shooting type
        byte_buffer.put_byte(self.shooting_type)
        # kills
        byte_buffer.put_int32(self.player_info.kills)
        # gold traps
        byte_buffer.put_int32(self.gold)
        byte_buffer.put_byte(self.damage_trap_count)
        byte_buffer.put_byte(self.slow_trap_count)
        # grenade
        byte_buffer.put_byte(self.grenade_count)
        # is host
        if self.is_host:
            byte_buffer.put_byte(1)
        else:
            byte_buffer.put_byte(0)
@command_mapping(CommandType.NEW_PLAYER, CommandUsage.SendOnly)
class NewPlayerCommand(NetworkCommand):
    """Broadcast to existing clients when another player joins: ammo, type,
    id, pose, hp, shooting type and kill count of the newcomer."""

    def __init__(self, player_info, position, rotation):
        super(NewPlayerCommand, self).__init__()
        self.player_info = player_info
        self.position = position
        self.rotation = rotation

    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Int16)      # cur_bullet
        byte_buffer.extends(TypeSize.Int16)      # max_bullet
        byte_buffer.extends(TypeSize.Byte)       # player_type
        byte_buffer.extends(TypeSize.Byte)       # user_id
        byte_buffer.extends(TypeSize.Float * 3)  # position x/y/z
        byte_buffer.extends(TypeSize.Float)      # rotation
        byte_buffer.extends(TypeSize.Int32)      # hp
        byte_buffer.extends(TypeSize.Byte)       # shooting type
        byte_buffer.extends(TypeSize.Int32)      # kills

    def fill_buffer(self, byte_buffer):
        # body
        byte_buffer.put_int16(self.player_info.cur_bullet)
        byte_buffer.put_int16(self.player_info.max_bullet)
        byte_buffer.put_byte(self.player_info.player_type)
        byte_buffer.put_byte(self.player_info.user_id)
        # position
        byte_buffer.put_float(self.position[0])
        byte_buffer.put_float(self.position[1])
        byte_buffer.put_float(self.position[2])
        # rotation
        byte_buffer.put_float(self.rotation)
        # hp
        byte_buffer.put_int32(self.player_info.hp)
        # shooting type
        byte_buffer.put_byte(self.player_info.shooting_type)
        # kills
        byte_buffer.put_int32(self.player_info.kills)
@command_mapping(CommandType.PLAYER_LIST, CommandUsage.RecieveOnly)
class PlayerListCommand(NetworkCommand):
    """Request for the current player list; header-only, no payload."""

    def __init__(self):
        super(PlayerListCommand, self).__init__()
@command_mapping(CommandType.PLAYER_LIST_RESULT, CommandUsage.SendOnly)
class PlayerListResultCommand(NetworkCommand):
    """Reply to PLAYER_LIST: a count byte followed by one record per player
    other than the requester (ammo, type, id, pose, hp, shooting type,
    kills)."""

    def __init__(self, server, sender):
        super(PlayerListResultCommand, self).__init__()
        self.server = server
        self.sender = sender

    def _other_player_count(self):
        # Number of connected players excluding the requesting socket.
        # BUG FIX: the original used dict.has_key(), which was removed in
        # Python 3; the `in` operator behaves identically on both 2 and 3.
        count = len(self.server.socket_map)
        if self.sender in self.server.socket_map:
            count -= 1
        return count

    def calc_buffer_size(self, byte_buffer):
        player_count = self._other_player_count()
        if player_count > 0:
            byte_buffer.extends(TypeSize.Byte)
            byte_buffer.extends(
                player_count *
                (TypeSize.Int16
                 + TypeSize.Int16
                 + TypeSize.Byte
                 + TypeSize.Byte
                 + TypeSize.Float * 3
                 + TypeSize.Float
                 + TypeSize.Int32
                 + TypeSize.Byte
                 + TypeSize.Int32)
            )
        else:
            # Only the zero count byte.
            byte_buffer.extends(TypeSize.Byte)

    def fill_buffer(self, byte_buffer):
        player_count = self._other_player_count()
        if player_count > 0:
            # players
            byte_buffer.put_byte(player_count)
            for (s, p) in self.server.socket_map.items():
                if s != self.sender:
                    byte_buffer.put_int16(p.cur_bullet)
                    byte_buffer.put_int16(p.max_bullet)
                    byte_buffer.put_byte(p.player_type)
                    byte_buffer.put_byte(p.user_id)
                    # position
                    byte_buffer.put_float(p.position[0])
                    byte_buffer.put_float(p.position[1])
                    byte_buffer.put_float(p.position[2])
                    # rotation
                    byte_buffer.put_float(p.rotation)
                    # hp
                    byte_buffer.put_int32(p.hp)
                    # shooting type
                    byte_buffer.put_byte(p.shooting_type)
                    # kills
                    byte_buffer.put_int32(p.kills)
        else:
            byte_buffer.put_byte(0)
@command_mapping(CommandType.SHOOT, CommandUsage.DualWay)
class ShootCommand(NetworkCommand):
    """Shoot request; relayed verbatim, so the raw packet is kept."""

    def __init__(self):
        super(ShootCommand, self).__init__()
        self.byte_array = None
        self.bullet_count = None
        self.is_granate = None

    def parse(self, byte_array):
        self.byte_array = byte_array
        # Payload: byte 5 = bullets fired, byte 6 = grenade flag.
        self.bullet_count = byte_array[5]
        grenade_flag = byte_array[6]
        self.is_granate = (grenade_flag == 1)

    def format(self):
        # Echo the original packet back unchanged.
        return self.byte_array
@command_mapping(CommandType.SHOOT_RESULT, CommandUsage.SendOnly)
class ShootResultCommand(NetworkCommand):
    """Ammo state pushed back to the shooter after a shot is processed."""

    def __init__(self, cur_bullet_count, max_bullet_count, grenade_count):
        super(ShootResultCommand, self).__init__()
        self.cur_bullet_count = cur_bullet_count
        self.max_bullet_count = max_bullet_count
        self.grenade_count = grenade_count

    def calc_buffer_size(self, byte_buffer):
        # Two int16 bullet counters plus one grenade-count byte.
        for field_size in (TypeSize.Int16, TypeSize.Int16, TypeSize.Byte):
            byte_buffer.extends(field_size)

    def fill_buffer(self, byte_buffer):
        byte_buffer.put_int16(self.cur_bullet_count)
        byte_buffer.put_int16(self.max_bullet_count)
        byte_buffer.put_byte(self.grenade_count)
@command_mapping(CommandType.BULLET_MOVE, CommandUsage.DualWay)
class BulletMoveCommand(NetworkCommand):
def __init__(self):
super(BulletMoveCommand, self).__init__()
self.byte_array = None
def parse(self, byte_array):
self.byte_array = byte_array
def format(self):
return self.byte_array
@command_mapping(CommandType.BULLET_DESTROY, CommandUsage.DualWay)
class BulletDestroyCommand(NetworkCommand):
def __init__(self):
super(BulletDestroyCommand, self).__init__()
self.byte_array = None
def parse(self, byte_array):
self.byte_array = byte_array
def format(self):
return self.byte_array
@command_mapping(CommandType.PLAYER_ROTATE, CommandUsage.DualWay)
class PlayerRotateCommand(NetworkCommand):
def __init__(self):
super(PlayerRotateCommand, self).__init__()
self.byte_array = None
self.rotation = None
def parse(self, byte_array):
self.byte_array = byte_array
start = 5
# self.rotation = struct.unpack("<f", byte_array[start: start+4])[0]
self.rotation, start = ByteBuffer.get_float(byte_array, start)
def format(self):
return self.byte_array
@command_mapping(CommandType.GENERATE_ENEMY, CommandUsage.SendOnly)
class GenerateEnemyCommand(NetworkCommand):
def __init__(self, enemy_infos, is_host):
super(GenerateEnemyCommand, self).__init__()
self.enemy_infos = enemy_infos
self.is_host = is_host
def calc_buffer_size(self, byte_buffer):
count = len(self.enemy_infos)
byte_buffer.extends(TypeSize.Byte)
byte_buffer.extends(
count * (
TypeSize.Int32
+ TypeSize.Byte
+ TypeSize.Float * 3
+ TypeSize.Float
+ TypeSize.Int32
+ TypeSize.Byte
)
)
def fill_buffer(self, byte_buffer):
count = len(self.enemy_infos)
byte_buffer.put_byte(count)
for enemy in self.enemy_infos:
byte_buffer.put_int32(enemy.enemy_id)
byte_buffer.put_byte(enemy.enemy_type)
byte_buffer.put_float(enemy.position[0])
byte_buffer.put_float(enemy.position[1])
byte_buffer.put_float(enemy.position[2])
byte_buffer.put_float(enemy.rotation)
byte_buffer.put_int32(enemy.hp)
if self.is_host:
byte_buffer.put_byte(1)
else:
byte_buffer.put_byte(0)
@command_mapping(CommandType.ENEMY_MOVE, CommandUsage.DualWay)
class EnemyMoveCommand(NetworkCommand):
def __init__(self):
super(EnemyMoveCommand, self).__init__()
self.enemy_id = None
self.position = None
self.rotation = None
self.byte_array = None
def parse(self, byte_array):
self.byte_array = byte_array
start = 4
self.enemy_id, start = ByteBuffer.get_uint32(byte_array, start)
position_x, start = ByteBuffer.get_float(byte_array, start)
position_y, start = ByteBuffer.get_float(byte_array, start)
position_z, start = ByteBuffer.get_float(byte_array, start)
self.position = [position_x, position_y, position_z]
# rotation
self.rotation, start = ByteBuffer.get_float(byte_array, start)
def format(self):
return self.byte_array
# @command_mapping(CommandType.ENEMY_SHOOT, CommandUsage.DualWay)
# class EnemyShootCommand(NetworkCommand):
# def __init__(self):
# super(EnemyShootCommand, self).__init__()
# self.byte_array = None
# def parse(self, byte_array):
# self.byte_array = byte_array
# def get_bytes(self):
# return self.byte_array
# @command_mapping(CommandType.ENEMY_BULLET_DESTROY, CommandUsage.DualWay)
# class EnemyBulletDestroyCommand(NetworkCommand):
# def __init__(self):
# super(EnemyBulletDestroyCommand, self).__init__()
# self.byte_array = None
# def parse(self, byte_array):
# self.byte_array = byte_array
# def get_bytes(self):
# return self.byte_array
@command_mapping(CommandType.ENEMY_LIST, CommandUsage.RecieveOnly)
class EnemyListCommand(NetworkCommand):
    """Client request for the current enemy list; carries no payload."""
    def __init__(self):
        super(EnemyListCommand, self).__init__()
@command_mapping(CommandType.ENEMY_LIST_RESULT, CommandUsage.SendOnly)
class EnemyListResultCommand(NetworkCommand):
    """Reply to ENEMY_LIST: count byte + one record per enemy (+ host flag each)."""
    def __init__(self, enemy_infos, is_host):
        super(EnemyListResultCommand, self).__init__()
        self.enemy_infos = enemy_infos
        self.is_host = is_host
    def calc_buffer_size(self, byte_buffer):
        count = len(self.enemy_infos)
        byte_buffer.extends(TypeSize.Byte)
        # Per-enemy record: id, type, position x3, rotation, hp, host flag.
        byte_buffer.extends(
            count *
            (
                TypeSize.Int32
                + TypeSize.Byte
                + TypeSize.Float * 3
                + TypeSize.Float
                + TypeSize.Int32
                + TypeSize.Byte
            )
        )
        # NOTE(review): this extra byte is not matched by a put_* call in
        # fill_buffer below -- looks like over-reservation; confirm intended.
        byte_buffer.extends(TypeSize.Byte)
    def fill_buffer(self, byte_buffer):
        count = len(self.enemy_infos)
        byte_buffer.put_byte(count)
        for enemy in self.enemy_infos:
            byte_buffer.put_int32(enemy.enemy_id)
            byte_buffer.put_byte(enemy.enemy_type)
            byte_buffer.put_float(enemy.position[0])
            byte_buffer.put_float(enemy.position[1])
            byte_buffer.put_float(enemy.position[2])
            byte_buffer.put_float(enemy.rotation)
            byte_buffer.put_int32(enemy.hp)
            if self.is_host:
                byte_buffer.put_byte(1)
            else:
                byte_buffer.put_byte(0)
@command_mapping(CommandType.BULLET_HIT, CommandUsage.RecieveOnly)
class BulletHitCommand(NetworkCommand):
    """Client report that a bullet hit an enemy."""
    def __init__(self):
        super(BulletHitCommand, self).__init__()
        self.shooter_id = None
        self.bullet_type = None
        self.hit_enemy_id = None
        self.bullet_id = None
    def parse(self, byte_array):
        # Payload starts after the 4-byte header.
        start = 4
        self.shooter_id, start = ByteBuffer.get_byte(byte_array, start)
        self.bullet_type, start = ByteBuffer.get_byte(byte_array, start)
        self.hit_enemy_id, start = ByteBuffer.get_uint32(byte_array, start)
        self.bullet_id, start = ByteBuffer.get_uint32(byte_array, start)
@command_mapping(CommandType.BULLET_HIT_RESULT, CommandUsage.SendOnly)
class BulletHitResultCommand(NetworkCommand):
    """Authoritative hit outcome: the struck enemy's remaining hp."""
    def __init__(self, shooter_id, enemy_id, enemy_hp, bullet_id):
        super(BulletHitResultCommand, self).__init__()
        self.shooter_id = shooter_id
        self.enemy_id = enemy_id
        self.enemy_hp = enemy_hp
        self.bullet_id = bullet_id
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Int32)
        byte_buffer.extends(TypeSize.Int32)
        byte_buffer.extends(TypeSize.Int32)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(self.shooter_id)
        byte_buffer.put_int32(self.enemy_id)
        byte_buffer.put_int32(self.enemy_hp)
        byte_buffer.put_int32(self.bullet_id)
@command_mapping(CommandType.ENEMY_DIE, CommandUsage.SendOnly)
class EnemyDieCommand(NetworkCommand):
    """Announces an enemy kill credited to shooter_id.

    NOTE: the "enmey_id" misspelling is part of the public attribute/parameter
    names and is kept for compatibility with existing callers.
    """
    def __init__(self, shooter_id, enmey_id):
        super(EnemyDieCommand, self).__init__()
        self.shooter_id = shooter_id
        self.enmey_id = enmey_id
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Int32)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(self.shooter_id)
        byte_buffer.put_int32(self.enmey_id)
@command_mapping(CommandType.RECHARGE, CommandUsage.DualWay)
class RechargeCommand(NetworkCommand):
    """Relay of a reload request; also decodes the shooter id."""
    def __init__(self):
        super(RechargeCommand, self).__init__()
        self.byte_array = None
        self.shooter_id = None
    def parse(self, byte_array):
        self.byte_array = byte_array
        start = 4
        # Direct index (not ByteBuffer.get_byte): yields the raw element of
        # byte_array -- presumably equivalent here; verify on Python 2 vs 3.
        self.shooter_id = byte_array[start]
    def format(self):
        return self.byte_array
@command_mapping(CommandType.RECHARGE_RESULT, CommandUsage.SendOnly)
class RechargeResultCommand(NetworkCommand):
    """Server reply with the post-reload ammo counts."""
    def __init__(self, cur_bullet_count, max_bullet_count):
        super(RechargeResultCommand, self).__init__()
        self.cur_bullet_count = cur_bullet_count
        self.max_bullet_count = max_bullet_count
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Int16)
        byte_buffer.extends(TypeSize.Int16)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_int16(self.cur_bullet_count)
        byte_buffer.put_int16(self.max_bullet_count)
@command_mapping(CommandType.ENMEY_ATTACK, CommandUsage.DualWay)
class EnemyAttackCommand(NetworkCommand):
    """Relay of an enemy melee attack on a player; decodes both ids."""
    def __init__(self):
        super(EnemyAttackCommand, self).__init__()
        self.attacked_user_id = None
        self.attacking_enemy_id = None
        self.byte_array = None
    def parse(self, byte_array):
        self.byte_array = byte_array
        start = 4
        self.attacked_user_id, start = ByteBuffer.get_byte(byte_array, start)
        self.attacking_enemy_id, start = ByteBuffer.get_uint32(
            byte_array, start)
    def format(self):
        return self.byte_array
@command_mapping(CommandType.ENMEY_ATTACK_RESULT, CommandUsage.SendOnly)
class EnemyAttackResultCommand(NetworkCommand):
    """Authoritative attack outcome: the victim's remaining hp."""
    def __init__(self, attacked_user_id, hp):
        super(EnemyAttackResultCommand, self).__init__()
        self.attacked_user_id = attacked_user_id
        self.hp = hp
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Int32)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(self.attacked_user_id)
        byte_buffer.put_int32(self.hp)
@command_mapping(CommandType.GENERATE_ITEM, CommandUsage.SendOnly)
class GenerateItemCommand(NetworkCommand):
    """Spawns a single pickup item on clients: id, type, position."""
    def __init__(self, item_id, item_type, item_position):
        super(GenerateItemCommand, self).__init__()
        self.item_id = item_id
        self.item_type = item_type
        self.item_position = item_position
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Int32)
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Float * 3)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_int32(self.item_id)
        byte_buffer.put_byte(self.item_type)
        byte_buffer.put_float(self.item_position[0])
        byte_buffer.put_float(self.item_position[1])
        byte_buffer.put_float(self.item_position[2])
@command_mapping(CommandType.PICK_UP_ITEM, CommandUsage.RecieveOnly)
class PickUpItemCommand(NetworkCommand):
    """Client request to pick up an item."""
    def __init__(self):
        super(PickUpItemCommand, self).__init__()
        self.player_id = None
        self.item_id = None
    def parse(self, byte_array):
        start = 4
        self.player_id, start = ByteBuffer.get_byte(byte_array, start)
        self.item_id, start = ByteBuffer.get_uint32(byte_array, start)
@command_mapping(CommandType.PICK_UP_ITEM_RESULT, CommandUsage.SendOnly)
class PickUpItemResultCommand(NetworkCommand):
    """Confirms an item pickup so clients can despawn it."""
    def __init__(self, item_id):
        super(PickUpItemResultCommand, self).__init__()
        self.item_id = item_id
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Int32)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_int32(self.item_id)
@command_mapping(CommandType.RECOVER_HP, CommandUsage.SendOnly)
class RecoverHpCommand(NetworkCommand):
    """Pushes a player's hp after a health pickup."""
    def __init__(self, user_id, hp):
        super(RecoverHpCommand, self).__init__()
        self.player_id = user_id
        self.hp = hp
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Int32)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(self.player_id)
        byte_buffer.put_int32(self.hp)
@command_mapping(CommandType.SUPPLY_BULLETS, CommandUsage.SendOnly)
class SupplyBulletsCommand(NetworkCommand):
    """Pushes a player's ammo counts after an ammo pickup."""
    def __init__(self, user_id, cur_bullets, max_bullets):
        super(SupplyBulletsCommand, self).__init__()
        self.player_id = user_id
        self.cur_bullet = cur_bullets
        self.max_bullet = max_bullets
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Int16)
        byte_buffer.extends(TypeSize.Int16)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(self.player_id)
        byte_buffer.put_int16(self.cur_bullet)
        byte_buffer.put_int16(self.max_bullet)
@command_mapping(CommandType.FAN_SHOOTING, CommandUsage.SendOnly)
class FanShootingCommand(NetworkCommand):
    """Grants the fan-shooting power-up to a player."""
    def __init__(self, user_id):
        super(FanShootingCommand, self).__init__()
        self.player_id = user_id
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Byte)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(self.player_id)
@command_mapping(CommandType.RUNNING_SHOOTING, CommandUsage.SendOnly)
class RunningShootingCommand(NetworkCommand):
    """Grants the running-shooting power-up to a player."""
    def __init__(self, user_id):
        super(RunningShootingCommand, self).__init__()
        self.player_id = user_id
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Byte)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(self.player_id)
@command_mapping(CommandType.ITEM_LIST, CommandUsage.RecieveOnly)
class ItemListCommand(NetworkCommand):
    """Client request for the current item list; carries no payload."""
    def __init__(self):
        super(ItemListCommand, self).__init__()
@command_mapping(CommandType.ITEM_LIST_RESULT, CommandUsage.SendOnly)
class ItemListResultCommand(NetworkCommand):
    """Reply to ITEM_LIST: count byte + one record per item.

    Per-item record: item id (int32), item type (byte), position (float x3).
    An empty list is sent as a single zero byte.
    """
    def __init__(self, item_list):
        super(ItemListResultCommand, self).__init__()
        self.item_list = item_list
    def calc_buffer_size(self, byte_buffer):
        count = len(self.item_list)
        if count > 0:
            byte_buffer.extends(TypeSize.Byte)
            # BUG FIX: reserve the per-item record size for EVERY item.
            # The original reserved it only once, undersizing the buffer
            # whenever count > 1 (cf. EnemyListResultCommand, which
            # multiplies by count).
            byte_buffer.extends(
                count * (
                    TypeSize.Int32
                    + TypeSize.Byte
                    + TypeSize.Float * 3
                )
            )
        else:
            byte_buffer.extends(TypeSize.Byte)
    def fill_buffer(self, byte_buffer):
        count = len(self.item_list)
        if count > 0:
            byte_buffer.put_byte(count)
            for item in self.item_list:
                byte_buffer.put_int32(item.item_id)
                byte_buffer.put_byte(item.item_type)
                byte_buffer.put_float(item.position[0])
                byte_buffer.put_float(item.position[1])
                byte_buffer.put_float(item.position[2])
        else:
            byte_buffer.put_byte(0)
@command_mapping(CommandType.UPDATE_PLAYER_KILL_COUNT, CommandUsage.SendOnly)
class UpdatePlayerKillCountCommand(NetworkCommand):
    """Pushes a player's updated kill count to the client."""
    def __init__(self, count):
        super(UpdatePlayerKillCountCommand, self).__init__()
        self.count = count
    def calc_buffer_size(self, byte_buffer):
        # BUG FIX: fill_buffer writes an int32, so reserve Int32 here
        # (the original reserved TypeSize.Float).
        byte_buffer.extends(TypeSize.Int32)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_int32(self.count)
# @command_mapping(CommandType.ENEMY_BULLET_HIT, CommandUsage.RecieveOnly)
# class EnemyBulletHitCommand(NetworkCommand):
# def __init__(self):
# super(EnemyBulletHitCommand, self).__init__()
# self.player_id = None
# self.enemy_id = None
# self.bullet_id = None
# def parse(self, byte_array):
# start = 4
# self.player_id, start = ByteBuffer.get_byte(byte_array, start)
# self.enemy_id, start = ByteBuffer.get_uint32(byte_array, start)
# self.bullet_id, start = ByteBuffer.get_uint32(byte_array, start)
# @command_mapping(CommandType.ENMEY_BULLET_HIT_RESULT, CommandUsage.SendOnly)
# class EnmeyBuleltHitResultCommand(NetworkCommand):
# def __init__(self, player_id, enemy_id, player_hp, bullet_id):
# super(EnmeyBuleltHitResultCommand, self).__init__()
# self.player_id = player_id
# self.enemy_id = enemy_id
# self.player_hp = player_hp
# self.bullet_id = bullet_id
# def calc_buffer_size(self, byte_buffer):
# byte_buffer.extends(TypeSize.Byte)
# byte_buffer.extends(TypeSize.Int32)
# byte_buffer.extends(TypeSize.Int32)
# byte_buffer.extends(TypeSize.Int32)
# def fill_buffer(self, byte_buffer):
# byte_buffer.put_byte(self.player_id)
# byte_buffer.put_int32(self.enemy_id)
# byte_buffer.put_int32(self.player_hp)
# byte_buffer.put_int32(self.bullet_id)
@command_mapping(CommandType.PLAYER_DIE, CommandUsage.SendOnly)
class PlayerDieCommand(NetworkCommand):
    """Notifies clients that the given player has died; payload is one id byte."""
    def __init__(self, player_id):
        super(PlayerDieCommand, self).__init__()
        self.player_id = player_id
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Byte)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(self.player_id)
@command_mapping(CommandType.PLAYER_REBORN, CommandUsage.RecieveOnly)
class PlayerRebornCommand(NetworkCommand):
    """Client request to respawn a dead player."""
    def __init__(self):
        super(PlayerRebornCommand, self).__init__()
        self.player_id = None
    def parse(self, byte_array):
        start = 4
        # BUG FIX: ByteBuffer.get_byte returns (value, next_offset), as every
        # other parse() in this file unpacks it. The original assigned the
        # whole tuple to player_id, so downstream comparisons against a
        # plain id would never match.
        self.player_id, start = ByteBuffer.get_byte(byte_array, start)
@command_mapping(CommandType.PLAYER_REBORN_RESULT, CommandUsage.SendOnly)
class PlayerRebornResultCommand(NetworkCommand):
    """Respawn confirmation: restored hp and ammo for the reborn player."""
    def __init__(self, player_id, hp, cur_bullets, max_bullets):
        super(PlayerRebornResultCommand, self).__init__()
        self.player_id = player_id
        self.hp = hp
        self.cur_bullet = cur_bullets
        self.max_bullet = max_bullets
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Int32)
        byte_buffer.extends(TypeSize.Int16)
        byte_buffer.extends(TypeSize.Int16)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(self.player_id)
        byte_buffer.put_int32(self.hp)
        byte_buffer.put_int16(self.cur_bullet)
        byte_buffer.put_int16(self.max_bullet)
@command_mapping(CommandType.REGISTER, CommandUsage.RecieveOnly)
class RegisterCommand(NetworkCommand):
    """Account registration request.

    Payload: int16 username length, utf-8 username bytes, int16 password
    length, utf-8 password bytes (all offsets relative to the 4-byte header).
    """
    def __init__(self):
        super(RegisterCommand, self).__init__()
        self.username = None
        self.password = None
    def parse(self, byte_array):
        # "<h" = little-endian signed 16-bit length prefix.
        username_len = struct.unpack("<h", byte_array[4:6])[0]
        self.username = byte_array[6: 6+username_len].decode("utf-8")
        password_start = 6 + username_len
        password_len = struct.unpack(
            "<h", byte_array[password_start: password_start+2])[0]
        self.password = byte_array[password_start +
                                   2: password_start+2+password_len].decode("utf-8")
@command_mapping(CommandType.REGISTER_RESULT, CommandUsage.SendOnly)
class RegisterResultCommand(NetworkCommand):
    """Registration outcome: a single success/failure byte (1/0)."""
    def __init__(self, result):
        super(RegisterResultCommand, self).__init__()
        self.result = result
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Byte)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(1 if self.result else 0)
@command_mapping(CommandType.LEAVE, CommandUsage.DualWay)
class LeaveCommand(NetworkCommand):
    """Relay of a player's leave notification; decodes the leaving user id."""
    def __init__(self):
        super(LeaveCommand, self).__init__()
        self.user_id = None
        self.byte_array = None
    def parse(self, byte_array):
        start = 4
        self.user_id, start = ByteBuffer.get_byte(byte_array, start)
        self.byte_array = byte_array
    def format(self):
        return self.byte_array
@command_mapping(CommandType.LEAVE_RESULT, CommandUsage.SendOnly)
class LeaveResultCommand(NetworkCommand):
    """Acknowledges a LEAVE; send-only, so parse() must never be reached."""
    def __init__(self):
        super(LeaveResultCommand, self).__init__()
    def parse(self, byte_array):
        # Fixed typo in the error message ("accpeted" -> "accepted").
        raise Exception("invalid command accepted.")
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Byte)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(0)
# ENEMY_GUN_SHOOT = 0x32
# ENEMY_GUN_SHOOT_RESULT = 0x33
@command_mapping(CommandType.GUN_SHOOT, CommandUsage.DualWay)
class GunShootCommand(NetworkCommand):
    """Relay of a hitscan gun shot; decodes shooter, ammo, and hit info."""
    def __init__(self):
        super(GunShootCommand, self).__init__()
        self.shooter_id = None
        self.bullet_count = None
        self.is_hit = None
        self.hit_enemy_id = None
        self.byte_array = None
    def parse(self, byte_array):
        self.byte_array = byte_array
        start = 4
        self.shooter_id, start = ByteBuffer.get_byte(byte_array, start)
        self.bullet_count, start = ByteBuffer.get_byte(byte_array, start)
        self.is_hit, start = ByteBuffer.get_byte(byte_array, start)
        self.hit_enemy_id, start = ByteBuffer.get_uint32(byte_array, start)
        # Convert the raw flag byte to a bool after all fields are read.
        self.is_hit = self.is_hit == 1
    def format(self):
        return self.byte_array
@command_mapping(CommandType.GUN_SHOOT_RESULT, CommandUsage.SendOnly)
class GunShootResultCommand(NetworkCommand):
    """Authoritative gun-shot outcome: enemy hp, shooter ammo, hit flag."""
    def __init__(self, shooter_id, enemy_id, enemy_hp, cur_bullet, max_bullet, is_hit):
        super(GunShootResultCommand, self).__init__()
        self.shooter_id = shooter_id
        self.enemy_id = enemy_id
        self.enemy_hp = enemy_hp
        self.cur_bullet = cur_bullet
        self.max_bullet = max_bullet
        self.is_hit = is_hit
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.UInt32)
        byte_buffer.extends(TypeSize.Int32)
        byte_buffer.extends(TypeSize.Int16)
        byte_buffer.extends(TypeSize.Int16)
        byte_buffer.extends(TypeSize.Byte)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(self.shooter_id)
        byte_buffer.put_uint32(self.enemy_id)
        byte_buffer.put_int32(self.enemy_hp)
        byte_buffer.put_int16(self.cur_bullet)
        byte_buffer.put_int16(self.max_bullet)
        byte_buffer.put_byte(1 if self.is_hit else 0)
@command_mapping(CommandType.ENEMY_GUN_SHOOT, CommandUsage.DualWay)
class EnemyGunShootCommand(NetworkCommand):
    """Relay of an enemy's gun shot; decodes target, hit flag, and shooter."""
    def __init__(self):
        super(EnemyGunShootCommand, self).__init__()
        self.shooter_id = None
        self.is_hit = None
        self.hit_type = None
        self.hit_target_id = None
        self.byte_array = None
    def parse(self, byte_array):
        start = 4
        self.hit_target_id, start = ByteBuffer.get_byte(byte_array, start)
        self.is_hit, start = ByteBuffer.get_byte(byte_array, start)
        self.hit_type, start = ByteBuffer.get_byte(byte_array, start)
        self.shooter_id, start = ByteBuffer.get_uint32(byte_array, start)
        self.is_hit = self.is_hit == 1
        self.byte_array = byte_array
    def format(self):
        return self.byte_array
@command_mapping(CommandType.ENEMY_GUN_SHOOT_RESULT, CommandUsage.SendOnly)
class EnmeyGunShootResultCommand(NetworkCommand):
    """Authoritative enemy-shot outcome: the hit player's remaining hp.

    NOTE: the "Enmey" misspelling in the class name is kept for compatibility.
    """
    def __init__(self, enemy_id, target_id, is_hit, hp):
        super(EnmeyGunShootResultCommand, self).__init__()
        self.enemy_id = enemy_id
        self.target_id = target_id
        self.is_hit = is_hit
        self.hp = hp
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.UInt32)
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Int32)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_uint32(self.enemy_id)
        byte_buffer.put_byte(self.target_id)
        byte_buffer.put_byte(1 if self.is_hit else 0)
        byte_buffer.put_int32(self.hp)
@command_mapping(CommandType.SELF_DESTRUCT, CommandUsage.RecieveOnly)
class SelfDestuctCommand(NetworkCommand):
    """Client report that an enemy self-destructed at a position."""
    def __init__(self):
        super(SelfDestuctCommand, self).__init__()
        self.enemy_id = None
        self.position = None
    def parse(self, byte_array):
        start = 4
        self.enemy_id, start = ByteBuffer.get_uint32(byte_array, start)
        x, start = ByteBuffer.get_float(byte_array, start)
        y, start = ByteBuffer.get_float(byte_array, start)
        z, start = ByteBuffer.get_float(byte_array, start)
        self.position = [x, y, z]
@command_mapping(CommandType.SELF_DESTRUCT_RESULT, CommandUsage.SendOnly)
class SelfDestuctResultCommand(NetworkCommand):
    """Self-destruct outcome: damaged player's hp and the destroyed enemy id."""
    def __init__(self, damaged_player_id, hp, enemy_id):
        super(SelfDestuctResultCommand, self).__init__()
        self.damaged_player_id = damaged_player_id
        self.hp = hp
        self.enemy_id = enemy_id
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Int32)
        byte_buffer.extends(TypeSize.UInt32)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(self.damaged_player_id)
        byte_buffer.put_int32(self.hp)
        byte_buffer.put_uint32(self.enemy_id)
@command_mapping(CommandType.EXPLODE, CommandUsage.DualWay)
class ExplodeCommand(NetworkCommand):
    """Relay of a grenade explosion; decodes thrower, position, bullet id."""
    def __init__(self):
        super(ExplodeCommand, self).__init__()
        self.byte_array = None
        self.thrower = None
        self.position = None
        self.bullet_id = None
    def parse(self, byte_array):
        self.byte_array = byte_array
        start = 4
        self.thrower, start = ByteBuffer.get_byte(byte_array, start)
        x, start = ByteBuffer.get_float(byte_array, start)
        y, start = ByteBuffer.get_float(byte_array, start)
        z, start = ByteBuffer.get_float(byte_array, start)
        self.position = [x, y, z]
        self.bullet_id, start = ByteBuffer.get_uint32(byte_array, start)
    def format(self):
        return self.byte_array
@command_mapping(CommandType.EXPLODE_RESULT, CommandUsage.SendOnly)
class ExplodeResultCommand(NetworkCommand):
    """Explosion outcome: a damaged enemy's remaining hp.

    NOTE: the "damaged_enmey_id" misspelling is part of the public attribute
    name and is kept for compatibility.
    """
    def __init__(self, damaged_enemy_id, hp):
        super(ExplodeResultCommand, self).__init__()
        self.damaged_enmey_id = damaged_enemy_id
        self.hp = hp
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.UInt32)
        byte_buffer.extends(TypeSize.Int32)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_uint32(self.damaged_enmey_id)
        byte_buffer.put_int32(self.hp)
@command_mapping(CommandType.PUT_TRAP, CommandUsage.RecieveOnly)
class PutTrapCommand(NetworkCommand):
    """Client request to place a trap at a position/rotation."""
    def __init__(self):
        super(PutTrapCommand, self).__init__()
        self.builder_id = None
        self.position = None
        self.rotation = None
        self.trap_type = None
    def parse(self, byte_array):
        start = 4
        self.builder_id, start = ByteBuffer.get_byte(byte_array, start)
        x, start = ByteBuffer.get_float(byte_array, start)
        y, start = ByteBuffer.get_float(byte_array, start)
        z, start = ByteBuffer.get_float(byte_array, start)
        self.position = [x, y, z]
        self.rotation, start = ByteBuffer.get_float(byte_array, start)
        self.trap_type, start = ByteBuffer.get_byte(byte_array, start)
@command_mapping(CommandType.PUT_TRAP_RESULT, CommandUsage.SendOnly)
class PutTrapResultCommand(NetworkCommand):
    """Confirms trap placement and echoes the remaining trap count."""
    def __init__(self, builder_id, position, rotation, trap_type, trap_count):
        super(PutTrapResultCommand, self).__init__()
        self.builder_id = builder_id
        self.position = position
        self.rotation = rotation
        self.trap_type = trap_type
        self.trap_count = trap_count
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Float * 3)
        byte_buffer.extends(TypeSize.Float)
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Byte)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(self.builder_id)
        byte_buffer.put_float(self.position[0])
        byte_buffer.put_float(self.position[1])
        byte_buffer.put_float(self.position[2])
        byte_buffer.put_float(self.rotation)
        byte_buffer.put_byte(self.trap_type)
        byte_buffer.put_byte(self.trap_count)
@command_mapping(CommandType.DEBUFF_ATTACH, CommandUsage.RecieveOnly)
class DebuffAttachCommand(NetworkCommand):
    """Client report that a debuff was applied to an enemy."""
    def __init__(self):
        super(DebuffAttachCommand, self).__init__()
        self.target_enemy_id = None
        self.debuff_id = None
        self.debuff_duration = None
    def parse(self, byte_array):
        start = 4
        self.target_enemy_id, start = ByteBuffer.get_uint32(byte_array, start)
        self.debuff_id, start = ByteBuffer.get_byte(byte_array, start)
        self.debuff_duration, start = ByteBuffer.get_float(byte_array, start)
@command_mapping(CommandType.DEBUFF_REMOVE, CommandUsage.RecieveOnly)
class DebuffRemoveCommand(NetworkCommand):
    """Client report that a debuff expired/was removed from an enemy."""
    def __init__(self):
        super(DebuffRemoveCommand, self).__init__()
        self.target_enemy_id = None
        self.debuff_id = None
    def parse(self, byte_array):
        start = 4
        self.target_enemy_id, start = ByteBuffer.get_uint32(byte_array, start)
        self.debuff_id, start = ByteBuffer.get_byte(byte_array, start)
@command_mapping(CommandType.DEBUFF_ATTACH_RESULT, CommandUsage.SendOnly)
class DebuffAttachResultCommand(NetworkCommand):
    """Broadcast that a debuff was attached to an enemy."""
    def __init__(self, target_enemy_id, debuff_id, debuff_duration):
        super(DebuffAttachResultCommand, self).__init__()
        self.target_enemy_id = target_enemy_id
        self.debuff_id = debuff_id
        self.debuff_duration = debuff_duration
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Int32)
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Float)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_int32(self.target_enemy_id)
        byte_buffer.put_byte(self.debuff_id)
        byte_buffer.put_float(self.debuff_duration)
@command_mapping(CommandType.DEBUFF_REMOVE_RESULT, CommandUsage.RecieveOnly)
class DebuffRemoveResultCommand(NetworkCommand):
    """Broadcast that a debuff was removed from an enemy.

    NOTE(review): registered as RecieveOnly yet it only implements the
    send-side calc_buffer_size/fill_buffer pair and takes constructor
    arguments -- this looks like it should be SendOnly (cf.
    DebuffAttachResultCommand). Confirm against the dispatcher before
    changing the flag.
    """
    def __init__(self, target_enemy_id, debuff_id):
        super(DebuffRemoveResultCommand, self).__init__()
        self.target_enemy_id = target_enemy_id
        self.debuff_id = debuff_id
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Int32)
        byte_buffer.extends(TypeSize.Byte)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_int32(self.target_enemy_id)
        byte_buffer.put_byte(self.debuff_id)
@command_mapping(CommandType.ENMEY_DEBUFF_DAMAGED, CommandUsage.RecieveOnly)
class EnemyDebuffDamagedCommand(NetworkCommand):
    """Client report that a debuff tick damaged an enemy."""
    def __init__(self):
        super(EnemyDebuffDamagedCommand, self).__init__()
        self.enemy_id = None
    def parse(self, byte_array):
        start = 4
        self.enemy_id, start = ByteBuffer.get_uint32(byte_array, start)
@command_mapping(CommandType.ENMEY_DEBUFF_DAMAGED_RESULT, CommandUsage.SendOnly)
class EnemyDebuffDamagedResultCommand(NetworkCommand):
    """Authoritative debuff-damage outcome: the enemy's remaining hp."""
    def __init__(self, enemy_id, hp):
        super(EnemyDebuffDamagedResultCommand, self).__init__()
        self.enemy_id = enemy_id
        self.hp = hp
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.UInt32)
        byte_buffer.extends(TypeSize.Int32)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_uint32(self.enemy_id)
        byte_buffer.put_int32(self.hp)
@command_mapping(CommandType.TRAP_LIST, CommandUsage.RecieveOnly)
class TrapListCommand(NetworkCommand):
    """Client request for the current trap list; carries no payload."""
    def __init__(self):
        super(TrapListCommand, self).__init__()
    def parse(self, byte_array):
        pass
@command_mapping(CommandType.TRAP_LIST_RESULT, CommandUsage.SendOnly)
class TrapListResultCommand(NetworkCommand):
    """Reply to TRAP_LIST: uint32 count + one record per trap.

    Unlike the item/enemy lists, the count here is a uint32, not a byte.
    """
    def __init__(self, trap_list):
        super(TrapListResultCommand, self).__init__()
        self.trap_list = trap_list
    def calc_buffer_size(self, byte_buffer):
        count = len(self.trap_list)
        byte_buffer.extends(TypeSize.UInt32)
        # Per-trap record: type, id, position x3, rotation.
        byte_buffer.extends(
            count *
            (
                TypeSize.Byte
                + TypeSize.UInt32
                + TypeSize.Float * 3
                + TypeSize.Float
            )
        )
    def fill_buffer(self, byte_buffer):
        count = len(self.trap_list)
        byte_buffer.put_uint32(count)
        for trap in self.trap_list:
            byte_buffer.put_byte(trap.trap_type)
            byte_buffer.put_uint32(trap.trap_id)
            byte_buffer.put_float(trap.position[0])
            byte_buffer.put_float(trap.position[1])
            byte_buffer.put_float(trap.position[2])
            byte_buffer.put_float(trap.rotation)
@command_mapping(CommandType.TANK_MOVE, CommandUsage.DualWay)
class TankMoveCommand(NetworkCommand):
    """Relay of a tank movement update; decodes driver id and position."""
    def __init__(self):
        super(TankMoveCommand, self).__init__()
        self.driver_id = None
        self.position = None
        self.byte_array = None
    def parse(self, byte_array):
        self.byte_array = byte_array
        start = 4
        self.driver_id, start = ByteBuffer.get_byte(byte_array, start)
        x, start = ByteBuffer.get_float(byte_array, start)
        y, start = ByteBuffer.get_float(byte_array, start)
        z, start = ByteBuffer.get_float(byte_array, start)
        self.position = [x, y, z]
    def format(self):
        return self.byte_array
@command_mapping(CommandType.TANK_BODY_ROTATE, CommandUsage.DualWay)
class TankBodyRotateCommand(NetworkCommand):
    """Relay of a tank hull rotation update."""
    def __init__(self):
        super(TankBodyRotateCommand, self).__init__()
        self.driver_id = None
        self.body_rotation = None
        self.byte_array = None
    def parse(self, byte_array):
        self.byte_array = byte_array
        start = 4
        self.driver_id, start = ByteBuffer.get_byte(byte_array, start)
        self.body_rotation, start = ByteBuffer.get_float(byte_array, start)
    def format(self):
        return self.byte_array
@command_mapping(CommandType.TANK_TURRET_ROTATE, CommandUsage.DualWay)
class TankTurretRotateCommand(NetworkCommand):
    """Relay of a tank turret rotation update."""
    def __init__(self):
        super(TankTurretRotateCommand, self).__init__()
        self.driver_id = None
        self.turret_rotation = None
        self.byte_array = None
    def parse(self, byte_array):
        self.byte_array = byte_array
        start = 4
        self.driver_id, start = ByteBuffer.get_byte(byte_array, start)
        self.turret_rotation, start = ByteBuffer.get_float(byte_array, start)
    def format(self):
        return self.byte_array
@command_mapping(CommandType.DRIVE_TANK, CommandUsage.RecieveOnly)
class DriveTankCommand(NetworkCommand):
    """Client request to enter/drive the tank."""
    def __init__(self):
        super(DriveTankCommand, self).__init__()
        self.driver_id = None
    def parse(self, byte_array):
        start = 4
        self.driver_id, start = ByteBuffer.get_byte(byte_array, start)
    @command_mapping(CommandType.DRIVE_TANK_RESULT, CommandUsage.SendOnly)
    class DriveTankResultCommand(NetworkCommand):
        pass
@command_mapping(CommandType.DRIVE_TANK_RESULT, CommandUsage.SendOnly)
class DriveTankResultCommand(NetworkCommand):
    """Grants or denies tank control to the requesting driver."""
    def __init__(self, driver_id, is_drive):
        super(DriveTankResultCommand, self).__init__()
        self.driver_id = driver_id
        self.is_drive = is_drive
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Byte)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(self.driver_id)
        byte_buffer.put_byte(1 if self.is_drive else 0)
@command_mapping(CommandType.REQUIRE_TANK_INFO, CommandUsage.RecieveOnly)
class RequireTankInfoCommand(NetworkCommand):
    """Client request for the tank's current state; carries no payload."""
    def __init__(self):
        super(RequireTankInfoCommand, self).__init__()
    def parse(self, byte_array):
        pass
@command_mapping(CommandType.REQUIRE_TANK_INFO_RESULT, CommandUsage.RecieveOnly)
class RequireTankInfoResultCommand(NetworkCommand):
    """Reply with the tank's full state: driver, position, rotations, hp.

    NOTE(review): registered as RecieveOnly yet it only implements the
    send-side calc_buffer_size/fill_buffer pair (cf. DriveTankResultCommand,
    which is SendOnly) -- likely should be SendOnly; confirm against the
    dispatcher before changing the flag.
    """
    def __init__(self, driver_id, is_driven, position, turret_rotation, body_rotation, hp):
        super(RequireTankInfoResultCommand, self).__init__()
        self.driver_id = driver_id
        self.is_driven = is_driven
        self.position = position
        self.turret_rotation = turret_rotation
        self.body_rotation = body_rotation
        self.hp = hp
    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Float * 3)
        byte_buffer.extends(TypeSize.Float)
        byte_buffer.extends(TypeSize.Float)
        byte_buffer.extends(TypeSize.Int32)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(self.driver_id)
        byte_buffer.put_byte(1 if self.is_driven else 0)
        byte_buffer.put_float(self.position[0])
        byte_buffer.put_float(self.position[1])
        byte_buffer.put_float(self.position[2])
        byte_buffer.put_float(self.turret_rotation)
        byte_buffer.put_float(self.body_rotation)
        byte_buffer.put_int32(self.hp)
@command_mapping(CommandType.TANK_DESTROY, CommandUsage.SendOnly)
class TankDestroyCommand(NetworkCommand):
def __init__(self):
super(TankDestroyCommand, self).__init__()
def calc_buffer_size(self, byte_buffer):
byte_buffer.extends(TypeSize.Byte)
def fill_buffer(self, byte_buffer):
byte_buffer.put_byte(0)
@command_mapping(CommandType.GAME_RESULT, CommandUsage.SendOnly)
class GameResultCommand(NetworkCommand):
def __init__(self, result):
super(GameResultCommand, self).__init__()
self.result = result
def calc_buffer_size(self, byte_buffer):
byte_buffer.extends(TypeSize.Byte)
def fill_buffer(self, byte_buffer):
byte_buffer.put_byte(1 if self.result else 0)
@command_mapping(CommandType.WAVE_OVER, CommandUsage.SendOnly)
class WaveOverCommand(NetworkCommand):
    """Signals the end of the current enemy wave; no meaningful payload."""

    def __init__(self):
        super(WaveOverCommand, self).__init__()

    def calc_buffer_size(self, byte_buffer):
        # Single placeholder byte.
        byte_buffer.extends(TypeSize.Byte)

    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(0)
@command_mapping(CommandType.NEW_WAVE, CommandUsage.RecieveOnly)
class NewWaveCommand(NetworkCommand):
    """Incoming notification that a new enemy wave begins; no payload.

    Fix: added a no-op parse() -- the command is registered RecieveOnly,
    and every other receive-only command in this module implements parse()
    (the empty send-side hooks are kept for backward compatibility).
    """

    def __init__(self):
        super(NewWaveCommand, self).__init__()

    def parse(self, byte_array):
        # No payload to decode.
        pass

    def calc_buffer_size(self, byte_buffer):
        pass

    def fill_buffer(self, byte_buffer):
        pass
@command_mapping(CommandType.NEW_GAME, CommandUsage.RecieveOnly)
class NewGameCommand(NetworkCommand):
    """Incoming request to start a new game; carries no payload."""

    def __init__(self):
        super(NewGameCommand, self).__init__()

    def parse(self, byte_array):
        # Nothing to decode.
        pass
@command_mapping(CommandType.NEW_GAME_RESULT, CommandUsage.SendOnly)
class NewGameResultCommand(NetworkCommand):
    """Reply to NEW_GAME carrying the fresh tank and strongpoint state.

    Fix: strongpoint_hp is now written with put_int32 -- calc_buffer_size
    reserves TypeSize.Int32 for it, and every other hp field in this module
    (tank_hp here, hp in the strongpoint commands) is serialized as a signed
    int32; the original put_uint32 was inconsistent with its own size calc.
    """

    def __init__(self, tank_position, tank_turret_rotation, tank_body_rotation, tank_hp, strongpoint_hp):
        super(NewGameResultCommand, self).__init__()
        self.tank_position = tank_position            # 3-component position
        self.tank_turret_rotation = tank_turret_rotation
        self.tank_body_rotation = tank_body_rotation
        self.tank_hp = tank_hp                        # signed 32-bit
        self.strongpoint_hp = strongpoint_hp          # signed 32-bit

    def calc_buffer_size(self, byte_buffer):
        # Reservations mirror fill_buffer's writes, in the same order.
        byte_buffer.extends(TypeSize.Float * 3)  # tank_position
        byte_buffer.extends(TypeSize.Float)      # tank_turret_rotation
        byte_buffer.extends(TypeSize.Float)      # tank_body_rotation
        byte_buffer.extends(TypeSize.Int32)      # tank_hp
        byte_buffer.extends(TypeSize.Int32)      # strongpoint_hp

    def fill_buffer(self, byte_buffer):
        byte_buffer.put_float(self.tank_position[0])
        byte_buffer.put_float(self.tank_position[1])
        byte_buffer.put_float(self.tank_position[2])
        byte_buffer.put_float(self.tank_turret_rotation)
        byte_buffer.put_float(self.tank_body_rotation)
        byte_buffer.put_int32(self.tank_hp)
        byte_buffer.put_int32(self.strongpoint_hp)
@command_mapping(CommandType.REQUIRE_STRONG_POINT_INFO, CommandUsage.RecieveOnly)
class RequireStrongPointInfoCommand(NetworkCommand):
    """Incoming request for the strongpoint's state; carries no payload."""

    def __init__(self):
        super(RequireStrongPointInfoCommand, self).__init__()

    def parse(self, byte_array):
        # Nothing to decode.
        pass
@command_mapping(CommandType.REQUIRE_STRONG_POINT_INFO_RESULT, CommandUsage.SendOnly)
class RequireStrongPointInfoResultCommand(NetworkCommand):
    """Reply carrying the strongpoint's position (3 floats) and hp (int32)."""

    def __init__(self, position, hp):
        super(RequireStrongPointInfoResultCommand, self).__init__()
        self.position = position
        self.hp = hp

    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Float * 3)
        byte_buffer.extends(TypeSize.Int32)

    def fill_buffer(self, byte_buffer):
        # Serialize the three position components in order, then the hp.
        for axis in range(3):
            byte_buffer.put_float(self.position[axis])
        byte_buffer.put_int32(self.hp)
@command_mapping(CommandType.STRONG_POINT_ATTACKED, CommandUsage.RecieveOnly)
class StrongPointAttackedCommand(NetworkCommand):
    """Incoming notification that an enemy attacked the strongpoint."""

    def __init__(self):
        super(StrongPointAttackedCommand, self).__init__()
        self.enemy_id = None  # filled in by parse()

    def parse(self, byte_array):
        offset = 4  # skip the 4-byte command header
        self.enemy_id, offset = ByteBuffer.get_uint32(byte_array, offset)
@command_mapping(CommandType.STRONG_POINT_ATTACKED_RESULT, CommandUsage.SendOnly)
class StrongPointAttackedResultCommand(NetworkCommand):
    """Reply to STRONG_POINT_ATTACKED: echoes the attacker id and the
    strongpoint's remaining hp.

    NOTE(review): the constructor parameter is spelled ``enmey_id`` (sic,
    'enemy'); renaming it could break keyword-argument callers, so it is
    documented rather than changed here.
    """
    def __init__(self, enmey_id, hp):
        super(StrongPointAttackedResultCommand, self).__init__()
        self.enemy_id = enmey_id  # uint32 id of the attacking enemy
        self.hp = hp  # int32 remaining hit points
    def calc_buffer_size(self, byte_buffer):
        # Reservations mirror fill_buffer: uint32 enemy id + int32 hp.
        byte_buffer.extends(TypeSize.UInt32)
        byte_buffer.extends(TypeSize.Int32)
    def fill_buffer(self, byte_buffer):
        byte_buffer.put_uint32(self.enemy_id)
        byte_buffer.put_int32(self.hp)
@command_mapping(CommandType.BUY_TRAP, CommandUsage.RecieveOnly)
class BuyTrapCommand(NetworkCommand):
    """Incoming purchase request for a trap of a given type."""

    def __init__(self):
        super(BuyTrapCommand, self).__init__()
        self.trap_type = None  # filled in by parse()

    def parse(self, byte_array):
        offset = 4  # skip the 4-byte command header
        self.trap_type, offset = ByteBuffer.get_byte(byte_array, offset)
@command_mapping(CommandType.BUY_TRAP_RESULT, CommandUsage.SendOnly)
class BuyTrapResultCommand(NetworkCommand):
    """Reply to BUY_TRAP: remaining gold plus per-type trap inventory."""

    def __init__(self, gold, damage_trap_count, slow_trap_count):
        super(BuyTrapResultCommand, self).__init__()
        self.gold = gold
        self.damage_trap_count = damage_trap_count
        self.slow_trap_count = slow_trap_count

    def calc_buffer_size(self, byte_buffer):
        # int32 gold followed by two single-byte trap counters.
        byte_buffer.extends(TypeSize.Int32)
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Byte)

    def fill_buffer(self, byte_buffer):
        byte_buffer.put_int32(self.gold)
        byte_buffer.put_byte(self.damage_trap_count)
        byte_buffer.put_byte(self.slow_trap_count)
@command_mapping(CommandType.UPDATE_GOLD, CommandUsage.SendOnly)
class UpdateGoldCommand(NetworkCommand):
    """Pushes the player's current gold amount to the peer."""

    def __init__(self, gold):
        super(UpdateGoldCommand, self).__init__()
        self.gold = gold  # serialized as a signed 32-bit integer

    def calc_buffer_size(self, byte_buffer):
        byte_buffer.extends(TypeSize.Int32)

    def fill_buffer(self, byte_buffer):
        byte_buffer.put_int32(self.gold)
@command_mapping(CommandType.BUY_GRENATE, CommandUsage.RecieveOnly)
class BuyGrenateCommand(NetworkCommand):
    """Incoming grenade purchase request; carries no payload.

    NOTE(review): 'Grenate' (sic, 'Grenade') is spelled this way throughout
    the protocol, so the class name is left as-is.
    """

    def __init__(self):
        super(BuyGrenateCommand, self).__init__()

    def parse(self, byte_array):
        # Nothing to decode.
        pass
@command_mapping(CommandType.BUY_GRENATE_RESULT, CommandUsage.SendOnly)
class BuyGrenateResultCommand(NetworkCommand):
    """Reply to BUY_GRENATE: new grenade count (byte) and remaining gold."""

    def __init__(self, grenate_count, gold):
        super(BuyGrenateResultCommand, self).__init__()
        self.grenate_count = grenate_count
        self.gold = gold

    def calc_buffer_size(self, byte_buffer):
        # One byte for the count, then an int32 for the gold balance.
        byte_buffer.extends(TypeSize.Byte)
        byte_buffer.extends(TypeSize.Int32)

    def fill_buffer(self, byte_buffer):
        byte_buffer.put_byte(self.grenate_count)
        byte_buffer.put_int32(self.gold)
|
# Author: Jian Shi
import unittest
import numpy as np
from PySeismoSoil.class_ground_motion import Ground_Motion
from PySeismoSoil.class_Vs_profile import Vs_Profile
from PySeismoSoil.class_simulation_results import Simulation_Results
import PySeismoSoil.helper_site_response as sr
import os
from os.path import join as _join
# Directory holding the test fixture files, resolved relative to this script.
f_dir = _join(os.path.dirname(os.path.realpath(__file__)), 'files')
class Test_Class_Simulation_Results(unittest.TestCase):
    """Smoke tests for constructing and plotting Simulation_Results."""
    def test_plot(self):
        # Test that the desired data are correctly imported to the object
        accel_in = Ground_Motion(_join(f_dir, 'sample_accel.txt'), unit='m/s/s')
        accel_tmp = accel_in.accel.copy()
        # Scale the acceleration column to fake an amplified output motion.
        accel_tmp[:, 1] *= 5.0
        accel_out = Ground_Motion(accel_tmp, unit='m/s/s')
        vs_profile = Vs_Profile(_join(f_dir, 'profile_FKSH14.txt'))
        thk = vs_profile._thk
        # Depth arrays: layer boundaries for the a/v/d profiles, layer
        # midpoints for the strain/stress profiles.
        depth_bound = sr.thk2dep(thk, midpoint=False)
        depth_midpoint = sr.thk2dep(thk, midpoint=True)
        # Synthetic "maximum profile" arrays: columns are scaled copies of
        # depth so the plot content is predictable.
        max_a_v_d = np.column_stack((depth_bound,
                                     depth_bound * 1,
                                     depth_bound * 2,
                                     depth_bound * 3))
        max_gamma_tau = np.column_stack((depth_midpoint,
                                         depth_midpoint * 1,
                                         depth_midpoint * 2))
        tf_RO, _, _ = vs_profile.get_transfer_function()
        sim_results = Simulation_Results(accel_in, accel_out, vs_profile,
                                         max_a_v_d=max_a_v_d,
                                         max_strain_stress=max_gamma_tau,
                                         trans_func=tf_RO)
        sim_results.plot(save_fig=False)
        # Test that it can produce a plot without max profiles and trans. func.
        sim_results_ = Simulation_Results(accel_in, accel_out, vs_profile)
        sim_results_.plot(save_fig=False)
if __name__ == '__main__':
    # Run only this test case, with verbose output, when executed directly.
    SUITE = unittest.TestLoader().loadTestsFromTestCase(Test_Class_Simulation_Results)
    unittest.TextTestRunner(verbosity=2).run(SUITE)
|
from django.apps import AppConfig
class HomefirstaidkitAppConfig(AppConfig):
    """Django application configuration for the HomeFirstAidKit app."""
    name = 'HomeFirstAidKit_app'
|
''' figures.py
=========================
AIM: Provide several specific functions to save beautiful figures
	INPUT: function-dependent
	OUTPUT: function-dependent
CMD: To include: import resources.figures as figures
ISSUES: <none known>
REQUIRES: standard python libraries, specific libraries in resources/
REMARKS: in general fancy means latex interpreter (font is serif, Palatino) and generates *.eps and *.pdf
'''
######################################################################
import numpy as np
def savefig(fname, fig, fancy=False):
    """Save *fig* as PNG (always) and, if *fancy*, also as cropped EPS/PDF.

    fname -- output path without extension
    fig   -- matplotlib figure to save
    fancy -- also emit fname.eps and a cropped fname.pdf (requires the
             external ``epstopdf`` and ``pdfcrop`` tools on the PATH)
    """
    import os
    import subprocess
    import parameters as param
    fig.savefig(fname + '.png', dpi=param.dpi)
    if fancy:
        fig.savefig(fname + '.eps', dpi=param.dpi, transparent=True)
        # List-form subprocess calls avoid the shell entirely, so filenames
        # with spaces or metacharacters are safe (the original built shell
        # strings and mixed os.system with subprocess).
        subprocess.check_call(['epstopdf', fname + '.eps'])
        subprocess.check_output(['pdfcrop', fname + '.pdf'])
        # pdfcrop writes <name>-crop.pdf; replace the uncropped PDF with it
        # (os.replace is the portable equivalent of the original `mv`).
        os.replace(fname + '-crop.pdf', fname + '.pdf')
def set_fancy():
    """Switch matplotlib to LaTeX rendering with a serif (Palatino) font."""
    from matplotlib import rc
    rc('text', usetex=True)
    rc('font', **{'family': 'serif', 'serif': ['Palatino'], 'size': 16})
def cd(xx, year=2018, day=1, month=1):
    """Convert a (possibly fractional) day offset into a calendar date.

    xx    -- day offset from the reference date; rounded to the nearest int
    year, month, day -- reference date (default 2018-01-01)

    Returns the datetime.date that lies round(xx) days after the reference.
    (The original built today's date and then replaced its fields; building
    the reference date directly is simpler and avoids the detour. Its
    docstring also hard-coded "2018" even though the year is a parameter.)
    """
    import datetime
    reference = datetime.date(year, month, day)
    return reference + datetime.timedelta(days=int(round(xx)))
# Vectorized version of cd(): accepts array-like day offsets.
convert_date = np.vectorize(cd)
def format_log10(value):
    """Tick formatter: render *value* as a LaTeX power of ten."""
    exponent = np.log10(value)
    return r'$10^{%d}$' % exponent
def format_mag(value):
    """Tick formatter: plain integer wrapped in LaTeX math mode."""
    return '$%d$' % value
def format_degree(value):
    """Tick formatter: integer followed by a LaTeX degree symbol."""
    template = r'$%d^\circ$'
    return template % value
def format_second(xx):
    """Tick formatter: render a time struct as 'DD Mon HH:MM'."""
    import time
    return time.strftime('%d %b %H:%M', xx)
def format_day(xx):
    """Tick formatter: render a time struct as 'DD Mon'."""
    import time
    return time.strftime('%d %b', xx)
|
from aces import Aces
class sub(Aces):
    """Job definition: graphene Green-Kubo run driven by the negf runner."""

    def submit(self):
        # Scheduler / run-control options for this calculation.
        opt = dict(
            units="metal",
            species="graphene",
            method="greenkubo",
            nodes=1,
            procs=12,
            queue="q1.1",
            runTime=500000,
            runner="negf",
        )
        # Application-specific lattice and solver parameters.
        app = dict(
            kpoints=[1, 1, 1],
            laty=2,
            latx=3,
            leadlat=[1, 1, 1],
            gamma_only=False,
            useMini=True,
            boxOpt=True,
            timestep=.182e-3,
        )
        self.commit(opt, app)
if __name__=='__main__':
    # Submit and run the job when executed directly.
    sub().run()
|
from picloud_client.picloud import SocketClient, HttpClient, PublishError
|
from PIL import Image
from os.path import getsize
from tkinter.filedialog import askopenfilename, asksaveasfilename
def compress():
    """Pick an image via a file dialog, re-encode it as JPEG, and report sizes.

    The "compression" comes purely from the JPEG re-encode: the image is
    re-sampled at its existing dimensions, so pixel size is unchanged.

    Fixes: (1) ``img.size`` is (width, height) in PIL -- the original
    unpacked it as (height, width); harmless here only because both values
    were fed straight back to resize(). (2) ``Image.ANTIALIAS`` was an
    alias of LANCZOS and was removed in Pillow 10, so LANCZOS is used
    directly. (3) The opened image is now closed via a context manager.
    """
    file_path = askopenfilename(title="select image ")
    print(f"Original size of image was : {getsize(file_path)} Bytes")
    with Image.open(file_path) as img:
        width, height = img.size
        resized = img.resize((width, height), Image.LANCZOS)
        # The chosen path gets a fixed suffix; note the extension decides
        # the JPEG re-encode.
        save_path = asksaveasfilename(title="save compressed image")
        save_path += "_compressed.JPG"
        resized.save(save_path)
    print(f"New size of image is : {getsize(save_path)} Bytes")
if __name__ == '__main__':
    # Launch the interactive compressor when executed directly.
    compress()
|
# Maps ORM model class names to their underlying database table names.
tables = {
    "BalanceSheet": "balance_sheets",
    "Client": "clients",
    "CollectiveRoomOccasionalPricing": "pricings_collective_occasional",
    "CollectiveRoomRegularPricing": "pricings_collective_regular",
    "Contract": "contracts",
    "DailyBooking": "daily_bookings",
    "Expense": "expenses",
    "FlatRatePricing": "pricings_flat_rate",
    "Invoice": "invoices",
    "Pricing": "pricings_individual_modular",
    "RecurringPricing": "pricings_recurring",
    "Room": "rooms",
}
|
#!/usr/bin/env python3
"""
Author : alex <alex@localhost>
Date : 2021-01-31
Purpose: Rock the Casbah
"""
import argparse
import re
import string
import os
# --------------------------------------------------
def get_args():
    """Get command-line arguments.

    The positional ``text`` argument is either literal text or a path; when
    it names an existing file, the file's contents replace it.
    """
    parser = argparse.ArgumentParser(
        description='Southern fry text',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('text',
                        metavar='text',
                        help='Input text or file')
    args = parser.parse_args()
    if os.path.isfile(args.text):
        # Read with a context manager so the handle is closed promptly
        # (the original `open(...).read()` leaked the file object).
        with open(args.text) as fh:
            args.text = fh.read()
    return args
# --------------------------------------------------
def main():
    """Southern-fry each line of the input text and print the result."""
    args = get_args()
    for line in args.text.splitlines():
        words = []
        # Split on runs of non-word characters, KEEPING the separators
        # (capturing group) so the line can be reassembled verbatim.
        for word in re.split(r'(\W+)', line.rstrip()):
            first = friar(word)
            if first!=word:
                # friar() stripped an '-ing' ending; re-add it as "-in'".
                words.append(first + "in'")
            else:
                words.append(word)
        t = []
        for word in words:
            # NOTE(review): yall() returns a string, so `t +=` extends the
            # list character-by-character; harmless since ''.join() below
            # concatenates everything back, but append() was likely intended.
            t += yall(word)
        print(''.join(t))
# --------------------------------------------------
def friar(word):
    """Strip a trailing 'ing' when the remaining stem still has a vowel.

    'fishing' -> 'fish', 'Aching' -> 'Ach', but 'swing' -> 'swing'
    (stem 'sw' contains no vowel, so the word is left alone).
    """
    gerund = re.compile(r'(\w+)(ing$)')
    has_vowel = re.compile('[aeiouAEIOU]')
    m = gerund.search(word)
    if m and has_vowel.search(m.group(1)):
        return m.group(1)
    return word
# --------------------------------------------------
def yall(word):
    """Replace a standalone 'you'/'You' with the Southern "y'all"/"Y'all"."""
    match = re.search('(^you$|^You$)', word)
    matched = match.group() if match else ""
    if matched == 'you':
        return "y'all"
    if matched == "You":
        return "Y'all"
    return word
#---------------------------------------------------
def test_friar():
    # Sanity checks: 'ing' is stripped only when the stem keeps a vowel.
    assert friar('fishing') == "fish"
    assert friar('Aching') == "Ach"
    assert friar('swing') == 'swing'
# --------------------------------------------------
if __name__ == '__main__':
    # Run the filter when executed as a script.
    main()
|
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
import sys
# This legacy module is Python-2-only; fail fast on Python 3 with a pointer
# to the maintained replacement.
if sys.version_info[0] != 2:
    raise ImportError("No longer available with Python 3, use mutagen.mp4")
"""Read and write MPEG-4 audio files with iTunes metadata.
This module will read MPEG-4 audio information and metadata,
as found in Apple's M4A (aka MP4, M4B, M4P) files.
There is no official specification for this format. The source code
for TagLib, FAAD, and various MPEG specifications at
http://developer.apple.com/documentation/QuickTime/QTFF/,
http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt,
and http://wiki.multimedia.cx/index.php?title=Apple_QuickTime were all
consulted.
This module does not support 64 bit atom sizes, and so will not
work on metadata over 4GB.
"""
import struct
import sys
from cStringIO import StringIO
from ._compat import reraise
from mutagen import FileType, Metadata, StreamInfo
from mutagen._constants import GENRES
from mutagen._util import cdata, insert_bytes, delete_bytes, DictProxy, \
MutagenError
class error(IOError, MutagenError):
    """Base class for all M4A errors (IOError kept for API compatibility)."""
    pass
class M4AMetadataError(error):
    """Raised when the iTunes metadata ('ilst') cannot be read or written."""
    pass
class M4AStreamInfoError(error):
    """Raised when the audio stream information cannot be parsed."""
    pass
class M4AMetadataValueError(ValueError, M4AMetadataError):
    """Raised for tag values that cannot be serialized (e.g. out-of-range)."""
    pass
import warnings
# Emitted once at import time: this whole module is deprecated.
warnings.warn(
    "mutagen.m4a is deprecated; use mutagen.mp4 instead.", DeprecationWarning)
# This is not an exhaustive list of container atoms, but just the
# ones this module needs to peek inside.
_CONTAINERS = ["moov", "udta", "trak", "mdia", "meta", "ilst",
               "stbl", "minf", "stsd"]
# Bytes of non-atom payload to skip right after a container's header
# ('meta' carries a 4-byte version/flags field before its children).
_SKIP_SIZE = {"meta": 4}
# Public API of this module.
__all__ = ['M4A', 'Open', 'delete', 'M4ACover']
class M4ACover(str):
    """A cover artwork.
    Attributes:
    imageformat -- format of the image (either FORMAT_JPEG or FORMAT_PNG)
    """
    FORMAT_JPEG = 0x0D
    FORMAT_PNG = 0x0E
    def __new__(cls, data, imageformat=None):
        self = str.__new__(cls, data)
        if imageformat is None:
            imageformat = M4ACover.FORMAT_JPEG
        self.imageformat = imageformat
        # NOTE(review): looks like a legacy 'format' attribute alias, but
        # str already provides a 'format' method, so the attribute access
        # never raises AttributeError and the assignment below appears dead.
        # Confirm before relying on self.format.
        try:
            self.format
        except AttributeError:
            self.format = imageformat
        return self
class Atom(object):
    """An individual atom.
    Attributes:
    children -- list child atoms (or None for non-container atoms)
    length -- length of this atom, including length and name
    name -- four byte name of the atom, as a str
    offset -- location in the constructor-given fileobj of this atom
    This structure should only be used internally by Mutagen.
    """
    children = None
    def __init__(self, fileobj):
        # Read the 8-byte atom header: 32-bit big-endian size + 4-char name.
        self.offset = fileobj.tell()
        self.length, self.name = struct.unpack(">I4s", fileobj.read(8))
        if self.length == 1:
            # A size field of 1 signals a 64-bit extended size follows.
            raise error("64 bit atom sizes are not supported")
        elif self.length < 8:
            # NOTE(review): a size < 8 cannot hold its own header; the file
            # position is simply left just past the header here -- confirm
            # intended handling of such malformed atoms.
            return
        if self.name in _CONTAINERS:
            # Container atom: recursively parse children until this atom's
            # extent is consumed ('meta' has extra header bytes to skip).
            self.children = []
            fileobj.seek(_SKIP_SIZE.get(self.name, 0), 1)
            while fileobj.tell() < self.offset + self.length:
                self.children.append(Atom(fileobj))
        else:
            # Leaf atom: skip straight past its payload.
            fileobj.seek(self.offset + self.length, 0)
    @staticmethod
    def render(name, data):
        """Render raw atom data."""
        # this raises OverflowError if Py_ssize_t can't handle the atom data
        size = len(data) + 8
        if size <= 0xFFFFFFFF:
            return struct.pack(">I4s", size, name) + data
        else:
            # Too large for 32 bits: emit the 64-bit form (size field of 1
            # plus a trailing 64-bit size).
            return struct.pack(">I4sQ", 1, name, size + 8) + data
    def __getitem__(self, remaining):
        """Look up a child atom, potentially recursively.
        e.g. atom['udta', 'meta'] => <Atom name='meta' ...>
        """
        if not remaining:
            return self
        elif self.children is None:
            raise KeyError("%r is not a container" % self.name)
        for child in self.children:
            if child.name == remaining[0]:
                return child[remaining[1:]]
        else:
            # for/else: no child matched the requested name.
            raise KeyError("%r not found" % remaining[0])
    def __repr__(self):
        klass = self.__class__.__name__
        if self.children is None:
            return "<%s name=%r length=%r offset=%r>" % (
                klass, self.name, self.length, self.offset)
        else:
            # Indent each child's repr one space to show containment.
            children = "\n".join([" " + line for child in self.children
                                  for line in repr(child).splitlines()])
            return "<%s name=%r length=%r offset=%r\n%s>" % (
                klass, self.name, self.length, self.offset, children)
class Atoms(object):
    """Root atoms in a given file.
    Attributes:
    atoms -- a list of top-level atoms as Atom objects
    This structure should only be used internally by Mutagen.
    """
    def __init__(self, fileobj):
        self.atoms = []
        # Find the file length, then parse top-level atoms up to the end.
        fileobj.seek(0, 2)
        end = fileobj.tell()
        fileobj.seek(0)
        while fileobj.tell() < end:
            self.atoms.append(Atom(fileobj))
    def path(self, *names):
        """Look up and return the complete path of an atom.
        For example, atoms.path('moov', 'udta', 'meta') will return a
        list of three atoms, corresponding to the moov, udta, and meta
        atoms.
        """
        path = [self]
        for name in names:
            # Descend one level; the trailing comma makes a 1-tuple key.
            path.append(path[-1][name, ])
        return path[1:]
    def __getitem__(self, names):
        """Look up a child atom.
        'names' may be a list of atoms (['moov', 'udta']) or a string
        specifying the complete path ('moov.udta').
        """
        if isinstance(names, basestring):
            names = names.split(".")
        for child in self.atoms:
            if child.name == names[0]:
                return child[names[1:]]
        else:
            # for/else: no top-level atom matched.
            raise KeyError("%s not found" % names[0])
    def __repr__(self):
        return "\n".join([repr(child) for child in self.atoms])
class M4ATags(DictProxy, Metadata):
    """Dictionary containing Apple iTunes metadata list key/values.
    Keys are four byte identifiers, except for freeform ('----')
    keys. Values are usually unicode strings, but some atoms have a
    special structure:
    cpil -- boolean
    trkn, disk -- tuple of 16 bit ints (current, total)
    tmpo -- 16 bit int
    covr -- list of M4ACover objects (which are tagged strs)
    gnre -- not supported. Use '\\xa9gen' instead.
    The freeform '----' frames use a key in the format '----:mean:name'
    where 'mean' is usually 'com.apple.iTunes' and 'name' is a unique
    identifier for this frame. The value is a str, but is probably
    text that can be decoded as UTF-8.
    M4A tag data cannot exist outside of the structure of an M4A file,
    so this class should not be manually instantiated.
    Unknown non-text tags are removed.
    """
    def load(self, atoms, fileobj):
        """Populate the tag dict from the file's 'ilst' atom children."""
        try:
            ilst = atoms["moov.udta.meta.ilst"]
        except KeyError as key:
            raise M4AMetadataError(key)
        for atom in ilst.children:
            # Skip the child atom's 8-byte header and read its payload.
            fileobj.seek(atom.offset + 8)
            data = fileobj.read(atom.length - 8)
            # Dispatch on atom name; unknown atoms default to text parsing.
            parse = self.__atoms.get(atom.name, (M4ATags.__parse_text,))[0]
            parse(self, atom, data)
    @staticmethod
    def __key_sort(item1, item2):
        # Python 2 cmp-style comparator used when saving tags.
        (key1, v1) = item1
        (key2, v2) = item2
        # iTunes always writes the tags in order of "relevance", try
        # to copy it as closely as possible.
        order = ["\xa9nam", "\xa9ART", "\xa9wrt", "\xa9alb",
                 "\xa9gen", "gnre", "trkn", "disk",
                 "\xa9day", "cpil", "tmpo", "\xa9too",
                 "----", "covr", "\xa9lyr"]
        order = dict(zip(order, range(len(order))))
        last = len(order)
        # If there's no key-based way to distinguish, order by length.
        # If there's still no way, go by string comparison on the
        # values, so we at least have something deterministic.
        return (cmp(order.get(key1[:4], last), order.get(key2[:4], last)) or
                cmp(len(v1), len(v2)) or cmp(v1, v2))
    def save(self, filename):
        """Save the metadata to the given filename."""
        values = []
        items = self.items()
        items.sort(self.__key_sort)
        for key, value in items:
            # Dispatch on the key's first 4 bytes; default to text rendering.
            render = self.__atoms.get(
                key[:4], (None, M4ATags.__render_text))[1]
            values.append(render(self, key, value))
        data = Atom.render("ilst", "".join(values))
        # Find the old atoms.
        fileobj = open(filename, "rb+")
        try:
            atoms = Atoms(fileobj)
            moov = atoms["moov"]
            if moov != atoms.atoms[-1]:
                # "Free" the old moov block. Something in the mdat
                # block is not happy when its offset changes and it
                # won't play back. So, rather than try to figure that
                # out, just move the moov atom to the end of the file.
                offset = self.__move_moov(fileobj, moov)
            else:
                offset = 0
            try:
                path = atoms.path("moov", "udta", "meta", "ilst")
            except KeyError:
                self.__save_new(fileobj, atoms, data, offset)
            else:
                self.__save_existing(fileobj, atoms, path, data, offset)
        finally:
            fileobj.close()
    def __move_moov(self, fileobj, moov):
        # Copy moov to the end of the file and overwrite its old location
        # with a 'free' atom of the same size; returns the seek delta.
        fileobj.seek(moov.offset)
        data = fileobj.read(moov.length)
        fileobj.seek(moov.offset)
        free = Atom.render("free", "\x00" * (moov.length - 8))
        fileobj.write(free)
        fileobj.seek(0, 2)
        # Figure out how far we have to shift all our successive
        # seek calls, relative to what the atoms say.
        old_end = fileobj.tell()
        fileobj.write(data)
        return old_end - moov.offset
    def __save_new(self, fileobj, atoms, ilst, offset):
        # No existing meta/ilst: synthesize a 'meta' atom (with the required
        # hdlr child) and splice it into 'udta'.
        hdlr = Atom.render("hdlr", "\x00" * 8 + "mdirappl" + "\x00" * 9)
        meta = Atom.render("meta", "\x00\x00\x00\x00" + hdlr + ilst)
        moov, udta = atoms.path("moov", "udta")
        insert_bytes(fileobj, len(meta), udta.offset + offset + 8)
        fileobj.seek(udta.offset + offset + 8)
        fileobj.write(meta)
        self.__update_parents(fileobj, [moov, udta], len(meta), offset)
    def __save_existing(self, fileobj, atoms, path, data, offset):
        # Replace the old ilst atom.
        ilst = path.pop()
        delta = len(data) - ilst.length
        fileobj.seek(ilst.offset + offset)
        # Grow or shrink the file in place to fit the new ilst payload.
        if delta > 0:
            insert_bytes(fileobj, delta, ilst.offset + offset)
        elif delta < 0:
            delete_bytes(fileobj, -delta, ilst.offset + offset)
        fileobj.seek(ilst.offset + offset)
        fileobj.write(data)
        self.__update_parents(fileobj, path, delta, offset)
    def __update_parents(self, fileobj, path, delta, offset):
        # Update all parent atoms with the new size.
        for atom in path:
            fileobj.seek(atom.offset + offset)
            size = cdata.uint_be(fileobj.read(4)) + delta
            fileobj.seek(atom.offset + offset)
            fileobj.write(cdata.to_uint_be(size))
    def __render_data(self, key, flags, data):
        # Wrap payload in a 'data' atom (flags + 4 reserved bytes) inside
        # an atom named after the tag key.
        data = struct.pack(">2I", flags, 0) + data
        return Atom.render(key, Atom.render("data", data))
    def __parse_freeform(self, atom, data):
        # Freeform '----' atoms: mean + name + data sub-atoms.
        try:
            fileobj = StringIO(data)
            mean_length = cdata.uint_be(fileobj.read(4))
            # skip over 8 bytes of atom name, flags
            mean = fileobj.read(mean_length - 4)[8:]
            name_length = cdata.uint_be(fileobj.read(4))
            name = fileobj.read(name_length - 4)[8:]
            value_length = cdata.uint_be(fileobj.read(4))
            # Name, flags, and reserved bytes
            value = fileobj.read(value_length - 4)[12:]
        except struct.error:
            # Some ---- atoms have no data atom, I have no clue why
            # they actually end up in the file.
            pass
        else:
            self["%s:%s:%s" % (atom.name, mean, name)] = value
    def __render_freeform(self, key, value):
        # Key format is '----:mean:name'; re-emit the three sub-atoms.
        dummy, mean, name = key.split(":", 2)
        mean = struct.pack(">I4sI", len(mean) + 12, "mean", 0) + mean
        name = struct.pack(">I4sI", len(name) + 12, "name", 0) + name
        value = struct.pack(">I4s2I", len(value) + 16, "data", 0x1, 0) + value
        final = mean + name + value
        return Atom.render("----", final)
    def __parse_pair(self, atom, data):
        # trkn/disk: (current, total) 16-bit pair at a fixed offset.
        self[atom.name] = struct.unpack(">2H", data[18:22])
    def __render_pair(self, key, value):
        track, total = value
        if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
            data = struct.pack(">4H", 0, track, total, 0)
            return self.__render_data(key, 0, data)
        else:
            raise M4AMetadataValueError("invalid numeric pair %r" % (value,))
    def __render_pair_no_trailing(self, key, value):
        # Same as __render_pair but without the trailing padding short
        # (iTunes writes 'disk' this way).
        track, total = value
        if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
            data = struct.pack(">3H", 0, track, total)
            return self.__render_data(key, 0, data)
        else:
            raise M4AMetadataValueError("invalid numeric pair %r" % (value,))
    def __parse_genre(self, atom, data):
        # Translate to a freeform genre.
        genre = cdata.short_be(data[16:18])
        if "\xa9gen" not in self:
            try:
                # gnre is 1-based into the ID3 genre table.
                self["\xa9gen"] = GENRES[genre - 1]
            except IndexError:
                pass
    def __parse_tempo(self, atom, data):
        # tmpo: 16-bit BPM value.
        self[atom.name] = cdata.short_be(data[16:18])
    def __render_tempo(self, key, value):
        if 0 <= value < 1 << 16:
            return self.__render_data(key, 0x15, cdata.to_ushort_be(value))
        else:
            raise M4AMetadataValueError("invalid short integer %r" % value)
    def __parse_compilation(self, atom, data):
        # cpil: single boolean byte; missing payload means False.
        try:
            self[atom.name] = bool(ord(data[16:17]))
        except TypeError:
            self[atom.name] = False
    def __render_compilation(self, key, value):
        return self.__render_data(key, 0x15, chr(bool(value)))
    def __parse_cover(self, atom, data):
        # covr: image bytes preceded by a 'data' atom header whose flags
        # field carries the image format.
        length, name, imageformat = struct.unpack(">I4sI", data[:12])
        if name != "data":
            raise M4AMetadataError(
                "unexpected atom %r inside 'covr'" % name)
        if imageformat not in (M4ACover.FORMAT_JPEG, M4ACover.FORMAT_PNG):
            imageformat = M4ACover.FORMAT_JPEG
        self[atom.name] = M4ACover(data[16:length], imageformat)
    def __render_cover(self, key, value):
        try:
            imageformat = value.imageformat
        except AttributeError:
            # Plain strings default to JPEG.
            imageformat = M4ACover.FORMAT_JPEG
        data = Atom.render("data", struct.pack(">2I", imageformat, 0) + value)
        return Atom.render(key, data)
    def __parse_text(self, atom, data):
        # Flags value 1 marks UTF-8 text; anything else is dropped.
        flags = cdata.uint_be(data[8:12])
        if flags == 1:
            self[atom.name] = data[16:].decode('utf-8', 'replace')
    def __render_text(self, key, value):
        return self.__render_data(key, 0x1, value.encode('utf-8'))
    def delete(self, filename):
        """Remove all tags by saving an empty tag dictionary."""
        self.clear()
        self.save(filename)
    # Per-atom (parse, render) dispatch table; missing entries fall back
    # to the text parser/renderer.
    __atoms = {
        "----": (__parse_freeform, __render_freeform),
        "trkn": (__parse_pair, __render_pair),
        "disk": (__parse_pair, __render_pair_no_trailing),
        "gnre": (__parse_genre, None),
        "tmpo": (__parse_tempo, __render_tempo),
        "cpil": (__parse_compilation, __render_compilation),
        "covr": (__parse_cover, __render_cover),
    }
    def pprint(self):
        """Return a human-readable one-tag-per-line dump of the metadata."""
        values = []
        for key, value in self.iteritems():
            key = key.decode('latin1')
            try:
                values.append("%s=%s" % (key, value))
            except UnicodeDecodeError:
                # Binary values (e.g. cover art) are summarized by size.
                values.append("%s=[%d bytes of data]" % (key, len(value)))
        return "\n".join(values)
class M4AInfo(StreamInfo):
    """MPEG-4 stream information.
    Attributes:
    bitrate -- bitrate in bits per second, as an int
    length -- file length in seconds, as a float
    """
    bitrate = 0
    def __init__(self, atoms, fileobj):
        # Reject non-audio tracks: the handler atom must mention 'soun'.
        hdlr = atoms["moov.trak.mdia.hdlr"]
        fileobj.seek(hdlr.offset)
        if "soun" not in fileobj.read(hdlr.length):
            raise M4AStreamInfoError("track has no audio data")
        mdhd = atoms["moov.trak.mdia.mdhd"]
        fileobj.seek(mdhd.offset)
        data = fileobj.read(mdhd.length)
        # The mdhd version byte selects 32-bit (v0) or 64-bit (v1) fields.
        if ord(data[8]) == 0:
            offset = 20
            fmt = ">2I"
        else:
            offset = 28
            fmt = ">IQ"
        end = offset + struct.calcsize(fmt)
        unit, length = struct.unpack(fmt, data[offset:end])
        # Duration is stored in timescale units ('unit' ticks per second).
        self.length = float(length) / unit
        try:
            atom = atoms["moov.trak.mdia.minf.stbl.stsd"]
            fileobj.seek(atom.offset)
            data = fileobj.read(atom.length)
            # NOTE(review): bitrate is read from a fixed offset near the end
            # of stsd; format-specific -- verify against the MP4 spec.
            self.bitrate = cdata.uint_be(data[-17:-13])
        except (ValueError, KeyError):
            # Bitrate values are optional.
            pass
    def pprint(self):
        return "MPEG-4 audio, %.2f seconds, %d bps" % (
            self.length, self.bitrate)
class M4A(FileType):
    """An MPEG-4 audio file, probably containing AAC.
    If more than one track is present in the file, the first is used.
    Only audio ('soun') tracks will be read.
    """
    _mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"]
    def load(self, filename):
        """Parse stream info and tags from *filename*."""
        self.filename = filename
        fileobj = open(filename, "rb")
        try:
            atoms = Atoms(fileobj)
            try:
                self.info = M4AInfo(atoms, fileobj)
            except StandardError as err:
                # Re-raise with the original traceback as a stream-info error.
                reraise(M4AStreamInfoError, err, sys.exc_info()[2])
            try:
                self.tags = M4ATags(atoms, fileobj)
            except M4AMetadataError:
                # Missing metadata is fine; the file simply has no tags.
                self.tags = None
            except StandardError as err:
                reraise(M4AMetadataError, err, sys.exc_info()[2])
        finally:
            fileobj.close()
    def add_tags(self):
        """Attach an empty tag dictionary to the file."""
        self.tags = M4ATags()
    @staticmethod
    def score(filename, fileobj, header):
        # File-type detection heuristic: score by 'ftyp'/'mp4' in the header.
        return ("ftyp" in header) + ("mp4" in header)
# Alias used by mutagen's generic Open() machinery.
Open = M4A
def delete(filename):
    """Remove all M4A tags from *filename*."""
    audio = M4A(filename)
    audio.delete()
|
from __future__ import print_function
import PyKDL as kdl
import urdf_parser_py.urdf as urdf
def treeFromFile(filename):
    """
    Construct a PyKDL.Tree from an URDF file.
    :param filename: URDF file path
    """
    with open(filename) as urdf_file:
        xml = urdf_file.read()
    return treeFromUrdfModel(urdf.URDF.from_xml_string(xml))
def treeFromParam():
    """
    Construct a PyKDL.Tree from an URDF fetched from the ROS parameter server.
    """
    model = urdf.URDF.from_parameter_server()
    return treeFromUrdfModel(model)
def treeFromString(xml):
    """
    Construct a PyKDL.Tree from an URDF xml string.
    :param xml: URDF xml string, ``str``
    """
    model = urdf.URDF.from_xml_string(xml)
    return treeFromUrdfModel(model)
def toKdlPose(pose):
    """
    Package an URDF pose (rpy orientation + xyz position) into a KDL Frame,
    falling back to the identity frame when the pose is absent or incomplete.
    """
    complete = (pose and pose.rpy and len(pose.rpy) == 3
                and pose.xyz and len(pose.xyz) == 3)
    if not complete:
        return kdl.Frame.Identity()
    rotation = kdl.Rotation.RPY(*pose.rpy)
    translation = kdl.Vector(*pose.xyz)
    return kdl.Frame(rotation, translation)
def toKdlInertia(i):
    """
    Convert an URDF inertial element to a KDL RigidBodyInertia, rotating it
    from the inertia reference frame into the link reference frame.
    """
    origin = toKdlPose(i.origin)
    inr = i.inertia
    rotational = kdl.RotationalInertia(
        inr.ixx, inr.iyy, inr.izz, inr.ixy, inr.ixz, inr.iyz)
    return origin.M * kdl.RigidBodyInertia(i.mass, origin.p, rotational)
def toKdlJoint(jnt):
    """Build the KDL joint matching an URDF joint's type, axis and origin."""
    def fixed(j, F):
        return kdl.Joint(j.name)

    def rotational(j, F):
        return kdl.Joint(j.name, F.p, F.M * kdl.Vector(*j.axis),
                         kdl.Joint.RotAxis)

    def translational(j, F):
        return kdl.Joint(j.name, F.p, F.M * kdl.Vector(*j.axis),
                         kdl.Joint.TransAxis)

    # Unsupported/degenerate joint types collapse to a fixed joint.
    dispatch = {
        'fixed': fixed,
        'revolute': rotational,
        'continuous': rotational,
        'prismatic': translational,
        'floating': fixed,
        'planar': fixed,
        'unknown': fixed,
    }
    return dispatch[jnt.type](jnt, toKdlPose(jnt.origin))
def addChildrenToTree(robotModel, root, tree):
    """
    Helper function that adds children to a KDL tree.

    Recursively converts *root* (an URDF link) and its subtree into KDL
    segments attached to *tree*. Returns False as soon as any segment
    fails to attach, True otherwise.
    """
    # constructs the optional inertia (zero inertia when unspecified)
    inert = kdl.RigidBodyInertia(0)
    if root.inertial:
        inert = toKdlInertia(root.inertial)
    # look up the joint connecting this link to its parent
    parentJointName, parentLinkName = robotModel.parent_map[root.name]
    parentJoint = robotModel.joint_map[parentJointName]
    # construct the kdl segment
    sgm = kdl.Segment(
        root.name,
        toKdlJoint(parentJoint),
        toKdlPose(parentJoint.origin),
        inert)
    # add segment to tree
    if not tree.addSegment(sgm, parentLinkName):
        return False
    # leaf link: nothing more to attach
    if root.name not in robotModel.child_map:
        return True
    children = [robotModel.link_map[l] for (j, l) in robotModel.child_map[root.name]]
    # recursively add all children
    for child in children:
        if not addChildrenToTree(robotModel, child, tree):
            return False
    return True
def treeFromUrdfModel(robotModel, quiet=False):
    """
    Construct a PyKDL.Tree from an URDF model from urdf_parser_python.
    :param robotModel: URDF xml string, ``str``
    :param quiet: If true suppress messages to stdout, ``bool``

    Returns a (success, tree) tuple; success is False when any link of the
    model could not be converted into a KDL segment.
    """
    root = robotModel.link_map[robotModel.get_root()]
    # KDL cannot represent inertia on the root link; warn but continue.
    if root.inertial and not quiet:
        print("The root link %s has an inertia specified in the URDF, but KDL does not support a\
 root link with an inertia.  As a workaround, you can add an extra dummy link to your URDF.\
" % root.name)
    okay = True
    tree = kdl.Tree(root.name)
    # add all children
    for _, child in robotModel.child_map[root.name]:
        if not addChildrenToTree(robotModel, robotModel.link_map[child], tree):
            okay = False
            break
    return (okay, tree)
|
import time
from mlflow.entities import Metric
from tests.helper_functions import random_str, random_int
def _check(metric, key, value, timestamp, step):
    """Assert that *metric* is a Metric carrying exactly the given fields."""
    # isinstance is the idiomatic type check; `type(x) == T` is an
    # anti-pattern (and needlessly rejects subclasses).
    assert isinstance(metric, Metric)
    assert metric.key == key
    assert metric.value == value
    assert metric.timestamp == timestamp
    assert metric.step == step
def test_creation_and_hydration():
    """Round-trip a Metric through dict and proto representations."""
    key = random_str()
    value = 10000
    ts = int(time.time())
    step = random_int()
    # Direct construction.
    metric = Metric(key, value, ts, step)
    _check(metric, key, value, ts, step)
    as_dict = {"key": key, "value": value, "timestamp": ts, "step": step}
    assert dict(metric) == as_dict
    # Proto round-trip preserves all fields.
    proto = metric.to_proto()
    metric2 = metric.from_proto(proto)
    _check(metric2, key, value, ts, step)
    # Dictionary hydration preserves all fields.
    metric3 = Metric.from_dictionary(as_dict)
    _check(metric3, key, value, ts, step)
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace the per-bank-account remittance email field with a standalone
    RemittanceEmail model linked directly to a prison."""
    dependencies = [
        ('prison', '0017_prison_private_estate'),
    ]
    operations = [
        migrations.CreateModel(
            name='RemittanceEmail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254)),
                ('prison', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='prison.Prison')),
            ],
            options={
                'ordering': ('prison',),
            },
        ),
        migrations.RemoveField(
            model_name='prisonbankaccount',
            name='remittance_email',
        ),
    ]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
# Command-line configuration for the TFRecord conversion run.
tf.app.flags.DEFINE_integer('train_shards', 12,
                            'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('test_shards', 4,
                            'Number of shards in test TFRecord files.')
tf.app.flags.DEFINE_string('output_directory', './tfRecords-Indoors-2/',
                           'Output data directory')
tf.app.flags.DEFINE_integer('num_threads', 4,
                            'Number of threads to preprocess the images.')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
    """Wrap a scalar or list of ints as an int64 Feature for an Example proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_feature(value):
    """Wrap a scalar or list of floats as a float Feature for an Example proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _bytes_feature(value):
    """Wrap a single bytes value as a bytes Feature for an Example proto."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def _convert_to_example(image_buffer, trainid, filename):
    """Build an Example proto for a single image.

    Args:
      image_buffer: string, JPEG encoding of RGB image.
      trainid: integer, ground-truth class id for the image.
      filename: string, path to the image file, e.g., '/path/to/example.JPG'.

    Returns:
      tf.train.Example proto holding the class id, the encoded image bytes
      and the file name.
    """
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/class/trainid': _int64_feature(trainid),
        'image/encoded': _bytes_feature(image_buffer),
        'image/filename': _bytes_feature(filename)
    }))
    return example
def _is2convert(filename):
blacklist = ['laundromat/Laundry_Room.bmp',
'waitingroom/Bistro_3.BMP',
'kindergarden/classroom_north.bmp',
'gym/Gym1.png',
'winecellar/wine_cellar_floor_stone.gif',
'laundromat/Laundry_Room.bmp',
'computerroom/url.gif',
'poolinside/indooPool_Inside.gif',
'library/scotland_library2.png',
'fastfood_restaurant/panther_grill.gif',
'closet/closet_design_lg.gif',
'waitingroom/Bistro_3.BMP',
'gym/Gym2.png',
'nursery/idkidsc0301.png',
'kindergarden/classroom_north.bmp',
'fastfood_restaurant/subway.gif',
'garage/salmon_garage_after.gif',
'waitingroom/deco5.png',
'shoeshop/marky.png',
'buffet/Buffet_Lettuce.gif',
'fastfood_restaurant/melvados.gif',
'computerroom/aula_informatica.gif',
'buffet/Buffet_Set_Up.gif',
'meeting_room/conferencerm2.gif',
'kindergarden/ClassroomLarge.gif',
'fastfood_restaurant/connies.gif',
'greenhouse/1412_mb_file_0a8c5.gif',
'buffet/Buffet_Set_Up_2.gif',
'casino/casino_0338.jpg',
'casino/casino_0336.jpg']
return filename.split('Images/')[-1] in blacklist
class ImageCoder(object):
    """Re-encodes arbitrary image bytes (JPEG/PNG/BMP/GIF) as RGB JPEG via TF."""

    def __init__(self):
        # One shared Session for all image coding calls.
        self._sess = tf.Session()
        # decode_image handles JPEG, PNG, BMP and non-animated GIFs; GIFs come
        # back 4-D ([1, height, width, channels]), so squeeze before encoding.
        self._input_bytes = tf.placeholder(dtype=tf.string)
        decoded = tf.image.decode_image(self._input_bytes, channels=3)
        decoded = tf.squeeze(decoded)
        self._jpeg_bytes = tf.image.encode_jpeg(decoded, format='rgb', quality=100)

    def re_encode_jpeg(self, image_data):
        """Return *image_data* re-encoded as RGB JPEG bytes.

        Since tf 1.2 decode_image covers all four formats; re-encoding keeps
        the pipeline compatible with versions < 1.2 that only decode JPEG.
        """
        return self._sess.run(self._jpeg_bytes,
                              feed_dict={self._input_bytes: image_data})
def _process_image(filename, coder):
    """Process a single image file.

    Args:
      filename: string, path to an image file e.g., '/path/to/example.JPG'.
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
      image_buffer: string, JPEG encoding of RGB image.
    """
    # Read the raw bytes. 'rb' is required: under Python 3, mode 'r' would
    # attempt text decoding of binary image data and corrupt it.
    with tf.gfile.FastGFile(filename, 'rb') as f:
        image_data = f.read()
    # Blacklisted non-JPEG files are re-encoded to JPEG.
    if _is2convert(filename):
        print('Reencoding to JPEG for %s' % filename)
        image_data = coder.re_encode_jpeg(image_data)
    return image_data
def _process_image_files_batch(coder, thread_index, ranges, name, filenames, labels, num_shards):
    """Processes and saves list of images as TFRecord in 1 thread.
    Args:
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
      thread_index: integer, unique batch to run index is within [0, len(ranges)).
      ranges: list of pairs of integers specifying ranges of each batches to
        analyze in parallel.
      name: string, unique identifier specifying the data set
      filenames: list of strings; each string is a path to an image file
      labels: list of integer; each integer identifies the ground truth
      num_shards: integer number of shards for this data set.
    """
    # Each thread produces N shards where N = int(num_shards / num_threads).
    # For instance, if num_shards = 128, and the num_threads = 2, then the first
    # thread would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads  # shards must divide evenly per thread
    num_shards_per_batch = int(num_shards // num_threads)
    # Split this thread's file range evenly into per-shard sub-ranges.
    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
    counter = 0
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_directory, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)
        shard_counter = 0
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in files_in_shard:
            filename = filenames[i]
            label = labels[i]
            image_buffer = _process_image(filename, coder)
            example = _convert_to_example(image_buffer, label, filename)
            writer.write(example.SerializeToString())
            shard_counter += 1
            counter += 1
            # Progress report every 1000 images.
            if not counter % 1000:
                print('%s [thread %d]: Processed %d of %d images in thread batch.' %
                      (datetime.now(), thread_index, counter, num_files_in_thread))
                sys.stdout.flush()
        writer.close()
        print('%s [thread %d]: Wrote %d images to %s' %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    # NOTE(review): the message says 'shards' but num_files_in_thread is
    # passed as the second %d — confirm which was intended.
    print('%s [thread %d]: Wrote %d images to %d shards.' %
          (datetime.now(), thread_index, counter, num_files_in_thread))
    sys.stdout.flush()
def _process_image_files(name, filenames, labels, num_shards):
    """Process and save list of images as TFRecord of Example protos.
    Args:
      name: string, unique identifier specifying the data set
      filenames: list of strings; each string is a path to an image file
      labels: list of integer; each integer identifies the ground truth
      num_shards: integer number of shards for this data set.
    """
    assert len(filenames) == len(labels)
    # Break all images into batches with a [ranges[i][0], ranges[i][1]].
    # np.int was removed in NumPy 1.20+; the builtin int is the supported dtype.
    spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
    ranges = []
    for i in range(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])
    # Launch a thread for each batch.
    print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
    sys.stdout.flush()
    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()
    # Create a generic TensorFlow-based utility for converting all image codings.
    coder = ImageCoder()
    threads = []
    for thread_index in range(len(ranges)):
        args = (coder, thread_index, ranges, name, filenames, labels, num_shards)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)
    # Wait for all the threads to terminate.
    coord.join(threads)
    print('%s: Finished writing all %d images in data set.' %
          (datetime.now(), len(filenames)))
    sys.stdout.flush()
def _find_image_files(data_dir, data_sub):
    """Build shuffled, parallel lists of image paths and integer labels.

    Reads '<data_dir><data_sub>Images.txt' for relative file names and derives
    the label mapping from the sorted sub-directories of '<data_dir>Images'.
    """
    import glob

    def mapping_name_to_label(dir_name):
        # Directory basename -> dense integer id, assigned in sorted order.
        return {path.split('/')[-1]: idx
                for idx, path in enumerate(sorted(glob.glob(dir_name + '/*')))}

    print('Determining list of input files and labels from %s.' % data_dir)
    file_list = open(data_dir + data_sub + 'Images.txt').read().split('\n')
    file_list.pop()  # drop the empty entry left by the trailing newline

    mapping = mapping_name_to_label(data_dir + 'Images')
    filenames = []
    labels = []
    for filename in file_list:
        labels.append(mapping[filename.split('/')[0]])
        if 'jpg' not in filename:
            print(filename)
        filenames.append(data_dir + 'Images/' + filename)

    # Shuffle the ordering of all image files in order to guarantee random
    # ordering with respect to label in the saved TFRecord files. The fixed
    # seed makes the randomization repeatable.
    order = list(range(len(filenames)))
    random.seed(12345)
    random.shuffle(order)
    filenames = [filenames[i] for i in order]
    labels = [labels[i] for i in order]

    print('Found %d JPEG files across %d labels inside %s.' %
          (len(filenames), len(labels), data_dir))
    return filenames, labels
def _process_dataset(name, directory, num_shards):
    """Process a complete data set and save it as a TFRecord.

    Args:
      name: string, unique identifier specifying the data set ('Train'/'Test').
      directory: string, root path to the data set.
      num_shards: integer number of shards for this data set.
    """
    filenames, labels = _find_image_files(directory, name)
    _process_image_files(name, filenames, labels, num_shards)
def main(unused_argv):
    """Convert the MIT Indoors test split into sharded TFRecord files."""
    assert not FLAGS.train_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
    assert not FLAGS.test_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with '
        'FLAGS.validation_shards')
    # 'is not True' on a boolean expression is un-idiomatic; plain negation.
    if not os.path.exists(FLAGS.output_directory):
        os.mkdir(FLAGS.output_directory)
    dir_name = '/home/jacques/workspace/database/MIT_Indoors_67/'
    # Run it!
    #_process_dataset('Train', dir_name, FLAGS.train_shards)
    _process_dataset('Test', dir_name, FLAGS.test_shards)
if __name__ == '__main__':
    tf.app.run()
|
"""Unit test for Sensor and ExposeSensor objects."""
import asyncio
import unittest
from xknx import XKNX
from xknx.devices import BinarySensor, ExposeSensor, Sensor
from xknx.dpt import DPTArray, DPTBinary
from xknx.telegram import GroupAddress, Telegram, TelegramDirection, TelegramType
class SensorExposeLoopTest(unittest.TestCase):
"""Process incoming Telegrams and send the values to the bus again."""
    def setUp(self):
        """Set up test class."""
        # A fresh event loop per test so queued telegrams don't leak between tests.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
    def tearDown(self):
        """Tear down test class."""
        # Close the loop created in setUp to release its resources.
        self.loop.close()
def test_array_sensor_loop(self):
"""Test sensor and expose_sensor with different values."""
test_cases = [
(
"absolute_temperature",
DPTArray(
(
0x44,
0xD7,
0xD2,
0x8B,
)
),
1726.5795,
),
(
"acceleration",
DPTArray(
(
0x45,
0x94,
0xD8,
0x5D,
)
),
4763.0454,
),
(
"acceleration_angular",
DPTArray(
(
0x45,
0xEA,
0x62,
0x34,
)
),
7500.2754,
),
(
"activation_energy",
DPTArray(
(
0x46,
0x0,
0x3E,
0xEE,
)
),
8207.7324,
),
(
"active_energy",
DPTArray(
(
0x26,
0x37,
0x49,
0x7F,
)
),
641157503,
),
(
"active_energy_kwh",
DPTArray(
(
0x37,
0x5,
0x5,
0xEA,
)
),
923076074,
),
(
"activity",
DPTArray(
(
0x45,
0x76,
0x0,
0xA3,
)
),
3936.0398,
),
(
"amplitude",
DPTArray(
(
0x45,
0x9A,
0xED,
0x8,
)
),
4957.6289,
),
("angle", DPTArray((0xE4,)), 322),
(
"angle_deg",
DPTArray(
(
0x44,
0x5C,
0x20,
0x2B,
)
),
880.5026,
),
(
"angle_rad",
DPTArray(
(
0x44,
0x36,
0x75,
0x1,
)
),
729.8282,
),
(
"angular_frequency",
DPTArray(
(
0x43,
0xBC,
0x20,
0x8D,
)
),
376.2543,
),
(
"angular_momentum",
DPTArray(
(
0xC2,
0x75,
0xB7,
0xB5,
)
),
-61.4294,
),
(
"angular_velocity",
DPTArray(
(
0xC4,
0xD9,
0x10,
0xB3,
)
),
-1736.5219,
),
(
"apparant_energy",
DPTArray(
(
0xD3,
0xBD,
0x1E,
0xA5,
)
),
-742580571,
),
(
"apparant_energy_kvah",
DPTArray(
(
0x49,
0x40,
0xC9,
0x9,
)
),
1228982537,
),
(
"area",
DPTArray(
(
0x45,
0x63,
0x1E,
0xCD,
)
),
3633.9250,
),
(
"brightness",
DPTArray(
(
0xC3,
0x56,
)
),
50006,
),
(
"capacitance",
DPTArray(
(
0x45,
0xC9,
0x1D,
0x9D,
)
),
6435.7017,
),
(
"charge_density_surface",
DPTArray(
(
0x45,
0xDB,
0x66,
0x99,
)
),
7020.8247,
),
(
"charge_density_volume",
DPTArray(
(
0xC4,
0x8C,
0x33,
0xD7,
)
),
-1121.6200,
),
(
"color_temperature",
DPTArray(
(
0x6C,
0x95,
)
),
27797,
),
(
"common_temperature",
DPTArray(
(
0x45,
0xD9,
0xC6,
0x3F,
)
),
6968.7808,
),
(
"compressibility",
DPTArray(
(
0x45,
0x89,
0x94,
0xAB,
)
),
4402.5835,
),
(
"conductance",
DPTArray(
(
0x45,
0xA6,
0x28,
0xF9,
)
),
5317.1216,
),
("counter_pulses", DPTArray((0x9D,)), -99),
(
"current",
DPTArray(
(
0xCA,
0xCC,
)
),
51916,
),
(
"delta_time_hrs",
DPTArray(
(
0x47,
0x80,
)
),
18304,
),
(
"delta_time_min",
DPTArray(
(
0xB9,
0x7B,
)
),
-18053,
),
(
"delta_time_ms",
DPTArray(
(
0x58,
0x77,
)
),
22647,
),
(
"delta_time_sec",
DPTArray(
(
0xA3,
0x6A,
)
),
-23702,
),
(
"density",
DPTArray(
(
0x44,
0xA5,
0xCB,
0x27,
)
),
1326.3485,
),
(
"electrical_conductivity",
DPTArray(
(
0xC4,
0xC6,
0xF5,
0x6E,
)
),
-1591.6697,
),
(
"electric_charge",
DPTArray(
(
0x46,
0x14,
0xF6,
0xA0,
)
),
9533.6562,
),
(
"electric_current",
DPTArray(
(
0x45,
0xAD,
0x45,
0x90,
)
),
5544.6953,
),
(
"electric_current_density",
DPTArray(
(
0x45,
0x7C,
0x57,
0xF6,
)
),
4037.4976,
),
(
"electric_dipole_moment",
DPTArray(
(
0x45,
0x58,
0xF1,
0x73,
)
),
3471.0906,
),
(
"electric_displacement",
DPTArray(
(
0xC5,
0x34,
0x8B,
0x0,
)
),
-2888.6875,
),
(
"electric_field_strength",
DPTArray(
(
0xC6,
0x17,
0x1C,
0x39,
)
),
-9671.0557,
),
(
"electric_flux",
DPTArray(
(
0x45,
0x8F,
0x6C,
0xFD,
)
),
4589.6235,
),
(
"electric_flux_density",
DPTArray(
(
0xC6,
0x0,
0x50,
0xA8,
)
),
-8212.1641,
),
(
"electric_polarization",
DPTArray(
(
0x45,
0xF8,
0x89,
0xC6,
)
),
7953.2217,
),
(
"electric_potential",
DPTArray(
(
0xC6,
0x18,
0xA4,
0xAF,
)
),
-9769.1709,
),
(
"electric_potential_difference",
DPTArray(
(
0xC6,
0xF,
0x1D,
0x6,
)
),
-9159.2559,
),
(
"electromagnetic_moment",
DPTArray(
(
0x45,
0x82,
0x48,
0xAE,
)
),
4169.0850,
),
(
"electromotive_force",
DPTArray(
(
0x45,
0xBC,
0xEF,
0xEB,
)
),
6045.9897,
),
(
"energy",
DPTArray(
(
0x45,
0x4B,
0xB3,
0xF8,
)
),
3259.2480,
),
(
"enthalpy",
DPTArray(
(
0x76,
0xDD,
)
),
287866.88,
),
(
"flow_rate_m3h",
DPTArray(
(
0x99,
0xEA,
0xC0,
0x55,
)
),
-1712668587,
),
(
"force",
DPTArray(
(
0x45,
0x9E,
0x2C,
0xE1,
)
),
5061.6099,
),
(
"frequency",
DPTArray(
(
0x45,
0xC2,
0x3C,
0x44,
)
),
6215.5332,
),
(
"heatcapacity",
DPTArray(
(
0xC5,
0xB3,
0x56,
0x7E,
)
),
-5738.8115,
),
(
"heatflowrate",
DPTArray(
(
0x44,
0xEC,
0x80,
0x7A,
)
),
1892.0149,
),
(
"heat_quantity",
DPTArray(
(
0xC5,
0xA6,
0xB6,
0xD5,
)
),
-5334.8540,
),
(
"humidity",
DPTArray(
(
0x7E,
0xE1,
)
),
577044.48,
),
(
"impedance",
DPTArray(
(
0x45,
0xDD,
0x79,
0x6D,
)
),
7087.1782,
),
(
"illuminance",
DPTArray(
(
0x7C,
0x5E,
)
),
366346.24,
),
(
"kelvin_per_percent",
DPTArray(
(
0xFA,
0xBD,
)
),
-441384.96,
),
(
"length",
DPTArray(
(
0xC5,
0x9D,
0xAE,
0xC5,
)
),
-5045.8462,
),
(
"length_mm",
DPTArray(
(
0x56,
0xB9,
)
),
22201,
),
(
"light_quantity",
DPTArray(
(
0x45,
0x4A,
0xF5,
0x68,
)
),
3247.3379,
),
(
"long_delta_timesec",
DPTArray(
(
0x45,
0xB2,
0x17,
0x54,
)
),
1169299284,
),
(
"luminance",
DPTArray(
(
0x45,
0x18,
0xD9,
0x76,
)
),
2445.5913,
),
(
"luminous_flux",
DPTArray(
(
0x45,
0xBD,
0x16,
0x9,
)
),
6050.7544,
),
(
"luminous_intensity",
DPTArray(
(
0x46,
0xB,
0xBE,
0x7E,
)
),
8943.6230,
),
(
"magnetic_field_strength",
DPTArray(
(
0x44,
0x15,
0xF1,
0xAD,
)
),
599.7762,
),
(
"magnetic_flux",
DPTArray(
(
0xC5,
0xCB,
0x3C,
0x98,
)
),
-6503.5742,
),
(
"magnetic_flux_density",
DPTArray(
(
0x45,
0xB6,
0xBD,
0x42,
)
),
5847.6572,
),
(
"magnetic_moment",
DPTArray(
(
0xC3,
0x8E,
0x7F,
0x73,
)
),
-284.9957,
),
(
"magnetic_polarization",
DPTArray(
(
0x45,
0x8C,
0xFA,
0xCB,
)
),
4511.3491,
),
(
"magnetization",
DPTArray(
(
0x45,
0xF7,
0x9D,
0xA2,
)
),
7923.7041,
),
(
"magnetomotive_force",
DPTArray(
(
0xC6,
0x4,
0xC2,
0xDA,
)
),
-8496.7129,
),
(
"mass",
DPTArray(
(
0x45,
0x8F,
0x70,
0xA4,
)
),
4590.0801,
),
(
"mass_flux",
DPTArray(
(
0xC6,
0x7,
0x34,
0xFF,
)
),
-8653.2490,
),
(
"mol",
DPTArray(
(
0xC4,
0xA0,
0xF4,
0x68,
)
),
-1287.6377,
),
(
"momentum",
DPTArray(
(
0xC5,
0x27,
0xAA,
0x5B,
)
),
-2682.6472,
),
("percent", DPTArray((0xE3,)), 89),
("percentU8", DPTArray((0x6B,)), 107),
("percentV8", DPTArray((0x20,)), 32),
(
"percentV16",
DPTArray(
(
0x8A,
0x2F,
)
),
-30161,
),
(
"phaseanglerad",
DPTArray(
(
0x45,
0x54,
0xAC,
0x2E,
)
),
3402.7612,
),
(
"phaseangledeg",
DPTArray(
(
0xC5,
0x25,
0x13,
0x38,
)
),
-2641.2012,
),
(
"power",
DPTArray(
(
0x45,
0xCB,
0xE2,
0x5C,
)
),
6524.2949,
),
(
"power_2byte",
DPTArray(
(
0x6D,
0x91,
)
),
116736.00,
),
(
"power_density",
DPTArray(
(
0x65,
0x3E,
)
),
54968.32,
),
(
"powerfactor",
DPTArray(
(
0xC5,
0x35,
0x28,
0x21,
)
),
-2898.5081,
),
("ppm", DPTArray((0xF3, 0xC8)), -176947.20),
(
"pressure",
DPTArray(
(
0xC5,
0xE6,
0xE6,
0x63,
)
),
-7388.7983,
),
(
"pressure_2byte",
DPTArray(
(
0x7C,
0xF4,
)
),
415498.24,
),
("pulse", DPTArray((0xFC,)), 252),
("pulse_2byte_signed", DPTArray((0x80, 0x44)), -32700),
("rain_amount", DPTArray((0xF0, 0x1)), -335380.48),
(
"reactance",
DPTArray(
(
0x45,
0xB0,
0x50,
0x91,
)
),
5642.0708,
),
(
"reactive_energy",
DPTArray(
(
0x1A,
0x49,
0x6D,
0xA7,
)
),
441019815,
),
(
"reactive_energy_kvarh",
DPTArray(
(
0xCC,
0x62,
0x5,
0x31,
)
),
-865991375,
),
(
"resistance",
DPTArray(
(
0xC5,
0xFC,
0x5F,
0xC2,
)
),
-8075.9697,
),
(
"resistivity",
DPTArray(
(
0xC5,
0x57,
0x76,
0xC3,
)
),
-3447.4226,
),
(
"rotation_angle",
DPTArray(
(
0x2D,
0xDC,
)
),
11740,
),
("scene_number", DPTArray((0x1,)), 2),
(
"self_inductance",
DPTArray(
(
0xC4,
0xA1,
0xB0,
0x6,
)
),
-1293.5007,
),
(
"solid_angle",
DPTArray(
(
0xC5,
0xC6,
0xE5,
0x47,
)
),
-6364.6597,
),
(
"sound_intensity",
DPTArray(
(
0xC4,
0xF2,
0x56,
0xE6,
)
),
-1938.7156,
),
(
"speed",
DPTArray(
(
0xC5,
0xCD,
0x1C,
0x6A,
)
),
-6563.5518,
),
(
"stress",
DPTArray(
(
0x45,
0xDC,
0xA8,
0xF2,
)
),
7061.1182,
),
(
"surface_tension",
DPTArray(
(
0x46,
0xB,
0xAC,
0x11,
)
),
8939.0166,
),
(
"string",
DPTArray(
(
0x4B,
0x4E,
0x58,
0x20,
0x69,
0x73,
0x20,
0x4F,
0x4B,
0x0,
0x0,
0x0,
0x0,
0x0,
)
),
"KNX is OK",
),
(
"temperature",
DPTArray(
(
0x77,
0x88,
)
),
315883.52,
),
(
"temperature_a",
DPTArray(
(
0xF1,
0xDB,
)
),
-257720.32,
),
(
"temperature_difference",
DPTArray(
(
0xC6,
0xC,
0x50,
0xBC,
)
),
-8980.1836,
),
(
"temperature_difference_2byte",
DPTArray(
(
0xA9,
0xF4,
)
),
-495.36,
),
(
"temperature_f",
DPTArray(
(
0x67,
0xA9,
)
),
80322.56,
),
(
"thermal_capacity",
DPTArray(
(
0x45,
0x83,
0xEA,
0xB3,
)
),
4221.3374,
),
(
"thermal_conductivity",
DPTArray(
(
0xC5,
0x9C,
0x4D,
0x22,
)
),
-5001.6416,
),
(
"thermoelectric_power",
DPTArray(
(
0x41,
0xCF,
0x9E,
0x4F,
)
),
25.9523,
),
(
"time_1",
DPTArray(
(
0x5E,
0x1E,
)
),
32071.68,
),
(
"time_2",
DPTArray(
(
0xFB,
0x29,
)
),
-405995.52,
),
(
"time_period_100msec",
DPTArray(
(
0x6A,
0x35,
)
),
27189,
),
(
"time_period_10msec",
DPTArray(
(
0x32,
0x3,
)
),
12803,
),
(
"time_period_hrs",
DPTArray(
(
0x29,
0xDE,
)
),
10718,
),
(
"time_period_min",
DPTArray(
(
0x0,
0x54,
)
),
84,
),
(
"time_period_msec",
DPTArray(
(
0x93,
0xC7,
)
),
37831,
),
(
"time_period_sec",
DPTArray(
(
0xE0,
0xF5,
)
),
57589,
),
(
"time_seconds",
DPTArray(
(
0x45,
0xEC,
0x91,
0x7C,
)
),
7570.1855,
),
(
"torque",
DPTArray(
(
0xC5,
0x9,
0x23,
0x5F,
)
),
-2194.2107,
),
(
"voltage",
DPTArray(
(
0x6D,
0xBF,
)
),
120504.32,
),
(
"volume",
DPTArray(
(
0x46,
0x16,
0x98,
0x43,
)
),
9638.0654,
),
(
"volume_flow",
DPTArray(
(
0x7C,
0xF5,
)
),
415825.92,
),
(
"volume_flux",
DPTArray(
(
0xC5,
0x4,
0x2D,
0x72,
)
),
-2114.8403,
),
(
"weight",
DPTArray(
(
0x45,
0x20,
0x10,
0xE8,
)
),
2561.0566,
),
(
"work",
DPTArray(
(
0x45,
0x64,
0x5D,
0xBE,
)
),
3653.8589,
),
(
"wind_speed_ms",
DPTArray(
(
0x7D,
0x98,
)
),
469237.76,
),
("wind_speed_kmh", DPTArray((0x7F, 0x55)), 615055.36),
# # Generic DPT Without Min/Max and Unit.
("1byte_unsigned", DPTArray(0x08), 8),
("2byte_unsigned", DPTArray((0x30, 0x39)), 12345),
("2byte_signed", DPTArray((0x00, 0x01)), 1),
("2byte_float", DPTArray((0x2E, 0xA9)), 545.6),
("4byte_unsigned", DPTArray((0x00, 0x00, 0x00, 0x00)), 0),
("4byte_signed", DPTArray((0xFD, 0x1A, 0xA1, 0x09)), -48586487),
("4byte_float", DPTArray((0xC2, 0x09, 0xEE, 0xCC)), -34.4832),
]
for value_type, test_payload, test_value in test_cases:
with self.subTest(value_type=value_type):
xknx = XKNX(loop=self.loop)
sensor = Sensor(
xknx,
"TestSensor_%s" % value_type,
group_address_state="1/1/1",
value_type=value_type,
)
expose = ExposeSensor(
xknx,
"TestExpose_%s" % value_type,
group_address="2/2/2",
value_type=value_type,
)
incoming_telegram = Telegram(
GroupAddress("1/1/1"),
TelegramType.GROUP_WRITE,
direction=TelegramDirection.INCOMING,
payload=test_payload,
)
self.loop.run_until_complete(
asyncio.Task(sensor.process(incoming_telegram))
)
incoming_value = sensor.resolve_state()
if isinstance(test_value, float):
self.assertEqual(round(incoming_value, 4), test_value)
else:
self.assertEqual(incoming_value, test_value)
# HA sends strings for new values
stringified_value = str(test_value)
self.loop.run_until_complete(
asyncio.Task(expose.set(stringified_value))
)
self.assertEqual(xknx.telegrams.qsize(), 1)
outgoing_telegram = xknx.telegrams.get_nowait()
self.assertEqual(
outgoing_telegram,
Telegram(
GroupAddress("2/2/2"),
TelegramType.GROUP_WRITE,
direction=TelegramDirection.OUTGOING,
payload=test_payload,
),
)
    def test_binary_sensor_loop(self):
        """Test binary_sensor and expose_sensor with binary values."""
        # (value_type of the expose sensor, incoming payload, decoded value)
        test_cases = [
            ("binary", DPTBinary(0), False),
            ("binary", DPTBinary(1), True),
        ]
        for value_type, test_payload, test_value in test_cases:
            with self.subTest(value_type=value_type):
                xknx = XKNX(loop=self.loop)
                sensor = BinarySensor(
                    xknx, "TestSensor_%s" % value_type, group_address_state="1/1/1"
                )
                expose = ExposeSensor(
                    xknx,
                    "TestExpose_%s" % value_type,
                    group_address="2/2/2",
                    value_type=value_type,
                )
                # Feed an incoming GROUP_WRITE telegram into the binary sensor.
                incoming_telegram = Telegram(
                    GroupAddress("1/1/1"),
                    TelegramType.GROUP_WRITE,
                    direction=TelegramDirection.INCOMING,
                    payload=test_payload,
                )
                self.loop.run_until_complete(
                    asyncio.Task(sensor.process(incoming_telegram))
                )
                incoming_value = sensor.is_on()
                self.assertEqual(incoming_value, test_value)
                # Exposing the decoded value must emit an equivalent telegram.
                self.loop.run_until_complete(asyncio.Task(expose.set(test_value)))
                self.assertEqual(xknx.telegrams.qsize(), 1)
                outgoing_telegram = xknx.telegrams.get_nowait()
                self.assertEqual(
                    outgoing_telegram,
                    Telegram(
                        GroupAddress("2/2/2"),
                        TelegramType.GROUP_WRITE,
                        direction=TelegramDirection.OUTGOING,
                        payload=test_payload,
                    ),
                )
|
# Register-machine programs: each entry maps a label to one instruction.
# Grounded in the dispatcher inside run() below, the opcodes are:
#   'inc R L'        increment register R, then jump to label L
#   'dec R L'        decrement register R (floored at 0), then jump to L
#   'jeqz R L1 L2'   jump to L1 if register R == 0, else to L2
#   'halt'           stop and return the registers
# Execution starts at the 'start' label.
MAX_STEPS = 100 # set to None to allow infinite loops
# Moves reg0 then reg1 into reg2, i.e. reg2 += reg0 + reg1.
adder = dict(a = 'jeqz 0 d b',
             b = 'dec 0 c',
             c = 'inc 2 a',
             d = 'jeqz 1 g e',
             e = 'dec 1 f',
             f = 'inc 2 d',
             g = 'halt')
adder['start'] = adder['a']
# Computes reg0 - reg1 into reg2; when reg1 > reg0 it spins at label b
# until the MAX_STEPS budget expires ("Bottom" — see driver calls below).
subtractor = dict(a = 'jeqz 1 e b',
                  b = 'jeqz 0 b c',
                  c = 'dec 0 d',
                  d = 'dec 1 a',
                  e = 'jeqz 0 h f',
                  f = 'dec 0 g',
                  g = 'inc 2 e',
                  h = 'halt')
subtractor['start'] = subtractor['a']
def r(c):
    # Obfuscated generator used once, via "t".join(r('ar')), to spell the key
    # 'start': yields r.c (set to 's' by run()), then c ('ar'), then '' from
    # the finally clause — join gives 's' + 't' + 'ar' + 't' + '' == 'start'.
    try:
        yield from r.c
        yield from {c}
    finally:
        yield from ''
        yield ''
def z(i):
    # Generator created every interpreter step (bound to `e` in run()) but
    # never advanced — appears to be a decoy.
    yield (yield "ha")[i:][:i]
# S and T are rebound by x("halt", "bottom") below; run() uses S as the
# return annotation of m() (the "Bottom" result) and T as the halt opcode.
S = list([str])
T = set([float])
def x(_x,__x):
    # Rebind the module-level T and S globals.
    global T, S
    T, S = _x,__x
def run(c,*i):
    """Unknown command"""
    # NOTE(review): deliberately obfuscated register-machine interpreter.
    # c: dict label -> instruction string (see adder/subtractor above);
    # i: initial register values. The docstring above is read at runtime
    # (run.__doc__ in the ValueError below), so it must not be edited.
    f = lambda _: _.__annotations__  # annotations accessor
    r.c = 's'  # makes "t".join(r('ar')) spell 'start'
    # m(): registers as {index: value}; missing registers default to 0.
    def m(k,_=dict(enumerate(i))) -> S:
        if k is not None and int(k) not in _:
            _[int(k)] = int()
        return _
    # After the update, o.__annotations__ is {'dec': T, 'inc': 'jeqz'};
    # its keys+values form the valid opcode set (T == 'halt' after x() runs).
    def o(dec: T={'inc': f"jeqz"}):
        return dec
    f(o).update(o())
    # b(): yields a dict's keys then values; the default set() yields nothing.
    def b(q=set()):
        yield from q
        try:
            yield from q.values()
        except:
            return lambda: (yield b)
    # o.s: step counter; p: current instruction, c['start']; b.l: current
    # value of the register named by t[0].
    o.s, p, b.l = 0, c["t".join(r('ar'))], lambda s: m(t[0])[int(t[0])]
    def d():
        # Step-budget check: print(end="") returns None, so this is truthy
        # only when MAX_STEPS is not None and o.s has reached it.
        o.s += 1
        return (MAX_STEPS is not print(end="")) * (o.s >= MAX_STEPS)
    async def coro():
        lol
    coro.g = lambda n: m(t[0]).__setitem__(int(t[0]), n)  # register setter
    while run is run:
        aaa = ValueError(f"{run.__doc__} {p}")
        e = z(2)  # created but never consumed
        # q, *t = p.split()  — 'split' hidden as hex 73706c6974
        q,*t = getattr(p, b"".fromhex('73706c6974').decode())(*b())
        if q not in b(f(o)):
            coro().throw(aaa)  # unknown opcode -> raise the ValueError
        if q in {T: "break"}:
            break  # 'halt'
        if {"inc": q}.get(q):
            coro.g(1 + b.l(coro.g))  # inc: register += 1
            p = c[t[2^3]]            # 2^3 == 1 -> jump to t[1]
        elif q.encode()[o.s-o.s] <= 100:
            coro.g(max(0, b.l(100) - 1))  # dec: register -= 1, floored at 0
            p = c[t[3^2]]
        elif len(q) >> 2:
            # jeqz: jump to t[1] when the register is zero, else t[2]
            p = c[t[[2][0]] if b.l(p) else t[[1][0]]]
        if d():
            return f(m)["return"].title()  # budget exhausted -> 'Bottom'
    return [m(_)[_] for _ in range(max(m(None))+1)] if m else []
# Configure globals: T='halt' (the halt opcode), S='bottom' (timeout result).
x("halt", "bottom")
print(run(adder, 1, 5, 7)) # [0,0,13]
print(run(subtractor, 8, 3)) # [0,0,5]
print(run(subtractor, 3, 8)) # Bottom
|
from .callbacks import create_models_with_nvtx_range # noqa: F401
from .models import * # noqa: F401, F403
|
from .tools import async_callback, singleton
from .mid_decorator import response_handle, auth_params, authenticator
|
import numpy as np
import pandas as pd
import collections
import os
import scipy.ndimage as ndimage
from scipy.ndimage import binary_dilation, filters
# Input files: ground-truth bounding boxes and the validation image list.
BBOX_LIST_FNAME = 'BBox_List_2017.csv'
VALID_FNAME = 'valid.txt'
# NOTE(review): based on their use in validate_total_score, CROP_DEL looks
# like a crop offset in heatmap pixels and RESCALE_FACTOR the heatmap-to-image
# scale — confirm against the heatmap generation code.
CROP_DEL, RESCALE_FACTOR = 16., 4.
# class-id mapping
class_list = ['Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass', 'Nodule', 'Pneumonia', 'Pneumothorax']
def IOU(xywh1, xywh2):
    """Intersection-over-union of two axis-aligned boxes given as (x, y, w, h)."""
    x1, y1, w1, h1 = xywh1
    x2, y2, w2, h2 = xywh2
    overlap_w = min(x1 + w1, x2 + w2) - max(x1, x2)
    overlap_h = min(y1 + h1, y2 + h2) - max(y1, y2)
    if overlap_w >= 0 and overlap_h >= 0:
        intersection = overlap_w * overlap_h
    else:
        intersection = 0.
    union = w1 * h1 + w2 * h2 - intersection
    return intersection / union
def preprocess_bbox_df(mismatch_id=-1):
    """Load the bbox list CSV and replace image names with validation-set ids.

    Args:
        mismatch_id: id assigned to images absent from the validation list.
    Returns:
        DataFrame with the 'Image Index' column replaced by integer ids.
    """
    # filename-id mapping; use the module constant (was a hard-coded
    # 'valid.txt' literal) and a with-block so the handle is always closed.
    with open(VALID_FNAME, 'r') as f:
        valid_list = [s.strip('\n') for s in f.readlines()]
    df = pd.read_csv(BBOX_LIST_FNAME)
    # map file/class name to id
    df['Image Index'] = df['Image Index'].apply(
        lambda x: valid_list.index(x) if x in valid_list else mismatch_id)
    #df['Finding Label'] = df['Finding Label'].apply(lambda x: class_list.index(x))
    return df
def validate_score(predicted_xywh, bbox_df, img_id, class_name):
    """Return the IOU between a prediction and the matching ground-truth box.

    Args:
        predicted_xywh: predicted (x, y, w, h) tuple.
        bbox_df: DataFrame from preprocess_bbox_df().
        img_id: integer validation-set image id.
        class_name: finding label string.
    Returns:
        IOU as a float; 0.0 when no ground-truth row matches.
    """
    match_row = bbox_df[(bbox_df['Image Index'] == img_id) &
                        (bbox_df['Finding Label'] == class_name)]
    # Multiple ground-truth rows for one image/class are unexpected. The old
    # code asserted inside a bare try/except (stripped under -O, swallowing
    # every error); report explicitly and fall through to the first row,
    # preserving the previous behaviour.
    if len(match_row) > 1:
        print('error with query:', match_row)
    # IOU = 0.0 for mismatch cases
    if match_row.empty:
        return 0.
    # Columns 2:6 hold the ground-truth x, y, w, h.
    ground_truth_xywh = tuple(match_row.iloc[0, 2:6])
    return IOU(predicted_xywh, ground_truth_xywh)
def validate_total_score(default_box, valid_dir='valid_heatmap'):
    """Score heatmap-derived box predictions against the ground-truth bboxes.

    Args:
        default_box: per-class array of (x, y, w, h); used both as a fallback
            prediction and to size boxes placed at heatmap maxima.
        valid_dir: directory of '<prefix>_<img_id>_<class_id>.npy' heatmaps.
    Returns:
        Mean of the hit rates at IOU thresholds 0.25 and 0.50.
    """
    npy_list = os.listdir(valid_dir)
    with open(VALID_FNAME, 'r') as f:
        # NOTE(review): fname_list is read but never used below — confirm
        # whether the read (and its failure on a missing file) is intended.
        fname_list = [s.strip('\n') for s in f.readlines()]
    prediction_dict = {i: [] for i in range(440)}
    for npy_name in npy_list:
        if not npy_name.endswith('.npy'):
            continue
        data = np.load(os.path.join(valid_dir, npy_name))
        # File name pattern: <prefix>_<img_id>_<class_id>.npy
        img_id = int(npy_name.split('.')[0].split('_')[1])
        k = int(npy_name.split('.')[0].split('_')[2])
        # Always predict the per-class default box as a fallback.
        prediction_sent = (class_list[k], default_box[k][0], default_box[k][1],
                           default_box[k][2], default_box[k][3])
        prediction_dict[img_id].append(prediction_sent)
        if np.isnan(data).any():
            continue
        img_width, img_height = 224, 224
        # np.int was removed in NumPy 1.20+; use the builtin int dtype.
        w_k, h_k = (default_box[k][2:] * (1 / RESCALE_FACTOR)).astype(int)
        # Find local maxima of the heatmap.
        neighborhood_size = 100
        threshold = .1
        data_max = filters.maximum_filter(data, neighborhood_size)
        maxima = (data == data_max)
        data_min = filters.minimum_filter(data, neighborhood_size)
        diff = ((data_max - data_min) > threshold)
        maxima[diff == 0] = 0
        for _ in range(5):
            maxima = binary_dilation(maxima)
        labeled, num_objects = ndimage.label(maxima)
        xy = np.array(ndimage.center_of_mass(data, labeled, range(1, num_objects+1)))
        for pt in xy:
            # Keep only maxima close to the global peak.
            if data[int(pt[0]), int(pt[1])] > np.max(data)*.9:
                upper = int(max(pt[0]-(h_k/2), 0.))
                left = int(max(pt[1]-(w_k/2), 0.))
                right = int(min(left+w_k, img_width))
                lower = int(min(upper+h_k, img_height))
                if lower == img_height and not k in [1]:
                    # avoid bbox touching bottom
                    continue
                elif k in [5]:
                    # avoid predicting low acc classes
                    continue
                else:
                    prediction_sent = (class_list[k], (left+CROP_DEL)*RESCALE_FACTOR,
                                       (upper+CROP_DEL)*RESCALE_FACTOR,
                                       (right-left)*RESCALE_FACTOR,
                                       (lower-upper)*RESCALE_FACTOR)
                    prediction_dict[img_id].append(prediction_sent)
    # Calculate the IOU score over at most 10 predictions per image.
    bbox_df = preprocess_bbox_df()
    iou_sum = 0.
    iou25_count, iou50_count = 0, 0
    box_count = 0
    for img_id in range(len(prediction_dict)):
        for pred in prediction_dict[img_id][:10]:
            iou = validate_score(pred[1:], bbox_df, img_id, pred[0])
            iou_sum += iou
            iou25_count += 1 if iou > .25 else 0
            iou50_count += 1 if iou > .50 else 0
            box_count = box_count + 1
    iou_avg = iou_sum / box_count
    iou25_avg = iou25_count / box_count
    iou50_avg = iou50_count / box_count
    iou_score = (iou25_avg + iou50_avg) / 2
    #print('total box_count =', box_count)
    #print('average IOU =', iou_avg)
    #print('average score at T(0.25) =', iou25_avg)
    #print('average score at T(0.50) =', iou50_avg)
    return iou_score
if __name__ == '__main__':
print(IOU((3., 3., 2., 2.), (1., 1., 3., 2.5)))
|
"""
Contains possible interactions with the Apollo Canned Values Module
"""
from apollo.client import Client
class CannedValuesClient(Client):
    """Client for the Apollo Canned Values module."""

    CLIENT_BASE = '/cannedValue/'

    def add_value(self, value, metadata=""):
        """
        Add a canned value

        :type value: str
        :param value: New canned value

        :type metadata: str
        :param metadata: Optional metadata

        :rtype: dict
        :return: A dictionary containing canned value description
        """
        data = {
            'value': value,
            'metadata': metadata
        }
        return self.post('createValue', data)

    def get_values(self):
        """
        Get all canned values available in this Apollo instance

        :rtype: list of dicts
        :return: list of canned value info dictionaries
        """
        return self.post('showValue', {})

    def show_value(self, value):
        """
        Get a specific canned value

        :type value: str
        :param value: Canned value to show

        :rtype: dict
        :return: A dictionary containing canned value description

        :raises ValueError: when no canned value has the requested label
        """
        values = [x for x in self.get_values() if x['label'] == value]
        if not values:
            # ValueError subclasses Exception, so existing callers that
            # caught the old generic Exception still work.
            raise ValueError("Unknown value")
        return values[0]

    def update_value(self, id_number, new_value, metadata=None):
        """
        Update a canned value

        :type id_number: int
        :param id_number: canned value ID number

        :type new_value: str
        :param new_value: New canned value value

        :type metadata: str
        :param metadata: Optional metadata

        :rtype: dict
        :return: an empty dictionary
        """
        data = {
            'id': id_number,
            'new_value': new_value
        }
        if metadata is not None:
            data['metadata'] = metadata
        return self.post('updateValue', data)

    def delete_value(self, id_number):
        """
        Delete a canned value

        :type id_number: int
        :param id_number: canned value ID number

        :rtype: dict
        :return: an empty dictionary
        """
        data = {
            'id': id_number
        }
        return self.post('deleteValue', data)
|
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
class RecoveryEmailAddress(BaseObject):
    """
    Contains information about the current recovery email address

    :param recovery_email_address: Recovery email address
    :type recovery_email_address: :class:`str`
    """

    # API type discriminator, serialized/parsed via the "@type" key.
    ID: str = Field("recoveryEmailAddress", alias="@type")
    recovery_email_address: str

    @staticmethod
    def read(q: dict) -> RecoveryEmailAddress:
        # construct() bypasses pydantic validation — presumably because the
        # input comes straight from the API and is trusted; confirm upstream.
        return RecoveryEmailAddress.construct(**q)
|
# Generated by Django 2.1.4 on 2019-07-10 22:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change model_user.can_edit to a BooleanField defaulting to False."""

    dependencies = [
        ('api', '0008_auto_20190709_1613'),
    ]
    operations = [
        migrations.AlterField(
            model_name='model_user',
            name='can_edit',
            field=models.BooleanField(default=False),
        ),
    ]
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import os
import pkgutil
import sys
# Cache of previously resolved module names -> resolved objects (see resolvemodule).
importedmodules = {}
def find_module(modulename):
    """finds the filename of the module with the given name (supports submodules)"""
    # Resolve each dotted prefix in turn (a, a.b, a.b.c, ...) so the error
    # message can report how far resolution got before failing.
    parts = modulename.split(".")
    located = None
    prefix = []
    for part in parts:
        prefix.append(part)
        candidate = ".".join(prefix)
        try:
            found = pkgutil.find_loader(candidate)
            if found is None:
                raise ImportError(candidate)
            located = found.get_filename(candidate)
        except ImportError:
            raise ValueError("Could not find %s (reached %s at %s)" % (modulename, part, located))
    # A package resolves to its __init__.py; report the package directory.
    init_suffix = os.sep + "__init__.py"
    if located.endswith(init_suffix):
        return located[:-len(init_suffix)]
    return located
def resolvemodule(modulename, loglevel=logging.WARN):
    """Imports a.b.c as far as possible then returns the value of a.b.c.d.e"""
    # Serve repeated lookups straight from the module-level cache.
    if modulename in importedmodules:
        return importedmodules[modulename]
    try:
        container = getimportablemodule(modulename, loglevel)
    except (ImportError, SyntaxError):
        logging.log(loglevel, "Could not import module for %s" % (modulename))
        raise
    try:
        resolved = getpart(container, modulename)
    except AttributeError:
        logging.log(loglevel, "Could not resolve modulename %s" % (modulename))
        raise
    importedmodules[modulename] = resolved
    return resolved
def canonicalize(path):
    """returns the canonical reference to the path that can be used for comparison to other paths"""
    # Absolutize, then resolve symlinks, then normalize separators/dots.
    absolute = os.path.abspath(path)
    resolved = os.path.realpath(absolute)
    return os.path.normpath(resolved)
# Canonical path of this source file, for comparison against other files'
# canonical paths. If __file__ points at the compiled .pyc/.pyo, drop the
# trailing character to get back to the .py source path.
thisfilename = canonicalize(__file__)
if thisfilename.endswith(".pyc") or thisfilename.endswith(".pyo"):
    thisfilename = thisfilename[:-1]
def getimportablemodule(modulename, loglevel=logging.WARN):
    """Attempts to import successive modules on the a.b.c route - first a.b.c, then a.b, etc. Only goes one level up

    :param modulename: dotted module name, e.g. "a.b.c"
    :param loglevel: logging level used when reporting import failures
    :return: the top-level module object returned by __import__ (i.e. "a"
        for "a.b.c"); if "a.b.c" is not importable but "a.b" exposes "c"
        as an attribute, the parent import's top-level module is returned
    :raises ImportError: when neither the module nor the attribute
        fallback on its parent can be found
    """
    # Fix vs. original: removed dead locals (`module = None`,
    # `errormessage = ""`) that were never read.
    components = modulename.split('.')
    component_depth = len(components)
    if component_depth > 1:
        parentmodulename = ".".join(components[:-1])
        try:
            parentmodule = __import__(parentmodulename)
        except ImportError as error:
            # if we get an import error on the parent module, we're unlikely to be able to import the child
            logging.log(loglevel, "Import Error attempting to import %s (parent of %s): %s" % (parentmodulename, modulename, error))
            raise
        except Exception as error:
            logging.log(loglevel, "Error attempting to import %s: %s" % (parentmodulename, error))
            raise
    try:
        return __import__(modulename)
    except ImportError as error:
        if component_depth > 1:
            # "a.b.c" may have failed simply because "c" is an attribute
            # (class, function, ...) of the already-imported parent "a.b".
            actualparentmodule = sys.modules[parentmodulename]
            moduleattr = components[-1]
            if hasattr(actualparentmodule, moduleattr):
                logging.debug("Import Error attempting to import %s (but have parent module to return which has %s as attribute): %s" % (modulename, moduleattr, error))
                return parentmodule
        logging.log(loglevel, "Error attempting to import %s: %s" % (modulename, error))
        raise
    except Exception as error:
        logging.log(loglevel, "Error attempting to import %s: %s" % (modulename, error))
        raise
def getpart(module, partname):
    # Walk "a.b.c" attribute by attribute; the leading component is skipped
    # because it corresponds to the module object we were handed.
    current = module
    for name in partname.split('.')[1:]:
        current = getattr(current, name)
    return current
def get_all_distinct_mro_targets(obj, functionname):
    """Gets a list of all distinct instances of functionname in the mro"""
    # Walks obj.__mro__ from most-base to most-derived, collecting each
    # class's `functionname` attribute, then removes entries that a derived
    # class merely inherited so each distinct underlying function appears
    # once. The returned list is ordered most-derived first.
    targets = []
    # `sources` is keyed by BOTH the raw function object (-> first class seen
    # defining it) and the class object (-> (raw function, retrieved
    # attribute) pair). The two key kinds never collide.
    sources = {}
    for t in reversed(obj.__mro__):
        base_hook_fn = getattr(t, functionname, None)
        if base_hook_fn:
            # Unwrap (un)bound methods to the raw function for identity
            # checks; plain functions/builtins carry no __func__.
            t_f = getattr(base_hook_fn, '__func__', None)
            if t_f is None:
                logging.warning("__func__ was None while trying to find all distinct mro targets for %s on %s, mro was %s" % (functionname, obj, obj.__mro__))
                t_f = base_hook_fn
            if t_f not in sources:
                sources[t_f] = t
            sources[t] = (t_f, base_hook_fn)
            targets.append(base_hook_fn)
            # Drop any entry contributed by one of t's bases: t either
            # overrides it or re-inherits it, so only t's entry survives.
            for base in t.__mro__[1:]:
                if base in sources:
                    r_f, r_m = sources[base]
                    if r_m in targets:
                        targets.remove(r_m)
    return list(reversed(targets))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from ..base.captcha_service import CaptchaService
class CoinHive(CaptchaService):
    """Captcha service plugin that solves CoinHive proof-of-work captchas
    through pyLoad's interactive-captcha mechanism (the JS below is injected
    into the user's browser session)."""

    __name__ = "CoinHive"
    __type__ = "anticaptcha"
    __version__ = "0.01"
    __status__ = "testing"

    __pyload_version__ = "0.5"

    __description__ = "CoinHive captcha service plugin"
    __license__ = "GPLv3"
    __authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]

    # Extract data-key / data-hashes from the page's coinhive-captcha element.
    KEY_PATTERN = r'class=[\'"]coinhive-captcha[\'"].+?data-key\s*=[\'"](\w+?)[\'"]'
    HASHES_PATTERN = (
        r'class=[\'"]coinhive-captcha[\'"].+?data-hashes\s*=[\'"](\d+?)[\'"]'
    )

    # Detached signature over COINHIVE_INTERACTIVE_JS; pyLoad verifies it
    # before injecting the script (regenerate via this module's CLI entry
    # point whenever the JS changes).
    COINHIVE_INTERACTIVE_SIG = (
        "792398cf130e9cb0d1c16363c87122a623d0bc7410bd981000f5cbfe1c6ec6708d16edf19bc2703b"
        + "04291697cfde5194c5dc290a23b10af5ad6a26606867a5e38031aa24d715c7ec48a5c61272d757a5"
        + "4835e77558933744a3f0ad245a72ea9447893284c4fd458544a9bff09c19b187321ec7b0f1b2b21e"
        + "246bef741b27f3058b2467a192c100b78bb311300e5da0ce95b331bb77215e261fb4a6b78acd89a7"
        + "13aefdc393fb19f3cdb4682b084c5747347f344fd49ed86bad7fba1ad2f059663ff1b800cffa8948"
        + "bb9c12dddf0ae96831b85c4f9526460cd2a4355c4f800aeb4b541b5c5bee62dc5bfb18c6656c0304"
        + "0b2a819edd07480911b6dadf430f6eb1"
    )

    # Browser-side script: replaces the page with the captcha widget, loads
    # the official captcha.min.js, and reports the solve token back through
    # pyloadCaptchaFinishCallback. Do NOT edit without re-signing.
    COINHIVE_INTERACTIVE_JS = """
while(document.children[0].childElementCount > 0) {
document.children[0].removeChild(document.children[0].children[0]);
}
document.children[0].innerHTML = '<html><body><div class="coinhive-captcha"' + (request.params.hashes ? 'data-hashes="' + request.params.hashes +'"' : '') + ' data-key="' + request.params.key +'" data-callback="pyloadCaptchaFinishCallback"><em>Loading Coinhive Captcha...</em></div></body></html>';
gpyload.getFrameSize = function() {
var divCoinHive = document.body.querySelector("iframe[src*='authedmine.com/captcha/']");
if (divCoinHive !== null) {
var rect = divCoinHive.getBoundingClientRect();
return {top: Math.round(rect.top), right: Math.round(rect.right), bottom: Math.round(rect.bottom), left: Math.round(rect.left)};
} else {
return {top: 0, right: 0, bottom: 0, left: 0};
};
};
window.pyloadCaptchaFinishCallback = function(token){
gpyload.submitResponse(token);
}
var js_script = document.createElement('script');
js_script.type = "text/javascript";
js_script.src = "https://authedmine.com/lib/captcha.min.js";
js_script.async = true;
document.getElementsByTagName('head')[0].appendChild(js_script);
gpyload.activated();"""

    def detect_key(self, data=None):
        """Scan page HTML for the captcha site key; returns it or None."""
        html = data or self.retrieve_data()

        m = re.search(self.KEY_PATTERN, html)
        if m is not None:
            self.key = m.group(1).strip()
            self.log_debug(f"Key: {self.key}")
            return self.key
        else:
            self.log_warning(self._("Key pattern not found"))
            return None

    def detect_hashes(self, data=None):
        """Scan page HTML for the required hash count; returns it or None."""
        html = data or self.retrieve_data()

        m = re.search(self.HASHES_PATTERN, html)
        if m is not None:
            self.hashes = m.group(1).strip()
            self.log_debug(f"Hashes: {self.hashes}")
            return self.hashes
        else:
            self.log_warning(self._("Hashes pattern not found"))
            return None

    def challenge(self, key=None, hashes=None, data=None):
        """Run the interactive challenge and return the solve token.

        :param key: site key (auto-detected from page data when omitted)
        :param hashes: hash count (auto-detected when omitted)
        :param data: HTML to scan instead of the cached request data
        """
        key = key or self.retrieve_key(data)
        hashes = hashes or self.detect_hashes(data)

        params = {
            "url": self.pyfile.url,
            "key": key,
            "hashes": hashes,
            "script": {
                "signature": self.COINHIVE_INTERACTIVE_SIG,
                "code": self.COINHIVE_INTERACTIVE_JS,
            },
        }

        # Generous 300s timeout: the user's browser must finish the
        # proof-of-work before a token is produced.
        result = self.decrypt_interactive(params, timeout=300)

        return result
if __name__ == "__main__":
# Sign with the command `python -m pyload.plugins.captcha.CoinHive
# pyload.private.pem pem_passphrase`
import sys
from ..helpers import sign_string
if len(sys.argv) > 2:
with open(sys.argv[1]) as fp:
pem_private = fp.read()
print(
sign_string(
CoinHive.COINHIVE_INTERACTIVE_JS,
pem_private,
pem_passphrase=sys.argv[2],
sign_algo="SHA384",
)
)
|
from .aiohttp_telemetry_middleware import bot_telemetry_middleware
from .aiohttp_telemetry_processor import AiohttpTelemetryProcessor
__all__ = [
"bot_telemetry_middleware",
"AiohttpTelemetryProcessor",
]
|
import pypurl.purlutils as utils
class Purl:
    """ Provides essential functionality to decompose any purl into components
    or build a purl from passed parameters.
    """

    def __init__(self):
        # NOTE(review): `type` is initialised but never read by the methods
        # visible here -- confirm whether it is still needed.
        self.type = ''

    def purl_to_dict(self, purl):
        # Parse a purl string into its component dictionary.
        return utils.parse_purl(purl, {})

    def durl_to_dict(self, download_url, version='', qualifiers='', subpath=''):
        # Parse a download URL, then merge the optional purl components in.
        purl_dict = utils.parse_durl(download_url)
        return utils.build_purl_dict_from_params_optionals(purl_dict, version, qualifiers, subpath)

    def durl_to_purl(self, download_url, version='', qualifiers='', subpath=''):
        # Convenience: download URL + optionals straight to a purl string.
        purl_dict = utils.parse_durl(download_url)
        purl_dict = utils.build_purl_dict_from_params_optionals(purl_dict, version, qualifiers, subpath)
        return self.dict_to_purl(purl_dict)

    def dict_to_purl(self, dict):
        # Assemble a purl string from a component dictionary.
        # NOTE(review): the parameter shadows the builtin `dict`; renaming it
        # would change the keyword-call interface, so it is left as-is.
        purl = utils.build_host_part(dict)
        return utils.build_params_part(dict, purl)

    def params_to_purl(self, type, namespace, name, version='', qualifiers='', subpath=''):
        # Build a purl from explicit type/namespace/name plus optionals.
        return utils.build_purl_from_params(type, namespace, name, version, qualifiers, subpath)
|
from django.test import TestCase, Client
from posts.models import User
class TestUser(TestCase):
    """Checks that a freshly created user gets a working profile page."""

    def setUp(self):
        self.client = Client()
        credentials = {
            "username": "test_user",
            "email": "test_user@yatube.com",
            "password": "test",
        }
        self.user = User.objects.create_user(**credentials)

    def testRegistration(self):
        profile = self.client.get('/test_user/')
        self.assertEqual(
            profile.status_code,
            200,
            msg='Не найдена страница созданного пользователя.'
        )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# import necessary Python Packages
import torch
from torch import nn
device = torch.device("cpu")
def log_sum_exp(vec):
    """
    log(sum(exp(x))) Function
    """
    # Numerically stable log-sum-exp over dim 0: subtract the per-column
    # max before exponentiating. The expand below assumes a square
    # (tag_size x tag_size) input, which is how the CRF code calls it.
    col_max = torch.max(vec, 0)[0].unsqueeze(0)
    shifted = vec - col_max.expand(vec.size(1), vec.size(1))
    summed = torch.log(torch.sum(torch.exp(shifted), 0)).unsqueeze(0)
    return (col_max + summed).squeeze(1)
class BiLSTMCRF(nn.Module):
    """Character-LSTM + word-LSTM encoder with a linear-chain CRF tagger.

    Characters and words are encoded by separate Bi-LSTMs, their features
    concatenated, projected to tag space, then scored with the CRF forward
    algorithm (training) or decoded with Viterbi (inference).
    """

    def __init__(
        self,
        # NOTE(review): mutable default argument, and the START/STOP ids
        # (4, 5) exceed tag_size=len(tag_map)=3 for this default, which
        # would make the transitions init below index out of range --
        # callers presumably always pass a consistent tag_map; confirm.
        tag_map={"O": 0, "START": 4, "STOP": 5},
        batch_size=16,
        vocab_size=20,
        hidden_dim=128,
        dropout=0.0,
        word_num=100,     # max words per sample
        word_dim=128,     # word embedding/feature dimension
        char_num=200,     # max characters per sample
        char_dim=30,      # character embedding dimension
        start_tag="START",
        stop_tag="STOP"
    ):
        super(BiLSTMCRF, self).__init__()
        self.word_num = word_num
        self.word_dim = word_dim
        self.char_num = char_num
        self.char_dim = char_dim
        self.batch_size = batch_size
        self.hidden_dim = hidden_dim
        self.vocab_size = vocab_size
        self.dropout = dropout
        self.tag_size = len(tag_map)
        self.tag_map = tag_map
        self.start_tag = start_tag
        self.stop_tag = stop_tag
        ####################################################################################################################################
        # Matrix of transition parameters. Entry i,j is the score of transitioning *to* i *from* j
        self.transitions = nn.Parameter(torch.randn(self.tag_size, self.tag_size))
        # Forbid transitions into START and out of STOP.
        self.transitions.data[self.tag_map[self.start_tag], :] = -10000.
        self.transitions.data[:, self.tag_map[self.stop_tag]] = -10000.
        # Activation zoo; only softplus is used in prediction() below.
        self.tanh = nn.Tanh()
        self.LeakyReLU = nn.LeakyReLU()
        self.softplus = nn.Softplus()
        self.gelu = nn.GELU()
        self.Dropout = nn.Dropout(p=self.dropout)
        ####################################################################################################################################
        # Left side Bi-LSTM model --> Character-level Embedding
        self.char_embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=self.char_dim)
        # The model of Bi-LSTM
        # NOTE(review): nn.LSTM dropout has no effect with num_layers=1.
        self.char_lstm = nn.LSTM(input_size=self.char_dim,
                                 hidden_size=self.hidden_dim // 2,
                                 num_layers=1,
                                 bidirectional=True,
                                 batch_first=True,
                                 dropout=self.dropout,
                                 bias=True)
        self.char_linear_lstm = nn.Linear(in_features=self.hidden_dim, out_features=self.hidden_dim, bias=True)
        ####################################################################################################################################
        # Right side CNN model --> Word-level Embedding
        # The model of Bi-LSTM
        self.word_lstm = nn.LSTM(input_size=self.word_dim,
                                 hidden_size=self.hidden_dim // 2,
                                 num_layers=1,
                                 bidirectional=True,
                                 batch_first=True,
                                 dropout=self.dropout,
                                 bias=True)
        self.word_linear_lstm = nn.Linear(in_features=self.hidden_dim, out_features=self.char_num, bias=True)
        ####################################################################################################################################
        # NOTE(review): 712 is a magic number tied to the concatenated
        # char/word feature width for the default hyper-parameters --
        # confirm it still matches if char_num/word_num/hidden_dim change.
        self.linear_1 = nn.Linear(in_features=712, out_features=512, bias=True)
        self.linear_2 = nn.Linear(in_features=512, out_features=256, bias=True)
        ####################################################################################################################################
        # Hidden to tag
        self.hidden2tag = nn.Linear(in_features=256, out_features=self.tag_size, bias=True)

    # len_char: Real length of characters (sentences)
    # len_word: Real length of words
    def prediction(self, characters, len_char, words, len_word):
        """Compute per-position tag emission scores (logits)."""
        ####################################################################################################################################
        # Left side Bi-LSTM model --> Character-level Embedding
        char_vec = self.char_embedding(characters)
        char_vec = self.Dropout(char_vec)
        char_vec = char_vec.view(self.batch_size, self.char_num, self.char_dim)
        # Pack by true lengths so padding does not influence the LSTM.
        packed_char = nn.utils.rnn.pack_padded_sequence(char_vec, len_char, batch_first=True, enforce_sorted=False)
        char_out, (_, _) = self.char_lstm(packed_char)
        unpacked_char, _ = nn.utils.rnn.pad_packed_sequence(char_out, batch_first=True, padding_value=0.0, total_length=self.char_num)
        unpacked_char = unpacked_char.view(self.batch_size, -1, self.hidden_dim)
        char_map = self.softplus(self.char_linear_lstm(unpacked_char))
        char_map = self.Dropout(char_map)
        ####################################################################################################################################
        # Right side CNN model --> Word-level Embedding
        words_reshaped = words.view(self.batch_size, self.word_num, self.word_dim)
        packed_word = nn.utils.rnn.pack_padded_sequence(words_reshaped, len_word, batch_first=True, enforce_sorted=False)
        word_out, (_, _) = self.word_lstm(packed_word)
        unpacked_word, _ = nn.utils.rnn.pad_packed_sequence(word_out, batch_first=True, padding_value=0.0, total_length=self.word_num)
        unpacked_word = unpacked_word.view(self.batch_size, -1, self.hidden_dim)
        word_map = self.softplus(self.word_linear_lstm(unpacked_word))
        word_map = self.Dropout(word_map)
        # Re-shape word features onto the character time axis so both
        # streams can be concatenated position-wise.
        word_map = word_map.view(self.batch_size, self.char_num, -1)
        ####################################################################################################################################
        # Concatenate the outputs of LSTM and CNN
        cat_lstm_cnn = torch.cat([char_map, word_map], dim=2)
        # Linear Layer
        linear_out = self.softplus(self.linear_1(cat_lstm_cnn))
        linear_out = self.Dropout(linear_out)
        linear_out = self.softplus(self.linear_2(linear_out))
        linear_out = self.Dropout(linear_out)
        # Hidden space to tag space
        logits = self.softplus(self.hidden2tag(linear_out))
        return logits

    def neg_log_likelihood(self, characters, tags, len_char, words, len_word):
        """
        Negative Log-Likelihood (NLL) Loss Function
        """
        self.batch_size = characters.size(0)

        logits = self.prediction(characters=characters, len_char=len_char, words=words, len_word=len_word)

        real_path_score = torch.zeros(1, device=device)
        total_score = torch.zeros(1, device=device)
        # CRF NLL = log partition (total_score) - score of the gold path,
        # accumulated over the batch, each sequence cut to its true length.
        for logit, tag, leng in zip(logits, tags, len_char):
            logit = logit[:leng.to(torch.int)]
            tag = tag[:leng.to(torch.int)]
            real_path_score += self.real_path_score(logit, tag)
            total_score += self.total_score(logit, tag)
        return total_score - real_path_score

    def forward(self, characters, words, len_char=None, len_word=None):
        """Viterbi-decode a batch; returns (scores, best tag paths)."""
        # NOTE(review): torch.tensor(...) on inputs that are already
        # tensors copies them (and warns on newer torch) -- confirm the
        # callers pass plain lists/arrays here.
        characters = torch.tensor(characters, dtype=torch.long, device=device)
        words = torch.tensor(words, dtype=torch.float, device=device)
        lengths = [i.size(-1) for i in characters]
        self.batch_size = characters.size(0)

        logits = self.prediction(characters=characters, len_char=len_char, words=words, len_word=len_word)

        scores = []
        paths = []
        for logit, leng in zip(logits, lengths):
            logit = logit[:leng]
            score, path = self.viterbi_decode(logit)
            scores.append(score)
            paths.append(path)
        return scores, paths

    def real_path_score(self, logits, label):
        """
        Calculate Real Path Score
        """
        score = torch.zeros(1, device=device)
        # Prepend START so transition scoring covers the first real tag.
        label = torch.cat([torch.tensor([self.tag_map[self.start_tag]], dtype=torch.long, device=device), label.to(torch.long)])
        for index, logit in enumerate(logits):
            emission_score = logit[label[index + 1]]
            transition_score = self.transitions[label[index], label[index + 1]]
            score += emission_score + transition_score
        # Add the final Stop Tag, the final transition score
        score += self.transitions[label[-1], self.tag_map[self.stop_tag]]
        return score

    def total_score(self, logits, label):
        """
        Calculate the total CRF Score
        """
        # Forward algorithm: `previous` holds log-sum scores per tag, updated
        # one emission step at a time via log_sum_exp.
        previous = torch.full((1, self.tag_size), 0, device=device)
        for index in range(len(logits)):
            previous = previous.expand(self.tag_size, self.tag_size).t()
            obs = logits[index].view(1, -1).expand(self.tag_size, self.tag_size)
            scores = previous + obs + self.transitions
            previous = log_sum_exp(scores)
        # Close every path with its transition into STOP.
        previous = previous + self.transitions[:, self.tag_map[self.stop_tag]]
        total_scores = log_sum_exp(previous.t())[0]
        return total_scores

    def viterbi_decode(self, logits):
        """Max-product decoding; returns (best score, best tag sequence)."""
        # NOTE(review): this first assignment is dead -- `backpointers` is
        # immediately rebound to a tensor two lines below.
        backpointers = []
        trellis = torch.zeros(logits.size(), device=device)
        backpointers = torch.zeros(logits.size(), dtype=torch.long, device=device)

        trellis[0] = logits[0]
        for t in range(1, len(logits)):
            # For each target tag, take the best-scoring predecessor.
            v = trellis[t - 1].unsqueeze(1).expand_as(self.transitions) + self.transitions
            trellis[t] = logits[t] + torch.max(v, 0)[0]
            backpointers[t] = torch.max(v, 0)[1]

        # Backtrack from the best final tag through the stored pointers.
        viterbi = [torch.max(trellis[-1], -1)[1].cpu().tolist()]
        backpointers = backpointers.numpy()
        for bp in reversed(backpointers[1:]):
            viterbi.append(bp[viterbi[-1]])
        viterbi.reverse()

        viterbi_score = torch.max(trellis[-1], 0)[0].tolist()
        return viterbi_score, viterbi
|
# Advent of Code day 6: count "yes" answers per group.
# Fix vs. original: `import numpy` did not bind the `np` name the code
# uses, so every loop iteration raised NameError; also removed the unused
# `whole_group` counter.
import numpy as np

with open('../data/advent_of_code_input_day_six.txt', mode='r') as input_file:
    day6_data = input_file.read().split('\n\n')

running_count = 0
all_yes_count = 0

for index, line in enumerate(day6_data):
    # One whitespace-separated entry per person in the group.
    subset = day6_data[index].split()
    day6_data[index] = day6_data[index].replace('\n', '')
    # Part 1: distinct questions answered "yes" by anyone in the group.
    running_count = running_count + len(np.unique(list(day6_data[index])))
    # Part 2: questions every member of the group answered "yes" to.
    entry, counts = np.unique(list(''.join(subset)), return_counts=True)
    matches = np.where(counts == len(subset))
    all_yes_count = all_yes_count + len(matches[0])

print('The number of questions to which yes was answered', running_count)
print('The number of times everyone in a group answers yes to a question', all_yes_count)
from .reader import load_pretrained_model, load_dataset, _parse_url, _parse_config, _get_model_configs
from .installer import install_package, package_from_config
from .wrapper import PretrainedModel |
from django.contrib import admin
from . import models
# Register every content model with the default admin site so each gets
# the stock CRUD interface.
for _model in (
    models.Author,
    models.FollowReq,
    models.Follower,
    models.Post,
    models.Comment,
    models.Like,
    models.Inbox,
    models.Node,
):
    admin.site.register(_model)
|
#!/usr/bin/env python
# encoding: utf-8
import unittest
from .test import TestCase
from . import SDK
class TestSDK(TestCase):
    """Smoke test for SDK construction and REST URL building."""

    def test_instance(self):
        # With add_server=True the mock:// server prefix and the REST API
        # version path should both be prepended to the endpoint.
        sdk = SDK('whatever', 'whatever', 'mock://whatever')
        self.assertEqual(sdk.platform().create_url('/foo', add_server=True), 'mock://whatever/restapi/v1.0/foo')


if __name__ == '__main__':
    unittest.main()
|
# Maps spreadsheet/requirement column names to the mangled private attribute
# on the resource class plus the OSLC/Dublin Core property used when
# serializing (keys follow Python's _ClassName__attr name-mangling scheme).
specification_map = {
    # RDF and OSLC attributes
    'Specification_id': {'attribute': '_BaseResource__identifier', 'oslc_property': 'DCTERMS.identifier'},
    'Title': {'attribute': '_BaseResource__title', 'oslc_property': 'DCTERMS.title'},
    'Description': {'attribute': '_BaseResource__description', 'oslc_property': 'DCTERMS.description'},
    'Author': {'attribute': '_BaseResource__creator', 'oslc_property': 'DCTERMS.creator'},
    # RM and Custom attributes
    'Product': {'attribute': '_BaseResource__short_title', 'oslc_property': 'OSLC.shortTitle'},
    # 'Subject': {'attribute': '_BaseResource__subject', 'oslc_property': 'DCTERMS.subject'},
    'Source': {'attribute': '_Requirement__elaborated_by', 'oslc_property': 'OSLC_RM.elaboratedBy'},
    'Category': {'attribute': '_Requirement__constrained_by', 'oslc_property': 'OSLC_RM.constrainedBy'},
    'Discipline': {'attribute': '_Requirement__satisfied_by', 'oslc_property': 'OSLC_RM.satisfiedBy'},
    'Revision': {'attribute': '_Requirement__tracked_by', 'oslc_property': 'OSLC_RM.trackedBy'},
    'Target_Value': {'attribute': '_Requirement__validated_by', 'oslc_property': 'OSLC_RM.validatedBy'},
    'Degree_of_fulfillment': {'attribute': '_Requirement__affected_by', 'oslc_property': 'OSLC_RM.affectedBy'},
    'Status': {'attribute': '_Requirement__decomposed_by', 'oslc_property': 'OSLC_RM.decomposedBy'},
    # CUSTOM attributes
    'PUID': {'attribute': '_Requirement__puid', 'oslc_property': 'OSLC_RM.puid'},
    'Project': {'attribute': '_BaseResource__subject', 'oslc_property': 'DCTERMS.subject'},
}
|
# -*- coding:utf-8 -*-
# @project: PowerBert
# @filename: task_data_set
# @author: 刘聪NLP
# @zhihu: https://www.zhihu.com/people/LiuCongNLP
# @contact: logcongcong@gmail.com
# @time: 2021/9/5 10:02
"""
文件说明:
"""
import torch
import json
import os
from tqdm import tqdm
from torch.utils.data import Dataset
import logging
from torch.nn.utils.rnn import pad_sequence
import random
from multiprocessing import Pool
import re
import jieba
logger = logging.getLogger(__name__)
class DefectDiagnosisDataSet(Dataset):
    """Text-classification dataset for defect severity diagnosis.

    Each input line is JSON with "text" and "label" fields; labels map to
    {一般: 0, 严重: 1, 危急: 2}. Processed features are cached to disk per
    (data_set_name, max_len) and reloaded unless is_overwrite is set.
    """

    def __init__(self, tokenizer, max_len, data_dir, data_set_name, path_file=None, train_rate = 1.0, is_overwrite=True):
        self.tokenizer = tokenizer
        self.max_len = max_len
        # Severity label -> class id.
        self.label_dict = {"一般": 0, "严重": 1, "危急": 2}
        self.data_set_name = data_set_name
        # Fraction controlling how many training samples per label are kept
        # (cap is int(train_rate * 200) per label; see load_data).
        self.train_rate = train_rate
        cached_feature_file = os.path.join(data_dir, "cached_{}_{}".format(data_set_name, max_len))
        if os.path.exists(cached_feature_file) and not is_overwrite:
            # Cache hit: reuse the previously processed features.
            logger.info("已经存在缓存文件{},直接加载".format(cached_feature_file))
            self.data_set = torch.load(cached_feature_file)["data_set"]
        else:
            logger.info("不存在缓存文件{},进行数据预处理操作".format(cached_feature_file))
            self.data_set = self.load_data(path_file)
            logger.info("数据预处理操作完成,将处理后的数据存到{}中,作为缓存文件".format(cached_feature_file))
            torch.save({"data_set": self.data_set}, cached_feature_file)

    def load_data(self, path_file):
        """Read JSON-lines, filter unknown labels, featurize each sample."""
        self.data_set = []
        label_number_dict = {"一般": 0, "严重": 0, "危急": 0}
        with open(path_file, "r", encoding="utf-8") as fh:
            for idx, line in enumerate(tqdm(fh, desc="iter", disable=False)):
                sample = json.loads(line)
                # Skip samples with labels outside the known set.
                if sample["label"] not in self.label_dict:
                    continue
                if "train" in self.data_set_name:
                    # Subsample training data: cap each label's count
                    # (NOTE(review): the > check before incrementing admits
                    # cap+1 samples per label -- confirm intended).
                    if label_number_dict[sample["label"]] > int(self.train_rate * 200):
                        continue
                    else:
                        label_number_dict[sample["label"]] += 1
                input_ids, attention_mask, label = self.convert_feature(sample)
                self.data_set.append({"input_ids": input_ids, "attention_mask": attention_mask, "label": label})
        return self.data_set

    def convert_feature(self, sample):
        """Tokenize one sample; returns (input_ids, attention_mask, label)."""
        label = self.label_dict[sample["label"]]
        tokens = self.tokenizer.tokenize(sample["text"])
        # Truncate to leave room for [CLS] and [SEP].
        if len(tokens) > self.max_len - 2:
            tokens = tokens[:self.max_len - 2]
        tokens = ["[CLS]"] + tokens + ["[SEP]"]
        input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
        attention_mask = [1] * len(input_ids)
        return input_ids, attention_mask, label

    def __len__(self):
        return len(self.data_set)

    def __getitem__(self, idx):
        instance = self.data_set[idx]
        return instance
def collate_func_defect_diagnosis(batch_data):
    """DataLoader collator: pads token ids and attention masks to the
    longest sample in the batch and stacks the class labels."""
    if len(batch_data) == 0:
        return {}
    ids = [torch.tensor(item["input_ids"], dtype=torch.long) for item in batch_data]
    masks = [torch.tensor(item["attention_mask"], dtype=torch.long) for item in batch_data]
    labels = [item["label"] for item in batch_data]
    return {"input_ids": pad_sequence(ids, batch_first=True, padding_value=0),
            "attention_mask": pad_sequence(masks, batch_first=True, padding_value=0),
            "label": torch.tensor(labels, dtype=torch.long)}
class EntityExtractDataSet(Dataset):
    """Sequence-labeling dataset with per-character BIOES entity tags.

    Each input line is JSON with "text" and a per-character "label" list.
    Processed features are cached to disk per (data_set_name, max_len).
    """

    def __init__(self, tokenizer, max_len, data_dir, data_set_name, path_file=None, is_overwrite=True):
        self.tokenizer = tokenizer
        self.max_len = max_len
        # BIOES tagging scheme -> tag ids.
        self.label_dict = {"O": 0, "B": 1, "I": 2, "E": 3, "S": 4}
        cached_feature_file = os.path.join(data_dir, "cached_{}_{}".format(data_set_name, max_len))
        if os.path.exists(cached_feature_file) and not is_overwrite:
            # Cache hit: reuse the previously processed features.
            logger.info("已经存在缓存文件{},直接加载".format(cached_feature_file))
            self.data_set = torch.load(cached_feature_file)["data_set"]
        else:
            logger.info("不存在缓存文件{},进行数据预处理操作".format(cached_feature_file))
            self.data_set = self.load_data(path_file)
            logger.info("数据预处理操作完成,将处理后的数据存到{}中,作为缓存文件".format(cached_feature_file))
            torch.save({"data_set": self.data_set}, cached_feature_file)

    def load_data(self, path_file):
        """Read JSON-lines and featurize every sample (no filtering)."""
        self.data_set = []
        with open(path_file, "r", encoding="utf-8") as fh:
            for idx, line in enumerate(tqdm(fh, desc="iter", disable=False)):
                sample = json.loads(line)
                input_ids, attention_mask, labels, tokens = self.convert_feature(sample)
                self.data_set.append(
                    {"input_ids": input_ids, "attention_mask": attention_mask, "labels": labels, "tokens": tokens})
        return self.data_set

    def convert_feature(self, sample):
        """Tokenize character by character, aligning one tag per character."""
        tokens = []
        for t in sample["text"]:
            token = self.tokenizer.tokenize(t)
            if len(token) == 0:
                # Character the tokenizer cannot handle: keep alignment with
                # a reserved placeholder token.
                tokens.append("[unused1]")
            else:
                # NOTE(review): a character tokenizing into several pieces
                # would desynchronize tokens and labels -- confirm inputs
                # are single-piece (e.g. Chinese characters).
                tokens.extend(token)
        labels = [self.label_dict[l] for l in sample["label"]]
        if len(tokens) > self.max_len - 2:
            tokens = tokens[:self.max_len - 2]
            labels = labels[:self.max_len - 2]
        # [CLS]/[SEP] get the "O" (0) tag.
        tokens = ["[CLS]"] + tokens + ["[SEP]"]
        labels = [0] + labels + [0]
        input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
        attention_mask = [1] * len(input_ids)
        return input_ids, attention_mask, labels, tokens

    def __len__(self):
        return len(self.data_set)

    def __getitem__(self, idx):
        instance = self.data_set[idx]
        return instance
def collate_func_entity_extract(batch_data):
    """DataLoader collator for entity extraction: pads ids, masks and tag
    sequences to the longest sample; token lists are passed through."""
    if len(batch_data) == 0:
        return {}
    ids = [torch.tensor(item["input_ids"], dtype=torch.long) for item in batch_data]
    masks = [torch.tensor(item["attention_mask"], dtype=torch.long) for item in batch_data]
    tag_seqs = [torch.tensor(item["labels"], dtype=torch.long) for item in batch_data]
    return {"input_ids": pad_sequence(ids, batch_first=True, padding_value=0),
            "attention_mask": pad_sequence(masks, batch_first=True, padding_value=0),
            "labels": pad_sequence(tag_seqs, batch_first=True, padding_value=0),
            "tokens": [item["tokens"] for item in batch_data]}
class DefectExtractDataSet(Dataset):
    """Multi-task dataset: four classification labels (label1-4, vocab read
    from the given dict files) plus a text span (label5) located by
    start/end character offsets, with BIOES ids available in label5_dict.
    """

    def __init__(self, tokenizer, max_len, data_dir, data_set_name, label1_dict_path, label2_dict_path,
                 label3_dict_path, label4_dict_path, path_file=None, is_overwrite=True):
        self.tokenizer = tokenizer
        self.max_len = max_len
        # Each label file holds one label string per line; map to 0..n-1.
        with open(label1_dict_path, "r", encoding="utf-8") as fh:
            l_list = [l.strip() for l in fh.readlines()]
        self.label1_dict = dict(zip(l_list, list(range(0, len(l_list)))))
        logger.info("label1_dict is {}".format(self.label1_dict))
        with open(label2_dict_path, "r", encoding="utf-8") as fh:
            l_list = [l.strip() for l in fh.readlines()]
        self.label2_dict = dict(zip(l_list, list(range(0, len(l_list)))))
        logger.info("label2_dict is {}".format(self.label2_dict))
        with open(label3_dict_path, "r", encoding="utf-8") as fh:
            l_list = [l.strip() for l in fh.readlines()]
        self.label3_dict = dict(zip(l_list, list(range(0, len(l_list)))))
        logger.info("label3_dict is {}".format(self.label3_dict))
        with open(label4_dict_path, "r", encoding="utf-8") as fh:
            l_list = [l.strip() for l in fh.readlines()]
        self.label4_dict = dict(zip(l_list, list(range(0, len(l_list)))))
        logger.info("label4_dict is {}".format(self.label4_dict))
        self.label5_dict = {"O": 0, "B": 1, "I": 2, "E": 3, "S": 4}
        logger.info("label5_dict is {}".format(self.label5_dict))
        cached_feature_file = os.path.join(data_dir, "cached_{}_{}".format(data_set_name, max_len))
        if os.path.exists(cached_feature_file) and not is_overwrite:
            # Cache hit: reuse the previously processed features.
            logger.info("已经存在缓存文件{},直接加载".format(cached_feature_file))
            self.data_set = torch.load(cached_feature_file)["data_set"]
        else:
            logger.info("不存在缓存文件{},进行数据预处理操作".format(cached_feature_file))
            self.data_set = self.load_data(path_file)
            logger.info("数据预处理操作完成,将处理后的数据存到{}中,作为缓存文件".format(cached_feature_file))
            torch.save({"data_set": self.data_set}, cached_feature_file)

    def load_data(self, path_file):
        """Read JSON-lines and featurize every sample."""
        self.data_set = []
        with open(path_file, "r", encoding="utf-8") as fh:
            for idx, line in enumerate(tqdm(fh, desc="iter", disable=False)):
                sample = json.loads(line)
                input_ids, attention_mask, label1, label2, label3, label4, label5_start, label5_end, tokens = self.convert_feature(
                    sample)
                self.data_set.append(
                    {"input_ids": input_ids, "attention_mask": attention_mask, "label1": label1, "label2": label2,
                     "label3": label3, "label4": label4, "label5_start": label5_start, "label5_end": label5_end, "tokens": tokens})
        return self.data_set

    def convert_feature(self, sample):
        """Tokenize one sample and map all five labels to ids/offsets."""
        # Span offsets are computed on raw characters; find() returns -1 if
        # label5 is absent (NOTE(review): a miss would yield offset 0 after
        # the +1 shift below -- confirm label5 always occurs in text).
        label5_start = sample["text"].find(sample["label5"])
        label5_end = label5_start + len(sample["label5"])
        tokens = []
        for t in sample["text"]:
            token = self.tokenizer.tokenize(t)
            if len(token) == 0:
                # Keep character/offset alignment with a placeholder token.
                tokens.append("[unused1]")
            else:
                # NOTE(review): assumes one token per character, otherwise
                # character offsets and token positions diverge -- confirm.
                tokens.extend(token)
        label1 = self.label1_dict[sample["label1"]]
        label2 = self.label2_dict[sample["label2"]]
        label3 = self.label3_dict[sample["label3"]]
        label4 = self.label4_dict[sample["label4"]]
        # NOTE(review): truncation may leave label5_start/end pointing past
        # max_len -- confirm downstream handles that case.
        if len(tokens) > self.max_len - 2:
            tokens = tokens[:self.max_len - 2]
        tokens = ["[CLS]"] + tokens + ["[SEP]"]
        # Shift the span by one for the prepended [CLS].
        label5_start = label5_start + 1
        label5_end = label5_end + 1
        input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
        attention_mask = [1] * len(input_ids)
        return input_ids, attention_mask, label1, label2, label3, label4, label5_start, label5_end, tokens

    def __len__(self):
        return len(self.data_set)

    def __getitem__(self, idx):
        instance = self.data_set[idx]
        return instance
def collate_func_defect_extract(batch_data):
    """DataLoader collator for the multi-task defect extractor: pads ids and
    masks, stacks the scalar labels (label1-4 and the label5 span offsets),
    and passes token lists through unchanged."""
    if len(batch_data) == 0:
        return {}
    ids = [torch.tensor(item["input_ids"], dtype=torch.long) for item in batch_data]
    masks = [torch.tensor(item["attention_mask"], dtype=torch.long) for item in batch_data]
    batch = {"input_ids": pad_sequence(ids, batch_first=True, padding_value=0),
             "attention_mask": pad_sequence(masks, batch_first=True, padding_value=0),
             "tokens": [item["tokens"] for item in batch_data]}
    # All remaining labels are per-sample scalars -> one 1-D tensor each.
    for key in ("label1", "label2", "label3", "label4", "label5_start", "label5_end"):
        batch[key] = torch.tensor([item[key] for item in batch_data], dtype=torch.long)
    return batch
|
import csv
import sys
import logging
import datetime
from collections import OrderedDict
# Minimum row-count threshold; its consumer is not visible in this part of
# the module -- presumably a sanity check on parsed CSV size (confirm).
_MINROWNUMBER = 50
def _get_subnetaddresses(config_parser, section, conntype, logger):
if conntype == "wifi":
client_subnetaddr = config_parser.get(section, "client_subnetaddr_wifi")
edgeserver_subnetaddr = config_parser.get(section, "edgeserver_subnetaddr_wifi")
cloudserver_subnetaddr = config_parser.get(section, "remoteserver_subnetaddr_wifi")
elif conntype == "lte":
client_subnetaddr = config_parser.get(section, "client_subnetaddr_lte")
edgeserver_subnetaddr = config_parser.get(section, "edgeserver_subnetaddr_lte")
cloudserver_subnetaddr = config_parser.get(section, "remoteserver_subnetaddr_lte")
else:
print ("unknown connection type")
logger.critical("unknown connection type " + str(conntype))
logger.critical("EXIT")
sys.exit(0)
return client_subnetaddr, edgeserver_subnetaddr, cloudserver_subnetaddr
def readvalues_activelatencyboxplot(inputfile, noise, segment):
    """Read RTT latency samples for one network segment from an active
    measurement CSV export and return them as a flat list of floats.

    File layout: line 0 is the query, line 1 its arguments, line 2 the
    column names (verified below), then one row per measurement.

    Parameters:
        inputfile: path of the CSV export.
        noise: unused; kept for interface compatibility with callers.
        segment: "clientNitos", "clientUnipi" or "NitosUnipi"; the process
            exits on any other value, and also on an unknown Direction or
            an unexpected header row.
    """
    # segment -> (substring the Keyword column must contain,
    #             identity that must be on the measured side of the flow)
    segment_rules = {
        "clientNitos": ("local", "Client"),
        "clientUnipi": ("remote", "Client"),
        "NitosUnipi": ("local", "Observer"),
    }
    if segment not in segment_rules:
        print("unknown segment")
        sys.exit(0)
    required_keyword, identity = segment_rules[segment]
    ret = []
    with open(inputfile, "r") as csvinput:
        csvreader = csv.reader(csvinput, delimiter=",")
        linecount = 0
        for row in csvreader:
            # lines 0-1: query and its arguments
            if linecount == 0 or linecount == 1:
                linecount += 1
                continue
            # line 2: column names -- verify the expected layout
            if linecount == 2:
                try:
                    assert row[12] == "latency"
                    assert row[9] == "Keyword"
                    assert row[6] == "ReceiverIdentity"
                    assert row[5] == "SenderIdentity"
                    assert row[4] == "Command"
                    assert row[3] == "Direction"
                except Exception as e:
                    print(e)
                    print(row)
                    sys.exit(1)
                linecount += 1
                continue
            linecount += 1
            direction = row[3]
            latency = float(row[12])
            assert row[4] == "TCPRTT" or row[4] == "UDPRTT"
            if required_keyword not in row[9]:
                continue
            # Upstream rows are kept when the sender matches the segment's
            # identity, Downstream rows when the receiver matches it.
            if direction == "Upstream":
                if row[5] == identity:
                    ret.append(latency)
            elif direction == "Downstream":
                if row[6] == identity:
                    ret.append(latency)
            else:
                print("unknown direction")
                sys.exit(0)
    print("read " + str(linecount) + " from " + inputfile + "(including headers)")
    return ret
def readvalues_activebandwidthboxplot(inputfile, noise, segment):
    """Read per-test average bandwidths (Mbit/s) for one network segment
    from an active measurement CSV export.

    Rows of the same test ID are accumulated (Kbit -> Mbit, nanoseconds ->
    seconds); whenever a new test ID is seen, the finished test's average
    Mbit/s is appended to the result.

    Parameters:
        inputfile: path of the CSV export (line 0: query, line 1: arguments,
            line 2: column names, then one row per measurement).
        noise: unused; kept for interface compatibility with callers.
        segment: "clientNitos", "clientUnipi" or "NitosUnipi"; the process
            exits on any other value, on an unknown Direction, or on an
            unexpected header row.

    Bug fix vs. the previous version: the accumulated values of the LAST
    test in the file are now flushed into the result instead of being
    silently dropped at end of file.
    """
    # segment -> (substring the Keyword column must contain,
    #             identity that must be on the measured side of the flow)
    segment_rules = {
        "clientNitos": ("local", "Client"),
        "clientUnipi": ("remote", "Client"),
        "NitosUnipi": ("local", "Observer"),
    }
    if segment not in segment_rules:
        print("unknown segment")
        sys.exit(0)
    required_keyword, identity = segment_rules[segment]
    ret = []
    Mb_s = 0
    sec = 0
    lastID = -1
    with open(inputfile, "r") as csvinput:
        csvreader = csv.reader(csvinput, delimiter=",")
        linecount = 0
        for row in csvreader:
            # lines 0-1: query and its arguments
            if linecount == 0 or linecount == 1:
                linecount += 1
                continue
            # line 2: column names -- verify the expected layout
            if linecount == 2:
                try:
                    assert row[12] == "Kbit"
                    assert row[13] == "nanoTimes"
                    assert row[9] == "Keyword"
                    assert row[6] == "ReceiverIdentity"
                    assert row[5] == "SenderIdentity"
                    assert row[4] == "Command"
                    assert row[3] == "Direction"
                    assert row[1] == "ID"
                except Exception as e:
                    print(e)
                    print(row)
                    sys.exit(1)
                linecount += 1
                continue
            linecount += 1
            testID = row[1]
            direction = row[3]
            Mbit = float(row[12]) / 1000            # Kbit -> Mbit
            s = float(row[13]) / 1000000000         # nanoseconds -> seconds
            assert row[4] == "TCPBandwidth" or row[4] == "UDPBandwidth"
            if required_keyword not in row[9]:
                continue
            # Upstream rows are kept when the sender matches the segment's
            # identity, Downstream rows when the receiver matches it.
            if direction == "Upstream":
                matched = row[5] == identity
            elif direction == "Downstream":
                matched = row[6] == identity
            else:
                print("unknown direction")
                sys.exit(0)
            if not matched:
                continue
            if lastID == -1:
                # first measurement of the first test
                lastID = testID
                Mb_s = Mbit
                sec = s
            elif testID == lastID:
                # another measurement of the same test
                Mb_s += Mbit
                sec += s
            else:
                # new test: emit the previous test's average, start over
                ret.append(Mb_s / sec)
                lastID = testID
                Mb_s = Mbit
                sec = s
    if lastID != -1:
        # BUGFIX: flush the final test's average (previously dropped at EOF)
        ret.append(Mb_s / sec)
    print("read " + str(linecount) + " from " + inputfile + "(including headers)")
    return ret
def readvalues_activebandwidthlineplot(config_parser, section, command, direction, conn):
    """Build x/y time-series of per-test average bandwidth for three network
    segments, reading one CSV per configured noise level from csv/active/.

    Returns three dicts (clientNitos, clientUnipi, NitosUnipi), each with
    "x" (timestamps), "y" (values) and a "legend" label.

    NOTE(review) -- several apparent leftovers to confirm with the author:
      * there is no else branch for an unknown `conn`, so dates_list would
        be unbound and the loop below would raise;
      * for RTT commands the constants 100/200/300 are appended instead of
        the measured latency (they look like debug placeholders);
      * Mbit here is Kbit/1024, while the boxplot reader divides by 1000;
      * legend[3] is built but attached to no returned series.
    """
    noiselist = config_parser.get(section, "noise").split(",")
    if conn == "wifi":
        dates_list = config_parser.get(section, "dates_activewifi").split(",")
    elif conn == "lte":
        dates_list = config_parser.get(section, "dates_activelte").split(",")
    legend = []
    if direction == "Upstream":
        legend.append("Client -> Observer (Nitos)")
        legend.append("Client -> Observer (unipi)")
        legend.append("Observer (Nitos) -> Remote(unipi)")
        legend.append("Observer (unipi) -> Remote(unipi)")
    elif direction == "Downstream":
        legend.append("Observer (Nitos) -> Client")
        legend.append("Observer (unipi) -> Client")
        legend.append("Remote(unipi) -> Observer (Nitos)")
        legend.append("Remote(unipi) -> Observer (unipi)")
    clientNitos = {"x":[], "y":[], "legend": legend[0]}
    clientUnipi = {"x":[], "y":[], "legend": legend[1]}
    NitosUnipi = {"x":[], "y":[], "legend": legend[2]}
    lastID = -1
    Mbitlist = []
    seclist = []
    for noise in noiselist:
        # file name pattern: csv/active/<cmd>-<dir>-<conn>-noise<n>_<first>-<last>.csv
        inputfile = "csv/active/" + command + "-" + direction + "-" + conn + "-noise" + noise + "_"
        inputfile += dates_list[0].strip() + "-" + dates_list[-1].strip() + ".csv"
        with open (inputfile, "r") as csvinput:
            csvreader = csv.reader(csvinput, delimiter=",")
            linecount = 0
            for row in csvreader:
                # lines 0-1: query and its arguments
                if linecount == 0 or linecount == 1:
                    linecount += 1
                    continue
                # line 2: column names -- verify the expected layout
                if linecount == 2:
                    try:
                        if command == "TCPBandwidth" or command == "UDPBandwidth":
                            assert row[12] == "Kbit"
                            assert row[13] == "nanoTimes"
                        elif command == "TCPRTT" or command == "UDPRTT":
                            assert row[12] == "latency"
                        else:
                            print ("unknown command")
                            sys.exit(0)
                        assert row[9] == "Keyword"
                        assert row[5] == "SenderIdentity"
                        assert row[6] == "ReceiverIdentity"
                        assert row[3] == "Direction"
                        assert row[2] == "Timestamp"
                        assert row[1] == "ID"
                    except:
                        print (row)
                        sys.exit(1)
                    linecount += 1
                    #print row
                    continue
                linecount += 1
                # sanity: every data row must match the requested direction
                if direction != row[3]:
                    sys.exit(0)
                    continue  # NOTE(review): unreachable after sys.exit
                if command == "TCPBandwidth" or command == "UDPBandwidth":
                    Kbit = float(row[12])
                    Mbit = Kbit/1024
                    nanosec = float(row[13])
                    sec = 1.0 * nanosec/1000000000
                elif command == "TCPRTT" or command == "UDPRTT":
                    # row[12] == "latency"
                    measure = 100  # NOTE(review): assigned but never used
                # "local" keyword: traffic observed at the Nitos (edge) observer
                if "local" in row[9]:
                    if row[5] == "Client" or row[6] == "Client":
                        if command == "TCPBandwidth" or command == "UDPBandwidth":
                            # accumulate samples of the current test; when a
                            # new test ID appears, emit the finished average
                            if lastID == -1 or lastID == row[1]:
                                Mbitlist.append(Mbit)
                                seclist.append(sec)
                                if lastID == -1:
                                    lastID = row[1]
                            else:
                                lastID = row[1]
                                clientNitos["y"].append(1.0 * sum(Mbitlist)/sum(seclist))
                                clientNitos["x"].append(row[2])
                                Mbitlist = []
                                seclist = []
                        else:
                            # RTT command: placeholder value (see docstring)
                            clientNitos["y"].append(100)
                    elif row[5] == "Server" or row[6] == "Server":
                        if command == "TCPBandwidth" or command == "UDPBandwidth":
                            if lastID == -1 or lastID == row[1]:
                                Mbitlist.append(Mbit)
                                seclist.append(sec)
                                if lastID == -1:
                                    lastID = row[1]
                            else:
                                lastID = row[1]
                                NitosUnipi["y"].append(1.0 * sum(Mbitlist)/sum(seclist))
                                NitosUnipi["x"].append(row[2])
                                Mbitlist = []
                                seclist = []
                        else:
                            # RTT command: placeholder value (see docstring)
                            NitosUnipi["y"].append(200)
                    else:
                        print ("error")
                        print (row)
                        sys.exit(0)
                # "remote" keyword: traffic observed at the unipi (cloud) side
                if "remote" in row[9]:
                    if row[5] == "Client" or row[6] == "Client":
                        if command == "TCPBandwidth" or command == "UDPBandwidth":
                            if lastID == -1 or lastID == row[1]:
                                Mbitlist.append(Mbit)
                                seclist.append(sec)
                                if lastID == -1:
                                    lastID = row[1]
                            else:
                                lastID = row[1]
                                clientUnipi["y"].append(1.0 * sum(Mbitlist)/sum(seclist))
                                clientUnipi["x"].append(row[2])
                                Mbitlist = []
                                seclist = []
                        else:
                            # RTT command: placeholder value (see docstring)
                            clientUnipi["y"].append(300)
                    elif row[5] == "Server" or row[6] == "Server":
                        continue
                    else:
                        print ("error")
                        print (row)
                        sys.exit(0)
        print ("read " + str(linecount) + " from " + inputfile + "(including headers)")
    return clientNitos, clientUnipi, NitosUnipi
def readbandwidthvalues_timeseries_self(config_parser, section, inputfile, edgeserver, conntype):
    """Read the per-row bandwidth column of a legacy self-measurement CSV
    and return it as a flat list of Mbit/s values.

    Parameters:
        config_parser: ConfigParser holding the *_subnetaddr_* options.
        section: config section to read the subnet addresses from.
        inputfile: CSV path; must contain "SORTED_LEGACY" and "self", and
            must also contain the conntype string (checked per row).
        edgeserver: True -> keep rows whose ServerIP is in the edge subnet,
            False -> keep rows whose ServerIP is in the remote subnet.
        conntype: "wifi" or "lte"; the process exits otherwise.

    NOTE(review): row[13] is labelled "Bytes" in the header but is scaled
    as if it were kbit/s (divided by 1000) -- confirm the export's units.
    """
    assert "SORTED_LEGACY" in inputfile
    assert "self" in inputfile
    ret = []
    if conntype == "wifi":
        client_subnetaddr = config_parser.get(section, "client_subnetaddr_wifi")
        edgeserver_subnetaddr = config_parser.get(section, "edgeserver_subnetaddr_wifi")
        remoteserver_subnetaddr = config_parser.get(section, "remoteserver_subnetaddr_wifi")
    elif conntype == "lte":
        client_subnetaddr = config_parser.get(section, "client_subnetaddr_lte")
        edgeserver_subnetaddr = config_parser.get(section, "edgeserver_subnetaddr_lte")
        remoteserver_subnetaddr = config_parser.get(section, "remoteserver_subnetaddr_lte")
    else:
        print("unknown connection type")
        sys.exit(0)
    with open(inputfile, "r") as csvinput:
        csvreader = csv.reader(csvinput, delimiter=",")
        linecount = 0
        for row in csvreader:
            # lines 0-1: query and its arguments
            if linecount == 0 or linecount == 1:
                linecount += 1
                continue
            # line 2: column names -- verify the expected layout
            # columns: ID,Timestamp,ClientIP,ClientPort,ServerIP,ServerPort,Keyword,Direction,Protocol,Mode,Type,ID,Timestamp,Bytes
            if linecount == 2:
                try:
                    assert row[13] == "Bytes"
                    assert row[12] == "Timestamp"
                    assert row[6] == "Keyword"
                    assert row[2] == "ClientIP"
                    assert row[4] == "ServerIP"
                except Exception as e:
                    print(e)
                    print(row)
                    sys.exit(1)
                linecount += 1
                continue
            serverIP = row[4]
            # sanity check: the client address must be in the expected subnet
            # and the file name must match the connection type; skip otherwise
            try:
                assert row[2][:len(client_subnetaddr)].strip() == client_subnetaddr.strip()
                assert conntype.strip() in inputfile
            except Exception:
                print(conntype)
                print(inputfile)
                print(row[2][:len(client_subnetaddr)] + "!=" + client_subnetaddr)
                linecount += 1
                continue
            linecount += 1
            # keep only rows addressed to the selected server subnet
            if (edgeserver == True and serverIP[:len(edgeserver_subnetaddr)] == edgeserver_subnetaddr) or \
                    (edgeserver == False and serverIP[:len(remoteserver_subnetaddr)] == remoteserver_subnetaddr):
                bandwidthkbps = float(row[13])
                ret.append(bandwidthkbps / 1000)
    print("read " + str(linecount) + " from " + inputfile + "(including headers)")
    return ret
def readbandwidthvalues_mim(config_parser, section, inputfile, connectiontype, segment, logger):
    """Read per-flow bandwidth samples (Mbit/s) from a sorted legacy "mim"
    (man-in-the-middle capture) CSV export, returned as one flat list.

    A flow ("test") is identified by clientIP-clientPort-serverIP-serverPort.
    Within a flow, bytes are accumulated until at least one second of capture
    time has elapsed, then converted into one Mbit/s sample.  Samples of
    flows with at least _MINROWNUMBER rows are collected into the result.

    segment selects the server-side subnet to keep: "edge" or "remote".

    NOTE(review): the last flow in the file is never flushed into the
    result (compare readbandwidthvalues_mim_perclient, which appends the
    final flow after its loop) -- confirm whether this is intentional.
    """
    assert "SORTED_LEGACY" in inputfile
    assert "mim" in inputfile
    logger.debug("\n")
    ret = []
    last_testID = ""
    # resolve subnet prefixes for the requested connection type
    if connectiontype == "wifi":
        client_subnetaddr = config_parser.get(section, "client_subnetaddr_wifi")
        edgeserver_subnetaddr = config_parser.get(section, "edgeserver_subnetaddr_wifi")
        cloudserver_subnetaddr = config_parser.get(section, "remoteserver_subnetaddr_wifi")
    elif connectiontype == "lte":
        client_subnetaddr = config_parser.get(section, "client_subnetaddr_lte")
        edgeserver_subnetaddr = config_parser.get(section, "edgeserver_subnetaddr_lte")
        cloudserver_subnetaddr = config_parser.get(section, "remoteserver_subnetaddr_lte")
    else:
        print ("unknown connection type")
        logger.critical("unknown connection type")
        logger.critical("EXIT")
        sys.exit(0)
    logger.debug("inputfile = " + str(inputfile))
    logger.debug("connectiontype = " + str(connectiontype))
    logger.debug("segment = " + segment)
    logger.debug("client_subnetaddr = " + str(client_subnetaddr))
    logger.debug("edgeserver_subnetaddr = " + str(edgeserver_subnetaddr))
    logger.debug("cloudserver_subnetaddr = " + str(cloudserver_subnetaddr))
    with open (inputfile, "r") as csvinput:
        csvreader = csv.reader(csvinput, delimiter=",")
        linecount = 0
        for row in csvreader:
            #line #0 contains the query
            #line #1 contains query's arguments
            if linecount == 0 or linecount == 1:
                linecount += 1
                logger.debug("line " + (str(linecount) + ": " + str(row)))
                continue
            #line #3 contains the name of each column
            # mim-bandwidth columns: ID,Timestamp,ClientIP,ClientPort,ServerIP,ServerPort,Keyword,
            # Direction,Protocol,Mode,Type,ID,Timestamp,Bytes
            if linecount == 2:
                try:
                    assert row[13] == "Bytes"
                    assert row[12] == "Timestamp" # in microsec
                    assert row[6] == "Keyword"
                    assert row[2] == "ClientIP"
                    assert row[4] == "ServerIP"
                    assert row[3] == "ClientPort"
                    assert row[5] == "ServerPort"
                except:
                    print (row)
                    logger.critical("linecount = 2 " + str(row) + "unexpected columns")
                    logger.critical ("EXIT")
                    sys.exit(1)
                linecount += 1
                #print row
                continue
            linecount += 1
            byte = float(row[13])
            currenttimestamp_micros = float(row[12]) #timestamp microsecons
            clientIP = row[2]
            serverIP = row[4]
            clientPort = row[3]
            serverPort = row[5]
            # keep only rows addressed to the selected server subnet
            if (segment == "edge" and row[4][:len(edgeserver_subnetaddr)] == edgeserver_subnetaddr) or \
                (segment == "remote" and row[4][:len(cloudserver_subnetaddr)] == cloudserver_subnetaddr):
                currentTestID = clientIP + "-" + clientPort + "-" + serverIP + "-" + serverPort
                if last_testID == "":
                    #this is the first row
                    last_testID = currentTestID
                    lastclientIP = clientIP
                    #################### FOR DEBUGGING ONLY ####################
                    lastClientPort = clientPort
                    lastServerIP = serverIP
                    lastServerPort = serverPort
                    ############################################################
                    previoustimestamp_micros = currenttimestamp_micros
                    packets_bandwidth = []
                    currentByte = 0.0
                    rowcounter = 1
                    current_micros = 0.0
                elif last_testID == currentTestID:
                    #same test
                    #################### FOR DEBUGGING ONLY ####################
                    assert clientIP == lastclientIP
                    assert serverIP == lastServerIP
                    assert clientPort == lastClientPort
                    assert serverPort == lastServerPort
                    try:
                        # rows must be time-sorted within a flow
                        assert previoustimestamp_micros <= currenttimestamp_micros
                    except:
                        print (previoustimestamp_micros)
                        print (currenttimestamp_micros)
                        logger.critical("previoustimestamp_micros = " + str(previoustimestamp_micros))
                        logger.critical("currenttimestamp_micros = " + str(currenttimestamp_micros))
                        sys.exit(0)
                    ##############################################################
                    rowcounter += 1
                    if byte > 0:
                        currentByte += byte
                    current_micros += currenttimestamp_micros - previoustimestamp_micros
                    if current_micros >= 1000000: #more than one sec
                        # at least one second accumulated: emit one sample
                        current_s = current_micros /1000000 #from microseconds to seconds
                        bps = (currentByte * 8) / current_s
                        Mbps = bps / 1000000
                        packets_bandwidth.append(Mbps)
                        currentByte = 0.0
                        current_micros = 0.0
                    previoustimestamp_micros = currenttimestamp_micros
                else:
                    #newtest
                    # flush the remainder of the finished flow, then keep it
                    # only if it had enough rows
                    if currentByte > 0:
                        current_s = current_micros /1000000
                        bps = (currentByte * 8) / current_s
                        Mbps = bps / 1000000
                        packets_bandwidth.append(Mbps)
                    if rowcounter >= _MINROWNUMBER:
                        for elem in packets_bandwidth:
                            #print (elem)
                            ret.append(elem)
                        #logger.debug("accepted " + last_testID + " with rowcounter " + str(rowcounter))
                    else:
                        #print ("skipped " + last_testID + " with rowcounter " + str(rowcounter))
                        logger.debug("skipped " + last_testID + " with rowcounter " + str(rowcounter))
                    last_testID = currentTestID
                    lastclientIP = clientIP
                    ############################FOR DEBUGGING ONLY##############################
                    lastClientPort = clientPort
                    lastServerIP = serverIP
                    lastServerPort = serverPort
                    ###########################################################################
                    previoustimestamp_micros = currenttimestamp_micros
                    packets_bandwidth = []
                    currentByte = 0.0
                    rowcounter = 1
                    current_micros = 0.0
    #print ret
    print ("read " + str(linecount) + " from " + inputfile + "(including headers)")
    return ret
def readbandwidthvalues_mim_usingfixbucket(config_parser, section, inputfile, connectiontype, segment, logger,
                                           bucketsize_microsec):
    """Read bandwidth samples (Mbit/s) from a sorted (non-legacy) "mim"
    capture CSV using fixed time buckets of ``bucketsize_microsec``.

    Rows are grouped per client IP (the file is sorted, so each client's
    rows are contiguous).  Within a client, bytes are accumulated into
    consecutive buckets; each completed bucket yields one Mbit/s sample
    appended to the flat result list.  The very first packet of a client
    only starts the bucket clock (its bytes are not counted), matching the
    original behaviour.

    segment: "edge" keeps rows whose ServerIP is in the edge subnet,
             "remote" keeps rows in the remote (cloud) subnet.

    Bug fix vs. the previous version: when switching to a new client the
    byte accumulator was reset through a misspelled variable
    (``currentByte``), so the previous client's residual bytes leaked into
    the new client's first bucket.  It now resets ``currentBytes`` to 0.0,
    matching the initialisation done for the very first client.

    NOTE(review): the last (incomplete) bucket of the last client is never
    flushed, and a time gap larger than one bucket still advances the
    bucket window by only one bucket -- confirm whether intentional.
    """
    assert "SORTED" in inputfile
    assert "LEGACY" not in inputfile
    assert "mim" in inputfile
    logger.debug("\n")
    ret = []
    lastclientIP = ""
    pastclientIP = []
    client_subnetaddr, edgeserver_subnetaddr, cloudserver_subnetaddr = _get_subnetaddresses(
        config_parser=config_parser, section=section, conntype=connectiontype,
        logger=logger)
    logger.debug("inputfile = " + str(inputfile))
    logger.debug("bucketsize_microsec = " + str(bucketsize_microsec))
    logger.debug("connectiontype = " + str(connectiontype))
    logger.debug("segment = " + segment)
    logger.debug("client_subnetaddr = " + str(client_subnetaddr))
    logger.debug("edgeserver_subnetaddr = " + str(edgeserver_subnetaddr))
    logger.debug("cloudserver_subnetaddr = " + str(cloudserver_subnetaddr))
    with open(inputfile, "r") as csvinput:
        csvreader = csv.reader(csvinput, delimiter=",")
        linecount = 0
        for row in csvreader:
            # line #0 contains the query, line #1 its arguments
            if linecount == 0 or linecount == 1:
                linecount += 1
                logger.debug("line " + (str(linecount) + ": " + str(row)))
                continue
            # line #2 contains the name of each column
            # mim-bandwidth columns: ID,Timestamp,ClientIP,ClientPort,ServerIP,ServerPort,Keyword,
            # Direction,Protocol,Mode,Type,ID,Timestamp,Bytes
            if linecount == 2:
                try:
                    assert row[13] == "Bytes"
                    assert row[12] == "Timestamp" # in microsec
                    assert row[6] == "Keyword"
                    assert row[2] == "ClientIP"
                    assert row[4] == "ServerIP"
                    assert row[3] == "ClientPort"
                    assert row[5] == "ServerPort"
                except:
                    print(row)
                    logger.critical("linecount = 2 " + str(row) + "unexpected columns")
                    logger.critical("EXIT")
                    sys.exit(1)
                linecount += 1
                continue
            linecount += 1
            byte = float(row[13])
            currenttimestamp_micros = float(row[12]) # timestamp in microseconds
            clientIP = row[2]
            serverIP = row[4]
            # keep only rows addressed to the selected server subnet
            if (segment == "edge" and serverIP[:len(edgeserver_subnetaddr)] == edgeserver_subnetaddr) or \
                    (segment == "remote" and serverIP[:len(cloudserver_subnetaddr)] == cloudserver_subnetaddr):
                if lastclientIP == "":
                    # first matching row: start the bucket clock
                    lastclientIP = clientIP
                    #################### FOR DEBUGGING ONLY ####################
                    pastclientIP.append(clientIP)
                    lastServerIP = serverIP
                    ############################################################
                    currentBytes = 0.0
                    previoustimestamp_micros = currenttimestamp_micros
                    bucket_starttime_microsec = currenttimestamp_micros
                    bucket_endtime_microsec = bucket_starttime_microsec + bucketsize_microsec
                elif lastclientIP == clientIP:
                    # same client: rows must be time-sorted
                    #################### FOR DEBUGGING ONLY ####################
                    assert serverIP == lastServerIP
                    try:
                        assert clientIP in pastclientIP
                        assert previoustimestamp_micros <= currenttimestamp_micros
                    except Exception:
                        print(clientIP)
                        print(pastclientIP)
                        print(linecount)
                        print(previoustimestamp_micros)
                        print(currenttimestamp_micros)
                        logger.critical("assertion failed: assert previoustimestamp_micros <= currenttimestamp_micros")
                        logger.critical("line number = " + str(linecount))
                        logger.critical("previoustimestamp_micros=" + str(previoustimestamp_micros))
                        logger.critical("currenttimestamp_micros=" + str(currenttimestamp_micros))
                        logger.critical("EXIT")
                        sys.exit(-1)
                    ##############################################################
                    if currenttimestamp_micros < bucket_endtime_microsec:
                        # packet falls within the current bucket
                        currentBytes += byte
                    else:
                        # bucket finished: emit its average bandwidth
                        if currentBytes == 0:
                            Mbps = 0
                        else:
                            bucketsize_sec = 1.0 * bucketsize_microsec / 1000000
                            bps = (1.0 * currentBytes * 8) / bucketsize_sec
                            Mbps = bps / 1000000
                        ret.append(Mbps)
                        currentBytes = byte
                        bucket_starttime_microsec = bucket_endtime_microsec
                        bucket_endtime_microsec = bucket_starttime_microsec + bucketsize_microsec
                else:
                    # switch to a new client: flush the previous client's bucket
                    pastclientIP.append(clientIP)
                    if currentBytes == 0:
                        Mbps = 0
                    else:
                        bucketsize_sec = 1.0 * bucketsize_microsec / 1000000
                        bps = (1.0 * currentBytes * 8) / bucketsize_sec
                        Mbps = bps / 1000000
                    ret.append(Mbps)
                    lastclientIP = clientIP
                    lastServerIP = serverIP
                    # BUGFIX: was ``currentByte = 0.0`` (typo), which left the
                    # previous client's bytes in ``currentBytes``
                    currentBytes = 0.0
                    bucket_starttime_microsec = currenttimestamp_micros
                    bucket_endtime_microsec = bucket_starttime_microsec + bucketsize_microsec
                previoustimestamp_micros = currenttimestamp_micros
    #print ret
    print("read " + str(linecount) + " from " + inputfile + "(including headers)")
    return ret
def readbandwidthvalues_self(config_parser, section, inputfile, edgeserver, conntype):
    """Read the per-row bandwidth column of a sorted (non-legacy)
    self-measurement CSV and return it as a flat list of Mbit/s values.

    Parameters:
        config_parser: ConfigParser holding the *_subnetaddr_* options.
        section: config section to read the subnet addresses from.
        inputfile: CSV path; must contain "SORTED" and "self" but not
            "LEGACY", and must contain the conntype string (checked per row).
        edgeserver: True -> keep rows whose ServerIP is in the edge subnet,
            False -> keep rows whose ServerIP is in the remote subnet.
        conntype: "wifi" or "lte"; the process exits otherwise.

    NOTE(review): row[13] is labelled "Bytes" in the header but is scaled
    as if it were kbit/s (divided by 1000) -- confirm the export's units.
    """
    assert "LEGACY" not in inputfile
    assert "SORTED" in inputfile
    assert "self" in inputfile
    ret = []
    if conntype == "wifi":
        client_subnetaddr = config_parser.get(section, "client_subnetaddr_wifi")
        edgeserver_subnetaddr = config_parser.get(section, "edgeserver_subnetaddr_wifi")
        remoteserver_subnetaddr = config_parser.get(section, "remoteserver_subnetaddr_wifi")
    elif conntype == "lte":
        client_subnetaddr = config_parser.get(section, "client_subnetaddr_lte")
        edgeserver_subnetaddr = config_parser.get(section, "edgeserver_subnetaddr_lte")
        remoteserver_subnetaddr = config_parser.get(section, "remoteserver_subnetaddr_lte")
    else:
        print("unknown connection type")
        sys.exit(0)
    with open(inputfile, "r") as csvinput:
        csvreader = csv.reader(csvinput, delimiter=",")
        linecount = 0
        for row in csvreader:
            # lines 0-1: query and its arguments
            if linecount == 0 or linecount == 1:
                linecount += 1
                continue
            # line 2: column names -- verify the expected layout
            if linecount == 2:
                try:
                    assert row[13] == "Bytes"
                    assert row[6] == "Keyword"
                    assert row[2] == "ClientIP"
                    assert row[4] == "ServerIP"
                except Exception as e:
                    print(e)
                    print(row)
                    sys.exit(1)
                linecount += 1
                continue
            serverIP = row[4]
            # sanity check: the client address must be in the expected subnet
            # and the file name must match the connection type; skip otherwise
            try:
                assert row[2][:len(client_subnetaddr)].strip() == client_subnetaddr.strip()
                assert conntype.strip() in inputfile
            except Exception:
                print(conntype)
                print(inputfile)
                print(row[2][:len(client_subnetaddr)] + "!=" + client_subnetaddr)
                linecount += 1
                continue
            linecount += 1
            # keep only rows addressed to the selected server subnet
            if (edgeserver == True and serverIP[:len(edgeserver_subnetaddr)] == edgeserver_subnetaddr) or \
                    (edgeserver == False and serverIP[:len(remoteserver_subnetaddr)] == remoteserver_subnetaddr):
                bandwidthkbps = float(row[13])
                ret.append(bandwidthkbps / 1000)
    print("read " + str(linecount) + " from " + inputfile + "(including headers)")
    return ret
# returns an OrderedDict: clientIP -> list of Mbit/s samples
def readbandwidthvalues_mim_perclient(config_parser, section, inputfile, connectiontype, segment, logger):
    """Per-client variant of readbandwidthvalues_mim.

    Groups rows of a sorted legacy "mim" capture into flows
    (clientIP-clientPort-serverIP-serverPort), accumulates bytes until at
    least one second of capture time has elapsed, and stores the resulting
    Mbit/s samples per client IP.  Flows with fewer than _MINROWNUMBER rows
    are skipped.

    segment: "edge" or "cloud" (note: readbandwidthvalues_mim uses
    "remote" for the cloud subnet instead).

    NOTE(review): the final-flush block after the loop assumes the file
    contained at least one data row (otherwise serverIP is unbound and a
    NameError is raised).  ``totalbytes``/``totalbyte`` are assigned
    inconsistently and never read.  The "read kn..." log string looks
    garbled.
    """
    assert "SORTED_LEGACY" in inputfile
    assert "mim" in inputfile
    logger.debug("\n")
    #ret = {}
    ret= OrderedDict()
    last_testID = ""
    # resolve subnet prefixes for the requested connection type
    if connectiontype == "wifi":
        client_subnetaddr = config_parser.get(section, "client_subnetaddr_wifi")
        edgeserver_subnetaddr = config_parser.get(section, "edgeserver_subnetaddr_wifi")
        cloudserver_subnetaddr = config_parser.get(section, "remoteserver_subnetaddr_wifi")
    elif connectiontype == "lte":
        client_subnetaddr = config_parser.get(section, "client_subnetaddr_lte")
        edgeserver_subnetaddr = config_parser.get(section, "edgeserver_subnetaddr_lte")
        cloudserver_subnetaddr = config_parser.get(section, "remoteserver_subnetaddr_lte")
    else:
        print ("unknown connection type")
        logger.critical("unknown connection type.")
        logger.critical("EXIT")
        sys.exit(0)
    logger.debug("inputfile = " + str(inputfile))
    logger.debug("connectiontype = " + str(connectiontype))
    logger.debug("segment = " + segment)
    logger.debug("client_subnetaddr = " + str(client_subnetaddr))
    logger.debug("edgeserver_subnetaddr = " + str(edgeserver_subnetaddr))
    logger.debug("cloudserver_subnetaddr = " + str(cloudserver_subnetaddr))
    with open (inputfile, "r") as csvinput:
        csvreader = csv.reader(csvinput, delimiter=",")
        linecount = 0
        for row in csvreader:
            #line #0 contains the query
            #line #1 contains query's arguments
            if linecount == 0 or linecount == 1:
                linecount += 1
                logger.debug("line " + (str(linecount) + ": " + str(row)))
                continue
            #line #3 contains the name of each column
            # mim-bandwidth columns: ID,Timestamp,ClientIP,ClientPort,ServerIP,ServerPort,Keyword,
            # Direction,Protocol,Mode,Type,ID,Timestamp,Bytes
            if linecount == 2:
                try:
                    assert row[13] == "Bytes"
                    assert row[12] == "Timestamp" # in microsec
                    assert row[6] == "Keyword"
                    assert row[2] == "ClientIP"
                    assert row[4] == "ServerIP"
                    assert row[3] == "ClientPort"
                    assert row[5] == "ServerPort"
                except:
                    print (row)
                    logger.critical("linecount = 2 " + str(row) + "unexpercted columns")
                    logger.critical ("EXIT")
                    sys.exit(1)
                linecount += 1
                continue
            linecount += 1
            byte = float(row[13])
            currenttimestamp_micros = float(row[12]) #timestamp microseconds
            clientIP = row[2]
            serverIP = row[4]
            clientPort = row[3]
            serverPort = row[5]
            # keep only rows addressed to the selected server subnet
            if (segment == "edge" and serverIP[:len(edgeserver_subnetaddr)] == edgeserver_subnetaddr) or \
                (segment == "cloud" and serverIP[:len(cloudserver_subnetaddr)] == cloudserver_subnetaddr):
                currentTestID = clientIP + "-" + clientPort + "-" + serverIP + "-" + serverPort
                if last_testID == "":
                    #this is the first row that contains results values
                    last_testID = currentTestID
                    lastclientIP = clientIP
                    #################### FOR DEBUGGING ONLY ####################
                    lastClientPort = clientPort
                    lastServerIP = serverIP
                    lastServerPort = serverPort
                    ############################################################
                    previoustimestamp_micros = currenttimestamp_micros
                    packets_bandwidth = []
                    currentByte = 0.0
                    totalbytes = byte
                    rowcounter = 1
                    current_micros = 0.0
                elif last_testID == currentTestID:
                    #same testID
                    #################### FOR DEBUGGING ONLY ####################
                    assert clientIP == lastclientIP
                    assert serverIP == lastServerIP
                    assert clientPort == lastClientPort
                    assert serverPort == lastServerPort
                    try:
                        # rows must be time-sorted within a flow
                        assert previoustimestamp_micros <= currenttimestamp_micros
                    except Exception as e:
                        exception_type, exception_obj, exception_traceback = sys.exc_info()
                        print (linecount)
                        print ("error on " + str(exception_traceback.tb_frame.f_code.co_filename) + "," + \
                            str(exception_traceback.tb_lineno))
                        print (previoustimestamp_micros)
                        print (currenttimestamp_micros)
                        logger.critical("assertion failed: assert previoustimestamp_micros <= currenttimestamp_micros")
                        logger.critical("line number = " + str(linecount))
                        logger.critical("previoustimestamp_micros=" + str(previoustimestamp_micros))
                        logger.critical("currenttimestamp_micros=" + str(currenttimestamp_micros))
                        logger.critical ("EXIT")
                        sys.exit(-1)
                    ##############################################################
                    rowcounter += 1
                    if byte > 0:
                        #if currenttimestamp_micros - previoustimestamp_micros > 1000000:
                        #    print (currenttimestamp_micros - previoustimestamp_micros)/1000000 * 3
                        #    print(str(currenttimestamp_micros) + "-" + str(previoustimestamp_micros) + ": " + str(currenttimestamp_micros - previoustimestamp_micros))
                        currentByte += byte
                    #totalbyte += byte
                    current_micros += currenttimestamp_micros - previoustimestamp_micros
                    if current_micros >= 1000000: #more than one sec
                        # at least one second accumulated: emit one sample
                        current_s = current_micros /1000000 #from microseconds to seconds
                        bps = (currentByte * 8) / current_s
                        Mbps = bps / 1000000
                        packets_bandwidth.append(Mbps)
                        currentByte = 0.0
                        current_micros = 0.0
                    previoustimestamp_micros = currenttimestamp_micros
                else:
                    #new testID
                    # flush the remainder of the finished flow, then keep it
                    # only if it had enough rows
                    if currentByte > 0:
                        current_s = current_micros /1000000
                        bps = (currentByte * 8) / current_s
                        Mbps = bps / 1000000
                        packets_bandwidth.append(Mbps)
                    if rowcounter >= _MINROWNUMBER:
                        #Kb = 1.0 * totalbyte /1024
                        #Mb = Kb / 1024
                        #index = clientIP + str(Mb)
                        #ret[index]=packets_bandwidth
                        ret[clientIP]=packets_bandwidth
                        logger.debug("accepted " + last_testID + " with rowcounter " + str(rowcounter))
                    else:
                        logger.debug("skipped " + last_testID + " with rowcounter " + str(rowcounter))
                    last_testID = currentTestID
                    lastclientIP = clientIP
                    ############################FOR DEBUGGING ONLY##############################
                    lastClientPort = clientPort
                    lastServerIP = serverIP
                    lastServerPort = serverPort
                    #################################################################
                    previoustimestamp_micros = currenttimestamp_micros
                    packets_bandwidth = []
                    currentByte = 0.0
                    totalbyte = byte
                    rowcounter = 1
                    current_micros = 0.0
    # flush the last flow (uses the last row's loop variables -- see NOTE
    # in the docstring about empty inputs)
    if (segment == "edge" and serverIP[:len(edgeserver_subnetaddr)] == edgeserver_subnetaddr) or \
        (segment == "cloud" and serverIP[:len(cloudserver_subnetaddr)] == cloudserver_subnetaddr):
        #add the last flow results
        assert clientIP == lastclientIP
        if rowcounter >= _MINROWNUMBER:
            ret[clientIP]=packets_bandwidth
            logger.debug("accepted " + last_testID + " with rowcounter " + str(rowcounter))
        else:
            logger.debug("skipped " + last_testID + " with rowcounter " + str(rowcounter))
    #logger.debug("dict = " + str(ret))
    logger.debug(str(len(ret)) + " clients in ret")
    print ("read kn" + str(linecount) + " from " + inputfile + "(including headers)")
    return ret
def readbandwidthvalues_mim_perclient_usingfixbucket(config_parser, section, inputfile, connectiontype,
                                                     segment, logger, bucketsize_microsec):
    """Per-client variant of the fixed-bucket "mim" bandwidth reader.

    Returns (ret, totalbytes), both OrderedDicts keyed by client IP:
        ret[clientIP]        -> list of Mbit/s samples (one per completed bucket)
        totalbytes[clientIP] -> total bytes seen for that client

    segment: "edge" or "cloud" (note: the flat variant uses "remote").

    Bug fixes vs. the previous version:
      * when switching to a new client, the byte accumulator was reset
        through a misspelled variable (``currentByte``), so the previous
        client's residual bytes leaked into the new client's first bucket;
        it now resets ``currentBytes`` to 0.0;
      * the garbled "read kn..." log message was corrected to "read ...".

    NOTE(review): the last (incomplete) bucket of the last client is never
    flushed -- confirm whether that is intentional.
    """
    assert bucketsize_microsec is not None
    assert "SORTED" in inputfile
    assert "LEGACY" not in inputfile
    assert "mim" in inputfile
    logger.debug("\n")
    ret = OrderedDict()
    totalbytes = OrderedDict()
    lastclientIP = ""
    pastclientIP = []
    currentBytes = 0.0
    client_subnetaddr, edgeserver_subnetaddr, cloudserver_subnetaddr = _get_subnetaddresses(
        config_parser=config_parser, section=section, conntype=connectiontype,
        logger=logger)
    logger.debug("inputfile = " + str(inputfile))
    logger.debug("connectiontype = " + str(connectiontype))
    logger.debug("segment = " + segment)
    logger.debug("client_subnetaddr = " + str(client_subnetaddr))
    logger.debug("edgeserver_subnetaddr = " + str(edgeserver_subnetaddr))
    logger.debug("cloudserver_subnetaddr = " + str(cloudserver_subnetaddr))
    logger.debug("bucketsize_microsec = " + str(bucketsize_microsec))
    with open(inputfile, "r") as csvinput:
        csvreader = csv.reader(csvinput, delimiter=",")
        linecount = 0
        for row in csvreader:
            # line #0 contains the query, line #1 its arguments
            if linecount == 0 or linecount == 1:
                linecount += 1
                logger.debug("line " + (str(linecount) + ": " + str(row)))
                continue
            # line #2 contains the name of each column
            # mim-bandwidth columns: ID,Timestamp,ClientIP,ClientPort,ServerIP,ServerPort,Keyword,
            # Direction,Protocol,Mode,Type,ID,Timestamp,Bytes
            if linecount == 2:
                try:
                    assert row[13] == "Bytes"
                    assert row[12] == "Timestamp" # in microsec
                    assert row[6] == "Keyword"
                    assert row[2] == "ClientIP"
                    assert row[4] == "ServerIP"
                    assert row[3] == "ClientPort"
                    assert row[5] == "ServerPort"
                except:
                    print(row)
                    logger.critical("linecount = 2 " + str(row) + "unexpected columns")
                    logger.critical("EXIT")
                    sys.exit(1)
                linecount += 1
                continue
            linecount += 1
            byte = float(row[13])
            currenttimestamp_micros = float(row[12]) # timestamp in microseconds
            clientIP = row[2]
            serverIP = row[4]
            # keep only rows addressed to the selected server subnet
            if (segment == "edge" and serverIP[:len(edgeserver_subnetaddr)] == edgeserver_subnetaddr) or \
                    (segment == "cloud" and serverIP[:len(cloudserver_subnetaddr)] == cloudserver_subnetaddr):
                if lastclientIP == "":
                    # first matching row: start the bucket clock
                    lastclientIP = clientIP
                    #################### FOR DEBUGGING ONLY ####################
                    pastclientIP.append(clientIP)
                    lastServerIP = serverIP
                    ############################################################
                    totalbytes[clientIP] = byte
                    ret[clientIP] = []
                    previoustimestamp_micros = currenttimestamp_micros
                    bucket_starttime_microsec = currenttimestamp_micros
                    bucket_endtime_microsec = bucket_starttime_microsec + bucketsize_microsec
                elif lastclientIP == clientIP:
                    # same client: rows must be time-sorted
                    #################### FOR DEBUGGING ONLY ####################
                    assert serverIP == lastServerIP
                    try:
                        assert clientIP in pastclientIP
                        assert previoustimestamp_micros <= currenttimestamp_micros
                    except Exception:
                        print(clientIP)
                        print(pastclientIP)
                        print(linecount)
                        print(previoustimestamp_micros)
                        print(currenttimestamp_micros)
                        logger.critical("assertion failed: assert previoustimestamp_micros <= currenttimestamp_micros")
                        logger.critical("line number = " + str(linecount))
                        logger.critical("previoustimestamp_micros=" + str(previoustimestamp_micros))
                        logger.critical("currenttimestamp_micros=" + str(currenttimestamp_micros))
                        logger.critical("EXIT")
                        sys.exit(-1)
                    ##############################################################
                    totalbytes[clientIP] += byte
                    if currenttimestamp_micros < bucket_endtime_microsec:
                        # packet falls within the current bucket
                        currentBytes += byte
                    else:
                        # bucket finished: emit its average bandwidth
                        if currentBytes == 0:
                            Mbps = 0
                        else:
                            bucketsize_sec = 1.0 * bucketsize_microsec / 1000000
                            bps = (1.0 * currentBytes * 8) / bucketsize_sec
                            Mbps = bps / 1000000
                        ret[lastclientIP].append(Mbps)
                        currentBytes = byte
                        bucket_starttime_microsec = bucket_endtime_microsec
                        bucket_endtime_microsec = bucket_starttime_microsec + bucketsize_microsec
                else:
                    # switch to a new client: flush the previous client's
                    # last bucket, scaled by the time actually elapsed in it
                    pastclientIP.append(clientIP)
                    if currentBytes == 0:
                        Mbps = 0
                    else:
                        bucketsize_sec = 1.0 * (previoustimestamp_micros - bucket_starttime_microsec) / 1000000
                        print(bucketsize_sec)  # NOTE(review): leftover debug print?
                        bps = (1.0 * currentBytes * 8) / bucketsize_sec
                        Mbps = bps / 1000000
                    ret[lastclientIP].append(Mbps)
                    lastclientIP = clientIP
                    lastServerIP = serverIP
                    # BUGFIX: was ``currentByte = 0.0`` (typo), which left the
                    # previous client's bytes in ``currentBytes``
                    currentBytes = 0.0
                    ret[clientIP] = []
                    totalbytes[clientIP] = byte
                    bucket_starttime_microsec = currenttimestamp_micros
                    bucket_endtime_microsec = bucket_starttime_microsec + bucketsize_microsec
                previoustimestamp_micros = currenttimestamp_micros
    logger.debug(str(len(ret)) + " clients in ret")
    print("read " + str(linecount) + " from " + inputfile + "(including headers)")
    return ret, totalbytes
def readbandwidthvalues_self_perclient(config_parser, section, inputfile, server, conntype, logger):
    """Read per-client self-measured bandwidth samples from a sorted CSV dump.

    Parameters:
        config_parser: ConfigParser holding the subnet addresses for `section`.
        section: config section name the subnet options are read from.
        inputfile: CSV path; must contain "SORTED" and "self", not "LEGACY".
        server: "edge" or "cloud" -- which server subnet to keep rows for.
        conntype: "wifi" or "lte" -- selects the subnet option suffix.
        logger: logger instance (currently unused in this function).

    Returns an OrderedDict mapping clientIP -> list of bandwidth values in
    Mbps (the "Bytes" column carries kbps for self measurements, as in the
    sibling readbandwidthvalues_self_timeplot; values are divided by 1000).
    """
    assert "LEGACY" not in inputfile
    assert "SORTED" in inputfile
    assert "self" in inputfile
    # Keep insertion order so clients appear in the order first seen.
    ret = OrderedDict()
    if conntype == "wifi":
        client_subnetaddr = config_parser.get(section, "client_subnetaddr_wifi")
        edgeserver_subnetaddr = config_parser.get(section, "edgeserver_subnetaddr_wifi")
        remoteserver_subnetaddr = config_parser.get(section, "remoteserver_subnetaddr_wifi")
    elif conntype == "lte":
        client_subnetaddr = config_parser.get(section, "client_subnetaddr_lte")
        edgeserver_subnetaddr = config_parser.get(section, "edgeserver_subnetaddr_lte")
        remoteserver_subnetaddr = config_parser.get(section, "remoteserver_subnetaddr_lte")
    else:
        print ("unknown connection type" + str(conntype))
        # NOTE(review): exiting with status 0 on an error path hides the
        # failure from shell scripts; consider sys.exit(1).
        sys.exit(0)
    with open (inputfile, "r") as csvinput:
        csvreader = csv.reader(csvinput, delimiter=",")
        linecount = 0
        for row in csvreader:
            # Lines 0 and 1 hold the query and its arguments -- skip them.
            if linecount == 0 or linecount == 1:
                linecount += 1
                continue
            # Line 2 holds the column names; sanity-check the layout.
            if linecount == 2:
                try:
                    #columns: ID,Timestamp,ClientIP,ClientPort,ServerIP,ServerPort,Keyword,Direction,Protocol,
                    #         Mode,Type,ID,Timestamp,Bytes
                    assert row[13] == "Bytes"
                    assert row[6] == "Keyword"
                    assert row[2] == "ClientIP"
                    assert row[3] == "ClientPort"
                    assert row[4] == "ServerIP"
                    assert row[5] == "ServerPort"
                except Exception as e:
                    print (e)
                    print (row)
                    sys.exit(1)
                linecount += 1
                continue
            # Data row: read the highest column first so a short/malformed
            # row fails loudly here, exactly like the original column access.
            # (Unused locals keyword/clientPort/ServerPort/currentTestID from
            # the original were removed.)
            measuredbytes = row[13]
            clientIP = row[2]
            serverIP = row[4]
            try:
                assert row[2][:len(client_subnetaddr)].strip() == client_subnetaddr.strip()
                assert conntype.strip() in inputfile
            except Exception as e:
                print (conntype)
                print (inputfile)
                print (row[2] [:len(client_subnetaddr)] + "!=" + client_subnetaddr)
                linecount += 1
                continue
            linecount += 1
            if (server == "edge" and serverIP[:len(edgeserver_subnetaddr)] == edgeserver_subnetaddr) or \
                (server == "cloud" and serverIP[:len(remoteserver_subnetaddr)] == remoteserver_subnetaddr):
                if clientIP not in ret:
                    print ("first " + str(clientIP))
                    ret[clientIP] = []
                bandwidthkbps = float(measuredbytes)
                bandwidthMbps = bandwidthkbps / 1000
                ret[clientIP].append(bandwidthMbps)
            #else:
            #    print("discarded" + str(row))
    print ("read " + str(linecount) + " from " + inputfile + "(including headers)")
    return ret
def readbandwidthvalues_self_timeplot(config_parser, section, inputfile, segment, conntype, logger):
    """Read self-measured bandwidth samples with timestamps for time plots.

    Returns an OrderedDict mapping clientIP -> list of dicts with keys
    "bandwidthMbps", "clientPort", "timestamp" (plus "fragmentquality" when
    the config option `evaluate_fragmentquality` is enabled).
    """
    assert "LEGACY" not in inputfile
    assert "SORTED" in inputfile
    assert "self" in inputfile
    client_subnetaddr, edgeserver_subnetaddr, cloudserver_subnetaddr = _get_subnetaddresses(
        config_parser=config_parser, section=section, conntype=conntype,
        logger=logger)
    evaluate_fragmentquality=config_parser.getboolean(section, "evaluate_fragmentquality")
    logger.debug("inputfile = " + str(inputfile))
    logger.debug("connectiontype = " + str(conntype))
    logger.debug("segment = " + segment)
    logger.debug("client_subnetaddr = " + str(client_subnetaddr))
    logger.debug("edgeserver_subnetaddr = " + str(edgeserver_subnetaddr))
    logger.debug("cloudserver_subnetaddr = " + str(cloudserver_subnetaddr))
    logger.debug("evaluate_fragmentquality = " + str(evaluate_fragmentquality))
    ret = OrderedDict()
    with open (inputfile, "r") as csvinput:
        csvreader = csv.reader(csvinput, delimiter=",")
        linecount = 0
        for row in csvreader:
            #line 0 contains the query
            #line #1 contains its arguments
            if linecount == 0 or linecount == 1:
                linecount += 1
                continue
            #line #2 contains the name of each column
            # ID,Timestamp,ClientIP,ClientPort,ServerIP,ServerPort,Keyword,Direction,Protocol,Mode,Type,
            # ID,Timestamp,Bytes
            if linecount == 2:
                try:
                    assert row[13] == "Bytes"
                    assert row[6] == "Keyword"
                    assert row[2] == "ClientIP"
                    assert row[3] == "ClientPort"
                    assert row[4] == "ServerIP"
                except Exception as e:
                    logger.critical("unknown columns: " + str(row))
                    logger.critical("EXIT")
                    sys.exit(1)
                linecount += 1
                #print row
                continue
            try:
                # clientIP is read first so the error handler below never
                # reports a stale value from the previous row when a short
                # row raises IndexError on a later column.
                clientIP = row[2]
                measuredbytes = row[13]
                timestamp_micros = row[12]
                keyword = row[6]
                clientPort = row[3]
                serverIP = row[4]
                assert clientIP[:len(client_subnetaddr)].strip() == client_subnetaddr.strip()
                assert conntype.strip() in inputfile
            except Exception:
                # Was a bare `except:`; narrowed so SystemExit and
                # KeyboardInterrupt are not swallowed before the exit below.
                print (row)
                print (inputfile)
                logger.critical("conntype: " + str(conntype))
                logger.critical("inputfile" + str(inputfile))
                logger.critical(str(clientIP[:len(client_subnetaddr)]) + "!=" + str(client_subnetaddr))
                logger.critical("Exit")
                sys.exit(0)
            linecount += 1
            if (segment == "edge" and serverIP[:len(edgeserver_subnetaddr)] == edgeserver_subnetaddr) or \
                (segment == "cloud" and serverIP[:len(cloudserver_subnetaddr)] == cloudserver_subnetaddr):
                if clientIP not in ret:
                    ret[clientIP] = []
                # The "Bytes" column carries kbps for self measurements.
                bandwidthkbps = float(measuredbytes)
                bandwidthMbps = bandwidthkbps / 1000
                date = datetime.datetime.fromtimestamp(float(timestamp_micros) / 1000000.0)
                if evaluate_fragmentquality:
                    ret[clientIP].append({"bandwidthMbps": bandwidthMbps, "clientPort": clientPort, "timestamp": date, "fragmentquality":row[14]})
                else:
                    ret[clientIP].append({"bandwidthMbps": bandwidthMbps, "clientPort": clientPort, "timestamp": date})
    print ("read " + str(linecount) + " from " + inputfile + "(including headers)")
    return ret
def readbandwidthvalues_mim_timeplot(config_parser, section, inputfile, segment, conntype, logger):
    """Read man-in-the-middle packet timestamps per client for time plots.

    Returns an OrderedDict mapping clientIP -> list of {"timestamp": datetime}
    entries, one per captured packet whose server matches `segment`
    ("edge" or "cloud").
    """
    assert "LEGACY" not in inputfile
    assert "SORTED" in inputfile
    assert "mim" in inputfile
    client_subnetaddr, edgeserver_subnetaddr, cloudserver_subnetaddr = _get_subnetaddresses(
        config_parser=config_parser, section=section, conntype=conntype,
        logger=logger)
    logger.debug("inputfile = " + str(inputfile))
    logger.debug("connectiontype = " + str(conntype))
    logger.debug("segment = " + segment)
    logger.debug("client_subnetaddr = " + str(client_subnetaddr))
    logger.debug("edgeserver_subnetaddr = " + str(edgeserver_subnetaddr))
    logger.debug("cloudserver_subnetaddr = " + str(cloudserver_subnetaddr))
    ret = OrderedDict()
    with open (inputfile, "r") as csvinput:
        csvreader = csv.reader(csvinput, delimiter=",")
        linecount = 0
        for row in csvreader:
            #line 0 contains the query
            #line #1 contains its arguments
            if linecount == 0 or linecount == 1:
                linecount += 1
                continue
            #line #2 contains the name of each column
            # ID,Timestamp,ClientIP,ClientPort,ServerIP,ServerPort,Keyword,Direction,Protocol,Mode,Type,
            # ID,Timestamp,Bytes
            if linecount == 2:
                try:
                    assert row[13] == "Bytes"
                    assert row[6] == "Keyword"
                    assert row[2] == "ClientIP"
                    assert row[3] == "ClientPort"
                    assert row[4] == "ServerIP"
                except Exception as e:
                    logger.critical("unknown columns: " + str(row))
                    logger.critical("EXIT")
                    sys.exit(1)
                linecount += 1
                #print row
                continue
            timestamp_micros = row[12]
            clientIP = row[2]
            serverIP = row[4]
            try:
                assert clientIP[:len(client_subnetaddr)].strip() == client_subnetaddr.strip()
                assert conntype.strip() in inputfile
            except Exception:
                # Was a bare `except:`; narrowed so SystemExit and
                # KeyboardInterrupt are not swallowed before the exit below.
                logger.critical("conntype: " + str(conntype))
                logger.critical("inputfile" + str(inputfile))
                logger.critical(str(clientIP[:len(client_subnetaddr)]) + "!=" + str(client_subnetaddr))
                logger.critical("Exit")
                sys.exit(0)
            linecount += 1
            if (segment == "edge" and serverIP[:len(edgeserver_subnetaddr)] == edgeserver_subnetaddr) or \
                (segment == "cloud" and serverIP[:len(cloudserver_subnetaddr)] == cloudserver_subnetaddr):
                if clientIP not in ret:
                    ret[clientIP] = []
                date = datetime.datetime.fromtimestamp(float(timestamp_micros) / 1000000.0)
                ret[clientIP].append({"timestamp": date})
    print ("read " + str(linecount) + " from " + inputfile + "(including headers)")
    return ret
def readbandwidthvalues_mim_timeplot_usingfixbuckets(config_parser, section, inputfile, segment, conntype,
                                                        logger, bucketsize_microsec):
    """Aggregate mim packet bytes into fixed-size time buckets per client.

    Packets are grouped into consecutive buckets of `bucketsize_microsec`
    microseconds; each completed bucket yields one entry
    {"bandwidthMbps": ..., "timestamp": bucket midpoint as datetime}.
    Assumes the input is sorted so each client's rows are contiguous
    (the `assert False` branch below enforces that).

    Returns an OrderedDict mapping clientIP -> list of bucket entries.
    """
    assert "LEGACY" not in inputfile
    assert "SORTED" in inputfile
    assert "mim" in inputfile
    client_subnetaddr, edgeserver_subnetaddr, cloudserver_subnetaddr = _get_subnetaddresses(
        config_parser=config_parser, section=section, conntype=conntype,
        logger=logger)
    logger.debug("inputfile = " + str(inputfile))
    logger.debug("connectiontype = " + str(conntype))
    logger.debug("segment = " + segment)
    logger.debug("client_subnetaddr = " + str(client_subnetaddr))
    logger.debug("edgeserver_subnetaddr = " + str(edgeserver_subnetaddr))
    logger.debug("cloudserver_subnetaddr = " + str(cloudserver_subnetaddr))
    print (segment)
    print ("bucketsize_microsec " + str(bucketsize_microsec))
    ret = OrderedDict()
    lastclientIP = ""
    with open (inputfile, "r") as csvinput:
        csvreader = csv.reader(csvinput, delimiter=",")
        linecount = 0
        for row in csvreader:
            #line 0 contains the query
            #line #1 contains its arguments
            if linecount == 0 or linecount == 1:
                linecount += 1
                continue
            #line #2 contains the name of each column
            # ID,Timestamp,ClientIP,ClientPort,ServerIP,ServerPort,Keyword,Direction,Protocol,Mode,Type,
            # ID,Timestamp,Bytes
            if linecount == 2:
                try:
                    assert row[13] == "Bytes"
                    assert row[6] == "Keyword"
                    assert row[2] == "ClientIP"
                    assert row[3] == "ClientPort"
                    assert row[4] == "ServerIP"
                except Exception as e:
                    logger.critical("unknown columns: " + str(row))
                    logger.critical("EXIT")
                    sys.exit(1)
                linecount += 1
                #print row
                continue
            clientIP = row[2]
            serverIP = row[4]
            byte = float(row[13])
            currenttimestamp_micros = float(row[12])
            try:
                assert clientIP[:len(client_subnetaddr)].strip() == client_subnetaddr.strip()
                assert conntype.strip() in inputfile
            except Exception:
                # Was a bare `except:`; narrowed so SystemExit and
                # KeyboardInterrupt are not swallowed before the exit below.
                logger.critical("conntype: " + str(conntype))
                logger.critical("inputfile" + str(inputfile))
                logger.critical(str(clientIP[:len(client_subnetaddr)]) + "!=" + str(client_subnetaddr))
                logger.critical("Exit")
                sys.exit(0)
            linecount += 1
            if (segment == "edge" and serverIP[:len(edgeserver_subnetaddr)] == edgeserver_subnetaddr) or \
                (segment == "cloud" and serverIP[:len(cloudserver_subnetaddr)] == cloudserver_subnetaddr):
                if clientIP not in ret:
                    #new clientIP
                    ret[clientIP] = []
                    if len(ret) == 1:
                        #this is the first row containing results for the first target server
                        lastclientIP = clientIP
                        currentBytes = byte
                        bucket_starttime_microsec = currenttimestamp_micros
                        bucket_endtime_microsec = bucket_starttime_microsec + bucketsize_microsec
                    else:
                        #switch to a new client with a different IP:
                        #flush the previous client's open bucket ...
                        if currentBytes == 0:
                            Mbps = 0
                        else:
                            bucketsize_sec = 1.0 * bucketsize_microsec / 1000000
                            bps = (1.0 * currentBytes * 8) / bucketsize_sec
                            Mbps = bps/1000000
                        time_datetime = datetime.datetime.fromtimestamp(float(bucket_starttime_microsec + (bucketsize_microsec/2)) / 1000000.0)
                        ret[lastclientIP].append({"bandwidthMbps": Mbps, "timestamp": time_datetime})
                        lastclientIP = clientIP
                        # BUGFIX: was `currentByte = 0.0` (dead-variable typo),
                        # which left currentBytes holding the previous client's
                        # running total and dropped this packet's bytes; seed
                        # the new bucket with this packet, as in the branches
                        # above and below.
                        currentBytes = byte
                        bucket_starttime_microsec = currenttimestamp_micros
                        bucket_endtime_microsec = bucket_starttime_microsec + bucketsize_microsec
                    continue
                elif lastclientIP == clientIP:
                    if currenttimestamp_micros < bucket_endtime_microsec:
                        #packet received within the bucketsize_microsec interval
                        currentBytes += byte
                    else:
                        #packet received within the next bucketsize_microsec interval
                        if currentBytes == 0:
                            Mbps = 0
                        else:
                            bucketsize_sec = 1.0 * bucketsize_microsec / 1000000
                            bps = (1.0 * currentBytes * 8) / bucketsize_sec
                            Mbps = bps/1000000
                        time_datetime = datetime.datetime.fromtimestamp(float(bucket_starttime_microsec + (bucketsize_microsec/2)) / 1000000.0)
                        ret[clientIP].append({"bandwidthMbps": Mbps, "timestamp": time_datetime})
                        currentBytes = byte
                        bucket_starttime_microsec = bucket_endtime_microsec
                        bucket_endtime_microsec = bucket_starttime_microsec + bucketsize_microsec
                    continue
                else:
                    # "we should never get here": input must be sorted per client
                    print("NON DOVREMMO MAI ARRIVARCI")
                    assert False
    #flush the last open bucket; skip when no row ever matched so an empty
    #input no longer raises NameError/KeyError here.
    if lastclientIP != "":
        if currentBytes == 0:
            Mbps = 0
        else:
            bucketsize_sec = 1.0 * bucketsize_microsec / 1000000
            bps = (1.0 * currentBytes * 8) / bucketsize_sec
            Mbps = bps/1000000
        time_datetime = datetime.datetime.fromtimestamp(float(bucket_starttime_microsec + (bucketsize_microsec/2)) / 1000000.0)
        ret[lastclientIP].append({"bandwidthMbps": Mbps, "timestamp": time_datetime})
    print ("read " + str(linecount) + " from " + inputfile + "(including headers)")
    return ret
def readlatencyvalues_noisemim(config_parser, section, inputfile, connectiontype, segment, noise):
    """Read latency samples from a SORTED_LEGACY mim CSV for one segment.

    Parameters:
        config_parser/section: where the per-connection subnet addresses live.
        inputfile: CSV path; must contain "SORTED_LEGACY" and "mim".
        connectiontype: e.g. "wifi"/"lte"; suffix for the subnet option names.
        segment: "edge" or "remote" -- which server subnet to keep.
        noise: unused here; kept for call-site compatibility.

    Returns a list of non-zero latency values divided by 1000
    (presumably microseconds -> milliseconds -- confirm with the producer).
    """
    assert "SORTED_LEGACY" in inputfile
    assert "mim" in inputfile
    ret = []
    # client_subnetaddr is not used below, but reading it preserves the
    # original fail-fast behavior when the option is missing from the config.
    client_subnetaddr = config_parser.get(section, "client_subnetaddr_" + connectiontype)
    edgeserver_subnetaddr = config_parser.get(section, "edgeserver_subnetaddr_" + connectiontype)
    remoteserver_subnetaddr = config_parser.get(section, "remoteserver_subnetaddr_" + connectiontype)
    with open (inputfile, "r") as csvinput:
        csvreader = csv.reader(csvinput, delimiter=",")
        linecount = 0
        for row in csvreader:
            # Lines 0 and 1 hold the query and its arguments -- skip them.
            if linecount == 0 or linecount == 1:
                linecount += 1
                continue
            # Line 2 holds the column names; sanity-check the layout.
            if linecount == 2:
                try:
                    assert row[13] == "latency"
                    assert row[6] == "Keyword"
                    assert row[4] == "ServerIP"
                except Exception:
                    # Was a bare `except:`; narrowed so SystemExit and
                    # KeyboardInterrupt are not swallowed.
                    print (row)
                    sys.exit(1)
                linecount += 1
                continue
            linecount += 1
            try:
                latency = float(row[13])
            except Exception:
                # Dump the offending row column-by-column before bailing out.
                print (inputfile)
                for iii in range(0, len(row)):
                    print(str(iii) + ": " + str(row[iii]) )
                sys.exit(0)
            serverIP = row[4]
            if (segment == "edge" and serverIP[:len(edgeserver_subnetaddr)] == edgeserver_subnetaddr) or \
                (segment == "remote" and serverIP[:len(remoteserver_subnetaddr)] == remoteserver_subnetaddr):
                # Zero latencies are treated as missing samples and dropped.
                if latency != 0:
                    ret.append(latency/1000)
    print ("read " + str(linecount) + " from " + inputfile + "(including headers)")
    return ret
|
from icemac.addressbook.i18n import _
from icemac.addressbook.interfaces import MIN_SUPPORTED_DATE
import z3c.layer.pagelet
import zope.interface
import zope.schema
class ICalendarLayer(z3c.layer.pagelet.IPageletBrowserLayer):
    """Calendar browser layer (pagelet-based marker interface)."""
class IDatetime(zope.interface.Interface):
    """Object interface to edit datetime data."""
    # When set, the event spans the whole day and `time` may stay empty
    # (see the invariant below).
    whole_day_event = zope.schema.Bool(
        title=_('whole day event?'), default=False)
    date = zope.schema.Date(
        title=_('date'), required=True, min=MIN_SUPPORTED_DATE)
    # Optional because whole-day events carry no time of day.
    time = zope.schema.Time(title=_('time'), required=False)
    datetime = zope.interface.Attribute(
        '`date` and `time` combined to a datetime.')
    @zope.interface.invariant
    def on_non_whole_day_event_time_must_be_set(event):
        # Cross-field validation: a timed (non-whole-day) event needs a time.
        if not event.whole_day_event and event.time is None:
            raise zope.interface.Invalid(
                _('Either enter a `time` or select `whole day event`!'))
class IEventDatetime(zope.interface.Interface):
    """Interface to edit event's date and time."""
    # Nested object field edited through the IDatetime schema above.
    datetime = zope.schema.Object(title=_('datetime'), schema=IDatetime)
class IEventDescription(zope.interface.Interface):
    """Description of a single event which can be rendered in the calender."""
    context = zope.interface.Attribute('IEvent this description is based on.')
    datetime = zope.interface.Attribute('datetime.datetime object')
    prio = zope.interface.Attribute(
        'Event descriptions for the same `datetime` and `kind` with a higher '
        '`prio` override the ones with lower `prio`.')
    whole_day = zope.interface.Attribute(
        'Event is the whole day, so do not display time.')
    persons = zope.interface.Attribute(
        'Comma separated list of person names belonging to the event.')
    def getText(lang=None):
        """Textual description of the event.
        If `lang` is not `None` a hyphenation dict for this language is
        looked up. This might raise a `LookupError`. Otherwise the text is
        hyphenated for HTML.
        """
    def getInfo(lang=None):
        """List of additional information about the event.
        The contents of the list are defined in master data of calendar.
        """
class UnknownLanguageError(LookupError):
    """Error indicating an unknown language."""
|
import os
import logging
from logging.handlers import RotatingFileHandler
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
# FTP root directory served to clients.
HOME_DIR = "/data"
# Listen on all interfaces on the standard FTP control port.
BIND_ADDRESS = "0.0.0.0"
LISTEN_PORT = 21
# Passive-mode data ports 21100..21110 inclusive (range end is exclusive).
PASSIVE_PORTS = list(range(21100, 21111))
LOG_PATH = "/var/log/pyftpd/pyftpd.log"
logger = logging.getLogger(__name__)
def main(username, password, nat_address, perm):
    """Configure the FTP authorizer/handler and serve until interrupted.

    A named account is created when both username and password are given;
    otherwise anonymous access is enabled with the same permissions.
    """
    authorizer = DummyAuthorizer()
    if username is None or password is None:
        authorizer.add_anonymous(HOME_DIR, perm=perm)
    else:
        authorizer.add_user(username, password, HOME_DIR, perm=perm)
    # Wire the authorizer plus NAT/passive-port settings into the handler.
    handler = FTPHandler
    handler.authorizer = authorizer
    handler.masquerade_address = nat_address
    handler.passive_ports = PASSIVE_PORTS
    server = FTPServer((BIND_ADDRESS, LISTEN_PORT), handler)
    try:
        server.serve_forever()
    finally:
        # Always release sockets, even on KeyboardInterrupt.
        server.close_all()
if __name__ == '__main__':
    # Logging: DEBUG when PYFTPD_DEBUG=true, INFO otherwise, to a rotating file.
    _debug = os.getenv("PYFTPD_DEBUG", "false").lower() == 'true'
    logging.basicConfig(
        handlers=[RotatingFileHandler(LOG_PATH, maxBytes=1000000, backupCount=10)],
        level=logging.DEBUG if _debug else logging.INFO,
        format="[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s",
        datefmt='%Y-%m-%dT%H:%M:%S')
    # Environment-driven configuration: empty strings mean "not set".
    _username = os.getenv("PYFTPD_USERNAME", "") or None
    _password = os.getenv("PYFTPD_PASSWORD", "") or None
    _nat_address = os.getenv("PYFTPD_NAT_ADDRESS")
    _writable = os.getenv("PYFTPD_READWRITE", "false").lower() == 'true'
    # Full permissions for authenticated users or when explicitly writable;
    # read-only ("elr") otherwise.
    _has_credentials = _username is not None and _password is not None
    _perm = "elradfmwMT" if _writable or _has_credentials else "elr"
    logger.info("Starting server (username=%s, perm=%s, nat_address=%s)", _username, _perm, _nat_address)
    main(_username, _password, _nat_address, _perm)
|
from rubika_bot.requests import send_message
from rubika_bot.models import Keypad, KeypadRow, Button
# Three account-management buttons: "Add" on its own row,
# "Edit"/"Remove" sharing the second row.
add_btn = Button(id='100', type='Simple', button_text='Add Account')
edit_btn = Button(id='101', type='Simple', button_text='Edit Account')
remove_btn = Button(id='102', type='Simple', button_text='Remove Account')
account_keypad = Keypad(
    rows=[
        KeypadRow(buttons=[add_btn]),
        KeypadRow(buttons=[edit_btn, remove_btn]),
    ],
)
# Send the welcome message with the inline keypad attached.
send_message(
    token='SUPER_SECRET_TOKEN',
    chat_id='CHAT_ID',
    text='Welcome',
    inline_keypad=account_keypad
)
|
# Generated by Django 3.1.1 on 2020-10-23 22:42
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the gallery app's ImageModel model to the simpler name Image.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('gallery', '0003_auto_20201023_1930'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='ImageModel',
            new_name='Image',
        ),
    ]
|
"""Utility definitions."""
def load_yaml(path: str) -> dict:
    """Parse the YAML file at *path* and return its contents."""
    import yaml
    with open(path, mode='r') as f:
        return yaml.safe_load(f)
def load_json(path: str) -> dict:
    """Parse the JSON file at *path* and return its contents."""
    import ujson as json
    with open(path, mode='r') as f:
        return json.load(f)
|
# Generated by Django 2.1.5 on 2019-03-21 17:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Restricts Stock.Ticker to a fixed choice set of five symbols.
    dependencies = [
        ('Users', '0005_auto_20190321_1145'),
    ]
    operations = [
        migrations.AlterField(
            model_name='stock',
            name='Ticker',
            field=models.CharField(choices=[('AAPL', '0'), ('MSFT', '1'), ('FB', '2'), ('SPY', '3'), ('TVIX', '4')], max_length=5),
        ),
    ]
|
from django.contrib import admin
from django.utils.html import format_html
from django.urls import reverse
from rangefilter.filter import DateRangeFilter
from apps.shipments.models import Shipment, Location, TransitState
from apps.jobs.models import AsyncJob
from .filter import StateFilter
from .historical import BaseModelHistory
class AsyncJobInlineTab(admin.TabularInline):
    """Read-only inline listing a shipment's AsyncJobs on its admin page."""
    model = AsyncJob
    fields = (
        'job_id',
        'state',
        'method',
        'created_at',
        'last_try',
    )
    # All columns are display-only; together with the permission overrides
    # below this makes the inline fully read-only.
    readonly_fields = (
        'job_id',
        'state',
        'method',
        'created_at',
        'last_try',
    )
    def method(self, obj):
        # Extract the RPC method name from the job's stored parameters;
        # fall back to "??" when the key is absent.
        try:
            params = obj.parameters
            return params['rpc_method']
        except KeyError:
            pass
        return "??"
    def job_id(self, obj):
        # Render the job id as a link to the job's own admin change page.
        return format_html(
            '<a href="{}" target="_blank">{}</a>',
            reverse('admin:jobs_asyncjob_change', kwargs={'object_id': obj.id}),
            obj.id
        )
    def has_add_permission(self, request, obj=None):
        return False
    def has_change_permission(self, request, obj=None):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
# Shipment model fields that are NOT part of the public shipment schema
# (reverse relations, bookkeeping columns, wallet/vault identifiers);
# used by ShipmentAdmin below to derive the schema-only field list.
NON_SCHEMA_FIELDS = [
    'asyncjob',
    'ethaction',
    'permissionlink',
    'loadshipment',
    'trackingdata',
    'document',
    'id',
    'owner_id',
    'storage_credentials_id',
    'vault_id',
    'vault_uri',
    'device',
    'shipper_wallet_id',
    'carrier_wallet_id',
    'moderator_wallet_id',
    'updated_at',
    'created_at',
    'contract_version',
    'updated_by',
    'state',
    'delayed',
    'expected_delay_hours',
    'exception'
]
class ShipmentAdmin(admin.ModelAdmin):
    """Read-only admin for shipments (add/change/delete all disabled)."""
    # Read Only admin page until this feature is worked
    list_display = ('id', 'owner_id', 'shippers_reference', 'created_at', 'updated_at', 'shipment_state', )
    fieldsets = (
        (None, {
            'classes': ('extrapretty', ),
            'fields': (
                'id',
                ('updated_at', 'created_at',),
                ('owner_id', 'updated_by',),
                ('shipper_wallet_id', 'carrier_wallet_id', 'moderator_wallet_id',),
                ('storage_credentials_id', 'vault_id',),
                'state',
                'vault_uri',
                'device',
                'contract_version',
            )
        }),
        ('Shipment Schema Fields', {
            'classes': ('collapse',),
            # NOTE(review): format_html() returns a SafeString, but embedding
            # it in an f-string yields a plain str, losing the safe-marking --
            # verify the link is not escaped in the rendered admin page.
            'description': f'Fields in the {format_html("<a href={}>Schema</a>", "http://schema.shipchain.io")}',
            'fields': [field.name for field in Shipment._meta.get_fields() if field.name not in NON_SCHEMA_FIELDS]
        })
    )
    inlines = [
        AsyncJobInlineTab,
    ]
    search_fields = ('id', 'shipper_wallet_id', 'carrier_wallet_id', 'moderator_wallet_id', 'state', 'owner_id',
                     'ship_from_location__name', 'ship_to_location__name', 'final_destination_location__name',
                     'bill_to_location__name', )
    list_filter = [
        ('created_at', DateRangeFilter),
        ('updated_at', DateRangeFilter),
        ('delayed', admin.BooleanFieldListFilter),
        ('state', StateFilter),
    ]
    def shipment_state(self, obj):
        # Human-readable transit state for list_display.
        return TransitState(obj.state).label.upper()
    def has_delete_permission(self, request, obj=None):
        return False
    def has_add_permission(self, request):
        return False
    def has_change_permission(self, request, obj=None):
        return False
class HistoricalShipmentAdmin(BaseModelHistory, ShipmentAdmin):
    # Historical view of ShipmentAdmin: every shipment field is read-only.
    readonly_fields = [field.name for field in Shipment._meta.get_fields()]
class LocationAdmin(BaseModelHistory):
    # History-backed, read-only admin for locations; searchable by id/name.
    fieldsets = [(None, {'fields': [field.name for field in Location._meta.local_fields]})]
    readonly_fields = [field.name for field in Location._meta.get_fields()]
    search_fields = ('id', 'name__contains', )
|
import sys
import os
import boto3
import click
import json
import datetime
import time
import traceback
from util import *
from tabulate import tabulate
from botocore.exceptions import ClientError
from botocore.exceptions import NoCredentialsError
|
import sys
sys.path.append("../code")
from global_vars import DATA_DIR
import os
import logging
from data.dataset import dump
from data.ace_preprocess import load_ace05, load_ace04
logging.basicConfig(filename='ann2json.log', level=logging.INFO)
# One-shot conversions: only run when the raw ACE corpus directory exists
# and the JSON output has not been produced yet (makes re-runs idempotent).
if os.path.exists("ace2005/corpus") and not os.path.exists(DATA_DIR + "ace05.json"):
    data, vocab = load_ace05("ace2005/")
    dump(data, DATA_DIR + "ace05.json")
if os.path.exists("ace2004/corpus") and not os.path.exists(DATA_DIR + "ace04.json"):
    data, vocab = load_ace04("ace2004/")
    dump(data, DATA_DIR + "ace04.json")
|
import subprocess
# Run each case in the `testcases` file against the JS interpreter.
# Each non-comment, non-empty line has the form:
#     <source code> <- <expected output>
# The source is piped into `node index.js /noprompt`; the trimmed output is
# compared with the expected value and the first mismatch aborts with exit 1.
filename = 'testcases'
with open(filename, 'r') as f:
    lines = f.readlines()
for line in [line.strip() for line in lines]:
    # Skip comment lines and blank lines.
    if line.startswith('#'):
        continue
    if len(line) == 0:
        continue
    print()
    print('~Test case~')
    print(line)
    # Renamed from `input` to avoid shadowing the builtin.
    code, expected = line.split('<-')
    expected = expected.strip()
    print(f'code: {code}')
    print(f'expected: {expected}')
    # Escape double quotes so the snippet survives the shell's echo "...".
    code = code.replace('"', '\\"')
    # NOTE(review): building a shell command by interpolation is
    # injection-prone if testcases ever holds untrusted content (backticks,
    # $(...)); subprocess.run([...], shell=False) with an explicit stdin
    # pipe would be safer.
    cmd = f'echo "{code}" | node index.js /noprompt'
    result = subprocess.getoutput(cmd).strip()
    print(f'actual: {result}')
    if expected != result:
        print('error!')
        exit(1)
|
import os
from twilio.rest import Client
# Auth credentials
# To set up environmental variables, see http://twil.io/secure
# Credentials come from the environment (see http://twil.io/secure).
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
# Look up carrier (MCC/MNC) and country for the target number.
phone_number = "+15108675310"
print("Find outbound SMS price to: ", phone_number)
client = Client(account_sid, auth_token)
number = client.lookups.phone_numbers(phone_number).fetch()
carrier_info = number.carrier
mcc = carrier_info['mobile_country_code']
mnc = carrier_info['mobile_network_code']
country_code = number.country_code
# Fetch the country's outbound SMS prices and report the entry matching
# the carrier's MCC/MNC for "local" number types.
messaging_country = client.pricing.messaging.countries(country_code).fetch()
for country in messaging_country.outbound_sms_prices:
    if country['mcc'] != mcc or country['mnc'] != mnc:
        continue
    for price in country['prices']:
        if price['number_type'] == "local":
            print("Country: ", country_code)
            print("Base Price: ", price['base_price'])
            print("Current Price: ", price['current_price'])
|
#! /usr/bin/python
import hmac
import os.path, sys, time, mimetypes, xmlrpclib, pprint, base64
import urllib
from urllib import unquote, splittype, splithost
from config import config
pp = pprint.PrettyPrinter()
class UrllibTransport(xmlrpclib.Transport):
    # XML-RPC transport that routes requests through an HTTP proxy
    # (Python 2 only: uses xmlrpclib/urllib APIs removed in Python 3).
    def set_proxy(self, proxy):
        # proxy: URL-style string, optionally with user:pass@host credentials.
        self.proxyurl = proxy
    def request(self, host, handler, request_body, verbose=0):
        # Split the proxy URL into scheme and host part.
        type, r_type = splittype(self.proxyurl)
        phost, XXX = splithost(r_type)
        # Extract optional user:password for the Proxy-authorization header.
        puser_pass = None
        if '@' in phost:
            user_pass, phost = phost.split('@', 1)
            if ':' in user_pass:
                user, password = user_pass.split(':', 1)
                puser_pass = base64.encodestring('%s:%s' % (unquote(user),
                    unquote(password))).strip()
        # Open the target URL through the proxy, adding Basic auth if present.
        urlopener = urllib.FancyURLopener({'http':'http://%s'%phost})
        if not puser_pass:
            urlopener.addheaders = [('User-agent', self.user_agent)]
        else:
            urlopener.addheaders = [('User-agent', self.user_agent),
                ('Proxy-authorization', 'Basic ' + puser_pass) ]
        host = unquote(host)
        # POST the XML-RPC request body and parse the XML-RPC response.
        f = urlopener.open("http://%s%s"%(host,handler), request_body)
        self.verbose = verbose
        return self.parse_response(f)
# Make initial connection to service, then login as developer
# Use the proxied transport only when a proxy is configured.
p = UrllibTransport()
if( config.has_key('proxy') ):
    p.set_proxy(config['proxy'])
    server = xmlrpclib.Server(config['url'], allow_none=True, transport=p);
else:
    server = xmlrpclib.Server(config['url'], allow_none=True);
# Anonymous connect returns the session id required by user.login below.
connection = server.system.connect();
# hash_hmac('sha256', $timestamp .';'.$domain .';'. $nonce .';'.'user.get', 'remote_api_key');
#h = hmac.new(key, data, digest_module)
#result = h.hexdigest()
#session = server.user.login( config['username'], config['password']);
#session = server.user.login(config['key'], 'localhost.domd', '', 'C7nW83nDw', connection['sessid'], config['username'], config['password']);
session = server.user.login(connection['sessid'],config['username'], config['password']);
sessid = session['sessid'];
user = session['user'];
# Unix timestamp (seconds) reused for the node 'changed' field below.
timestamp = str(int(time.time()))
## Load a movie file
#filename = 'testfile.MOV'
#filesize = os.stat(filename).st_size
#filemime = mimetypes.guess_type(filename)
#fd = open(filename, 'rb')
#video_file = fd.read()
#fd.close()
# Create a file_obj dict with encoded file data
#file_obj = {
# 'file': base64.b64encode(video_file),
# 'filename': filename,
# 'filepath': 'sites/default/files/' + filename,
# 'filesize': filesize,
# 'timestamp': timestamp,
# 'uid': user['uid'],
# 'filemime': filemime,
#}
# Save the file to the server
#try:
# f = server.file.save(sessid, file_obj)
#
#except xmlrpclib.Fault, err:
# print "A fault occurred"
# print "Fault code: %d" % err.faultCode
# print "Fault string: %s" % err.faultString
#
#else:
# pp.pprint(f) # DEBUG print new file id (fid)
'''
# Get the new file from the server (verify) DEBUG - not needed - but shows how to retrieve a file based on fid
try:
ff = server.file.get(sessid, f)
except xmlrpclib.Fault, err:
print "A fault occurred"
print "Fault code: %d" % err.faultCode
print "Fault string: %s" % err.faultString
else:
# pp.pprint(ff) # DEBUG - dump the file structure - including the file data
'''
#node = server.node.get(config['key'], 'localhost', '', 'C7nW8P3nDw', connection['sessid'],1)
# Fetch an existing node (id 1) to confirm the session works.
node = server.node.get(sessid,1,{})
pp.pprint(node)
print "----"
# Create the node object and reference the new fid just created
node = {
    'type': 'story',
    'status': 1,
    'promote': 1,
    'nid': 3,
    'title': 'Remote Test ' + timestamp,
    'body': 'This is a test created from a remote app.  Easy.',
    'uid': user['uid'],
    'name': user['name'],
    'changed': timestamp,
#    'field_shortname' : [
#            {'value': 'shortname'},
#    ],
#    'field_version' : [
#            {'value': 'Newest'},
#    ],
#    'field_puid' : [
#            {'value': 'fmt/12'},
#    ],
#    'files': { f: {
#        'new': 1,    # Required to insert the referenced file->fid as new attachment!
#        'fid': f,    # f is fid from uploaded video file
#        'list': 1,   # Or 1, depending on whether you want the attachment listed on node view.
#        'description': 'Video File',  # Any text description
#        'weight': 0,
#        }
#    },
}
pp.pprint(node) # DEBUG - dump the node - shows exact format to use in other languages
try:
    #n = server.node.save(sessid, node)
    # NOTE(review): the save call above is commented out, so `n` is never
    # assigned; the next line raises NameError, which is NOT caught by the
    # xmlrpclib.Fault handler below. Re-enable the save before running this.
    print n, node
    nn = server.node.get(sessid,n,{})  # DEBUG - get the final node - not needed now that we know it works
except xmlrpclib.Fault, err:
    print "A fault occurred"
    print "Fault code: %d" % err.faultCode
    print "Fault string: %s" % err.faultString
else:
    pp.pprint(n) # DEBUG
    pp.pprint(nn) # DEBUG - dump the final node - not needed now that we know it works
class DBSession:
    # Minimal stand-in for a database session object.
    def close(self):
        # No resources to release in this stub.
        pass
class MySuperContextManager:
    # Context manager owning a DBSession: created on construction,
    # handed out on enter, closed on exit.
    def __init__(self):
        self.db = DBSession()
    def __enter__(self):
        # Give the session to the `with` body.
        return self.db
    def __exit__(self, exc_type, exc_value, traceback):
        # Always close, even when the body raised.
        self.db.close()
async def get_db():
    # Async generator dependency (FastAPI-style, presumably -- confirm the
    # caller): yields a session for the request, closes it afterwards via
    # the context manager's __exit__.
    with MySuperContextManager() as db:
        yield db
|
"""Add address fields to user
Revision ID: 4d1a5fb71db
Revises: bef47d0853
Create Date: 2015-10-10 16:56:57.670618
"""
# revision identifiers, used by Alembic.
revision = '4d1a5fb71db'
down_revision = 'bef47d0853'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Add the four nullable address columns (address, city, country, zip) to `user`."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user', sa.Column('address', sa.String(length=256), nullable=True))
    op.add_column('user', sa.Column('city', sa.String(length=256), nullable=True))
    op.add_column('user', sa.Column('country', sa.String(length=256), nullable=True))
    op.add_column('user', sa.Column('zip', sa.String(length=8), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Drop the address columns added in upgrade (reverse order)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'zip')
    op.drop_column('user', 'country')
    op.drop_column('user', 'city')
    op.drop_column('user', 'address')
    ### end Alembic commands ###
|
#!/usr/bin/env python2
from __future__ import absolute_import
from __future__ import division
import argparse
import string
import sys
import pwnlib
pwnlib.args.free_form = False
from pwn import *
from pwnlib.commandline import common
# Register the `cyclic` subcommand on pwnlib's shared command-line parser.
parser = common.parser_commands.add_parser(
    'cyclic',
    help = "Cyclic pattern creator/finder"
)
parser.add_argument(
    '-a', '--alphabet',
    metavar = 'alphabet',
    default = string.ascii_lowercase.encode(),
    type = bytes,
    help = 'The alphabet to use in the cyclic pattern (defaults to all lower case letters)',
)
parser.add_argument(
    '-n', '--length',
    metavar = 'length',
    default = 4,
    type = int,
    help = 'Size of the unique subsequences (defaults to 4).'
)
# `-c` may be given multiple times; each value updates the pwnlib context.
parser.add_argument(
    '-c', '--context',
    metavar = 'context',
    action = 'append',
    type   = common.context_arg,
    choices = common.choices,
    help = 'The os/architecture/endianness/bits the shellcode will run in (default: linux/i386), choose from: %s' % common.choices,
)
# Exactly one of: a lookup value (find its offset) or a character count
# (generate that many characters of the pattern).
group = parser.add_mutually_exclusive_group(required = True)
group.add_argument(
    '-l', '-o', '--offset', '--lookup',
    dest = 'lookup',
    metavar = 'lookup_value',
    help = 'Do a lookup instead printing the alphabet',
)
group.add_argument(
    'count',
    type = int,
    nargs = '?',
    help = 'Number of characters to print'
)
def main(args):
    """Entry point for the ``cyclic`` subcommand.

    With a positional ``count``, print a de Bruijn ("cyclic") pattern of
    that length; with ``-l/--lookup``, print the offset of the given
    subsequence within the pattern instead. Exits with status 1 on any
    invalid lookup value.
    """
    alphabet = args.alphabet
    subsize = args.length
    if args.lookup:
        pat = args.lookup
        try:
            # The lookup value may be a number (e.g. 0x61616161): pack it
            # into subsize bytes using the current pwnlib context.
            pat = packing.pack(int(pat, 0), subsize*8)
        except ValueError:
            # Not numeric; treat the value as a literal subsequence.
            pass
        if len(pat) != subsize:
            log.critical('Subpattern must be %d bytes' % subsize)
            sys.exit(1)
        if not all(c in alphabet for c in pat):
            log.critical('Pattern contains characters not present in the alphabet')
            sys.exit(1)
        offset = cyclic_find(pat, alphabet, subsize)
        if offset == -1:
            log.critical('Given pattern does not exist in cyclic pattern')
            sys.exit(1)
        else:
            print(offset)
    else:
        want = args.count
        result = cyclic(want, alphabet, subsize)
        got = len(result)
        if got < want:
            # The alphabet size bounds the number of unique subsequences.
            log.failure("Alphabet too small (max length = %i)" % got)
        sys.stdout.write(result)
        # Only add a trailing newline for interactive terminals, so piped
        # output stays exactly ``count`` bytes.
        if sys.stdout.isatty():
            sys.stdout.write('\n')
if __name__ == '__main__':
    pwnlib.commandline.common.main(__file__)
|
"""Integration for Mattermost"""
from __future__ import unicode_literals
from django.utils.functional import cached_property
from rbintegrations.basechat.forms import BaseChatIntegrationConfigForm
from rbintegrations.basechat.integration import BaseChatIntegration
from rbintegrations.slack.integration import format_link, notify
class MattermostIntegration(BaseChatIntegration):
    """Integrates Review Board with Mattermost.

    This will handle updating Mattermost channels when review requests are
    posted, changed, or closed, and when there's new activity on the review
    request.
    """

    name = 'Mattermost'
    description = (
        'Notifies channels in Mattermost when review requests are created, '
        'updated, and reviewed.'
    )

    # Settings used when a configuration does not override them.
    default_settings = {
        'webhook_url': '',
        'channel': '',
        'notify_username': 'Review Board',
    }

    config_form_cls = BaseChatIntegrationConfigForm

    # Fallback color for message attachments.
    DEFAULT_COLOR = '#efcc96'

    ASSETS_BASE_URL = 'https://static.reviewboard.org/integration-assets' \
                      '/mattermost'

    # Cache-busting query string appended to static asset URLs.
    ASSETS_TIMESTAMP = '?20160830-2346'

    # NOTE(review): ASSETS_TIMESTAMP already begins with '?', so these
    # templates produce a double '?' — confirm against the served URLs.
    LOGO_URL = '%s/reviewboard.png?%s' % (ASSETS_BASE_URL, ASSETS_TIMESTAMP)

    VALID_IMAGE_URL_EXTS = ('.png', '.bmp', '.gif', '.jpg', '.jpeg')

    TROPHY_URLS = {
        'fish': '%s/fish-trophy.png?%s' % (ASSETS_BASE_URL, ASSETS_TIMESTAMP),
        'milestone': '%s/milestone-trophy.png?%s' % (ASSETS_BASE_URL,
                                                     ASSETS_TIMESTAMP),
    }

    def notify(self, title, title_link, fallback_text, local_site,
               review_request, event_name=None, fields=None, pre_text=None,
               body=None, color=None, thumb_url=None, image_url=None):
        """Send a webhook notification to Mattermost.

        This will post the given message to any Mattermost channels
        configured to receive it.

        Args:
            title (unicode):
                The title for the message.

            title_link (unicode):
                The link for the title of the message.

            fallback_text (unicode):
                The non-rich fallback text to display in the chat, for use in
                IRC and other services.

            fields (dict, optional):
                The fields comprising the rich message to display in chat.

            local_site (reviewboard.site.models.LocalSite):
                The Local Site for the review request or review emitting
                the message. Only integration configurations matching this
                Local Site will be processed.

            review_request (reviewboard.reviews.models.ReviewRequest):
                The review request the notification is bound to.

            event_name (unicode):
                The name of the event triggering this notification.

            pre_text (unicode, optional):
                Text to display before the rest of the message.

            body (unicode, optional):
                The body of the message.

            color (unicode, optional):
                A Mattermost color string or RGB hex value for the message.

            thumb_url (unicode, optional):
                URL of an image to show on the side of the message.

            image_url (unicode, optional):
                URL of an image to show in the message.
        """
        # Fixed: ``fields`` previously defaulted to a shared mutable ``{}``;
        # use None and substitute a fresh dict per call.
        if fields is None:
            fields = {}

        # ``notify`` here resolves to the module-level function imported from
        # the Slack integration (Mattermost's webhook API is compatible).
        notify(self, title, title_link, fallback_text, local_site,
               review_request, event_name, fields, pre_text, body, color,
               thumb_url, image_url)

    def format_link(self, path, text):
        """Format the given URL and text to be shown in a Mattermost message.

        This will combine together the parts of the URL (method, domain, path)
        and format it using Mattermost's URL syntax.

        Args:
            path (unicode):
                The path on the Review Board server.

            text (unicode):
                The text for the link.

        Returns:
            unicode:
            The link for use in Mattermost.
        """
        return format_link(path, text)

    @cached_property
    def icon_static_urls(self):
        """The icons used for the integration.

        Returns:
            dict:
            The icons for Mattermost.
        """
        # Imported lazily to avoid a circular import at module load time.
        from rbintegrations.extension import RBIntegrationsExtension

        extension = RBIntegrationsExtension.instance

        return {
            '1x': extension.get_static_url('images/mattermost/icon.png'),
            '2x': extension.get_static_url('images/mattermost/icon@2x.png'),
        }
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import *
import numpy as np
import copy
class CombinatorialClassifier(nn.Module):
    """Ensemble of per-partitioning linear "meta" classifiers.

    Each of ``num_partitionings`` heads classifies the input feature into
    ``num_partitions`` coarse groups; per-class log-probabilities are then
    gathered through the registered partitioning maps and combined across
    heads.
    """
    # Optional per-partitioning weights used when rescaling gradients.
    partition_weight = None
    def __init__(self, num_classes, num_partitionings, num_partitions, feature_dim, additive=False, attention=False,
                 mode='softmax', combination='logit', local_partitionings=1):
        super(CombinatorialClassifier, self).__init__()
        #self.classifiers = nn.Linear(feature_dim, num_partitions * num_partitionings)
        # One linear "meta classifier" per partitioning, addressed by name.
        self.classifiers = nn.ModuleDict({'meta_classifier_%d' % i: nn.Linear(feature_dim, num_partitions)for i in range(num_partitionings)} )
        self.num_classes = num_classes
        self.num_partitionings = num_partitionings
        self.num_partitions = num_partitions
        self.attention = attention
        # NOTE(review): ``mode`` and ``combination`` are stored and printed
        # but not otherwise used in this class body — confirm callers.
        self.mode = mode
        self.combination = combination
        self.local_partitionings = local_partitionings
        #self.layer_norm = nn.LayerNorm(num_classes, eps=1e-6, elementwise_affine=False)
        if self.attention:
            # Small bottleneck MLP producing one attention score per head.
            self.AtModule = nn.Sequential(
                nn.Linear(feature_dim, num_partitionings // 4, bias=False),
                nn.ReLU(inplace=True),
                nn.Linear(num_partitionings // 4, num_partitionings, bias=False),
                #nn.Softmax()
            )
            print("attention module activated")
        #Adds a persistent buffer to the module.
        #This is typically used to register a buffer that should not to be considered a model parameter.
        #For example, BatchNorm's running_mean is not a parameter, but is part of the persistent state.
        self.register_buffer('partitionings', -torch.ones(num_partitionings, num_classes).long())
        self.register_buffer('partitionings_inference', -torch.ones(num_partitionings, num_classes).long())
        self.additive = additive
        print("mode : ", self.mode, 'combination : ', self.combination)
    def set_partitionings(self, partitionings_map):
        # Store the class -> partition map, one row per partitioning.
        self.partitionings.copy_(torch.LongTensor(partitionings_map).t())
        arange = torch.arange(self.num_partitionings).view(-1, 1).type_as(self.partitionings)
        # Add per-partitioning offsets (arange * num_partitions) so the
        # indices address the flattened (partitioning, partition) axis,
        # e.g. 01110 -> 01110, 23332.  (translated from Korean comment)
        self.partitionings_inference = torch.add(self.partitionings, (arange * self.num_partitions))
    def rescale_grad(self):
        # Scale classifier gradients up by the number (or total weight) of
        # partitionings, compensating for averaging over heads.
        for params in self.classifiers.parameters():
            if self.partition_weight is None:
                params.grad.mul_(self.num_partitionings)
            else:
                params.grad.mul_(self.partition_weight.sum())
    def forward(self, input, classifier_idx=None, output_sum=True, return_meta_dist=False, with_feat=False):
        # NOTE(review): ``return_meta_dist`` and ``with_feat`` are accepted
        # but never used in this body — confirm whether they are dead
        # parameters.
        assert self.partitionings.sum() > 0, 'Partitionings is never given to the module.'
        if classifier_idx is not None:
            # Single-head path: evaluate only the requested meta classifier.
            all_output = self.classifiers['meta_classifier_%d' % classifier_idx](input)
            all_output = all_output.view(-1, self.local_partitionings, self.num_partitions)
            all_output = F.log_softmax(all_output, dim=2)
            all_output = all_output.view(-1, self.local_partitionings * self.num_partitions)
            # Gather each class's log-probability through the partition map.
            output = all_output.index_select(1, self.partitionings[classifier_idx].view(-1))
            output = output.view(-1, self.local_partitionings, self.num_classes)
        else:
            # Full path: evaluate every meta classifier and concatenate.
            outputs = []
            for i in range(self.num_partitionings):
                meta_output = self.classifiers['meta_classifier_%d' % i](input)
                outputs.append(meta_output)
            all_output = torch.cat(outputs, dim=1)
            all_output = all_output.view(-1, self.num_partitionings, self.num_partitions)
            all_output = F.log_softmax(all_output, dim=2)
            all_output = all_output.view(-1, self.num_partitionings * self.num_partitions)
            output = all_output.index_select(1, self.partitionings_inference.view(-1))
            output = output.view(-1, self.num_partitionings, self.num_classes)
            # NOTE(review): this early return bypasses the ``output_sum``
            # handling below for the full-ensemble path — the source
            # indentation was ambiguous here; confirm intended behavior.
            return output
        if output_sum:
            # Combine log-probabilities by summing over heads.
            output = output.sum(1)
        return output
|
""" @package forcebalance.amberio AMBER force field input/output.
This serves as a good template for writing future force matching I/O
modules for other programs because it's so simple.
@author Lee-Ping Wang
@date 01/2012
"""
from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import zip
from builtins import range
from builtins import object
import os, sys, re
import copy
from re import match, sub, split, findall
import networkx as nx
from forcebalance.nifty import isint, isfloat, _exec, LinkFile, warn_once, which, onefile, listfiles, warn_press_key, wopen, printcool, printcool_dictionary
import numpy as np
from forcebalance import BaseReader
from forcebalance.engine import Engine
from forcebalance.liquid import Liquid
from forcebalance.abinitio import AbInitio
from forcebalance.interaction import Interaction
from forcebalance.vibration import Vibration
from forcebalance.molecule import Molecule
from collections import OrderedDict, defaultdict, namedtuple
# Rudimentary NetCDF file usage
from scipy.io.netcdf import netcdf_file
try:
    # Some functions require the Python API to sander "pysander".
    # pysander is optional, so a missing module is tolerated.
    import sander
except ImportError:
    # Fixed: previously a bare ``except:``, which would also swallow
    # SystemExit/KeyboardInterrupt; only a failed import is tolerated now.
    pass
from forcebalance.output import getLogger
# Module-level logger for this file.
logger = getLogger(__name__)
# Boltzmann's constant in kcal/(mol*K)
kb_kcal = 0.0019872041
# Parameter dictionary for mol2 files: the only parameterized quantity is
# the Coulomb (partial) charge, read from field 8 of an atom record.
mol2_pdict = {'COUL':{'Atom':[1], 8:''}}
# Parameter dictionary for frcmod files: maps interaction type to the
# split-line field indices holding its parameters.
# K = force constant, B = equilibrium value; for VDW, S/T are the two
# nonbonded parameters (presumably radius/well depth — TODO confirm).
frcmod_pdict = {'BONDS': {'Atom':[0], 1:'K', 2:'B'},
                'ANGLES':{'Atom':[0], 1:'K', 2:'B'},
                'PDIHS1':{'Atom':[0], 2:'K', 3:'B'},
                'PDIHS2':{'Atom':[0], 2:'K', 3:'B'},
                'PDIHS3':{'Atom':[0], 2:'K', 3:'B'},
                'PDIHS4':{'Atom':[0], 2:'K', 3:'B'},
                'PDIHS5':{'Atom':[0], 2:'K', 3:'B'},
                'PDIHS6':{'Atom':[0], 2:'K', 3:'B'},
                'IDIHS' :{'Atom':[0], 1:'K', 2:'B'},
                'VDW':{'Atom':[0], 1:'S', 2:'T'}
                }
def is_mol2_atom(line):
    """Return True if *line* looks like a mol2 atom record: at least nine
    fields, with an integer in field 0 and floats in fields 2, 3, 4 and 8."""
    fields = line.split()
    if len(fields) < 9:
        return False
    checks = [isint(fields[0])]
    checks.extend(isfloat(fields[k]) for k in (2, 3, 4, 8))
    return all(checks)
def write_leap(fnm, mol2=None, frcmod=None, pdb=None, prefix='amber', spath=None, delcheck=False):
    """ Parse and edit an AMBER LEaP input file. Output file is written to inputfile_ (with trailing underscore.)

    Parameters
    ----------
    fnm : str
        Name of the tleap input file to read.
    mol2 : list of str, optional
        mol2 file names expected to be loaded by the script (sanity-checked).
    frcmod : list of str, optional
        frcmod file names expected to be loaded by the script (sanity-checked).
    pdb : str, optional
        If provided, replaces the file name in any ``loadpdb`` line.
    prefix : str, optional
        Prefix of the .prmtop/.inpcrd files written by ``saveamberparm``.
    spath : list of str, optional
        Search path used to verify that loaded files exist ('.' is appended).
    delcheck : bool, optional
        If True, comment out ``check`` commands in the script.
    """
    # Fixed: the original used mutable default arguments ([]), so the shared
    # default ``spath`` accumulated a '.' entry on every call.
    if mol2 is None: mol2 = []
    if frcmod is None: frcmod = []
    if spath is None: spath = []
    have_fmod = []
    have_mol2 = []
    # The lines that will be printed out to actually run tleap
    line_out = []
    # Commands whose last whitespace-separated token names a file to load.
    aload = ['loadamberparams', 'source', 'loadoff']
    aload_eq = ['loadmol2']
    spath.append('.')
    # Default name for the "unit" that is written to prmtop/inpcrd
    ambername = 'amber'
    with open(fnm) as fobj:  # fixed: close the input file deterministically
        for line in fobj:
            # Skip comment lines
            if line.strip().startswith('#'): continue
            line = line.split('#')[0]
            s = line.split()
            ll = line.lower()
            ls = line.lower().split()
            # Check to see if all files being loaded are in the search path
            if '=' in line:
                if ll.split('=')[1].split()[0] in aload_eq:
                    if not any([os.path.exists(os.path.join(d, s[-1])) for d in spath]):
                        logger.error("The file in this line cannot be loaded : " + line.strip())
                        raise RuntimeError
            elif len(ls) > 0 and ls[0] in aload:
                if not any([os.path.exists(os.path.join(d, s[-1])) for d in spath]):
                    logger.error("The file in this line cannot be loaded : " + line.strip())
                    raise RuntimeError
            if len(s) >= 2 and ls[0] == 'loadamberparams':
                have_fmod.append(s[1])
            if len(s) >= 2 and 'loadmol2' in ll:
                # Adopt the AMBER molecule name from the loadmol2 line.
                ambername = line.split('=')[0].strip()
                have_mol2.append(s[-1])
            if len(s) >= 2 and 'loadpdb' in ll:
                # Adopt the AMBER molecule name from the loadpdb line.
                ambername = line.split('=')[0].strip()
                # If we pass in our own PDB, then this line is replaced.
                if pdb is not None:
                    line = '%s = loadpdb %s\n' % (ambername, pdb)
            if len(s) >= 1 and ls[0] == 'check' and delcheck:
                # Skip over check steps if so decreed
                line = "# " + line
            if 'saveamberparm' in ll:
                # We'll write the saveamberparm line ourselves
                continue
            if len(s) >= 1 and ls[0] == 'quit':
                # Don't write the quit line.
                break
            if not line.endswith('\n'): line += '\n'
            line_out.append(line)
    # Sanity checks: If frcmod and mol2 files are provided to this function,
    # they should be in the leap.cmd file as well. There should be exactly
    # one PDB file being loaded.
    for i in frcmod:
        if i not in have_fmod:
            warn_press_key("WARNING: %s is not being loaded in %s" % (i, fnm))
    for i in mol2:
        if i not in have_mol2:
            warn_press_key("WARNING: %s is not being loaded in %s" % (i, fnm))
    fout = fnm+'_'
    line_out.append('saveamberparm %s %s.prmtop %s.inpcrd\n' % (ambername, prefix, prefix))
    line_out.append('quit\n')
    if os.path.exists(fout): os.remove(fout)
    with wopen(fout) as f: print(''.join(line_out), file=f)
def splitComment(mystr, debug=False):
    """
    Remove the comment from a line in an AMBER namelist. Had to write a separate
    function because I couldn't get regex to work

    Parameters
    ----------
    mystr : str
        Input string such as:
        restraintmask='!:WAT,NA&!@H=', ! Restraint mask for non-water, non-ions
    debug : bool, optional
        If True, echo each character, highlighting in-string characters.

    Returns
    -------
    str
        Output string with comment removed (but keeping leading and trailing whitespace) such as:
        restraintmask='!:WAT,NA&!@H=',
    """
    inStr = False    # currently inside a single-quoted string?
    commi = -1       # index of the comment character; -1 = none found
    headStr = False  # previous character was the opening quote of a string?
    for i in range(len(mystr)):
        deactiv = False
        if inStr:
            if mystr[i] == '\'':
                # A quote ends the string unless it is doubled ('' escapes
                # a literal quote inside the string).
                if i < (len(mystr)-1) and mystr[i+1] != '\'' and i > 0 and mystr[i-1] != '\'':
                    deactiv = True
                # Handle the empty string '': closing quote directly follows
                # the opening one.
                if headStr and i > 0 and mystr[i-1] == '\'':
                    deactiv = True
            headStr = False
        elif mystr[i] == '\'':
            # if i < (len(mystr)-1) and mystr[i+1] == '\'':
            #     raise IOError('A string expression should not start with double quotes')
            inStr = True
            headStr = True
        if debug:
            if inStr:
                print("\x1b[91m%s\x1b[0m" % mystr[i], end="")
            else:
                print(mystr[i], end="")
        if deactiv:
            inStr = False
        if not inStr:
            if mystr[i] == '!':
                commi = i
                break
    if debug: print()
    # Fixed: commi was initialized to 0 and tested with ``!= 0``, so a
    # comment starting at column 0 was never stripped.
    if commi >= 0:
        return mystr[:commi]
    else:
        return mystr
def parse_amber_namelist(fin):
    """
    Parse a file containing an AMBER namelist
    (only significantly tested for sander input).

    Parameters
    ----------
    fin : str
        Name of file containing the namelist

    Returns
    -------
    comments (list of lines)
        List of lines containing comments before first namelist
    names (list)
        List of names of each namelist (e.g. cntrl, ewald)
    block_dicts (list)
        List of ordered dictionaries containing variable names and values for each namelist
    suffixes (list)
        List of list of lines coming after the "slash" for each suffix
    """
    # Are we in the leading comments?
    in_comment = True
    # Are we inside an input block?
    in_block = False
    # Fixed: use a context manager so the file handle is closed even if
    # parsing raises.
    with open(fin) as fobj:
        lines = fobj.readlines()
    comments = []
    suffixes = []
    names = []
    blocks = []
    for line in lines:
        strip = line.strip()
        # Does the line start with &?
        if not in_block:
            if strip.startswith('&'):
                in_block = True
                in_comment = False
                names.append(strip[1:].lower())
                block_lines = []
                suffixes.append([])
                continue
            if in_comment:
                comments.append(line.replace('\n',''))
            else:
                # Lines after a closed block belong to that block's suffix.
                suffixes[-1].append(line.replace('\n',''))
        else:
            if strip in ['/','&end']:
                in_block = False
                blocks.append(block_lines[:])
            elif strip.startswith('&'):
                raise RuntimeError('Cannot start a namelist within a namelist')
            else:
                block_lines.append(line.replace('\n',''))
    block_dicts = []
    for name, block in zip(names, blocks):
        # Join the block into one string with comments stripped, then pull
        # out "key = value," pairs where value is a quoted string or number.
        block_string = ' '.join([splitComment(line) for line in block])
        block_split = re.findall(r"[A-Za-z0-9_]+ *= *(?:'[^']*'|[+-]?[0-9]+\.?[0-9]*),", block_string)
        block_dict = OrderedDict()
        for word in block_split:
            field1, field2 = word.split("=", 1)
            key = field1.strip().lower()
            val = re.sub(',$','',field2).strip()
            block_dict[key] = val
        block_dicts.append(block_dict)
    return comments, names, block_dicts, suffixes
def write_mdin(calctype, fout=None, nsteps=None, timestep=None, nsave=None, pbc=False, temperature=None, pressure=None, mdin_orig=None):
    """
    Write an AMBER .mdin file to carry out a calculation using sander or pmemd.cuda.

    Parameters
    ----------
    calctype : str
        The type of calculation being performed
        'min' : minimization
        'eq' : equilibration
        'md' : (production) MD
        'sp' : Single-point calculation
    fout : str
        If provided, file name that the .mdin file should be written to.
        Each variable within a namelist will occupy one line.
        Comments within namelist are not written to output.
    timestep : float
        Time step in picoseconds. For minimizations or
        single-point calculations, this is not needed
    nsteps : int
        How many MD or minimization steps to take
        For single-point calculations, this is not needed
    nsave : int
        How often to write trajectory and velocity frames
        (only production MD writes velocities)
        For single-point calculations, this is not needed
    pbc : bool
        Whether to use periodic boundary conditions
    temperature : float
        If not None, the simulation temperature
    pressure : float
        If not None, the simulation pressure
    mdin_orig : str, optional
        Custom mdin file provided by the user.
        Non-&cntrl blocks will be written to output.
        Top-of-file comments will be written to output.

    Returns
    -------
    OrderedDict
        key : value pairs in the &cntrl namelist,
        useful for passing to cpptraj or sander/cpptraj
        Python APIs in the future.
    """
    if calctype not in ['min', 'eq', 'md', 'sp']:
        raise RuntimeError("Invalid calctype")
    if calctype in ['eq', 'md']:
        if timestep is None:
            raise RuntimeError("eq and md requires timestep")
        if nsteps is None:
            raise RuntimeError("eq and md requires nsteps")
        if nsave is None:
            raise RuntimeError("eq and md requires nsave")
    if calctype == 'min':
        # This value is never used but needed
        # to prevent an error in string formatting
        timestep = 0.0
        if nsteps is None:
            nsteps = 500
        if nsave is None:
            nsave = 10
    if calctype == 'sp':
        # These values are never used but needed
        # to prevent an error in string formatting
        nsteps = 0
        timestep = 0.0
        nsave = 0
    # cntrl_vars is an OrderedDict of namedtuples.
    # Keys are variable names to be printed to mdin_orig.
    # Values are namedtuples representing values, their properties are:
    # 1) Name of the variable
    # 2) Value of the variable, should be a dictionary with three keys 'min', 'eq', 'md'.
    #    When writing the variable for a run type, it will only be printed if the value exists in the dictionary.
    #    (e.g. 'maxcyc' should only be printed out for minimization jobs.)
    # 3) Comment to be printed out with the variable
    # 4) Priority level of the variable
    #    1: The variable will always be set in the ForceBalance code at runtime (The highest)
    #    2: The variable is set by the user in the ForceBalance input file
    #    3: User may provide variable in custom mdin_orig; if not provided, default value will be used
    #    4: User may provide variable in custom mdin_orig; if not provided, it will not be printed
    cntrl_vars = OrderedDict()
    cntrl_var = namedtuple("cntrl_var", ["name", "value", "comment", "priority"])
    cntrl_vars["imin"] = cntrl_var(name="imin", value={"min":"1","eq":"0","md":"0","sp":"5"}, comment="0 = MD; 1 = Minimize; 5 = Trajectory analysis", priority=1)
    # Options pertaining to minimization
    cntrl_vars["ntmin"] = cntrl_var(name="ntmin", value={"min":"2"}, comment="Minimization algorithm; 2 = Steepest descent", priority=3)
    cntrl_vars["dx0"] = cntrl_var(name="dx0", value={"min":"0.1"}, comment="Minimizer step length", priority=3)
    cntrl_vars["maxcyc"] = cntrl_var(name="maxcyc", value={"min":"%i" % nsteps, "sp":"1"}, comment="Number of minimization steps", priority=3)
    # MD options - time step and number of steps
    cntrl_vars["dt"] = cntrl_var(name="dt", value={"eq":"%.8f" % timestep, "md":"%.8f" % timestep}, comment="Time step (ps)", priority=2)
    cntrl_vars["nstlim"] = cntrl_var(name="nstlim", value={"eq":"%i" % nsteps, "md":"%i" % nsteps}, comment="Number of MD steps", priority=2)
    # ntpr, ntwx and ntwr for eq and md runs should be set by this function.
    cntrl_vars["ntpr"] = cntrl_var(name="ntpr", value={"min":"%i" % nsave,"eq":"%i" % nsave,"md":"%i" % nsave}, comment="Interval for printing output", priority={"min":1,"eq":2,"md":2,"sp":1})
    cntrl_vars["ntwx"] = cntrl_var(name="ntwx", value={"min":"%i" % nsave,"eq":"%i" % nsave,"md":"%i" % nsave}, comment="Interval for writing trajectory", priority={"min":1,"eq":2,"md":2,"sp":1})
    cntrl_vars["ntwr"] = cntrl_var(name="ntwr", value={"min":"%i" % nsave,"eq":"%i" % nsteps,"md":"%i" % nsteps}, comment="Interval for writing restart", priority={"min":1,"eq":1,"md":1,"sp":1})
    cntrl_vars["ntwv"] = cntrl_var(name="ntwv", value={"md":"-1"}, comment="Interval for writing velocities", priority={"min":1,"eq":1,"md":2,"sp":1})
    cntrl_vars["ntwe"] = cntrl_var(name="ntwe", value={"md":"%i" % nsave}, comment="Interval for writing energies (disabled)", priority=1)
    cntrl_vars["nscm"] = cntrl_var(name="nscm", value={"eq":"1000","md":"1000"}, comment="Interval for removing COM translation/rotation", priority=3)
    # Insist on NetCDF trajectories for ntxo, ioutfm
    cntrl_vars["ntxo"] = cntrl_var(name="ntxo", value={"min":"2","eq":"2","md":"2"}, comment="Restart output format; 1 = ASCII, 2 = NetCDF", priority=1)
    cntrl_vars["ioutfm"] = cntrl_var(name="ioutfm", value={"min":"1","eq":"1","md":"1"}, comment="Trajectory format; 0 = ASCII, 1 = NetCDF", priority=1)
    # min and eq read coors only; md is a full restart
    cntrl_vars["ntx"] = cntrl_var(name="ntx", value={"min":"1","eq":"1","md":"5"}, comment="1 = Read coors only; 5 = Full restart", priority=1)
    cntrl_vars["irest"] = cntrl_var(name="irest", value={"min":"0","eq":"0","md":"1"}, comment="0 = Do not restart ; 1 = Restart", priority=1)
    # Use AMBER's default nonbonded cutoff if the user does not provide
    # Set the PBC and pressure variables: ntb, ntp, barostat, mcbarint
    if pbc:
        ntb_eqmd = "2" if pressure is not None else "1"
        ntp_eqmd = "1" if pressure is not None else "0"
        cntrl_vars["cut"] = cntrl_var(name="cut", value={"min":"8.0","eq":"8.0","md":"8.0","sp":"8.0"}, comment="Nonbonded cutoff", priority=3)
        cntrl_vars["ntb"] = cntrl_var(name="ntb", value={"min":"1","eq":ntb_eqmd,"md":ntb_eqmd,"sp":ntb_eqmd}, comment="0 = no PBC ; 1 = constant V ; 2 = constant P", priority=1)
        cntrl_vars["ntp"] = cntrl_var(name="ntp", value={"min":"0","eq":ntp_eqmd,"md":ntp_eqmd,"sp":ntp_eqmd}, comment="0 = constant V ; 1 = isotropic scaling", priority=1)
        cntrl_vars["iwrap"] = cntrl_var(name="iwrap", value={"min":"1","eq":"1","md":"1"}, comment="Wrap molecules back into box", priority=3)
        cntrl_vars["igb"] = cntrl_var(name="igb", value={"min":"0","eq":"0","md":"0","sp":"0"}, comment="0 = No generalized Born model", priority=3)
        if pressure is not None:
            # We should use Berendsen for equilibration and MC for production.
            cntrl_vars["barostat"] = cntrl_var(name="barostat", value={"eq":"1","md":"2"}, comment="1 = Berendsen; 2 = Monte Carlo", priority=1)
            cntrl_vars["mcbarint"] = cntrl_var(name="mcbarint", value={"md":"25"}, comment="MC barostat rescaling interval", priority=3)
        else:
            # If there is no pressure, these variables should not be printed.
            cntrl_vars["barostat"] = cntrl_var(name="barostat", value={}, comment="1 = Berendsen; 2 = Monte Carlo", priority=1)
            cntrl_vars["mcbarint"] = cntrl_var(name="mcbarint", value={}, comment="MC barostat rescaling interval", priority=1)
    else:
        cntrl_vars["cut"] = cntrl_var(name="cut", value={"min":"9999.0","eq":"9999.0","md":"9999.0","sp":"9999.0"}, comment="Nonbonded cutoff", priority=1)
        cntrl_vars["ntb"] = cntrl_var(name="ntb", value={"min":"0","eq":"0","md":"0","sp":"0"}, comment="0 = no PBC ; 1 = constant V ; 2 = constant P", priority=1)
        cntrl_vars["ntp"] = cntrl_var(name="ntp", value={}, comment="0 = constant V ; 1 = isotropic scaling", priority=1)
        cntrl_vars["igb"] = cntrl_var(name="igb", value={"min":"6","eq":"6","md":"6","sp":"6"}, comment="6 = Vacuum phase simulation", priority=1)
        cntrl_vars["iwrap"] = cntrl_var(name="iwrap", value={}, comment="Wrap molecules back into box", priority=1)
        cntrl_vars["barostat"] = cntrl_var(name="barostat", value={}, comment="1 = Berendsen; 2 = Monte Carlo", priority=1)
        cntrl_vars["mcbarint"] = cntrl_var(name="mcbarint", value={}, comment="MC barostat rescaling interval", priority=1)
    # Set the temperature variables tempi, temp0, ntt, gamma_ln
    if temperature is not None:
        cntrl_vars["tempi"] = cntrl_var(name="tempi", value={"eq":"%i" % temperature,"md":"%i" % temperature}, comment="Initial temperature", priority=1)
        cntrl_vars["temp0"] = cntrl_var(name="temp0", value={"eq":"%i" % temperature,"md":"%i" % temperature}, comment="Reference temperature", priority=1)
        cntrl_vars["ntt"] = cntrl_var(name="ntt", value={"eq":"3","md":"3"}, comment="Thermostat ; 3 = Langevin", priority=1)
        cntrl_vars["gamma_ln"] = cntrl_var(name="gamma_ln", value={"eq":"1.0","md":"1.0"}, comment="Langevin collision frequency (ps^-1)", priority=3)
    else:
        cntrl_vars["tempi"] = cntrl_var(name="tempi", value={}, comment="Initial temperature", priority=1)
        cntrl_vars["temp0"] = cntrl_var(name="temp0", value={}, comment="Reference temperature", priority=1)
        cntrl_vars["ntt"] = cntrl_var(name="ntt", value={}, comment="Thermostat ; 3 = Langevin", priority=1)
        cntrl_vars["gamma_ln"] = cntrl_var(name="gamma_ln", value={}, comment="Langevin collision frequency (ps^-1)", priority=1)
    # Options having to do with constraints; these should be set by the user if SHAKE is desired.
    # SHAKE is always turned off for minimization and for single-point properties.
    cntrl_vars["ntc"] = cntrl_var(name="ntc", value={"min":"1","eq":"1","md":"1","sp":"1"}, comment="SHAKE; 1 = none, 2 = H-bonds, 3 = All-bonds", priority={"min":1,"eq":3,"md":3,"sp":1})
    cntrl_vars["ntf"] = cntrl_var(name="ntf", value={"min":"1","eq":"1","md":"1","sp":"1"}, comment="No bonds involving H-atoms (use with NTC=2)", priority={"min":1,"eq":3,"md":3,"sp":1})
    cntrl_vars["tol"] = cntrl_var(name="tol", value={}, comment="SHAKE tolerance,", priority=4)
    # Random number seed for equilibration and dynamics
    cntrl_vars["ig"] = cntrl_var(name="ig", value={"eq":"-1","md":"-1"}, comment="Random number seed; -1 based on date/time", priority=3)
    def get_priority(prio_in):
        # A priority may be a single int or a per-calctype dict.
        if type(prio_in) is int:
            return prio_in
        elif type(prio_in) is dict:
            return prio_in[calctype]
    if mdin_orig is not None:
        comments, names, block_dicts, suffixes = parse_amber_namelist(mdin_orig)
        comments.append("Generated by ForceBalance from %s" % mdin_orig)
    else:
        comments = ["Generated by ForceBalance"]
        names = ['cntrl']
        block_dicts = [{}]
        suffixes = [[]]
    # Fixed: initialize user_cntrl so a custom mdin file without a &cntrl
    # namelist no longer raises NameError below.
    user_cntrl = OrderedDict()
    for name, block_dict in zip(names, block_dicts):
        if name == 'cntrl':
            user_cntrl = block_dict
            break
    cntrl_out = OrderedDict()
    cntrl_comm = OrderedDict()
    # Fill in the "high priority" options set by ForceBalance
    # Note that if value[calctype] is not set for a high-priority option,
    # that means the variable is erased from the output namelist
    checked_list = []
    for name, var in cntrl_vars.items():
        priority = get_priority(var.priority)
        if priority in [1, 2]:
            checked_list.append(name)
            if calctype in var.value:
                cntrl_out[name] = var.value[calctype]
                if priority == 1:
                    cntrl_comm[name] = "Set by FB at runtime : %s" % var.comment
                elif priority == 2:
                    cntrl_comm[name] = "From FB input file : %s" % var.comment
    # Fill in the other options set by the user
    for name, value in user_cntrl.items():
        if name not in checked_list:
            checked_list.append(name)
            cntrl_out[name] = value
            cntrl_comm[name] = "Set via user-provided mdin file"
    # Fill in default options not set by the user
    for name, var in cntrl_vars.items():
        if name not in checked_list and get_priority(var.priority) == 3:
            checked_list.append(name)
            if calctype in var.value:
                cntrl_out[name] = var.value[calctype]
                cntrl_comm[name] = "FB set by default : %s" % var.comment
    # Note: priority-4 options from cntrl_vars
    # are not used at all in this function
    for iname, name in enumerate(names):
        if name == 'cntrl':
            block_dicts[iname] = cntrl_out
    if fout is not None:
        # The with-statement closes the file; the original also called
        # f.close() inside the block, which was redundant.
        with open(fout, 'w') as f:
            for line in comments:
                print(line, file=f)
            for name, block_dict, suffix in zip(names, block_dicts, suffixes):
                print("&%s" % name, file=f)
                for key, val in block_dict.items():
                    # Fixed: keys of non-cntrl namelists have no entry in
                    # cntrl_comm; fall back instead of raising KeyError.
                    comm = cntrl_comm.get(key, "Set via user-provided mdin file")
                    print("%-20s ! %s" % ("%s=%s," % (key, val), comm), file=f)
                print("/", file=f)
                for line in suffix:
                    print("%s" % line, file=f)
    return cntrl_out
class Mol2_Reader(BaseReader):
    """Finite state machine for parsing Mol2 force field file. (just for parameterizing the charges)"""
    def __init__(self,fnm):
        # Initialize the superclass. :)
        super(Mol2_Reader,self).__init__(fnm)
        ## The parameter dictionary (defined in this file)
        self.pdict = mol2_pdict
        ## The atom numbers in the interaction (stored in the parser)
        self.atom = []
        ## The mol2 file provides a list of atom names
        self.atomnames = []
        ## The section that we're in
        self.section = None
        # The name of the molecule
        self.mol = None
    def feed(self, line):
        """Feed one line of the mol2 file into the state machine, updating
        the current section, interaction type, and parameter suffix."""
        s = line.split()
        self.ln += 1
        # In mol2 files, the only defined interaction type is the Coulomb interaction.
        if line.strip().lower() == '@<tripos>atom':
            self.itype = 'COUL'
            self.section = 'Atom'
        elif line.strip().lower() == '@<tripos>bond':
            self.itype = 'None'
            self.section = 'Bond'
        elif line.strip().lower() == '@<tripos>substructure':
            self.itype = 'None'
            self.section = 'Substructure'
        elif line.strip().lower() == '@<tripos>molecule':
            self.itype = 'None'
            self.section = 'Molecule'
        elif self.section == 'Molecule' and self.mol is None:
            # The first line after @<TRIPOS>MOLECULE carries the molecule name.
            self.mol = '_'.join(s)
        elif not is_mol2_atom(line):
            self.itype = 'None'
        if is_mol2_atom(line) and self.itype == 'COUL':
            #self.atomnames.append(s[self.pdict[self.itype]['Atom'][0]])
            #self.adict.setdefault(self.mol,[]).append(s[self.pdict[self.itype]['Atom'][0]])
            self.atomnames.append(s[0])
            self.adict.setdefault(self.mol,[]).append(s[0])
        if self.itype in self.pdict:
            if 'Atom' in self.pdict[self.itype] and match(' *[0-9]', line):
                # List the atoms in the interaction.
                #self.atom = [s[i] for i in self.pdict[self.itype]['Atom']]
                self.atom = [s[0]]
                # The suffix of the parameter ID is built from the atom #
                # types/classes involved in the interaction.
                self.suffix = ':' + '-'.join([self.mol,''.join(self.atom)])
                #self.suffix = '.'.join(self.atom)
                self.molatom = (self.mol, self.atom if type(self.atom) is list else [self.atom])
class FrcMod_Reader(BaseReader):
    """Finite state machine for parsing FrcMod force field file."""
    def __init__(self,fnm):
        # Initialize the superclass. :)
        super(FrcMod_Reader,self).__init__(fnm)
        ## The parameter dictionary (defined in this file)
        self.pdict = frcmod_pdict
        ## The atom numbers in the interaction (stored in the parser)
        self.atom = []
        ## Whether we're inside the dihedral section
        self.dihe = False
        ## The frcmod file never has any atoms in it
        self.adict = {None:None}
    def Split(self, line):
        # Split on runs of spaces, except a space preceding a '-' that does
        # not begin a number (presumably keeps hyphenated atom-type fields
        # such as "CT -CT" together — TODO confirm against sample files).
        return split(' +(?!-(?![0-9.]))', line.strip().replace('\n',''))
    def Whites(self, line):
        # The whitespace runs matching the same split rule as Split().
        return findall(' +(?!-(?![0-9.]))', line.replace('\n',''))
    def build_pid(self, pfld):
        """ Returns the parameter type (e.g. K in BONDSK) based on the
        current interaction type.

        Both the 'pdict' dictionary (see gmxio.pdict) and the
        interaction type 'state' (here, BONDS) are needed to get the
        parameter type.

        If, however, 'pdict' does not contain the ptype value, a suitable
        substitute is simply the field number.

        Note that if the interaction type state is not set, then it
        defaults to the file name, so a generic parameter ID is
        'filename.line_num.field_num'
        """
        # Dihedral continuation lines lack the atom field, so shift the
        # field index by one to align with the pdict entries.
        if self.dihe and not self.haveAtomLine:
            pfld += 1
        if hasattr(self, 'overpfx'):
            return self.overpfx + ':%i:' % pfld + self.oversfx
        ptype = self.pdict.get(self.itype,{}).get(pfld,':%i.%i' % (self.ln,pfld))
        answer = self.itype
        answer += ptype
        answer += '/'+self.suffix
        return answer
    def feed(self, line):
        """Feed one line of the frcmod file into the state machine, tracking
        the current section and interaction type."""
        s = self.Split(line)
        self.ln += 1
        if len(line.strip()) == 0:
            return
        # Section headers switch the interaction type for subsequent lines.
        if match('^dihe', line.strip().lower()):
            self.dihe = True
            return
        elif match('^mass$', line.strip().lower()):
            self.dihe = False
            self.itype = 'MASS'
            return
        elif match('^bond$', line.strip().lower()):
            self.dihe = False
            self.itype = 'BONDS'
            return
        elif match('^angle$', line.strip().lower()):
            self.dihe = False
            self.itype = 'ANGLES'
            return
        elif match('^improper$', line.strip().lower()):
            self.dihe = False
            self.itype = 'IDIHS'
            return
        elif match('^nonbon$', line.strip().lower()):
            self.dihe = False
            self.itype = 'VDW'
            return
        elif len(s) == 0:
            self.dihe = False
            return
        if self.dihe:
            if '-' in s[0]:
                # A dihedral line beginning with hyphenated atom types;
                # periodicity is read from field 4.
                self.haveAtomLine = True
                self.itype = 'PDIHS%i' % int(np.abs(float(s[4])))
            else:
                # Continuation line: no atom field, periodicity in field 3.
                self.haveAtomLine = False
                self.itype = 'PDIHS%i' % int(np.abs(float(s[3])))
        else:
            self.haveAtomLine = True
        if self.itype in self.pdict:
            if 'Atom' in self.pdict[self.itype] and self.haveAtomLine:
                # List the atoms in the interaction.
                self.atom = [s[i].replace(" -","-") for i in self.pdict[self.itype]['Atom']]
                # The suffix of the parameter ID is built from the atom #
                # types/classes involved in the interaction.
                self.suffix = ''.join(self.atom)
#=============================================================================================
# AMBER parmtop loader (from 'zander', by Randall J. Radmer)
#=============================================================================================

# A regex for extracting print format info from the FORMAT lines,
# e.g. "5E16.8" -> (count="5", type="E", width="16", precision="8").
# Use a raw string so "\." is a regex escape, not an invalid string escape
# (a DeprecationWarning historically, a SyntaxWarning since Python 3.12).
FORMAT_RE_PATTERN = re.compile(r"([0-9]+)([a-zA-Z]+)([0-9]+)\.?([0-9]*)")

# Pointer labels which map to pointer numbers at top of prmtop files
POINTER_LABELS = """
NATOM, NTYPES, NBONH, MBONA, NTHETH, MTHETA,
NPHIH, MPHIA, NHPARM, NPARM, NEXT, NRES,
NBONA, NTHETA, NPHIA, NUMBND, NUMANG, NPTRA,
NATYP, NPHB, IFPERT, NBPER, NGPER, NDPER,
MBPER, MGPER, MDPER, IFBOX, NMXRS, IFCAP
"""

# Pointer labels (above) as a list, not string.
POINTER_LABEL_LIST = POINTER_LABELS.replace(',', '').split()
class PrmtopLoader(object):
    """Parsed AMBER prmtop file.

    PrmtopLoader reads, parses and manages content from a AMBER prmtop file.

    EXAMPLES

    Parse a prmtop file of alanine dipeptide in implicit solvent.

    >>> import os, os.path
    >>> directory = os.path.join(os.getenv('YANK_INSTALL_DIR'), 'test', 'systems', 'alanine-dipeptide-gbsa')
    >>> prmtop_filename = os.path.join(directory, 'alanine-dipeptide.prmtop')
    >>> prmtop = PrmtopLoader(prmtop_filename)

    Parse a prmtop file of alanine dipeptide in explicit solvent.

    >>> import os, os.path
    >>> directory = os.path.join(os.getenv('YANK_INSTALL_DIR'), 'test', 'systems', 'alanine-dipeptide-explicit')
    >>> prmtop_filename = os.path.join(directory, 'alanine-dipeptide.prmtop')
    >>> prmtop = PrmtopLoader(prmtop_filename)
    """
    def __init__(self, inFilename):
        """
        Create a PrmtopLoader object from an AMBER prmtop file.

        ARGUMENTS

        inFilename (string) - AMBER 'new-style' prmtop file, probably generated with one of the AMBER tleap/xleap/sleap
        """
        self._prmtopVersion=None
        self._flags=[]
        self._raw_format={}
        self._raw_data={}
        # Context manager guarantees the file is closed even if parsing raises.
        with open(inFilename) as fIn:
            for line in fIn:
                if line.startswith('%VERSION'):
                    tag, self._prmtopVersion = line.rstrip().split(None, 1)
                elif line.startswith('%FLAG'):
                    tag, flag = line.rstrip().split(None, 1)
                    self._flags.append(flag)
                    self._raw_data[flag] = []
                elif line.startswith('%FORMAT'):
                    # Extract the Fortran format spec between the parentheses,
                    # e.g. "%FORMAT(5E16.8)" -> "5E16.8".
                    fmt = line.rstrip()
                    index0 = fmt.index('(')
                    index1 = fmt.index(')')
                    fmt = fmt[index0+1:index1]
                    m = FORMAT_RE_PATTERN.search(fmt)
                    self._raw_format[self._flags[-1]] = (fmt, m.group(1), m.group(2), m.group(3), m.group(4))
                elif self._flags \
                     and 'TITLE'==self._flags[-1] \
                     and not self._raw_data['TITLE']:
                    self._raw_data['TITLE'] = line.rstrip()
                else:
                    # Data line: slice into fixed-width fields per the section's format.
                    flag = self._flags[-1]
                    (fmt, numItems, itemType,
                     itemLength, itemPrecision) = self._getFormat(flag)
                    iLength = int(itemLength)
                    line = line.rstrip()
                    for index in range(0, len(line), iLength):
                        item = line[index:index+iLength]
                        if item:
                            self._raw_data[flag].append(item.strip())

    def _getFormat(self, flag=None):
        """Return the (format, count, type, width, precision) tuple for a flag;
        defaults to the most recently read flag."""
        if not flag:
            flag = self._flags[-1]
        return self._raw_format[flag]

    def _getPointerValue(self, pointerLabel):
        """Return pointer value given pointer label

        Parameter:
        - pointerLabel: a string matching one of the following:

        NATOM  : total number of atoms
        NTYPES : total number of distinct atom types
        NBONH  : number of bonds containing hydrogen
        MBONA  : number of bonds not containing hydrogen
        NTHETH : number of angles containing hydrogen
        MTHETA : number of angles not containing hydrogen
        NPHIH  : number of dihedrals containing hydrogen
        MPHIA  : number of dihedrals not containing hydrogen
        NHPARM : currently not used
        NPARM  : currently not used
        NEXT   : number of excluded atoms
        NRES   : number of residues
        NBONA  : MBONA + number of constraint bonds
        NTHETA : MTHETA + number of constraint angles
        NPHIA  : MPHIA + number of constraint dihedrals
        NUMBND : number of unique bond types
        NUMANG : number of unique angle types
        NPTRA  : number of unique dihedral types
        NATYP  : number of atom types in parameter file, see SOLTY below
        NPHB   : number of distinct 10-12 hydrogen bond pair types
        IFPERT : set to 1 if perturbation info is to be read in
        NBPER  : number of bonds to be perturbed
        NGPER  : number of angles to be perturbed
        NDPER  : number of dihedrals to be perturbed
        MBPER  : number of bonds with atoms completely in perturbed group
        MGPER  : number of angles with atoms completely in perturbed group
        MDPER  : number of dihedrals with atoms completely in perturbed groups
        IFBOX  : set to 1 if standard periodic box, 2 when truncated octahedral
        NMXRS  : number of atoms in the largest residue
        IFCAP  : set to 1 if the CAP option from edit was specified
        """
        index = POINTER_LABEL_LIST.index(pointerLabel)
        return float(self._raw_data['POINTERS'][index])

    def getNumAtoms(self):
        """Return the number of atoms in the system"""
        return int(self._getPointerValue('NATOM'))

    def getNumTypes(self):
        """Return the number of AMBER atoms types in the system"""
        return int(self._getPointerValue('NTYPES'))

    def getIfBox(self):
        """Return True if the system was build with periodic boundary conditions (PBC)"""
        return int(self._getPointerValue('IFBOX'))

    def getIfCap(self):
        """Return True if the system was build with the cap option)"""
        return int(self._getPointerValue('IFCAP'))

    def getIfPert(self):
        """Return True if the system was build with the perturbation parameters)"""
        return int(self._getPointerValue('IFPERT'))

    def getMasses(self):
        """Return a list of atomic masses in the system (cached after first call)"""
        try:
            return self._massList
        except AttributeError:
            pass
        self._massList = []
        raw_masses = self._raw_data['MASS']
        for ii in range(self.getNumAtoms()):
            self._massList.append(float(raw_masses[ii]))
        return self._massList

    def getCharges(self):
        """Return a list of atomic charges in the system (cached after first call)"""
        try:
            return self._chargeList
        except AttributeError:
            pass
        self._chargeList = []
        raw_charges = self._raw_data['CHARGE']
        for ii in range(self.getNumAtoms()):
            # prmtop stores charges scaled by 18.2223 (AMBER internal units);
            # divide to recover elementary charge units.
            self._chargeList.append(float(raw_charges[ii])/18.2223)
        return self._chargeList

    def getAtomName(self, iAtom):
        """Return the atom name for iAtom"""
        atomNames = self.getAtomNames()
        return atomNames[iAtom]

    def getAtomNames(self):
        """Return the list of the system atom names"""
        return self._raw_data['ATOM_NAME']

    def _getAtomTypeIndexes(self):
        """Return the (cached) list of integer atom type indexes."""
        try:
            return self._atomTypeIndexes
        except AttributeError:
            pass
        self._atomTypeIndexes = []
        for atomTypeIndex in self._raw_data['ATOM_TYPE_INDEX']:
            self._atomTypeIndexes.append(int(atomTypeIndex))
        return self._atomTypeIndexes

    def getAtomType(self, iAtom):
        """Return the AMBER atom type for iAtom"""
        atomTypes = self.getAtomTypes()
        return atomTypes[iAtom]

    def getAtomTypes(self):
        """Return the list of the AMBER atom types"""
        return self._raw_data['AMBER_ATOM_TYPE']

    def getResidueNumber(self, iAtom):
        """Return iAtom's residue number (1-based)"""
        return self._getResiduePointer(iAtom)+1

    def getResidueLabel(self, iAtom=None, iRes=None):
        """Return residue label for iAtom OR iRes (exactly one must be given)

        Raises Exception if neither or both are specified.
        """
        # Bug fix: the two error messages were swapped in the original.
        if iRes is None and iAtom is None:
            raise Exception("iRes or iAtom must be set")
        if iRes is not None and iAtom is not None:
            raise Exception("only specify iRes or iAtom, not both")
        if iRes is not None:
            return self._raw_data['RESIDUE_LABEL'][iRes]
        else:
            return self.getResidueLabel(iRes=self._getResiduePointer(iAtom))

    def _getResiduePointer(self, iAtom):
        """Return the 0-based residue index containing atom iAtom,
        building the atom->residue map lazily on first use."""
        try:
            return self.residuePointerDict[iAtom]
        except (AttributeError, KeyError):
            # Map not built yet (or stale); build it below.
            pass
        self.residuePointerDict = {}
        resPointers = self._raw_data['RESIDUE_POINTER']
        firstAtom = [int(p)-1 for p in resPointers]
        firstAtom.append(self.getNumAtoms())
        res = 0
        for i in range(self.getNumAtoms()):
            while firstAtom[res+1] <= i:
                res += 1
            self.residuePointerDict[i] = res
        return self.residuePointerDict[iAtom]

    def getNonbondTerms(self):
        """Return list of all rVdw, epsilon pairs for each atom. Work in the AMBER unit system."""
        try:
            return self._nonbondTerms
        except AttributeError:
            pass
        self._nonbondTerms = []
        lengthConversionFactor = 1.0
        energyConversionFactor = 1.0
        # Hoist loop-invariant lookups out of the per-atom loop.
        numTypes = self.getNumTypes()
        atomTypeIndexes = self._getAtomTypeIndexes()
        for iAtom in range(self.getNumAtoms()):
            index = (numTypes+1)*(atomTypeIndexes[iAtom]-1)
            nbIndex = int(self._raw_data['NONBONDED_PARM_INDEX'][index])-1
            if nbIndex < 0:
                raise Exception("10-12 interactions are not supported")
            acoef = float(self._raw_data['LENNARD_JONES_ACOEF'][nbIndex])
            bcoef = float(self._raw_data['LENNARD_JONES_BCOEF'][nbIndex])
            try:
                # Convert A/B coefficients to (rMin, epsilon) LJ parameters.
                rMin = (2*acoef/bcoef)**(1/6.0)
                epsilon = 0.25*bcoef*bcoef/acoef
            except ZeroDivisionError:
                # Zero coefficients (e.g. extra points): no LJ interaction.
                rMin = 1.0
                epsilon = 0.0
            rVdw = rMin/2.0*lengthConversionFactor
            epsilon = epsilon*energyConversionFactor
            self._nonbondTerms.append( (rVdw, epsilon) )
        return self._nonbondTerms

    def _getBonds(self, bondPointers):
        """Decode a flat (iAtom*3, jAtom*3, type) pointer list into
        (iAtom, jAtom, forceConstant, equilibriumLength) tuples."""
        forceConstant = self._raw_data["BOND_FORCE_CONSTANT"]
        bondEquil = self._raw_data["BOND_EQUIL_VALUE"]
        returnList = []
        forceConstConversionFactor = 1.0
        lengthConversionFactor = 1.0
        for ii in range(0,len(bondPointers),3):
            if int(bondPointers[ii])<0 or \
               int(bondPointers[ii+1])<0:
                raise Exception("Found negative bonded atom pointers %s"
                                % ((bondPointers[ii],
                                    bondPointers[ii+1]),))
            iType = int(bondPointers[ii+2])-1
            # Pointers are stored as 3*atom_index in the prmtop.
            returnList.append((int(int(bondPointers[ii])/3),
                               int(int(bondPointers[ii+1])/3),
                               float(forceConstant[iType])*forceConstConversionFactor,
                               float(bondEquil[iType])*lengthConversionFactor))
        return returnList

    def getBondsWithH(self):
        """Return list of bonded atom pairs, K, and Rmin for each bond with a hydrogen"""
        try:
            return self._bondListWithH
        except AttributeError:
            pass
        bondPointers = self._raw_data["BONDS_INC_HYDROGEN"]
        self._bondListWithH = self._getBonds(bondPointers)
        return self._bondListWithH

    def getBondsNoH(self):
        """Return list of bonded atom pairs, K, and Rmin for each bond with no hydrogen"""
        try:
            return self._bondListNoH
        except AttributeError:
            pass
        bondPointers = self._raw_data["BONDS_WITHOUT_HYDROGEN"]
        self._bondListNoH = self._getBonds(bondPointers)
        return self._bondListNoH

    def getAngles(self):
        """Return list of atom triplets, K, and ThetaMin for each bond angle"""
        try:
            return self._angleList
        except AttributeError:
            pass
        forceConstant = self._raw_data["ANGLE_FORCE_CONSTANT"]
        angleEquil = self._raw_data["ANGLE_EQUIL_VALUE"]
        anglePointers = self._raw_data["ANGLES_INC_HYDROGEN"] \
                       +self._raw_data["ANGLES_WITHOUT_HYDROGEN"]
        self._angleList = []
        forceConstConversionFactor = 1.0
        for ii in range(0,len(anglePointers),4):
            if int(anglePointers[ii])<0 or \
               int(anglePointers[ii+1])<0 or \
               int(anglePointers[ii+2])<0:
                raise Exception("Found negative angle atom pointers %s"
                                % ((anglePointers[ii],
                                    anglePointers[ii+1],
                                    anglePointers[ii+2]),))
            iType = int(anglePointers[ii+3])-1
            self._angleList.append((int(int(anglePointers[ii])/3),
                                    int(int(anglePointers[ii+1])/3),
                                    int(int(anglePointers[ii+2])/3),
                                    float(forceConstant[iType])*forceConstConversionFactor,
                                    float(angleEquil[iType])))
        return self._angleList

    def getDihedrals(self):
        """Return list of atom quads, K, phase and periodicity for each dihedral angle"""
        try:
            return self._dihedralList
        except AttributeError:
            pass
        forceConstant = self._raw_data["DIHEDRAL_FORCE_CONSTANT"]
        phase = self._raw_data["DIHEDRAL_PHASE"]
        periodicity = self._raw_data["DIHEDRAL_PERIODICITY"]
        dihedralPointers = self._raw_data["DIHEDRALS_INC_HYDROGEN"] \
                          +self._raw_data["DIHEDRALS_WITHOUT_HYDROGEN"]
        self._dihedralList = []
        forceConstConversionFactor = 1.0
        for ii in range(0,len(dihedralPointers),5):
            if int(dihedralPointers[ii])<0 or int(dihedralPointers[ii+1])<0:
                raise Exception("Found negative dihedral atom pointers %s"
                                % ((dihedralPointers[ii],
                                    dihedralPointers[ii+1],
                                    dihedralPointers[ii+2],
                                    dihedralPointers[ii+3]),))
            iType = int(dihedralPointers[ii+4])-1
            # Third/fourth pointers may be negative (improper / 1-4 exclusion
            # flags in the prmtop encoding); take abs() to recover the index.
            self._dihedralList.append((int(int(dihedralPointers[ii])/3),
                                       int(int(dihedralPointers[ii+1])/3),
                                       int(abs(int(dihedralPointers[ii+2]))/3),
                                       int(abs(int(dihedralPointers[ii+3]))/3),
                                       float(forceConstant[iType])*forceConstConversionFactor,
                                       float(phase[iType]),
                                       int(0.5+float(periodicity[iType]))))
        return self._dihedralList

    def get14Interactions(self):
        """Return list of atom pairs, chargeProduct, rMin and epsilon for each 1-4 interaction"""
        dihedralPointers = self._raw_data["DIHEDRALS_INC_HYDROGEN"] \
                          +self._raw_data["DIHEDRALS_WITHOUT_HYDROGEN"]
        returnList = []
        charges = self.getCharges()
        nonbondTerms = self.getNonbondTerms()
        for ii in range(0,len(dihedralPointers),5):
            # Positive third and fourth pointers mark dihedrals whose end
            # atoms carry a 1-4 nonbonded interaction.
            if int(dihedralPointers[ii+2])>0 and int(dihedralPointers[ii+3])>0:
                iAtom = int(int(dihedralPointers[ii])/3)
                lAtom = int(int(dihedralPointers[ii+3])/3)
                chargeProd = charges[iAtom]*charges[lAtom]
                (rVdwI, epsilonI) = nonbondTerms[iAtom]
                (rVdwL, epsilonL) = nonbondTerms[lAtom]
                # Lorentz-Berthelot combination: arithmetic rMin, geometric epsilon.
                rMin = (rVdwI+rVdwL)
                epsilon = math.sqrt(epsilonI*epsilonL)
                returnList.append((iAtom, lAtom, chargeProd, rMin, epsilon))
        return returnList

    def getExcludedAtoms(self):
        """Return list of lists, giving all pairs of atoms that should have no non-bond interactions"""
        try:
            return self._excludedAtoms
        except AttributeError:
            pass
        self._excludedAtoms = []
        numExcludedAtomsList = self._raw_data["NUMBER_EXCLUDED_ATOMS"]
        excludedAtomsList = self._raw_data["EXCLUDED_ATOMS_LIST"]
        total = 0
        for iAtom in range(self.getNumAtoms()):
            index0 = total
            n = int(numExcludedAtomsList[iAtom])
            total += n
            index1 = total
            atomList = []
            for jAtom in excludedAtomsList[index0:index1]:
                j = int(jAtom)
                # A 0 entry is a placeholder meaning "no exclusions".
                if j > 0:
                    atomList.append(j-1)
            self._excludedAtoms.append(atomList)
        return self._excludedAtoms

    def getGBParms(self, symbls=None):
        """Return list giving GB params, Radius and screening factor

        If symbls (a list of atom element symbols) is given, the screening
        factors are replaced by the GBn model values keyed on the element.
        """
        try:
            return self._gb_List
        except AttributeError:
            pass
        self._gb_List = []
        radii = self._raw_data["RADII"]
        screen = self._raw_data["SCREEN"]
        # Update screening parameters for GBn if specified.
        # Bug fix: the original compared symbl[0] == ('c' or 'C'), which
        # evaluates to symbl[0] == 'c' only, so every uppercase element
        # symbol (the normal AMBER case) fell through to the 0.5 default.
        if symbls:
            for (i, symbl) in enumerate(symbls):
                element = symbl[0].upper()
                if element == 'C':
                    screen[i] = 0.48435382330
                elif element == 'H':
                    screen[i] = 1.09085413633
                elif element == 'N':
                    screen[i] = 0.700147318409
                elif element == 'O':
                    screen[i] = 1.06557401132
                elif element == 'S':
                    screen[i] = 0.602256336067
                else:
                    screen[i] = 0.5
        lengthConversionFactor = 1.0
        for iAtom in range(len(radii)):
            self._gb_List.append((float(radii[iAtom])*lengthConversionFactor, float(screen[iAtom])))
        return self._gb_List

    def getBoxBetaAndDimensions(self):
        """Return periodic boundary box beta angle and dimensions"""
        beta = float(self._raw_data["BOX_DIMENSIONS"][0])
        x = float(self._raw_data["BOX_DIMENSIONS"][1])
        y = float(self._raw_data["BOX_DIMENSIONS"][2])
        z = float(self._raw_data["BOX_DIMENSIONS"][3])
        return (beta, x, y, z)
class AMBER(Engine):
""" Engine for carrying out general purpose AMBER calculations. """
    def __init__(self, name="amber", **kwargs):
        """Create an AMBER engine; `name` is the prefix for scratch files."""
        ## Keyword args that aren't in this list are filtered out.
        self.valkwd = ['amberhome', 'pdb', 'mol2', 'frcmod', 'leapcmd', 'mdin', 'reqpdb']
        super(AMBER,self).__init__(name=name, **kwargs)
    def setopts(self, **kwargs):
        """ Called by __init__ ; Set AMBER-specific options.

        Resolves self.amberhome (explicit kwarg or from 'sander' on PATH),
        probes for pmemd.cuda, and records tleap's search path in self.spath.
        """
        if 'amberhome' in kwargs:
            self.amberhome = kwargs['amberhome']
            if not os.path.exists(os.path.join(self.amberhome, "bin", "sander")):
                warn_press_key("The 'sander' executable indicated by %s doesn't exist! (Check amberhome)" \
                               % os.path.join(self.amberhome,"bin","sander"))
        else:
            warn_once("The 'amberhome' option was not specified; using default.")
            if which('sander') == '':
                warn_press_key("Please add AMBER executables to the PATH or specify amberhome.")
            # Fall back to the directory containing 'sander' found on the PATH.
            self.amberhome = os.path.split(which('sander'))[0]
        self.have_pmemd_cuda = False
        if os.path.exists(os.path.join(self.amberhome, "bin", "pmemd.cuda")):
            self.callamber('pmemd.cuda -h', persist=True)
            # NOTE(review): this reads the return code as an attribute on the
            # _exec callable itself -- confirm against _exec's definition.
            if _exec.returncode != 0:
                warn_press_key("pmemd.cuda gave a nonzero returncode; CUDA environment variables likely missing")
            else:
                logger.info("pmemd.cuda is available, using CUDA to accelerate calculations.\n")
                self.have_pmemd_cuda = True
        # Run tleap on a trivial input file so its startup banner reveals the
        # directories it adds to its search path; remember them for later runs.
        with wopen('.quit.leap') as f:
            print('quit', file=f)
        # AMBER search path
        self.spath = []
        for line in self.callamber('tleap -f .quit.leap'):
            if 'Adding' in line and 'to search path' in line:
                self.spath.append(line.split('Adding')[1].split()[0])
        os.remove('.quit.leap')
    def readsrc(self, **kwargs):
        """ Called by __init__ ; read files from the source directory.

        Locates the leap command file (required) and mdin file (optional),
        determines the PDB / coordinate files, and builds self.mol.
        """
        self.leapcmd = onefile(kwargs.get('leapcmd'), 'leap', err=True)
        self.mdin = onefile(kwargs.get('mdin'), 'mdin', err=False)
        self.absleap = os.path.abspath(self.leapcmd)
        if self.mdin is not None:
            self.absmdin = os.path.abspath(self.mdin)
        # Name of the molecule, currently just call it a default name.
        self.mname = 'molecule'
        # Whether to throw an error if a PDB file doesn't exist.
        reqpdb = kwargs.get('reqpdb', 1)
        # Determine the PDB file name. Amber could use this in tleap if it wants.
        # If 'pdb' is provided to Engine initialization, it will be used to
        # copy over topology information (chain, atomname etc.). If mol/coords
        # is not provided, then it will also provide the coordinates.
        pdbfnm = onefile(kwargs.get('pdb'), 'pdb' if reqpdb else None, err=reqpdb)
        # If the molecule object is provided as a keyword argument, it now
        # becomes an Engine attribute as well. Otherwise, we create the
        # Engine.mol from the provided coordinates (if they exist).
        if 'mol' in kwargs:
            self.mol = kwargs['mol']
        else:
            crdfile = None
            if 'coords' in kwargs:
                crdfile = onefile(kwargs.get('coords'), None, err=True)
            elif pdbfnm is not None:
                crdfile = pdbfnm
            if crdfile is None:
                logger.error("Cannot find a coordinate file to use\n")
                raise RuntimeError
            self.mol = Molecule(crdfile, top=pdbfnm, build_topology=False)
        # If a .pdb was not provided, we create one.
        if pdbfnm is None:
            pdbfnm = self.name + ".pdb"
            # AMBER doesn't always like the CONECT records
            self.mol[0].write(pdbfnm, write_conect=False)
        self.abspdb = os.path.abspath(pdbfnm)
        # Write the PDB that AMBER is going to read in.
        # This may or may not be identical to the one used to initialize the engine.
        # self.mol.write('%s.pdb' % self.name)
        # self.abspdb = os.path.abspath('%s.pdb' % self.name)
def callamber(self, command, stdin=None, print_to_screen=False, print_command=False, **kwargs):
""" Call AMBER; prepend amberhome to calling the appropriate ambertools program. """
csplit = command.split()
# Sometimes the engine changes dirs and the inpcrd/prmtop go missing, so we link it.
# Prepend the AMBER path to the program call.
prog = os.path.join(self.amberhome, "bin", csplit[0])
csplit[0] = prog
# No need to catch exceptions since failed AMBER calculations will return nonzero exit status.
o = _exec(' '.join(csplit), stdin=stdin, print_to_screen=print_to_screen, print_command=print_command, rbytes=1024, **kwargs)
return o
    def prepare(self, pbc=False, **kwargs):
        """ Called by __init__ ; prepare the temp directory and figure out the topology.

        Collects mol2/frcmod parameter files (from the force field object
        and/or the target folder), runs tleap to build the topology, and
        records per-atom lists in self.AtomLists.
        """
        self.AtomLists = defaultdict(list)
        self.pbc = pbc
        self.mol2 = []
        self.frcmod = []
        # if 'mol2' in kwargs:
        #     self.mol2 = kwargs['mol2'][:]
        # if 'frcmod' in kwargs:
        #     self.frcmod = kwargs['frcmod'][:]
        if hasattr(self,'FF'):
            # If the parameter files don't already exist, create them for the purpose of
            # preparing the engine, but then delete them afterward.
            prmtmp = False
            for fnm in self.FF.amber_frcmod + self.FF.amber_mol2:
                if not os.path.exists(fnm):
                    self.FF.make(np.zeros(self.FF.np))
                    prmtmp = True
            # Currently force field object only allows one mol2 and frcmod file although this can be lifted.
            for mol2 in self.FF.amber_mol2:
                self.mol2.append(mol2)
            for frcmod in self.FF.amber_frcmod:
                self.frcmod.append(frcmod)
            # self.mol2 = [self.FF.amber_mol2]
            # self.frcmod = [self.FF.amber_frcmod]
            # if 'mol2' in kwargs:
            #     logger.error("FF object is provided, which overrides mol2 keyword argument")
            #     raise RuntimeError
            # if 'frcmod' in kwargs:
            #     logger.error("FF object is provided, which overrides frcmod keyword argument")
            #     raise RuntimeError
        else:
            prmtmp = False
        # mol2 and frcmod files in the target folder should also be included
        self.absmol2 = []
        for mol2 in listfiles(kwargs.get('mol2'), 'mol2', err=False):
            if mol2 not in self.mol2:
                self.mol2.append(mol2)
                self.absmol2.append(os.path.abspath(mol2))
        self.absfrcmod = []
        for frcmod in listfiles(kwargs.get('frcmod'), 'frcmod', err=False):
            if frcmod not in self.frcmod:
                self.frcmod.append(frcmod)
                self.absfrcmod.append(os.path.abspath(frcmod))
        # Figure out the topology information.
        self.leap(read_prmtop=True, count_mols=True)
        # I also need to write the trajectory
        if 'boxes' in self.mol.Data.keys():
            logger.info("\x1b[91mWriting %s-all.crd file with no periodic box information\x1b[0m\n" % self.name)
            del self.mol.Data['boxes']
        if hasattr(self, 'target') and hasattr(self.target,'loop_over_snapshots') and self.target.loop_over_snapshots:
            if hasattr(self.target, 'qmatoms'):
                self.qmatoms = self.target.qmatoms
            else:
                self.qmatoms = list(range(self.mol.na))
            # if hasattr(self.target, 'shots'):
            #     self.mol.write("%s-all.crd" % self.name, selection=range(self.target.shots), ftype="mdcrd")
            # else:
            #     self.mol.write("%s-all.crd" % self.name, ftype="mdcrd")
        if prmtmp:
            # Delete the temporary parameter files created above.
            for f in self.FF.fnms:
                os.unlink(f)
    def leap(self, read_prmtop=False, count_mols=False, name=None, delcheck=False):
        """Run tleap to build the prmtop/inpcrd files for this system.

        read_prmtop : if True, parse the resulting prmtop and fill self.AtomLists.
        count_mols  : if True, also assign molecule numbers via bond connectivity.
        name        : file prefix for tleap outputs (defaults to self.name).
        delcheck    : passed through to write_leap.
        """
        if not os.path.exists(self.leapcmd):
            LinkFile(self.absleap, self.leapcmd)
        pdb = os.path.basename(self.abspdb)
        if not os.path.exists(pdb):
            LinkFile(self.abspdb, pdb)
        # Link over "static" mol2 and frcmod files from target folder
        # print(self.absmol2, self.absfrcmod)
        for mol2 in self.absmol2:
            if not os.path.exists(os.path.split(mol2)[-1]):
                LinkFile(mol2, os.path.split(mol2)[-1])
        for frcmod in self.absfrcmod:
            if not os.path.exists(os.path.split(frcmod)[-1]):
                LinkFile(frcmod, os.path.split(frcmod)[-1])
        if name is None: name = self.name
        # NOTE(review): tleap is invoked on "%s_", i.e. the leap command file with
        # an underscore appended -- presumably write_leap writes to that name;
        # confirm against write_leap's definition.
        write_leap(self.leapcmd, mol2=self.mol2, frcmod=self.frcmod, pdb=pdb, prefix=name, spath=self.spath, delcheck=delcheck)
        self.callamber("tleap -f %s_" % self.leapcmd)
        if read_prmtop:
            prmtop = PrmtopLoader('%s.prmtop' % name)
            na = prmtop.getNumAtoms()
            self.natoms = na
            self.AtomLists['Charge'] = prmtop.getCharges()
            self.AtomLists['Name'] = prmtop.getAtomNames()
            self.AtomLists['Mass'] = prmtop.getMasses()
            self.AtomLists['ResidueNumber'] = [prmtop.getResidueNumber(i) for i in range(na)]
            self.AtomLists['ResidueName'] = [prmtop.getResidueLabel(i) for i in range(na)]
            # AMBER virtual sites don't have to have mass zero; this is my
            # best guess at figuring out where they are.
            self.AtomMask = [self.AtomLists['Mass'][i] >= 1.0 or self.AtomLists['Name'][i] != 'LP' for i in range(na)]
            if self.pbc != prmtop.getIfBox():
                raise RuntimeError("Engine was created with pbc = %i but prmtop.getIfBox() = %i" % (self.pbc, prmtop.getIfBox()))
            # This is done only optionally, because it is costly
            if count_mols:
                # Build a bond graph; each connected component is one molecule.
                G = nx.Graph()
                for i in range(na):
                    G.add_node(i)
                for b in prmtop.getBondsNoH():
                    G.add_edge(b[0], b[1])
                for b in prmtop.getBondsWithH():
                    G.add_edge(b[0], b[1])
                gs = [G.subgraph(c).copy() for c in nx.connected_components(G)]
                # Deprecated in networkx 2.2
                # gs = list(nx.connected_component_subgraphs(G))
                self.AtomLists['MoleculeNumber'] = [None for i in range(na)]
                for ig, g in enumerate(gs):
                    for i in g.nodes():
                        self.AtomLists['MoleculeNumber'][i] = ig
def get_charges(self):
self.leap(read_prmtop=True, count_mols=False)
return np.array(self.AtomLists['Charge'])
    def evaluate_sander(self, leap=True, traj_fnm=None, snapshots=None):
        """
        Utility function for computing energy and forces using AMBER.
        Coordinates (and boxes, if pbc) are obtained from the
        Molecule object stored internally

        Parameters
        ----------
        snapshots : None or list
            If a list is provided, only compute energies / forces for snapshots listed.

        Outputs:
        Result: Dictionary containing energies and forces.
        """
        # Figure out where the trajectory information is coming from
        # First priority: Passed as input to trajfnm
        # Second priority: Using self.trajectory filename attribute
        # Third priority: Using internal Molecule object
        # 0 = using internal Molecule object
        # 1 = using NetCDF trajectory format
        mode = 0
        if traj_fnm is None and hasattr(self, 'trajectory'):
            traj_fnm = self.trajectory
        if traj_fnm is not None:
            try:
                nc = netcdf_file(traj_fnm, 'r')
                mode = 1
            except TypeError:
                print("Failed to load traj as netcdf, trying to load as Molecule object")
                mol = Molecule(traj_fnm)
        else:
            mol = self.mol
        def get_coord_box(i):
            # Return (coordinates, box) for snapshot i from whichever source is active.
            box = None
            if mode == 0:
                coords = mol.xyzs[i]
                if self.pbc:
                    box = [mol.boxes[i].a, mol.boxes[i].b, mol.boxes[i].c,
                           mol.boxes[i].alpha, mol.boxes[i].beta, mol.boxes[i].gamma]
            elif mode == 1:
                coords = nc.variables['coordinates'].data[i].copy()
                if self.pbc:
                    cl = nc.variables['cell_lengths'].data[i].copy()
                    ca = nc.variables['cell_angles'].data[i].copy()
                    box = [cl[0], cl[1], cl[2], ca[0], ca[1], ca[2]]
            return coords, box
        # Rebuild the prmtop, then configure the sander API input from the
        # single-point mdin control variables.
        self.leap(read_prmtop=False, count_mols=False, delcheck=True)
        cntrl_vars = write_mdin('sp', pbc=self.pbc, mdin_orig=self.mdin)
        if self.pbc:
            inp = sander.pme_input()
        else:
            inp = sander.gas_input()
        if 'ntc' in cntrl_vars: inp.ntc = int(cntrl_vars['ntc'])
        if 'ntf' in cntrl_vars: inp.ntf = int(cntrl_vars['ntf'])
        if 'cut' in cntrl_vars: inp.cut = float(cntrl_vars['cut'])
        # sander.setup requires an initial set of coordinates (snapshot 0).
        coord, box = get_coord_box(0)
        sander.setup("%s.prmtop" % self.name, coord, box, inp)
        Energies = []
        Forces = []
        if snapshots == None:
            if mode == 0:
                snapshots = range(len(self.mol))
            elif mode == 1:
                snapshots = range(nc.variables['coordinates'].shape[0])
        # Keep real atoms in mask.
        # sander API cannot handle virtual sites when igb > 0
        # so these codes are not needed.
        # atomsel = np.where(self.AtomMask)
        # coordsel = sum([i, i+1, i+2] for i in atomsel)
        for i in snapshots:
            coord, box = get_coord_box(i)
            if self.pbc:
                sander.set_box(*box)
            sander.set_positions(coord)
            e, f = sander.energy_forces()
            # Convert kcal/mol -> kJ/mol (x4.184) and kcal/mol/A -> kJ/mol/nm (x41.84).
            Energies.append(e.tot * 4.184)
            frc = np.array(f).flatten() * 4.184 * 10
            Forces.append(frc)
        sander.cleanup()
        if mode == 1:
            nc.close()
        Result = OrderedDict()
        Result["Energy"] = np.array(Energies)
        Result["Force"] = np.array(Forces)
        return Result
def energy_force_one(self, shot):
""" Computes the energy and force using AMBER for one snapshot. """
Result = self.evaluate_sander(snapshots=[shot])
return np.hstack((Result["Energy"].reshape(-1,1), Result["Force"]))
def energy(self):
""" Computes the energy using AMBER over a trajectory. """
return self.evaluate_sander()["Energy"]
def energy_force(self):
""" Computes the energy and force using AMBER over a trajectory. """
Result = self.evaluate_sander()
return np.hstack((Result["Energy"].reshape(-1,1), Result["Force"]))
def evaluate_cpptraj(self, leap=True, traj_fnm=None, potential=False):
""" Use cpptraj to evaluate properties over a trajectory file. """
# Figure out where the trajectory information is coming from
# First priority: Passed as input to trajfnm
# Second priority: Using self.trajectory filename attribute
if traj_fnm is None:
if hasattr(self, 'trajectory'):
traj_fnm = self.trajectory
else:
raise RuntimeError("evaluate_cpptraj needs a trajectory file name")
if leap:
self.leap(read_prmtop=False, count_mols=False, delcheck=True)
cpptraj_in=['parm %s.prmtop' % self.name]
cpptraj_in.append("trajin %s" % traj_fnm)
precision_lines = []
if potential:
cntrl_vars = write_mdin('sp', pbc=self.pbc, mdin_orig=self.mdin)
for key in ['igb', 'ntb', 'cut', 'ntc', 'ntf']:
if key not in cntrl_vars:
raise RuntimeError("Cannot use sander API because key %s not set" % key)
cpptraj_in.append("esander POTENTIAL out esander.txt ntb %s igb %s cut %s ntc %s ntf %s" %
(cntrl_vars['ntb'], cntrl_vars['igb'], cntrl_vars['cut'], cntrl_vars['ntc'], cntrl_vars['ntf']))
precision_lines.append("precision esander.txt 18 8")
cpptraj_in.append("vector DIPOLE dipole out dipoles.txt")
precision_lines.append("precision dipoles.txt 18 8")
if self.pbc:
cpptraj_in.append("volume VOLUME out volumes.txt")
precision_lines.append("precision volumes.txt 18 8")
cpptraj_in += precision_lines
with open('%s-cpptraj.in' % self.name, 'w') as f:
print('\n'.join(cpptraj_in), file=f)
self.callamber("cpptraj -i %s-cpptraj.in" % self.name)
# Gather the results
Result = OrderedDict()
if potential:
esander_lines = list(open('esander.txt').readlines())
ie = 0
for iw, w in enumerate(esander_lines[0].split()):
if w == "POTENTIAL[total]":
ie = iw
if ie == 0:
raise RuntimeError("Cannot find field corresponding to total energies")
potentials = np.array([float(line.split()[ie]) for line in esander_lines[1:]])*4.184
Result["Potentials"] = potentials
# Convert e*Angstrom to debye
dipoles = np.array([[float(w) for w in line.split()[1:4]] for line in list(open("dipoles.txt").readlines())[1:]]) / 0.20819434
Result["Dips"] = dipoles
# Volume of simulation boxes in cubic nanometers
# Conversion factor derived from the following:
# In [22]: 1.0 * gram / mole / (1.0 * nanometer)**3 / AVOGADRO_CONSTANT_NA / (kilogram/meter**3)
# Out[22]: 1.6605387831627252
conv = 1.6605387831627252
if self.pbc:
volumes = np.array([float(line.split()[1]) for line in list(open("volumes.txt").readlines())[1:]])/1000
densities = conv * np.sum(self.AtomLists['Mass']) / volumes
Result["Volumes"] = volumes
Result["Rhos"] = densities
return Result
def kineticE_cpptraj(self, leap=False, traj_fnm=None):
"""
Evaluate the kinetic energy of each frame in a trajectory using cpptraj.
This requires a netcdf-formatted trajectory containing velocities, which
is generated using ntwv=-1 and ioutfm=1.
"""
# Figure out where the trajectory information is coming from
# First priority: Passed as input to trajfnm
# Second priority: Using self.trajectory filename attribute
if traj_fnm is None:
if hasattr(self, 'trajectory'):
traj_fnm = self.trajectory
else:
raise RuntimeError("evaluate_cpptraj needs a trajectory file name")
if leap:
self.leap(read_prmtop=False, count_mols=False, delcheck=True)
cpptraj_in=['parm %s.prmtop' % self.name]
cpptraj_in.append("trajin %s" % traj_fnm)
cpptraj_in.append("temperature TEMPERATURE out temperature.txt")
cpptraj_in.append("precision temperature.txt 18 8")
with open('%s-cpptraj-temp.in' % self.name, 'w') as f:
print('\n'.join(cpptraj_in), file=f)
self.callamber("cpptraj -i %s-cpptraj-temp.in" % self.name)
temps = np.array([float(line.split()[1]) for line in list(open("temperature.txt").readlines())[1:]])
dof = 3*self.natoms
kinetics = 4.184 * kb_kcal * dof * temps / 2.0
print("Average temperature is %.2f, kinetic energy %.2f" % (np.mean(temps), np.mean(kinetics)))
# os.unlink("temperature.txt")
return kinetics
def energy_dipole(self):
""" Computes the energy and dipole using AMBER over a trajectory. """
Result = self.evaluate_cpptraj(potential=True)
return np.hstack((Result["Potentials"].reshape(-1,1), Result["Dips"]))
def interaction_energy(self, fraga, fragb):
""" Calculate the interaction energy for two fragments. """
self.A = AMBER(name="A", mol=self.mol.atom_select(fraga), amberhome=self.amberhome, leapcmd=self.leapcmd, mol2=self.mol2, frcmod=self.frcmod, reqpdb=False)
self.B = AMBER(name="B", mol=self.mol.atom_select(fragb), amberhome=self.amberhome, leapcmd=self.leapcmd, mol2=self.mol2, frcmod=self.frcmod, reqpdb=False)
# Interaction energy needs to be in kcal/mol.
return (self.energy() - self.A.energy() - self.B.energy()) / 4.184
def molecular_dynamics(self, nsteps, timestep, temperature=None, pressure=None, nequil=0, nsave=1000, minimize=True, anisotropic=False, threads=1, verbose=False, **kwargs):
    """
    Method for running a molecular dynamics simulation.

    Required arguments:
    nsteps      = (int)   Number of total time steps
    timestep    = (float) Time step in FEMTOSECONDS
    temperature = (float) Temperature control (Kelvin)
    pressure    = (float) Pressure control (atmospheres)
    nequil      = (int)   Number of additional time steps at the beginning for equilibration
    nsave       = (int)   Step interval for saving and printing data
    minimize    = (bool)  Perform an energy minimization prior to dynamics
    threads     = (int)   Specify how many OpenMP threads to use

    Returns simulation data:
    Rhos        = (array)    Density in kilogram m^-3
    Potentials  = (array)    Potential energies
    Kinetics    = (array)    Kinetic energies
    Volumes     = (array)    Box volumes
    Dips        = (3xN array) Dipole moments
    EComps      = (dict)     Energy components
    """
    if anisotropic:
        raise RuntimeError("Anisotropic barostat not implemented in AMBER")
    if threads>1:
        raise RuntimeError("Multiple threads not implemented in AMBER - for fast runs, use pmemd.cuda")
    # Prefer the GPU engine only when it is available AND the system is periodic.
    md_command = "pmemd.cuda" if (self.have_pmemd_cuda and self.pbc) else "sander"
    if minimize:
        # LPW 2018-02-11 Todo: Implement a separate function for minimization that works for
        # RMSD / vibrations as well.
        if verbose: printcool("Minimizing the energy", color=0)
        write_mdin('min', '%s-min.mdin' % self.name, pbc=self.pbc, mdin_orig=self.mdin)
        self.leap(read_prmtop=False, count_mols=False, delcheck=True)
        self.callamber("sander -i %s-min.mdin -o %s-min.mdout -p %s.prmtop -c %s.inpcrd -r %s-min.restrt -x %s-min.netcdf -inf %s-min.mdinfo -O" %
                       (self.name, self.name, self.name, self.name, self.name, self.name, self.name), print_command=True)
        nextrst = "%s-min.restrt" % self.name
    else:
        self.leap(read_prmtop=False, count_mols=False, delcheck=True)
        nextrst = "%s.inpcrd" % self.name
    # Run equilibration.  Each stage hands its restart file to the next via nextrst.
    if nequil > 0:
        # timestep is converted from fs to ps for the mdin file.
        write_mdin('eq', '%s-eq.mdin' % self.name, nsteps=nequil, timestep=timestep/1000, nsave=nsave, pbc=self.pbc,
                   temperature=temperature, pressure=pressure, mdin_orig=self.mdin)
        if verbose: printcool("Running equilibration dynamics", color=0)
        self.callamber("%s -i %s-eq.mdin -o %s-eq.mdout -p %s.prmtop -c %s -r %s-eq.restrt -x %s-eq.netcdf -inf %s-eq.mdinfo -O" %
                       (md_command, self.name, self.name, self.name, nextrst, self.name, self.name, self.name), print_command=True)
        nextrst = "%s-eq.restrt" % self.name
    # Run production.
    if verbose: printcool("Running production dynamics", color=0)
    write_mdin('md', '%s-md.mdin' % self.name, nsteps=nsteps, timestep=timestep/1000, nsave=nsave, pbc=self.pbc,
               temperature=temperature, pressure=pressure, mdin_orig=self.mdin)
    self.callamber("%s -i %s-md.mdin -o %s-md.mdout -p %s.prmtop -c %s -r %s-md.restrt -x %s-md.netcdf -inf %s-md.mdinfo -O" %
                   (md_command, self.name, self.name, self.name, nextrst, self.name, self.name, self.name), print_command=True)
    nextrst = "%s-md.restrt" % self.name
    # Analyze the production trajectory: potentials/dipoles via cpptraj, then kinetic energies.
    self.trajectory = '%s-md.netcdf' % self.name
    prop_return = self.evaluate_cpptraj(leap=False, potential=True)
    prop_return["Kinetics"] = self.kineticE_cpptraj()
    ecomp = OrderedDict()
    ecomp["Potential Energy"] = prop_return["Potentials"].copy()
    ecomp["Kinetic Energy"] = prop_return["Kinetics"].copy()
    ecomp["Total Energy"] = prop_return["Potentials"] + prop_return["Kinetics"]
    prop_return["Ecomps"] = ecomp
    return prop_return
def normal_modes(self, shot=0, optimize=True):
    """Calculate vibrational modes with AMBER's nmode program.

    Optionally performs a Newton-Raphson minimization first, then runs a
    normal mode analysis and parses the $FREQ block of the eigenvector file.

    Returns (calc_eigvals, calc_eigvecs): frequencies and mode vectors with
    the six near-zero (translation/rotation) modes discarded, sorted by
    frequency.
    """
    self.leap(read_prmtop=False, count_mols=False, delcheck=True)
    if optimize:
        # Copied from AMBER tests folder.
        opt_temp = """  Newton-Raphson minimization
 &data
     ntrun = 4, nsave=20, ndiag=2,
     nprint=1, ioseen=0,
     drms = 0.000001, maxcyc=4000, bdwnhl=0.1, dfpred = 0.1,
     scnb=2.0, scee=1.2, idiel=1,
 /
"""
        with wopen("%s-nr.in" % self.name) as f: print(opt_temp.format(), file=f)
        self.callamber("nmode -O -i %s-nr.in -c %s.inpcrd -p %s.prmtop -r %s.rst -o %s-nr.out" % (self.name, self.name, self.name, self.name, self.name))
    nmode_temp = """  normal modes
 &data
     ntrun = 1, nsave=20, ndiag=2,
     nprint=1, ioseen=0,
     drms = 0.0001, maxcyc=1, bdwnhl=1.1, dfpred = 0.1,
     scnb=2.0, scee=1.2, idiel=1,
     nvect={nvect}, eta=0.9, ivform=2,
 /
"""
    with wopen("%s-nmode.in" % self.name) as f: print(nmode_temp.format(nvect=3*self.mol.na), file=f)
    self.callamber("nmode -O -i %s-nmode.in -c %s.rst -p %s.prmtop -v %s-vecs.out -o %s-vibs.out" % (self.name, self.name, self.name, self.name, self.name))
    # My attempt at parsing AMBER frequency output.
    # vmode: 1 while inside the $FREQ section; ln counts lines within a mode group
    # (0 = header, 1 = frequencies, >=2 = three mode vectors side by side).
    vmode = 0
    ln = 0
    freqs = []
    modeA = []
    modeB = []
    modeC = []
    vecs = []
    for line in open("%s-vecs.out" % self.name).readlines():
        if line.strip() == "$FREQ":
            vmode = 1
        elif line.strip().startswith("$"):
            vmode = 0
        elif vmode == 1:
            # We are in the vibrational block now.
            if ln == 0: pass
            elif ln == 1:
                freqs += [float(i) for i in line.split()]
            else:
                # Each data line carries x/y/z components of three modes.
                modeA.append([float(i) for i in line.split()[0:3]])
                modeB.append([float(i) for i in line.split()[3:6]])
                modeC.append([float(i) for i in line.split()[6:9]])
                if len(modeA) == self.mol.na:
                    # A full set of atoms has been read for this group of modes.
                    vecs.append(modeA)
                    vecs.append(modeB)
                    vecs.append(modeC)
                    modeA = []
                    modeB = []
                    modeC = []
                    # Reset so the next line is treated as a group header again.
                    ln = -1
            ln += 1
    calc_eigvals = np.array(freqs)
    calc_eigvecs = np.array(vecs)
    # Sort by frequency absolute value and discard the six that are closest to zero
    calc_eigvecs = calc_eigvecs[np.argsort(np.abs(calc_eigvals))][6:]
    calc_eigvals = calc_eigvals[np.argsort(np.abs(calc_eigvals))][6:]
    # Sort again by frequency
    calc_eigvecs = calc_eigvecs[np.argsort(calc_eigvals)]
    calc_eigvals = calc_eigvals[np.argsort(calc_eigvals)]
    os.system("rm -rf *.xyz_* *.[0-9][0-9][0-9]")
    return calc_eigvals, calc_eigvecs
def multipole_moments(self, shot=0, optimize=True, polarizability=False):
    """ Return the multipole moments of the 1st snapshot in Debye and Buckingham units. """
    # Fix: the docstring above was previously placed AFTER the raise statement,
    # making it unreachable dead code and leaving __doc__ unset.
    logger.error('Multipole moments are not yet implemented in AMBER interface')
    raise NotImplementedError
    #=================
    # Below is copied from tinkerio.py, kept as a reference for a future implementation.
    #=================
    # This line actually runs TINKER
    # if optimize:
    #     self.optimize(shot, crit=1e-6)
    #     o = self.calltinker("analyze %s.xyz_2 M" % (self.name))
    # else:
    #     self.mol[shot].write('%s.xyz' % self.name, ftype="tinker")
    #     o = self.calltinker("analyze %s.xyz M" % (self.name))
    # # Read the TINKER output.
    # qn = -1
    # ln = 0
    # for line in o:
    #     s = line.split()
    #     if "Dipole X,Y,Z-Components" in line:
    #         dipole_dict = OrderedDict(zip(['x','y','z'], [float(i) for i in s[-3:]]))
    #     elif "Quadrupole Moment Tensor" in line:
    #         qn = ln
    #         quadrupole_dict = OrderedDict([('xx',float(s[-3]))])
    #     elif qn > 0 and ln == qn + 1:
    #         quadrupole_dict['xy'] = float(s[-3])
    #         quadrupole_dict['yy'] = float(s[-2])
    #     elif qn > 0 and ln == qn + 2:
    #         quadrupole_dict['xz'] = float(s[-3])
    #         quadrupole_dict['yz'] = float(s[-2])
    #         quadrupole_dict['zz'] = float(s[-1])
    #     ln += 1
    # calc_moments = OrderedDict([('dipole', dipole_dict), ('quadrupole', quadrupole_dict)])
    # if polarizability:
    #     if optimize:
    #         o = self.calltinker("polarize %s.xyz_2" % (self.name))
    #     else:
    #         o = self.calltinker("polarize %s.xyz" % (self.name))
    #     # Read the TINKER output.
    #     pn = -1
    #     ln = 0
    #     polarizability_dict = OrderedDict()
    #     for line in o:
    #         s = line.split()
    #         if "Total Polarizability Tensor" in line:
    #             pn = ln
    #         elif pn > 0 and ln == pn + 2:
    #             polarizability_dict['xx'] = float(s[-3])
    #             polarizability_dict['yx'] = float(s[-2])
    #             polarizability_dict['zx'] = float(s[-1])
    #         elif pn > 0 and ln == pn + 3:
    #             polarizability_dict['xy'] = float(s[-3])
    #             polarizability_dict['yy'] = float(s[-2])
    #             polarizability_dict['zy'] = float(s[-1])
    #         elif pn > 0 and ln == pn + 4:
    #             polarizability_dict['xz'] = float(s[-3])
    #             polarizability_dict['yz'] = float(s[-2])
    #             polarizability_dict['zz'] = float(s[-1])
    #         ln += 1
    #     calc_moments['polarizability'] = polarizability_dict
    # os.system("rm -rf *.xyz_* *.[0-9][0-9][0-9]")
    # return calc_moments
def optimize(self, shot=0, method="newton", crit=1e-4):
    """ Optimize the geometry and align the optimized geometry to the starting geometry. """
    # Not implemented for AMBER yet; log the reason, then raise.
    logger.error('Geometry optimizations are not yet implemented in AMBER interface')
    raise NotImplementedError
    # # Code from tinkerio.py, reference for later implementation.
    # if os.path.exists('%s.xyz_2' % self.name):
    #     os.unlink('%s.xyz_2' % self.name)
    # self.mol[shot].write('%s.xyz' % self.name, ftype="tinker")
    # if method == "newton":
    #     if self.rigid: optprog = "optrigid"
    #     else: optprog = "optimize"
    # elif method == "bfgs":
    #     if self.rigid: optprog = "minrigid"
    #     else: optprog = "minimize"
    # o = self.calltinker("%s %s.xyz %f" % (optprog, self.name, crit))
    # # Silently align the optimized geometry.
    # M12 = Molecule("%s.xyz" % self.name, ftype="tinker") + Molecule("%s.xyz_2" % self.name, ftype="tinker")
    # if not self.pbc:
    #     M12.align(center=False)
    # M12[1].write("%s.xyz_2" % self.name, ftype="tinker")
    # rmsd = M12.ref_rmsd(0)[1]
    # cnvgd = 0
    # mode = 0
    # for line in o:
    #     s = line.split()
    #     if len(s) == 0: continue
    #     if "Optimally Conditioned Variable Metric Optimization" in line: mode = 1
    #     if "Limited Memory BFGS Quasi-Newton Optimization" in line: mode = 1
    #     if mode == 1 and isint(s[0]): mode = 2
    #     if mode == 2:
    #         if isint(s[0]): E = float(s[1])
    #         else: mode = 0
    #     if "Normal Termination" in line:
    #         cnvgd = 1
    # if not cnvgd:
    #     for line in o:
    #         logger.info(str(line) + '\n')
    #     logger.info("The minimization did not converge in the geometry optimization - printout is above.\n")
    # return E, rmsd
def energy_rmsd(self, shot=0, optimize=True):
    """ Calculate energy of the selected structure (optionally minimize and return the minimized energy and RMSD). In kcal/mol. """
    # Not implemented for AMBER yet; log the reason, then raise.
    logger.error('Geometry optimization is not yet implemented in AMBER interface')
    raise NotImplementedError
    # # Below is TINKER code as reference for later implementation.
    # rmsd = 0.0
    # # This line actually runs TINKER
    # # xyzfnm = sysname+".xyz"
    # if optimize:
    #     E_, rmsd = self.optimize(shot)
    #     o = self.calltinker("analyze %s.xyz_2 E" % self.name)
    #     #----
    #     # Two equivalent ways to get the RMSD, here for reference.
    #     #----
    #     # M1 = Molecule("%s.xyz" % self.name, ftype="tinker")
    #     # M2 = Molecule("%s.xyz_2" % self.name, ftype="tinker")
    #     # M1 += M2
    #     # rmsd = M1.ref_rmsd(0)[1]
    #     #----
    #     # oo = self.calltinker("superpose %s.xyz %s.xyz_2 1 y u n 0" % (self.name, self.name))
    #     # for line in oo:
    #     #     if "Root Mean Square Distance" in line:
    #     #         rmsd = float(line.split()[-1])
    #     #----
    #     os.system("rm %s.xyz_2" % self.name)
    # else:
    #     o = self.calltinker("analyze %s.xyz E" % self.name)
    # # Read the TINKER output.
    # E = None
    # for line in o:
    #     if "Total Potential Energy" in line:
    #         E = float(line.split()[-2].replace('D','e'))
    # if E is None:
    #     logger.error("Total potential energy wasn't encountered when calling analyze!\n")
    #     raise RuntimeError
    # if optimize and abs(E-E_) > 0.1:
    #     warn_press_key("Energy from optimize and analyze aren't the same (%.3f vs. %.3f)" % (E, E_))
    # return E, rmsd
class AbInitio_AMBER(AbInitio):
    """Subclass of Target for force and energy matching
    using AMBER."""
    def __init__(self,options,tgt_opts,forcefield):
        ## Coordinate file.
        self.set_option(tgt_opts,'coords',default="all.mdcrd")
        ## PDB file for topology (if different from coordinate file.)
        self.set_option(tgt_opts,'pdb',default="conf.pdb")
        ## AMBER home directory.
        self.set_option(options, 'amberhome')
        ## Custom tleap command file (stored on the target as 'leapcmd').
        self.set_option(tgt_opts, 'amber_leapcmd', 'leapcmd')
        ## Name of the engine.
        self.engine_ = AMBER
        ## Initialize base class.
        super(AbInitio_AMBER,self).__init__(options,tgt_opts,forcefield)
class Interaction_AMBER(Interaction):
    """Subclass of Target for calculating and matching ineraction energies
    using AMBER. """
    def __init__(self,options,tgt_opts,forcefield):
        ## Coordinate file.
        self.set_option(tgt_opts, 'coords')
        ## PDB file for topology (if different from coordinate file.)
        self.set_option(tgt_opts, 'pdb')
        ## AMBER home directory.
        self.set_option(options, 'amberhome')
        ## Custom tleap command file (stored on the target as 'leapcmd').
        self.set_option(tgt_opts, 'amber_leapcmd', 'leapcmd')
        ## Name of the engine.
        self.engine_ = AMBER
        ## Initialize base class.
        super(Interaction_AMBER,self).__init__(options,tgt_opts,forcefield)
class Vibration_AMBER(Vibration):
    """Subclass of Target for calculating and matching vibrational modes using AMBER. """
    def __init__(self,options,tgt_opts,forcefield):
        ## Coordinate file.
        self.set_option(tgt_opts, 'coords')
        ## PDB file for topology (if different from coordinate file.)
        self.set_option(tgt_opts, 'pdb')
        ## AMBER home directory.
        self.set_option(options, 'amberhome')
        ## Custom tleap command file (stored on the target as 'leapcmd').
        self.set_option(tgt_opts, 'amber_leapcmd', 'leapcmd')
        ## Name of the engine.
        self.engine_ = AMBER
        ## Initialize base class.
        super(Vibration_AMBER,self).__init__(options,tgt_opts,forcefield)
class Liquid_AMBER(Liquid):
    """Subclass of Target for calculating and matching liquid properties using AMBER. """
    def __init__(self,options,tgt_opts,forcefield):
        # Name of the liquid PDB file.
        self.set_option(tgt_opts,'liquid_coords',default='liquid.pdb',forceprint=True)
        # Name of the gas PDB file.
        self.set_option(tgt_opts,'gas_coords',default='gas.pdb',forceprint=True)
        # Class for creating engine object.  (Fix: this was previously assigned twice.)
        self.engine_ = AMBER
        ## AMBER home directory.
        self.set_option(options, 'amberhome')
        # Set some options for the polarization correction calculation.
        self.gas_engine_args = {}
        # Scripts to be copied from the ForceBalance installation directory.
        self.scripts = []
        # Send back last frame of production trajectory.
        self.extra_output = ['liquid-md.restrt']
        # Name of the engine to pass to npt.py.
        self.engname = "amber"
        # Command prefix.
        self.nptpfx = ""
        # Extra files to be linked into the temp-directory.
        self.nptfiles = ['%s.leap' % os.path.splitext(f)[0] for f in [self.liquid_coords, self.gas_coords]]
        ## Initialize base class.
        super(Liquid_AMBER,self).__init__(options,tgt_opts,forcefield)
        # Pick up per-system mdin files if provided; the gas-phase one is also
        # passed to the gas engine via gas_engine_args.
        for f in [self.liquid_coords, self.gas_coords]:
            if os.path.exists(os.path.join(self.root, self.tgtdir, '%s.mdin' % os.path.splitext(f)[0])):
                self.nptfiles.append('%s.mdin' % os.path.splitext(f)[0])
                if f == self.gas_coords:
                    self.gas_engine_args['mdin'] = os.path.splitext(f)[0]
        # Link any mol2 / frcmod parameter files found in the target directory.
        for mol2 in listfiles(None, 'mol2', err=False, dnm=os.path.join(self.root, self.tgtdir)):
            self.nptfiles.append(mol2)
        for frcmod in listfiles(None, 'frcmod', err=False, dnm=os.path.join(self.root, self.tgtdir)):
            self.nptfiles.append(frcmod)
        # Every required file must exist before we proceed.
        for i in self.nptfiles:
            if not os.path.exists(os.path.join(self.root, self.tgtdir, i)):
                logger.error('Please provide %s; it is needed to proceed.\n' % i)
                raise RuntimeError
        # Send back the trajectory file.
        if self.save_traj > 0:
            self.extra_output += ['liquid-md.netcdf']
        # These functions need to be called after self.nptfiles is populated
        self.post_init(options)
|
#!/usr/bin/env python
"""
Cleans up relative cross-notebook links by replacing them with .html
extension.
"""
import os
import re
from bs4 import BeautifulSoup
import warnings
# TODO: holoviews specific links e.g. to reference manual...doc & generalize
#BOKEH_REPLACEMENTS = {'cell.output_area.append_execute_result': '//cell.output_area.append_execute_result',
# '}(window));\n</div>': '}(window));\n</script></div>',
# '\n(function(root) {': '<script>\n(function(root) {'}
# Fix gallery links (e.g to the element gallery)
#LINK_REPLACEMENTS = {'../../examples/elements/':'../gallery/elements/',
# '../../examples/demos/':'../gallery/demos/',
# '../../examples/streams/':'../gallery/streams/'}
def filter_available(names, name_type):
    """Return (name, replacement-anchor) pairs for names that have a reference notebook.

    For each name, the first backend in (bokeh, matplotlib, plotly) that has
    examples/reference/<name_type>/<backend>/<name>.ipynb wins; names with no
    notebook for any backend are omitted.
    """
    # Hoisted out of the loop: the reference directory does not depend on the name.
    reference_dir = os.path.abspath(os.path.join(__file__, '..', '..', '..',
                                                 'examples', 'reference'))
    # if not os.path.isdir(reference_dir):
    #     raise Exception('Cannot find examples/reference in %r' % reference_dir)
    available = []
    for name in names:
        for backend in ['bokeh', 'matplotlib', 'plotly']:
            candidate = os.path.join(reference_dir, name_type, backend, name+'.ipynb')
            if os.path.isfile(candidate):
                replacement_tpl = """<a href='../reference/{clstype}/{backend}/{clsname}.html'>
                <code>{clsname}</code></a>"""
                replacement = replacement_tpl.format(clstype=name_type,
                                                     clsname=name,
                                                     backend=backend)
                available.append((name, replacement))
                break
    return available
# TODO: allow to register stuff
def find_autolinkable():
    """Collect the holoviews names that can be auto-linked, grouped by category.

    Returns a dict mapping 'elements' / 'streams' / 'containers' to lists of
    (name, replacement) pairs, or an empty dict when holoviews or param is
    not importable.
    """
    try:
        import holoviews as hv
        import param
    except ImportError:
        print('no holoviews and/or param: skipping autolinks')
        return {}
    # Dimensioned subclasses that should never be auto-linked.
    excluded_names = {'UniformNdMapping', 'NdMapping', 'MultiDimensionalMapping',
                      'Empty', 'CompositeOverlay', 'Collator', 'AdjointLayout'}
    dimensioned = set(param.concrete_descendents(hv.core.Dimensioned).keys())
    elements = set(param.concrete_descendents(hv.Element).keys())
    streams = set(param.concrete_descendents(hv.streams.Stream).keys())
    # Containers are the remaining Dimensioned classes, minus the exclusions.
    containers = (dimensioned - elements) - excluded_names
    categories = {'elements': elements,
                  'streams': streams,
                  'containers': containers}
    return {kind: filter_available(names, kind) for kind, names in categories.items()}
# Table of auto-linkable names, computed once at import time and reused by component_links.
autolinkable = find_autolinkable()
def component_links(text, path):
    """Auto-link ``<code>Name</code>`` mentions in user-guide / getting-started pages.

    Replaces each registered name in the module-level ``autolinkable`` table
    with its reference-gallery anchor.  Pages outside those sections are
    returned unchanged.
    """
    if ('user_guide' in path) or ('getting_started' in path):
        for clstype, listing in autolinkable.items():
            for (clsname, replacement) in list(listing):
                # Fixes: raw string (the old pattern's '\s' triggered a
                # DeprecationWarning), re.escape on the class name, and removal
                # of a stray trailing '*' that made the closing '>' optional.
                pattern = r'<code>\s*{clsname}\s*</code>'.format(clsname=re.escape(clsname))
                try:
                    text, count = re.subn(pattern, replacement, text)
                except Exception as e:
                    print(str(e))
    return text
def cleanup_links(path, inspect_links=False):
    """
    Use inspect_links to get a list of all the external links in the site

    Rewrites the HTML file at *path* in place: applies component auto-linking,
    converts relative .ipynb anchors to .html, repairs links/images whose
    numbered-notebook prefix was stripped during the build, and warns about
    targets that cannot be found.
    """
    with open(path) as f:
        text = f.read()
    # if 'BokehJS does not appear to have successfully loaded' in text:
    #     for k, v in BOKEH_REPLACEMENTS.items():
    #         text = text.replace(k, v)
    text = component_links(text, path)
    soup = BeautifulSoup(text, features="html.parser")
    for a in soup.findAll('a'):
        href = a.get('href', '')
        if '.ipynb' in href and 'http' not in href:
            # for k, v in LINK_REPLACEMENTS.items():
            #     href = href.replace(k, v)
            a['href'] = href.replace('.ipynb', '.html')
            # check to make sure that path exists, if not, try un-numbered version
            try_path = os.path.join(os.path.dirname(path), a['href'])
            if not os.path.exists(try_path):
                num_name = os.path.basename(try_path)
                # Strip a leading "NN " / "NN-" / "NN_" notebook number.
                name = re.split(r"^\d+( |-|_)", num_name)[-1]
                new_path = try_path.replace(num_name, name)
                if os.path.exists(new_path):
                    a['href'] = os.path.relpath(new_path, os.path.dirname(path))
                else:
                    also_tried = 'Also tried: {}'.format(name) if name != num_name else ''
                    warnings.warn('Found missing link {} in: {}. {}'.format(a['href'], path, also_tried))
        # NOTE(review): a['href'] raises KeyError for anchors without an href
        # attribute — confirm all anchors in the built site carry one.
        if inspect_links and 'http' in a['href']:
            print(a['href'])
    for img in soup.findAll('img'):
        src = img.get('src', '')
        if 'http' not in src and 'assets' in src:
            try_path = os.path.join(os.path.dirname(path), src)
            if not os.path.exists(try_path):
                # Asset may live one directory up from the page.
                also_tried = os.path.join('..', src)
                if os.path.exists(os.path.join(os.path.dirname(path), also_tried)):
                    img['src'] = also_tried
                else:
                    warnings.warn('Found reference to missing image {} in: {}. Also tried: {}'.format(src, path, also_tried))
    # Write the transformed document back over the original file.
    with open(path, 'w') as f:
        f.write(str(soup))
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('build_dir', help="Build Directory")
    parser.add_argument('--inspect-links', action='store_true', help="Whether or not to print out all the links on the website")
    args = parser.parse_args()
    # Rewrite every HTML file under the build directory in place.
    for root, dirs, files in os.walk(args.build_dir):
        for file_path in files:
            if file_path.endswith(".html"):
                # NOTE(review): cleanup_links returns None; the 'soup' binding is unused.
                soup = cleanup_links(os.path.join(root, file_path), args.inspect_links)
|
#!/usr/bin/env python
from __future__ import unicode_literals
import sys
import glob
import os
import time
import logging
from datetime import datetime
from slackclient import SlackClient
sys.dont_write_bytecode = True
class RtmBot(object):
    """Slack RTM bot: connects with slackclient, loads plugins, and runs the
    read/cron/output/ping loop."""
    def __init__(self, config):
        '''
        Params:
            - config (dict):
                - SLACK_TOKEN: your authentication token from Slack
                - BASE_PATH (optional: defaults to execution directory) RtmBot will
                    look in this directory for plugins.
                - LOGFILE (optional: defaults to rtmbot.log) The filename for logs, will
                    be stored inside the BASE_PATH directory
                - DEBUG (optional: defaults to False) with debug enabled, RtmBot will
                    break on errors
        '''
        # set the config object
        self.config = config
        # NOTE(review): float(None) raises TypeError if HEARTBEAT is absent —
        # confirm the config always provides it.
        self.heartbeat = float(config.get('HEARTBEAT'))
        # set slack token
        self.token = config.get('SLACK_TOKEN')
        # set working directory for loading plugins or other files
        working_directory = os.path.dirname(sys.argv[0])
        self.directory = self.config.get('BASE_PATH', working_directory)
        if not self.directory.startswith('/'):
            # Make a relative base path absolute against the current directory.
            path = '{}/{}'.format(os.getcwd(), self.directory)
            self.directory = os.path.abspath(path)
        # establish logging
        # log_file = config.get('LOGFILE', 'rtmbot.log')
        logging.basicConfig(
            # filename=log_file,
            level=logging.INFO,
            format='%(asctime)s %(message)s')
        logging.info('Initialized in: {}'.format(self.directory))
        self.debug = self.config.get('DEBUG', False)
        # initialize stateful fields
        self.last_ping = 0
        self.bot_plugins = []
        self.slack_client = None
    def _dbg(self, debug_string):
        # Log only when debug mode is enabled.
        if self.debug:
            logging.info(debug_string)
    def connect(self):
        """Convenience method that creates Server instance"""
        self.slack_client = SlackClient(self.token)
        self.slack_client.rtm_connect()
    def _start(self):
        # Main loop: read events, run crons, flush plugin output, keep-alive, sleep.
        self.connect()
        self.load_plugins()
        while True:
            for reply in self.slack_client.rtm_read():
                self.input(reply)
            self.crons()
            self.output()
            self.autoping()
            time.sleep(self.heartbeat or .1)
    def start(self):
        # Daemonize when DAEMON is truthy; otherwise run in the foreground.
        if 'DAEMON' in self.config:
            if self.config.get('DAEMON'):
                import daemon
                with daemon.DaemonContext():
                    self._start()
        self._start()
    def autoping(self):
        # hardcode the interval to 3 seconds
        now = int(time.time())
        if now > self.last_ping + 3:
            self.slack_client.server.ping()
            self.last_ping = now
    def input(self, data):
        # Dispatch an RTM event to every plugin's process_<type> handler.
        if "type" in data:
            function_name = "process_" + data["type"]
            self._dbg("got {}".format(function_name))
            for plugin in self.bot_plugins:
                plugin.register_jobs()
                plugin.do(function_name, data)
    def output(self):
        # Drain each plugin's output queue, rate-limiting consecutive channel sends.
        for plugin in self.bot_plugins:
            limiter = False
            for output in plugin.do_output():
                if isinstance(output[1], dict):
                    # Dict payloads go straight to the Web API.
                    self.slack_client.api_call("chat.postMessage", **output[1])
                else:
                    channel = self.slack_client.server.channels.find(output[0])
                    if channel is not None and output[1] is not None:
                        if limiter:
                            time.sleep(.1)
                            limiter = False
                        channel.send_message(output[1])
                        limiter = True
    def crons(self):
        # Run every plugin's scheduled jobs against the current UTC time.
        time_now = datetime.utcnow()
        for plugin in self.bot_plugins:
            plugin.do_jobs(time_now)
    def load_plugins(self):
        # Put the plugin directories on sys.path, then instantiate each .py plugin.
        for plugin in glob.glob(self.directory + '/plugins/*'):
            sys.path.insert(0, plugin)
        sys.path.insert(0, self.directory + '/plugins/')
        for plugin in glob.glob(self.directory + '/plugins/*.py') + \
                glob.glob(self.directory + '/plugins/*/*.py'):
            logging.info(plugin)
            # Module name = file name without the .py extension.
            name = plugin.split('/')[-1][:-3]
            if name in self.config:
                logging.info("config found for: " + name)
            plugin_config = self.config.get(name, {})
            plugin_config['DEBUG'] = self.debug
            self.bot_plugins.append(Plugin(name, plugin_config))
class Plugin(object):
    """Wraps an imported plugin module: registers its jobs, dispatches events
    to its handlers, and drains its output queue."""
    def __init__(self, name, plugin_config=None):
        '''
        A plugin in initialized with:
            - name (str)
            - plugin config (dict) - (from the yaml config)
            Values in config:
            - DEBUG (bool) - this will be overridden if debug is set in config for this plugin
        '''
        if plugin_config is None:
            plugin_config = {}
        self.name = name
        self.jobs = []
        self.module = __import__(name)
        self.module.config = plugin_config
        self.debug = self.module.config.get('DEBUG', False)
        self.register_jobs()
        self.outputs = []
        if 'setup' in dir(self.module):
            self.module.setup()
    def register_jobs(self):
        # Consolidated handling of the three schedule tables.  Each table entry
        # is (spec, handler-name); the table is cleared after registration so
        # jobs are not registered twice.  getattr replaces the previous
        # eval("self.module." + name), which executed arbitrary strings.
        for table in ('crontable', 'timejobs', 'weeklyjobs'):
            if table in dir(self.module):
                for spec, function in getattr(self.module, table):
                    self.jobs.append(Job(spec, getattr(self.module, function), self.debug))
                logging.info(getattr(self.module, table))
            setattr(self.module, table, [])
    def do(self, function_name, data):
        # Dispatch an event to the plugin's named handler, then to catch_all.
        if function_name in dir(self.module):
            handler = getattr(self.module, function_name)
            if self.debug is True:
                # this makes the plugin fail with stack trace in debug mode
                handler(data)
            else:
                # otherwise we log the exception and carry on
                try:
                    handler(data)
                except Exception:
                    logging.exception("problem in module {} {}".format(function_name, data))
        if "catch_all" in dir(self.module):
            if self.debug is True:
                # this makes the plugin fail with stack trace in debug mode
                self.module.catch_all(data)
            else:
                try:
                    self.module.catch_all(data)
                except Exception:
                    logging.exception("problem in catch all: {} {}".format(self.module, data))
    def do_jobs(self, time_now):
        # Give every registered job a chance to fire.
        for job in self.jobs:
            job.check(time_now)
    def do_output(self):
        # Drain the module's `outputs` list, creating it if the module lacks one.
        output = []
        while True:
            if 'outputs' in dir(self.module):
                if len(self.module.outputs) > 0:
                    logging.info("output from {}".format(self.module))
                    output.append(self.module.outputs.pop(0))
                else:
                    break
            else:
                self.module.outputs = []
        return output
class Job(object):
    # TODO: subclass Job to break check up
    """A scheduled plugin job.  The interval's type selects the schedule kind:
    str "HH:MM" = daily, (interval, weekday) tuple/list = weekly, number = cron
    (run every `interval` seconds)."""
    def __init__(self, interval, function, debug):
        self.function = function
        self.interval = interval
        # 0 = never run; afterwards: a datetime for daily/weekly jobs,
        # a time.time() float for cron jobs.
        self.lastrun = 0
        self.debug = debug
    def __str__(self):
        return "{} {} {}".format(self.function, self.interval, self.lastrun)
    def __repr__(self):
        return self.__str__()
    def check(self, time_now):
        """Fire the job if its schedule matches *time_now*."""
        if isinstance(self.interval, str):
            # timejobs.  Fix: the original tested (str, unicode); `unicode`
            # does not exist in Python 3, so every check raised NameError.
            if (self.lastrun == 0) or (self.lastrun and self.lastrun.day != time_now.day):
                hour, minute = self.interval.split(':')
                if int(hour) == time_now.hour and int(minute) == time_now.minute:
                    self.do()
                    self.lastrun = time_now
        elif isinstance(self.interval, (tuple, list)):
            # weeklyjobs.  Fix: weekday() must be called — the original
            # compared the bound method to an int, which was always False.
            interval, weekday = self.interval
            if time_now.weekday() == weekday:
                if (self.lastrun == 0) or (self.lastrun and self.lastrun.day != time_now.day):
                    hour, minute = interval.split(':')
                    if int(hour) == time_now.hour and int(minute) == time_now.minute:
                        self.do()
                        self.lastrun = time_now
        else:
            # crontable: run whenever `interval` seconds have elapsed.
            if self.lastrun + self.interval < time.time():
                self.do()
                self.lastrun = time.time()
    def do(self):
        if self.debug is True:
            # this makes the plugin fail with stack trace in debug mode
            self.function()
        else:
            # otherwise we log the exception and carry on
            try:
                self.function()
            except Exception:
                logging.exception("Problem in job check: {}".format(self.function))
class UnknownChannel(Exception):
    """Exception for a channel name that cannot be resolved."""
|
import asyncio
from datetime import datetime, timedelta, timezone
from typing import Sequence
import structlog
from job_scheduler.api.models import Schedule
from job_scheduler.broker import RabbitMQBroker, ScheduleBroker
from job_scheduler.cache import RedisScheduleCache, ScheduleCache
from job_scheduler.config import config
from job_scheduler.db import RedisScheduleRepository, ScheduleRepository
from job_scheduler.logging import setup_logging
from job_scheduler.services import (
add_to_cache,
diff_from_cache,
enqueue_jobs,
get_range,
)
logger = structlog.getLogger("job_scheduler.scheduler")
async def schedule_jobs(
    repo: ScheduleRepository, broker: ScheduleBroker, cache: ScheduleCache, interval=1
):
    """Run one scheduling pass, then sleep *interval* seconds.

    Fetches schedules due as of now, filters out those already seen in the
    cache, enqueues the remainder on the broker, records them in the cache,
    and logs any observed delay.
    """
    now = get_now()
    schedule_candidates = await get_runnable_schedules(repo, now)
    # Drop candidates already present in the cache (recently enqueued).
    runnable_schedules = await diff_from_cache(cache, *schedule_candidates)
    if len(runnable_schedules) < len(schedule_candidates):
        logger.warning(
            "Ignoring item(s) from cache",
            n_schedules_ignored=len(schedule_candidates) - len(runnable_schedules),
        )
    await enqueue_jobs(broker, *runnable_schedules)
    await add_to_cache(cache, *runnable_schedules)
    # NOTE(review): timedelta.seconds drops the days component (and rounds off
    # microseconds) — total_seconds() may be what is intended here; confirm.
    total_delay = sum(s.current_delay.seconds for s in schedule_candidates)
    logger.info(
        f"Queued schedule(s) for execution", n_schedules=len(runnable_schedules)
    )
    if total_delay > 0:
        logger.warning(
            "Observed delay in schedule(s).",
            total_delay_s=total_delay,
            n_schedules=len(schedule_candidates),
        )
    await asyncio.sleep(interval)
async def get_runnable_schedules(
    repo: ScheduleRepository, now: datetime
) -> Sequence[Schedule]:
    """Fetch schedules due up to *now*'s timestamp.

    Passes None as the lower bound (i.e. no lower limit — exact range
    semantics are defined by job_scheduler.services.get_range).
    """
    return await get_range(repo, None, now.timestamp())
def get_now() -> datetime:
    """Return the current UTC time, truncated to whole seconds."""
    return datetime.now(timezone.utc).replace(microsecond=0)
async def schedule():
    """Run the scheduling loop until interrupted.

    Builds the cache, repository, and broker clients, then repeatedly runs
    schedule_jobs.  On KeyboardInterrupt the broker is shut down and the
    loop exits.
    """
    cache = await RedisScheduleCache.get_cache()
    repo = await RedisScheduleRepository.get_repo()
    broker = await RabbitMQBroker.get_broker()
    while True:
        try:
            await schedule_jobs(repo, broker, cache)
        except KeyboardInterrupt:
            await broker.shutdown()
            # Fix: previously the loop continued after shutdown, so the next
            # iteration would call schedule_jobs on a shut-down broker.
            return
def main():
    """Entry point: configure logging and run the scheduler loop.

    Swallows KeyboardInterrupt so Ctrl-C exits cleanly.
    """
    setup_logging()
    try:
        asyncio.run(schedule())
    except KeyboardInterrupt:
        pass
if __name__ == "__main__":
    setup_logging()
    if config.dev_mode:
        # Dev mode: restart the scheduler automatically on code changes.
        from burgeon import with_reloading
        logger.info("Starting scheduler with reloading.")
        with_reloading(main)
    else:
        main()
|
from client import Client
from server import Server
from log.logger import Logger
import os
import sys
import argparse
import logging
class Peer:
    """
    The Peer class starts a server-thread and n client-threads
    where each client is subscribed to the server of another peer.
    Each peer consists of one server, one logger and n clients. The server
    as well as the clients are implemented as threads and started in the peer.
    All clients are subscribed to the server of another peer.
    The server basically is the publishing part of the peer whereas
    the clients listen to the updates of the other peers.
    When the peer is started via commandline several arguments are
    passed to it. These arguments are then passed to the server, the logger and
    the clients.
    """
    def __init__(self, hostport, others, coordinates, tracefile, logDir, testing):
        """
        Initializes the peer.

        Note: this constructor blocks until the server thread and all client
        threads have finished (join), then closes the logfile.

        :param hostport: specifies the port number on which the server broadcasts its updates
        :type hostport: int
        :param others: the IP-addresses and the port number of the clients in the form IP-address:port number
        :type others: str
        :param coordinates: defines the rectangle to observe in the form x1,y1,x2,y2
        :type coordinates: str
        :param tracefile: the path to the tracefile which the server reads from
        :type tracefile: str
        :param logDir: the path to the directory where the logfile should be saved
        :type logDir: str
        :param testing: if true the server only reads 20 lines from the tracefile, else it reads everything
        :type testing: bool
        """
        self.logger = Logger(logDir, str(hostport) + "_chunklog.csv")
        self.clients = []
        self.server = Server(hostport, coordinates, tracefile, testing, self)
        # One client per comma-separated "host:port" entry in `others`.
        self.addresses = others.split(',')
        for i in range(len(self.addresses)):
            self.clients.append(Client(hostport, self.addresses[i].strip(), self))
        self.server.start()
        for i in range(len(self.clients)):
            self.clients[i].start()
        # Block until all threads complete.
        self.server.join()
        for client in self.clients:
            client.join()
        #close the logfile when finished
        self.logger.closeFile()
def configure_loggers(logFolder, logfilename, console=False):
    """Configure the root logger to write to a file, and optionally to stdout.

    :param logFolder: directory for the logfile (created if missing)
    :param logfilename: name of the logfile inside logFolder
    :param console: also attach a DEBUG-level stream handler when True
    """
    # Define log format
    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
    # Configure Default-Logger
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    if console:
        # Configure logging to stdout
        streamHandler = logging.StreamHandler()
        streamHandler.setFormatter(formatter)
        streamHandler.setLevel(logging.DEBUG)
        logger.addHandler(streamHandler)
    # Fix: exist_ok avoids the check-then-create race of the previous
    # os.path.exists + os.makedirs pair.
    os.makedirs(logFolder, exist_ok=True)
    # Configure logging to File (os.path.join instead of string concatenation).
    fileHandler = logging.FileHandler(os.path.join(logFolder, logfilename))
    fileHandler.setFormatter(formatter)
    fileHandler.setLevel(logging.INFO)
    logger.addHandler(fileHandler)
if __name__ == "__main__":
    """
    This method is called when the peer is
    started via the commandline.
    Here the necessary arguments are parsed
    and passed to the peer.
    """
    parser = argparse.ArgumentParser(description='Start a peer with one server and several clients.')
    parser.add_argument('--serverPort', type=int, required=True,
                        help='the port number of the server')
    parser.add_argument('--clients', type=str, required=True,
                        help='IP-address and port of the clients; \nExample: "localhost:5557,localhost:5558,localhost:5559"')
    parser.add_argument('--coordinates', type=str, required=True,
                        help='coordinates of the left upper corner and the right lower corner for the rectangle '
                             'which the server should observe; \nExample: "0,65000,65000,0"')
    parser.add_argument('--tracefile', type=str, required=True,
                        help='the path to the tracefile which the server should read from')
    parser.add_argument('--logDir', type=str, default="./log/",
                        help='the directory in which the logfile should be saved [Default: ./log/]')
    parser.add_argument('--testing', action='store_true',
                        help="if this argument is specified the server will only read 20 lines of its tracefile, else it will read the whole file")
    args = parser.parse_args()
    configure_loggers(args.logDir, "gameserver_{}.log".format(args.serverPort), True)
    logging.info("Entering main...")
    if (not (os.path.exists(args.tracefile))):
        logging.fatal("Path to tracefile does not exist!")
        sys.exit()
    else:
        # log the params in file
        paramsLog = open("params.log", "w")
        paramsLog.write("--serverPort=%d\n--clients=%s\n--coordinates=%s\n--tracefile=%s\n--logDir=%s\n--testing=%s"
                        % (args.serverPort, args.clients, args.coordinates, args.tracefile, args.logDir, args.testing))
        paramsLog.close()
        logging.info('Logged cmd arguments')
        # Blocks until the peer's server and client threads finish.
        Peer(args.serverPort, args.clients, args.coordinates, args.tracefile, args.logDir, args.testing)
        logging.info("Exiting main...")
|
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import h5py
import pyqtgraph as pg
# create some HDF5 data in a 2-d array of X,Y pairs
with h5py.File('plot_2d_data.h5','w') as h5f:
    data = h5f.create_dataset('data',shape=(100,2))
    data[:,0] = np.arange(0.0,10.0,0.1) ## X data points
    data[:,1] = np.random.normal(size=100) ## Y data points
# A QApplication must exist before any widgets are created.
app = QtGui.QApplication([])
win = pg.GraphicsLayoutWidget(show=True, title="2-D plot examples")
win.resize(1000,600)
win.setWindowTitle('pyqtgraph example: 2D Plotting')
# Enable antialiasing for prettier plots
pg.setConfigOptions(antialias=True)
# NOTE(review): addPlot forwards extra kwargs to the new PlotItem; typical
# pyqtgraph examples instead call p.plot(x, y) after addPlot — confirm that
# passing x=/y= here actually draws the curves.
p1 = win.addPlot(title="Plot of NumPy data",
                 x=np.arange(0.0,10.0,0.1), y=np.random.normal(size=100))
p2 = win.addPlot(title="NumPy data with Points",
                 x=np.arange(0.0,10.0,0.1), y=np.random.normal(size=100),
                 pen=(255,0,0), symbolBrush=(255,0,0))
win.nextRow()
# Slicing h5f['data'] copies the values into memory, so the plots stay
# valid after the file is closed.
with h5py.File('plot_2d_data.h5','r') as h5f:
    p3 = win.addPlot(title="Plot of HDF5 data",
                     x=h5f['data'][:,0], y=h5f['data'][:,1])
    p4 = win.addPlot(title="HDF5 data with Points",
                     x=h5f['data'][:,0], y=h5f['data'][:,1],
                     pen=(0,0,255), symbolBrush=(0,0,255))
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
from datetime import datetime
from unittest import TestCase
import httpretty as httpretty
from attendance.Api import Api
from config import Config
class TestApi(TestCase):
    """Unit tests for attendance.Api against an httpretty-stubbed BossControl host."""

    @staticmethod
    def _stub_response(body):
        # Every Api call POSTs to the configured BossControl host.
        httpretty.register_uri(httpretty.POST, Config.ATTENDANCE_BOSSCONTROL_HOST, body=body)

    @httpretty.activate
    def test_get_employee_list_by_department_bad_response(self):
        # Unparseable JSON must degrade to an empty list.
        self._stub_response('Bad json string')
        self.assertEqual([], Api().get_employee_list_by_department('department_test', datetime.now(), datetime.now()))

    @httpretty.activate
    def test_get_employee_list_by_department_no_results(self):
        # A payload without a "result" key also yields an empty list.
        self._stub_response('{"no": "results"}')
        self.assertEqual([], Api().get_employee_list_by_department('department_test', datetime.now(), datetime.now()))

    @httpretty.activate
    def test_get_employee_list_by_department(self):
        self._stub_response('{"result": [{"id": "1"}]}')
        self.assertEqual(
            [{'id': '1'}],
            Api().get_employee_list_by_department('department_test', datetime.now(), datetime.now())
        )

    @httpretty.activate
    def test_get_employee_list_ignore(self):
        # Employees whose code appears in the ignore list are filtered out.
        self._stub_response('{"result": [{"code": "1"}]}')
        Config.ATTENDANCE_DEPARTMENT = ['only-one']
        Config.ATTENDANCE_USER_IGNORE = ['1']
        self.assertEqual([], Api().get_employee_list())

    @httpretty.activate
    def test_get_employee_list(self):
        self._stub_response('{"result": [{"code": "1"}]}')
        Config.ATTENDANCE_DEPARTMENT = ['only-one']
        Config.ATTENDANCE_USER_IGNORE = []
        self.assertEqual([{'code': '1'}], Api().get_employee_list())

    @httpretty.activate
    def test_get_employee_list_wrong_format(self):
        # Entries without a "code" field are dropped.
        self._stub_response('{"result": [{"no-code": "1"}]}')
        Config.ATTENDANCE_DEPARTMENT = ['only-one']
        Config.ATTENDANCE_USER_IGNORE = []
        self.assertEqual([], Api().get_employee_list())

    @httpretty.activate
    def test_get_employee_record_by_date_bad_response(self):
        self._stub_response('Bad json string')
        self.assertEqual([], Api().get_employee_record_by_date('employee_id', datetime.now()))

    @httpretty.activate
    def test_get_employee_record_by_date_no_results(self):
        self._stub_response('{"no": "results"}')
        self.assertEqual([], Api().get_employee_record_by_date('employee_id', datetime.now()))

    @httpretty.activate
    def test_get_employee_record_by_date(self):
        # Entries carrying a "timestamp" are parsed to datetimes; others are skipped.
        self._stub_response('{"result": [{"timestamp": "2019-01-01 00:00:00"}, {"wrong-name": "2019-01-01 00:00:00"}]}')
        self.assertEqual(
            [datetime(2019, 1, 1, 0, 0)],
            Api().get_employee_record_by_date('employee_id', datetime.now())
        )
|
import py.test
from descent.parser import parse_grammar
from descent.ast import *
def single_rule_grammar(name, body):
    """Build a one-rule grammar AST equivalent to ``name <- body``."""
    return grammar([rule(name=reference(name), expr=body)])
# (grammar text, expected AST) pairs — one case per construct of the PEG DSL:
# string/char literals, escapes, char classes and ranges, choice, repetition,
# predicates (&/!), and the AST-building operators (@, ~, :, ^, ::, ^^).
parse_cases = [
    ("A <- 'a'", single_rule_grammar("A", string("a"))),
    ("A <- \"a\"", single_rule_grammar("A", string("a"))),
    ("A <- '\\n'", single_rule_grammar("A", string("\n"))),
    ("A <- '\\010'", single_rule_grammar("A", string("\010"))),
    ("A <- [a]", single_rule_grammar("A", char("a"))),
    ("A <- [a-z]", single_rule_grammar(
        "A", char_range(start=char("a"), end=char("z"))
    )),
    ("A <- []", single_rule_grammar("A", fail())),
    ("A <- [ab]", single_rule_grammar("A", choice([char("a"), char("b")]))),
    ("A <- .", single_rule_grammar("A", char_any())),
    ("A <- .:'a'", single_rule_grammar("A", replace(char_any(), string("a")))),
    ("A <- B", single_rule_grammar("A", reference("B"))),
    ("A <- B / C", single_rule_grammar(
        "A", choice([reference("B"), reference("C")])
    )),
    ("A <- B*", single_rule_grammar("A", repeat(reference("B")))),
    ("A <- B+", single_rule_grammar("A", repeat1(reference("B")))),
    ("A <- B?", single_rule_grammar("A", optional(reference("B")))),
    ("A <- !B", single_rule_grammar("A", not_follow(reference("B")))),
    ("A <- &B", single_rule_grammar("A", follow(reference("B")))),
    ("A <- @B", single_rule_grammar("A", node("B"))),
    ("A <- B~", single_rule_grammar("A", ignore(reference("B")))),
    ("A <- B:a", single_rule_grammar(
        "A", append(reference("B"), reference("a"))
    )),
    ("A <- B^a", single_rule_grammar(
        "A", top(reference("B"), reference("a"))
    )),
    ("A <- B::", single_rule_grammar("A", splice(reference("B")))),
    ("A <- B^^", single_rule_grammar("A", top_splice(reference("B")))),
]
@py.test.mark.parametrize("input, parsed", parse_cases)
def test_parse(input, parsed):
    """Each grammar snippet must parse to exactly the expected AST."""
    assert parse_grammar(input) == parsed
|
#!/usr/bin/env python3
"""
juicylang - Juicy programming language written in Python using PLY
Author : Hamidreza Mahdavipanah
Version: 1.0.0
Repository: http://github.com/mahdavipanah/juicylang
License : MIT License
"""
from sys import exit, argv, stdin
from colorama import init as colorama_init, Fore
from src.juicyyacc import yacc
# Initialize colorama (autoreset restores the default color after each print)
colorama_init(autoreset=True)
# A file (or an option) has been passed on the command line
if len(argv) > 1:
    # Showing help text
    if argv[1] in {'-h', '--help'}:
        # NOTE(review): the example invocation says "asmrun" — likely meant
        # "juicylang"; left unchanged pending confirmation.
        print("Usage: juicylang [file-name]\n"
              "  Runs and interprets the given jul file.\n"
              "  If no file is given, reads from standard input.\n"
              "  Example usage:\n"
              "    $ asmrun examples/myprogram.jul\n"
              "Options:\n"
              "  -h, --help Shows help text.\n"
              "Author:\n"
              "  Hamidreza Mahdavipanah <h.mahdavipanah@gmail.com>\n"
              "Repository:\n"
              "  http://github.com/mahdavipanah/juicylang\n"
              "License:\n"
              "  MIT License")
        exit(0)
    # Reading a source file
    source = ""
    try:
        with open(argv[1], 'rt', encoding='utf-8') as file:
            source = file.read()
    except FileNotFoundError:
        print(Fore.RED + "File '" + Fore.BLACK + argv[1] + Fore.RED + "' not found")
        exit(1)
    yacc.parse(source)
    exit(0)
# Read the program's source from standard input.
# (read() replaces the original line-by-line string accumulation loop.)
source = stdin.read()
yacc.parse(source)
from typing import Tuple
from hypothesis import given
from gon.base import Point
from tests.utils import implication
from . import strategies
@given(strategies.points)
def test_reflexivity(point: Point) -> None:
    """Equality is reflexive: every point equals itself."""
    assert point == point
@given(strategies.points_pairs)
def test_symmetry(points_pair: Tuple[Point, Point]) -> None:
    """Equality is symmetric: a == b implies b == a."""
    first, second = points_pair
    assert implication(first == second, second == first)
@given(strategies.points_triplets)
def test_transitivity(points_triplet: Tuple[Point, Point, Point]) -> None:
    """Equality is transitive: a == b and b == c implies a == c."""
    first, second, third = points_triplet
    assert implication(first == second and second == third, first == third)
|
"""Library metadata."""
MAJOR = 0
MINOR = 0
MICRO = 0
__author__ = "Kieran Ryan"
__copyright__ = f"Copyright 2021 {__author__}"
__description = "A sample Python library."
__license__ = "MIT"
__title__ = "pysamplelib"
__url__ = "https://github.com/kieran-ryan/python-library-template"
__version__ = f"{MAJOR}.{MINOR}.{MICRO}"
|
import sublime
import sublime_plugin
from .lib.comment_settings import CommentSettings
from .lib.view_verifier import ViewVerifier
class MagicCommentInsertCommand(sublime_plugin.TextCommand):
    """Insert each configured magic comment into the current view when applicable."""

    def run(self, edit):
        config = sublime.load_settings("MagicComment.sublime-settings")
        for raw_settings in config.get("comments", []):
            parsed = CommentSettings(raw_settings)
            # Skip entries whose verifier says this view doesn't qualify.
            if not ViewVerifier(self.view, parsed).should_run():
                continue
            self.__insert_comment(edit, parsed)

    def __insert_comment(self, edit, comment_settings):
        # text_point is zero-based; the configured line number is one-based.
        insert_at = self.view.text_point(comment_settings.line() - 1, 0)
        payload = comment_settings.text() + ("\n" * comment_settings.blank_lines())
        self.view.insert(edit, insert_at, payload)
|
import os
import sys
import logging
from _atiiaftt import ffi,lib
class FTUnit:
    """
    Convenience class holding accepted units for the atiia functions.
    Constants declared from 'ftconfig.c'.

    Each constant is a NUL-terminated ASCII ``char[]`` allocated via cffi,
    ready to pass to the wrapped C unit-setting functions.
    """
    FORCE_LB=ffi.new("char[]","lb".encode("ascii"))
    """lb\\0"""
    FORCE_KLB=ffi.new("char[]","klb".encode("ascii"))
    """klb\\0"""
    FORCE_N=ffi.new("char[]","N".encode("ascii"))
    """N\\0"""
    FORCE_KN=ffi.new("char[]","kN".encode("ascii"))
    """kN\\0"""
    FORCE_G=ffi.new("char[]","g".encode("ascii"))
    """g\\0"""
    FORCE_KG=ffi.new("char[]","kg".encode("ascii"))
    """kg\\0"""
    TORQUE_IN_LB=ffi.new("char[]","in-lb".encode("ascii"))
    """in-lb\\0"""
    TORQUE_FT_LB=ffi.new("char[]","ft-lb".encode("ascii"))
    """ft-lb\\0"""
    TORQUE_N_M=ffi.new("char[]","N-m".encode("ascii"))
    """N-m\\0"""
    TORQUE_N_MM=ffi.new("char[]","N-mm".encode("ascii"))
    """N-mm\\0"""
    TORQUE_KG_CM=ffi.new("char[]","kg-cm".encode("ascii"))
    """kg-cm\\0"""
    DIST_M=ffi.new("char[]","m".encode("ascii"))
    """m\\0"""
    DIST_CM=ffi.new("char[]","cm".encode("ascii"))
    """cm\\0"""
    DIST_MM=ffi.new("char[]","mm".encode("ascii"))
    """mm\\0"""
    DIST_FT=ffi.new("char[]","ft".encode("ascii"))
    """ft\\0"""
    ANGLE_DEG=ffi.new("char[]","deg".encode("ascii"))
    """deg\\0"""
    ANGLE_RAD=ffi.new("char[]","rad".encode("ascii"))
    """rad\\0"""
class FTSensor:
    """
    Wrapper class to hold force-torque sensor calibration and values.
    """

    def __init__(self, CalFilePath=None, index=1):
        """
        Force-torque sensor instance constructor. Optionally takes calibration file info.

        @param CalFilePath: Path string passed to self.createCalibration
        @type CalFilePath: string
        @param index: Index value passed to self.createCalibration
        @type index: int
        """
        self.logger = logging.getLogger("atiiaftt.FTSensor")
        self.calibration = ffi.NULL
        # `is not None` instead of `!= None` (identity check is the Python idiom).
        if CalFilePath is not None:
            self.createCalibration(CalFilePath, index)
        self.voltage_vector = []
        self.bias_vector = []
        self.ft_vector = []

    def createCalibration(self, CalFilePath, index):
        """
        Wraps c function 'createCalibration()'.

        @param CalFilePath: Path string of calibration file for the sensor
        @type CalFilePath: string
        @param index: Index value of the requested calibration data.
        @type index: int
        @raises IOError: exception raised if 'CalFilePath' fails os.path.exists()
        @raises IndexError: exception raised if index value not found in calibration file
        """
        if not os.path.exists(CalFilePath):
            # Lazy %-style args: the message is only built if the record is emitted.
            self.logger.error("Can't find calibration file: %s", CalFilePath)
            raise IOError("Calibration file not found.")
        cffi_cal_filepath = ffi.new("char[]", CalFilePath.encode("ascii"))
        self.calibration = lib.createCalibration(cffi_cal_filepath, index)
        if self.calibration == ffi.NULL:
            self.logger.error("Index not found in calibration file.")
            raise IndexError("Passed calibration index not found.")
        # else ok

    def setToolTransform(self, vector, distunits, angleunits):
        """
        Wraps c function 'SetToolTransform()'.

        @param vector:
        @type vector: list of floats
        @param distunits:
        @type distunits: atiiaftt.FTUnit.DIST_* class member
        @param angleunits:
        @type angleunits: atiiaftt.FTUnit.ANGLE_* class member
        @raises RuntimeError: exception raised if function is called before loading a calibration dataset.
        @raises ValueError: exception raised if unknown unit is passed.
        """
        cffi_vector = ffi.new("float[]", vector)
        # Return codes mirror the C API: 1 = no calibration, 2/3 = bad units.
        ret_val = lib.SetToolTransform(self.calibration, cffi_vector, distunits, angleunits)
        if ret_val == 1:
            raise RuntimeError("Calibration data not loaded.")
        elif ret_val == 2:
            raise ValueError("Invalid distance unit.")
        elif ret_val == 3:
            raise ValueError("Invalid angle unit.")

    def setForceUnits(self, newunits):
        """
        Wraps c function 'SetForceUnits()'

        @param newunits: New force unit to used. Stored in 'calibration.cfg.ForceUnits'
        @type newunits: atiiaftt.FTUnit.FORCE_* class member
        @raises RuntimeError: exception raised if function is called before loading a calibration dataset.
        @raises ValueError: exception raised if unknown unit is passed.
        """
        ret_val = lib.SetForceUnits(self.calibration, newunits)
        if ret_val == 1:
            raise RuntimeError("Calibration data not loaded.")
        elif ret_val == 2:
            raise ValueError("Invalid force unit.")

    def setTorqueUnits(self, newunits):
        """
        Wraps c function 'SetTorqueUnits()'

        @param newunits: New torque unit to used. Stored in 'calibration.cfg.TorqueUnits'
        @type newunits: atiiaftt.FTUnit.TORQUE_* class member
        @raises RuntimeError: Exception raised if function is called before loading a calibration dataset.
        @raises ValueError: Exception raised if unknown unit is passed.
        """
        ret_val = lib.SetTorqueUnits(self.calibration, newunits)
        if ret_val == 1:
            raise RuntimeError("Calibration data not loaded.")
        elif ret_val == 2:
            raise ValueError("Invalid torque unit.")

    def bias(self, voltages):
        """
        Wraps c function 'Bias()'

        @param voltages: Averaged set of ADC readings from a sensor
        @type voltages: list of floats, also stored in self.bias_vector
        @raises RuntimeError: exception raised if function is called before loading a calibration dataset.
        """
        self.bias_vector = voltages
        if self.calibration == ffi.NULL:
            self.logger.error("Calibration data not loaded.")
            raise RuntimeError("Calibration data not loaded.")
        cffi_bias = ffi.new("float[]", voltages)
        self.logger.debug("%s", cffi_bias)
        lib.Bias(self.calibration, cffi_bias)

    def convertToFt(self, voltages):
        """
        Wraps c function 'ConvertToFT()'.
        The most recent voltages are also stored in self.voltage_vector
        The most recent conversion is also stored in self.ft_vector

        @param voltages: values read from an ADC connected to a sensor
        @type voltages: list of floats
        @raises RuntimeError: exception raised if function is called before loading a calibration dataset.
        @return: Force-torque values, also stored in self.ft_vector
        @rtype: list of floats, format [F.x,F.y,F.z,T.x,T.y,T.z]
        """
        self.voltage_vector = voltages
        if self.calibration == ffi.NULL:
            self.logger.error("Calibration data not loaded.")
            raise RuntimeError("Calibration data not loaded.")
        cffi_ft_vector = ffi.new("float[]", [0, 0, 0, 0, 0, 0])
        cffi_voltages = ffi.new("float[]", voltages)
        lib.ConvertToFT(self.calibration, cffi_voltages, cffi_ft_vector)
        self.ft_vector = list(cffi_ft_vector)
        return self.ft_vector
|
#!/usr/local/bin/python
import sys, os, getopt, signal, time, re, sqlite3
import distutils.core
import xml.etree.cElementTree as ET
from bs4 import BeautifulSoup, NavigableString, Tag
# The categories that can be found in the ClassHierarchy/index.html file.
# Maps category name -> regexps matched against a page's syntax block
# (both plain C++ declarations and the UE4 UCLASS/USTRUCT macros).
maincategories = {
    "Class": [
        re.compile("^.*class .+$", re.MULTILINE),
        re.compile("^.*UCLASS.*$", re.MULTILINE)],
    "Struct": [
        re.compile("^.*struct .+$", re.MULTILINE),
        re.compile("^.*USTRUCT.*$", re.MULTILINE)],
    "Union": [
        re.compile("^.*union .+$", re.MULTILINE)]
}
class detail:
    """One member-detail category: its HTML element id, index label, and regexps."""
    def __init__(self, htmlname, indexname, regexps):
        self.htmlname = htmlname    # id of the section in the doc page HTML
        self.indexname = indexname  # type name stored in the search index
        self.regexps = regexps      # optional matching patterns (unused so far)
# Additional detail categories, that can be found in each of the referenced files.
detailcategories = [
    detail("constructor", "Constructor", []),
    detail("constants", "Constant", []),
    detail("variables", "Variable", []),
    detail("methods", "Method", []),
    detail("operators", "Operator", [])
]
# Module-level state shared across the scraping functions (set in main()).
htmlroot = None    # root of the extracted CHM HTML documentation
docsetpath = None  # output .docset bundle path
docpath = None     # Contents/Resources/Documents inside the docset
dbpath = None      # Contents/Resources/docSet.dsidx (sqlite index)
db = None          # sqlite connection
cur = None         # sqlite cursor
count = 0          # number of index entries inserted
verbose = 0        # 1 = per-entry logging instead of the progress bar
def usage():
    """Print the command-line help text (Python 2 print statements)."""
    print 'Usage: ue4docset.py [options] <htmlroot> <docsetpath>\n'
    print ('\tParses the extracted chm documentation at ' + '\033[4m' + 'htmlroot' + '\033[0m' +
           ' and generates a docset at ' + '\033[4m' + 'docsetpath' + '\033[0m' + '.')
    print '\nOptions:'
    print '\t-i\tDocumentation identifier.'
    print '\t-n\tDocumentation display name.'
    print '\t-s\tDocumentation version.'
    print '\t-f\tFallback URL.'
    print '\t-v\tVerbose.'
    print '\nExample:'
    print '\tue4docset.py -n "Unreal Engine" -s "4.0.2" ~/Desktop/HTML ~/Desktop/UE4.docset'
def signal_handler(signal, frame):
    """Exit with status 2 on Ctrl-C. (NOTE: param `signal` shadows the module.)"""
    print('\nAborted by user.')
    sys.exit(2)
# Install the handler for SIGINT so long scrapes can be aborted cleanly.
signal.signal(signal.SIGINT, signal_handler)
# Generate an Info.plist file from the passed, optional parameters.
def generate_plist(opts):
    """Write Contents/Info.plist for the docset from the -i/-n/-s/-f options."""
    print "Generating Info.plist"
    # Defaults used when no overriding option is given.
    identifier = "com.epic.unrealengine4"
    name = "UE4"
    fallbackURL = "https://docs.unrealengine.com/latest/INT/"
    version = None
    for o, a in opts:
        if o == "-i":
            identifier = a
        elif o == "-n":
            name = a
        elif o == "-s":
            version = a
        elif o == "-f":
            fallbackURL = a
    plistpath = os.path.join(docsetpath, "Contents/Info.plist")
    # Build the plist as alternating <key>/<string> children of one <dict>.
    plist = ET.Element("plist")
    plist.set("version", "1.0")
    root = ET.SubElement(plist, "dict")
    key = ET.SubElement(root, "key")
    key.text = "CFBundleIdentifier"
    string = ET.SubElement(root, "string")
    string.text = identifier
    key = ET.SubElement(root, "key")
    key.text = "CFBundleName"
    string = ET.SubElement(root, "string")
    string.text = name
    # Version keys are only emitted when -s was supplied.
    if not version is None:
        key = ET.SubElement(root, "key")
        key.text = "CFBundleShortVersionString"
        string = ET.SubElement(root, "string")
        string.text = version
        key = ET.SubElement(root, "key")
        key.text = "CFBundleVersion"
        string = ET.SubElement(root, "string")
        string.text = version
    key = ET.SubElement(root, "key")
    key.text = "DashDocSetFamily"
    string = ET.SubElement(root, "string")
    string.text = "appledoc"
    key = ET.SubElement(root, "key")
    key.text = "DocSetPlatformFamily"
    string = ET.SubElement(root, "string")
    string.text = name
    key = ET.SubElement(root, "key")
    key.text = "DocSetFallbackURL"
    string = ET.SubElement(root, "string")
    string.text = fallbackURL
    key = ET.SubElement(root, "key")
    key.text = "dashIndexFilePath"
    string = ET.SubElement(root, "string")
    string.text = "INT/API/index.html"
    key = ET.SubElement(root, "key")
    key.text = "isDashDocset"
    # Boolean plist values are empty elements; `value` is intentionally unused.
    value = ET.SubElement(root, "true")
    tree = ET.ElementTree(plist)
    # NOTE(review): tree.write(..., 'utf-8') emits its own <?xml?> declaration,
    # so the file may contain two declarations — confirm Dash tolerates this.
    with open(plistpath, "w") as f:
        f.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        f.write('<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">')
        tree.write(f, 'utf-8')
# Try to find out which category (class, struct..) a doc page belongs to, by parsing its syntax.
def guess_category(syntax):
    """Return the first main category whose patterns match *syntax*, else None."""
    for category, patterns in maincategories.iteritems():
        # any() short-circuits exactly like the original inner loop did.
        if any(pattern.search(syntax) for pattern in patterns):
            return category
    return None
# Insert an entry into the documentation index database.
def insert_index(name, category, path):
    """Add (name, category, path) to searchIndex if the target page exists."""
    global verbose
    # Skip dangling links: the indexed page must exist inside the docset.
    if not os.path.isfile(os.path.join(docpath, path)):
        if verbose:
            print "Documenation at path " + path + " does not exist. Skipping."
        return
    global count
    if verbose:
        print (str(count) + ": Found " + category +
               (" " * (15 - len(category))) + name +
               (" " * (40 - len(name))) + " at " + path)
    # OR IGNORE + the unique (name, type, path) index de-duplicates entries.
    cur.execute("INSERT OR IGNORE INTO searchIndex(name, type, path) VALUES (?,?,?)",
                (name, category, path))
    count += 1
# Find all the additional categories of a specific type (methods, variables, etc.) in a class doc page.
def parse_file_detail(abspath, soup, detail):
    """Index every linked member in the page section identified by detail.htmlname."""
    # Best-effort: pages without this section (or with unexpected markup)
    # are silently skipped — the broad except is deliberate here.
    try:
        for namecell in soup.find(id=detail.htmlname).find_all(class_="name-cell"):
            if namecell.a:
                name = namecell.a.text
                relpath = namecell.a['href']
                # Normalize the link relative to the docset root for the index.
                thepath = os.path.relpath(os.path.normpath(os.path.join(os.path.dirname(abspath), relpath)), htmlroot)
                insert_index(name, detail.indexname, thepath)
    except Exception: pass
# Go through the doc page of a class/struct and parse all the methods, variables etc..
def parse_file_details(abspath, soup):
    """Run parse_file_detail for every configured member category."""
    for detail in detailcategories:
        parse_file_detail(abspath, soup, detail)
# Parse a class/struct doc page. Find its name in the H1 tag, guess its category based on the syntax.
def parse_file(abspath):
    """Index one doc page (and its members) if it looks like a class/struct page."""
    # Best-effort: non-API pages raise (missing H1/syntax block) and are skipped.
    try:
        page = open(abspath)
        soup = BeautifulSoup(page);
        name = soup.find(id="H1TitleId").text
        cattext = soup.find(class_='simplecode_api').text
        category = guess_category(cattext)
        if category is not None and name is not None:
            thepath = os.path.relpath(abspath, htmlroot)
            insert_index(name, category, thepath)
            parse_file_details(abspath, soup)
    except Exception: pass
def print_progress(progress):
    """Redraw a 50-char console progress bar (suppressed in verbose mode).

    progress is the completion ratio in [0.0, 1.0].
    """
    global verbose
    if not verbose:
        p = int(progress * 100)
        sys.stdout.write('\r')
        # Floor division keeps the bar width an int on Python 2 AND 3;
        # the original `p/2` yields a float under Python 3 and breaks '=' * width.
        sys.stdout.write("[%-50s] %d%% " % ('='*(p//2), p))
        sys.stdout.flush()
# Go thought all the links of an index file (ClassHierarchy/index.html) and parse all linked doc pages.
def scrape_index_file(abspath):
# print "Scraping file at " + abspath
page = open(abspath)
soup = BeautifulSoup(page)
links = soup.find_all('a')
for idx, link in enumerate(links):
print_progress(float(idx) / float(len(links)))
relpath = link['href']
if not relpath.startswith('javascript') and not relpath.startswith('http'):
foundpath = os.path.normpath(os.path.join(os.path.dirname(abspath), relpath))
parse_file(foundpath)
print_progress(1.0)
print ''
def scrape_folder(abspath):
    """Recursively parse every file found beneath *abspath*."""
    for current_dir, _subdirs, filenames in os.walk(abspath):
        for filename in filenames:
            parse_file(os.path.join(current_dir, filename))
def main():
    """Parse CLI args, copy the HTML tree into the docset, and build the index."""
    global htmlroot
    global docsetpath, docpath, dbpath
    global db, cur
    global verbose
    try:
        opts, args = getopt.getopt(sys.argv[1:], "vi:n:s:f:")
        if len(args) < 2:
            usage()
            sys.exit(2)
        htmlroot = args[0]
        # Sanity check: an extracted CHM tree always contains an INT folder.
        if not os.path.isdir(os.path.join(htmlroot, 'INT')):
            print 'Error: Extracted CHM documentation not found. Did you specify the correct path?',
            print 'It should contain a number of files with a # prefix and a folder called INT'
            sys.exit(2)
        docsetpath = args[1]
        docsetname, docsetext = os.path.splitext(docsetpath)
        if not docsetext == '.docset':
            print 'Error: docsetpath argument should specify the path of the docset file.',
            print 'E.g. ~/Desktop/UE4.docset'
            sys.exit(2)
        for o, a in opts:
            if o == "-v":
                verbose = 1
    except getopt.GetoptError as err:
        print str(err)
        usage()
        sys.exit(2)
    docpath = os.path.join(docsetpath, "Contents/Resources/Documents")
    if not os.path.exists(docpath):
        os.makedirs(docpath)
    dbpath = os.path.join(docsetpath, "Contents/Resources/docSet.dsidx")
    print 'Copying documentation from ' + htmlroot + ' to ' + docpath + '.'
    print 'This may take a few minutes.'
    # NOTE(review): only `distutils.core` is imported at the top of the file —
    # confirm distutils.dir_util is actually importable here at runtime.
    distutils.dir_util.copy_tree(htmlroot, docpath)
    chmroot = os.path.join(htmlroot, 'INT/API')
    classlistpath = os.path.join(chmroot, 'ClassHierarchy/')
    classlistindex = os.path.join(classlistpath, 'index.html')
    generate_plist(opts)
    db = sqlite3.connect(dbpath)
    cur = db.cursor()
    # Drop any stale index from a previous run; ignore "no such table".
    try: cur.execute('DROP TABLE searchIndex;')
    except: pass
    cur.execute('CREATE TABLE searchIndex(id INTEGER PRIMARY KEY, name TEXT, type TEXT, path TEXT);')
    cur.execute('CREATE UNIQUE INDEX anchor ON searchIndex (name, type, path);')
    print 'Indexing documentation'
    #time.sleep(3)
    scrape_index_file(classlistindex)
    #scrape_folder(chmroot)
    db.commit()
    db.close()
# Run the generator and report the elapsed wall-clock time.
start_time = time.time();
main();
print "Generation took", time.time() - start_time, "seconds."
|
# -*- coding: utf-8 -*-
#/*
# * Copyright (c) 2022 Renwei
# *
# * This is a free software; you can redistribute it and/or modify
# * it under the terms of the MIT license. See LICENSE for details.
# */
class t_print_progress_bar():
    """Console progress bar: call show() once per completed work item."""
    # Class-level defaults; __init__ overwrites these per instance.
    self_percentage = 0
    self_total_process = 0
    self_current_process = 0

    def __init__(self, total_process):
        # 10000 is a sentinel: it never equals a computed percentage (0-100),
        # so the very first show() call always redraws the bar.
        self.self_percentage = 10000
        self.self_total_process = total_process
        self.self_current_process = 0

    def show(self):
        """Advance progress by one step and redraw the bar if the percentage changed."""
        self.self_current_process += 1
        if self.self_total_process <= self.self_current_process:
            percentage = 100
        else:
            percentage = int((int(self.self_current_process) * 100) / int(self.self_total_process))
        if self.self_percentage != percentage:
            self.self_percentage = percentage
            # Redraw the bar from 0 up to the current percentage; '\r' keeps
            # everything on one console line.
            for step in range(0, percentage + 1):
                print('\r[%3d%%] %s' % (step, '>' * step), flush=True, end='')
            if percentage >= 100:
                # Progress bar finished; move to the next line.
                print(f'')
def t_print_class_member(class_struct):
    """Print the non-underscore attribute names of *class_struct*."""
    public_names = [name for name in dir(class_struct) if not name.startswith('_')]
    print(public_names)
if __name__ == "__main__":
    '''
    This if statement is needed for testing, to locate the modules needed
    if we are running the file directly.
    '''
    import sys
    from os import path
    # Put the project root (two levels up) on sys.path for direct runs.
    sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
    from dalek import settings
    from dalek import sound_player
    import RPi.GPIO as GPIO
# these are the globally used modules
from challenges import challenge
import time
from dalek import spi
from dalek import drive
from dalek import debug
class Challenge(challenge.ChallengeBase):
    '''
    Do Not change the class name, it is called in the controller.py.
    The buttons can be overridden if you need to add functionally to them.
    The main loop is the run() function, all code goes in it.
    Look at the ChallengeBase class in challenge.py for all functions that can be called.
    '''
    def __init__(self, dalek_settings, dalek_sounds):
        super().__init__()
        self.dalek_settings = dalek_settings
        self.dalek_sounds = dalek_sounds
        # Background reader for the Arduino ping sensors (started in run()).
        self.arduino_sensor_data = spi.SensorData()

    def stop_running(self):
        '''
        When this is called it ends this thread
        This is also called if the PS3 button is pressed during a challenge,
        so add any cleanup code here.
        '''
        drive.stop()
        if self.arduino_sensor_data.is_alive():
            self.arduino_sensor_data.stop_running()
            self.arduino_sensor_data.join() # wait for process to finish
        self.running = False
        debug.print_to_all_devices("Done...")

    def run(self):
        '''Main loop: drive forward, steering away from walls until the end wall is close.'''
        self.running = True
        debug.print_to_all_devices("Challenge 'Straight line' Started.")
        # Start the sensor-reading process so ping values update in the background.
        self.arduino_sensor_data.start()
        time.sleep(0.2)
        while self.running:
            drive.forward(self.dalek_settings.max_speed)
            time.sleep(.1)
            # Front sensor close => end of the course reached (values presumably
            # in cm, per the debug message below — confirm against spi.SensorData).
            if self.arduino_sensor_data.frontPing <= 18:
                drive.stop()
                debug.print_to_all_devices("Center Distance:{}cm Run Finished"
                                           .format(self.arduino_sensor_data.frontPing))
                self.stop_running()
            # Too close to the left wall: briefly steer right, then resume.
            if self.arduino_sensor_data.leftPing <= 5:
                debug.print_to_all_devices("turnForwardRight", "TrR")
                drive.turnForwardRight(self.dalek_settings.outer_turn_speed,
                                       self.dalek_settings.inner_turn_speed)
                time.sleep(.05)
                drive.forward(self.dalek_settings.max_speed)
            # Too close to the right wall: briefly steer left, then resume.
            if self.arduino_sensor_data.rightPing <= 5:
                debug.print_to_all_devices("turnForwardLeft", "TrL")
                drive.turnForwardLeft(self.dalek_settings.inner_turn_speed,
                                      self.dalek_settings.outer_turn_speed)
                time.sleep(.05)
                drive.forward(self.dalek_settings.max_speed)
def main():
    """Manual test harness; the wiring below is kept commented out as a reference
    for running this challenge directly on the robot."""
    pass
    # GPIO.setmode(GPIO.BOARD)  # Set the GPIO pins as numbering - Also set in drive.py
    # GPIO.setwarnings(False)
    # debug.debug_on = True
    # dalek_settings = settings.Settings()
    # dalek_settings.slow_mode()
    # spi.init()
    # drive.init()
    # dalek_sounds = sound_player.Mp3Player(True)  # initialize the sound player
    # challenge = Challenge(dalek_settings, dalek_sounds)
    # challenge.start()
    # # time.sleep(4)
    # # challenge.button_circle_pressed()
    # # challenge.stop_running()
    # challenge.join()  # wait for thread to finish.
    # debug.print_to_all_devices("\nFINISHED")
# Direct run: manual test harness. Imported by controller.py: just announce.
if __name__ == "__main__":
    main()
else:
    debug.print_to_all_devices('importing Straight Line Challenge')
|
import io
import segno
def qrcode_data(text, imgtype="svg"):
    """Render *text* as a QR code (error level Q) and return the encoded image bytes."""
    stream = io.BytesIO()
    code = segno.make(text, error='Q')
    code.save(stream, scale=4, kind=imgtype)
    return stream.getvalue()
|
from os import environ
from django.conf.urls import include, url
from django.views import generic
from rest_framework.documentation import include_docs_urls
urlpatterns = [
    # Bare root redirects (temporary, 302) to the browsable API docs.
    url(r'^$', generic.RedirectView.as_view(
        url='/api/', permanent=False)),
    # Auto-discovered endpoints mounted under /api/food/.
    url(r'^api/food/', include('rest_framework_discovery.urls')),
    # DRF-generated docs; auth/permissions emptied so docs are publicly viewable.
    url(r'^api/', include_docs_urls(
        title=environ.get('APP_TITLE', 'API Docs'),
        authentication_classes=[],
        permission_classes=[],
    )),
]
|
"""
Flag and collect methods in your classes
"""
def factory(flag='_flagged', collection='_flagged'):
    """
    Factory for creating class-decorator pair for method flagging and collection

    Parameters
    ----------
    flag:
        name of attribute that ids decorated methods
    collection:
        name of attribute for the collection

    Returns
    -------
    FlaggedMixin : class
        The mixin class for handling collection of flagged methods
    flagger : function
        Decorator used for flagging

    Examples
    --------
    >>> # implicit alias declaration
    ... AliasManager, alias = factory(collection='_aliases')
    ...
    ... class Child(AliasManager):
    ...     def __init__(self):
    ...         super().__init__()
    ...         for (alias,), method in self._aliases.items():
    ...             setattr(self, alias, method)
    ...
    ...     @alias('bar')
    ...     def foo(self):
    ...         '''foo doc'''
    ...         print('foo!')
    ...
    ... class GrandChild(Child): pass
    ... GrandChild().bar()  # prints 'foo!'
    """
    # ensure we have str for the attributes
    assert isinstance(flag, str)
    assert isinstance(collection, str)
    # **************************************************************************
    class MethodFlaggerMeta(type):
        """Metaclass to collect methods flagged with decorator"""
        def __new__(cls, name, bases, namespace, **kws):
            cls = super().__new__(cls, name, bases, namespace)
            # emulate inheritance for the flagged methods: base-class entries
            # first, then this class's own flagged methods override them
            coll = {}
            for base in bases:
                coll.update(getattr(base, collection, {}))
            # map flagger args tuple -> method NAME (resolved to a bound
            # method later, in FlaggedMixin.__init__)
            coll.update({getattr(method, flag): method.__name__
                         for _, method in namespace.items()
                         if hasattr(method, flag)})
            # NOTE: will only work for hashable args passed to flagger
            # set the collection attribute as a class variable
            setattr(cls, collection, coll)
            return cls
    # **************************************************************************
    class FlaggedMixin(metaclass=MethodFlaggerMeta):
        """
        Mixin that collects the flagged methods in a dict and assigns it to the
        %s attribute.
        """
        # FIXME: can do this in MethodFlaggerMeta.__call__
        def __init__(self, *args, **kw):
            # bind the flagged methods to the instance: replace the class-level
            # {args: name} mapping with a per-instance {args: bound method} dict
            flagged_methods = {
                name: getattr(self, method)
                for (name, method) in getattr(self, collection).items()}
            setattr(self, collection, flagged_methods)
    # TODO: use case without arguments
    def flagger(*args):
        """
        Decorator for flagging methods. Methods decorated with this function
        will have the %r attribute set as whatever arguments are passed.
        The decorator will preserve docstrings etc., as it returns the original
        function.
        """
        # TODO: check if args hashable
        def decorator(func):
            setattr(func, flag, args)
            return func
        return decorator
    # substitute the actual attribute names into the generic docstrings
    flagger.__doc__ = flagger.__doc__ % flag
    FlaggedMixin.__doc__ = FlaggedMixin.__doc__ % collection
    return FlaggedMixin, flagger
def altFactory(flag='_flagged', collection='_flagged'):
    """
    Factory for creating class-decorator pair for method flagging and collection.
    This implementation avoids using a metaclass (in some cases this plays better
    with multiple inheritance. (metaclass conflicts)). However, it may not work
    if your class has properties that reference values set after initialization.
    It also does not support inheritance of flagged methods.

    Examples
    --------
    AliasManager, alias = altFactory(collection='aliases')

    class Foo( AliasManager ):
        def __init__(self):
            super().__init__()
            for method, (alias,) in self.aliases.items():
                setattr( self, alias, method )

        @alias( 'bar' )
        def foo(*args):
            print( 'calling foo(',args,')' )

    Foo().bar()

    Notes
    -----
    When using multiple decorators for a given method, the flagger will need to
    be the outermost (top) one.
    """
    # ensure we have str for the attributes
    assert isinstance(flag, str)
    assert isinstance(collection, str)
    class FlaggedMixin(object):
        """
        Mixin that binds the flagged classmethods to an instance of the class
        """
        def __init__(self, *args, **kw):
            # collect the flagged methods via introspect
            # NOTE: unlike factory(), the mapping here is inverted:
            # {bound method: flagger args tuple}
            _collection = {}
            for name in dir(self):
                method = getattr(self, name)
                if hasattr(method, flag):
                    # NOTE: will only work for hashable args passed to flagger
                    _collection[method] = getattr(method, flag)
            setattr(self, collection, _collection)
    def flagger(*args):
        """Decorator for flagging methods"""
        #
        def decorator(func):
            # adds an attribute to the decorated function with the name
            # passed in as `flag`
            setattr(func, flag, args)
            return func
        return decorator
    return FlaggedMixin, flagger
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 27 13:55:21 2019

Joystick / rating-scale demo: draws a PsychoPy RatingScale and polls the
joystick hat state for MAX_DURATION seconds, printing each reading.

@author: u0118077
"""
from psychopy import visual, event, core, monitors
from psychopy.hardware import joystick

# Make screen profile ----
widthPix = 2560       # screen width in px
heightPix = 2440      # screen height in px
monitorwidth = 60     # monitor width in cm
viewdist = 60         # viewing distance in cm
monitorname = 'ProArt27'
scrn = 0              # 0 to use main screen, 1 to use external screen
mon = monitors.Monitor(monitorname, width=monitorwidth, distance=viewdist)
mon.setSizePix((widthPix, heightPix))
mon.save()

joystick.backend = 'pyglet'  # must match the Window backend

# Load initial setting ----
# Preparing Window
my_win = visual.Window(size=(600, 400), pos=(0, 0), monitor=mon, units='pix',
                       screen=1)

nJoys = joystick.getNumJoysticks()  # to check if we have any
joy_id = 0  # renamed from `id` to avoid shadowing the builtin
joy = joystick.Joystick(joy_id)     # id must be <= nJoys - 1
nAxes = joy.getNumAxes()            # for interest

# Variables: FIXED and others
sscale = visual.RatingScale(my_win, pos=(0, -100), textSize=0.75,
                            textColor='#808080', marker='glow', markerStart=4,
                            stretch=1.2, acceptPreText=' ', acceptText=' ',
                            labels=(), scale=None, showValue=False)
MAX_DURATION = 5
mouse = event.Mouse(visible=True, win=my_win)
experiment_timer = core.Clock()
experiment_timer.reset()
mouse.clickReset()

j = 0
while experiment_timer.getTime() < MAX_DURATION:
    sscale.draw()
    # Poll the joystick hat. Alternatives while debugging hardware:
    # joy.getButton(0) (0=x 1=A 2=b 3=y) or joy.getAxis(0) for the left stick.
    j = joy.getAllHats()
    print(j)
    my_win.flip()
    # Early exit if a frame overran the deadline mid-iteration.
    if experiment_timer.getTime() > MAX_DURATION:
        break

# reset the scale + get the rating + ReactionTimes
sscale.reset()
my_win.close()
from math import factorial
def combinations(n: int, r: int) -> int:
    """
    Return the binomial coefficient C(n, r): the number of ways to choose
    ``r`` items from ``n`` without regard to order.

    Follows the standard convention (matching ``math.comb``) that
    C(n, r) == 0 when ``r > n`` instead of crashing inside ``factorial``
    with a negative argument.

    :param n: pool size; must be non-negative.
    :param r: selection size; must be non-negative.
    :return: number of combinations.
    :raises ValueError: if ``n`` or ``r`` is negative (same exception type
        the original ``factorial`` call raised).
    """
    if n < 0 or r < 0:
        raise ValueError("n and r must be non-negative")
    if r > n:
        # Choosing more items than available has zero combinations.
        return 0
    return factorial(n) // (factorial(r) * factorial(n - r))
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.58
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class GiftsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def consume_gift(self, gift_id, customer_id, **kwargs):
"""
Consume Gift
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.consume_gift(gift_id, customer_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int gift_id: Gift ID to fetch (required)
:param int customer_id: Customer ID to fetch (required)
:param str token: Gift token to check (optional)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.consume_gift_with_http_info(gift_id, customer_id, **kwargs)
else:
(data) = self.consume_gift_with_http_info(gift_id, customer_id, **kwargs)
return data
def consume_gift_with_http_info(self, gift_id, customer_id, **kwargs):
"""
Consume Gift
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.consume_gift_with_http_info(gift_id, customer_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int gift_id: Gift ID to fetch (required)
:param int customer_id: Customer ID to fetch (required)
:param str token: Gift token to check (optional)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['gift_id', 'customer_id', 'token']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method consume_gift" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'gift_id' is set
if ('gift_id' not in params) or (params['gift_id'] is None):
raise ValueError("Missing the required parameter `gift_id` when calling `consume_gift`")
# verify the required parameter 'customer_id' is set
if ('customer_id' not in params) or (params['customer_id'] is None):
raise ValueError("Missing the required parameter `customer_id` when calling `consume_gift`")
collection_formats = {}
resource_path = '/gifts/{gift_id}/consume'.replace('{format}', 'json')
path_params = {}
if 'gift_id' in params:
path_params['gift_id'] = params['gift_id']
query_params = {}
if 'customer_id' in params:
query_params['customer_id'] = params['customer_id']
if 'token' in params:
query_params['token'] = params['token']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_gift(self, body, **kwargs):
"""
Create Gift
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_gift(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Gift1 body: Gift settings (required)
:return: Gift
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_gift_with_http_info(body, **kwargs)
else:
(data) = self.create_gift_with_http_info(body, **kwargs)
return data
def create_gift_with_http_info(self, body, **kwargs):
"""
Create Gift
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_gift_with_http_info(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Gift1 body: Gift settings (required)
:return: Gift
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_gift" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_gift`")
collection_formats = {}
resource_path = '/gifts'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
self.api_client.set_default_header('Content-Type', 'application/json')
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Gift',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_gift(self, gift_id, **kwargs):
"""
Delete Gift
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_gift(gift_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int gift_id: Gift ID to fetch (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_gift_with_http_info(gift_id, **kwargs)
else:
(data) = self.delete_gift_with_http_info(gift_id, **kwargs)
return data
def delete_gift_with_http_info(self, gift_id, **kwargs):
"""
Delete Gift
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_gift_with_http_info(gift_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int gift_id: Gift ID to fetch (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['gift_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_gift" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'gift_id' is set
if ('gift_id' not in params) or (params['gift_id'] is None):
raise ValueError("Missing the required parameter `gift_id` when calling `delete_gift`")
collection_formats = {}
resource_path = '/gifts/{gift_id}'.replace('{format}', 'json')
path_params = {}
if 'gift_id' in params:
path_params['gift_id'] = params['gift_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_gifts_in_cart(self, cart_id, **kwargs):
"""
Delete Gifts in cart
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_gifts_in_cart(cart_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int cart_id: Cart ID to fetch (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_gifts_in_cart_with_http_info(cart_id, **kwargs)
else:
(data) = self.delete_gifts_in_cart_with_http_info(cart_id, **kwargs)
return data
def delete_gifts_in_cart_with_http_info(self, cart_id, **kwargs):
"""
Delete Gifts in cart
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_gifts_in_cart_with_http_info(cart_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int cart_id: Cart ID to fetch (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cart_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_gifts_in_cart" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cart_id' is set
if ('cart_id' not in params) or (params['cart_id'] is None):
raise ValueError("Missing the required parameter `cart_id` when calling `delete_gifts_in_cart`")
collection_formats = {}
resource_path = '/carts/{cart_id}/gift/'.replace('{format}', 'json')
path_params = {}
if 'cart_id' in params:
path_params['cart_id'] = params['cart_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_gift(self, gift_id, **kwargs):
"""
Get Gift
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_gift(gift_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int gift_id: Gift ID to fetch (required)
:return: Gift
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_gift_with_http_info(gift_id, **kwargs)
else:
(data) = self.get_gift_with_http_info(gift_id, **kwargs)
return data
def get_gift_with_http_info(self, gift_id, **kwargs):
"""
Get Gift
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_gift_with_http_info(gift_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int gift_id: Gift ID to fetch (required)
:return: Gift
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['gift_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_gift" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'gift_id' is set
if ('gift_id' not in params) or (params['gift_id'] is None):
raise ValueError("Missing the required parameter `gift_id` when calling `get_gift`")
collection_formats = {}
resource_path = '/gifts/{gift_id}'.replace('{format}', 'json')
path_params = {}
if 'gift_id' in params:
path_params['gift_id'] = params['gift_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Gift',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_gift_in_cart(self, cart_id, **kwargs):
"""
Get Gift in Cart
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_gift_in_cart(cart_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int cart_id: Cart ID to fetch (required)
:return: Gift
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_gift_in_cart_with_http_info(cart_id, **kwargs)
else:
(data) = self.get_gift_in_cart_with_http_info(cart_id, **kwargs)
return data
def get_gift_in_cart_with_http_info(self, cart_id, **kwargs):
"""
Get Gift in Cart
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_gift_in_cart_with_http_info(cart_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int cart_id: Cart ID to fetch (required)
:return: Gift
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cart_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_gift_in_cart" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cart_id' is set
if ('cart_id' not in params) or (params['cart_id'] is None):
raise ValueError("Missing the required parameter `cart_id` when calling `get_gift_in_cart`")
collection_formats = {}
resource_path = '/carts/{cart_id}/gift'.replace('{format}', 'json')
path_params = {}
if 'cart_id' in params:
path_params['cart_id'] = params['cart_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Gift',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_gift_token(self, gift_id, **kwargs):
"""
Get Gift token
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_gift_token(gift_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int gift_id: Gift ID to fetch (required)
:return: GiftToken
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_gift_token_with_http_info(gift_id, **kwargs)
else:
(data) = self.get_gift_token_with_http_info(gift_id, **kwargs)
return data
def get_gift_token_with_http_info(self, gift_id, **kwargs):
"""
Get Gift token
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_gift_token_with_http_info(gift_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int gift_id: Gift ID to fetch (required)
:return: GiftToken
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['gift_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_gift_token" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'gift_id' is set
if ('gift_id' not in params) or (params['gift_id'] is None):
raise ValueError("Missing the required parameter `gift_id` when calling `get_gift_token`")
collection_formats = {}
resource_path = '/gifts/{gift_id}/token'.replace('{format}', 'json')
path_params = {}
if 'gift_id' in params:
path_params['gift_id'] = params['gift_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GiftToken',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_gifts(self, **kwargs):
"""
Get Gifts
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_gifts(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param bool unused_only: Filter only unused gifts
:param bool paid_only: Filter only gifts linked to an order which is paid by the user
:param int page:
:param int per_page:
:return: Gifts1
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_gifts_with_http_info(**kwargs)
else:
(data) = self.get_gifts_with_http_info(**kwargs)
return data
def get_gifts_with_http_info(self, **kwargs):
"""
Get Gifts
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_gifts_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param bool unused_only: Filter only unused gifts
:param bool paid_only: Filter only gifts linked to an order which is paid by the user
:param int page:
:param int per_page:
:return: Gifts1
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['unused_only', 'paid_only', 'page', 'per_page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_gifts" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/gifts'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'unused_only' in params:
query_params['unused_only'] = params['unused_only']
if 'paid_only' in params:
query_params['paid_only'] = params['paid_only']
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Gifts1',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_gifts_in_cart(self, cart_id, **kwargs):
"""
Get Gifts in cart
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_gifts_in_cart(cart_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int cart_id: Cart ID to fetch (required)
:return: list[Gift]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_gifts_in_cart_with_http_info(cart_id, **kwargs)
else:
(data) = self.get_gifts_in_cart_with_http_info(cart_id, **kwargs)
return data
def get_gifts_in_cart_with_http_info(self, cart_id, **kwargs):
"""
Get Gifts in cart
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_gifts_in_cart_with_http_info(cart_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int cart_id: Cart ID to fetch (required)
:return: list[Gift]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['cart_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_gifts_in_cart" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'cart_id' is set
if ('cart_id' not in params) or (params['cart_id'] is None):
raise ValueError("Missing the required parameter `cart_id` when calling `get_gifts_in_cart`")
collection_formats = {}
resource_path = '/carts/{cart_id}/gifts'.replace('{format}', 'json')
path_params = {}
if 'cart_id' in params:
path_params['cart_id'] = params['cart_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Gift]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def send_gift(self, gift_id, **kwargs):
"""
Send Gift
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.send_gift(gift_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int gift_id: Gift ID to fetch (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.send_gift_with_http_info(gift_id, **kwargs)
else:
(data) = self.send_gift_with_http_info(gift_id, **kwargs)
return data
def send_gift_with_http_info(self, gift_id, **kwargs):
"""
Send Gift
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.send_gift_with_http_info(gift_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int gift_id: Gift ID to fetch (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['gift_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method send_gift" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'gift_id' is set
if ('gift_id' not in params) or (params['gift_id'] is None):
raise ValueError("Missing the required parameter `gift_id` when calling `send_gift`")
collection_formats = {}
resource_path = '/gifts/{gift_id}/send'.replace('{format}', 'json')
path_params = {}
if 'gift_id' in params:
path_params['gift_id'] = params['gift_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_gift(self, gift_id, body, **kwargs):
"""
Update Gift
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_gift(gift_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int gift_id: Gift ID to fetch (required)
:param Gift2 body: Gift settings (required)
:return: Gift
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_gift_with_http_info(gift_id, body, **kwargs)
else:
(data) = self.update_gift_with_http_info(gift_id, body, **kwargs)
return data
    def update_gift_with_http_info(self, gift_id, body, **kwargs):
        """
        Update Gift.

        Performs a PUT to /gifts/{gift_id}. Synchronous by default; to make an
        asynchronous HTTP request, define a `callback` function to be invoked
        when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.update_gift_with_http_info(gift_id, body, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int gift_id: Gift ID to fetch (required)
        :param Gift2 body: Gift settings (required)
        :return: Gift
            If the method is called asynchronously,
            returns the request thread.
        """
        # Whitelist of accepted keyword arguments; anything else is a caller error.
        all_params = ['gift_id', 'body']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Generated-code idiom: capture named args via locals() and fold the
        # validated kwargs into the same dict.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_gift" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'gift_id' is set
        if ('gift_id' not in params) or (params['gift_id'] is None):
            raise ValueError("Missing the required parameter `gift_id` when calling `update_gift`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `update_gift`")
        collection_formats = {}
        resource_path = '/gifts/{gift_id}'.replace('{format}', 'json')
        path_params = {}
        if 'gift_id' in params:
            path_params['gift_id'] = params['gift_id']
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # NOTE(review): this mutates the api_client's *default* headers, so the
        # Content-Type persists for every subsequent request on this client,
        # instead of being set per-request via header_params — confirm intended.
        self.api_client.set_default_header('Content-Type', 'application/json')
        # Authentication setting
        auth_settings = ['ApiClientId', 'ApiClientSecret']
        return self.api_client.call_api(resource_path, 'PUT',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Gift',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
|
import pathlib
from xml.dom import minidom
from camundatools.base_rest import BaseRest
class Definition(BaseRest):
    """Thin REST client for Camunda process-definition and deployment endpoints.

    Wraps the engine's /deployment, /process-definition and /history APIs on
    top of the shared BaseRest transport.
    """

    base_url: str
    silent: bool

    def __init__(self, url=None, username=None, password=None, config_file="camundatools.cfg", silent=False):
        """Build headers and endpoint templates; falls back to config-file values.

        :param url: engine REST base URL (default from config, else localhost).
        :param username: basic-auth user (default from config, else "demo").
        :param password: basic-auth password (default from config, else "demo").
        :param config_file: camundatools configuration file name.
        :param silent: suppress output in BaseRest when True.
        """
        super().__init__(silent=silent, config_file=config_file)
        self.base_url = url or self.config.get("config", "API_BASE_URL", fallback="http://localhost:8080/engine-rest")
        username = username or self.config.get("config", "AUTH_USERNAME", fallback="demo")
        password = password or self.config.get("config", "AUTH_PASSWORD", fallback="demo")
        self.headers_plain = self.get_header(username, password, content_json=False)
        self.headers_json = self.get_header(username, password, content_json=True)
        self._API_CREATE_DEPLOYMENT_URL = '/deployment/create'
        self._API_DEPLOYMENT_URL = '/deployment/{id}'
        self._API_DEFINITIONS_URL = '/process-definition'
        self._API_VERSION_URL = '/version'
        self._API_STARTING_FORM_URL = '/process-definition/key/{key}/form-variables'
        self._API_STARTING_FORM_KEY_URL = '/process-definition/key/{key}/startForm'
        self._API_LIST_IDENTITY_HISTORY_URL = '/history/identity-link-log'
        self._API_GET_XML_URL = '/process-definition/key/{key}/xml'

    def get_camunda_version(self):
        """Return the engine version payload."""
        url = self.base_url + self._API_VERSION_URL
        return super().call('get', url, self.headers_json)

    def deploy(self, file_name, changed_only=True):
        """Deploy a .bpmn or .cmmn file to the engine.

        The deployment name is taken from the model's root element `name`
        attribute, suffixed with `camunda:versionTag` when present.

        :param file_name: path to the .bpmn/.cmmn file.
        :param changed_only: ask the engine to skip unchanged resources.
        """
        url = self.base_url + self._API_CREATE_DEPLOYMENT_URL
        extension = pathlib.Path(file_name).suffix
        root_element = 'bpmn:process' if extension == '.bpmn' else 'cmmn:case' if extension == '.cmmn' else ''
        mydoc = minidom.parse(file_name)
        items = mydoc.getElementsByTagName(root_element)
        deploy_name = items[0].attributes['name'].value
        if 'camunda:versionTag' in items[0].attributes:
            deploy_name = deploy_name + '_' + items[0].attributes['camunda:versionTag'].value
        data = {
            'deployment-name': deploy_name,
            'deploy-changed-only': 'true' if changed_only else 'false',
        }
        # BUGFIX: read via a context manager so the file handle is always
        # closed (the original open() was never closed).
        with open(file_name, 'r', encoding='utf8') as fh:
            files = {
                file_name: fh.read(),
            }
        return self.call('post', url, self.headers_plain, data=data, files=files)

    def delete(self, deploy_id, cascade=False):
        """Delete a deployment, optionally cascading to its process instances."""
        url = self.base_url + self._API_DEPLOYMENT_URL
        url = url.replace('{id}', deploy_id)
        if cascade:
            url += '?cascade=true'
        return self.call('delete', url, self.headers_json)

    def list(self, key=None, only_latest_version=False):
        """List process definitions, optionally filtered by key / latest version."""
        url = self.base_url + self._API_DEFINITIONS_URL
        param = "?firstResult=0"
        if key is not None:
            param += f'&key={key}'
        if only_latest_version:
            # Plain literal: there is nothing to interpolate here.
            param += '&latestVersion=true'
        return self.call('get', url + param, self.headers_json)

    def inspect(self, process_definition_id=None, key=None):
        """Fetch one definition by id, or by key (id takes precedence)."""
        url = self.base_url + self._API_DEFINITIONS_URL
        if process_definition_id:
            url += f'/{process_definition_id}'
        elif key:
            url += f'/key/{key}'
        return self.call('get', url, self.headers_json)

    def get_starting_form(self, process_key):
        """Return the start-form variables of the latest definition for *process_key*."""
        url = self.base_url + self._API_STARTING_FORM_URL
        url = url.replace('{key}', process_key)
        return super().call('get', url, self.headers_json)

    def get_starting_form_key(self, process_key):
        """Return the start-form key of the latest definition for *process_key*."""
        url = self.base_url + self._API_STARTING_FORM_KEY_URL
        url = url.replace('{key}', process_key)
        return super().call('get', url, self.headers_json)

    def list_identity_history(self, process_key, task_id):
        """List identity-link history entries for a task of a given process."""
        url = self.base_url + self._API_LIST_IDENTITY_HISTORY_URL
        param = '?taskId=' + task_id + '&processDefinitionKey=' + process_key
        return super().call('get', url + param, self.headers_json)

    def get_xml(self, process_key):
        """Return the BPMN XML of the latest definition for *process_key*."""
        url = self.base_url + self._API_GET_XML_URL
        url = url.replace('{key}', process_key)
        return super().call('get', url, self.headers_json)
import pyautogui
import time
from PIL import Image
from pathlib import Path
"""
Basic usage:
1. Find the variable called image_file (line below) and replace it with a valid path to an image
2. Start MS Paint in the background
3. Start this from the command line and within 5 seconds:
4. Put your mouse pointer in the top left corner of the canvas (the giant white part) in Paint
5. Wait forever
If you want it to stop, slam the mouse into the very top left corner of the screen: pyautogui's fail-safe (FAILSAFE=True below) aborts the run when the pointer reaches that corner.
"""
# --- Script configuration -------------------------------------------------
# Location of the image to paint; edit these two lines for your machine.
image_folder = Path("C:/Users/CampAsAChamp/Github/msPaintAutomation/images")
image_file = image_folder / "sample_image.png"
pyautogui.PAUSE = 0  # adjust this like crazy (global delay between pyautogui calls)
# Remember the baseline pause so the main loop can restore it after the
# slowed-down color-dialog interaction.
original_speed = pyautogui.PAUSE
# Moving the mouse to the top-left screen corner aborts the run.
pyautogui.FAILSAFE = True
# When True, pixel coordinates are printed instead of clicked.
TESTING = False
if not TESTING:
    # Grace period to focus Paint and park the cursor at the canvas origin.
    time.sleep(5)
start_time = time.time()
def color_comp(color_number, component):
    """
    Set a single component inside Paint's custom-color dialog.

    Presses Alt+<component> to focus the matching input field, then types
    the value.
    :param color_number: 0-255 value
    :param component: the key to get to the color. r=red, g=green, u=blue
    :return: None
    """
    pyautogui.hotkey('altleft', component)
    pyautogui.typewrite("{}".format(color_number))
def color(color_numbers):
    """
    Take a list of color values (RGB), open the custom colors, and apply values.

    An alpha channel, if present, is discarded because Paint has no opacity.
    :param color_numbers: List (Red, Green, Blue[, Alpha]) values
    :raises ValueError: if fewer/more than 3 components remain after dropping
        alpha, or any component is outside 0-255
    :return: None
    """
    # If there is an alpha channel discard it as there is no opacity in paint
    if len(color_numbers) > 3:
        color_numbers.pop()
    # print("Color Numbers: ", color_numbers)
    # Validate with real exceptions instead of `assert`, which is silently
    # stripped when running under `python -O`.
    if len(color_numbers) != 3:
        raise ValueError("expected 3 color components, got {}".format(len(color_numbers)))
    for num in color_numbers:
        if not 0 <= num <= 255:
            raise ValueError("color component out of range 0-255: {}".format(num))
    if not TESTING:
        # Alt, h, e, c opens the "Edit colors" dialog in Paint's ribbon.
        pyautogui.typewrite(['altleft', 'h', 'e', 'c', ], interval=0.1)
    color_comp(color_numbers[0], 'r')
    color_comp(color_numbers[1], 'g')
    color_comp(color_numbers[2], 'u')
    pyautogui.press('enter')
def fix_list(num_list):
    """
    Clamp every value in *num_list* to the 0-255 range, in place.

    Exists because rounding color components to the nearest ten can push
    them just outside the valid byte range.
    :param num_list: List of numbers.
    :return: The same (mutated) list object.
    """
    for position, value in enumerate(num_list):
        num_list[position] = min(255, max(0, value))
    return num_list
# --- Main script: rasterize the image by clicking one pixel at a time -----
# manually determined with the menu turned off the start of the canvas is at 25, 83. otherwise use .position()
# start_x, start_y = 25, 83
# Canvas origin = wherever the user parked the mouse during the 5s grace period.
start_x, start_y = pyautogui.position()
# read image file
image = Image.open(image_file)
# make dictionary with pixels using RGB as key
pixel_dictionary = dict()
for x in range(image.width):
    for y in range(image.height):
        rgb = image.getpixel((x, y))  # get the pixel value in a tuple
        # NOTE(review): assumes getpixel returns an iterable of components —
        # a grayscale image would return a bare int here; confirm input mode.
        # round it because doing every color takes forever
        rgb_rounded = list(map(lambda num: round(num, -1), rgb))
        # ensure rounding didn't push anything over 255 (it will)
        rgb_rounded = fix_list(rgb_rounded)
        # convert it to a list of strings
        rgb_list = list(map(str, rgb_rounded))
        rgb_key = ':'.join(rgb_list)  # separate with : to split later
        # if the dictionary doesn't have any pixel list yet, create it.
        if pixel_dictionary.get(rgb_key) is None:
            pixel_dictionary[rgb_key] = list()
        # compile a list of pixels using this RGB
        pixel_dictionary[rgb_key].append((x, y))
# from pprint import pprint
# pprint(pixel_dictionary)
# clicking before doing all the work to ensure we're focused in the paint
pyautogui.click(start_x, start_y)
# sorted the RGBs by the lengths of the associated list. this way we can see the image form earlier on
for rgb in sorted(pixel_dictionary.keys(), key=lambda z: len(pixel_dictionary[z]), reverse=True):
    # when the color form comes up do it slowly or else there are problems.
    pyautogui.PAUSE = original_speed + 0.1
    # set color to current RGB key (split "r:g:b" back into ints)
    color_map = map(int, rgb.split(':'))
    color_list = list(color_map)
    # print('changing to', rgb, color_list)
    # this sleep is to allow the gui to catch up...i think. this eliminates some but not all problems
    time.sleep(0.1)
    color(color_list)
    pyautogui.PAUSE = original_speed  # return to quickly painting
    # Paint every pixel that uses the currently-selected color.
    for pixel_x, pixel_y in pixel_dictionary.get(rgb):
        if not TESTING:
            # print("drawing", rgb) # funny enough, this slows it down just enough to be quick with no errors
            pyautogui.click(start_x + pixel_x, start_y + pixel_y)
        else:
            # Dry run: show the color and the screen coordinate instead of clicking.
            print(list(map(int, rgb.split(':'))), end=' ')
            print(start_x + pixel_x, start_y + pixel_y)
print("Took {} seconds!".format(time.time() - start_time))
|
# Generated by Django 3.2.4 on 2021-12-30 21:35
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Renames MainCategory's created/updated fields to *_at, adds the same
    # timestamp pair to News, and sets -created_at ordering plus display
    # names on both models.

    dependencies = [
        ('core', '0004_news_quote_text'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='maincategory',
            options={'ordering': ['-created_at'], 'verbose_name': 'Main Category', 'verbose_name_plural': 'Main Categories'},
        ),
        migrations.AlterModelOptions(
            name='news',
            options={'ordering': ['-created_at'], 'verbose_name': 'News', 'verbose_name_plural': 'News'},
        ),
        migrations.RenameField(
            model_name='maincategory',
            old_name='created',
            new_name='created_at',
        ),
        migrations.RenameField(
            model_name='maincategory',
            old_name='updated',
            new_name='updated_at',
        ),
        migrations.AddField(
            model_name='news',
            name='created_at',
            # Existing News rows are backfilled with now() once; the one-off
            # default is then dropped (preserve_default=False).
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='news',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
from .vector_field import vector_field
from .ftle import FTLE
from .fit_ppt import fit_ppt
|
from functools import wraps
import logging
import os
from typing import Any, Callable, TypeVar
from django.conf import settings
from django.utils.translation import ugettext as _
import stripe
from zerver.lib.exceptions import JsonableError
from zerver.lib.logging_util import log_to_file
from zerver.models import Realm, UserProfile
from zilencer.models import Customer
from zproject.settings import get_secret
# Stripe credentials come from the secrets store; the publishable key doubles
# as the "is Stripe configured?" sentinel in catch_stripe_errors below.
STRIPE_SECRET_KEY = get_secret('stripe_secret_key')
STRIPE_PUBLISHABLE_KEY = get_secret('stripe_publishable_key')
stripe.api_key = STRIPE_SECRET_KEY
# Billing events go to their own log file; in dev it lives under the
# development log directory instead of /var/log/zulip.
BILLING_LOG_PATH = os.path.join('/var/log/zulip'
                                if not settings.DEVELOPMENT
                                else settings.DEVELOPMENT_LOG_DIRECTORY,
                                'billing.log')
billing_logger = logging.getLogger('zilencer.stripe')
log_to_file(billing_logger, BILLING_LOG_PATH)
# Also route the stripe library's own logger into the billing log.
log_to_file(logging.getLogger('stripe'), BILLING_LOG_PATH)
# Type variable so the decorator below preserves the wrapped signature.
CallableT = TypeVar('CallableT', bound=Callable[..., Any])
class StripeError(JsonableError):
    """JSON-able error raised for any failure in the Stripe integration."""
    pass
def catch_stripe_errors(func: CallableT) -> CallableT:
    """Decorator that normalizes Stripe failures into StripeError.

    Fails fast with a dev-friendly message when Stripe is unconfigured, logs
    every Stripe (and unexpected) error to the billing log, and re-raises
    Stripe errors as StripeError so the API layer returns a JSON error.
    """
    @wraps(func)
    def wrapped(*args: Any, **kwargs: Any) -> Any:
        if STRIPE_PUBLISHABLE_KEY is None:
            # Dev-only message; no translation needed.
            raise StripeError(
                "Missing Stripe config. In dev, add to zproject/dev-secrets.conf .")
        try:
            return func(*args, **kwargs)
        except stripe.error.StripeError as e:
            billing_logger.error("Stripe error: %d %s",
                                 e.http_status, e.__class__.__name__)
            if isinstance(e, stripe.error.CardError):
                # Card errors carry a message from Stripe that is safe to
                # surface to the user.
                raise StripeError(e.json_body.get('error', {}).get('message'))
            else:
                raise StripeError(
                    _("Something went wrong. Please try again or email us at %s.")
                    % (settings.ZULIP_ADMINISTRATOR,))
        except Exception:
            # The original bound the exception as `e` but never used it;
            # logger.exception already records the traceback.
            billing_logger.exception("Uncaught error in Stripe integration")
            raise
    return wrapped  # type: ignore # https://github.com/python/mypy/issues/1927
@catch_stripe_errors
def count_stripe_cards(realm: Realm) -> int:
    """Return how many cards are attached to the realm's Stripe customer.

    Returns 0 when the realm has never been registered as a Stripe customer.
    """
    try:
        customer_obj = Customer.objects.get(realm=realm)
        # Fetch the customer from Stripe and list only card-type sources.
        cards = stripe.Customer.retrieve(customer_obj.stripe_customer_id).sources.all(object="card")
        return len(cards["data"])
    except Customer.DoesNotExist:
        # No local Customer row means no Stripe customer exists yet.
        return 0
@catch_stripe_errors
def save_stripe_token(user: UserProfile, token: str) -> int:
    """Returns total number of cards.

    Attaches the card `token` to the realm's existing Stripe customer and
    makes it the default source; if the realm has no Stripe customer yet,
    creates one with this card as its only source.
    """
    # The card metadata doesn't show up in Dashboard but can be accessed
    # using the API.
    card_metadata = {"added_user_id": user.id, "added_user_email": user.email}
    try:
        customer_obj = Customer.objects.get(realm=user.realm)
        customer = stripe.Customer.retrieve(customer_obj.stripe_customer_id)
        billing_logger.info("Adding card on customer %s: source=%r, metadata=%r",
                            customer_obj.stripe_customer_id, token, card_metadata)
        card = customer.sources.create(source=token, metadata=card_metadata)
        # New card becomes the default payment source.
        customer.default_source = card.id
        customer.save()
        return len(customer.sources.all(object="card")["data"])
    except Customer.DoesNotExist:
        customer_metadata = {"string_id": user.realm.string_id}
        # Description makes it easier to identify customers in Stripe dashboard
        description = "{} ({})".format(user.realm.name, user.realm.string_id)
        billing_logger.info("Creating customer: source=%r, description=%r, metadata=%r",
                            token, description, customer_metadata)
        customer = stripe.Customer.create(source=token,
                                          description=description,
                                          metadata=customer_metadata)
        # Stripe doesn't let us attach metadata at create time for the source,
        # so fetch the just-created card and set it afterwards.
        card = customer.sources.all(object="card")["data"][0]
        card.metadata = card_metadata
        card.save()
        Customer.objects.create(realm=user.realm, stripe_customer_id=customer.id)
        return 1
|
from jobs.acquisition.messageacquisition import MessageAcquisition
from general_types.virtual_classes import ObservableGeneric
from general_types.label_ogc import LabelObservationType
from utility.utility_catalog_cached import UtilityCatalogCached
from jobs.cache_redis import CachedComponents
import logging
import datetime
import pytz
logger = logging.getLogger('textlogger')
class ServiceObservationAcquisition:
    """Static helpers that validate incoming observations and store them in the cache.

    All methods are defensive: they catch their own exceptions, log, and
    return a boolean (or None) instead of propagating.
    """

    # NOTE(review): never referenced by the methods below — confirm it is
    # still needed before removing.
    global_counter_observation = 0

    @staticmethod
    def check_time_validity(observable: ObservableGeneric,
                            timestamp_now: datetime.datetime,
                            interval_secs: int) -> bool:
        """Return True when the observation is recent enough.

        :param observable: observation to check; falsy values fail the check.
        :param timestamp_now: reference "now" (expected timezone-aware, UTC).
        :param interval_secs: max allowed age in seconds; -1 disables the check.
        """
        try:
            if not observable:
                return False
            if interval_secs == -1:
                # -1 means "age check disabled".
                return True
            timestamp_obs = observable.get_timestamp()
            time_diff_secs = (timestamp_now-timestamp_obs).total_seconds()
            if time_diff_secs > interval_secs:
                # BUGFIX: the original format string had placeholders {0}-{2}
                # for four arguments, so the timestamp was silently dropped.
                logger.info("ServiceObsAcqu Obs Too Old: {0} {1} {2} {3}"
                            .format(str(time_diff_secs),
                                    str(observable.get_observable_id()),
                                    str(observable.get_device_id()),
                                    observable.get_timestamp()))
                return False
            return True
        except Exception as ex:
            # Fail open: a malformed timestamp should not discard the observation.
            logger.error("ServiceObsAcqu check_time_validity Exception: {}".format(ex))
            return True

    @staticmethod
    def check_observable_type_admitted(observable: ObservableGeneric) -> bool:
        """Return True only for localization and crowd-density observations."""
        try:
            if not observable:
                return False
            if observable.get_type_observable() == LabelObservationType.LABEL_OBSTYPE_LOCALIZATION:
                return True
            elif observable.get_type_observable() == LabelObservationType.LABEL_OBSTYPE_CROWDDENSITY:
                return True
            return False
        except NotImplementedError:
            # Observables that don't implement get_type_observable are rejected.
            return False

    @staticmethod
    def validate_and_save_observation_cache(observable: ObservableGeneric,
                                            associated_topic: str,
                                            interval_validity_secs: int) -> bool:
        """Validate an observation (type + freshness) and append it to the topic cache.

        Returns True only when every check passed and the cache update succeeded.
        """
        try:
            if not observable:
                logger.info('ServiceObsAqu Failed (observable is None)')
                return False
            if not ServiceObservationAcquisition.check_observable_type_admitted(observable=observable):
                logger.info("ServiceObsAcqu Type Observable Not Admitted: {}"
                            .format(observable.get_type_observable()))
                return False
            if not ServiceObservationAcquisition.check_if_observable_updated(observable=observable,
                                                                             associated_topic=associated_topic,
                                                                             interval_validity_secs=interval_validity_secs):
                logger.info('ServiceObsAcqu Observable {} NOT Updated. Discarded'.format(observable.get_device_id()))
                return False
            if not ServiceObservationAcquisition.update_cache_element_topic(observable=observable,
                                                                            associated_topic=associated_topic):
                logger.info('ServiceObsAcqu Observable {} update_cache_element_topic Failed. Discarded'.format(observable.get_device_id()))
                return False
            logger.debug('ServiceObsAcqu validate_and_save_observation_cache associated topic: {0}, datastream_id: {1}'
                         .format(associated_topic,
                                 observable.get_datastream_id()))
            return True
        except Exception as ex:
            logger.error("ServiceObsAcqu validate_and_save_observation_cache Exception: {}".format(ex))
            return False

    @staticmethod
    def check_if_observable_updated(observable: ObservableGeneric,
                                    associated_topic: str,
                                    interval_validity_secs: int) -> bool:
        """Return True when the observation is fresh relative to now (UTC)."""
        try:
            if not observable:
                return False
            if not ServiceObservationAcquisition.check_time_validity(observable=observable,
                                                                     timestamp_now=datetime.datetime.now(tz=pytz.utc),
                                                                     interval_secs=interval_validity_secs):
                return False
            return True
        except Exception as ex:
            logger.error("ServiceObsAcqu check_if_observable_updated Exception: {}".format(ex))
            return False

    @staticmethod
    def update_cache_element_topic(observable: ObservableGeneric,
                                   associated_topic: str) -> bool:
        """Append the observation to the cached catalog under its topic label."""
        try:
            if not observable:
                return False
            return UtilityCatalogCached.append_new_observable(label_type_observable=associated_topic,
                                                              observable=observable)
        except Exception as ex:
            logger.error('ServiceObsAcqu update_cache_element_topic Exception: {}'.format(ex))
            return False

    @staticmethod
    def acquire_single_observation(json_observation: dict,
                                   associated_topic: str,
                                   pilot_name: str = '',
                                   observable_id: int = 0,
                                   running_id: int = 0) -> ObservableGeneric:
        """Deserialize one raw JSON observation into an observable for its topic.

        Returns None for unrecognized topics.
        """
        if associated_topic == LabelObservationType.LABEL_OBSTYPE_CROWDDENSITY:
            return MessageAcquisition.crowd_density_local_observation(mqtt_dictionary=json_observation,
                                                                      pilot_name=pilot_name,
                                                                      observable_id=observable_id)
        elif associated_topic == 'GateCountingEstimation':
            return MessageAcquisition.gate_counting_observation(json_observation, pilot_name)
        elif associated_topic == LabelObservationType.LABEL_OBSTYPE_LOCALIZATION:
            return MessageAcquisition.localization_observation(mqtt_dictionary=json_observation,
                                                               pilot_name=pilot_name,
                                                               observable_id=observable_id,
                                                               running_id=running_id)
        return None
|
from rest_framework import viewsets
from tasks.models import Task
from .serializers import TaskSerializer
from django.db import transaction
from worker.simple_worker import WORKERS
from worker.simple_worker import task_add
from rest_framework.exceptions import APIException
import time
import copy
class TaskViewSet(viewsets.ModelViewSet):
    """CRUD API for Task rows that also enqueues/revokes the matching worker job."""
    serializer_class = TaskSerializer
    queryset = Task.objects.all()

    def perform_create(self, serializer):
        """Persist the task, then enqueue the worker job after the DB commit."""
        try:
            with transaction.atomic():
                instance = serializer.save(state = "CREATED")
                job_params = copy.deepcopy(serializer.validated_data['params']) # dont want to see db_id in returned params
                job_params['db_id'] = instance.id
                # on_commit ensures the worker never sees a row that was rolled back.
                transaction.on_commit(lambda: task_add.delay(job_params))
        except Exception as e:
            # Surface any failure as a DRF 500-style APIException.
            raise APIException(str(e))

    def perform_destroy(self, instance):
        """Delete the task row, then revoke the running worker job after commit."""
        try:
            with transaction.atomic():
                instance.delete()
                # NOTE(review): reads instance.task_id from the in-memory object
                # after delete(); assumes task_id was loaded before deletion —
                # confirm the field is always present on the instance.
                transaction.on_commit(lambda: WORKERS.control.revoke(instance.task_id, terminate=True))
        except Exception as e:
            raise APIException(str(e))
|
#-------------------------------- main.py file -----------------------------------------#
"""
Main file - entry point to the code. This file coordinates all other files and
implements all the functionality directly available to the user.
"""
# import statements
import numpy as np
import joblib
from pkg_resources import get_distribution
from boreas.models import makePrediction, RFModelIsotropic, TBNNSModelAnisotropic
from boreas.case import TestCase, TrainingCase
from boreas import process
from boreas import constants
def printInfo():
    """
    Makes sure everything is properly installed.
    We print a welcome message, the version of the package, and attempt to load
    the pre-trained models to make sure the data file is there. Return 1 at the end
    if no exceptions were raised.
    """
    print('Welcome to Boreas - a package for industrial deployment of machine-learned '
          + 'turbulent mixing models for film cooling (formerly known as RaFoFC)!')
    # Report the installed package version.
    dist = get_distribution('boreas')
    print('Version: {}'.format(dist.version))

    def _load_and_describe(announcement, model_cls):
        # Instantiate a default model, load it from disk, and print its description.
        print(announcement)
        model = model_cls()
        model.loadFromDisk()
        print('Default model was found and can be loaded properly.')
        print('\t Description: ', end="", flush=True)
        model.printDescription()

    _load_and_describe('Attempting to load the default RF model...',
                       RFModelIsotropic)
    _load_and_describe('Attempting to load the default TBNN-s model...',
                       TBNNSModelAnisotropic)
    return 1  # return this if everything went ok
def applyMLModel(tecplot_in_path, tecplot_out_path, *,
                 zone = None, deltaT0 = None,
                 use_default_var_names = False, use_default_derivative_names = True,
                 calc_derivatives = True, write_derivatives = True,
                 threshold = None, default_prt = None, clean_features = True,
                 features_load_path = None, features_dump_path = None,
                 ip_file_path = None, csv_file_path = None,
                 variables_to_write = None, outnames_to_write = None,
                 model_path = None, secondary_model_path = None,
                 model_type = "RF", features_type="F2",
                 ensemble_of_models = False, std_ensemble = False):
    """
    Applies ML model on a single test case, given in a Tecplot file.
    Main function of package. Call this to take in a Tecplot file, process it, apply
    the machine learning model, and save results to disk. All optional arguments must
    be used with the identifying keyword (that's what * means)
    Arguments:
    tecplot_in_path -- string containing the path of the input tecplot file. It must be
                       a binary .plt file, resulting from a k-epsilon simulation.
    tecplot_out_path -- string containing the path to which the final tecplot dataset
                        will be saved.
    zone -- optional argument. The zone where the flow field solution is saved in
            Tecplot. By default, it is zone 0. This can be either a string (with the
            zone name) or an integer with the zone index.
    deltaT0 -- optional argument. Temperature scale (Tmax - Tmin) that will be used to
               non-dimensionalize the dataset. If it is not provided (default behavior),
               the user will be prompted to enter an appropriate number.
    use_default_var_names -- optional argument. Boolean flag (True/False) that determines
                             whether default Fluent names will be used to fetch variables
                             in the Tecplot dataset. If the flag is False (default
                             behavior), the user will be prompted to enter names for each
                             variable that is needed.
    use_default_derivative_names -- optional argument. Boolean flag (True/False) that
                                    determine if the user will pick the names for the
                                    derivative quantities in the Tecplot file or whether
                                    default names are used. This flag is only used if the
                                    next flag is False (i.e., if derivatives are
                                    already pre-calculated, then setting this flag to
                                    False allows the user to input the names of each
                                    derivative in the input .plt file). It defaults to
                                    True.
    calc_derivatives -- optional argument. Boolean flag (True/False) that determines
                        whether derivatives need to be calculated in the Tecplot file.
                        Note we need derivatives of U, V, W, and Temperature, with names
                        ddx_{}, ddy_{}, ddz_{}. If such variables were already calculated
                        and exist in the dataset, set this flag to False to speed up the
                        process. By default (True), derivatives are calculated and a new
                        file with derivatives called "derivatives_{}" will be saved to
                        disk.
    write_derivatives -- optional argument. Boolean flag (True/False) that determines
                         whether to write a binary Tecplot file to disk with the newly
                         calculated derivatives. The file will have the same name as the
                         input, except followed by "_derivatives". This is useful because
                         calculating derivatives takes a long time, so you might want to
                         save results to disk as soon as they are calculated.
    threshold -- optional argument. This variable determines the threshold for
                 (non-dimensional) temperature gradient below which we throw away a
                 point. If None, use the value in constants.py (default value is 1e-3).
                 For temperature gradient less than that, we use the Reynolds analogy
                 (with fixed Pr_t). For gradients larger than that, we use the
                 model.
    default_prt -- optional argument, this variable contains the default value of Pr_t to
                   use in regions where gradients are low or features have been cleaned.
                   If this is None (default), then use the value from constants.py.
    clean_features -- optional argument. This determines whether we should remove outlier
                      points from the dataset before applying the model. This is measured
                      by the standard deviation of points around the mean.
    features_load_path -- optional argument. If this is supplied, then the function will
                          try to load the features from disk instead of
                          processing the tecplot file all over again. Since calculating
                          the features can take a while for large datasets, this can be
                          useful to speed up repetitions.
    features_dump_path -- optional argument. If this is provided and we processed the
                          tecplot data from scratch (i.e. we calculated the features),
                          then the function will save the features to disk, so it is
                          much faster to perform the same computations again later.
    ip_file_path -- optional argument. String containing the path to which the
                    interpolation file (which is read by ANSYS Fluent) will be saved. If
                    this argument is None (by default), then no interpolation file is
                    written.
    csv_file_path -- optional argument. String containing the path to which the csv file
                     (which can be read by StarCCM+) will be saved. If this is None
                     (default), then no csv file is written.
    variables_to_write -- optional argument. This is a list of strings containing names
                          of variables in the Tecplot file that we want to write in the
                          Fluent interpolation file/CSV file. By default, it is None,
                          which leads the program to pick only the diffusivity variables
                          just calculated.
    outnames_to_write -- optional argument. This is a list of strings that must have the
                         same length as the previous argument. It contains the names that
                         each of the variables written in the interpolation/csv files will
                         have. By default, this is None, which leads to code to name all
                         variables being written sequentially, starting at "uds-2". Naming
                         them as "user defined scalars x" (uds-x) is an easy way to read
                         them in Fluent.
    model_path -- optional argument. This is the path where the function will look for
                  a pre-trained machine learning model. The file must be a pickled
                  instance of a random forest regressor class or a pickled instance of
                  the TBNN-s class, saved to disk using joblib. If None, the default
                  machine learning model that comes with the package (which is already
                  pre-trained with LES/DNS) is employed.
    secondary_model_path -- optional argument. This is the path where the function will
                            look for a pre-trained random forest model to support the
                            TBNN-s model in the hybrid formulation. The file must be a
                            pickled instance of a random forest regressor class, saved to
                            disk using joblib. By default, the default RF is loaded. This
                            argument is only relevant when model_type = "TBNNS_hybrid".
    model_type -- optional argument. This tells us which type of model we are loading.
                  It must be a string, and the currently supported options are "RF",
                  "TBNNS", and "TBNNS_hybrid". The default option is "RF".
    features_type -- optional argument, string determining the type of features that
                     we are currently extracting. Options are "F1" and "F2". Default
                     value is "F2".
    ensemble_of_models -- optional argument. This is a boolean flag that tells us whether
                          to use a model ensemble instead of a single model instance. If
                          this is true, the model_path parameter must be a list of paths
                          instead of a single path. The default option is "False"
    std_ensemble -- optional argument. This is a boolean flag that instructs the solver
                    to return the standard deviation across the ensemble of models. This
                    can only be True if ensemble_of_models=True; in which case, we only
                    return the standard deviation and not the actual diffusivity. This
                    option only makes sense for the TBNN-s model (since the RF is already
                    an ensemble) and is not supported for the hybrid model. The default
                    option is "False"
    Raises:
    ValueError -- if model_type/features_type are not among the supported options,
                  or if ensemble_of_models is True but model_path is not a non-empty
                  list. (Previously these were `assert` statements, which are
                  silently stripped when Python runs with -O.)
    """
    # Validate arguments with real exceptions instead of asserts.
    if model_type not in ("RF", "TBNNS", "TBNNS_hybrid"):
        raise ValueError("Invalid model_type received!")
    if features_type not in ("F1", "F2"):
        raise ValueError("Invalid features_type received!")
    if ensemble_of_models: # check whether model_path is a list if model ensemble
        if not isinstance(model_path, list):
            raise ValueError("Error! For a model ensemble, model_path must be a list")
        if len(model_path) == 0:
            raise ValueError("Error! model_path is an empty list!")
    # Initialize dataset and get scales for non-dimensionalization. The default behavior
    # is to ask the user for the names and the scales. Passing keyword arguments to this
    # function can be done to go around this behavior
    dataset = TestCase(tecplot_in_path, zone=zone,
                       use_default_names=use_default_var_names)
    dataset.normalize(deltaT0=deltaT0)
    # If this flag is True (default) calculate the derivatives and save the result to
    # disk (since it takes a while to do that...)
    if calc_derivatives:
        dataset.calculateDerivatives()
        if write_derivatives: # write new Tecplot file to disk
            dataset.saveDataset(tecplot_in_path[0:-4] + "_derivatives.plt")
    else:
        print("Derivatives already calculated!")
        dataset.addDerivativeNames(use_default_derivative_names)
    # Here, run the code for applying random forest model ("RF")
    if model_type == "RF":
        # This line processes the dataset and extracts features for the ML step which
        # can take a long time. features_load_path and features_dump_path can be
        # set to make the method load/save the processed quantities from disk.
        x, _ = dataset.extractFeatures(with_tensor_basis=False,
                                       features_type=features_type, threshold=threshold,
                                       features_load_path=features_load_path,
                                       features_dump_path=features_dump_path,
                                       clean_features=clean_features)
        prt_ML = makePrediction("RF", model_path, x, features_type)
        # Adds result to tecplot and sets the default variable names to output
        varname = "Prt_ML"
        dataset.addPrt(prt_ML, varname, default_prt)
        if variables_to_write is None:
            variables_to_write = [varname]
        if outnames_to_write is None:
            outnames_to_write = ["uds-2"]
    # Here, run the code for applying TBNN model ("TBNNS")
    elif model_type == "TBNNS":
        # This line processes the dataset and returns the features and tensor basis
        # at each point in the dataset where gradients are significant.
        x, tb = dataset.extractFeatures(with_tensor_basis=True,
                                        features_type=features_type, threshold=threshold,
                                        features_load_path=features_load_path,
                                        features_dump_path=features_dump_path,
                                        clean_features=clean_features)
        alphaij_ML, g_ML = makePrediction("TBNNS", model_path, x, features_type, tb,
                                          ensemble=ensemble_of_models,
                                          std_flag=std_ensemble)
        # Adds result to tecplot and sets the default variable names to output
        varname = ["Dxx", "Dxy", "Dxz", "Dyx", "Dyy", "Dyz", "Dzx", "Dzy", "Dzz"]
        dataset.addTensorDiff(alphaij_ML, varname, default_prt)
        g_name = ["g1", "g2", "g3", "g4", "g5", "g6"]
        dataset.addG(g_ML, g_name, default_prt)
        if variables_to_write is None:
            variables_to_write = varname
        if outnames_to_write is None:
            outnames_to_write = ["uds-2", "uds-3", "uds-4", "uds-5", "uds-6", "uds-7",
                                 "uds-8", "uds-9", "uds-10"]
    # Here, run the code for applying TBNN-s + random forest model ("TBNNS_hybrid")
    elif model_type == "TBNNS_hybrid":
        # This line processes the dataset and returns the features and tensor basis
        # at each point in the dataset where gradients are significant.
        x, tb = dataset.extractFeatures(with_tensor_basis=True,
                                        features_type=features_type, threshold=threshold,
                                        features_load_path=features_load_path,
                                        features_dump_path=features_dump_path,
                                        clean_features=clean_features)
        alphaij_ML, g_ML = makePrediction("TBNNS", model_path, x, features_type, tb,
                                          ensemble=ensemble_of_models,
                                          std_flag=std_ensemble)
        # Now, get a random forest prediction for the turbulent Prandtl number
        prt_ML = makePrediction("RF", secondary_model_path, x, features_type)
        # Combine alphaij_ML and prt_ML into a single diffusivity tensor
        alphaij_mod = dataset.enforcePrt(alphaij_ML, prt_ML)
        # Adds result to tecplot and sets the default variable names to output
        varname = ["Dxx", "Dxy", "Dxz", "Dyx", "Dyy", "Dyz", "Dzx", "Dzy", "Dzz"]
        dataset.addTensorDiff(alphaij_mod, varname, default_prt)
        g_name = ["g1", "g2", "g3", "g4", "g5", "g6"]
        dataset.addG(g_ML, g_name, default_prt)
        if variables_to_write is None:
            variables_to_write = varname
        if outnames_to_write is None:
            outnames_to_write = ["uds-2", "uds-3", "uds-4", "uds-5", "uds-6", "uds-7",
                                 "uds-8", "uds-9", "uds-10"]
    # Write output: create interp/csv files and produce tecplot file
    if ip_file_path is not None:
        dataset.createInterpFile(ip_file_path, variables_to_write, outnames_to_write)
    if csv_file_path is not None:
        dataset.createCsvFile(csv_file_path, variables_to_write, outnames_to_write)
    dataset.saveDataset(tecplot_out_path)
def produceTrainingFeatures(tecplot_in_path, *, data_path = None,
                            zone = None, deltaT0 = None,
                            use_default_var_names = False,
                            use_default_derivative_names = True,
                            calc_derivatives = True, write_derivatives = True,
                            threshold = None, clean_features = True,
                            features_load_path = None, features_dump_path = None,
                            prt_cap = None, gamma_correction = False,
                            downsample = None, tecplot_out_path = None,
                            model_type = "RF", features_type="F2"):
    """
    Produces features and labels from a single Tecplot file, used for training.

    Call this on a single binary Tecplot file (.plt) containing all mean data
    (including u'c' values) from a k-epsilon simulation; it processes the file
    and saves a joblib file with the features/labels needed to train ML models.
    All optional arguments are keyword-only (that's what * means).

    Arguments:
    tecplot_in_path -- path of the input binary .plt file.
    data_path -- optional. Path of the joblib output file with features/labels.
                 If None (default), a default name is derived from the input.
    zone -- optional. Zone holding the flow solution (string name or integer
            index). Defaults to zone 0.
    deltaT0 -- optional. Temperature scale (Tmax - Tmin) used to
               non-dimensionalize the data; if None, the user is prompted.
    use_default_var_names -- optional. If True, use default Fluent variable
                             names; otherwise (default) prompt the user.
    use_default_derivative_names -- optional. If True (default), use default
                                    names for pre-computed derivatives; only
                                    relevant when calc_derivatives is False.
    calc_derivatives -- optional. If True (default), compute derivatives of
                        U, V, W, T (named ddx_{}, ddy_{}, ddz_{}); set to
                        False if they already exist in the file.
    write_derivatives -- optional. If True (default), save a "_derivatives"
                         .plt file after computing derivatives (computing them
                         is slow, so persisting is usually worthwhile).
    threshold -- optional. Non-dimensional temperature-gradient threshold
                 below which points are discarded; None uses constants.py.
    clean_features -- optional. If True (default), remove outlier points
                      (measured by standard deviations around the mean).
    features_load_path -- optional. If given, load pre-computed features from
                          disk instead of re-processing the Tecplot file.
    features_dump_path -- optional. If given and features were computed from
                          scratch, save them to disk for faster reuse.
    prt_cap -- optional. Symmetric cap on Pr_t (e.g. 100 caps values to the
               range 0.01..100); None uses constants.py.
    gamma_correction -- optional. If True, apply the correction of Milani,
                        Ling, Eaton (JTM 2020); breaks Galilean invariance,
                        so off by default.
    downsample -- optional. Downsampling control before saving: > 1 means a
                  number of examples, < 1 a ratio; None uses constants.py.
    tecplot_out_path -- optional. Path for a sanity-check Tecplot output
                        (only written for the "RF" model type); default None.
    model_type -- optional. One of "RF", "TBNNS", "TBNNS_hybrid"; determines
                  which labels are extracted. Default "RF".
    features_type -- optional. Feature set to extract, "F1" or "F2".
                     Default "F2".
    """
    assert model_type in ("RF", "TBNNS", "TBNNS_hybrid"), \
        "Invalid model_type received!"
    # Load the Tecplot data and non-dimensionalize it. The default behavior is
    # to prompt the user for names/scales; the keyword arguments bypass that.
    case = TrainingCase(tecplot_in_path, zone=zone,
                        use_default_names=use_default_var_names)
    case.normalize(deltaT0=deltaT0)
    # Derivatives of U, V, W, T are required: either compute them now (and
    # optionally persist the slow result) or register pre-computed names.
    if calc_derivatives:
        case.calculateDerivatives()
        if write_derivatives:  # write new Tecplot file to disk
            case.saveDataset(tecplot_in_path[0:-4] + "_derivatives.plt")
    else:
        print("Derivatives already calculated!")
        case.addDerivativeNames(use_default_derivative_names)
    metadata = {"features_type": features_type}
    if model_type == "RF":
        # Isotropic model: features only (no tensor basis), labeled by
        # gamma = 1/Prt. Extraction can be slow; the load/dump paths let the
        # processed quantities be cached on disk.
        x, _ = case.extractFeatures(with_tensor_basis=False,
                                    features_type=features_type,
                                    threshold=threshold,
                                    features_load_path=features_load_path,
                                    features_dump_path=features_dump_path,
                                    clean_features=clean_features)
        gamma = case.extractGamma(prt_cap, gamma_correction)  # gamma = 1/Prt
        training_list = [x, gamma]  # what is used for training
        metadata["with_tensor_basis"] = False
        metadata["with_gamma"] = True
        # Optionally write the extracted Prt_LES back to Tecplot as a sanity
        # check.
        if tecplot_out_path is not None:
            case.addPrt(1.0/gamma, "Prt_LES")
            case.saveDataset(tecplot_out_path)
    else:  # "TBNNS" or "TBNNS_hybrid"
        # Anisotropic models need the tensor basis plus u'c', grad(T), nu_t
        # at every point where gradients are significant.
        x, tb = case.extractFeatures(with_tensor_basis=True,
                                     features_type=features_type,
                                     threshold=threshold,
                                     features_load_path=features_load_path,
                                     features_dump_path=features_dump_path,
                                     clean_features=clean_features)
        uc, gradT, nut = case.extractUc()
        training_list = [x, tb, uc, gradT, nut]  # what is used for training
        metadata["with_tensor_basis"] = True
        metadata["with_gamma"] = (model_type == "TBNNS_hybrid")
        if model_type == "TBNNS_hybrid":
            # The hybrid variant additionally trains on gamma = 1/Prt.
            training_list.append(case.extractGamma(prt_cap, gamma_correction))
    # Default output name appends _trainingdata to the input file's base name.
    if data_path is None:
        data_path = tecplot_in_path[0:-4] + "_trainingdata.pckl"  # default name
    # Save (optionally downsampled) features/labels to disk.
    process.saveTrainingFeatures(training_list, metadata, data_path, downsample)
def trainRFModel(features_list, description, model_path, *, features_type="F1",
                 downsample=None, n_trees = None, max_depth = None,
                 min_samples_split = None, n_jobs = None):
    """
    Trains a random forest model and saves it to disk.

    Trains an isotropic random forest model on features/labels previously
    produced by produceTrainingFeatures. Several files may be combined (each
    entry of the list comes from one dataset). All optional arguments are
    keyword-only (that's what * means).

    Arguments:
    features_list -- list of paths to joblib files with features/labels,
                     produced by produceTrainingFeatures.
    description -- short written description of the model; saved to disk
                   together with the model itself.
    model_path -- path where the trained model is saved on disk.
    features_type -- optional. Feature set in use, "F1" or "F2". Default "F1".
    downsample -- optional. Downsampling control before training: > 1 means a
                  number of examples, < 1 a ratio; None uses constants.py. May
                  also be a list, one entry per element of features_list.
    n_trees -- optional. Number of trees; None reads constants.py.
    max_depth -- optional. Maximum tree depth; None reads constants.py.
    min_samples_split -- optional. Minimum samples to split a node (int count
                         or float ratio); None reads constants.py.
    n_jobs -- optional. Number of processors for training (see the
              RandomForestRegressor manual; -1 uses all); None reads
              constants.py.
    """
    # Reads the list of files provided for features/labels
    print("{} file(s) were provided and will be used".format(len(features_list)))
    per_file_downsample = isinstance(downsample, list)
    if per_file_downsample:  # make sure list is the right size
        assert len(downsample) == len(features_list), \
            "downsample is a list, but it has the wrong number of entries!"
    feature_chunks = []
    label_chunks = []
    for i, file_path in enumerate(features_list):
        # Per-file downsample if a list was given, otherwise the shared value.
        ds = downsample[i] if per_file_downsample else downsample
        x_features, gamma = process.loadTrainingFeatures(file_path, "RF", ds,
                                                         features_type)
        feature_chunks.append(x_features)
        label_chunks.append(gamma)
    x_total = np.concatenate(feature_chunks, axis=0)
    y_total = np.concatenate(label_chunks, axis=0)
    # Train on the pooled data and persist the result.
    rf = RFModelIsotropic()
    rf.train(x_total, y_total, n_trees, max_depth, min_samples_split, n_jobs)
    rf.save(description, model_path)
def trainTBNNSModel(features_list_train, features_list_dev, description, model_path,
                    path_to_saver, *, FLAGS=None, features_type="F2",
                    downsample_train=None, downsample_dev=None):
    """
    Trains a TBNN-s and saves it to disk.

    Trains a TBNN-s, anisotropic model, with features and labels previously
    calculated. Multiple files can be used at the same time (each file in the
    list comes from a given dataset). All optional arguments may only be used
    with the keyword (that's what * means).

    Arguments:
    features_list_train -- list of paths to files with features/labels for
                           training, produced by produceTrainingFeatures.
    features_list_dev -- list of paths to files with features/labels for the
                         validation set, produced the same way.
    description -- short written description of the model being trained; saved
                   to disk together with the model itself.
    model_path -- path where the trained model is saved on disk.
    path_to_saver -- path where model parameters are saved through the
                     tf.Saver class (usually a "checkpoints" folder).
    FLAGS -- optional dictionary of training parameters for the TBNNS model
             (see the tbnns package for available settings). If None
             (default), an empty dict is used. Note that 'num_features' is
             set here based on features_type; a caller-supplied dict is
             modified in place (pre-existing behavior).
    features_type -- optional. Feature set in use, "F1" or "F2". Default "F2".
    downsample_train -- optional. Downsampling control for the training data:
                        > 1 means a number of examples, < 1 a ratio; None uses
                        constants.py. May also be a list, one entry per file
                        of features_list_train.
    downsample_dev -- optional, same as above for the dev set.
    """
    # Bug fix: the previous signature used the mutable default FLAGS={},
    # which is created once per process and mutated below ('num_features'),
    # so settings leaked across successive calls. Use a None sentinel instead.
    if FLAGS is None:
        FLAGS = {}

    def _load_feature_files(file_list, downsample, set_name):
        """Load and concatenate TBNNS features/labels from a list of files.

        Returns (x, tb, uc, gradT, nut), each concatenated along axis 0.
        ``downsample`` may be a scalar applied to every file, or a list with
        one entry per file.
        """
        print("{} file(s) were provided and will be used for {}"
              .format(len(file_list), set_name))
        per_file = isinstance(downsample, list)
        if per_file:  # make sure list is the right size
            assert len(downsample) == len(file_list), \
                "downsample is a list, but it has the wrong number of entries!"
        collected = ([], [], [], [], [])  # x, tb, uc, gradT, nut
        for i, file in enumerate(file_list):
            ds = downsample[i] if per_file else downsample
            loaded = process.loadTrainingFeatures(file, "TBNNS", ds,
                                                  features_type)
            for bucket, arr in zip(collected, loaded):
                bucket.append(arr)
        return tuple(np.concatenate(bucket, axis=0) for bucket in collected)

    # Load training and validation data (previously two duplicated loops).
    x_train, tb_train, uc_train, gradT_train, nut_train = \
        _load_feature_files(features_list_train, downsample_train, "training")
    x_dev, tb_dev, uc_dev, gradT_dev, nut_dev = \
        _load_feature_files(features_list_dev, downsample_dev, "validation")
    # The network's input size depends on which feature set is in use.
    if features_type == "F1":
        FLAGS['num_features'] = constants.NUM_FEATURES_F1
    elif features_type == "F2":
        FLAGS['num_features'] = constants.NUM_FEATURES_F2
    # Here, we train and save the model
    nn = TBNNSModelAnisotropic()
    nn.train(FLAGS, path_to_saver,
             x_train, tb_train, uc_train, gradT_train, nut_train,
             x_dev, tb_dev, uc_dev, gradT_dev, nut_dev)
    nn.save(description, model_path)
# Generated by Django 2.2.5 on 2019-09-14 23:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``Location`` model/table."""
    # First migration in this app's history, hence no dependencies.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Location',
            fields=[
                # Standard Django auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Address components; all required (no blank/null options set).
                ('line1', models.CharField(max_length=255)),
                ('city', models.CharField(max_length=100)),
                ('state', models.CharField(max_length=50)),
                ('country', models.CharField(max_length=50)),
            ],
        ),
    ]
|
import pytest
import unittest.mock as mock
from smqtk_descriptors import DescriptorElementFactory
from smqtk_indexing import NearestNeighborsIndex
from smqtk_relevancy.interfaces.rank_relevancy import RankRelevancyWithFeedback
from smqtk_iqr.iqr.iqr_session import IqrSession
from smqtk_descriptors.impls.descriptor_element.memory import \
DescriptorMemoryElement
class TestIqrSession (object):
"""
Unit tests pertaining to the IqrSession class.
"""
iqrs = None # type: IqrSession
    @classmethod
    def setup_method(cls) -> None:
        """
        Setup an iqr session with a mocked rank relevancy
        """
        # NOTE(review): ``setup_method`` is normally an *instance* method in
        # pytest; declaring it a classmethod stores ``iqrs`` on the class
        # itself. A fresh session is still created before each test, so tests
        # do not observe each other's state -- confirm this is intentional.
        rank_relevancy_with_feedback = mock.MagicMock(spec=RankRelevancyWithFeedback)
        cls.iqrs = IqrSession(rank_relevancy_with_feedback)
def test_context_manager_passthrough(self) -> None:
"""
Test that using an instance as a context manager works and passes along
the instance correctly.
"""
with self.iqrs as iqrs:
assert self.iqrs is iqrs
def test_adjudicate_new_pos_neg(self) -> None:
"""
Test that providing iterables to ``new_positives`` and
``new_negatives`` parameters result in additions to the positive and
negative sets respectively.
"""
p0 = DescriptorMemoryElement(0).set_vector([0])
self.iqrs.adjudicate(new_positives=[p0])
assert self.iqrs.positive_descriptors == {p0}
assert self.iqrs.negative_descriptors == set()
n1 = DescriptorMemoryElement(1).set_vector([1])
self.iqrs.adjudicate(new_negatives=[n1])
assert self.iqrs.positive_descriptors == {p0}
assert self.iqrs.negative_descriptors == {n1}
p2 = DescriptorMemoryElement(2).set_vector([2])
p3 = DescriptorMemoryElement(3).set_vector([3])
n4 = DescriptorMemoryElement(4).set_vector([4])
self.iqrs.adjudicate(new_positives=[p2, p3], new_negatives=[n4])
assert self.iqrs.positive_descriptors == {p0, p2, p3}
assert self.iqrs.negative_descriptors == {n1, n4}
def test_adjudicate_add_duplicates(self) -> None:
"""
Test that adding duplicate descriptors as positive or negative
adjudications has no effect as the behavior of sets should be observed.
"""
p0 = DescriptorMemoryElement(0).set_vector([0])
p2 = DescriptorMemoryElement(2).set_vector([2])
n1 = DescriptorMemoryElement(1).set_vector([1])
p3 = DescriptorMemoryElement(3).set_vector([3])
n4 = DescriptorMemoryElement(4).set_vector([4])
# Partially add the above descriptors
self.iqrs.adjudicate(new_positives=[p0], new_negatives=[n1])
assert self.iqrs.positive_descriptors == {p0}
assert self.iqrs.negative_descriptors == {n1}
# Add all descriptors, observing that that already added descriptors
# are ignored.
self.iqrs.adjudicate(new_positives=[p0, p2, p3], new_negatives=[n1, n4])
assert self.iqrs.positive_descriptors == {p0, p2, p3}
assert self.iqrs.negative_descriptors == {n1, n4}
# Duplicate previous call so no new descriptors are added. No change or
# issue should be observed.
self.iqrs.adjudicate(new_positives=[p0, p2, p3], new_negatives=[n1, n4])
assert self.iqrs.positive_descriptors == {p0, p2, p3}
assert self.iqrs.negative_descriptors == {n1, n4}
def test_adjudication_switch(self) -> None:
"""
Test providing positives and negatives on top of an existing state such
that the descriptor adjudications are reversed. (what was once positive
is now negative, etc.)
"""
p0 = DescriptorMemoryElement(0).set_vector([0])
p1 = DescriptorMemoryElement(1).set_vector([1])
p2 = DescriptorMemoryElement(2).set_vector([2])
n3 = DescriptorMemoryElement(3).set_vector([3])
n4 = DescriptorMemoryElement(4).set_vector([4])
# Set initial state
self.iqrs.positive_descriptors = {p0, p1, p2}
self.iqrs.negative_descriptors = {n3, n4}
# Adjudicate, partially swapping adjudications individually
self.iqrs.adjudicate(new_positives=[n3])
assert self.iqrs.positive_descriptors == {p0, p1, p2, n3}
assert self.iqrs.negative_descriptors == {n4}
self.iqrs.adjudicate(new_negatives=[p1])
assert self.iqrs.positive_descriptors == {p0, p2, n3}
assert self.iqrs.negative_descriptors == {n4, p1}
# Adjudicate swapping remaining at the same time
self.iqrs.adjudicate(new_positives=[n4], new_negatives=[p0, p2])
assert self.iqrs.positive_descriptors == {n3, n4}
assert self.iqrs.negative_descriptors == {p0, p1, p2}
def test_adjudicate_remove_pos_neg(self) -> None:
"""
Test that we can remove positive and negative adjudications using
"un_*" parameters.
"""
# Set initial state
p0 = DescriptorMemoryElement(0).set_vector([0])
p1 = DescriptorMemoryElement(1).set_vector([1])
p2 = DescriptorMemoryElement(2).set_vector([2])
n3 = DescriptorMemoryElement(3).set_vector([3])
n4 = DescriptorMemoryElement(4).set_vector([4])
# Set initial state
self.iqrs.positive_descriptors = {p0, p1, p2}
self.iqrs.negative_descriptors = {n3, n4}
# "Un-Adjudicate" descriptors individually
self.iqrs.adjudicate(un_positives=[p1])
assert self.iqrs.positive_descriptors == {p0, p2}
assert self.iqrs.negative_descriptors == {n3, n4}
self.iqrs.adjudicate(un_negatives=[n3])
assert self.iqrs.positive_descriptors == {p0, p2}
assert self.iqrs.negative_descriptors == {n4}
# "Un-Adjudicate" collectively
self.iqrs.adjudicate(un_positives=[p0, p2], un_negatives=[n4])
assert self.iqrs.positive_descriptors == set()
assert self.iqrs.negative_descriptors == set()
def test_adjudicate_combined_remove_unadj(self) -> None:
"""
Test combining adjudication switching with un-adjudication.
"""
# Set initial state
p0 = DescriptorMemoryElement(0).set_vector([0])
p1 = DescriptorMemoryElement(1).set_vector([1])
p2 = DescriptorMemoryElement(2).set_vector([2])
n3 = DescriptorMemoryElement(3).set_vector([3])
n4 = DescriptorMemoryElement(4).set_vector([4])
# Set initial state
self.iqrs.positive_descriptors = {p0, p1, p2}
self.iqrs.negative_descriptors = {n3, n4}
# Add p5, switch p1 to negative, unadj p2
p5 = DescriptorMemoryElement(5).set_vector([5])
self.iqrs.adjudicate(new_positives=[p5], new_negatives=[p1],
un_positives=[p2])
assert self.iqrs.positive_descriptors == {p0, p5}
assert self.iqrs.negative_descriptors == {n3, n4, p1}
# Add n6, switch n4 to positive, unadj n3
n6 = DescriptorMemoryElement(6).set_vector([6])
self.iqrs.adjudicate(new_positives=[n4], new_negatives=[n6],
un_negatives=[n3])
assert self.iqrs.positive_descriptors == {p0, p5, n4}
assert self.iqrs.negative_descriptors == {p1, n6}
def test_adjudicate_both_labels(self) -> None:
"""
Test that providing a descriptor element as both a positive AND
negative adjudication causes no state change..
"""
# Set initial state
p0 = DescriptorMemoryElement(0).set_vector([0])
p1 = DescriptorMemoryElement(1).set_vector([1])
p2 = DescriptorMemoryElement(2).set_vector([2])
n3 = DescriptorMemoryElement(3).set_vector([3])
n4 = DescriptorMemoryElement(4).set_vector([4])
# Set initial state
self.iqrs.positive_descriptors = {p0, p1, p2}
self.iqrs.negative_descriptors = {n3, n4}
# Attempt adjudicating a new element as both postive AND negative
e = DescriptorMemoryElement(5).set_vector([5])
self.iqrs.adjudicate(new_positives=[e], new_negatives=[e])
assert self.iqrs.positive_descriptors == {p0, p1, p2}
assert self.iqrs.negative_descriptors == {n3, n4}
def test_adjudicate_unadj_noeffect(self) -> None:
"""
Test that an empty call, or un-adjudicating a descriptor that is not
currently marked as a positive or negative, causes no state change.
"""
# Set initial state
p0 = DescriptorMemoryElement(0).set_vector([0])
p1 = DescriptorMemoryElement(1).set_vector([1])
p2 = DescriptorMemoryElement(2).set_vector([2])
n3 = DescriptorMemoryElement(3).set_vector([3])
n4 = DescriptorMemoryElement(4).set_vector([4])
# Set initial state
self.iqrs.positive_descriptors = {p0, p1, p2}
self.iqrs.negative_descriptors = {n3, n4}
# Empty adjudication
self.iqrs.adjudicate()
assert self.iqrs.positive_descriptors == {p0, p1, p2}
assert self.iqrs.negative_descriptors == {n3, n4}
# Attempt un-adjudication of a non-adjudicated element.
e = DescriptorMemoryElement(5).set_vector([5])
self.iqrs.adjudicate(un_positives=[e], un_negatives=[e])
assert self.iqrs.positive_descriptors == {p0, p1, p2}
assert self.iqrs.negative_descriptors == {n3, n4}
def test_adjudicate_cache_resetting_positive(self) -> None:
"""
Test results view cache resetting functionality on adjudicating certain
ways.
"""
e = DescriptorMemoryElement(0).set_vector([0])
a = [(DescriptorMemoryElement(0), 1.0), (DescriptorMemoryElement(0), 2.0)]
self.iqrs._ordered_pos = a
self.iqrs._ordered_neg = a
self.iqrs._ordered_non_adj = a
# Check that adding a positive adjudication resets the positive and
# non-adjudicated result caches.
self.iqrs.adjudicate(new_positives=[e])
assert self.iqrs._ordered_pos is None # reset
assert self.iqrs._ordered_neg is not None # NOT reset
assert self.iqrs._ordered_non_adj is None # reset
def test_adjudicate_cache_resetting_negative(self) -> None:
"""
Test results view cache resetting functionality on adjudicating certain
ways.
"""
e = DescriptorMemoryElement(0).set_vector([0])
a = [(DescriptorMemoryElement(0), 1.0), (DescriptorMemoryElement(0), 2.0)]
self.iqrs._ordered_pos = a
self.iqrs._ordered_neg = a
self.iqrs._ordered_non_adj = a
# Check that adding a positive adjudication resets the positive and
# non-adjudicated result caches.
self.iqrs.adjudicate(new_negatives=[e])
assert self.iqrs._ordered_pos is not None # NOT reset
assert self.iqrs._ordered_neg is None # reset
assert self.iqrs._ordered_non_adj is None # reset
def test_adjudication_cache_not_reset(self) -> None:
"""
Test that pos/neg/non-adj result caches are NOT reset when no state
change occurs under different circumstances
"""
# setup initial IQR session state.
a = [(DescriptorMemoryElement(0), 1.0), (DescriptorMemoryElement(0), 2.0)]
p0 = DescriptorMemoryElement(0).set_vector([0])
p1 = DescriptorMemoryElement(1).set_vector([1])
p2 = DescriptorMemoryElement(2).set_vector([2])
n3 = DescriptorMemoryElement(3).set_vector([3])
n4 = DescriptorMemoryElement(4).set_vector([4])
self.iqrs.positive_descriptors = {p0, p1, p2}
self.iqrs.negative_descriptors = {n3, n4}
self.iqrs._ordered_pos = self.iqrs._ordered_neg = self.iqrs._ordered_non_adj = a
# Empty adjudication
self.iqrs.adjudicate()
assert self.iqrs._ordered_pos is not None # NOT reset
assert self.iqrs._ordered_neg is not None # NOT reset
assert self.iqrs._ordered_non_adj is not None # NOT reset
# Repeat positive/negative adjudication
self.iqrs.adjudicate(new_positives=[p0])
assert self.iqrs._ordered_pos is not None # NOT reset
assert self.iqrs._ordered_neg is not None # NOT reset
assert self.iqrs._ordered_non_adj is not None # NOT reset
self.iqrs.adjudicate(new_negatives=[n3])
assert self.iqrs._ordered_pos is not None # NOT reset
assert self.iqrs._ordered_neg is not None # NOT reset
assert self.iqrs._ordered_non_adj is not None # NOT reset
self.iqrs.adjudicate(new_positives=[p1], new_negatives=[n4])
assert self.iqrs._ordered_pos is not None # NOT reset
assert self.iqrs._ordered_neg is not None # NOT reset
assert self.iqrs._ordered_non_adj is not None # NOT reset
# No-op un-adjudication
e = DescriptorMemoryElement(5).set_vector([5])
self.iqrs.adjudicate(un_positives=[e], un_negatives=[e])
assert self.iqrs._ordered_pos is not None # NOT reset
assert self.iqrs._ordered_neg is not None # NOT reset
assert self.iqrs._ordered_non_adj is not None # NOT reset
def test_update_working_set_no_pos(self) -> None:
"""
Working set updating should fail when there are no positive examples
in the current state.
"""
nn_index = mock.MagicMock(spec=NearestNeighborsIndex)
# initially constructed session has no pos/neg adjudications
assert len(self.iqrs.positive_descriptors) == 0
assert len(self.iqrs.external_positive_descriptors) == 0
with pytest.raises(
RuntimeError,
match=r"No positive descriptors to query the neighbor index with"
):
self.iqrs.update_working_set(nn_index)
def test_update_working_set(self) -> None:
"""
Test "updating" with some positives across both positives containers.
"""
d0 = DescriptorMemoryElement(0).set_vector([0])
d1 = DescriptorMemoryElement(1).set_vector([1])
d2 = DescriptorMemoryElement(2).set_vector([2])
# Mock index. Make it so that the neighbors of inputs is just the input
# itself.
nn_index: NearestNeighborsIndex = mock.Mock(spec=NearestNeighborsIndex)
nn_index.nn = mock.Mock(side_effect=lambda d, n: ([d], [0.])) # type: ignore
# See positive descriptors
self.iqrs.positive_descriptors.update({d0, d1})
self.iqrs.external_positive_descriptors.update({d2})
assert len(self.iqrs.working_set) == 0
self.iqrs.update_working_set(nn_index)
assert len(self.iqrs.working_set) == 3
assert set(self.iqrs.working_set.descriptors()) == {d0, d1, d2}
def test_refine_no_pos(self) -> None:
"""
Test that refinement cannot occur if there are no positive descriptor
external/adjudicated elements.
"""
with pytest.raises(RuntimeError, match='Did not find at least one '
'positive adjudication'):
self.iqrs.refine()
    def test_refine_no_prev_results(self) -> None:
        """
        Test that the results of RelevancyIndex ranking are directly reflected
        in a new results dictionary of probability values, even for elements
        that were also used in adjudication.

        This test is useful because a previous state of the IQR Session
        structure would force return probabilities for some descriptor elements
        to certain values if they were also present in the positive or negative
        adjudicate (internal or external) sets.
        """
        test_in_pos_elem = DescriptorMemoryElement(0).set_vector([0])
        test_in_neg_elem = DescriptorMemoryElement(1).set_vector([1])
        test_ex_pos_elem = DescriptorMemoryElement(2).set_vector([2])
        test_ex_neg_elem = DescriptorMemoryElement(3).set_vector([3])
        test_other_elem = DescriptorMemoryElement(4).set_vector([4])
        # Mock the working set so it has the correct size and elements
        desc_list = [test_in_pos_elem, test_in_neg_elem, test_other_elem]
        self.iqrs.working_set.add_many_descriptors(desc_list)
        # Mock return dictionary, probabilities don't matter much other than
        # they are not 1.0 or 0.0.
        # NOTE(review): rank_relevancy_with_feedback is a MagicMock (created in
        # setup_method), so this return value is exactly what refine() sees.
        pool_ids = [de.uuid() for de in desc_list]
        self.iqrs.rank_relevancy_with_feedback.rank_with_feedback.return_value = (  # type: ignore
            [0.5, 0.5, 0.5],
            pool_ids
        )
        # Asserting expected pre-condition where there are no results yet.
        assert self.iqrs.results is None
        assert self.iqrs.feedback_list is None
        # Prepare IQR state for refinement
        # - set dummy internal/external positive negatives.
        self.iqrs.external_descriptors(
            positive=[test_ex_pos_elem], negative=[test_ex_neg_elem]
        )
        self.iqrs.adjudicate(
            new_positives=[test_in_pos_elem], new_negatives=[test_in_neg_elem]
        )
        # Test calling refine method
        self.iqrs.refine()
        # We test that:
        # - ``rank_relevancy_with_feedback.rank`` called with the combination of
        #   external/adjudicated descriptor elements.
        # - ``results`` attribute now has a dict value
        # - value of ``results`` attribute is what we expect.
        pool_uids, pool_de = zip(*self.iqrs.working_set.items())
        pool = [de.vector() for de in pool_de]
        self.iqrs.rank_relevancy_with_feedback.rank_with_feedback.assert_called_once_with(  # type: ignore
            [test_in_pos_elem.vector(), test_ex_pos_elem.vector()],
            [test_in_neg_elem.vector(), test_ex_neg_elem.vector()],
            pool,
            pool_uids
        )
        assert self.iqrs.results is not None
        assert len(self.iqrs.results) == 3
        assert test_other_elem in self.iqrs.results
        assert test_in_pos_elem in self.iqrs.results
        assert test_in_neg_elem in self.iqrs.results
        assert self.iqrs.results[test_other_elem] == 0.5
        assert self.iqrs.results[test_in_pos_elem] == 0.5
        assert self.iqrs.results[test_in_neg_elem] == 0.5
        assert self.iqrs.feedback_list == desc_list
    def test_refine_with_prev_results(self) -> None:
        """
        Test that the results of RelevancyIndex ranking are directly reflected
        in an existing results dictionary of probability values, replacing any
        previous entries (disjoint keys are dropped).
        """
        test_in_pos_elem = DescriptorMemoryElement(0).set_vector([0])
        test_in_neg_elem = DescriptorMemoryElement(1).set_vector([1])
        test_ex_pos_elem = DescriptorMemoryElement(2).set_vector([2])
        test_ex_neg_elem = DescriptorMemoryElement(3).set_vector([3])
        test_other_elem = DescriptorMemoryElement(4).set_vector([4])
        # Mock the working set so it has the correct size and elements
        desc_list = [test_in_pos_elem, test_in_neg_elem, test_other_elem]
        self.iqrs.working_set.add_many_descriptors(desc_list)
        # Mock return dictionary, probabilities don't matter much other than
        # they are not 1.0 or 0.0.
        pool_ids = [*self.iqrs.working_set.iterkeys()]
        self.iqrs.rank_relevancy_with_feedback.rank_with_feedback.return_value = (  # type: ignore
            [0.5, 0.5, 0.5],
            pool_ids
        )
        # Create a "previous state" of the results dictionary containing
        # results from our "working set" of descriptor elements.
        self.iqrs.results = {
            test_in_pos_elem: 0.2,
            test_in_neg_elem: 0.2,
            test_other_elem: 0.2,
            # ``refine`` replaces the previous dict, so disjoint keys are
            # NOT retained.
            'something else': 0.3,  # type: ignore
        }
        # Create a "previous state" of the feedback results.
        self.iqrs.feedback_list = [test_ex_pos_elem,
                                   test_ex_neg_elem,
                                   test_other_elem]
        # Prepare IQR state for refinement
        # - set dummy internal/external positive negatives.
        self.iqrs.external_descriptors(
            positive=[test_ex_pos_elem], negative=[test_ex_neg_elem]
        )
        self.iqrs.adjudicate(
            new_positives=[test_in_pos_elem], new_negatives=[test_in_neg_elem]
        )
        # Test calling refine method
        self.iqrs.refine()
        # We test that:
        # - ``rank_relevancy_with_feedback.rank_with_feedback`` called with
        #   the combination of external/adjudicated descriptor elements.
        # - ``results`` attribute now has a dict value
        # - value of ``results`` attribute is what we expect.
        pool_uids, pool_de = zip(*self.iqrs.working_set.items())
        pool = [de.vector() for de in pool_de]
        self.iqrs.rank_relevancy_with_feedback.rank_with_feedback.assert_called_once_with(  # type: ignore
            [test_in_pos_elem.vector(), test_ex_pos_elem.vector()],
            [test_in_neg_elem.vector(), test_ex_neg_elem.vector()],
            pool,
            pool_uids
        )
        assert self.iqrs.results is not None
        assert len(self.iqrs.results) == 3
        assert test_other_elem in self.iqrs.results
        assert test_in_pos_elem in self.iqrs.results
        assert test_in_neg_elem in self.iqrs.results
        assert 'something else' not in self.iqrs.results
        assert self.iqrs.results[test_other_elem] == 0.5
        assert self.iqrs.results[test_in_pos_elem] == 0.5
        assert self.iqrs.results[test_in_neg_elem] == 0.5
        assert self.iqrs.feedback_list == desc_list
def test_ordered_results_no_results_no_cache(self) -> None:
"""
Test that an empty list is returned when ``ordered_results`` is called
before any refinement has occurred.
"""
assert self.iqrs.ordered_results() == []
def test_ordered_results_has_cache(self) -> None:
"""
Test that a shallow copy of the cached list is returned when there is
a cache.
"""
# Simulate there being a cache
self.iqrs._ordered_pos = ['simulated', 'cache'] # type: ignore
actual = self.iqrs.get_positive_adjudication_relevancy()
assert actual == self.iqrs._ordered_pos
assert id(actual) != id(self.iqrs._ordered_pos)
def test_ordered_results_has_results_no_cache(self) -> None:
"""
Test that an appropriate list is returned by ``ordered_results`` after
a refinement has occurred.
"""
# Mocking results map existing for return.
d0 = DescriptorMemoryElement(0).set_vector([0])
d1 = DescriptorMemoryElement(1).set_vector([1])
d2 = DescriptorMemoryElement(2).set_vector([2])
d3 = DescriptorMemoryElement(3).set_vector([3])
self.iqrs.results = {
d0: 0.0,
d1: 0.8,
d2: 0.2,
d3: 0.4,
}
# Cache should be empty before call to ``ordered_results``
assert self.iqrs._ordered_results is None
with mock.patch('smqtk_iqr.iqr.iqr_session.sorted',
side_effect=sorted) as m_sorted:
actual1 = self.iqrs.ordered_results()
m_sorted.assert_called_once()
expected = [(d1, 0.8), (d3, 0.4), (d2, 0.2), (d0, 0.0)]
assert actual1 == expected
# Calling the method a second time should not result in a ``sorted``
# operation due to caching.
with mock.patch('smqtk_iqr.iqr.iqr_session.sorted') as m_sorted:
actual2 = self.iqrs.ordered_results()
m_sorted.assert_not_called()
assert actual2 == expected
# Both returns should be shallow copies, thus not the same list
# instances.
assert id(actual1) != id(actual2)
def test_ordered_results_has_results_post_reset(self) -> None:
"""
Test that an empty list is returned after a reset where there was a
cached value before the reset.
"""
# Mocking results map existing for return.
d0 = DescriptorMemoryElement(0).set_vector([0])
d1 = DescriptorMemoryElement(1).set_vector([1])
d2 = DescriptorMemoryElement(2).set_vector([2])
d3 = DescriptorMemoryElement(3).set_vector([3])
self.iqrs.results = {
d0: 0.0,
d1: 0.8,
d2: 0.2,
d3: 0.4,
}
# Initial call to ``ordered_results`` should have a non-None return.
assert self.iqrs.ordered_results() is not None
self.iqrs.reset()
# Post-reset, there should be no results nor cache.
actual = self.iqrs.ordered_results()
assert actual == []
def test_feedback_results_weird_state(self) -> None:
"""
Test that there is a fallback case when assumptions are violated.
This method assumes the value of `feedback_list` will either be
iterable or will be None. If this is violated there should be a hard
stop.
"""
# not iterable, not None
self.iqrs.feedback_list = 666 # type: ignore
with pytest.raises(
RuntimeError,
match=r"Feedback results in an invalid state"
):
self.iqrs.feedback_results()
def test_feedback_results_no_results_no_cache(self) -> None:
"""
Test that an empty list is returned when ``feedback_results`` is called
before any refinement has occurred.
"""
assert self.iqrs.feedback_results() == []
def test_feedback_results_has_cache(self) -> None:
"""
Test that a shallow copy of the cached list is returned when there is
a cache.
"""
# Simulate there being a cache
self.iqrs.feedback_list = ['simulated', 'cache'] # type: ignore
actual = self.iqrs.feedback_results()
assert actual == self.iqrs.feedback_list
assert id(actual) != id(self.iqrs.feedback_list)
def test_feedback_results_has_results_post_reset(self) -> None:
"""
Test that an empty list is returned after a reset where there was a
cached value before the reset.
"""
# Mocking results map existing for return.
d0 = DescriptorMemoryElement(0).set_vector([0])
d1 = DescriptorMemoryElement(1).set_vector([1])
d2 = DescriptorMemoryElement(2).set_vector([2])
d3 = DescriptorMemoryElement(3).set_vector([3])
self.iqrs.feedback_list = [
d0,
d1,
d2,
d3,
]
# Initial call to ``ordered_results`` should have a non-None return.
assert self.iqrs.feedback_results() is not None
self.iqrs.reset()
# Post-reset, there should be no results nor cache.
actual = self.iqrs.feedback_results()
assert actual == []
def test_get_positive_adjudication_relevancy_has_cache(self) -> None:
"""
Test that a shallow copy of the cached list is returned if there is a
cache.
"""
self.iqrs._ordered_pos = ['simulation', 'cache'] # type: ignore
actual = self.iqrs.get_positive_adjudication_relevancy()
assert actual == ['simulation', 'cache']
assert id(actual) != id(self.iqrs._ordered_pos)
    def test_get_positive_adjudication_relevancy_no_cache_no_results(self) -> None:
        """
        Test that ``get_positive_adjudication_relevancy`` returns an empty
        list when in a pre-refine state with no positive adjudications.
        """
        assert self.iqrs.get_positive_adjudication_relevancy() == []
def test_get_positive_adjudication_relevancy_no_cache_has_results(self) -> None:
"""
Test that we can get positive adjudication relevancy scores correctly
from a not-cached state.
"""
d0 = DescriptorMemoryElement(0).set_vector([0])
d1 = DescriptorMemoryElement(1).set_vector([1])
d2 = DescriptorMemoryElement(2).set_vector([2])
d3 = DescriptorMemoryElement(3).set_vector([3])
# Simulate a populated contributing adjudication state (there must be
# some positives for a simulated post-refine state to be valid).
self.iqrs.rank_contrib_pos = {d1, d3}
self.iqrs.rank_contrib_neg = {d0}
# Simulate post-refine results map.
self.iqrs.results = {
d0: 0.1,
d1: 0.8,
d2: 0.2,
d3: 0.4,
}
# Cache is initially empty
assert self.iqrs._ordered_pos is None
# Test that the appropriate sorting actually occurs.
with mock.patch('smqtk_iqr.iqr.iqr_session.sorted',
side_effect=sorted) as m_sorted:
actual1 = self.iqrs.get_positive_adjudication_relevancy()
m_sorted.assert_called_once()
expected = [(d1, 0.8), (d3, 0.4)]
assert actual1 == expected
# Calling the method a second time should not result in a ``sorted``
# operation due to caching.
with mock.patch('smqtk_iqr.iqr.iqr_session.sorted',
side_effect=sorted) as m_sorted:
actual2 = self.iqrs.get_positive_adjudication_relevancy()
m_sorted.assert_not_called()
assert actual2 == expected
# Both returns should be shallow copies, thus not the same list
# instances.
assert id(actual1) != id(actual2)
def test_get_negative_adjudication_relevancy_has_cache(self) -> None:
"""
Test that a shallow copy of the cached list is returned if there is a
cache.
"""
self.iqrs._ordered_neg = ['simulation', 'cache'] # type: ignore
actual = self.iqrs.get_negative_adjudication_relevancy()
assert actual == ['simulation', 'cache']
assert id(actual) != id(self.iqrs._ordered_neg)
    def test_get_negative_adjudication_relevancy_no_cache_no_results(self) -> None:
        """
        Test that ``get_negative_adjudication_relevancy`` returns an empty
        list when in a pre-refine state with no negative adjudications.
        """
        assert self.iqrs.get_negative_adjudication_relevancy() == []
def test_get_negative_adjudication_relevancy_no_cache_has_results(self) -> None:
"""
Test that we can get negative adjudication relevancy scores correctly
from a not-cached state.
"""
d0 = DescriptorMemoryElement(0).set_vector([0])
d1 = DescriptorMemoryElement(1).set_vector([1])
d2 = DescriptorMemoryElement(2).set_vector([2])
d3 = DescriptorMemoryElement(3).set_vector([3])
# Simulate a populated contributing adjudication state (there must be
# some positives for a simulated post-refine state to be valid).
self.iqrs.rank_contrib_pos = {d1}
self.iqrs.rank_contrib_neg = {d0, d2}
# Simulate post-refine results map.
self.iqrs.results = {
d0: 0.1,
d1: 0.8,
d2: 0.2,
d3: 0.4,
}
# Cache is initially empty
assert self.iqrs._ordered_neg is None
# Test that the appropriate sorting actually occurs.
with mock.patch('smqtk_iqr.iqr.iqr_session.sorted',
side_effect=sorted) as m_sorted:
actual1 = self.iqrs.get_negative_adjudication_relevancy()
m_sorted.assert_called_once()
expected = [(d2, 0.2), (d0, 0.1)]
assert actual1 == expected
# Calling the method a second time should not result in a ``sorted``
# operation due to caching.
with mock.patch('smqtk_iqr.iqr.iqr_session.sorted',
side_effect=sorted) as m_sorted:
actual2 = self.iqrs.get_negative_adjudication_relevancy()
m_sorted.assert_not_called()
assert actual2 == expected
# Both returns should be shallow copies, thus not the same list
# instances.
assert id(actual1) != id(actual2)
def test_get_unadjudicated_relevancy_has_cache(self) -> None:
"""
Test that a shallow copy of the cached list is returned if there is a
cache.
"""
self.iqrs._ordered_non_adj = ['simulation', 'cache'] # type: ignore
actual = self.iqrs.get_unadjudicated_relevancy()
assert actual == ['simulation', 'cache']
assert id(actual) != id(self.iqrs._ordered_non_adj)
    def test_get_unadjudicated_relevancy_no_cache_no_results(self) -> None:
        """
        Test that ``get_unadjudicated_relevancy`` returns an empty list when
        in a pre-refine state where no results state exists.
        """
        assert self.iqrs.get_unadjudicated_relevancy() == []
def test_get_unadjudicated_relevancy_no_cache_has_results(self) -> None:
"""
Test that we get the non-adjudicated DescriptorElements and their
scores correctly from a non-cached state with known results.
"""
d0 = DescriptorMemoryElement(0).set_vector([0])
d1 = DescriptorMemoryElement(1).set_vector([1])
d2 = DescriptorMemoryElement(2).set_vector([2])
d3 = DescriptorMemoryElement(3).set_vector([3])
# Simulate a populated contributing adjudication state (there must be
# some positives for a simulated post-refine state to be valid).
self.iqrs.rank_contrib_pos = {d1}
self.iqrs.rank_contrib_neg = {d0}
# Simulate post-refine results map.
self.iqrs.results = {
d0: 0.1,
d1: 0.8,
d2: 0.2,
d3: 0.4,
}
# Cache should be initially empty
assert self.iqrs._ordered_non_adj is None
# Test that the appropriate sorting actually occurs.
with mock.patch('smqtk_iqr.iqr.iqr_session.sorted',
side_effect=sorted) as m_sorted:
actual1 = self.iqrs.get_unadjudicated_relevancy()
m_sorted.assert_called_once()
expected = [(d3, 0.4), (d2, 0.2)]
assert actual1 == expected
# Calling the method a second time should not result in a ``sorted``
# operation due to caching.
with mock.patch('smqtk_iqr.iqr.iqr_session.sorted',
side_effect=sorted) as m_sorted:
actual2 = self.iqrs.get_unadjudicated_relevancy()
m_sorted.assert_not_called()
assert actual2 == expected
# Both returns should be shallow copies, thus not the same list
# instances.
assert id(actual1) != id(actual2)
def test_reset_result_cache_invalidation(self) -> None:
"""
Test that calling the reset method resets the result view caches to
None.
"""
self.iqrs.reset()
assert self.iqrs._ordered_pos is None
assert self.iqrs._ordered_neg is None
assert self.iqrs._ordered_non_adj is None
def test_get_set_state(self) -> None:
"""
Simple test of get-state functionality
"""
d0 = DescriptorMemoryElement(0).set_vector([0])
d1 = DescriptorMemoryElement(1).set_vector([1])
d2 = DescriptorMemoryElement(2).set_vector([2])
d3 = DescriptorMemoryElement(3).set_vector([3])
# Set up the session to have some state.
self.iqrs.positive_descriptors.update({d0})
self.iqrs.negative_descriptors.update({d1})
self.iqrs.external_positive_descriptors.update({d2})
self.iqrs.external_negative_descriptors.update({d3})
b = self.iqrs.get_state_bytes()
assert b is not None
assert len(b) > 0
rank_relevancy_with_feedback = mock.MagicMock(spec=RankRelevancyWithFeedback)
descr_fact = DescriptorElementFactory(DescriptorMemoryElement, {})
new_iqrs = IqrSession(rank_relevancy_with_feedback)
new_iqrs.set_state_bytes(b, descr_fact)
assert self.iqrs.positive_descriptors == new_iqrs.positive_descriptors
assert self.iqrs.negative_descriptors == new_iqrs.negative_descriptors
assert self.iqrs.external_positive_descriptors == new_iqrs.external_positive_descriptors
assert self.iqrs.external_negative_descriptors == new_iqrs.external_negative_descriptors
    def test_refine_no_neg(self) -> None:
        """
        Test refinement without any negative adjudications and ensure that the
        farthest descriptor from the positive example is automatically chosen
        as the negative example.
        """
        test_in_pos_elem = DescriptorMemoryElement(0).set_vector([0])
        test_ex_pos_elem = DescriptorMemoryElement(2).set_vector([2])
        test_other_elem = DescriptorMemoryElement(4).set_vector([4])
        # Farthest working-set element from the positive example vectors.
        test_other_elem_far = DescriptorMemoryElement(5).set_vector([5])
        # Mock the working set so it has the correct size and elements
        desc_list = [test_in_pos_elem, test_other_elem, test_other_elem_far]
        self.iqrs.working_set.add_many_descriptors(desc_list)
        # Mock return dictionary, probabilities don't matter much other than
        # they are not 1.0 or 0.0.
        pool_ids = [de.uuid() for de in desc_list]
        self.iqrs.rank_relevancy_with_feedback.rank_with_feedback.return_value = (  # type: ignore
            [0.7, 0.3, 0.1],
            pool_ids
        )
        # Prepare IQR state for refinement
        # - set dummy internal/external positive negatives w/ no negative examples
        self.iqrs.external_descriptors(
            positive=[test_ex_pos_elem], negative=[]
        )
        self.iqrs.adjudicate(
            new_positives=[test_in_pos_elem], new_negatives=[]
        )
        # Test calling refine method
        self.iqrs.refine()
        self.iqrs.rank_relevancy_with_feedback.rank_with_feedback.assert_called_once()  # type: ignore
        # Get the most recent call arguments,
        # extracting what was passed as the negative descriptor input,
        # which should be populated by the auto-negative selection logic.
        neg_list_arg = self.iqrs.rank_relevancy_with_feedback.rank_with_feedback.call_args[0][1]  # type: ignore
        assert neg_list_arg == [test_other_elem_far.vector()]
    def test_refine_neg_autoselect_fail(self) -> None:
        """
        Test refinement without any negative adjudications when all of the
        other possible adjudications are already marked as positive, in which
        case negative auto-selection has nothing to pick from and must raise.
        """
        test_in_pos_elem = DescriptorMemoryElement(0).set_vector([0])
        test_ex_pos_elem = DescriptorMemoryElement(2).set_vector([2])
        test_other_elem = DescriptorMemoryElement(4).set_vector([4])
        test_other_elem_far = DescriptorMemoryElement(5).set_vector([5])
        # Mock the working set so it has the correct size and elements
        desc_list = [test_in_pos_elem, test_other_elem, test_other_elem_far]
        self.iqrs.working_set.add_many_descriptors(desc_list)
        # Mock return dictionary, probabilities don't matter much other than
        # they are not 1.0 or 0.0.
        pool_ids = [de.uuid() for de in desc_list]
        self.iqrs.rank_relevancy_with_feedback.rank_with_feedback.return_value = (  # type: ignore
            [0.7, 0.3, 0.1],
            pool_ids
        )
        # Prepare IQR state for refinement
        # - all external descriptors marked as positive examples and no negatives
        self.iqrs.external_descriptors(
            positive=[test_ex_pos_elem, test_other_elem, test_other_elem_far], negative=[]
        )
        self.iqrs.adjudicate(
            new_positives=[test_in_pos_elem], new_negatives=[]
        )
        with pytest.raises(
            RuntimeError,
            match=r"Negative auto-selection failed. Did not select any negative examples."
        ):
            self.iqrs.refine()
class TestIqrSessionBehavior (object):
    """
    Test certain IqrSession state transitions, i.e. behavior across multiple
    successive method invocations (as opposed to the per-method unit tests
    above).
    """
    # TODO - More complicated state transitions.
|
#!/usr/bin/env python3
__author__ = 'Lindo Nkambule'
import hailtop.batch as hb
def scatter_interval_list(b: hb.batch.Batch, interval_list_file: hb.resource.ResourceFile, scatter_count: int = 50,
                          break_bands_at_multiples_of: int = 1000000, scatter_img: 'str | None' = None, memory: int = 2,
                          out_dir: 'str | None' = None):
    """
    break the calling interval list into sub-intervals

    :param b: batch
    :param interval_list_file: one or more interval lists
    :param scatter_count: the number of files into which to scatter the resulting list by locus
    :param break_bands_at_multiples_of: if set to a positive value will create a new interval list with the original
    intervals broken up at integer multiples of this value. Set to 0 to NOT break up intervals
    :param scatter_img: image to use for the job; defaults to the Broad genomes-in-the-cloud image
    :param memory: job memory, in GiB
    :param out_dir: output directory the scattered intervals are written under (``{out_dir}/scatter-intervals``)
    :return: the ``scatter-interval-list`` Job; its ``outfiles`` resource holds the scattered interval lists
    """
    # break the calling interval list into sub-intervals
    docker_image = scatter_img if scatter_img else 'us.gcr.io/broad-gotc-prod/genomes-in-the-cloud:2.4.3-1564508330'

    scatter_list = b.new_job(name='scatter-interval-list')

    scatter_list.image(docker_image)
    scatter_list.cpu(1)
    scatter_list.memory(f'{memory}Gi')
    scatter_list.command('mkdir /scatter_intervals')
    # Picard IntervalListTools performs the actual scatter.
    scatter_list.command(f'java -Xms1g -jar /usr/gitc/picard.jar \
        IntervalListTools \
        SCATTER_COUNT={scatter_count} \
        SUBDIVISION_MODE=BALANCING_WITHOUT_INTERVAL_SUBDIVISION_WITH_OVERFLOW \
        UNIQUE=true \
        SORT=true \
        BREAK_BANDS_AT_MULTIPLES_OF={break_bands_at_multiples_of} \
        INPUT={interval_list_file} \
        OUTPUT=/scatter_intervals')

    # Rename the scattered lists so each carries its 1-based shard index as a
    # filename prefix (done with an inline Python script inside the container).
    scatter_list.command('''
cat > my_script.py <<EOF
import sys
import os
import glob

intervals = sorted(glob.glob('/scatter_intervals/*/*.interval_list'))
for i, interval in enumerate(intervals):
    (directory, filename) = os.path.split(interval)
    newName = os.path.join(directory, str(i + 1) + filename)
    os.rename(interval, newName)
EOF
python3 my_script.py
''')

    scatter_list.command(f'mv /scatter_intervals {scatter_list.outfiles}')
    b.write_output(scatter_list.outfiles, f'{out_dir}/scatter-intervals')

    # We return the `scatter_list` Job object that can be used in downstream jobs.
    return scatter_list
|
"""Author: Brandon Trabucco, Copyright 2019, MIT License"""
from multiarchy.launch import launch_local
from multiarchy.baselines.sac import sac, sac_variant
from gym.envs.mujoco.half_cheetah import HalfCheetahEnv
if __name__ == "__main__":

    # parameters for the learning experiment
    variant = dict(
        max_num_steps=1000000,
        logging_dir="half_cheetah/sac/",
        hidden_size=256,
        num_hidden_layers=2,
        reward_scale=1.0,
        discount=0.99,
        initial_alpha=1.0,
        policy_learning_rate=0.0003,
        qf_learning_rate=0.0003,
        tau=0.005,
        batch_size=256,
        max_path_length=1000,
        num_workers=10,
        num_warm_up_steps=100000,
        num_steps_per_epoch=1000,
        num_steps_per_eval=50000,
        num_steps_per_gradient=1,
        num_epochs_per_eval=10,
        num_epochs=10000)

    # every key required by the sac variant spec must be present
    # (dict-view subset test replaces the previous all([... in ...]) scan)
    assert sac_variant.keys() <= variant.keys()

    # launch the experiment using ray
    launch_local(
        sac,
        variant,
        HalfCheetahEnv,
        num_seeds=3)
|
from urllib import request, error
class Download:
    """Thin stateful wrapper around the module-level ``download`` function."""

    def __init__(self, url='', verbose=False, timeout=5):
        self.url = url            # target URL to fetch
        self.verbose = verbose    # echo progress/errors to stdout
        self.html = ''            # last downloaded page contents
        self.error = False        # legacy error flag (kept for API parity)
        self.timeout = timeout    # socket timeout in seconds

    def download(self, user_agent='wswp', num_retries=2):
        """Fetch ``self.url`` and store the page text on ``self.html``."""
        try:
            self.html = download(self.url, self.verbose, user_agent=user_agent,
                                 num_retries=num_retries, timeout=self.timeout)
        except IOError:
            raise ValueError("Could not download requested page.")
def download(url, verbose, user_agent='wswp', num_retries=2, decoding_format='utf-8', timeout=5):
    """
    Function to download contents from a given url

    Input:
    url: str
        string with the url to download from
    verbose: bool
        Print out url and errors
    user_agent: str
        Default 'wswp'
    num_retries: int
        Number of times to retry downloading on a server-side (5XX) error
    decoding_format: str
        Codec used to decode the response body. Default "utf-8"
    timeout: int
        Socket timeout in seconds

    Output:
    returns: str
        string with contents of given url

    Raises IOError when the download ultimately fails.
    """
    if verbose:
        print('Downloading:', url)
    headers = {'User-agent': user_agent}
    request_obj = request.Request(url, headers=headers)
    try:
        with request.urlopen(request_obj, timeout=timeout) as response:
            html = response.read()
    except error.URLError as e:
        if verbose:
            print('Download error:', e.reason)
        # Retry transient server-side failures; the retry budget was
        # previously accepted but never used.
        if num_retries > 0 and hasattr(e, 'code') and 500 <= e.code < 600:
            return download(url, verbose, user_agent=user_agent,
                            num_retries=num_retries - 1,
                            decoding_format=decoding_format, timeout=timeout)
        raise IOError(e.reason)
    return html.decode(decoding_format)
|
from qtpy import QtCore
from qtpy.QtWidgets import (
QApplication,
QFrame,
QHBoxLayout,
QLabel,
QProgressBar,
QVBoxLayout,
QWidget,
)
class ProgressBar(QWidget):
    """QProgressBar with QLabels for description and ETA."""

    def __init__(self, parent=None) -> None:
        super().__init__(parent)
        # Free the widget when it is closed rather than merely hiding it.
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.pbar = QProgressBar()
        self.description_label = QLabel()
        self.eta_label = QLabel()
        # One row: [description | progress bar | ETA].
        base_layout = QVBoxLayout()
        pbar_layout = QHBoxLayout()
        pbar_layout.addWidget(self.description_label)
        pbar_layout.addWidget(self.pbar)
        pbar_layout.addWidget(self.eta_label)
        base_layout.addLayout(pbar_layout)
        # Thin separator line under the bar row.
        line = QFrame(self)
        line.setObjectName("QtCustomTitleBarLine")
        line.setFixedHeight(1)
        base_layout.addWidget(line)
        self.setLayout(base_layout)

    def setRange(self, min, max):
        # NOTE(review): parameter names shadow builtins ``min``/``max``;
        # kept as-is for Qt API naming parity.
        self.pbar.setRange(min, max)

    def _set_value(self, value):
        self.pbar.setValue(value)
        # Pump the event loop so the update is painted during long work.
        QApplication.processEvents()

    def _get_value(self):
        return self.pbar.value()

    def _set_description(self, desc):
        self.description_label.setText(desc)
        QApplication.processEvents()

    def _set_eta(self, eta):
        self.eta_label.setText(eta)
class ProgressBarGroup(QWidget):
    """One or more QProgressBars with a QFrame line separator at the bottom"""

    def __init__(self, pbar, parent=None) -> None:
        super().__init__(parent)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        # Stack the given progress bar above a thin separator line.
        layout = QVBoxLayout()
        layout.addWidget(pbar)
        layout.setContentsMargins(0, 0, 0, 0)
        separator = QFrame(self)
        separator.setObjectName("QtCustomTitleBarLine")
        separator.setFixedHeight(1)
        layout.addWidget(separator)
        self.setLayout(layout)
|
from pkg_resources import declare_namespace

from .default_proxy import DefaultProxy

# Register this package as a pkg_resources-style namespace package so other
# distributions can contribute modules under the same package name.
# NOTE(review): pkg_resources namespace packages are deprecated in favor of
# native (PEP 420) namespace packages — all distributions sharing this
# namespace would need to migrate together; confirm before changing.
declare_namespace(__name__)

__all__ = [
    'DefaultProxy',
    'create_proxy'
]

# Public factory alias for the default proxy implementation.
create_proxy = DefaultProxy.of
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzing the potentials of wrapped-branes models.
This is mostly a pedagogical example.
TensorFlow-based analysis really shines if the number of true scalars
(i.e. with degeneracies removed) is >= ca. 20.
The scaffolding included here makes the code quite easy to adopt to
other reasonably straightforward cases. One only needs to introduce a
function that computes the scalar potential like dim7_potential()
below (remembering that one is operating on TensorFlow objects rather
than numerical quantities), and then call:
scan({{number_of_scalars}}, {{potential_function}}).
"""
import pdb # For interactive debugging only.
import collections
import dataclasses
import numpy
import scipy.optimize
import sys
import tensorflow as tf
from m_theory_lib import m_util
# The actual problem definitions.
from wrapped_branes import potentials
@dataclasses.dataclass(frozen=True)
class Solution(object):
  """A numerically located critical point of the scalar potential."""
  potential: float  # Potential value at `pos`.
  stationarity: float  # Stationarity-violation measure (~0 at a true critical point).
  pos: numpy.ndarray  # Scalar-field coordinates of the point.
def scan_for_critical_points(
    problem,
    starting_points,
    stationarity_threshold=1e-4,
    mdnewton=True,
    debug=True,
    *problem_extra_args,
    **problem_extra_kwargs):
  """Scans for critical points of a scalar function.

  Args:
    problem: The potential-function specifying the problem.
    starting_points: iterable with starting points to start the search from.
    stationarity_threshold: Upper bound on permissible post-optimization
      stationarity for a solution to be considered good.
    mdnewton: Whether to polish each candidate with multi-dimensional Newton
      iteration on the gradient of the potential before yielding it.
    debug: Whether to print newly found solutions right when they
      are discovered. NOTE(review): currently unused in the function body —
      confirm whether printing was intended here.
    problem_extra_args: Extra positional arguments for the problem-function.
    problem_extra_kwargs: Extra keyword arguments for the problem-function.

  Yields:
    A `Solution` numerical solution.
  """
  def f_problem(pos):
    # Bind the extra args so downstream helpers see a 1-argument function.
    return problem(pos, *problem_extra_args, **problem_extra_kwargs)
  tf_stat_func = m_util.tf_stationarity(f_problem)
  tf_grad_stat_func = m_util.tf_grad(tf_stat_func)
  tf_grad_pot_func = None
  tf_jacobian_pot_func = None
  if mdnewton:
    # Newton polishing needs gradient and Jacobian of the potential itself.
    tf_grad_pot_func = m_util.tf_grad(f_problem)
    tf_jacobian_pot_func = m_util.tf_jacobian(tf_grad_pot_func)
  for x0 in starting_points:
    # Minimize the stationarity measure starting from x0.
    val_opt, xs_opt = m_util.tf_minimize(tf_stat_func, x0,
                                         tf_grad_func=tf_grad_stat_func,
                                         precise=False)
    if val_opt > stationarity_threshold:
      continue  # with next starting point.
    # We found a point that apparently is close to a critical point.
    t_xs_opt = tf.constant(xs_opt, dtype=tf.float64)
    if not mdnewton:
      yield Solution(potential=f_problem(t_xs_opt).numpy(),
                     stationarity=tf_stat_func(t_xs_opt).numpy(),
                     pos=xs_opt)
      continue  # with next solution.
    # We could use MDNewton to force each gradient-component
    # of the stationarity condition to zero. It is however
    # more straightforward to instead do this directly
    # for the gradient of the potential.
    *_, xs_opt_mdnewton = m_util.tf_mdnewton(
        f_problem,
        t_xs_opt,
        maxsteps=4,
        debug_func=None,
        tf_grad_func=tf_grad_pot_func,
        tf_jacobian_func=tf_jacobian_pot_func)
    t_xs_opt_mdnewton = tf.constant(xs_opt_mdnewton, dtype=tf.float64)
    yield Solution(potential=f_problem(t_xs_opt_mdnewton).numpy(),
                   stationarity=tf_stat_func(t_xs_opt_mdnewton).numpy(),
                   pos=xs_opt_mdnewton)
if __name__ == '__main__':
  # Wide numpy print width keeps each solution vector on a single line.
  numpy.set_printoptions(linewidth=200)
  problem_name = sys.argv[-1]
  if len(sys.argv) != 2 or problem_name not in potentials.PROBLEMS:
    sys.exit('\n\nUsage: python3 -i -m wrapped_branes.wrapped_branes {problem_name}.\n'
             'Known problem names are: %s' % ', '.join(
                 sorted(potentials.PROBLEMS)))
  problem = potentials.PROBLEMS[problem_name]
  rand_state = numpy.random.RandomState(seed=0)
  def gen_x0s():
    """Yields an endless stream of random starting positions."""
    while True:
      yield rand_state.normal(scale=0.15, size=problem.num_scalars)
  solution_stream = scan_for_critical_points(
      problem.tf_potential,
      gen_x0s(),
      mdnewton=True,
      **problem.tf_potential_kwargs)
  # Report the first 100 solutions found.
  for _, solution in zip(range(100), solution_stream):
    print('P=%+12.8f S=%8.3g at: %s' % (solution.potential,
                                        solution.stationarity,
                                        numpy.round(solution.pos, 4)))
|
from random import randint
# Exercise 074 - Largest and Smallest Tuple Values
"""Create a program that will generate five random numbers
and put them in a tuple.After that, show the list of
generated numbers and also indicate the smallest and
largest value that are in the tuple.
"""
# Draw five random values in [1, 10] into an immutable tuple.
numbers = tuple(randint(1, 10) for _ in range(5))

print("The numbers drawn were: ", end="")
for value in numbers:
    print(f"{value} ", end="")
print(f"\nThe highest value was {max(numbers)}")
print(f"The smallest value was {min(numbers)}")
|
import time
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tqdm import tqdm
from cont_evaluate import evaluate_policy
from algorithms.klac import ReplayBuffer, KLAC, apply_squashing_func
class BOPAHSingle(KLAC):
    def __init__(self, state_dim, action_dim, max_action, kl_coef, lamb=1, num_critics=5, gradient_norm_panelty=0,
                 gradient_norm_limit=30, hidden_dim=64):
        """
        KLAC variant that additionally builds a critic on held-out validation
        data; its outputs feed the alpha (KL-coefficient) hyper-gradient ops.

        :param state_dim: observation dimensionality
        :param action_dim: action dimensionality
        :param max_action: action magnitude bound used for squashing
        :param kl_coef: initial KL regularization coefficient
        :param lamb: passed through to KLAC (see base class)
        :param num_critics: size of the critic ensemble
        :param gradient_norm_panelty: gradient-norm penalty weight (name sic)
        :param gradient_norm_limit: gradient-norm limit for the penalty
        :param hidden_dim: hidden layer width for all networks
        """
        super(BOPAHSingle, self).__init__(state_dim, action_dim, max_action, kl_coef, lamb, num_critics, gradient_norm_panelty,
                                          gradient_norm_limit, hidden_dim)
        # Placeholders for validation-split transitions.
        self.valid_obs_ph, self.valid_action_ph, self.valid_reward_ph, self.valid_terminal_ph, self.valid_next_obs_ph \
            = [tf.keras.layers.Input(d) for d in [state_dim, action_dim, 1, 1, state_dim]]
        self.valid_ensemble_mask = tf.keras.layers.Input(self.num_critics)

        # For hyper-gradient
        self.traj_valid_obs_ph = tf.keras.layers.Input((None, self.state_dim), name='traj_valid_obs_ph')  # (maxlen) x |S|
        self.traj_valid_actions_ph = tf.keras.layers.Input((None, self.action_dim), name='traj_valid_actions_ph')  # (maxlen) x |A|
        self.mask_ph = tf.keras.layers.Input((None, 1), name='mask_ph')  # (maxlen) x 1
        self.gradient_buffer_ph = tf.placeholder(dtype=tf.float32, shape=[], name='gradient_buffer_ph')

        valid_action_pi, _, _ = self.actor([self.valid_obs_ph])
        valid_tensors = [self.valid_obs_ph, self.valid_action_ph, self.valid_next_obs_ph, self.valid_terminal_ph, valid_action_pi, self.valid_ensemble_mask]
        # Build the validation critic alongside the training critic.
        self.valid_critic_v, self.valid_critic_q, valid_mean_q_loss, valid_qs_pi, valid_critic_train_op, valid_target_update_op = self._build_critic(
            valid_tensors, v_bonus=0, q_bonus=self.valid_reward_ph, hidden_dim=self.hidden_dim)
        self.eval_ops.extend([tf.reduce_mean(valid_mean_q_loss), tf.reduce_mean(valid_qs_pi)])
        self.eval_labels.extend(['valid_q_loss', 'valid_mean_q'])
        # Fold the validation-critic train/target-update ops into the parent's
        # step op groups.
        self.step_ops[0] = tf.group([self.step_ops[0], valid_critic_train_op])  # TODO: hard-coded index
        self.step_ops[2] = tf.group([self.step_ops[2], valid_target_update_op])
def _build_kl_coef(self, init_kl_coef):
"""
overrides KLAC._build_kl_coef
"""
self.log_kl_coef = tf.Variable(np.log(init_kl_coef), dtype=tf.float32)
self.sess.run(self.log_kl_coef.initializer)
return tf.exp(self.log_kl_coef)
def _compute_negative_covariance(self, obs, actor, train_critic_q, valid_critic_q, num_samples=20):
batch_size = tf.shape(obs)[0]
tiled_obs = tf.tile(obs, [num_samples, 1])
action_samples, _, actor_dist = actor([tiled_obs])
action_samples_1, _, actor_dist = actor([tiled_obs])
action_samples_2, _, actor_dist = actor([tiled_obs])
q1s_pi = train_critic_q([tiled_obs, action_samples])
q2s_pi = valid_critic_q([tiled_obs, action_samples])
q1_pi = tf.reshape(self._reduce_q(q1s_pi), [num_samples, batch_size, -1])
# handle multiple output dimension for train critic
q2_pi = tf.reshape(self._reduce_q(q2s_pi), [num_samples, batch_size, 1])
v1 = tf.reduce_mean(q1_pi, axis=0)
v2 = tf.reduce_mean(q2_pi, axis=0)
qq = tf.reduce_mean(q1_pi * q2_pi, axis=0)
q1s_pi = train_critic_q([tiled_obs, action_samples_1])
q2s_pi = valid_critic_q([tiled_obs, action_samples_2])
q1_pi = tf.reshape(self._reduce_q(q1s_pi), [num_samples, batch_size, -1])
q2_pi = tf.reshape(self._reduce_q(q2s_pi), [num_samples, batch_size, 1])
v1 = tf.reduce_mean(q1_pi, axis=0)
v2 = tf.reduce_mean(q2_pi, axis=0)
return v1 * v2 - qq
def _get_alpha_train_op(self, train_critic_q, valid_critic_q, maxlen):
    """
    Build the hyper-gradient machinery for the KL coefficient (alpha).

    Returns a tuple (train_op, v_grads_s0):
      - v_grads_s0: per-trajectory importance-sampled estimate of the
        gradient of the initial-state value w.r.t. the KL coefficient,
        evaluated on the padded validation trajectories fed through
        self.traj_valid_obs_ph / self.traj_valid_actions_ph / self.mask_ph.
      - train_op: an Adam ascent step on self.log_kl_coef driven by the
        externally averaged gradient (fed via self.gradient_buffer_ph),
        followed by clipping log_kl_coef into [log 0.75, log 500].
    """
    N = tf.shape(self.traj_valid_obs_ph)[0]  # number of padded trajectories
    reshaped_obs = tf.reshape(self.traj_valid_obs_ph, [-1, self.state_dim])
    reshaped_actions = tf.reshape(self.traj_valid_actions_ph, [-1, self.action_dim])
    # Per-state gradient proxy: -Cov(train Q, valid Q) scaled by 1/kl_coef.
    negative_q_cov = self._compute_negative_covariance(reshaped_obs, self.actor, train_critic_q, valid_critic_q)
    pi_grad = tf.reshape(negative_q_cov / self.kl_coef, [N, maxlen, 1])
    _, _, behavior_dist = self.behavior_policy([reshaped_obs])
    _, _, target_dist = self.actor([reshaped_obs])
    # Undo the tanh squashing (clipped for numerical safety) so log-probs
    # are evaluated in pre-squash space.
    pre_squash_actions = tf.math.atanh(tf.clip_by_value(reshaped_actions, -1 + 1e-6, 1 - 1e-6))
    # Per-step log importance ratio log(pi/mu), with log(gamma) folded in
    # so the exclusive cumsum below yields gamma^t * prod_{k<t}(pi/mu).
    log_is_ratios = target_dist.log_prob(pre_squash_actions) - behavior_dist.log_prob(pre_squash_actions) + tf.log(self.gamma)
    cum_log_is_ratios = tf.cumsum(tf.reshape(log_is_ratios, [N, maxlen, 1]), exclusive=True, axis=1)
    # Clip in log space to keep the IS weights from exploding/vanishing.
    cum_log_is_ratios = tf.clip_by_value(cum_log_is_ratios, -7, 7)
    # Discounted, IS-weighted sum over time; mask_ph zeroes the padding steps.
    v_grads_s0 = tf.reduce_sum(tf.exp(cum_log_is_ratios) * pi_grad * self.mask_ph, axis=1)
    # Gradient ASCENT: the fed gradient is negated because apply_gradients
    # performs descent. The gradient itself comes from the caller, not from
    # v_grads_s0 directly (the caller averages it over minibatches).
    gradvar = [(-self.gradient_buffer_ph, self.log_kl_coef)]
    alpha_optimizer = tf.train.AdamOptimizer(learning_rate=1e-2)
    alpha_train_op = alpha_optimizer.apply_gradients(gradvar)
    with tf.control_dependencies([alpha_train_op]):
        # Keep the coefficient within a sane range after every update.
        alpha_clip_op = tf.assign(self.log_kl_coef, tf.clip_by_value(self.log_kl_coef, np.log(0.75), np.log(500)))
    # Adam slot variables are created lazily above; initialize them now.
    self.sess.run(tf.variables_initializer(alpha_optimizer.variables()))
    return tf.group([alpha_train_op, alpha_clip_op]), v_grads_s0
#######################################
# interfaces for cont_run.py
#######################################
def batch_learn(self, train_trajectory, vec_env, total_timesteps, log_interval, seed, result_filepath=None, valid_trajectory=None):
    """
    Offline training loop with periodic KL-coefficient hyper-gradient
    updates, checkpointing/resume, and periodic policy evaluation.

    Returns (eval_timesteps, evaluations, infos_values) accumulated over
    the run (including any values restored from a previous checkpoint).
    """
    np.random.seed(seed)
    # Separate replay buffers for train/valid trajectories; both share the
    # train buffer's observation standardizer.
    train_replay_buffer = ReplayBuffer(train_trajectory, max_action=self.max_action, num_critic=self.num_critics)
    valid_replay_buffer = ReplayBuffer(valid_trajectory, max_action=self.max_action, num_critic=self.num_critics)
    valid_replay_buffer.standardizer = train_replay_buffer.standardizer
    self.standardizer = train_replay_buffer.standardizer
    # Zero-padding of valid (obs, actions)
    valid_obs_padded, valid_actions_padded, valid_terminal_mask, valid_traj_maxlen = ReplayBuffer.group_element_trajectory(valid_trajectory)
    # Standardize observations; actions are rescaled into [-1, 1].
    valid_obs_padded, valid_actions_padded = self.standardizer(valid_obs_padded), valid_actions_padded / self.max_action
    valid_trajectory_indices = np.arange(len(valid_trajectory))
    num_updates = 10  # minibatches per hyper-gradient estimate
    # Hyper-gradient ascent operation
    alpha_train_op, v_grads_s0 = self._get_alpha_train_op(self.critic_q, self.valid_critic_q, valid_traj_maxlen)
    # Start... resume from the latest checkpoint if one exists.
    saver = tf.train.Saver(max_to_keep=2)
    last_checkpoint = tf.train.latest_checkpoint(result_filepath + '_checkpoint')
    if last_checkpoint is not None:
        start_time = time.time()
        saver.restore(self.sess, last_checkpoint)
        # Restore the bookkeeping lists saved alongside the checkpoint.
        loaded = np.load(result_filepath + '.tmp.npy', allow_pickle=True).item()
        eval_timesteps = loaded['eval_timesteps']
        evaluations = loaded['evals']
        infos_values = loaded['info_values']
        v_grad_list = []
        timestep = eval_timesteps[-1] + 1
        timesteps = range(timestep, total_timesteps)
        print('loaded', timestep)
        print(eval_timesteps)
        print(infos_values)
    else:
        start_time = time.time()
        eval_timesteps = []
        evaluations = []
        infos_values = []
        v_grad_list = []
        timesteps = range(total_timesteps)
    for timestep in tqdm(timesteps, desc="BOPAHSingle", ncols=70):
        # One SGD step on both train and valid critics (grouped in step_ops).
        obs, action, reward, next_obs, done, ensemble_mask = train_replay_buffer.sample(self.batch_size)
        valid_obs, valid_action, valid_reward, valid_next_obs, valid_done, valid_ensemble_mask = valid_replay_buffer.sample(self.batch_size)
        feed_dict = {
            self.obs_ph: obs, self.action_ph: action, self.reward_ph: reward,
            self.next_obs_ph: next_obs, self.terminal_ph: done,
            self.valid_obs_ph: valid_obs, self.valid_action_ph: valid_action, self.valid_reward_ph: valid_reward,
            self.valid_next_obs_ph: valid_next_obs, self.valid_terminal_ph: valid_done,
            self.obs_mean: train_replay_buffer.obs_mean, self.obs_std: train_replay_buffer.obs_std,
            self.ensemble_mask: ensemble_mask, self.valid_ensemble_mask: valid_ensemble_mask
        }
        step_result = self.sess.run(self.step_ops + self.eval_ops, feed_dict=feed_dict)
        infos_value = step_result[len(self.step_ops):]
        # Every 500 steps (after a 100k-step warmup), estimate the
        # hyper-gradient on 200 random validation trajectories and take one
        # ascent step on the KL coefficient.
        if (timestep + 1) % 500 == 0 and timestep > 100000:
            grad_values = []
            np.random.shuffle(valid_trajectory_indices)
            reshaped_indices = np.reshape(valid_trajectory_indices[:200], [num_updates, -1])
            for rind in reshaped_indices:
                v_grads_s0_value = self.sess.run(v_grads_s0, feed_dict={
                    self.obs_mean: train_replay_buffer.obs_mean, self.obs_std: train_replay_buffer.obs_std,
                    self.traj_valid_obs_ph: valid_obs_padded[rind],
                    self.traj_valid_actions_ph: valid_actions_padded[rind],
                    self.mask_ph: valid_terminal_mask[rind]})
                grad_values += list(v_grads_s0_value.flatten())
            # print(negative_q_cov_value[:100, 0])
            v_grad_list.append(np.mean(grad_values))
            # Feed the averaged gradient into the alpha ascent op.
            self.sess.run(alpha_train_op, feed_dict={self.gradient_buffer_ph: np.mean(grad_values)})
            print('t=%d: (elapsed_time=%f)' % (timestep, time.time() - start_time))
            print('\n============================')
            for label, value in zip(self.eval_labels, infos_value):
                if label == 'kl_coef':
                    print('%16s: %10.3f' % (label, value))
                    print('%16s: %10.3f' % ('log_kl_coef', np.log(value)))
            print('%16s: %10.3f' % ('total_grad_value', np.mean(grad_values)))
            print('============================\n')
        if timestep % log_interval == 0:
            print('-----------saving----------------------')
            # NOTE(review): v_grad_list may be empty here (e.g. at
            # timestep 0), making np.mean return nan — confirm intended.
            v_grad_mean = np.mean(v_grad_list)
            v_grad_list = []
            evaluation = evaluate_policy(vec_env, self)
            eval_timesteps.append(timestep)
            evaluations.append(evaluation)
            infos_values.append(infos_value + [v_grad_mean])
            print('t=%d: %f (elapsed_time=%f)' % (timestep, evaluation, time.time() - start_time))
            print('\n============================')
            for label, value in zip(self.eval_labels, infos_value):
                print('%12s: %10.3f' % (label, value))
            print('============================\n')
            if result_filepath:
                # Persist bookkeeping and model so the run can be resumed.
                result = {'eval_timesteps': eval_timesteps, 'evals': evaluations, 'info_values': infos_values}
                np.save(result_filepath + '.tmp.npy', result)
                saver.save(self.sess, result_filepath + '_checkpoint/model')
    return eval_timesteps, evaluations, infos_values
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5 import QtWidgets
# from mainWindow import Ui_MainWindow
# from shineMainWindow import ShineMainWindow
# from brightMainWindow import BrightMainWindow
from src.windows.brightMainWindow import BrightMainWindow
class Spruce(BrightMainWindow):
    """Top-level application window.

    All behavior is inherited unchanged from BrightMainWindow (including,
    presumably, the class attribute ``iconDir`` referenced by ``main()`` —
    confirm against BrightMainWindow). The previous ``__init__`` only
    called ``super().__init__()`` and was a redundant override, so it has
    been removed.
    """
def main():
    """Launch the Spruce application behind a temporary splash screen.

    The splash screen is shown (and the event loop pumped so it paints)
    *before* the main window is constructed, so it actually covers the
    construction time. Both closing the splash and showing the main
    window are deferred 2.5 s via single-shot timers.
    """
    app = QtWidgets.QApplication(sys.argv)
    iconLoc = '{}/{}'.format(Spruce.iconDir, 'spruce.png')
    splash = QtWidgets.QSplashScreen(QtGui.QPixmap(iconLoc))
    statusFont = QtGui.QFont()
    statusFont.setBold(True)
    statusFont.setPointSize(16)
    splash.setFont(statusFont)
    splash.showMessage('Loading ... ...',
                       QtCore.Qt.AlignCenter | QtCore.Qt.AlignBottom,
                       QtCore.Qt.white)
    # Bug fix: show the splash BEFORE building the main window. The
    # original called splash.show() after Spruce(), so the splash never
    # masked the (potentially slow) window construction.
    splash.show()
    app.processEvents()
    spruce = Spruce()
    QtCore.QTimer.singleShot(2500, splash.close)
    QtCore.QTimer.singleShot(2500, spruce.show)
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.