# multiprocessing example
# It is easier to code than the other approaches,
# but the use of the Session object requires attention.
# The per-process setup shown here is a guide for scenarios that share other common objects.
import requests
import time
import multiprocessing
# Each process in the Pool lives in its own memory space.
# So they cannot share the Session object used in this example.
# Each process must have its own Session object.
# We provide it through a global variable and obtain it via the getSession function,
# because a new Session should not be created on every function call.
# Each process should use a Session object that belongs to that process.
processSession = None
def getSession():
    global processSession
    if not processSession:
        processSession = requests.Session()
def getResponse(address):
    with processSession.get(address) as response:
        currentProcessName = multiprocessing.current_process().name
        print(currentProcessName)
        print("Received {1} bytes from {0}.\n".format(
            address, len(response.content)))
def getAll(addresses):
    # A process pool is created based on the CPU count.
    # The Session objects the processes will use are supplied by the getSession
    # function passed via the initializer parameter.
    # The map function pairs the processes with the function calls.
    with multiprocessing.Pool(initializer=getSession) as processPool:
        processPool.map(getResponse, addresses)
if __name__ == "__main__":
    print("Sending web requests")
    # Sample web addresses to work on
    targetSites = [
        "https://github.com/jdorfman/awesome-json-datasets",
        "https://dev.to/awwsmm/101-bash-commands-and-tips-for-beginners-to-experts-30je",
        "https://www.buraksenyurt.com/post/raspberry-pi-ve-python-calisma-notlarim"
    ] * 300  # creates 300 copies of each address
    beginning = time.time()  # capture the time before starting
    getAll(targetSites)
    duration = time.time() - beginning  # compute the total duration
    print("Total run time: {0} seconds".format(duration))
|
import os
import unittest
from .smart_excel import (
SmartExcel,
validate_position
)
children_things = [
{
'parent_id': 42,
'result': 'yes'
},
{
'parent_id': 42,
'result': 'no'
},
{
'parent_id': 43,
'result': 'oui'
},
{
'parent_id': 43,
'result': 'non'
},
]
class DataModel():
def __init__(self):
self.results = {
'my_custom_payload_for_table': [
'Good morning',
'Bonjour'
],
'my_custom_payload_for_map': [
'Guten tag',
'Goeie more'
],
'things': [
{
'id': 42,
'name': 'The answer'
},
{
'id': 43,
'name': 'nothing'
}
]
}
self.custom_column_names = ['Bonzai', 'Artichoke']
def __str__(self):
return "My Custom DataModel"
def get_sheet_name_for_summary(self):
return 'A summary title'
def write_column_name_func(self, instance, kwargs={}):
return self.custom_column_names[kwargs['index']]
def write_first_column(self, instance, kwargs={}):
return instance
def get_payload_detail(self, instance, foreign_key):
item_id = instance[foreign_key]
return [
item
for item in children_things
if item['parent_id'] == item_id
]
def get_sheet_name_for_detail(self, instance):
return f'Sheet nb {instance["id"]}'
def write_thing_id(self, instance, kwargs={}):
return instance['id']
def write_thing_value(self, instance, kwargs={}):
return instance['name']
def write_result(self, instance, kwargs={}):
return instance['result']
def get_smart_excel(definition, data_model, output='template.xlsx'):
if isinstance(definition, dict):
definition = [definition]
return SmartExcel(
output=output,
definition=definition,
data=data_model()
)
class TestParseSheetDefinition(unittest.TestCase):
def setUp(self):
self.sheet_def = {
'type': 'sheet',
}
def test_reserved_sheet(self):
excel = get_smart_excel({}, DataModel)
self.assertEqual(len(excel.sheets), 2)
self.assertTrue('_data' in list(excel.sheets.keys()))
self.assertTrue('_meta' in list(excel.sheets.keys()))
self.assertTrue(excel.sheets['_data']['reserved'])
self.assertTrue(excel.sheets['_meta']['reserved'])
# user should not be able to add a sheet
# with a reserved name (_data, _meta)
for reserved_sheet_name in excel.reserved_sheets:
self.sheet_def['name'] = reserved_sheet_name
with self.assertRaises(ValueError) as raised:
excel = get_smart_excel(self.sheet_def, DataModel)
self.assertEqual(str(raised.exception), f'{reserved_sheet_name} is a reserved sheet name.')
self.assertEqual(len(excel.sheets), 2)
def test_sheet_without_name(self):
excel = get_smart_excel(self.sheet_def, DataModel)
self.assertEqual(len(excel.sheets), 3)
self.assertEqual(excel.sheets['Default-0-0']['name'], 'Default-0')
self.assertFalse(excel.sheets['Default-0-0']['reserved'])
def test_simple_sheet(self):
self.sheet_def['name'] = 'Sheet 1'
excel = get_smart_excel(self.sheet_def, DataModel)
self.assertEqual(len(excel.sheets), 3)
self.assertEqual(excel.sheets['Sheet 1-0']['name'], 'Sheet 1')
def test_sheet_name_func(self):
self.sheet_def['name'] = {
'func': 'summary'
}
excel = get_smart_excel(self.sheet_def, DataModel)
self.assertEqual(len(excel.sheets), 3)
self.assertEqual(excel.sheets['A summary title-0']['name'], 'A summary title')
def test_sheet_key(self):
self.sheet_def['key'] = 'summary'
self.sheet_def['name'] = 'A summary title'
excel = get_smart_excel(self.sheet_def, DataModel)
self.assertEqual(len(excel.sheets), 3)
self.assertEqual(excel.sheets['summary']['name'], 'A summary title')
def test_table_component(self):
self.sheet_def['key'] = 'default'
table_comp = {
'type': 'table',
'name': 'My table',
'position': {
'x': 0,
'y': 0
},
'payload': 'my_custom_payload_for_table'
}
columns = [
{
'name': 'Column 1',
'key': 'column_1'
}
]
table_comp['columns'] = columns
self.sheet_def['components'] = [
table_comp
]
excel = get_smart_excel(self.sheet_def, DataModel)
self.assertEqual(len(excel.sheets['default']['components']), 1)
self.assertEqual(len(excel.sheets['default']['components'][0]['columns']), 1)
self.assertEqual(
excel.sheets['default']['components'][0]['columns'][0],
{
'name': 'Column 1',
'letter': 'A',
'index': 0,
'key': 'column_1'
})
columns = [
{
'name': {
'func': 'column_name_func'
},
'key': 'column_1'
}
]
parsed_columns = excel.parse_columns(columns, repeat=1)
self.assertEqual(
parsed_columns[0],
{
'name': 'Bonzai',
'letter': 'A',
'index': 0,
'key': 'column_1'
})
def test_map_component(self):
self.sheet_def['key'] = 'default'
map_comp = {
'type': 'map',
'name': 'My Map',
'position': {
'x': 0,
'y': 0
},
'payload': 'my_custom_payload_for_map',
}
rows = [
{
'name': 'Row 1'
}
]
map_comp['rows'] = rows
self.sheet_def['components'] = [
map_comp
]
excel = get_smart_excel(self.sheet_def, DataModel)
self.assertEqual(len(excel.sheets['default']['components']), 1)
self.assertEqual(len(excel.sheets['default']['components'][0]['rows']), 1)
def test_component_position(self):
self.sheet_def['key'] = 'default'
components = [
{
'type': 'map',
'name': 'My Map',
'position': {
'x': 0,
'y': 0
},
'payload': 'my_custom_payload_for_map',
'rows': [
{
'name': 'Row 1'
}
]
},
{
'type': 'map',
'name': 'a second map',
'position': {
'x': 0,
'y': 0
}
}
]
self.sheet_def['components'] = components
with self.assertRaises(ValueError) as raised:
excel = get_smart_excel(self.sheet_def, DataModel)
self.assertEqual(str(raised.exception), 'Cannot position `a second map` at 0;0. `My Map` is already present.')
self.sheet_def['components'][1]['position']['y'] = 1
excel = get_smart_excel(self.sheet_def, DataModel)
def test_recursive(self):
self.sheet_def['components'] = [
{
'type': 'table',
'name': 'A table',
'payload': 'things',
'position': {
'x': 0,
'y':0
},
'columns': [
{
'name': 'Identification',
'key': 'thing_id'
},
{
'name': 'Value',
'key': 'thing_value'
}
],
'recursive': {
'payload_func': 'detail',
'foreign_key': 'id',
'name': {
'func': 'detail'
},
'components': [
{
'name': 'Another table',
'type': 'table',
'position': {
'x': 0,
'y': 0
},
'columns': [
{
'name': 'Result',
'key': 'result'
}
]
}
]
}
}
]
excel = get_smart_excel(
self.sheet_def,
DataModel,
'test_recursive.xlsx')
self.assertEqual(len(excel.sheets), 5)
excel.dump()
class TestParseFormatDefinition(unittest.TestCase):
def setUp(self):
self.format_def = {
'type': 'format',
'key': 'my_custom_format',
'format': {
'border': 1,
'bg_color': '#226b30',
}
}
def test_format(self):
excel = get_smart_excel(self.format_def, DataModel)
self.assertEqual(len(excel.formats), 1)
self.assertTrue('my_custom_format' in excel.formats)
def test_num_format(self):
self.format_def['num_format'] = 'R 0'
excel = get_smart_excel(self.format_def, DataModel)
self.assertEqual(len(excel.formats), 1)
self.assertTrue('my_custom_format' in excel.formats)
class TestDump(unittest.TestCase):
def runTest(self):
path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'test_dump.xlsx')
if os.path.exists(path):
os.remove(path)
self.format_def = {
'type': 'sheet',
'name': 'Bonjour',
'components': [
{
'type': 'table',
'name': 'My table',
'position': {
'x': 0,
'y': 0
},
'payload': 'my_custom_payload_for_table',
'columns': [
{
'name': 'Column 1',
'key': 'first_column'
}
]
}
]
}
excel = get_smart_excel(
self.format_def,
DataModel,
output=path)
excel.dump()
self.assertTrue(os.path.exists(path))
        # Do manual testing of the generated spreadsheet.
class TestValidatePosition(unittest.TestCase):
def setUp(self):
self.element = {
'position': ''
}
def test_type(self):
with self.assertRaises(ValueError) as raised:
validate_position(self.element)
self.assertEqual(str(raised.exception), "position must be a <class 'dict'>")
def test_required_attrs(self):
self.element['position'] = {}
with self.assertRaises(ValueError) as raised:
validate_position(self.element)
self.assertEqual(str(raised.exception), "x is required in a component position definition.")
self.element['position'] = {
'x': None
}
with self.assertRaises(ValueError) as raised:
validate_position(self.element)
self.assertEqual(str(raised.exception), "y is required in a component position definition.")
def test_type_required_attrs(self):
self.element['position'] = {
'x': None,
'y': None
}
with self.assertRaises(ValueError) as raised:
validate_position(self.element)
self.assertEqual(str(raised.exception), "x must be a <class 'int'>")
self.element['position'] = {
'x': 0,
'y': None
}
with self.assertRaises(ValueError) as raised:
validate_position(self.element)
self.assertEqual(str(raised.exception), "y must be a <class 'int'>")
def test_ok(self):
self.element['position'] = {
'x': 0,
'y': 0
}
self.assertTrue(validate_position(self.element))
if __name__ == "__main__":
unittest.main()
|
from .file_source import FileSource
from .json_source import JSONSource
from .toml_source import TOMLSource
from .yaml_source import YAMLSource
|
#!/usr/bin/env python
from ttastromech import TTAstromech
import time
if __name__ == '__main__':
r2 = TTAstromech()
try:
r2.run() # make random astromech sounds
time.sleep(2)
except KeyboardInterrupt:
print('bye ...')
|
import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(threshold=np.inf)
import fusion
# Kinect module
import pyk4a
from helpers import convert_to_bgra_if_required
from pyk4a import Config, PyK4A
from pyk4a import PyK4APlayback
from icp_modules.ICP_point_to_plane import *
from icp_modules.FrameMaps_kms2 import *
from helpers import colorize, convert_to_bgra_if_required
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
def SEG_model():
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.DEVICE = 'cpu'
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)
"""
    Among the predicted classes, label 0 is human.
    pred_masks: mask prediction (segmentation)
"""
return predictor
def Joint_model():
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.DEVICE = 'cpu'
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)
return predictor
def filter_human(output):
classes = output["instances"].pred_classes
human = list(np.nonzero(np.where(classes.numpy() == 0, 1, 0))[0])
boxes = output["instances"].pred_boxes
focus = boxes.area().numpy()[human].argmax()
mask = output["instances"].pred_masks[human[focus]]
x, y = np.nonzero(1 - mask.numpy())
return x, y
def filter_joint(output):
joints = output["instances"].pred_keypoints[0][:, :2].numpy()
return joints
def joint_to_3D(joints, Inverse, depth_im):
Joints = np.zeros((3, 17))
for i in range(17):
xx, yy = joints[i]
d = depth_im[int(round(yy)), int(round(xx))]
Joints[:, i] = d * np.dot(Inverse, np.array([xx, yy, 1]).T)
return Joints
def simple_bundle(joint_3D: list):
    # Drop the outliers and take the mean.
joints_3D = np.array(joint_3D) # N * 3 * 17
joint_val = [[0, 0, 0] for _ in range(17)]
counts = [[0.1, 0.1, 0.1] for _ in range(17)]
mean = np.mean(joints_3D, axis=0) # 3x17
std = np.std(joints_3D, axis=0) # 3x17
    thr_low = mean - 2 * std
    thr_high = mean + 2 * std
for i in range(len(joints_3D)):
for j in range(17):
if thr_low[0, j] < joints_3D[i, 0, j] < thr_high[0, j]:
joint_val[j][0] += joints_3D[i, 0, j]
counts[j][0] += 1
if thr_low[1, j] < joints_3D[i, 1, j] < thr_high[1, j]:
joint_val[j][1] += joints_3D[i, 1, j]
counts[j][1] += 1
if thr_low[2, j] < joints_3D[i, 2, j] < thr_high[2, j]:
joint_val[j][2] += joints_3D[i, 2, j]
counts[j][2] += 1
result = np.zeros((3, 17))
for i, (val, count) in enumerate(zip(joint_val, counts)):
xv = val[0]
xc = count[0]
yv = val[1]
yc = count[1]
zv = val[2]
zc = count[2]
result[:, i] = xv / xc, yv / yc, zv / zc
return result
if __name__ == "__main__":
seg_model = SEG_model()
joint_model = Joint_model()
# Open kinect camera by realtime
k4a = PyK4A(
Config(
color_resolution=pyk4a.ColorResolution.RES_720P,
depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
color_format=pyk4a.ImageFormat.COLOR_MJPG
)
)
# Load video file
# filename = r'C:\Users\82106\PycharmProjects\dino_lib\python_kinect_fusion\tsdf-fusion-python-master\human6.mkv'
# filename = r'C:\Users\82106\PycharmProjects\dino_lib\python_kinect_fusion\tsdf-fusion-python-master\0531_2.mkv'
filename = r'0_sample_video\human6.mkv'
# filename = r'0_sample_video\0531\0531_3.mkv'
n_frames = 4
k4a = PyK4APlayback(filename)
k4a.open()
# Load Kinect's intrinsic parameter 3X3
cam_intr = k4a.calibration.get_camera_matrix(pyk4a.calibration.CalibrationType.COLOR)
invK = np.linalg.inv(cam_intr)
    # Create lists
list_depth_im = []
list_color_im = []
    # Create vol_bnds (volume bounds)
vol_bnds = np.zeros((3, 2))
voxel_size = 0.03
iter = 0
# while True:
for i in range(0, n_frames):
capture = k4a.get_next_capture()
if capture.depth is not None and capture.color is not None:
print(f"==========={iter}==========")
# Read depth and color image
depth_im = capture.transformed_depth.astype(float)
depth_im /= 1000. ## depth is saved in 16-bit PNG in millimeters
depth_im[depth_im == 65.535] = 0 # set invalid depth to 0 (specific to 7-scenes dataset) 65.535=2^16/1000
color_capture = convert_to_bgra_if_required(k4a.configuration["color_format"], capture.color)
color_im = cv2.cvtColor(color_capture, cv2.COLOR_BGR2RGB)
H, W, d_ = color_im.shape
list_depth_im.append(depth_im)
list_color_im.append(color_im)
if iter == 0:
first_Points3D, sample = PointCloud(depth_im, invK) # Nx3
cam_pose = np.eye(4)
first_pose = cam_pose
                prev_normal = NormalMap(sample, depth_im, invK)  # the normal map must be the destination frame's normal map
elif iter >= 1:
second_Points3D, sample = PointCloud(depth_im, invK) # Nx3
pose = point_to_plane(second_Points3D,
first_Points3D, prev_normal) # A, B // maps A onto B : B = pose*A
prev_normal = NormalMap(sample, depth_im, invK)
# ## visualize pose result
# fig = plt.figure(figsize=(8, 8))
# ax = fig.add_subplot(projection='3d') # Axe3D object
# P = np.vstack((second_Points3D.T, np.ones((1, second_Points3D.shape[0])))) # projection P = 4XN
# # ax.scatter(second_Points3D.T[:, 0], second_Points3D.T[:, 1], second_Points3D.T[:, 2], color='g', s=0.5)
# proj = pose.dot(P)
# ax.scatter(P.T[:, 0], P.T[:, 1], P.T[:, 2], color='r', s=0.3)
# ax.scatter(first_Points3D[:, 0], first_Points3D[:, 1], first_Points3D[:, 2], color='b', s=0.3) # fP = Nx3
# plt.show()
cam_pose = np.dot(first_pose, pose)
first_pose = cam_pose
first_Points3D = second_Points3D
# Compute camera view frustum and extend convex hull
view_frust_pts = fusion.get_view_frustum(depth_im, cam_intr, cam_pose)
vol_bnds[:, 0] = np.minimum(vol_bnds[:, 0], np.amin(view_frust_pts, axis=1))
vol_bnds[:, 1] = np.maximum(vol_bnds[:, 1], np.amax(view_frust_pts, axis=1))
iter = iter + 1
# ======================================================================================================== #
print("Initializing voxel volume...")
tsdf_vol = fusion.TSDFVolume(vol_bnds, voxel_size=voxel_size)
human_vol = fusion.TSDFVolume(vol_bnds, voxel_size=voxel_size)
k4a.close()
# ===============Integrate===============
n_imgs = len(list_depth_im)
iter = 0
# poses = []
joints_3D = []
for iter in range(0, n_imgs):
print("Fusing frame %d/%d" % (iter + 1, n_imgs))
# Read depth and color image
depth_im = list_depth_im[iter]
color_im = list_color_im[iter]
output = seg_model(color_im)
not_valid_x, not_valid_y = filter_human(output)
for not_x, not_y in zip(not_valid_x, not_valid_y):
depth_im[not_x, not_y] = 0
color_im[not_x, not_y] = 0
val_x, val_y = np.nonzero(depth_im)
threshold = np.mean(depth_im[val_x, val_y]) + 2 * np.std(depth_im[val_x, val_y])
depth_im[depth_im >= threshold] = 0
H, W = depth_im.shape
output = joint_model(color_im)
joint = filter_joint(output)
joints_3D.append(joint_to_3D(joint, invK, depth_im))
# Set first frame as world system
if iter == 0:
            previous_Points3D, sample = PointCloud(depth_im, invK)
cam_pose = np.eye(4)
previous_pose = cam_pose
prev_normal = NormalMap(sample, depth_im, invK)
elif iter == 1:
second_Points3D, sample = PointCloud(depth_im, invK)
            pose = point_to_plane(second_Points3D,
                                  previous_Points3D, prev_normal)  # A, B // maps A onto B : B = pose*A
prev_normal = NormalMap(sample, depth_im, invK)
cam_pose = np.dot(previous_pose, pose)
previous_pose = cam_pose
previous_Points3D = second_Points3D
elif iter > 1:
Points3D, sample = PointCloud(depth_im, invK)
# Compute camera view frustum and extend convex hull
pose = point_to_plane(Points3D, previous_Points3D, prev_normal) # A, B // maps A onto B : B = pose*A
prev_normal = NormalMap(sample, depth_im, invK)
pose = np.dot(previous_pose, pose)
view_frust_pts = fusion.get_view_frustum(depth_im, cam_intr, pose)
vol_bnds_seq = np.zeros((3, 2))
vol_bnds_seq[:, 0] = np.minimum(vol_bnds_seq[:, 0], np.amin(view_frust_pts, axis=1))
vol_bnds_seq[:, 1] = np.maximum(vol_bnds_seq[:, 1], np.amax(view_frust_pts, axis=1))
tsdf_vol_seq = fusion.TSDFVolume(vol_bnds_seq, voxel_size=voxel_size)
tsdf_vol_seq.integrate(color_im, depth_im, cam_intr, pose, obs_weight=1.)
# second_Points3D = tsdf_vol_seq.get_point_cloud()[:, 0:3]
#
            # # accumulated point cloud, vertices only
# first_Points3D = tsdf_vol.get_partial_point_cloud()
#
# pts_size = min(first_Points3D.shape[0], second_Points3D.shape[0])
# pose = point_to_plane(second_Points3D[0:pts_size, :],
# first_Points3D[0:pts_size, :], normal_map) # A, B // maps A onto B : B = pose*A
# print(f'{pts_size} / {first_Points3D.shape[0]}')
pose = np.dot(previous_pose, pose)
cam_pose = pose
previous_pose = cam_pose
previous_Points3D = Points3D
# poses.append(previous_pose)
# Integrate observation into voxel volume (assume color aligned with depth)
tsdf_vol.integrate(color_im, depth_im, cam_intr, cam_pose, obs_weight=1.)
iter = iter + 1
joint_ = simple_bundle(joints_3D)
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(projection='3d') # Axe3D object
ax.scatter(joint_[0, :], joint_[1, :], joint_[2, :]) # projection P = 4XN
plt.show()
# Get mesh from voxel volume and save to disk (can be viewed with Meshlab)
print("Saving mesh")
# verts, faces, norms, colors = human_vol.get_mesh()
verts, faces, norms, colors = tsdf_vol.get_mesh()
fusion.meshwrite("human_mesh.ply", verts, faces, norms, colors)
# Get point cloud from voxel volume and save to disk (can be viewed with Meshlab)
# print("Saving point cloud")
# point_cloud = human_vol.get_point_cloud()
# fusion.pcwrite("human_pcd.ply", point_cloud)
|
import logging
from typing import Dict
from carball.json_parser.game import Game
from carball.generated.api import game_pb2
from carball.generated.api.game_pb2 import mutators_pb2 as mutators
from carball.generated.api.player_pb2 import Player
log = logging.getLogger(__name__)
def create_dropshot_ball_events(game: Game, proto_game: game_pb2.Game, player_map: Dict[str, Player]):
if game.ball_type != mutators.BREAKOUT:
return
hits = list(proto_game.game_stats.hits)
proto_events = proto_game.game_stats.dropshot_stats.ball_phase_events
for event in game.dropshot['ball_events']:
frame_number = event['frame_number']
proto_event = proto_events.add()
proto_event.frame_number = frame_number
proto_event.ball_phase = event['state']
while len(hits) > 1 and hits[1].frame_number <= frame_number:
hits.pop(0)
hit = hits.pop(0)
if hit.frame_number != frame_number:
log.warning(f'Did not find exact hit event for dropshot ball event at frame {frame_number}, hit frame {hit.frame_number}')
player = player_map[hit.player_id.id]
if player.is_orange != bool(event['team']):
log.warning(f'Team does not match in dropshot ball event ({frame_number}) and hit event ({hit.frame_number})')
return
proto_event.player_id.id = player.id.id
|
"""
1. Store data long-term, slower access speed, on a magnetic disk, like documents
2. Store data short-term, faster access speed, like games or in-progress documents
3. Crunch numbers to make the computer run and do work
4. RAM is volatile: its contents disappear when the power is cut. HDDs are non-volatile; the data remains even without power. In addition, RAM is faster than an HDD.
5. When opening, say, Rimworld, the HDD first loads the data from the game executable file on-disk into RAM. Then the CPU fetches the data from the RAM and executes the game, sending data back and forth between the RAM and itself to continually update the screen and run the game.
"""
|
"""
This module handles classes related to reading, modifying and writing 2DA files.
"""
from __future__ import annotations
from contextlib import suppress
from copy import copy
from enum import Enum
from typing import List, Dict, Optional, Any, Type
class TwoDA:
"""
Represents a 2DA file.
"""
def __init__(self):
self._rows: List[Dict[str, str]] = []
self._headers: List[str] = []
def __iter__(self):
"""
Iterates through each row yielding a new linked TwoDARow instance.
"""
for row in self._rows:
yield TwoDARow(row)
def get_headers(self) -> List[str]:
"""
Returns a copy of the set of column headers.
Returns:
The column headers.
"""
return copy(self._headers)
def get_column(self, header: str) -> List[str]:
"""
Returns every cell listed under the specified column header.
Args:
header: The column header.
Raises:
KeyError: If the specified column header does not exist.
Returns:
A list of cells.
"""
if header not in self._headers:
raise KeyError("The header '{}' does not exist.".format(header))
return [self._rows[i][header] for i in range(self.get_height())]
def add_column(self, header: str) -> None:
"""
Adds a new column with the specified header and populates it with blank cells for each row.
Args:
header: The header for the new column.
Raises:
KeyError: If the specified column header already exists.
"""
if header in self._headers:
raise KeyError("The header '{}' already exists.".format(header))
self._headers.append(header)
for row in self._rows:
row[header] = ""
def remove_column(self, header: str) -> None:
"""
Removes a column from the table with the specified column header. If no such column header exists it is ignored;
no error is thrown.
Args:
header: The column header.
"""
if header in self._headers:
for row in self._rows:
row.pop(header)
self._headers.remove(header)
def get_row(self, row_id: int) -> TwoDARow:
"""
Returns a TwoDARow instance which can update and retrieve the values of the cells for the specified row.
Args:
row_id: The row id.
Raises:
IndexError: If the specified row does not exist.
Returns:
A new TwoDARow instance.
"""
return TwoDARow(self._rows[row_id])
def add_row(self, cells: Dict[str, Any] = None) -> int:
"""
Adds a new row to the end of the table. Headers specified in the cells parameter that do not exist in the table
itself will be ignored, headers that are not specified in the cells parameter but do exist in the table will
default to being blank. All cells are converted to strings before being added into the 2DA.
Args:
cells: A dictionary representing the cells of the new row. A key is the header and value is the cell.
Returns:
The id of the new row.
"""
self._rows.append({})
if cells is None:
cells = {}
for header in cells:
cells[header] = str(cells[header])
for header in self._headers:
self._rows[-1][header] = cells[header] if header in cells else ""
return len(self._rows) - 1
def get_cell(self, row_id, column: str) -> str:
"""
Returns the value of the cell at the specified row under the specified column.
Args:
row_id: The row id.
column: The column header.
Raises:
KeyError: If the specified column does not exist.
IndexError: If the specified row does not exist.
Returns:
The cell value.
"""
return self._rows[row_id][column]
def set_cell(self, row_id: int, column: str, value: Any) -> None:
"""
Sets the value of a cell at the specified row under the specified column. If the value is none, it will output a
blank string.
Args:
row_id: The row id.
column: The column header.
value: The new value of the target cell.
Raises:
KeyError: If the specified column does not exist.
IndexError: If the specified row does not exist.
"""
value = "" if value is None else value
self._rows[row_id][column] = str(value)
def get_height(self) -> int:
"""
Returns the number of rows in the table.
Returns:
The number of rows.
"""
return len(self._rows)
def get_width(self) -> int:
"""
Returns the number of columns in the table.
Returns:
The number of columns.
"""
return len(self._headers)
def resize(self, row_count: int) -> None:
"""
Sets the number of rows in the table. Use with caution; specifying a height less than the current height will
result in a loss of data.
Args:
row_count: The number of rows to set.
Raises:
ValueError: If the height is negative.
"""
        if row_count < 0:
            raise ValueError("The height of the table cannot be negative.")
current_height = len(self._rows)
if row_count < current_height:
# trim the _rows list
self._rows = self._rows[:row_count]
else:
# insert the new rows with each cell filled in blank
for i in range(row_count - current_height):
self.add_row()
class TwoDARow:
def __init__(self, row_data: Dict[str, str]):
self._data: Dict[str, str] = row_data
def get_string(self, header: str) -> str:
"""
Returns the string value for the cell under the specified header.
Args:
header: The column header for the cell.
Raises:
KeyError: If the specified header does not exist.
Returns:
The cell value.
"""
if header not in self._data:
raise KeyError("The header '{}' does not exist.".format(header))
return self._data[header]
    def get_integer(self, header: str, default: Optional[int] = None) -> Optional[int]:
"""
Returns the integer value for the cell under the specified header. If the value of the cell is an invalid
integer then a default value is used instead.
Args:
header: The column header for the cell.
default: The default value.
Raises:
KeyError: If the specified header does not exist.
Returns:
The cell value as an integer or a default value.
"""
if header not in self._data:
raise KeyError("The header '{}' does not exist.".format(header))
value = default
with suppress(ValueError):
cell = self._data[header]
if cell.startswith("0x"):
value = int(cell, 16)
else:
value = int(cell)
return value
    def get_float(self, header: str, default: Optional[float] = None) -> Optional[float]:
"""
Returns the float value for the cell under the specified header. If the value of the cell is an invalid float
then a default value is used instead.
Args:
header: The column header for the cell.
default: The default value.
Raises:
KeyError: If the specified header does not exist.
Returns:
The cell value as a float or default value.
"""
if header not in self._data:
raise KeyError("The header '{}' does not exist.".format(header))
value = default
with suppress(ValueError):
cell = self._data[header]
value = float(cell)
return value
def get_enum(self, header: str, enum_type: Type[Enum], default: Optional[Enum]) -> Optional[Enum]:
"""
Returns the enum value for the cell under the specified header.
Args:
header: The column header for the cell.
enum_type: The enum class to try parse the cell value with.
default: The default value.
Raises:
KeyError: If the specified header does not exist.
Returns:
The cell value as a enum or default value.
"""
if header not in self._data:
raise KeyError("The header '{}' does not exist.".format(header))
value = default
        if self._data[header] != "":
            value = enum_type(self._data[header])
return value
def set_string(self, header: str, value: Optional[str]) -> None:
"""
        Sets the value of a cell under the specified header. If the value is None it will default to an empty string.
Args:
header: The column header for the cell.
value: The new cell value.
Raises:
KeyError: If the specified header does not exist.
"""
if header not in self._data:
raise KeyError("The header '{}' does not exist.".format(header))
value = "" if value is None else value
self._data[header] = value
def set_integer(self, header: str, value: Optional[int]) -> None:
"""
        Sets the value of a cell under the specified header, converting the integer into a string. If the value is None
        it will default to an empty string.
Args:
header: The column header for the cell.
value: The new cell value.
Raises:
KeyError: If the specified header does not exist.
"""
if header not in self._data:
raise KeyError("The header '{}' does not exist.".format(header))
value = "" if value is None else value
self._data[header] = str(value)
def set_float(self, header: str, value: Optional[float]) -> None:
"""
        Sets the value of a cell under the specified header, converting the float into a string. If the value is None
        it will default to an empty string.
Args:
header: The column header for the cell.
value: The new cell value.
Raises:
KeyError: If the specified header does not exist.
"""
if header not in self._data:
raise KeyError("The header '{}' does not exist.".format(header))
value = "" if value is None else value
self._data[header] = str(value)
def set_enum(self, header: str, value: Optional[Enum]):
"""
        Sets the value of a cell under the specified header, converting the enum into a string. If the value is None
        it will default to an empty string.
Args:
header: The column header for the cell.
value: The new cell value.
Raises:
KeyError: If the specified header does not exist.
"""
if header not in self._data:
raise KeyError("The header '{}' does not exist.".format(header))
value = "" if value is None else value.value
self._data[header] = value
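# A minimal usage sketch (illustrative only, not part of the library itself): it
# builds a small table and exercises the behaviour documented in the docstrings
# above. The column and cell names ("label", "cost", "sword") are hypothetical.
if __name__ == "__main__":
    table = TwoDA()
    table.add_column("label")
    table.add_column("cost")
    # headers not present in the table ("ignored") are dropped; values are stored as strings
    row_id = table.add_row({"label": "sword", "cost": 100, "ignored": "x"})
    row = table.get_row(row_id)
    assert row.get_string("label") == "sword"
    assert row.get_integer("cost") == 100
    # None is written out as a blank cell
    table.set_cell(row_id, "cost", None)
    assert table.get_cell(row_id, "cost") == ""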
|
"""
========
Fractals
========
Fractals are geometric structures that are self-similar at any scale. These
structures are easy to generate using recursion. In this demo, we'll be
implementing the following fractals:
- Sierpinski Tetrahedron or Tetrix
- Menger Sponge
- Moseley Snowflake
Let's begin by importing some necessary modules. We need ``fury.primitive`` to
avoid having to hardcode the geometry of a tetrahedron and a cube.
``fury.utils`` also contains a ``repeat_primitive`` function which we will use
for this demo.
"""
import math
import numpy as np
from fury import window, primitive, utils, ui
###############################################################################
# Before we create our first fractal, let's set some ground rules for us to
# work with.
#
# 1. Instead of creating a new actor to represent each primitive of the
# fractal, we will compute the centers of each primitive and draw them at once
# using ``repeat_primitive()``.
#
# 2. How many primitives do we need? For each fractal, we define a depth which
# will prevent infinite recursion. Assuming we have a depth of :math:`N`, and
# at each level the shape is divided into :math:`k` smaller parts, we will need
# :math:`k^{N}` primitives to represent the fractal.
#
# 3. Ideally, we want to allocate the array of centers upfront. To achieve
# this, we can use the method of representing a binary tree in an array, and
# extend it to work with k-ary trees (formulas for the same can be found
# `here`_). In this scheme of representation, we represent every primitive as a
# node, and each sub-primitive as a child node. We can also skip storing the
# first :math:`\frac{k^{N} - 1}{k - 1} + 1` entries as we only need to render
# the leaf nodes. This allows us to create an array of exactly the required
# size at the start, without any additional overhead.
#
# .. _here: https://book.huihoo.com/data-structures-and-algorithms-with-object-oriented-design-patterns-in-c++/html/page356.html # noqa
#
# -----------------------------------------------------------------------------
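###############################################################################
# As a quick numerical check of the indexing scheme above (this snippet is only
# illustrative and not part of the original demo): with :math:`k = 4` and
# :math:`N = 2` the first leaf sits at position ``(4**2 - 1) // (4 - 1) + 1 = 6``
# in the implicit tree, so ``pos - offset`` maps leaf positions 6..21 onto array
# slots 0..15.
k_demo, N_demo = 4, 2
offset_demo = (k_demo ** N_demo - 1) // (k_demo - 1) + 1
leaf_slots = [p - offset_demo for p in range(offset_demo, offset_demo + k_demo ** N_demo)]
assert leaf_slots == list(range(k_demo ** N_demo))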
###############################################################################
# The tetrix is a classic 3d fractal, a natural three-dimensional extension of
# the Sierpinski Triangle. At each level, we need to calculate the new centers
# for the next level. We can use the vertices of a tetrahedron as the offsets
# for the new centers, provided that the tetrahedron is centered at the origin
# (which is the case here).
def tetrix(N):
centers = np.zeros((4 ** N, 3))
# skipping non-leaf nodes (see above)
offset = (4 ** N - 1) // 3 + 1
# just need the vertices
U, _ = primitive.prim_tetrahedron()
def gen_centers(depth, pos, center, dist):
if depth == N:
centers[pos - offset] = center
else:
idx = 4 * (pos - 1) + 2
for i in range(4):
# distance gets halved at each level
gen_centers(depth + 1, idx + i, center + dist * U[i], dist / 2)
# the division by sqrt(6) is to ensure correct scale
gen_centers(0, 1, np.zeros(3), 2 / (6 ** 0.5))
vertices, faces = primitive.prim_tetrahedron()
# primitive is scaled down depending on level
vertices /= 2 ** (N - 1)
# compute some pretty colors
bounds_min, bounds_max = np.min(centers, axis=0), np.max(centers, axis=0)
colors = (centers - bounds_min) / (bounds_max - bounds_min)
vertices, triangles, colors, _ = primitive.repeat_primitive(
centers=centers, colors=colors, vertices=vertices, faces=faces
)
return utils.get_actor_from_primitive(vertices, triangles, colors)
###############################################################################
# For a Menger Sponge, each cube is divided into 27 smaller cubes, and we skip
# some of them (face centers, and the center of the cube). This means that on
# every level we get 20 new cubes.
#
# Here, to compute the points of each new center, we start at a corner cube's
# center and add the offsets to each smaller cube, scaled according to the
# level.
def sponge(N):
centers = np.zeros((20 ** N, 3))
offset = (20 ** N - 1) // 19 + 1
    # these are the offsets of the new centers at the next level of recursion
    # each cube is divided into 27 smaller cubes and we keep 20 of them for the sponge
V = np.array([[0, 0, 0], [0, 0, 1], [0, 0, 2], [0, 1, 0], [0, 1, 2],
[0, 2, 0], [0, 2, 1], [0, 2, 2], [1, 0, 0], [1, 0, 2],
[1, 2, 0], [1, 2, 2], [2, 0, 0], [2, 0, 1], [2, 0, 2],
[2, 1, 0], [2, 1, 2], [2, 2, 0], [2, 2, 1], [2, 2, 2]])
def gen_centers(depth, pos, center, dist):
if depth == N:
centers[pos - offset] = center
else:
# we consider a corner cube as our starting point
            start = center - np.array([1, 1, 1]) * dist
idx = 20 * (pos - 1) + 2
# this moves from the corner cube to each new cube's center
for i in range(20):
# each cube is divided into 27 cubes so side gets divided by 3
gen_centers(depth + 1, idx + i, start + V[i] * dist, dist / 3)
gen_centers(0, 1, np.zeros(3), 1 / 3)
vertices, faces = primitive.prim_box()
vertices /= 3 ** N
bounds_min, bounds_max = np.min(centers, axis=0), np.max(centers, axis=0)
colors = (centers - bounds_min) / (bounds_max - bounds_min)
vertices, triangles, colors, _ = primitive.repeat_primitive(
centers=centers, colors=colors, vertices=vertices, faces=faces
)
return utils.get_actor_from_primitive(vertices, triangles, colors)
###############################################################################
# A snowflake is exactly the same as above, but we skip different cubes
# (corners and center). I think this looks quite interesting, and it is
# possible to see the Koch snowflake if you position the camera just right.
def snowflake(N):
centers = np.zeros((18 ** N, 3))
offset = (18 ** N - 1) // 17 + 1
V = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2], [0, 2, 1],
[1, 0, 0], [1, 0, 1], [1, 0, 2], [1, 1, 0], [1, 1, 2],
[1, 2, 0], [1, 2, 1], [1, 2, 2], [2, 0, 1], [2, 1, 0],
[2, 1, 1], [2, 1, 2], [2, 2, 1]])
def gen_centers(depth, pos, center, side):
if depth == N:
centers[pos - offset] = center
else:
            start = center - np.array([1, 1, 1]) * side
idx = 18 * (pos - 1) + 2
for i in range(18):
gen_centers(depth + 1, idx + i, start + V[i] * side, side / 3)
gen_centers(0, 1, np.zeros(3), 1 / 3)
vertices, faces = primitive.prim_box()
vertices /= 3 ** N
bounds_min, bounds_max = np.min(centers, axis=0), np.max(centers, axis=0)
colors = (centers - bounds_min) / (bounds_max - bounds_min)
vertices, triangles, colors, _ = primitive.repeat_primitive(
centers=centers, colors=colors, vertices=vertices, faces=faces
)
return utils.get_actor_from_primitive(vertices, triangles, colors)
###############################################################################
# Now that we have the functions to generate fractals, we can start setting up
# the Scene and ShowManager.
scene = window.Scene()
showmgr = window.ShowManager(scene, "Fractals", (800, 800), reset_camera=True)
###############################################################################
# These values are what work nicely on my machine without lagging. If you have
# a powerful machine, you could bump these up by around 2-3.
fractals = [tetrix(6), sponge(3), snowflake(3)]
###############################################################################
# We want to be able to switch between the three fractals. To achieve this
# we'll create a RadioButton and register a callback which will remove existing
# fractals and add the selected one. This also resets the camera.
options = {
"Tetrix": 0,
"Sponge": 1,
"Snowflake": 2,
}
shape_chooser = ui.RadioButton(options.keys(), padding=10, font_size=16,
checked_labels=["Tetrix"], position=(10, 10))
def choose_shape(radio):
showmgr.scene.rm(*fractals)
showmgr.scene.add(fractals[options[radio.checked_labels[0]]])
showmgr.scene.reset_camera()
shape_chooser.on_change = choose_shape
# selected at start
showmgr.scene.add(fractals[0])
showmgr.scene.add(shape_chooser)
###############################################################################
# Let's add some basic camera movement to make it look a little interesting.
# We can use a callback here to update a counter and calculate the camera
# positions using the counter. ``sin`` and ``cos`` are used here to make smooth
# looping movements.
counter = 0
def timer_callback(_obj, _event):
global counter
counter += 1
showmgr.scene.azimuth(math.sin(counter * 0.01))
showmgr.scene.elevation(math.cos(counter * 0.01) / 4)
showmgr.render()
showmgr.add_timer_callback(True, 20, timer_callback)
###############################################################################
# Finally, show the window if running in interactive mode or render to an image
# otherwise. This is needed for generating the documentation that you are
# reading.
interactive = False
if interactive:
showmgr.start()
else:
window.record(showmgr.scene, out_path="fractals.png", size=(800, 800))
|
from datetime import datetime
from protean import BaseAggregate
from protean.core.value_object import BaseValueObject
from protean.fields.basic import DateTime, String
from protean.fields.embedded import ValueObject
class SimpleVO(BaseValueObject):
foo = String()
bar = String()
class VOWithDateTime(BaseValueObject):
foo = String()
now = DateTime()
class SimpleVOEntity(BaseAggregate):
vo = ValueObject(SimpleVO)
class EntityWithDateTimeVO(BaseAggregate):
vo = ValueObject(VOWithDateTime)
class TestAsDict:
def test_empty_simple_vo(self):
simple = SimpleVOEntity(id=12)
assert simple.to_dict() == {"id": 12}
def test_simple_vo_dict(self):
vo = SimpleVO(foo="foo", bar="bar")
assert vo.to_dict() == {"foo": "foo", "bar": "bar"}
def test_embedded_simple_vo(self):
vo = SimpleVO(foo="foo", bar="bar")
simple = SimpleVOEntity(id=12, vo=vo)
assert simple.to_dict() == {"id": 12, "vo": {"foo": "foo", "bar": "bar"}}
def test_datetime_vo_dict(self):
now = datetime.utcnow()
vo = VOWithDateTime(foo="foo", now=now)
assert vo.to_dict() == {"foo": "foo", "now": str(now)}
def test_embedded_datetime_vo(self):
now = datetime.utcnow()
vo = VOWithDateTime(foo="foo", now=now)
simple = EntityWithDateTimeVO(id=12, vo=vo)
assert simple.to_dict() == {"id": 12, "vo": {"foo": "foo", "now": str(now)}}
|
import sys
import math
import torch
import random
import numpy as np
import torch.nn as nn
import sim_utils as su
import model_utils as mu
import torch.nn.functional as F
from dataset_3d import get_spectrogram_window_length
sys.path.append('../backbone')
from select_backbone import select_resnet
from convrnn import ConvGRU
eps = 1e-7
INF = 25.0
class MyDataParallel(torch.nn.DataParallel):
"""
Allow nn.DataParallel to call model's attributes.
"""
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name)
def get_parallel_model(model):
if torch.cuda.is_available():
dev_count = torch.cuda.device_count()
print("Using {} GPUs".format(dev_count))
model = MyDataParallel(model, device_ids=list(range(dev_count)))
return model
def get_num_channels(modality):
if modality.startswith(mu.ImgMode):
return 3
elif modality == mu.FlowMode:
return 2
elif modality == mu.FnbFlowMode:
return 2
elif modality == mu.KeypointHeatmap:
return 17
elif modality == mu.SegMask:
return 1
else:
assert False, "Invalid modality: {}".format(modality)
class NonLinearProjection(nn.Module):
def __init__(self, input_dim, output_dim):
super(NonLinearProjection, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.hidden_dim = 128
self.projection = nn.Sequential(
nn.Linear(input_dim, self.hidden_dim),
nn.ReLU(),
nn.Linear(self.hidden_dim, output_dim),
)
def forward(self, features):
is_grid_format = (len(features.shape) == 5) and (features.shape[-1] <= 32)
# In case the last two dimension are grids
if is_grid_format:
assert features.shape[-1] == features.shape[-2], "Invalid shape: {}".format(features.shape)
features = features.permute(0, 1, 3, 4, 2)
projected_features = self.projection(features)
# projected_features = self.cosSimHandler.l2NormedVec(projected_features, dim=-1)
if is_grid_format:
            projected_features = projected_features.permute(0, 1, 4, 2, 3)
return projected_features
class AttentionProjection(nn.Module):
def __init__(self, input_dim, grid_shape):
super(AttentionProjection, self).__init__()
self.input_dim = input_dim
self.grid_shape = grid_shape
self.output_dim = 32
# Projection layer to generate small attention maps
self.hidden_dim = 32
self.projection = nn.Sequential(
nn.Linear(input_dim, self.hidden_dim),
nn.ReLU(),
nn.Linear(self.hidden_dim, self.output_dim),
)
# Generate attention map by squashing the grids
self.total_cells = self.grid_shape[0] * self.grid_shape[1]
self.attention = nn.Sequential(
nn.Linear(self.total_cells * self.output_dim, self.total_cells),
nn.ReLU(),
nn.Linear(self.total_cells, self.total_cells),
nn.Softmax(dim=-1),
)
def forward(self, features):
# input is B, N, D, s, s
B, N, D, s, s = features.shape
assert features.shape[-1] == features.shape[-2], "Invalid shape: {}".format(features.shape)
features = features.permute(0, 1, 3, 4, 2)
projected_features = self.projection(features.reshape(B * N * s * s, D))
# projected_features is -1, output_dim
projected_features = projected_features.view(B, N, s, s, self.output_dim)
# projected_features is B, N, s, s, output_dim
attention_map = self.attention(projected_features.view(B * N, -1))
# attention_map is B * N, total_cells
attention_map = attention_map.view(B, N, 1, self.grid_shape[0], self.grid_shape[1])
return attention_map
def applyAttention(self, features):
# features is B, 1, D, s, s
attention_map = self.forward(features)
# attention_map is B, 1, 1, s, s
context = features * attention_map
context = context.sum(-1).sum(-1)
return context, attention_map
class ImageFetCombiner(nn.Module):
def __init__(self, img_fet_dim, img_segments):
super(ImageFetCombiner, self).__init__()
# Input feature dimension is [B, dim, s, s]
self.dim = img_fet_dim
self.s = img_segments
self.flat_dim = self.dim * self.s * self.s
layers = []
if self.s == 7:
layers.append(nn.MaxPool2d(2, 2, padding=1))
layers.append(nn.MaxPool2d(2, 2))
layers.append(nn.AvgPool2d(2, 2))
if self.s == 4:
layers.append(nn.MaxPool2d(2, 2))
layers.append(nn.AvgPool2d(2, 2))
elif self.s == 2:
layers.append(nn.AvgPool2d(2, 2))
# input is B x dim x s x s
self.feature = nn.Sequential(*layers)
# TODO: Normalize
# Output is B x dim
def forward(self, input: torch.Tensor):
# input is B, N, D, s, s
B, N, D, s, s = input.shape
input = input.view(B * N, D, s, s)
y = self.feature(input)
y = y.reshape(B, N, -1)
return y
class WeighedLoss(nn.Module):
"""
    Class that implements the automatically weighted loss from: https://arxiv.org/pdf/1705.07115.pdf
"""
def __init__(self, num_losses, device):
super(WeighedLoss, self).__init__()
self.device = device
self.coeffs = []
for i in range(num_losses):
init_value = random.random()
param = nn.Parameter(torch.tensor(init_value))
name = "auto_param_" + str(i)
self.register_parameter(name, param)
self.coeffs.append(param)
def forward(self, losses=[]):
"""
Forward pass
Keyword Arguments:
losses {list} -- List of tensors of losses
Returns:
torch.Tensor -- 0-dimensional tensor with final loss. Can backpropagate it.
"""
assert len(losses) == len(self.coeffs), \
"Loss mismatch, check how many losses are passed"
net_loss = torch.tensor(0.0).to(self.device)
for i, loss in enumerate(losses):
net_loss += torch.exp(-self.coeffs[i]) * loss
net_loss += 0.5 * self.coeffs[i]
return net_loss
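    # A minimal usage sketch (illustrative comment only; `model`, `loss_a` and
    # `loss_b` are hypothetical names). The learnable coefficients are parameters
    # of this module, so they must be handed to the optimizer to be trained
    # jointly with the model:
    #   criterion = WeighedLoss(num_losses=2, device=device)
    #   optimizer = torch.optim.Adam(list(model.parameters()) + list(criterion.parameters()))
    #   total_loss = criterion(losses=[loss_a, loss_b])
    #   total_loss.backward()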
class IdentityFlatten(nn.Module):
def __init__(self):
super(IdentityFlatten, self).__init__()
def forward(self, input: torch.Tensor):
# input is B, N, D, s, s
B, N, D, s, s = input.shape
return input.reshape(B, N, -1)
class BaseDpcRnn(nn.Module):
def get_modality_feature_extractor(self):
if self.mode.split('-')[0] in [mu.ImgMode]:
assert self.last_size0 == self.last_size1
return ImageFetCombiner(self.final_feature_size, self.last_size0)
else:
assert False, "Invalid mode provided: {}".format(self.mode)
'''DPC with RNN'''
def __init__(self, args):
super(BaseDpcRnn, self).__init__()
torch.cuda.manual_seed(233)
self.debug = args['debug']
self.mode = args["mode"]
self.num_seq = args["num_seq"]
self.seq_len = args["seq_len"]
self.sample_size = args["img_dim"]
self.last_duration = int(math.ceil(self.seq_len / 4))
self.last_size1 = int(math.ceil(self.sample_size / 32))
self.last_size0 = self.last_size1
if self.mode == mu.AudioMode:
# Assume each audio image is 32x128
self.last_size0 = 1
self.last_size = None
if self.last_size0 == self.last_size1:
self.last_size = self.last_size0
print('final feature map has size %dx%d' % (self.last_size0, self.last_size1))
self.in_channels = get_num_channels(self.mode)
self.l2_norm = True
self.num_classes = args['num_classes']
self.dropout = 0.75
if self.debug:
self.dropout = 0.0
track_running_stats = True
print("Track running stats: {}".format(track_running_stats))
self.backbone, self.param = select_resnet(
args["net"], track_running_stats=track_running_stats, in_channels=self.in_channels
)
# params for GRU
self.param['num_layers'] = 1
self.param['hidden_size'] = self.param['feature_size']
# param for current model
self.final_feature_size = self.param["feature_size"]
self.total_feature_size = self.param['hidden_size'] * (self.last_size0 * self.last_size1)
self.agg = ConvGRU(input_size=self.param['feature_size'],
hidden_size=self.param['hidden_size'],
kernel_size=1,
num_layers=self.param['num_layers'])
self.compiled_features = self.get_modality_feature_extractor()
self.cosSimHandler = su.CosSimHandler()
self.mask = None
# self.relu = nn.ReLU(inplace=False)
self._initialize_weights(self.agg)
self.panasonic_num_classes = {'video': 75, 'atomic': 448}
def initialize_supervised_inference_layers(self):
self.final_bn = nn.BatchNorm1d(self.param['feature_size'])
self.final_bn.weight.data.fill_(1)
self.final_bn.bias.data.zero_()
self.final_fc = self.init_classification_fc_layer(self.num_classes)
def init_classification_fc_layer(self, num_classes):
final_fc = nn.Sequential(
nn.Linear(self.param['feature_size'], self.param['feature_size']),
nn.ReLU(),
nn.Dropout(self.dropout),
nn.Linear(self.param['feature_size'], num_classes),
)
self._initialize_weights(final_fc)
return final_fc
def _initialize_weights(self, module):
for name, param in module.named_parameters():
if 'weight' in name:
nn.init.orthogonal_(param, 1)
# other resnet weights have been initialized in resnet itself
def reset_mask(self):
self.mask = None
class DpcRnn(BaseDpcRnn):
'''DPC with RNN'''
def __init__(self, args):
print('Using DPC-RNN model for mode: {}'.format(args["mode"]))
super(DpcRnn, self).__init__(args)
self.pred_step = args["pred_step"]
self.network_pred = nn.Sequential(
nn.Conv2d(self.param['feature_size'], self.param['feature_size'], kernel_size=1, padding=0),
nn.ReLU(inplace=True),
nn.Conv2d(self.param['feature_size'], self.param['feature_size'], kernel_size=1, padding=0)
)
self._initialize_weights(self.network_pred)
self.is_supervision_enabled = mu.SupervisionLoss in args["losses"]
if self.is_supervision_enabled:
self.initialize_supervised_inference_layers()
def get_representation(self, block, detach=False):
(B, N, C, SL, H, W) = block.shape
block = block.view(B*N, C, SL, H, W)
feature = self.backbone(block)
del block
feature = F.relu(feature)
feature = F.avg_pool3d(feature, (self.last_duration, 1, 1), stride=1)
feature = feature.view(B, N, self.param['feature_size'], self.last_size0, self.last_size1)
# [B*N,D,last_size,last_size]
context, _ = self.agg(feature)
context = context[:,-1,:].unsqueeze(1)
context = F.max_pool3d(context, (1, self.last_size0, self.last_size1), stride=1).squeeze(-1).squeeze(-1)
del feature
if self.l2_norm:
context = self.cosSimHandler.l2NormedVec(context, dim=2)
# Return detached version if required
if detach:
return context.detach()
else:
return context
def forward(self, block, ret_rep=False):
# ret_cdot values: [c, z, zt]
# block: [B, N, C, SL, W, H]
# B: Batch, N: Number of sequences per instance, C: Channels, SL: Sequence Length, W, H: Dims
### extract feature ###
(B, N, C, SL, H, W) = block.shape
block = block.view(B*N, C, SL, H, W)
feature = self.backbone(block)
del block
feature = F.avg_pool3d(feature, (self.last_duration, 1, 1), stride=(1, 1, 1))
if self.l2_norm:
feature = self.cosSimHandler.l2NormedVec(feature, dim=1)
# before ReLU, (-inf, +inf)
feature_inf_all = feature.view(B, N, self.param['feature_size'], self.last_size0, self.last_size1)
feature = feature.view(B, N, self.param['feature_size'], self.last_size0, self.last_size1)
# Generate feature for future frames
feature_inf = feature_inf_all[:, N - self.pred_step::, :].contiguous()
del feature_inf_all
        # Placeholder; only populated when supervision is enabled
        probabilities = None
        # aggregate and predict overall context
if self.is_supervision_enabled:
context, _ = self.agg(feature)
context = context[:, -1, :].unsqueeze(1)
context = F.max_pool3d(context, (1, self.last_size0, self.last_size1), stride=1).squeeze(-1).squeeze(-1)
# [B,N,C] -> [B,C,N] -> BN() -> [B,N,C], because BN operates on id=1 channel.
context = self.final_bn(context.transpose(-1, -2)).transpose(-1,-2)
probabilities = self.final_fc(context).view(B, self.num_classes)
### aggregate, predict future ###
# Generate inferred future (stored in feature_inf) through the initial frames
_, hidden = self.agg(feature[:, 0:N-self.pred_step, :].contiguous())
if self.l2_norm:
hidden = self.cosSimHandler.l2NormedVec(hidden, dim=2)
# Get the last hidden state, this gives us the predicted representation
# after tanh, (-1,1). get the hidden state of last layer, last time step
hidden = hidden[:, -1, :]
# Predict next pred_step time steps for this instance
pred = []
for i in range(self.pred_step):
# sequentially pred future based on the hidden states
p_tmp = self.network_pred(hidden)
if self.l2_norm:
p_tmp = self.cosSimHandler.l2NormedVec(p_tmp, dim=1)
pred.append(p_tmp)
_, hidden = self.agg(p_tmp.unsqueeze(1), hidden.unsqueeze(0))
if self.l2_norm:
hidden = self.cosSimHandler.l2NormedVec(hidden, dim=2)
hidden = hidden[:, -1, :]
# Contains the representations for each of the next pred steps
pred = torch.stack(pred, 1) # B, pred_step, xxx
# Both are of the form [B, pred_step, D, s, s]
return pred, feature_inf, feature, probabilities, hidden
# Vals to return
ContextVal = 'ctx'
GridCtxVal = 'grid'
AttentionVal = 'attn'
class SupervisedDpcRnn(BaseDpcRnn):
'''
DPC with RNN for supervision
'''
def __init__(self, args):
print('Using Supervised DPC-RNN model for mode: {}'.format(args["mode"]))
super(SupervisedDpcRnn, self).__init__(args)
self.hierarchical = args['hierarchical']
self.initialize_supervised_inference_layers()
if self.hierarchical:
self.init_multihead_layers()
# Initialize attention based layers
self.attention = args['attention']
self.attention_fn = AttentionProjection(self.final_feature_size, (self.last_size0, self.last_size1))
print('Using attention: {}'.format(self.attention))
def eval_mode(self):
self.backbone.eval()
self.final_bn.eval()
for m in list(self.backbone.modules()) + list(self.modules()):
if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
m.eval()
def init_multihead_layers(self):
self.final_fc_video = self.init_classification_fc_layer(self.panasonic_num_classes['video'])
self.final_fc_atomic = self.init_classification_fc_layer(self.panasonic_num_classes['atomic'])
def get_representation(self, block, returns=[ContextVal]):
(B, N, C, SL, H, W) = block.shape
block = block.view(B*N, C, SL, H, W)
feature = self.backbone(block)
del block
feature = F.relu(feature)
feature = F.avg_pool3d(feature, (self.last_duration, 1, 1), stride=1)
feature = feature.view(B, N, self.param['feature_size'], self.last_size0, self.last_size1)
# feature is [B,N,D,last_size, last_size] now
context, _ = self.agg(feature)
del feature
# Get the context at the final timestep i.e. remove N
gridCtx = context[:,-1,:].unsqueeze(1)
attention_map = None
# context is B, 1, D, s, s
if self.attention:
attention_map = self.attention_fn(gridCtx)
# attention_map is B, 1, 1, s, s
else:
attention_map = torch.ones((B, 1, 1, self.last_size0, self.last_size1), device=gridCtx.device)
# Normalize attention_map
attention_map = attention_map / attention_map.sum(-1, keepdim=True).sum(-2, keepdim=True)
context = gridCtx * attention_map
context = context.sum(-1).sum(-1)
valsToReturn = []
if ContextVal in returns:
valsToReturn.append(context)
if GridCtxVal in returns:
valsToReturn.append(gridCtx)
if AttentionVal in returns:
valsToReturn.append(attention_map)
return tuple(valsToReturn)
def forward(self, block):
context, grid, attn = self.get_representation(block, returns=[ContextVal, GridCtxVal, AttentionVal])
# [B,N,C] -> [B,C,N] -> BN() -> [B,N,C], because BN operates on id=1 channel.
context = self.final_bn(context.transpose(-1, -2)).transpose(-1, -2)
# for m in self.backbone.modules():
# if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
# print(m, torch.unique(m.running_mean)[:10], torch.unique(m.running_var)[:10])
main_logits, atomic_logits = None, None
if self.hierarchical:
main_logits = self.final_fc_video(context).contiguous().reshape(-1, self.panasonic_num_classes['video'])
atomic_logits = self.final_fc_atomic(context).contiguous().reshape(-1, self.panasonic_num_classes['atomic'])
else:
main_logits = self.final_fc(context).view(-1, self.num_classes)
# Dummy result, doesn't matter in the non hierarchical case
atomic_logits = main_logits.detach()
return context, main_logits, atomic_logits, grid, attn
import math
import vgg
class AudioVGGEncoder(nn.Module):
'''
VGG model with feature outputs
'''
def __init__(self, args):
super(AudioVGGEncoder, self).__init__()
torch.cuda.manual_seed(233)
self.dim = (128, get_spectrogram_window_length(args['seq_len'], args['num_seq'], args['ds']))
self.args = args
self.dropout = 0.75
self.num_classes = args['num_classes']
self.debug = args['debug']
if self.debug:
self.dropout = 0.0
# cfg['E'] refers to VGG_19 with batchnorm
self.custom_cfg = list(vgg.cfg['E'])
# We add a convolution and maxpooling to reduce resolution
self.reducedDim = (self.dim[0] // 32, self.dim[1] // 32)
self.numReductions = int(math.log(min(self.reducedDim), 2))
for _ in range(self.numReductions):
self.custom_cfg.extend([512, 'M'])
# Gives the final shape after the new reductions
self.numResidualElements = int(np.prod(self.reducedDim) / 2 ** (len(self.reducedDim) * self.numReductions))
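        # Worked numeric example (hypothetical input, not taken from the original code): if
        # self.dim were (128, 96), then reducedDim = (4, 3), numReductions = int(log2(3)) = 1,
        # one extra [512, 'M'] stage is appended, and numResidualElements = 12 / 2**(2*1) = 3,
        # so self.param['mid_feature_size'] below becomes 512 * 3 = 1536.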
self.param = {'mid_feature_size': 512 * self.numResidualElements, 'feature_size': 256}
self.features = vgg.make_layers(self.custom_cfg, batch_norm=True)
self.flatten = nn.Sequential(
nn.Linear(self.param['mid_feature_size'], self.param['feature_size']),
)
self.final_feature_size = self.param['feature_size']
self.last_size = 1
self.is_supervised = args['model'] == mu.ModelSupervised
if self.is_supervised:
self.final_fc = self.init_classification_fc_layer(self.num_classes)
self.panasonic_num_classes = {'video': 75, 'atomic': 448}
self.hierarchical = args['hierarchical']
if self.hierarchical:
self.init_multihead_layers()
def eval_mode(self):
self.features.eval()
for m in list(self.features.modules()) + list(self.modules()):
if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
m.eval()
def init_multihead_layers(self):
self.final_fc_video = self.init_classification_fc_layer(self.panasonic_num_classes['video'])
self.final_fc_atomic = self.init_classification_fc_layer(self.panasonic_num_classes['atomic'])
def init_classification_fc_layer(self, num_classes):
final_fc = nn.Sequential(
nn.Linear(self.param['feature_size'], self.param['feature_size']),
nn.ReLU(),
nn.Dropout(self.dropout),
nn.Linear(self.param['feature_size'], num_classes),
)
self._initialize_weights(final_fc)
return final_fc
def _initialize_weights(self, module):
for name, param in module.named_parameters():
if 'weight' in name:
nn.init.orthogonal_(param, 1)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.flatten(x)
B, D = x.shape
if self.is_supervised:
x = x.view(B, 1, D)
main_logits, atomic_logits = None, None
if self.hierarchical:
main_logits = self.final_fc_video(x.view(B, D)).contiguous().view(-1, self.panasonic_num_classes['video'])
atomic_logits = self.final_fc_atomic(x.view(B, D)).contiguous().view(-1, self.panasonic_num_classes['atomic'])
else:
main_logits = self.final_fc(x.view(B, D)).view(-1, self.num_classes)
                # Dummy result; it doesn't matter in the non-hierarchical case
atomic_logits = main_logits.detach()
return x, main_logits, atomic_logits, x.unsqueeze(-1).unsqueeze(-1), x
else:
x = x.view(B, 1, D, 1, 1)
return x
import unittest
from tqdm import tqdm
class TestForwardPass(unittest.TestCase):
@classmethod
def setUp(self):
"""
        This code runs before each test.
"""
parser = mu.get_multi_modal_model_train_args()
self.args = vars(parser.parse_args(''))
self.args["mode"] = self.args["modalities"][0]
self.args["img_dim"] = 64
self.args["num_classes"] = 10
self.args["num_seq"], self.args["seq_len"] = 2, 4
self.B, self.N, self.SL, self.H, self.W, self.D, self.IC = \
self.args["batch_size"], self.args["num_seq"], self.args["seq_len"], self.args["img_dim"], self.args["img_dim"], 256, 3
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.device = torch.device(self.device)
self.cosSimHandler = su.CosSimHandler().to(self.device)
def test_forward_pass_for_attention(self):
# Run training for each loss
self.args["attention"] = True
self.model = SupervisedDpcRnn(self.args)
block = torch.rand(self.B, self.N, self.IC, self.SL, self.H, self.W)
self.model(block)
self.model.get_representation(block)
def test_forward_pass_without_attention(self):
self.args["attention"] = False
self.model = SupervisedDpcRnn(self.args)
# Run training for each loss
block = torch.rand(self.B, self.N, self.IC, self.SL, self.H, self.W)
self.model(block)
self.model.get_representation(block)
if __name__ == '__main__':
unittest.main()
|
from flask_admin import expose
from flask_login import current_user
from secure_views import SecureBaseView
from dashboards import user_dash
class UserDashView(SecureBaseView):
def is_visible(self):
return current_user.is_authenticated
def __init__(self, *args, **kwargs):
self.app = kwargs.pop('app', True)
SecureBaseView.__init__(self, *args, **kwargs)
@expose('/')
def index(self):
scripts = self.app._generate_scripts_html()
css = self.app._generate_css_dist_html()
config = self.app._generate_config_html()
return self.render('admin/dash_view.html', scripts=scripts, css=css,
dashconfig=config)
|
from typing import List, Optional
from modpath.dtypes import ModpathOptions
from modpath.funcs import segment, recombine
from modpath.errors import ArgumentError, PathOpError
def check_args(args):
if args.old_ext and args.multidot:
raise ArgumentError("Cannot use --old-ext with --multidot option")
def modpath_argparse():
import argparse
pr = argparse.ArgumentParser(
description="""Modify paths. A tool to manipulate paths, extensions, directories,
and structured filenames. A path looks like this:
[SCHEME://][HOST][DIRNAME]/[BASENAME][.EXT]"""
)
pr.add_argument("path", nargs="+", help="The path(s) to be modified")
pr.add_argument("-s", "--suffix", default=None, help="placed between the basename and extension")
pr.add_argument("-S", "--old-suffix", default=None, help="if specified, use this as the suffix")
pr.add_argument("-p", "--prefix", default=None, help="placed in front of the basename")
pr.add_argument("-P", "--old-prefix", default=None, help="if specified, use this as the prefix")
pr.add_argument("-e", "--ext", default=None, help="if specified, replaces the extension")
pr.add_argument("-E", "--old-ext", default=None, help="if specified, treat this as the extension")
pr.add_argument(
"-b",
"--base",
default=None,
help="if specified, replaces the basename without extension",
)
pr.add_argument("-d", "--dirname", default=None, help="if specified, replaces the dirname")
pr.add_argument("-D", "--old-dirname", default=None, help="if specified, treat this as the dirname")
pr.add_argument("-r", "--rel", default=None, help="determine relpath to")
pr.add_argument(
"-m",
"--multidot",
action="store_true",
help="Treat all dotted components in base as the extension",
)
pr.add_argument("-g", "--glob", action="store_true", help="Allows globbing in [PATH]")
pr.add_argument("-a", "--abs", action="store_true", help="Get absolute path")
pr.add_argument("-R", "--real", "--realpath", action="store_true", help="Get realpath")
return pr
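# Illustrative CLI usage (hypothetical paths; the exact behaviour depends on segment/recombine
# in modpath.funcs):
#   modpath -e .bak /tmp/config.json     # replace the extension      -> /tmp/config.bak
#   modpath -p old_ -s _v2 notes.txt     # prefix and suffix the base -> old_notes_v2.txt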
def args_to_opts(args) -> List[ModpathOptions]:
argdict = vars(args).copy() # otherwise this mutates args
paths = argdict.pop("path")
out = []
for p in paths:
argdict["path"] = p
out.append(ModpathOptions(**argdict))
return out
def modpath_cli():
parser = modpath_argparse()
args = parser.parse_args()
check_args(args)
opts = args_to_opts(args)
# print(opts)
for opt in opts:
try:
print(recombine(segment(opt)))
except PathOpError as exc:
print("{}: {}".format(exc.__class__.__name__, exc))
exit(1)
|
import json
import subprocess
import webbrowser
from abc import ABC, abstractmethod
from sys import platform
try:
import requests
from pylatex import NoEscape, Document, Package
except ModuleNotFoundError:
pass
class ReportBase(ABC):
def __init__(self):
super().__init__()
def make_tex(self, fp_tex: str):
doc = self.make_latex()
if fp_tex.endswith('.tex'):
fp_tex = fp_tex[:-4]
doc.generate_tex(fp_tex)
def make_pdf(self, fp_pdf: str, fp_pdf_viewer: str = None, clean: bool = True, clean_tex: bool = True):
if fp_pdf.endswith('.pdf'):
fp_pdf = fp_pdf[:-4]
doc = self.make_latex()
doc.generate_pdf(filepath=fp_pdf, clean=clean, clean_tex=clean_tex)
if platform == 'darwin':
            # 'creationflags' is Windows-only and raises ValueError on POSIX, so omit it when using 'open' on macOS
            subprocess.Popen(['open', fp_pdf + '.pdf'])
else:
if fp_pdf_viewer:
subprocess.Popen([fp_pdf_viewer, fp_pdf + '.pdf'], creationflags=0x08000000)
else:
subprocess.Popen([fp_pdf + '.pdf'], shell=True, creationflags=0x08000000)
def make_pdf_web(self, fp_tex: str):
if fp_tex.endswith('.tex'):
fp_tex = fp_tex[:-4]
self.make_tex(fp_tex=fp_tex + '.tex', )
fileio_response = requests.post(
"https://file.io",
files={
'file': (
fp_tex + '.tex',
open(fp_tex + '.tex', 'rb'))
}
)
texurl = json.loads(fileio_response.text)['link']
webbrowser.open(f"https://www.overleaf.com/docs?snip_uri={texurl}")
@abstractmethod
def make_latex(self, *args, **kwargs) -> Document:
pass
@abstractmethod
def make_latex_sections(self) -> list:
pass
@staticmethod
def make_document_template(sec_title_prefix: str = None, *_, **__):
doc = Document(
indent=False,
geometry_options={'left': '0.99in', 'right': '0.99in', 'top': '1.5in', 'bottom': '1.5in'}
)
doc.packages.append(Package('xcolor'))
doc.packages.append(Package('sectsty'))
doc.packages.append(Package('hyperref'))
doc.packages.append(Package('mathtools'))
# doc.packages.append(Package('standalone', options='preview'))
doc.preamble.append(NoEscape(r'\definecolor{ofr}{RGB}{0, 164, 153}'))
doc.preamble.append(NoEscape(r'\renewcommand\familydefault{\sfdefault}'))
doc.preamble.append(NoEscape(r'\sectionfont{\color{ofr}}'))
doc.preamble.append(NoEscape(r'\subsectionfont{\color{ofr}}'))
doc.preamble.append(NoEscape(r'\subsubsectionfont{\color{ofr}}'))
doc.preamble.append(NoEscape(r'\renewcommand{\arraystretch}{1.2}'))
# \titleformat{<command>}[<shape>]{<format>}{<label>}{<sep>}{<before-code>}[<after-code>]
# doc.preamble.append(NoEscape(r'\titleformat{\section}{}{}{\hspace{1cm}}{}%'))
if sec_title_prefix:
doc.preamble.append(NoEscape(f'\\renewcommand{{\\thesection}}{{{sec_title_prefix}\\arabic{{section}}}}'))
return doc
class Report(ReportBase):
def __init__(self, sections: list, sec_title_prefix: str):
self.sections: list = sections
self.sec_title_prefix: str = sec_title_prefix
super().__init__()
def make_latex(self) -> Document:
doc = self.make_document_template(self.sec_title_prefix)
for i in self.sections:
doc.append(i)
return doc
def make_latex_sections(self) -> list:
return self.sections
|
"""Routines for getting halo properties and links, and data derived from them, starting with a Halo or other object
"""
from __future__ import absolute_import
import sqlalchemy
from . import data_attribute_mapper
class HaloPropertyGetter(object):
"""HaloPropertyGetter and its subclasses implement efficient methods for retrieving data from sqlalchemy ORM objects.
The key features are
* being able to flexibly use a pre-queried cache of ORM objects with data, or issue fresh SQL queries as appropriate
* being able to flexibly call 'reassembly' on the data
* speed when used repeatedly on multiple rows of similar data
Different classes get different types of data from the ORM and/or process it differently.
This base class is used to retrieve the actual HaloProperty objects.
"""
def use_fixed_cache(self, halo):
return 'all_properties' not in sqlalchemy.inspect(halo).unloaded
def get(self, halo, property_id, session):
"""Get the specified property, from the in-memory cache if it exists otherwise from the database
using the specified session
:type halo: Halo
:type property_id: int
:type session: sqlalchemy.orm.session.Session
"""
if self.use_fixed_cache(halo):
return self.get_from_cache(halo, property_id)
else:
return self.get_from_session(halo, property_id, session)
def keys(self, halo, session):
"""Get a list of keys, from the in-memory cache if it exists otherwise from the database
using the specified session
:type halo: Halo
:type session: sqlalchemy.orm.session.Session
"""
if self.use_fixed_cache(halo):
return self.keys_from_cache(halo)
else:
return self.keys_from_session(halo, session)
def get_from_cache(self, halo, property_id):
"""Get the specified property from an existing in-memory cache
:type halo: Halo
:type property_id: int"""
return_vals = []
for x in halo.all_properties:
if x.name_id == property_id:
return_vals.append(x)
return self.postprocess_data_objects(return_vals)
def get_from_session(self, halo, property_id, session):
"""Get the specified property from the database using the specified session
:type halo: Halo
:type property_id: int
:type session: sqlalchemy.orm.session.Session"""
from . import halo_data
query_properties = session.query(halo_data.HaloProperty).filter_by(name_id=property_id, halo_id=halo.id,
deprecated=False).order_by(
halo_data.HaloProperty.id.desc())
return self.postprocess_data_objects(query_properties.all())
def keys_from_cache(self, halo):
"""Return a list of keys from an existing in-memory cache"""
return [x.name.text for x in halo.all_properties]
def keys_from_session(self, halo, session):
from . import halo_data
query_properties = session.query(halo_data.HaloProperty).filter_by(halo_id=halo.id,
deprecated=False)
return [x.name.text for x in query_properties.all()]
def cache_contains(self, halo, property_id):
"""Return True if the existing in-memory cache has the specified property
:type halo: Halo
:type property_id: int"""
for x in halo.all_properties:
if x.name_id == property_id:
return True
return False
def postprocess_data_objects(self, objects):
"""Post-process the ORM data objects to pull out the data in the form required"""
return objects
class HaloPropertyValueGetter(HaloPropertyGetter):
"""As HaloPropertyGetter, but return the data value (including automatic reassembly of the data if appropriate)"""
def __init__(self):
self._options = []
self._providing_class = None
self._mapper = None
def postprocess_data_objects(self, outputs):
return [self._postprocess_one_result(o) for o in outputs]
def _setup_data_mapper(self, property_object):
if self._mapper is None:
# Optimisation: figure out a mapper for the first output and assume it's ok for all of them
self._mapper = data_attribute_mapper.DataAttributeMapper(property_object)
def _infer_property_class(self, property_object):
if self._providing_class is None:
# Optimisation: figure out a providing class for the first output and assume it's ok for all of them
try:
self._providing_class = property_object.name.providing_class(property_object.halo.handler_class)
except NameError:
pass
def _postprocess_one_result(self, property_object):
self._infer_property_class(property_object)
if hasattr(self._providing_class, 'reassemble'):
instance = self._providing_class(property_object.halo.timestep.simulation)
return instance.reassemble(property_object, *self._options)
else:
self._setup_data_mapper(property_object)
return self._mapper.get(property_object)
class HaloPropertyValueWithReassemblyOptionsGetter(HaloPropertyValueGetter):
"""As HaloPropertyValueGetter, but allow options to be passed to the property reassembler"""
def __init__(self, *options):
super(HaloPropertyValueWithReassemblyOptionsGetter, self).__init__()
self._options = options
class HaloPropertyRawValueGetter(HaloPropertyValueGetter):
"""As HaloPropertyValueGetter, but never invoke an automatic reassembly; always retrieve the raw data"""
def _postprocess_one_result(self, property_object):
self._setup_data_mapper(property_object)
return self._mapper.get(property_object)
class HaloLinkGetter(HaloPropertyGetter):
"""As HaloPropertyGetter, but retrieve HaloLinks instead of HaloProperties"""
def get_from_cache(self, halo, property_id):
return_vals = []
for x in halo.all_links:
if x.relation_id == property_id:
return_vals.append(x)
return self.postprocess_data_objects(return_vals)
def get_from_session(self, halo, property_id, session):
from . import halo_data
query_links = session.query(halo_data.HaloLink).filter_by(relation_id=property_id, halo_from_id=halo.id).order_by(
halo_data.HaloLink.id)
return self.postprocess_data_objects(query_links.all())
def cache_contains(self, halo, property_id):
for x in halo.all_links:
if x.relation_id == property_id:
return True
return False
def keys_from_cache(self, halo):
"""Return a list of keys from an existing in-memory cache"""
return [x.relation.text for x in halo.all_links]
def keys_from_session(self, halo, session):
from . import halo_data
query_properties = session.query(halo_data.HaloLink).filter_by(halo_from_id=halo.id)
return [x.relation.text for x in query_properties.all()]
class HaloLinkTargetGetter(HaloLinkGetter):
"""As HaloLinkGetter, but retrieve the target of the links instead of the HaloLink objects themselves"""
def postprocess_data_objects(self, outputs):
return [o.halo_to for o in outputs]
|
import json
import re
import sys
from collections.abc import Mapping, KeysView, ValuesView, Callable
from datetime import datetime, date, timedelta
from pathlib import Path
from struct import calcsize, unpack_from, error as StructError
from traceback import format_tb
from types import TracebackType
from typing import Union, Iterator, Iterable
from unicodedata import category
from colored import stylize, fg as _fg, bg as _bg
def colored(text, fg=None, do_color: bool = True, bg=None):
if fg is not None and bg is not None:
colors = (_fg(fg), _bg(bg))
else:
colors = _fg(fg) if fg is not None else _bg(bg) if bg is not None else ()
return stylize(text, colors) if do_color and colors else text
def to_hex_and_str(
pre,
data: bytes,
*,
encoding: str = 'utf-8',
fill: int = 0,
struct: Union[str, Callable] = None,
offset: int = 0,
pad: bool = False,
) -> str:
"""
    Format the given bytes to appear similar to the format used by xxd. Intended to be called once per
    line; split the data into per-line chunks before calling this function.
:param pre: Line prefix
:param data: The binary data to be converted
:param encoding: Encoding to use for the str portion
:param fill: Ensure hex fills the amount of space that would be required for this many bytes
:param struct: Interpret contents as an array of the given struct format character
:param offset: Offset to apply before processing contents as a struct array
:param pad: Pad the string portion to ensure alignment when escaped characters are found
:return: String containing both the hex and str representations
"""
try:
replacements = to_hex_and_str._replacements
except AttributeError:
repl_map = {c: '.' for c in map(chr, range(sys.maxunicode + 1)) if category(c) == 'Cc'}
to_hex_and_str._replacements = replacements = str.maketrans(repl_map | {'\r': '\\r', '\n': '\\n', '\t': '\\t'})
as_hex = data.hex(' ', -4)
if pad:
esc = {'\r', '\n', '\t'}
as_str = ''.join(c if c in esc else f' {c}' for c in data.decode(encoding, 'replace')).translate(replacements)
else:
as_str = data.decode(encoding, 'replace').translate(replacements)
if fill:
if (to_fill := fill * 2 + (fill // 4) - 1 - len(as_hex)) > 0:
as_hex += ' ' * to_fill
if to_fill := fill * (1 + int(pad)) - len(as_str):
as_str += ' ' * to_fill
if struct:
if isinstance(struct, str):
from_struct = []
for i in range(offset, len(data), calcsize(struct)):
try:
from_struct.extend(unpack_from(struct, data, i))
except StructError:
pass
elif isinstance(struct, Callable):
from_struct = struct(data)
else:
raise TypeError(f'Unexpected struct type={type(struct)}')
return f'{pre} {as_hex} | {as_str} | {from_struct}'
return f'{pre} {as_hex} | {as_str}'
def to_bin_str(data: bytes, sep: str = ' '):
return sep.join(map('{:08b}'.format, data))
class PseudoJsonEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, (set, KeysView)):
return sorted(o)
elif isinstance(o, ValuesView):
return list(o)
elif isinstance(o, Mapping):
return dict(o)
elif isinstance(o, bytes):
try:
return o.decode('utf-8')
except UnicodeDecodeError:
return o.hex(' ', -4)
elif isinstance(o, datetime):
return o.strftime('%Y-%m-%d %H:%M:%S %Z')
elif isinstance(o, date):
return o.strftime('%Y-%m-%d')
elif isinstance(o, (type, timedelta)):
return str(o)
elif isinstance(o, TracebackType):
return ''.join(format_tb(o)).splitlines()
elif hasattr(o, '__to_json__'):
return o.__to_json__()
elif hasattr(o, '__serializable__'):
return o.__serializable__()
try:
return super().default(o)
except TypeError:
return repr(o)
except UnicodeDecodeError:
return o.decode('utf-8', 'replace')
def pseudo_json(data, sort_keys: bool = True) -> str:
return json.dumps(data, cls=PseudoJsonEncoder, sort_keys=sort_keys, indent=4, ensure_ascii=False)
def pseudo_json_rows(data, sort_keys: bool = True) -> str:
last = len(data) - 1
rows = '\n'.join(
' {}: {}{}'.format(
json.dumps(key, ensure_ascii=False),
json.dumps(val, cls=PseudoJsonEncoder, sort_keys=sort_keys, ensure_ascii=False),
',' if i != last else ''
)
for i, (key, val) in enumerate(data.items())
)
return f'[\n{rows}\n]'
class cached_classproperty:
def __init__(self, func):
self.__doc__ = func.__doc__
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
self.func = func
self.values = {}
def __get__(self, obj, cls):
try:
return self.values[cls]
except KeyError:
self.values[cls] = value = self.func.__get__(obj, cls)() # noqa
return value
def unique_path(parent: Path, stem: str, suffix: str, seps=('_', '-'), n: int = 1, add_date: bool = True) -> Path:
"""
:param parent: Directory in which a unique file name should be created
:param stem: File name without extension
:param suffix: File extension, including `.`
    :param seps: Separators between stem and date/n, respectively.
    :param n: First number to try; incremented by 1 until the resulting file name is unique
:param add_date: Whether a date should be added before n. If True, a date will always be added.
:return: Path with a file name that does not currently exist in the target directory
"""
date_sep, n_sep = seps
if add_date:
stem = f'{stem}{date_sep}{datetime.now().strftime("%Y-%m-%d")}'
name = stem + suffix
while (path := parent.joinpath(name)).exists():
name = f'{stem}{n_sep}{n}{suffix}'
n += 1
return path
def without_unknowns(data):
if isinstance(data, dict):
return {k: without_unknowns(v) for k, v in data.items() if not isinstance(k, str) or not k.startswith('_')}
return data
def collapsed_ranges_str(values: Iterable[str], sep: str = '...', delim: str = ', ') -> str:
return delim.join(start if start == end else f'{start}{sep}{end}' for start, end in collapse_ranges(values))
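# Illustrative example (made-up values): collapsed_ranges_str(['img1', 'img2', 'img3', 'cover'])
# returns 'cover, img1...img3'; names with consecutive numeric suffixes are collapsed into
# start...end ranges, while names without a numeric suffix are kept as-is.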
def collapse_ranges(values: Iterable[str]) -> list[tuple[str, str]]:
try:
match_suffix = collapse_ranges._match_suffix
except AttributeError:
collapse_ranges._match_suffix = match_suffix = re.compile(r'^(.*?)(\d+)$').match
groups = []
with_suffix = {}
for value in values:
if m := match_suffix(value):
prefix, suffix = m.groups()
with_suffix[value] = (prefix, int(suffix))
else:
groups.append((value, value))
group = {}
last = None
for value, (prefix, suffix) in sorted(with_suffix.items(), key=lambda kv: kv[1]):
if prefix != last and group:
groups.extend(_collapse_ranges(group))
group = {}
group[value] = suffix
last = prefix
if group:
groups.extend(_collapse_ranges(group))
groups.sort()
return groups
def _collapse_ranges(values: dict[str, int]) -> Iterator[tuple[str, str]]:
start, end, last = None, None, None
for value, suffix in values.items():
if start is None:
start = end = value
elif suffix - last == 1:
end = value
else:
yield start, end
start = end = value
last = suffix
if start is not None:
yield start, end
|
funcionarios = []
for i in range(5):
dados=[]
dados.append(input("informe o nome do funcionario:"))
dados.append(input("informe o email do funcionario:"))
funcionarios.append(dados)
print(funcionarios)
|
from rest_framework import serializers
from FuncionariosApp.models import Departamento, Funcionario
class DepartamentoSerializer(serializers.ModelSerializer):
class Meta:
model = Departamento
fields = ('DepartamentoId', 'DepartamentoNome')
class FuncionarioSerializer(serializers.ModelSerializer):
class Meta:
model = Funcionario
fields = ('FuncionarioId', 'FuncionarioNome','Departamento', 'DataDeContratacao','ArquivoFotoNome')
|
#!/usr/bin/python3
import collections
import csv
import os
import sys
doc_template = """
<!DOCTYPE html>
<html>
<head>
<style>
@font-face {{
font-family:'Yandex Sans Display Web';
src:url(https://yastatic.net/adv-www/_/H63jN0veW07XQUIA2317lr9UIm8.eot);
src:url(https://yastatic.net/adv-www/_/H63jN0veW07XQUIA2317lr9UIm8.eot?#iefix) format('embedded-opentype'),
url(https://yastatic.net/adv-www/_/sUYVCPUAQE7ExrvMS7FoISoO83s.woff2) format('woff2'),
url(https://yastatic.net/adv-www/_/v2Sve_obH3rKm6rKrtSQpf-eB7U.woff) format('woff'),
url(https://yastatic.net/adv-www/_/PzD8hWLMunow5i3RfJ6WQJAL7aI.ttf) format('truetype'),
url(https://yastatic.net/adv-www/_/lF_KG5g4tpQNlYIgA0e77fBSZ5s.svg#YandexSansDisplayWeb-Regular) format('svg');
font-weight:400;
font-style:normal;
font-stretch:normal
}}
body {{ font-family: "Yandex Sans Display Web", Arial, sans-serif; background: #EEE; }}
h1 {{ margin-left: 10px; }}
th, td {{ border: 0; padding: 5px 10px 5px 10px; text-align: left; vertical-align: top; line-height: 1.5; background-color: #FFF;
box-shadow: 0 0 0 1px rgba(0, 0, 0, 0.05), 0 8px 25px -5px rgba(0, 0, 0, 0.1); }}
td {{ white-space: pre; font-family: Monospace, Courier New; }}
a {{ color: #06F; text-decoration: none; }}
a:hover, a:active {{ color: #F40; text-decoration: underline; }}
table {{ border: 0; }}
.main {{ margin-left: 10%; }}
p.links a {{ padding: 5px; margin: 3px; background: #FFF; line-height: 2; white-space: nowrap; box-shadow: 0 0 0 1px rgba(0, 0, 0, 0.05), 0 8px 25px -5px rgba(0, 0, 0, 0.1); }}
</style>
<title>{header}</title>
</head>
<body>
<div class="main">
<h1>{header}</h1>
{test_part}
<p class="links">
<a href="{raw_log_url}">{raw_log_name}</a>
<a href="{branch_url}">{branch_name}</a>
<a href="{commit_url}">Commit</a>
{additional_urls}
<a href="output.7z">Test output</a>
<a href="{task_url}">Task (private network)</a>
</p>
</div>
</body>
</html>
"""
table_template = """
<h2>{caption}</h2>
<table>
{header}
{rows}
</table>
"""
def tr(x):
return '<tr>' + str(x) + '</tr>'
def td(x):
return '<td>' + str(x) + '</td>'
def th(x):
return '<th>' + str(x) + '</th>'
def table_row(r):
return tr(''.join([td(f) for f in r]))
def table_header(r):
return tr(''.join([th(f) for f in r]))
def tsv_rows(n):
result = ''
with open(n, encoding='utf-8') as fd:
for row in csv.reader(fd, delimiter="\t", quotechar='"'):
result += table_row(row)
return result
params = collections.defaultdict(str)
params['header'] = "ClickHouse Performance Comparison"
params['test_part'] = (
table_template.format(
caption = 'Tested commits',
header = table_header(['Old', 'New']),
rows = table_row([open('left-commit.txt').read(), open('right-commit.txt').read()])
) +
table_template.format(
caption = 'Changes in performance',
header = table_header(['Old, s', 'New, s', 'Relative difference (new - old)/old', 'Randomization distribution quantiles [5%, 50%, 95%]', 'Query']),
rows = tsv_rows('changed-perf.tsv')) +
table_template.format(
caption = 'Slow on client',
header = table_header(['Client time, s', 'Server time, s', 'Ratio', 'Query']),
rows = tsv_rows('slow-on-client.tsv')) +
table_template.format(
caption = 'Unstable',
header = table_header(['Old, s', 'New, s', 'Relative difference (new - old)/old', 'Randomization distribution quantiles [5%, 50%, 95%]', 'Query']),
rows = tsv_rows('unstable.tsv')) +
table_template.format(
caption = 'Run errors',
header = table_header(['A', 'B']),
rows = tsv_rows('run-errors.log'))
)
print(doc_template.format_map(params))
|
{
"variables": {
"copy_c_api": "no",
"c_api_path": "<(module_root_dir)/qdb",
},
"targets": [
{
"target_name": "<(module_name)",
"sources": [
"src/qdb_api.cpp",
"src/entry.hpp",
"src/expirable_entry.hpp",
"src/blob.cpp",
"src/blob.hpp",
"src/cluster.cpp",
"src/cluster.hpp",
"src/error.cpp",
"src/error.hpp",
"src/integer.cpp",
"src/integer.hpp",
"src/prefix.cpp",
"src/prefix.hpp",
"src/query_find.cpp",
"src/query_find.hpp",
"src/query.cpp",
"src/query.hpp",
"src/range.cpp",
"src/range.hpp",
"src/suffix.cpp",
"src/suffix.hpp",
"src/tag.cpp",
"src/tag.hpp",
"src/time_series.cpp",
"src/time_series.hpp",
"src/ts_column.cpp",
"src/ts_column.hpp",
"src/ts_point.cpp",
"src/ts_point.hpp",
"src/ts_range.cpp",
"src/ts_range.hpp",
"src/ts_aggregation.cpp",
"src/ts_aggregation.hpp",
"src/cluster_data.hpp",
"src/utilities.cpp",
"src/utilities.hpp",
"src/time.cpp",
"src/time.hpp",
"test/blobTest.js",
"test/clusterTest.js",
"test/config.js",
"test/deamonRunner.js",
"test/integerTest.js",
"test/prefixTest.js",
"test/queryTest.js",
"test/rangeTest.js",
"test/suffixTest.js",
"test/tagTest.js",
"test/tsBlobTest.js",
"test/tsDoubleTest.js",
"test/tsGeneralTest.js",
"test/tsInt64Test.js",
"test/tsStringTest.js",
"test/tsTimestampTest.js",
],
"conditions": [
[
"OS=='mac'",
{
"include_dirs": [
"/usr/local/include",
"<(c_api_path)/include"
],
"libraries": [
"-L<(c_api_path)/lib",
"-lqdb_api",
"-Wl,-rpath,@loader_path"
],
"xcode_settings": {
"OTHER_CFLAGS": [
"-std=c++14",
"-stdlib=libc++",
"-Wno-strict-aliasing",
"-mmacosx-version-min=10.7"
]
}
}
],
[
"OS=='freebsd'",
{
"include_dirs": [
"/usr/local/include",
"<(c_api_path)/include"
],
"libraries": [
"-L/usr/local/lib",
"-L<(c_api_path)/lib",
"-lqdb_api",
"-Wl,-rpath=\'$$ORIGIN\'"
],
"cflags": [
"-std=c++14",
"-stdlib=libc++",
"-Wno-strict-aliasing",
"-Wno-deprecated-declarations",
"-U_LIBCPP_TRIVIAL_PAIR_COPY_CTOR"
]
}
],
[
"OS=='linux'",
{
"include_dirs": [
"/usr/local/include",
"<(c_api_path)/include"
],
"libraries": [
"-L/usr/local/lib",
"-L<(c_api_path)/lib",
"-lqdb_api",
"-Wl,-rpath=\'$$ORIGIN\'",
"-static-libgcc",
"-static-libstdc++"
],
"cflags": [
"-std=c++14",
"-Wno-strict-aliasing"
]
}
],
[
"OS=='win'",
{
"include_dirs": [
"<(c_api_path)/include"
],
"msvs_settings": {
"VCCLCompilerTool": {
"ExceptionHandling": "2",
"DisableSpecificWarnings": [
"4530"
]
}
},
"link_settings": {
"libraries": [
"<(c_api_path)/lib/qdb_api.lib"
]
}
}
]
]
},
{
"target_name": "action_after_build",
"type": "none",
"dependencies": [
"<(module_name)"
],
"conditions": [
[
"OS=='mac'",
{
"copies": [
{
"destination": "<(module_path)",
"files": [
"<(PRODUCT_DIR)/<(module_name).node"
],
"conditions": [
[
"copy_c_api=='yes'",
{
"files": [
"<(c_api_path)/lib/libc++.1.0.dylib",
"<(c_api_path)/lib/libc++.1.dylib",
"<(c_api_path)/lib/libc++.LICENSE.TXT",
"<(c_api_path)/lib/libc++.dylib",
"<(c_api_path)/lib/libc++abi.1.0.dylib",
"<(c_api_path)/lib/libc++abi.1.dylib",
"<(c_api_path)/lib/libc++abi.LICENSE.TXT",
"<(c_api_path)/lib/libc++abi.dylib",
"<(c_api_path)/lib/libqdb_api.dylib"
]
}
]
]
}
]
}
],
[
"OS=='freebsd' or OS=='linux'",
{
"copies": [
{
"destination": "<(module_path)",
"files": [
"<(PRODUCT_DIR)/<(module_name).node"
],
"conditions": [
[
"copy_c_api=='yes'",
{
"files": [
"<(c_api_path)/lib/libqdb_api.so"
]
}
]
]
}
]
}
],
[
"OS=='win'",
{
"copies": [
{
"destination": "<(module_path)",
"files": [
"<(PRODUCT_DIR)/<(module_name).node"
],
"conditions": [
[
"copy_c_api=='yes'",
{
"files": [
"<(c_api_path)/bin/qdb_api.dll"
]
}
]
]
}
]
}
]
]
}
]
}
|
'''
03 - Line plot [3]
Now that you've built your first line plot, let's start working on the data that professor Hans Rosling used to build his beautiful bubble chart. It was collected in 2007. Two lists are available for you:
- life_exp, which contains the life expectancy for each country, and
- gdp_cap, which contains the GDP per capita (i.e. per person) for each country, expressed in US dollars.
GDP stands for Gross Domestic Product. It basically represents the size of the economy of a country. Divide this by the population and you get the GDP per capita.
matplotlib.pyplot is already imported as plt, so you can get started straight away.
Instructions:
- Print the last item from both the list gdp_cap, and the list life_exp; it is information about Zimbabwe.
- Build a line chart, with gdp_cap on the x-axis, and life_exp on the y-axis. Does it make sense to plot this data on a line plot?
- Don't forget to finish off with a plt.show() command, to actually display the plot.
'''
import matplotlib.pyplot as plt
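# gdp_cap and life_exp are provided by the exercise environment and are not defined in this
# snippet. The placeholder lists below (made-up numbers) are only here so it runs standalone.
try:
    gdp_cap, life_exp
except NameError:
    gdp_cap = [1000.0, 5000.0, 20000.0, 40000.0]
    life_exp = [50.0, 65.0, 75.0, 80.0]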
# Print the last item of gdp_cap and life_exp
print(gdp_cap[-1])
print(life_exp[-1])
# Make a line plot, gdp_cap on the x-axis, life_exp on the y-axis
plt.plot(gdp_cap, life_exp)
# Display the plot
plt.show()
# It does not make sense to show this data as a line chart; a scatter plot suits this per-country data better.
|
#!/usr/bin/env python3
from aws_cdk import core
from stacks.apiv2_stack import RestfulApiGatewayv2Stack
from stacks.config import conf
app = core.App()
cdk_env = core.Environment(region=conf.aws_region, account=conf.aws_account)
# Using API gateway v1
# from stacks.api_stack import RestfulApiGatewayStack
# RestfulApiGatewayStack(app, "tzfinder-api-stack", env=cdk_env)
# Using API gateway v2
# Note that domain name is required at the moment for this stack
RestfulApiGatewayv2Stack(app, "tzfinder-api-stack", env=cdk_env)
app.synth()
|
#!/usr/bin/env python3
from action_msgs.msg import GoalStatus
from hapthexa_msgs.action import MoveLeg
from hapthexa_msgs.msg import ForceSensor
import rclpy
from rclpy.action import ActionClient
phase = 0
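# `phase` tracks which step of the scripted leg motion is running (it is set from main()).
# The force-sensor callback below only reacts during phase 2, i.e. while the leg is swung
# forward, and cancels the active goal when a radial force is detected.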
def forcesensor_callback(msg, node):
# node.get_logger().info('{0}'.format(msg.z))
if phase == 2 and msg.radial_magnitude > 0.3:
        node.get_logger().info('radial force detected')
future = goal_handle.cancel_goal_async()
future.add_done_callback(lambda future: cancel_done(node, future))
global once_failed
once_failed = True
def cancel_done(node, future):
cancel_response = future.result()
if len(cancel_response.goals_canceling) > 0:
node.get_logger().info('Goal successfully canceled')
else:
node.get_logger().info('Goal failed to cancel')
def main(args=None):
rclpy.init(args=args)
node = rclpy.create_node('minimal_action_client')
leg_name = 'front_left'
forcesensor_sub = node.create_subscription(ForceSensor, 'hapthexa/leg/'+leg_name+'/force_sensor', lambda msg: forcesensor_callback(msg, node),10)
action_client = ActionClient(node, MoveLeg, 'hapthexa/leg/'+leg_name+'/move_leg')
node.get_logger().info('Waiting for action server...')
action_client.wait_for_server()
global goal_handle
global phase
phase = 1
global once_failed
once_failed = False
goal_msg = MoveLeg.Goal()
goal_msg.x = float(0)
goal_msg.y = float(0)
goal_msg.z = float(10.0)
goal_msg.relative_mode = True
send_goal_future = action_client.send_goal_async(goal_msg)
rclpy.spin_until_future_complete(node, send_goal_future)
goal_handle = send_goal_future.result()
get_result_future = goal_handle.get_result_async()
rclpy.spin_until_future_complete(node, get_result_future)
#########################
phase = 2
goal_msg = MoveLeg.Goal()
goal_msg.x = float(5.0)
goal_msg.y = float(0)
goal_msg.z = float(10.0)
goal_msg.relative_mode = True
send_goal_future = action_client.send_goal_async(goal_msg)
rclpy.spin_until_future_complete(node, send_goal_future)
goal_handle = send_goal_future.result()
get_result_future = goal_handle.get_result_async()
rclpy.spin_until_future_complete(node, get_result_future)
#########################
if once_failed:
phase = 3
goal_msg = MoveLeg.Goal()
goal_msg.x = float(0.0)
goal_msg.y = float(0)
goal_msg.z = float(15.0)
goal_msg.relative_mode = True
send_goal_future = action_client.send_goal_async(goal_msg)
rclpy.spin_until_future_complete(node, send_goal_future)
goal_handle = send_goal_future.result()
get_result_future = goal_handle.get_result_async()
rclpy.spin_until_future_complete(node, get_result_future)
#########################
phase = 4
goal_msg = MoveLeg.Goal()
goal_msg.x = float(5.0)
goal_msg.y = float(0)
goal_msg.z = float(15.0)
goal_msg.relative_mode = True
send_goal_future = action_client.send_goal_async(goal_msg)
rclpy.spin_until_future_complete(node, send_goal_future)
goal_handle = send_goal_future.result()
get_result_future = goal_handle.get_result_async()
rclpy.spin_until_future_complete(node, get_result_future)
#########################
phase = 5
goal_msg = MoveLeg.Goal()
goal_msg.x = float(5.0)
goal_msg.y = float(0)
goal_msg.z = float(0.0)
goal_msg.relative_mode = True
send_goal_future = action_client.send_goal_async(goal_msg)
rclpy.spin_until_future_complete(node, send_goal_future)
goal_handle = send_goal_future.result()
get_result_future = goal_handle.get_result_async()
rclpy.spin_until_future_complete(node, get_result_future)
action_client.destroy()
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os
import re
import codecs
import tempfile
import types
import warnings
from mecabwrap.domecab import do_mecab, do_mecab_vec, do_mecab_iter
from mecabwrap.utils import detect_mecab_enc, find_dictionary
class TestDomecab(unittest.TestCase):
def test_wakati(self):
out = do_mecab(u'すもももももももものうち', '-Owakati')
self.assertEqual(out.strip(), u'すもも も もも も もも の うち')
def test_default(self):
out = do_mecab(u'メロンパンを食べる')
lbs = re.findall(r'\n', out)
self.assertEqual(len(lbs), 5)
words = re.findall(r'[^\t\n]+\t', out)
words = [w[:-1] for w in words]
self.assertEqual(words, [u'メロン', u'パン', u'を', u'食べる'])
def test_unicode(self):
out = do_mecab(u'メロンパンを食べる', '-E', u'おわり\n', '-B', u'はじまり\n')
words = re.findall(r'[^\t\n]+\t', out[1:-1])
words = [w[:-1] for w in words]
self.assertEqual(words, [u'メロン', u'パン', u'を', u'食べる'])
def test_outfile_and_o(self):
# generate filenames that do not exist
path1 = tempfile.mktemp()
path2 = tempfile.mktemp()
path3 = tempfile.mktemp()
# make sure they don't exist
self.assertFalse(os.path.exists(path1))
self.assertFalse(os.path.exists(path2))
self.assertFalse(os.path.exists(path3))
ret1 = do_mecab(u'赤巻紙青巻紙', outpath=path1)
ret2 = do_mecab(u'赤巻紙青巻紙', '-o', path2)
ret3 = do_mecab(u'赤巻紙青巻紙', '--output', path3)
# confirm files are created
self.assertTrue(os.path.exists(path1))
self.assertTrue(os.path.exists(path2))
self.assertTrue(os.path.exists(path3))
# confirm all results are empty
self.assertEqual(ret1, u'')
self.assertEqual(ret2, u'')
self.assertEqual(ret3, u'')
with open(path1, 'rb') as f:
out1 = f.read()
with open(path2, 'rb') as f:
out2 = f.read()
with open(path3, 'rb') as f:
out3 = f.read()
# confirm results are all identical
self.assertEqual(out1, out2)
self.assertEqual(out2, out3)
enc = detect_mecab_enc()
out = do_mecab(u'赤巻紙青巻紙')
self.assertEqual(out, out1.decode(enc))
self.assertTrue(len(out) > 0)
# clean up
os.remove(path1)
os.remove(path2)
os.remove(path3)
# make sure they don't exist anymore
self.assertFalse(os.path.exists(path1))
self.assertFalse(os.path.exists(path2))
self.assertFalse(os.path.exists(path3))
# when both -o and outpath assigned
# should get warning, and outpath is ignored
with warnings.catch_warnings(record=True) as w:
ret4 = do_mecab(u'赤巻紙青巻紙', '-o', path2, outpath=path1)
self.assertEqual(len(w), 1)
self.assertTrue(os.path.exists(path2))
self.assertFalse(os.path.exists(path1))
with open(path2, 'rb') as f:
out4 = f.read()
self.assertEqual(out1, out4)
class TestDomecabVec(unittest.TestCase):
def test_vec(self):
ins = [u'春はあけぼの', u'やうやう白くなりゆく山際']
out = do_mecab_vec(ins, outpath=None)
lbs = re.findall(r'\n', out)
self.assertEqual(len(lbs), 10)
words = re.findall(r'[^\t\n]+\t', out)
words = [w[:-1] for w in words]
self.assertEqual(
words,
[u'春', u'は', u'あけぼの', u'やうやう',
u'白く', u'なり', u'ゆく', u'山際']
)
lines = out.split('\n')
lines = [line.strip() for line in lines]
self.assertEqual(lines[3], 'EOS')
self.assertEqual(lines[9], 'EOS')
def test_linebreak(self):
# vector element containing line break
ins = [u'今日は\n赤ちゃん', u'私が\rママよ']
out = do_mecab_vec(ins)
eos = re.findall(r'EOS[\r]{0,1}\n', out)
self.assertEqual(len(eos), 2, out)
out = do_mecab_vec(ins, '-Owakati')
split = out.strip().split('\n')
self.assertEqual(len(split), 2, out)
class TestDomecabIter(unittest.TestCase):
def test_iter(self):
ins = [u'アイスコーヒー', u'飲みたい']
it = do_mecab_iter(ins, '-F%m\n', byline=True)
self.assertTrue(isinstance(it, types.GeneratorType))
self.assertEqual(
list(it), [u'アイス', u'コーヒー', u'EOS', u'飲み', u'たい', u'EOS'])
ins = [u'ぶどうパン', u'食べたい']
it = do_mecab_iter(ins, '-F%m\n', byline=False)
self.assertTrue(isinstance(it, types.GeneratorType))
self.assertEqual(
list(it), [u'ぶどう\nパン\nEOS', u'食べ\nたい\nEOS'])
def test_iter_count(self):
ins = [u'となりの客はよく柿食う客だ', u'バスガス爆発']
ct = 0
for line in do_mecab_iter(ins, byline=False):
ct += 1
self.assertEqual(ct, 2)
ct = 0
for line in do_mecab_iter(ins, '-Owakati', byline=True):
ct += 1
self.assertEqual(ct, 2)
ct = 0
for line in do_mecab_iter(ins, '-Owakati', byline=False):
ct += 1
self.assertEqual(ct, 1)
def test_iter_Eopt(self):
ins = [u'となりの客はよく柿食う客だ', u'バスガス爆発']
ct = 0
for line in do_mecab_iter(ins, '-EEND\n', byline=False):
ct += 1
self.assertEqual(line[-4:], '\nEND')
self.assertEqual(ct, 2)
ct = 0
for line in do_mecab_iter(ins, '-E', 'END\n', byline=False):
ct += 1
self.assertEqual(line[-4:], '\nEND')
self.assertEqual(ct, 2)
ct = 0
for line in do_mecab_iter(ins, '--eos-format=END\n', byline=False):
ct += 1
self.assertEqual(line[-4:], '\nEND')
self.assertEqual(ct, 2)
ct = 0
for line in do_mecab_iter(ins, '--eos-format', 'END\n', byline=False):
ct += 1
self.assertEqual(line[-4:], '\nEND')
self.assertEqual(ct, 2)
def test_iter_Eopt_unicode(self):
ins = [u'となりの客はよく柿食う客だ', u'バスガス爆発']
ct = 0
for line in do_mecab_iter(ins, u'-Eおしまい\n', byline=False):
ct += 1
self.assertEqual(line[-5:], u'\nおしまい')
self.assertEqual(ct, 2)
ct = 0
for line in do_mecab_iter(ins, '-E', u'おしまい\n', byline=False):
ct += 1
self.assertEqual(line[-5:], u'\nおしまい')
self.assertEqual(ct, 2)
ct = 0
for line in do_mecab_iter(ins, u'--eos-format=おしまい\n', byline=False):
ct += 1
self.assertEqual(line[-5:], u'\nおしまい')
self.assertEqual(ct, 2)
ct = 0
for line in do_mecab_iter(ins, '--eos-format', u'おしまい\n', byline=False):
ct += 1
self.assertEqual(line[-5:], u'\nおしまい')
self.assertEqual(ct, 2)
def test_linebreak(self):
ins = [u'今日は\n赤ちゃん', u'私が\rママよ']
out = list(do_mecab_iter(ins, byline=False))
self.assertEqual(len(out), 2, out)
out = list(do_mecab_iter(ins, '-Owakati', byline=True))
self.assertEqual(len(out), 2, out)
class TestLargeInput(unittest.TestCase):
def test_large_input(self):
enc = detect_mecab_enc()
x = u'すもももももももものうち!'
bx = len(x.encode(enc))
# repeat this until it is over 20000 bytes
tgt = 20000
rep = int(tgt / bx + 1)
y = x * rep
by = len(y.encode(enc))
        # make sure that the computation above works on all platforms
self.assertTrue(by > tgt)
# test combination of (auto_buffer_size, truncate)
# - True, True: correctly parsed
# - True, False: correctly parsed
# - False, True: gets warning, result truncated
# - False, False: gets warning, result has extra EOS
out1 = do_mecab_vec([y], '-Owakati', auto_buffer_size=True, truncate=True)
out2 = do_mecab_vec([y], '-Owakati', auto_buffer_size=True, truncate=False)
out3 = do_mecab_vec([y], '-Owakati', auto_buffer_size=False, truncate=True)
with warnings.catch_warnings(record=True) as w:
out4 = do_mecab_vec([y], '-Owakati', auto_buffer_size=False, truncate=False)
self.assertEqual(len(w), 1, 'auto=False, trunc=False')
# test of result length
def num_eos(out):
return len(re.findall(r'\n', out))
self.assertEqual(num_eos(out1), 1)
self.assertEqual(num_eos(out2), 1)
self.assertEqual(num_eos(out3), 1)
self.assertTrue(num_eos(out4) > 1)
# test of truncation
def num_exclam(out):
return len(re.findall(r'!', out))
self.assertEqual(num_exclam(out1), rep)
self.assertEqual(num_exclam(out2), rep)
self.assertTrue(num_exclam(out3) < rep)
#self.assertEqual(num_exclam(out4), rep)
# what happens is ambiguous
        # test of manual -b option
        # if we set a large enough buffer size, we should be fine
out5 = do_mecab_vec([y], '-Owakati', '-b', str(by+1),
auto_buffer_size=False, truncate=True)
out6 = do_mecab_vec([y], '-Owakati', '-b', str(by+1),
auto_buffer_size=False, truncate=False)
self.assertEqual(num_eos(out5), 1)
self.assertEqual(num_eos(out6), 1)
self.assertEqual(num_exclam(out5), rep)
self.assertEqual(num_exclam(out6), rep)
# if the buffer size is small, we should get warning
out7 = do_mecab_vec([y], '-Owakati', '-b', str(by),
auto_buffer_size=False, truncate=True)
with warnings.catch_warnings(record=True) as w:
out8 = do_mecab_vec([y], '-Owakati', '-b', str(by),
auto_buffer_size=False, truncate=False)
self.assertEqual(len(w), 1, 'auto=False, trunc=False, -b small')
self.assertEqual(num_eos(out7), 1)
self.assertTrue(num_eos(out8) > 1)
self.assertTrue(num_exclam(out7) < rep)
#self.assertEqual(num_exclam(out8), rep)
# if we set -b option and auto_buffer_size together,
# we should get warning and we will use the auto size
with warnings.catch_warnings(record=True) as w:
out9 = do_mecab_vec([y], '-Owakati', '-b', str(by+1),
auto_buffer_size=True)
self.assertEqual(len(w), 1, 'auto=False, trunc=False, -b small')
self.assertEqual(num_eos(out9), 1)
self.assertEqual(num_exclam(out9), rep)
# result equality
self.assertEqual(out1, out2)
self.assertEqual(out1, out5)
self.assertEqual(out1, out6)
self.assertEqual(out1, out9)
# and inequality
self.assertNotEqual(out1, out3)
self.assertNotEqual(out1, out4)
self.assertNotEqual(out1, out7)
self.assertNotEqual(out1, out8)
def test_large_input_iter(self):
enc = detect_mecab_enc()
x = u'隣の客はよく柿食う客かな?'
bx = len(x.encode(enc))
# repeat this until it is over 30000 bytes
tgt = 30000
rep = int(tgt / bx + 1)
y = x * rep
by = len(y.encode(enc))
        # make sure that the computation above works on all platforms
self.assertTrue(by > tgt)
out1 = do_mecab_iter(
[y], '-Owakati', byline=True, auto_buffer_size=True, truncate=True)
out1 = list(out1)
out2 = do_mecab_iter(
[y], '-Owakati', byline=True, auto_buffer_size=True, truncate=False)
out2 = list(out2)
out3 = do_mecab_iter(
[y], '-Owakati', byline=True, auto_buffer_size=False, truncate=True)
out3 = list(out3)
with warnings.catch_warnings(record=True) as w:
out4 = do_mecab_iter(
[y], '-Owakati', byline=True, auto_buffer_size=False, truncate=False)
out4 = list(out4)
self.assertEqual(len(w), 1, 'auto=False, trunc=False')
# test of result length
self.assertEqual(len(out1), 1)
self.assertEqual(len(out2), 1)
self.assertEqual(len(out3), 1)
self.assertTrue(len(out4) > 1)
# test of truncation
def num_exclam(out):
return len(re.findall(r'\?', ''.join(out)))
self.assertEqual(num_exclam(out1), rep)
self.assertEqual(num_exclam(out2), rep)
self.assertTrue(num_exclam(out3) < rep)
#self.assertEqual(num_exclam(out4), rep)
# what happens is ambiguous
        # test of manual -b option
        # if we set a large enough buffer size, we should be fine
out5 = do_mecab_iter([y], '-Owakati', '-b', str(by+1), byline=True,
auto_buffer_size=False, truncate=True)
out5 = list(out5)
out6 = do_mecab_iter([y], '-Owakati', '-b', str(by+1), byline=True,
auto_buffer_size=False, truncate=False)
out6 = list(out6)
self.assertEqual(len(out5), 1)
self.assertEqual(len(out6), 1)
self.assertEqual(num_exclam(out5), rep)
self.assertEqual(num_exclam(out6), rep)
# if the buffer size is small, we should get warning
out7 = do_mecab_iter([y], '-Owakati', '-b', str(by), byline=True,
auto_buffer_size=False, truncate=True)
out7 = list(out7)
with warnings.catch_warnings(record=True) as w:
out8 = do_mecab_iter([y], '-Owakati', '-b', str(by), byline=True,
auto_buffer_size=False, truncate=False)
out8 = list(out8)
self.assertEqual(len(w), 1, 'auto=False, trunc=False, -b small')
self.assertEqual(len(out7), 1)
self.assertTrue(len(out8) > 1)
self.assertTrue(num_exclam(out7) < rep)
#self.assertEqual(num_exclam(out8), rep)
# if we set -b option and auto_buffer_size together,
# we should get warning and we will use the auto size
with warnings.catch_warnings(record=True) as w:
out9 = do_mecab_iter([y], '-Owakati', '-b', str(by+1), byline=True,
auto_buffer_size=True)
out9 = list(out9)
self.assertEqual(len(w), 1, 'auto=False, trunc=False, -b small')
self.assertEqual(len(out9), 1)
self.assertEqual(num_exclam(out9), rep)
# result equality
self.assertEqual(out1, out2)
self.assertEqual(out1, out5)
self.assertEqual(out1, out6)
self.assertEqual(out1, out9)
# and inequality
self.assertNotEqual(out1, out3)
self.assertNotEqual(out1, out4)
self.assertNotEqual(out1, out7)
self.assertNotEqual(out1, out8)
class TestMultipleOptions(unittest.TestCase):
def test_multiple_options(self):
out = do_mecab(u"すもももももももものうち", '-Bbegin\n', '-Eend\n')
out = out.strip()
out = re.split(r'(\r\n)|(\n)|(\r)', out)
self.assertEqual(out[0], 'begin')
self.assertEqual(out[-1], 'end')
class TestDictionary(unittest.TestCase):
    def test_ipadic_neologd(self):
dictionary = "mecab-ipadic-neologd"
if find_dictionary(dictionary):
res = do_mecab(u"メロンパンを食べたい", dictionary=dictionary)
# neologd combines melon and pan
self.assertTrue(res.find(u"メロンパン") == 0, res)
    def test_unidic_neologd(self):
dictionary = "mecab-unidic-neologd"
if find_dictionary(dictionary):
res = do_mecab(u"しまっておいたメロンパン", dictionary=dictionary)
self.assertTrue(res.find(u"メロンパン") >= 0, res)
            # unidic provides normalized kanji expressions for some words
self.assertTrue(res.find(u"仕舞う") >= 0, res)
self.assertTrue(res.find(u"置く") >= 0, res)
if __name__ == '__main__':
unittest.main()
|
"""
Support code for the test suite.
There's some Python 2.x <-> 3.x compatibility code here.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import with_statement
import sys
import threading
import pickle
import Pyro4
__all__ = ["tobytes", "tostring", "unicode", "unichr", "basestring", "StringIO", "next",
"AtomicCounter", "NonserializableError", "MyThing", "MyThingExposed",
"MyThingExposedSub", "MyThingSub", "unittest"]
if sys.version_info < (3, 0):
# noinspection PyUnresolvedReferences
from StringIO import StringIO
def tobytes(string, encoding=None):
return string
def tostring(bytes):
return bytes
unicode = unicode
unichr = unichr
basestring = basestring
else:
from io import StringIO
def tobytes(string, encoding="iso-8859-1"):
return bytes(string, encoding)
def tostring(bytes, encoding="utf-8"):
return str(bytes, encoding)
unicode = str
unichr = chr
basestring = str
if sys.version_info < (2, 6):
def next(iterable):
return iterable.next()
else:
next = next
if ((2, 7) <= sys.version_info < (3, 0)) or (sys.version_info >= (3, 1)):
import unittest
else:
# noinspection PyUnresolvedReferences
import unittest2 as unittest
class AtomicCounter(object):
def __init__(self):
self.lock = threading.Lock()
self.count = 0
def reset(self):
self.count = 0
def incr(self):
with self.lock:
self.count += 1
def value(self):
with self.lock:
return self.count
class NonserializableError(Exception):
def __reduce__(self):
raise pickle.PicklingError("to make this error non-serializable")
class MyThing(object):
c_attr = "hi"
propvalue = 42
_private_attr1 = "hi"
__private_attr2 = "hi"
name = ""
def __init__(self, name="dummy"):
self.name = name
def __eq__(self, other):
if type(other) is MyThing:
return self.name == other.name
return False
def method(self, arg, default=99, **kwargs):
pass
@staticmethod
def staticmethod(arg):
pass
@classmethod
def classmethod(cls, arg):
pass
def __dunder__(self):
pass
def __private(self):
pass
def _private(self):
pass
@Pyro4.expose
@property
def prop1(self):
return self.propvalue
@Pyro4.expose
@prop1.setter
def prop1(self, value):
self.propvalue = value
@Pyro4.expose
@property
def readonly_prop1(self):
return self.propvalue
@property
def prop2(self):
return self.propvalue
@prop2.setter
def prop2(self, value):
self.propvalue = value
@Pyro4.oneway
def oneway(self, arg):
pass
@Pyro4.expose
def exposed(self):
pass
__hash__ = object.__hash__
@Pyro4.expose
class MyThingExposed(object):
blurp = 99 # won't be exposed, because it is a class attribute and not a property
_name = ""
def __init__(self, name="dummy"):
self._name = name
def __eq__(self, other):
if type(other) is MyThingExposed:
return self._name == other._name
return False
def foo(self, arg):
return arg
@classmethod
def classmethod(cls, arg):
return arg
@staticmethod
def staticmethod(arg):
return arg
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def readonly_name(self):
return self._name
@Pyro4.oneway
def remotemethod(self, arg):
return arg
def _p(self):
pass
def __private(self):
pass
def __dunder__(self):
pass
__hash__ = object.__hash__
class MyThingExposedSub(MyThingExposed):
def sub_exposed(self):
pass
def sub_unexposed(self):
pass
class MyThingSub(MyThing):
@Pyro4.expose
def sub_exposed(self):
pass
def sub_unexposed(self):
pass
|
from example_pkg import a
from example_pkg import b
print(a.name)
print(a.__path__)
print(b.name)
print(b.__path__)
|
from django.db import models
from django.utils.translation import gettext_lazy as _
# As the Django docs suggest, define CHOICES via suitably-named class constants
# Fields restricted to choices are useful when you need a predefined set of filter values
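# Illustrative usage (hypothetical view/template code, not part of this app):
#   form = Form.objects.first()
#   form.get_form_display()   # human-readable label, e.g. "Circle"
#   form.get_form             # CSS modifier used in templates, e.g. "circle"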
class Form(models.Model):
#
SIMPLE = "SP"
CIRCLE = "CB"
SPLIT_WITH_ICON = "SBI"
BRAND = "BB"
FORM_CHOICES = [
(SIMPLE, _('Simple')),
(CIRCLE, _('Circle')),
(BRAND, _('Brand')),
        (SPLIT_WITH_ICON, _('Split with Icon')),
]
form = models.CharField(_("Form"),
max_length= 3,
choices= FORM_CHOICES,
default= SIMPLE,
)
@property
def get_form(self):
if self.form == self.SIMPLE:
return ""
elif self.form == self.CIRCLE:
return "circle"
elif self.form == self.BRAND:
return "block"
elif self.form == self.SPLIT_WITH_ICON:
return "icon-split"
def __str__(self):
return self.get_form_display()
class Size(models.Model):
DEFAULT = "DB"
SMALL = "SB"
LARGE = "LG"
SIZE_CHOICES = [
(DEFAULT, _("Default")),
(SMALL, _("Small")),
(LARGE, _("Large")),
]
size = models.CharField(_("Size"),
max_length=3,
choices=SIZE_CHOICES,
default=DEFAULT,
)
@property
def get_size(self):
if self.size == self.DEFAULT:
return ""
elif self.size == self.SMALL:
return "sm"
elif self.size == self.LARGE:
return "lg"
def __str__(self):
return self.get_size_display()
class Color(models.Model):
PRIMARY = 1
SECONDARY = 2
SUCCESS = 3
INFO = 4
WARNING = 5
ERROR = 6
LIGHT = 7
COLOR_CHOICES = [
(PRIMARY, _("Primary")),
(SECONDARY, _("Secondary")),
(SUCCESS, _("Success")),
(INFO, _("Info")),
(WARNING, _("Warning")),
(ERROR, _("Error")),
(LIGHT, _("Light")),
]
color = models.IntegerField(_('Color'),
choices=COLOR_CHOICES,
default=PRIMARY,
)
@property
def get_color(self):
if self.color == self.PRIMARY:
return "primary"
elif self.color == self.SECONDARY:
return "secondary"
elif self.color == self.SUCCESS:
return "success"
elif self.color == self.INFO:
return "info"
elif self.color == self.WARNING:
return "warning"
elif self.color == self.ERROR:
            # Django's messages framework tags errors as 'error', while the Bootstrap class is 'danger'
return "danger"
elif self.color == self.LIGHT:
return "light"
def __str__(self):
return self.get_color_display()
class FaStyle(models.Model):
    # Font Awesome version 5: instead of a single fa prefix for every icon, you pick
    # fas for solid, far for regular, fal for light, or fab for brand icons
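    # Illustrative result (hypothetical button instance): a template would typically combine the
    # style and icon models into a class string such as
    #   f'{button.style.get_style} fa-{button.icon.style}'   # e.g. "far fa-user", depending on how FaIcon.style is stored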
SOLID = 1
REGULAR = 2
LIGHT = 3
BRAND = 4
FA_STYLE_CHOICES = [
(SOLID, _("Solid")),
(REGULAR, _("Regular")),
(LIGHT, _("Light")),
(BRAND, _("Brand")),
]
style = models.IntegerField(_('Icon Style'),
choices=FA_STYLE_CHOICES,
default=REGULAR,
)
@property
def get_style(self):
if self.style== self.SOLID:
return "fas"
elif self.style == self.REGULAR:
return "far"
elif self.style == self.LIGHT:
return "fal"
elif self.style == self.BRAND:
return "fab"
def __str__(self):
return self.get_style_display()
class FaIcon(models.Model):
    # Font Awesome version 5: instead of a single fa prefix for every icon, you pick
    # fas for solid, far for regular, fal for light, or fab for brand icons
style = models.CharField(_("Style"),
max_length=120,
)
def __str__(self):
return self.style
# Create your models here.
class Button(models.Model):
caption = models.CharField(max_length=20, blank=True)
    # A button normally triggers some operation via POST to some href="url".
    # Just for illustration the URL is stored in the database here;
    # in practice you should use get_absolute_url on your model
href = models.CharField(max_length=120, blank=True)
    # As the Django docs suggest, define CHOICES via suitably-named class constants
form = models.ForeignKey(Form,
on_delete=models.CASCADE,
related_name='buttons')
size = models.ForeignKey(Size,
on_delete=models.CASCADE,
related_name='buttons')
color = models.ForeignKey(Color,
on_delete=models.CASCADE,
related_name='buttons')
style = models.ForeignKey(FaStyle,
on_delete=models.CASCADE,
related_name='buttons')
icon = models.ForeignKey(FaIcon,
on_delete=models.CASCADE,
related_name='buttons')
|
# SPDX-FileCopyrightText: Copyright (c) 2021 Dan Halbert for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
import adafruit_board_toolkit.circuitpython_serial
comports = adafruit_board_toolkit.circuitpython_serial.repl_comports()
if not comports:
raise Exception("No CircuitPython boards found")
# Print the device paths or names that connect to a REPL.
print([comport.device for comport in comports])
|
#!/usr/bin/python
# coding:utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
# Convert camelCase to snake_case (underscores)
def camel_to_underline(camel_format):
underline_format = ''
if isinstance(camel_format, str):
for _s_ in camel_format:
underline_format += _s_ if _s_.islower() else '_' + _s_.lower()
if underline_format.startswith('_'):
underline_format = underline_format[1:]
return underline_format
# Convert snake_case (underscores) to CamelCase
def underline_to_camel(underline_format):
camel_format = ''
if isinstance(underline_format, str):
for _s_ in underline_format.split('_'):
camel_format += _s_.capitalize()
return camel_format
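if __name__ == '__main__':
    # Quick illustration (hypothetical inputs, not from the original script):
    print(camel_to_underline('camelFormatText'))   # camel_format_text
    print(underline_to_camel('under_line_text'))   # UnderLineText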
|
import random
import unittest
from pytm.pytm import (
TM,
Actor,
Boundary,
Data,
Dataflow,
Datastore,
Process,
Server,
Threat,
)
class TestUniqueNames(unittest.TestCase):
def test_duplicate_boundary_names_have_different_unique_names(self):
random.seed(0)
object_1 = Boundary("foo")
object_2 = Boundary("foo")
object_1_uniq_name = object_1._uniq_name()
object_2_uniq_name = object_2._uniq_name()
self.assertNotEqual(object_1_uniq_name, object_2_uniq_name)
self.assertEqual(object_1_uniq_name, "boundary_foo_acf3059e70")
self.assertEqual(object_2_uniq_name, "boundary_foo_88f2d9c06f")
class TestAttributes(unittest.TestCase):
def test_write_once(self):
user = Actor("User")
with self.assertRaises(ValueError):
user.name = "Computer"
def test_kwargs(self):
user = Actor("User", isAdmin=True)
self.assertEqual(user.isAdmin, True)
user = Actor("User")
self.assertEqual(user.isAdmin, False)
user.isAdmin = True
self.assertEqual(user.isAdmin, True)
def test_load_threats(self):
tm = TM("TM")
self.assertNotEqual(len(TM._threats), 0)
with self.assertRaises(FileNotFoundError):
tm.threatsFile = "threats.json"
with self.assertRaises(FileNotFoundError):
TM("TM", threatsFile="threats.json")
def test_responses(self):
tm = TM("my test tm", description="aa", isOrdered=True)
user = Actor("User")
web = Server("Web Server")
db = Datastore("SQL Database")
http_req = Dataflow(user, web, "http req")
insert = Dataflow(web, db, "insert data")
query = Dataflow(web, db, "query")
query_resp = Dataflow(db, web, "query results", responseTo=query)
http_resp = Dataflow(web, user, "http resp")
http_resp.responseTo = http_req
self.assertTrue(tm.check())
self.assertEqual(http_req.response, http_resp)
self.assertIs(http_resp.isResponse, True)
self.assertIs(query_resp.isResponse, True)
self.assertEqual(query_resp.responseTo, query)
self.assertEqual(query.response, query_resp)
self.assertIsNone(insert.response)
self.assertIs(insert.isResponse, False)
def test_defaults(self):
tm = TM("TM")
user_data = Data("HTTP")
user = Actor("User", data=user_data, authenticatesDestination=True)
json_data = Data("JSON")
server = Server(
"Server", port=443, protocol="HTTPS", isEncrypted=True, data=json_data
)
sql_resp = Data("SQL resp")
db = Datastore(
"PostgreSQL",
isSQL=True,
port=5432,
protocol="PostgreSQL",
isEncrypted=False,
data=sql_resp,
)
worker = Process("Task queue worker")
req_get_data = Data("HTTP GET")
req_get = Dataflow(user, server, "HTTP GET", data=req_get_data)
server_query_data = Data("SQL")
server_query = Dataflow(server, db, "Query", data=server_query_data)
result_data = Data("Results")
result = Dataflow(db, server, "Results", data=result_data, isResponse=True)
resp_get_data = Data("HTTP Response")
resp_get = Dataflow(server, user, "HTTP Response", data=resp_get_data, isResponse=True)
req_post_data = Data("JSON")
req_post = Dataflow(user, server, "HTTP POST", data=req_post_data)
resp_post = Dataflow(server, user, "HTTP Response", isResponse=True)
sql_data = Data("SQL")
worker_query = Dataflow(worker, db, "Query", data=sql_data)
Dataflow(db, worker, "Results", isResponse=True)
cookie = Data("Auth Cookie", carriedBy=[req_get, req_post])
self.assertTrue(tm.check())
self.assertEqual(req_get.srcPort, -1)
self.assertEqual(req_get.dstPort, server.port)
self.assertEqual(req_get.isEncrypted, server.isEncrypted)
self.assertEqual(
req_get.authenticatesDestination, user.authenticatesDestination
)
self.assertEqual(req_get.protocol, server.protocol)
self.assertTrue(user.data.issubset(req_get.data))
self.assertEqual(server_query.srcPort, -1)
self.assertEqual(server_query.dstPort, db.port)
self.assertEqual(server_query.isEncrypted, db.isEncrypted)
self.assertEqual(
server_query.authenticatesDestination, server.authenticatesDestination
)
self.assertEqual(server_query.protocol, db.protocol)
self.assertTrue(server.data.issubset(server_query.data))
self.assertEqual(result.srcPort, db.port)
self.assertEqual(result.dstPort, -1)
self.assertEqual(result.isEncrypted, db.isEncrypted)
self.assertEqual(result.authenticatesDestination, False)
self.assertEqual(result.protocol, db.protocol)
self.assertTrue(db.data.issubset(result.data))
self.assertEqual(resp_get.srcPort, server.port)
self.assertEqual(resp_get.dstPort, -1)
self.assertEqual(resp_get.isEncrypted, server.isEncrypted)
self.assertEqual(resp_get.authenticatesDestination, False)
self.assertEqual(resp_get.protocol, server.protocol)
self.assertTrue(server.data.issubset(resp_get.data))
self.assertEqual(req_post.srcPort, -1)
self.assertEqual(req_post.dstPort, server.port)
self.assertEqual(req_post.isEncrypted, server.isEncrypted)
self.assertEqual(
req_post.authenticatesDestination, user.authenticatesDestination
)
self.assertEqual(req_post.protocol, server.protocol)
self.assertTrue(user.data.issubset(req_post.data))
self.assertEqual(resp_post.srcPort, server.port)
self.assertEqual(resp_post.dstPort, -1)
self.assertEqual(resp_post.isEncrypted, server.isEncrypted)
self.assertEqual(resp_post.authenticatesDestination, False)
self.assertEqual(resp_post.protocol, server.protocol)
self.assertTrue(server.data.issubset(resp_post.data))
self.assertListEqual(server.inputs, [req_get, req_post])
self.assertListEqual(server.outputs, [server_query])
self.assertListEqual(worker.inputs, [])
self.assertListEqual(worker.outputs, [worker_query])
self.assertListEqual(cookie.carriedBy, [req_get, req_post])
self.assertSetEqual(set(cookie.processedBy), set([user, server]))
self.assertIn(cookie, req_get.data)
self.assertSetEqual(
set([d.name for d in req_post.data]), set([cookie.name, "HTTP", "JSON"])
)
class TestMethod(unittest.TestCase):
def test_defaults(self):
tm = TM("my test tm", description="aa", isOrdered=True)
internet = Boundary("Internet")
cloud = Boundary("Cloud")
user = Actor("User", inBoundary=internet)
server = Server("Server")
db = Datastore("DB", inBoundary=cloud, isSQL=True)
func = Datastore("Lambda function", inBoundary=cloud)
request = Dataflow(user, server, "request")
response = Dataflow(server, user, "response", isResponse=True)
user_query = Dataflow(user, db, "user query")
server_query = Dataflow(server, db, "server query")
func_query = Dataflow(func, db, "func query")
default_target = ["Actor", "Boundary", "Dataflow", "Datastore", "Server"]
testCases = [
{"target": server, "condition": "target.oneOf(Server, Datastore)"},
{"target": server, "condition": "not target.oneOf(Actor, Dataflow)"},
{"target": request, "condition": "target.crosses(Boundary)"},
{"target": user_query, "condition": "target.crosses(Boundary)"},
{"target": server_query, "condition": "target.crosses(Boundary)"},
{"target": func_query, "condition": "not target.crosses(Boundary)"},
{"target": func_query, "condition": "not target.enters(Boundary)"},
{"target": func_query, "condition": "not target.exits(Boundary)"},
{"target": request, "condition": "not target.enters(Boundary)"},
{"target": request, "condition": "target.exits(Boundary)"},
{"target": response, "condition": "target.enters(Boundary)"},
{"target": response, "condition": "not target.exits(Boundary)"},
{"target": user, "condition": "target.inside(Boundary)"},
{"target": func, "condition": "not any(target.inputs)"},
{
"target": server,
"condition": "any(f.sink.oneOf(Datastore) and f.sink.isSQL "
"for f in target.outputs)",
},
]
self.assertTrue(tm.check())
for case in testCases:
t = Threat(SID="", target=default_target, condition=case["condition"])
self.assertTrue(
t.apply(case["target"]),
"Failed to match {} against {}".format(
case["target"],
case["condition"],
),
)
|
import uuid
from django.db import models
from django.utils import timezone
from django.core.mail import send_mail
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.validators import UnicodeUsernameValidator
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, email, password, **extra_fields):
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(email, password, **extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
uuid = models.UUIDField(default=uuid.uuid4, primary_key=True, editable=False)
username_validator = UnicodeUsernameValidator()
username = models.CharField(
_('username'),
max_length=150,
blank=True,
null=True,
help_text=_('Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.'),
validators=[username_validator],
error_messages={
'unique': _("A user with that username already exists."),
},
)
last_name = models.CharField(_('姓'), max_length=150)
first_name = models.CharField(_('名'), max_length=150)
last_name_kana = models.CharField(_('姓(かな)'), max_length=150)
first_name_kana = models.CharField(_('名(かな)'), max_length=150)
old_last_name = models.CharField(_('旧姓'), max_length=150, blank=True, null=True)
old_last_name_kana = models.CharField(_('旧姓(かな)'), max_length=150, blank=True, null=True)
nickname = models.CharField(_('ニックネーム'), max_length=150, blank=True, null=True)
email = models.EmailField(_('メールアドレス'), unique=True)
sex = models.CharField(_('性別'), max_length=4, choices=(('1','男性'), ('2','女性')))
birthday = models.DateField(_('生年月日'), blank=True, null=True)
country = models.CharField(_('国'), default='日本', max_length=15, editable=False)
postal_code = models.CharField(_('郵便番号(ハイフンなし)'), max_length=7)
prefecture = models.CharField(_('都道府県'), max_length=5)
address = models.CharField(_('市区町村番地'), max_length=100)
building = models.CharField(_('建物名'), max_length=30)
tel = models.CharField(_('電話番号(ハイフンなし)'), max_length=11, blank=True, null=True)
url = models.URLField(_('URL'), max_length=300, blank=True, null=True)
photo = models.ImageField(_('写真'), blank=True, null=True)
is_create_orchestra = models.BooleanField(default=False, editable=False)
is_staff = models.BooleanField(
_('staff status'),
default=False,
help_text=_('Designates whether the user can log into this admin site.'),
)
is_active = models.BooleanField(
_('active'),
default=True,
help_text=_(
'Designates whether this user should be treated as active. '
'Unselect this instead of deleting accounts.'
),
)
# created_at = models.DateTimeField(_('作成日時'), auto_now_add=True, editable=False)
# updated_at = models.DateTimeField(_('更新日時'), auto_now=True, editable=False)
created_at = models.DateTimeField(_('作成日時'), default=timezone.now)
updated_at = models.DateTimeField(_('更新日時'), default=timezone.now)
# is_active = models.BooleanField(_('有効/無効'), default=True)
objects = UserManager()
USERNAME_FIELD = 'email'
EMAIL_FIELD = 'email'
REQUIRED_FIELDS = []
class Meta:
db_table = 'User'
verbose_name = _('user')
verbose_name_plural = _('ユーザー')
def clean(self):
super().clean()
self.email = self.__class__.objects.normalize_email(self.email)
def get_full_name(self):
full_name = '%s %s' % (self.last_name, self.first_name)
return full_name.strip()
def get_full_name_kana(self):
full_name_kana = '%s %s' % (self.last_name_kana, self.first_name_kana)
return full_name_kana.strip()
def get_short_name(self):
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
send_mail(subject, message, from_email, [self.email], **kwargs)
# @property
# def username(self):
# return self.email
|
from django.db.models import Sum
from utils import constants
def shop_cart(request):
"""当前用户的购物车信息"""
user = request.user
cart_list = []
cart_total = {}
cart_count = 0
if user.is_authenticated:
        # list of items in my shopping cart
cart_list = user.carts.filter(
status=constants.ORDER_STATUS_INIT
)
cart_total = cart_list.aggregate(
sum_amount=Sum('amount'),
sum_count=Sum('count')
)
cart_count = cart_list.count()
return {
        'cart_count': cart_count,  # number of items in the cart
'cart_list': cart_list,
'cart_total': cart_total,
}
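# For this context processor to run on every request it must be registered in settings.py.
# A hedged sketch of that registration, assuming a dotted path of
# 'yourapp.context_processors.shop_cart' (a placeholder, not taken from the source):
# TEMPLATES = [{
#     ...
#     'OPTIONS': {
#         'context_processors': [
#             'django.template.context_processors.request',
#             'yourapp.context_processors.shop_cart',
#         ],
#     },
# }]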
|
from rest_framework import generics
from rest_framework.permissions import (
SAFE_METHODS, BasePermission, IsAuthenticated
)
from posts.models import Post
from .serializers import PostSerializer
class PostUserPermissions(BasePermission):
message = "Only the author of this post can edit or delete"
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
return obj.author_id == request.user
class PostsListCreate(generics.ListCreateAPIView):
queryset = Post.objects.all()
serializer_class = PostSerializer
class PostsDetail(generics.RetrieveDestroyAPIView, PostUserPermissions):
permission_classes = [PostUserPermissions & IsAuthenticated]
queryset = Post.objects.all()
serializer_class = PostSerializer
|
# -*- coding:utf-8 -*-
"""
@Author : g1879
@Contact : g1879@qq.com
@File : session_page.py
"""
import os
import re
from pathlib import Path
from random import random, randint
from time import time
from typing import Union, List
from urllib.parse import urlparse, quote
from requests_html import HTMLSession, HTMLResponse
from .common import get_loc_from_str, translate_loc_to_xpath, avoid_duplicate_name
from .config import OptionsManager
from .session_element import SessionElement, execute_session_find
class SessionPage(object):
"""SessionPage封装了页面操作的常用功能,使用requests_html来获取、解析网页。"""
def __init__(self, session: HTMLSession, timeout: float = 10):
"""初始化函数"""
self._session = session
self.timeout = timeout
self._url = None
self._url_available = None
self._response = None
@property
def session(self) -> HTMLSession:
return self._session
@property
def response(self) -> HTMLResponse:
return self._response
@property
def url(self) -> str:
"""当前访问url"""
return self._url
@property
def url_available(self) -> bool:
"""url有效性"""
return self._url_available
@property
def cookies(self) -> dict:
"""当前session的cookies"""
return self.session.cookies.get_dict()
@property
def title(self) -> str:
"""获取网页title"""
return self.ele(('css selector', 'title')).text
@property
def html(self) -> str:
"""获取元素innerHTML,如未指定元素则获取所有源代码"""
return self.response.html.html
def ele(self, loc_or_ele: Union[tuple, str, SessionElement], mode: str = None, show_errmsg: bool = False) \
-> Union[SessionElement, List[SessionElement], None]:
"""查找一个元素
:param loc_or_ele: 页面元素地址
:param mode: 以某种方式查找元素,可选'single','all'
:param show_errmsg: 是否显示错误信息
:return: 页面元素对象或列表
"""
if isinstance(loc_or_ele, SessionElement):
return loc_or_ele
elif isinstance(loc_or_ele, str):
loc = get_loc_from_str(loc_or_ele)
else:
loc = translate_loc_to_xpath(loc_or_ele)
return execute_session_find(self.response.html, loc, mode, show_errmsg)
def eles(self, loc: Union[tuple, str], show_errmsg: bool = False) -> List[SessionElement]:
"""查找符合条件的所有元素"""
        return self.ele(loc, mode='all', show_errmsg=show_errmsg)
def get(self, url: str, go_anyway: bool = False, **kwargs) -> Union[bool, None]:
"""用get方式跳转到url,调用_make_response()函数生成response对象"""
to_url = quote(url, safe='/:&?=%;#@')
if not url or (not go_anyway and self.url == to_url):
return
self._url = to_url
self._response = self._make_response(to_url, **kwargs)
if self._response:
            self._response.html.encoding = self._response.encoding  # work around requests_html losing the encoding
self._url_available = True if self._response and self._response.ok else False
return self._url_available
def post(self, url: str, data: dict = None, go_anyway: bool = False, **kwargs) -> Union[bool, None]:
"""用post方式跳转到url,调用_make_response()函数生成response对象"""
to_url = quote(url, safe='/:&?=%;#@')
if not url or (not go_anyway and self._url == to_url):
return
self._url = to_url
self._response = self._make_response(to_url, mode='post', data=data, **kwargs)
if self._response:
            self._response.html.encoding = self._response.encoding  # work around requests_html losing the encoding
self._url_available = True if self._response and self._response.status_code == 200 else False
return self._url_available
def download(self, file_url: str, goal_path: str = None, rename: str = None, show_msg: bool = False,
**kwargs) -> tuple:
"""下载一个文件
生成的response不写入self._response,是临时的
:param file_url: 文件url
:param goal_path: 存放路径url
:param rename: 重命名
:param kwargs: 连接参数
:param show_msg: 是否显示下载信息
:return: 元组,bool和状态信息(成功时信息为文件名)
"""
goal_path = goal_path or OptionsManager().get_value('paths', 'global_tmp_path')
if not goal_path:
raise IOError('No path specified.')
kwargs['stream'] = True
if 'timeout' not in kwargs:
kwargs['timeout'] = 20
r = self._make_response(file_url, mode='get', **kwargs)
if not r:
if show_msg:
print('Invalid link')
return False, 'Invalid link'
        # ------------------- determine the file name -------------------
        # use the name from the response header if present, otherwise take it from the url
        # (the url is not guaranteed to contain a file name)
if 'Content-disposition' in r.headers:
file_name = r.headers['Content-disposition'].split('"')[1].encode('ISO-8859-1').decode('utf-8')
elif os.path.basename(file_url):
file_name = os.path.basename(file_url).split("?")[0]
else:
            file_name = f'untitled_{time()}_{randint(0, 100)}'
file_full_name = rename or file_name
        # avoid clashing with an existing file name
file_full_name = avoid_duplicate_name(goal_path, file_full_name)
        # print the file about to be downloaded
if show_msg:
print_txt = file_full_name if file_name == file_full_name else f'{file_name} -> {file_full_name}'
print(print_txt)
print(f'Downloading to: {goal_path}')
        # ------------------- start the download -------------------
        # get the remote file size
file_size = int(r.headers['Content-Length']) if 'Content-Length' in r.headers else None
        # downloaded size so far and download status
downloaded_size, download_status = 0, False
        # full target path
full_path = Path(f'{goal_path}\\{file_full_name}')
try:
with open(str(full_path), 'wb') as tmpFile:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
tmpFile.write(chunk)
                        # show progress when the header reported a file size
if show_msg and file_size:
downloaded_size += 1024
rate = downloaded_size / file_size if downloaded_size < file_size else 1
print('\r {:.0%} '.format(rate), end="")
except Exception as e:
download_status, info = False, f'Download failed.\n{e}'
raise
else:
download_status, info = (False, 'File size is 0.') if full_path.stat().st_size == 0 else (True, 'Success.')
finally:
            # delete the file if the download failed
if not download_status and full_path.exists():
full_path.unlink()
r.close()
        # ------------------- report and return -------------------
if show_msg:
print(info, '\n')
info = file_full_name if download_status else info
return download_status, info
def _make_response(self, url: str, mode: str = 'get', data: dict = None, **kwargs) -> Union[HTMLResponse, bool]:
"""生成response对象。接收mode参数,以决定用什么方式。
:param url: 要访问的网址
:param mode: 'get','post'中选择
:param data: 提交的数据
:param kwargs: 其它参数
:return: Response对象
"""
if mode not in ['get', 'post']:
raise ValueError("mode must be 'get' or 'post'.")
url = quote(url, safe='/:&?=%;#@')
        # set the Referer and Host headers
kwargs_set = set(x.lower() for x in kwargs)
if 'headers' in kwargs_set:
header_set = set(x.lower() for x in kwargs['headers'])
if self._url and 'referer' not in header_set:
kwargs['headers']['Referer'] = self._url
if 'host' not in header_set:
kwargs['headers']['Host'] = urlparse(url).hostname
else:
kwargs['headers'] = self.session.headers
kwargs['headers']['Host'] = urlparse(url).hostname
if self._url:
kwargs['headers']['Referer'] = self._url
if 'timeout' not in kwargs_set:
kwargs['timeout'] = self.timeout
try:
r = None
if mode == 'get':
r = self.session.get(url, **kwargs)
elif mode == 'post':
r = self.session.post(url, data=data, **kwargs)
except:
return_value = False
else:
headers = dict(r.headers)
if 'Content-Type' not in headers or 'charset' not in headers['Content-Type']:
re_result = re.search(r'<meta.*?charset=[ \'"]*([^"\' />]+).*?>', r.text)
try:
charset = re_result.group(1)
except:
charset = 'utf-8'
else:
charset = headers['Content-Type'].split('=')[1]
            # strip backspace characters that could garble the content or break parsing
r._content = r.content if 'stream' in kwargs and kwargs['stream'] else r.content.replace(b'\x08', b'\\b')
r.encoding = charset
return_value = r
return return_value
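# A minimal usage sketch (illustrative only, not part of the original module):
# build a page from a plain HTMLSession and fetch an example URL.
if __name__ == '__main__':
    example_page = SessionPage(HTMLSession())
    if example_page.get('https://example.com'):
        # print the parsed <title> of the fetched page
        print(example_page.title)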
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mlp
import seaborn as sns
mlp.style.use("seaborn")
df=pd.read_pickle("../data/df.pkl")
df['E10'] = df['Real Earnings'].rolling(window=120, min_periods=120).mean()
df["P/E10"] = df['Real Price'] / df['E10']
# Plot P
plt.plot(df["Date Fraction"], df["Real Price"])
plt.title("Historical S&P Prices")
plt.xlabel("Date")
plt.ylabel("Stock Price, P")
plt.savefig("SPY.png")
plt.clf()
# Plot E
plt.plot(df["Date Fraction"], df["Real Earnings"], label="E")
plt.plot(df["Date Fraction"], df["E10"], label="E10")
plt.title("Historical S&P Earnings")
plt.xlabel("Date")
plt.ylabel("Earnings, E")
plt.legend()
plt.savefig("E.png")
plt.clf()
# Plot D
plt.plot(df["Date Fraction"], df["Real Dividend"])
plt.title("Historical S&P Dividends")
plt.xlabel("Date")
plt.ylabel("Dividends, D")
plt.savefig("D.png")
plt.clf()
# Plot CAPE
plt.plot(df["Date Fraction"], df["CAPE"], label="CAPE")
plt.plot(df["Date Fraction"], df["P/E10"], label="CAPE_Reconstructed")
plt.title("Historical S&P CAPE")
plt.xlabel("Date")
plt.ylabel("P/E10")
plt.legend()
plt.savefig("CAPE.png")
plt.clf()
# Plot log(CAPE) log(E10_t+1/E10_t)
df["10 yr. MAE growth"] = np.log(df.E10.shift(-12)/df.E10.shift())/10
plt.xscale('log')
plt.scatter(df["CAPE"], df["10 yr. MAE growth"])
plt.ylabel("10 Year MA(E) Growth")
plt.xlabel("CAPE, P/E10")
plt.title("Ten Year MA(E) Growth vs CAPE")
plt.savefig("E6A.png")
plt.clf()
# Plot log(CAPE) log(P_{t+10*12} /P_t)
df["10 yr. P growth"] = np.log(df.P.shift(-120)/df.P)/10
plt.xscale('log')
plt.scatter(df["CAPE"], df["10 yr. P growth"])
plt.ylabel("10 Year P Growth")
plt.xlabel("CAPE, P/E10")
plt.title("Ten Year Price Growth vs CAPE")
plt.savefig("E6B.png")
plt.clf()
# Plot Sparsely
df_sparse=df[::6]
df_sparse_old=df_sparse.loc[df_sparse["Date Fraction"]<=1996]
df_sparse_new=df_sparse.loc[df_sparse["Date Fraction"]>1996]
# plt.xscale('log')
plt.scatter(df_sparse_old["CAPE"], df_sparse_old["10 yr. MAE growth"], label="1881-2006")
plt.scatter(df_sparse_new["CAPE"], df_sparse_new["10 yr. MAE growth"], label="2007-2021")
plt.ylabel("10 Year MA(E) Growth")
plt.xlabel("CAPE, P/E10")
plt.title("Ten Year MA(E) Growth vs CAPE")
plt.legend()
plt.savefig("E6A_sparse.png")
plt.clf()
# plt.xscale('log')
plt.scatter(df_sparse_old["CAPE"], df_sparse_old["10 yr. P growth"], label="1881-2006")
plt.scatter(df_sparse_new["CAPE"], df_sparse_new["10 yr. P growth"], label="2007-2021")
plt.legend()
plt.ylabel("10 Year Price Growth")
plt.xlabel("CAPE, P/E10")
plt.title("Ten Year Price Growth vs CAPE")
plt.savefig("E6B_sparse.png")
plt.clf()
# Plot Sparsely With Regression Lines
# plt.xscale('log')
sns.regplot(x="CAPE", y="10 yr. MAE growth",data=df_sparse_old, label="1881-2006")
sns.regplot(x="CAPE", y="10 yr. MAE growth",data=df_sparse_new, label="2007-2021")
plt.ylabel("10 Year MA(E) Growth (Annualized)")
plt.xlabel("CAPE, P/E10")
plt.title("Ten Year MA(E) Growth vs CAPE")
plt.legend()
plt.savefig("E6A_sparse_sns.png")
plt.clf()
# plt.xscale('log')
sns.regplot(x="CAPE", y="10 yr. P growth",data=df_sparse_old, label="1886-2006")
sns.regplot(x="CAPE", y="10 yr. P growth",data=df_sparse_new, label="2007-2021")
plt.legend()
plt.ylabel("10 Year Price Growth (Anualized)")
plt.xlabel("CAPE, P/E10")
plt.title("Ten Year Price Growth vs CAPE")
plt.savefig("E6B_sparse_sns.png")
plt.clf()
# Plot In buckets
df["CAPE quintile"]=pd.qcut(df["CAPE"], 5, labels=False)
sns.boxplot(x="CAPE quintile", y="10 yr. P growth", data=df)
plt.savefig("CAPE_decile_all.png"); plt.clf()
df["time"]="1886-2006"
df.loc[df["Date Fraction"]>1996, "time"]="2007-2021"
sns.boxplot(x="CAPE quintile", y="10 yr. P growth", data=df, hue="time")
plt.savefig("CAPE_decile_hued.png"); plt.clf()
# Changing the quintile split to be time-wise
df.loc[df["Date Fraction"]<=1996, "CAPE quintile"]=pd.qcut(df.loc[df["Date Fraction"]<=1996]["CAPE"], 5, labels=False)
df.loc[df["Date Fraction"]>1996, "CAPE quintile"]=pd.qcut(df.loc[df["Date Fraction"]>1996]["CAPE"], 5, labels=False)
df_old=df.loc[df["Date Fraction"]<=1996]
df_new=df.loc[df["Date Fraction"]>1996]
sns.boxplot(x="CAPE quintile", y="10 yr. P growth", data=df_old); sns.regplot(x="CAPE quintile", y="10 yr. P growth", scatter=False, data=df_old); plt.title("1886-2006");plt.savefig("CAPE_decile_old.png"); plt.clf()
sns.boxplot(x="CAPE quintile", y="10 yr. P growth", data=df_new); sns.regplot(x="CAPE quintile", y="10 yr. P growth", scatter=False, data=df_new); plt.title("2007-2021");plt.savefig("CAPE_decile_new.png"); plt.clf()
sns.boxplot(x="CAPE quintile", y="10 yr. P growth", data=df, hue="time"); sns.regplot(x="CAPE quintile", y="10 yr. P growth", scatter=False, data=df_old);sns.regplot(x="CAPE quintile", y="10 yr. P growth", scatter=False, data=df_new); plt.savefig("CAPE_quintile_timewise_hued.png"); plt.clf()
|
import re
import inflect
from itertools import product
_parse_csv_line = lambda x: (x.split(','))
def flatten_tuple_results(results):
    # Convert tuple results to a list of CSV strings and flatten them
result_arr = [item for sublist in results for item in sublist]
result_arr = [x.strip() for x in result_arr]
result_arr = [*map(_parse_csv_line, result_arr)]
flattened_arr = [item for sublist in result_arr for item in sublist]
return flattened_arr
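# Example (illustrative): flatten_tuple_results([('a,b',), ('c',)]) returns ['a', 'b', 'c'].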
def parse_dict_of_lists(results):
# TODO: This is a temporary method to transform data from the synonyms api
# I don't really like that we have to use this, but it's the quickest way to get things
# running again after pulling out the synonyms service into a self-contained app
output = {}
for item in results:
output[item.key] = sorted(list(set(item.list)), key=len, reverse=True)
return output
def query_result_to_dict(result):
"""
    SQLAlchemy returns tuples; they need to be converted to dicts so we can jsonify them.
:return:
"""
return dict(zip(result.keys(), result))
def query_results_to_dict(results):
"""
    SQLAlchemy returns tuples; they need to be converted to dicts so we can jsonify them.
:return:
"""
return list(map(lambda result: query_result_to_dict(result), results))
def merge_dicts(dict1, dict2):
for k in set(dict1.keys()).union(dict2.keys()):
if k in dict1 and k in dict2:
if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
yield (k, dict(merge_dicts(dict1[k], dict2[k])))
else:
# If one of the values is not a dict, you can't continue merging it.
# Value from second dict overrides one in first and we move on.
yield (k, dict2[k])
                # Alternatively, raise an exception here to be alerted of value conflicts
elif k in dict1:
yield (k, dict1[k])
else:
yield (k, dict2[k])
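# Example (illustrative): dict(merge_dicts({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3}))
# yields {'a': {'x': 1, 'y': 2}, 'b': 3} - nested dicts are merged, other conflicts take dict2's value.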
def remove_periods_designation(results):
designation_list = []
for item in results:
text = re.sub(r'[\.]', '', item, 0, re.IGNORECASE)
designation_list.append(item)
if text != item:
designation_list.append(text)
return designation_list
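# Example (illustrative): remove_periods_designation(['Ltd.', 'Inc']) returns ['Ltd.', 'Ltd', 'Inc'].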
def get_plural_singular_name(name):
d = {}
p = inflect.engine()
for word in name.split():
val = []
singular = p.singular_noun(word)
plural = p.plural_noun(word)
if singular:
val.append(singular.lower())
if plural:
val.append(plural.lower())
val.append(word.lower())
d[word] = (list(set(val)))
name_list = []
for combination in product(*d.values()):
name_list.append(' '.join(combination))
return name_list
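# Example (illustrative): get_plural_singular_name('test case') returns every singular/plural
# combination, e.g. 'test case', 'tests cases', 'test cases' and 'tests case' (order may vary).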
def convert_to_ascii(value):
try:
return value.encode("ascii", "ignore").decode('ascii')
except Exception as err:
return value
# def remove_numbers_list(list_name):
# return [name for name in list_name if not name.isdigit()]
# def remove_numbers_dict(d):
# return {key: value for key, value in d.items() if not key.isdigit()}
|
#encoding=utf-8
## SOLVED 2014/04/10
## 5777
# It was proposed by Christian Goldbach that every odd composite number can be
# written as the sum of a prime and twice a square.
# 9 = 7 + 2×1²
# 15 = 7 + 2×2²
# 21 = 3 + 2×3²
# 25 = 7 + 2×3²
# 27 = 19 + 2×2²
# 33 = 31 + 2×1²
# It turns out that the conjecture was false.
# What is the smallest odd composite that cannot be written as the sum of a prime
# and twice a square?
import math
import helpers.prime as primeutils
def euler():
# for each odd number starting at 3
composite = 3
while True:
# 'found' will be True iff the composite can be expressed as the sum
# of a prime and two times a square
found = False
# try all prime numbers that are less than the composite number
for prime in primeutils.primes(composite):
# calculate the square
square = (composite - prime) // 2
# it must have an integer square root for it to be valid
root = math.sqrt(square)
if root == int(root):
found = True
break
# if it cannot be expressed as the sum of blah blah...
if not found:
# we found the answer
return composite
# next odd number
composite += 2
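# Illustrative entry point (assumes the project's helpers.prime module is importable):
if __name__ == '__main__':
    # the comment at the top of this file records the expected answer, 5777
    print(euler())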
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import luigi
from luigi import build
from luigi.contrib.azurebatch import AzureBatchTask
class AzureBatchHelloWorld(AzureBatchTask):
"""
Prerequisites:
- Create an Azure Batch Service Account and Azure Storage Account
- Provide the secrets/credentials for Storage Account and Azure Batch Service Account in a .env file
- Python Packages to install:
- azure batch package: ``pip install azure-batch>=6.0.0``
- azure blob storage: ``pip install azure-storage-blob>=1.3.1``
    This is a pass-through class, showing how you can inherit the new AzureBatchTask.
    Override its methods if you need different functionality.
    This task runs a :py:class:`luigi.contrib.azurebatch.AzureBatchTask` task.
    Provide the path of the root dir where the data and python scripts are kept locally; this task
    will upload the root dir to azure blob and download it on each compute node from blob to execute
    it on each node in parallel.
    Output results are stored in the Azure storage account.
"""
batch_account_name = luigi.Parameter(os.getenv("BATCH_ACCOUNT_NAME"))
batch_account_key = luigi.Parameter(os.getenv("BATCH_ACCOUNT_KEY"))
batch_account_url = luigi.Parameter(os.getenv("BATCH_ACCOUNT_URL"))
storage_account_name = luigi.Parameter(os.getenv("STORAGE_ACCOUNT_NAME"))
storage_account_key = luigi.Parameter(os.getenv("STORAGE_ACCOUNT_KEY"))
command = luigi.Parameter("echo Hello World")
if __name__ == "__main__":
build([AzureBatchHelloWorld()], local_scheduler=True)
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: DeepSeaVectorDraw
class LineJoin(object):
Miter = 0
Bevel = 1
Round = 2
|
# Projeto: VemPython/exe041
# Autor: rafael
# Data: 16/03/18 - 10:49
# Objetivo: TODO A configuração Nacional de Natação precisa de um programa que leia o ano de nascimento de um atleta e
# mostre sua categoria, de acordo com a idade.
# Até 9: MIRIM
# Até 14: INFANTIL
# Até 19: JUNIOR
# Até 20: SENIOR
# Acima: Master
from datetime import date
ano = date.today().year-int(input('>>>> SEM APELAR COM OS AMIGUINHOS <<<<\n\nInforme o ano de nascimento do atleta: '))
if ano > 0 and ano <= 9:
print("MIRIM")
elif ano > 9 and ano <= 14:
print('INFANTIL')
elif ano > 14 and ano <= 19:
print('JUNIOR')
elif ano == 20:
print('SENIOR')
elif ano > 20:
print('MASTER')
else:
print('ESSE SERUMANO NEM NASCEU AINDA!!!')
|
class Person:
def quack(self):
print("I say 'Quack'")
def walk(self):
print("Person walking")
class Duck:
def quack(self):
print("Quack!")
def walk(self):
print("Duck walk")
def feathers(self):
print("Ducks have feathers")
def InTheForest(duck):
duck.quack()
duck.walk()
duck = Duck()
person = Person()
InTheForest(duck)
InTheForest(person)
|
from .. import TransformerAcceptanceTest
class TestNormalizeSettingName(TransformerAcceptanceTest):
TRANSFORMER_NAME = "NormalizeSettingName"
def test_normalize_setting_name(self):
self.compare(source="tests.robot")
def test_normalize_setting_name_selected(self):
self.compare(
source="tests.robot",
expected="selected.robot",
config=" --startline 12 --endline 15",
)
def test_disablers(self):
self.compare(source="disablers.robot", not_modified=True)
|
from ssaw import SettingsApi
from . import my_vcr
@my_vcr.use_cassette()
def test_settings_globalnotice(session):
"""Tests an API call to get/set global settings"""
SettingsApi(session).set_globalnotice('aaa')
r = SettingsApi(session).get_globalnotice()
assert r == 'aaa'
@my_vcr.use_cassette()
def test_settings_globalnotice2(session):
"""Tests an API call to remove global settings"""
SettingsApi(session).remove_globalnotice()
r = SettingsApi(session).get_globalnotice()
assert r == ''
|
import os
import sys
import pytest
from pavo_cristatus.interactions.pavo_cristatus_status import PavoCristatusStatus
from pavo_cristatus.interactions.symbol_signature_replacer_interaction.symbol_signature_replacer_interaction import interact
from pavo_cristatus.module_symbols.module_symbols import ModuleSymbols
from pavo_cristatus.project_loader import symbol_collector
from pavo_cristatus.project_loader.normalized_symbol import NormalizedSymbol
from pavo_cristatus.project_loader.utilities import is_annotated_symbol_of_interest, is_non_annotated_symbol_of_interest
from pavo_cristatus import utilities
from pavo_cristatus.utilities import pavo_cristatus_open
from pavo_cristatus.tests.doubles.module_fakes.annotated.module_fake_class_with_callables import \
ModuleFakeClassWithCallables
from pavo_cristatus.tests.doubles.module_fakes.annotated.module_fake_class_with_classes_with_nested_annotated_callables import \
ModuleFakeClassWithClassesWithNestedAnnotatedCallables
from pavo_cristatus.tests.doubles.module_fakes.annotated.module_fake_class_with_classes import \
ModuleFakeClassWithClasses
from pavo_cristatus.tests.doubles.module_fakes.annotated.module_fake_class_with_inherited_annotated_callables import \
ModuleFakeClassWithInheritedAnnotatedCallables
from pavo_cristatus.tests.doubles.verifiers.write_verifier import WriteVerifier
from pavo_cristatus.tests.utilities import get_module_qualname_from_source, get_python_file_from_symbol_object
unit_test_path = os.path.split(__file__)[0]
project_root_path = os.path.normpath(os.path.join(unit_test_path, "..", "..")).replace("\\", "\\\\")
file_write_verifier = WriteVerifier()
def safe_open_hook(*args, **kwargs):
return file_write_verifier
symbols_under_test = [ModuleFakeClassWithCallables.non_symbol_of_interest,
ModuleFakeClassWithClasses.NonSymbolOfInterest,
ModuleFakeClassWithInheritedAnnotatedCallables.SymbolOfInterest,
ModuleFakeClassWithInheritedAnnotatedCallables.NonSymbolOfInterest,
ModuleFakeClassWithClassesWithNestedAnnotatedCallables.NonSymbolOfInterest,
ModuleFakeClassWithClassesWithNestedAnnotatedCallables.SymbolOfInterest]
# these are only supported on Python 3.9+ (on older versions, all of the following symbols cause syntax errors)
if sys.version_info >= (3, 9):
from pavo_cristatus.tests.doubles.module_fakes.annotated.module_fake_class_with_nested_lambda_decorated_classes import \
ModuleFakeClassWithNestedLambdaDecoratedClasses
from pavo_cristatus.tests.doubles.module_fakes.annotated.module_fake_class_with_lambda_decorated_classes import \
ModuleFakeClassWithLambdaDecoratedClasses
from pavo_cristatus.tests.doubles.module_fakes.annotated.module_fake_class_with_multiply_lambda_decorated_callables import \
ModuleFakeClassWithMultiplyLambdaDecoratedCallables
from pavo_cristatus.tests.doubles.module_fakes.annotated.module_fake_class_with_multiply_lambda_decorated_classes import \
ModuleFakeClassWithMultiplyLambdaDecoratedClasses
from pavo_cristatus.tests.doubles.module_fakes.annotated.module_fake_class_with_nested_lambda_decorated_callables import \
ModuleFakeClassWithNestedLambdaDecoratedCallables
from pavo_cristatus.tests.doubles.module_fakes.annotated.module_fake_class_with_lambda_decorated_callables import \
ModuleFakeClassWithLambdaDecoratedCallables
symbols_under_test.extend([ModuleFakeClassWithNestedLambdaDecoratedClasses.symbol_of_interest,
ModuleFakeClassWithLambdaDecoratedClasses.SymbolOfInterest,
ModuleFakeClassWithMultiplyLambdaDecoratedCallables.symbol_of_interest,
ModuleFakeClassWithMultiplyLambdaDecoratedClasses.SymbolOfInterest,
ModuleFakeClassWithNestedLambdaDecoratedCallables.symbol_of_interest,
ModuleFakeClassWithLambdaDecoratedCallables.symbol_of_interest])
@pytest.mark.parametrize("symbol", symbols_under_test)
def test_symbol_signature_replacer_interaction(monkeypatch, symbol):
monkeypatch.setattr(utilities, pavo_cristatus_open.__name__, safe_open_hook)
symbol_object = symbol_collector.convert_to_symbol_object(project_root_path, NormalizedSymbol(symbol, None, None), is_annotated_symbol_of_interest)
python_file = get_python_file_from_symbol_object(symbol_object)
module = sys.modules[symbol_object.normalized_symbol.module]
module_symbols = ModuleSymbols(module, python_file,
get_module_qualname_from_source(symbol_object.normalized_symbol.source, project_root_path),
{symbol_object})
project_symbols = {module_symbols}
file_write_verifier.reset(module_symbols.get_non_annotated_source())
# Due to file_write_verifier's structure, there can only be one ModuleSymbols per test
result = interact(project_symbols)
assert result.status == PavoCristatusStatus.SUCCESS and result.result
file_write_verifier.verify()
|
"""
This program stores book information.
A book has the following attributes:
title, author, year, ISBN
The user can:
view all records
search for an entry
add an entry
update an entry
delete an entry
close the program
"""
from tkinter import *
import backend
window=Tk() #creates a window object
window.wm_title("BookStore") #for naming your window
l1=Label(window,text="Title")
l1.grid(row=0,column=0)
title_text=StringVar()
e1=Entry(window,textvariable=title_text)
e1.grid(row=0,column=1)
l2=Label(window,text="Author")
l2.grid(row=0,column=2)
author_text=StringVar()
e2=Entry(window,textvariable=author_text)
e2.grid(row=0,column=3)
l3=Label(window,text="Year")
l3.grid(row=1,column=0)
year_text=StringVar()
e3=Entry(window,textvariable=year_text)
e3.grid(row=1,column=1)
l4=Label(window,text="ISBN")
l4.grid(row=1,column=2)
ISBN_text=StringVar()
e4=Entry(window,textvariable=ISBN_text)
e4.grid(row=1,column=3)
def get_selected_row(event):
try:
global selected_tuple
index=list1.curselection()[0]
selected_tuple=list1.get(index)
e1.delete(0,END)
e1.insert(END,selected_tuple[1])
e2.delete(0,END)
e2.insert(END,selected_tuple[2])
e3.delete(0,END)
e3.insert(END,selected_tuple[3])
e4.delete(0,END)
e4.insert(END,selected_tuple[4])
except IndexError:
pass
def view_command():
list1.delete(0,END)
for row in backend.view():
list1.insert(END,row)
def search_command():
list1.delete(0,END)
for row in backend.search(title_text.get(),author_text.get(),year_text.get(),ISBN_text.get()):
list1.insert(END,row)
def insert_command():
backend.insert(title_text.get(),author_text.get(),year_text.get(),ISBN_text.get())
list1.delete(0,END)
list1.insert(END,(title_text.get(),author_text.get(),year_text.get(),ISBN_text.get()))
def delete_command():
backend.delete(selected_tuple[0])
view_command()
def delete_all():
backend.delete_all()
list1.delete(0,END)
def update_command():
backend.update(selected_tuple[0],title_text.get(),author_text.get(),year_text.get(),ISBN_text.get())
view_command()
list1=Listbox(window,height=6,width=35)
list1.grid(row=2,column=1,rowspan=7)
sb1=Scrollbar(window)
sb1.grid(row=2,column=2,rowspan=6)
list1.configure(yscrollcommand=sb1.set)#set the scroll command along the y-axis in the list
sb1.configure(command=list1.yview)
sb2 = Scrollbar(window, orient = HORIZONTAL)
sb2.grid(row=7, column = 1)
list1.configure(xscrollcommand=sb2.set)#set the scroll command along the X-axis in the list
sb2.configure(command = list1.xview)
list1.bind('<<ListboxSelect>>',get_selected_row)
# widget.bind(event, handler)
#If an event matching the event description occurs in the widget,
#the given handler is called with an object describing the event.
b1=Button(window,text="View all",width=12,command=view_command) #The command option takes a reference to a function
b1.grid(row=2,column=3)
b2=Button(window,text="Search entry",width=12,command=search_command)
b2.grid(row=3,column=3)
b3=Button(window,text="Add entry",width=12,command=insert_command)
b3.grid(row=4,column=3)
b4=Button(window,text="Update Selected",width=12,command=update_command)
b4.grid(row=5,column=3)
b5=Button(window,text="Delete Selected",width=12,command=delete_command)
b5.grid(row=6,column=3)
b6=Button(window,text="Delete All",width=12,command=delete_all)
b6.grid(row=7,column=3)
b7=Button(window,text="Close",width=12,command=window.destroy)
b7.grid(row=8,column=3)
window.mainloop() #wrap up all the widgets
|
import csv
import pandas
import json
class TermData:
def __init__( self ):
self._frequency = 1
self._documents = []
def incrementFrequency(self):
self._frequency = self._frequency + 1
def addDocument( self, doc_id ):
if doc_id not in self._documents:
self._documents.append( doc_id )
def main():
dataset = pandas.read_csv( "movies.csv" )
terms = {}
# dataset_to_process = 1000
dataset_to_process = len(dataset['overview'])
dirty_files_count = 0
for i in range( dataset_to_process ):
if isinstance( dataset['overview'][i], str ):
print( 'currently processing document %d' % (i) )
for word in dataset['overview'][i].split(' '):
# cleaning term
# remove ,
new_word = word.replace( ',', '' )
# remove .
new_word = new_word.replace( '.', '' )
# remove 's
new_word = new_word.replace( '\'s', '' )
# remove trailing symbols
new_word = new_word.strip(';')
new_word = new_word.strip('\'')
new_word = new_word.strip('\"')
new_word = new_word.strip(' ')
new_word = new_word.strip('(')
new_word = new_word.strip(')')
new_word = new_word.strip('[')
new_word = new_word.strip(']')
new_word = new_word.strip('?')
new_word = new_word.strip('!')
# make lowercase
new_word = new_word.lower()
# for item in terms.keys():
# print(item)
if new_word in terms.keys():
terms[ new_word ].incrementFrequency()
terms[ new_word ].addDocument( i )
else:
meta_data = TermData()
meta_data.addDocument( i )
terms.update( { new_word: meta_data })
else:
dirty_files_count = dirty_files_count + 1
# make json file for terms
data_json = {}
data_json['terms'] = []
for term in terms.keys():
data_json['terms'].append({
'word': term,
'frequency': terms[term]._frequency,
'documents': terms[term]._documents
})
# dump json file
with open( 'terms.json', 'w' ) as outfile:
json.dump( data_json, outfile )
print( 'number of dirty files: %d' % dirty_files_count )
print( 'number of terms: %d' % len(terms))
print( 'json file successfully created' )
if __name__ == '__main__':
main()
|
"""
Original Author:
Madrone (Jeff Kruys)
Created on:
2021-01-23
Purpose:
This script takes a feature class that contains Age in years (VRI, etc) and adds the binned ages (classes) to each overlapping polygon in the TEM feature class.
Usage:
Add_Age_Class_Data_To_tem.py bfc afc afl [-h] [-l] [-ld]
Positional Arguments:
bfc tem feature class or table
afc Polygon feature class that contains age data
afl Name of field that contains age values
Optional Arguments:
-h, --help show this help message and exit
-l, --level log level messages to display; Default: 20 - INFO
-ld, --log_dir path to directory for output log file
Example Input:
X:\fullpath\Add_Age_Class_Data_To_tem.py Y:\fullpath\bfc Z:\fullpath\afc age_field_name
History
2021-01-23 (JK): Created script.
2021-02-05 (AE): Changed age_cl_sts bins
"""
import logging
import time
import os
import sys
import ctypes
import pdb
import operator
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
def main(tem_fc, age_fc, age_field):
logging.info("Initializing...")
logging.info('Start Time: ' + time.ctime(time.time()))
dtCalcScriptStart = time.time()
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
def __init__(self):
# have to initialize this to the size of MEMORYSTATUSEX
self.dwLength = ctypes.sizeof(self)
super(MEMORYSTATUSEX, self).__init__()
python_script = sys.argv[0]
script_path = os.path.split(sys.argv[0])[0]
# ---------------------------------------------------------------------------------------------------------
# Function to construct a time string from a number (of seconds)
# ---------------------------------------------------------------------------------------------------------
def SanitizeElapsedTime(dtInput):
if dtInput < 120:
strElapsedTime = str(int(dtInput)) + ' sec.'
elif dtInput < 3600:
strElapsedTime = str(round(dtInput / 60, 1)) + ' min.'
else:
strElapsedTime = str(round(dtInput / 3600, 2)) + ' hr.'
return strElapsedTime
# ---------------------------------------------------------------------------------------------------------
# Function to determine age class (STS) for a given age value in years
# ---------------------------------------------------------------------------------------------------------
def calc_age_cl_sts(age_value):
if age_value >= 0 and age_value <= 2:
return 2
elif age_value > 2 and age_value <= 5:
return 5
elif age_value > 5 and age_value <= 10:
return 10
elif age_value > 10 and age_value <= 20:
return 20
elif age_value > 20 and age_value <= 40:
return 40
elif age_value > 40 and age_value <= 60:
return 60
elif age_value > 60 and age_value <= 80:
return 80
elif age_value > 80 and age_value <= 100:
return 100
elif age_value > 100 and age_value <= 120:
return 120
elif age_value > 120 and age_value <= 140:
return 140
elif age_value > 140 and age_value <= 180:
return 180
elif age_value > 180 and age_value <= 200:
return 200
elif age_value > 200 and age_value <= 250:
return 250
elif age_value > 250 and age_value <= 399:
return 399
elif age_value > 399:
return 9999
else:
return -1
# ---------------------------------------------------------------------------------------------------------
# Function to determine age class (STD) for a given age class (STS) value in years
# ---------------------------------------------------------------------------------------------------------
def calc_age_cl_std(age_cl_sts_value):
if age_cl_sts_value in [2, 5, 10]:
return 15
elif age_cl_sts_value in [20]:
return 30
elif age_cl_sts_value in [40]:
return 50
elif age_cl_sts_value in [60, 80]:
return 80
elif age_cl_sts_value in [100, 120, 140, 180, 200, 250, 399, 9999]:
return 9999
else:
return -1
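    # Example (illustrative): calc_age_cl_sts(37) falls in the 21-40 year bin and returns 40,
    # and calc_age_cl_std(40) then maps that STS class to the STD class 50.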
# ---------------------------------------------------------------------------------------------------------
# Check that input feature classes exist and contain required fields
# ---------------------------------------------------------------------------------------------------------
if not arcpy.Exists(tem_fc):
logging.error("**** Specified tem feature class " + tem_fc + " does not exist. Exiting script.")
sys.exit()
if not arcpy.Exists(age_fc):
logging.error("**** Specified age feature class " + age_fc + " does not exist. Exiting script.")
sys.exit()
age_field_found = False
for f in arcpy.ListFields(age_fc):
if f.name == age_field:
age_field_found = True
if f.type not in ["Integer", "SmallInteger", "Single", "Double"]:
logging.error("**** Specified age field " + age_field + " is type " + str(f.type) + ", not a numeric "
"field. Exiting script.")
sys.exit()
if not age_field_found:
logging.error("**** Specified age field " + age_field + " does not exist in specified age feature class. "
"Exiting script.")
sys.exit()
teis_id_field_found = False
for f in arcpy.ListFields(tem_fc):
if f.name == "TEIS_ID":
teis_id_field_found = True
if f.type not in ["Integer", "SmallInteger"]:
logging.error("**** TEIS_ID field in specified tem feature class is not a numeric field. "
"Exiting script.")
sys.exit()
if not teis_id_field_found:
logging.error("**** Specified tem feature class does not have a TEIS_ID field. Exiting script.")
sys.exit()
# ---------------------------------------------------------------------------------------------------------
# Check that TEIS_ID field contains unique values
# ---------------------------------------------------------------------------------------------------------
logging.info("Checking that tem TEIS_ID field contains unique values")
teis_id_count_dict = {}
row_count = 0
total_count = int(arcpy.GetCount_management(tem_fc).getOutput(0))
dupe_teis_id_found = False
for row in arcpy.da.SearchCursor(tem_fc, ["TEIS_ID"]):
row_count += 1
try:
teis_id_count_dict[row[0]] += 1
except:
teis_id_count_dict[row[0]] = 1
if teis_id_count_dict[row[0]] > 1:
dupe_teis_id_found = True
if row_count % 100000 == 0 or row_count == total_count:
logging.info(" - Read " + str(row_count) + " of " + str(total_count) + " rows")
if dupe_teis_id_found:
logging.info(" - Duplicate TEIS_ID values found. Repopulating TEIS_ID field with OBJECTID values.")
row_count = 0
with arcpy.da.UpdateCursor(tem_fc, ["TEIS_ID", "OID@"]) as cursor:
for row in cursor:
row_count += 1
row[0] = row[1]
cursor.updateRow(row)
if row_count % 100000 == 0 or row_count == total_count:
logging.info(" - Updated " + str(row_count) + " of " + str(total_count) + " rows")
else:
logging.info(" - TEIS_ID field contains all unique values.")
# ---------------------------------------------------------------------------------------------------------
# Calculate age classes for each polygon. Write them to new fields AGE1_CL_STS and AGE1_CL_STD
# (or if they exist, AGE2_CL_STS and AGE2_CL_STD, or if they already exist, etc. etc.)
# ---------------------------------------------------------------------------------------------------------
tab_int_tbl = os.path.split(tem_fc)[0] + r"\tab_int_tbl"
logging.info("Creating Tabulate Intersection table " + tab_int_tbl)
if arcpy.Exists(tab_int_tbl):
arcpy.Delete_management(tab_int_tbl)
arcpy.TabulateIntersection_analysis(in_zone_features = tem_fc, zone_fields = "TEIS_ID", in_class_features = age_fc,
out_table = tab_int_tbl, class_fields = age_field, sum_fields = "",
xy_tolerance = "-1 Unknown", out_units = "UNKNOWN")
row_total = int(arcpy.GetCount_management(tab_int_tbl).getOutput(0))
tabulate_intersection_succeeded = False
# We are just going to store the area-dominant age class for STS, and later derive the age class STD from that.
# Otherwise if we calculate them separately, they may not be "compatible", e.g. age class STS might be 10-20 but
# age class STD might be 30-50 if they are calculated independently.
age_cl_sts_dict = {}
if row_total > 0: ## sometimes the TabulateIntersection tool results in an empty output table for no reason
logging.info("Reading Tabulate Intersection table to dictionary")
row_count = 0
for row in arcpy.da.SearchCursor(tab_int_tbl,["TEIS_ID", age_field, "AREA"]):
row_count += 1
try:
age_cl_sts_dict[row[0]][calc_age_cl_sts(row[1])] += row[2]
except:
try:
age_cl_sts_dict[row[0]][calc_age_cl_sts(row[1])] = row[2]
except:
age_cl_sts_dict[row[0]] = {}
age_cl_sts_dict[row[0]][calc_age_cl_sts(row[1])] = row[2]
if row_count % 100000 == 0 or row_count == row_total:
logging.info(" Read " + str(row_count) + " of " + str(row_total) + " rows")
tabulate_intersection_succeeded = True
else: ## if output table was empty, run an Intersect instead
logging.error("**** Tabulate Intersection output table is empty")
logging.info("Running an Intersect of tem and age feature classes")
intersect_fc = os.path.split(tem_fc)[0] + r"\int_fc"
if arcpy.Exists(intersect_fc):
arcpy.Delete_management(intersect_fc)
arcpy.Intersect_analysis(in_features = age_fc + " #;" + tem_fc + " #", out_feature_class = intersect_fc,
join_attributes = "ALL", cluster_tolerance = "-1 Unknown", output_type = "INPUT")
row_total = int(arcpy.GetCount_management(intersect_fc).getOutput(0))
if row_total > 0:
logging.info("Reading Intersect output feature class table to dictionary")
row_count = 0
for row in arcpy.da.SearchCursor(intersect_fc, ["TEIS_ID", age_field, "SHAPE@AREA"]):
row_count += 1
try:
age_cl_sts_dict[row[0]][calc_age_cl_sts(row[1])] += row[2]
except:
try:
age_cl_sts_dict[row[0]][calc_age_cl_sts(row[1])] = row[2]
except:
age_cl_sts_dict[row[0]] = {}
age_cl_sts_dict[row[0]][calc_age_cl_sts(row[1])] = row[2]
if row_count % 100000 == 0 or row_count == row_total:
logging.info(" Read " + str(row_count) + " of " + str(row_total) + " rows")
else:
arcpy.Delete_management(intersect_fc)
logging.error("Intersection is empty; VRI and PEM/tem feature classes do not overlap. Exiting.")
sys.exit()
tem_fields = [f.name for f in arcpy.ListFields(tem_fc)]
x = 1
fields_added = False
while not fields_added:
age_cl_sts_field = "AGE" + str(x) + "_CL_STS"
age_cl_std_field = "AGE" + str(x) + "_CL_STD"
if age_cl_sts_field not in tem_fields and age_cl_std_field not in tem_fields:
logging.info("Adding new fields AGE" + str(x) + "_CL_STS and AGE" + str(x) + "_CL_STD")
arcpy.AddField_management(tem_fc, age_cl_sts_field, "SHORT")
arcpy.AddField_management(tem_fc, age_cl_std_field, "SHORT")
fields_added = True
else:
x += 1
row_count = 0
no_age_count = 0
row_total = int(arcpy.GetCount_management(tem_fc).getOutput(0))
logging.info("Writing age class values to " + age_cl_sts_field + " and " + age_cl_std_field + " fields in "
+ tem_fc)
with arcpy.da.UpdateCursor(tem_fc,["TEIS_ID", age_cl_sts_field, age_cl_std_field]) as cursor:
for row in cursor:
row_count += 1
try:
                biggest_age_cl_sts = max(age_cl_sts_dict[row[0]].items(), key=operator.itemgetter(1))[0]
## see http://stackoverflow.com/questions/268272/getting-key-with-maximum-value-in-dictionary
row[1] = biggest_age_cl_sts
row[2] = calc_age_cl_std(biggest_age_cl_sts)
except:
# if the current polygon had no entry in the dictionary, then there is no
# age class info for the polygon, so assign it values of -1.
row[1] = -1
row[2] = -1
no_age_count += 1
cursor.updateRow(row)
if row_count % 100000 == 0 or row_count == row_total:
logging.info(" Processed " + str(row_count) + " of " + str(row_total) + " rows")
if no_age_count == 0:
logging.info("All " + str(row_total) + " tem polygon(s) overlapped with an age polygon. That's good!")
else:
logging.info("**** WARNING: There were " + str(no_age_count) + " polygon(s) for which age classes could "
"not be calculated. These polygons probably don't overlap with any polygons in the age "
"feature class.")
# arcpy.Delete_management(tab_int_tbl)
# ---------------------------------------------------------------------------------------------------------
# Done
# ---------------------------------------------------------------------------------------------------------
dtCalcNow = time.time()
dtCalcScriptElapsed = dtCalcNow - dtCalcScriptStart
logging.info("Script complete after " + SanitizeElapsedTime(dtCalcScriptElapsed))
if __name__ == '__main__':
try:
# Parse arguments
parser = ArgumentParser(description='This script adds new age class fields to a tem feature class '
'and populates the new fields based on ages in a second feature class ',
formatter_class=RawTextHelpFormatter)
parser.add_argument('bfc', help='tem feature class')
parser.add_argument('afc', help='Polygon feature class containing age data')
parser.add_argument('afl', help='Name of field that contains age values in age feature class')
parser.add_argument('-l', '--level', type=int,
help='Log level\nValues: 10-DEBUG, 20-INFO(default), 30-WARN, 40-ERROR, 50-CRITICAL')
parser.add_argument('-ld', '--log_dir', help='Path to log directory')
args = parser.parse_args()
# Set up logger
if args.level is not None and args.level not in [10, 20, 30, 40, 50]:
raise ValueError('Invalid log level')
elif args.level is None:
args.level = 20
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=args.level)
# Import arcpy
import arcpy
# Start the script
main(args.bfc, args.afc, args.afl)
except Exception as e:
logging.exception('Unexpected exception. Program terminating.')
else:
import arcpy
|
"""Tests for the ConsoleEnvironment and Console helper."""
from j5.backends.console import Console
def test_console_instantiation() -> None:
"""Test that we can create a console."""
console = Console("MockConsole")
assert type(console) is Console
assert console._descriptor == "MockConsole"
assert console._print is print
assert console._input is input
def test_console_info() -> None:
"""Test that the console can output information."""
# Define a testing print function
def mock_print(text: str) -> None:
"""Mock printing function."""
assert text == "TestBoard: Test the console info"
console = Console("TestBoard", print_function=mock_print)
console.info("Test the console info")
def test_console_read() -> None:
"""Test that we can read from the console."""
# Define a testing input function
def mock_input(prompt: str) -> str:
"""Mock some input."""
return str(reversed(prompt))
console = Console("TestBoard", input_function=mock_input)
assert str(console.read("Enter Test Input")) == str(reversed("Enter Test Input"))
def test_console_read_none_type() -> None:
"""Test that we can read None from console, i.e any input."""
# Define a testing input function
def mock_input(prompt: str) -> str:
"""Mock some input."""
return "string"
console = Console("TestBoard", input_function=mock_input)
assert console.read("Enter test input", None) is None
def test_console_read_bad_type() -> None:
"""Test that the console emits an error if it cannot cast to the desired type."""
class MockConsoleState:
"""A mock console with state."""
def __init__(self) -> None:
self.bad_attempt_count = 0
def input(self, prompt: str) -> str:
"""Mock some input."""
if self.bad_attempt_count == 0:
self.bad_attempt_count += 1
return "Not an int"
return "6"
def print(self, text: str) -> None:
"""Mock printing function."""
if self.bad_attempt_count == 0:
assert text == "TestConsole: Unable to construct a int from 'Not an int'"
mock = MockConsoleState()
console = Console(
"TestConsole",
print_function=mock.print,
input_function=mock.input,
)
assert console.read("I want an int", int) == 6
|
from app.models import *
#Room states
for state in ['rented', 'reserved', 'avaliable', 'need_cleaning', 'need_repair']:
db.session.add(Room_state(state_name=state))
db.session.commit()
#Room types
for r_type in [2, 3, 4]:
db.session.add(Room_type(r_type=r_type))
db.session.commit()
# Add some rooms
db.session.add(Room(number=1,floor=1,room_type=Room_type.query.filter_by(r_type=2).first(),price_per_day=100,room_state=Room_state.query.filter_by(state_name='avaliable').first()))
db.session.commit()
# Add sex
for sex in ['male', 'female', 'other', 'yes please']:
db.session.add(Sex(name=sex))
db.session.commit()
|
from pydbus import SystemBus
from gi.repository import GLib
import paho.mqtt.publish as publish
import json
import logging
logging.basicConfig(level=logging.DEBUG)
def mqtt(topic, payload, hostname="127.0.0.1", retain=False):
logging.debug('sending to mqtt: topic=%s payload=%s' % (topic, payload))
    publish.single(topic, payload, hostname=hostname, retain=retain)
def prop(signal, params):
return json.dumps({signal: {params[0]: params[1]}})
def cadd(signal, params):
return json.dumps({signal: {"path": params[0], "props": params[1]}})
def crem(signal, params):
return json.dumps({signal: {"path": params[0]}})
def dev(obj, iface):
return obj.split('/')[-1] + '/' + iface.split('.')[-1]
def vcdev(obj, iface):
return obj.split('/')[-2] + '/' + iface.split('.')[-1]
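# Illustrative note (assumed example values, not from the original code): for an ofono
# object path such as '/hfp/org/bluez/hci0/dev_XX' with interface 'org.ofono.Modem',
# dev() yields the MQTT topic 'dev_XX/Modem', and prop('PropertyChanged',
# ['Powered', True]) serializes to '{"PropertyChanged": {"Powered": true}}'.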
if __name__ == '__main__':
logging.debug('starting')
bus = SystemBus()
if_list = [
('org.ofono.Modem', 'PropertyChanged', lambda _a, obj, iface, signal, params: mqtt(dev(obj, iface), prop(signal, params))),
('org.ofono.VoiceCall', 'PropertyChanged', lambda _a, obj, iface, signal, params: mqtt(vcdev(obj, iface), prop(signal, params))),
('org.ofono.CallVolume', 'PropertyChanged', lambda _a, obj, iface, signal, params: mqtt(dev(obj, iface), prop(signal, params))),
('org.ofono.VoiceCallManager', 'PropertyChanged', lambda _a, obj, iface, signal, params: mqtt(dev(obj, iface), prop(signal, params))),
('org.ofono.VoiceCallManager', 'CallAdded', lambda _a, obj, iface, signal, params: mqtt(dev(obj, iface), cadd(signal, params))),
('org.ofono.VoiceCallManager', 'CallRemoved', lambda _a, obj, iface, signal, params: mqtt(dev(obj, iface), crem(signal, params))),
('org.ofono.NetworkRegistration', 'PropertyChanged', lambda _a, obj, iface, signal, params: mqtt(dev(obj, iface), prop(signal, params))),
]
[bus.subscribe(iface=iface, signal=signal, signal_fired=cb) for iface, signal, cb in if_list]
mqtt("monitor", payload="started", retain=True)
loop = GLib.MainLoop()
loop.run()
|
SOCIAL_AUTH_TWITTER_KEY = 'EGmxjE55yVQigPCPWoMqdRsNp'
SOCIAL_AUTH_TWITTER_SECRET = '9rnyiG5HRHH187hkaaCaSADHNP4tRAD4Ob7SZiCJb9lSbWw3Pg'
SUPERVISOR_USER = 'user'
SUPERVISOR_PASSWORD = '123'
SUPERVISOR_URI = 'http://'+SUPERVISOR_USER+':'+SUPERVISOR_PASSWORD+'@127.0.0.1:9001'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'tweetset',
'USER': 'tweetset',
'PASSWORD': 'somepassword',
'HOST': 'localhost',
'PORT': '',
}
}
|
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
import mxnet as mx
import argparse
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd
import numpy as np
import logging
from datetime import datetime
import os
import time
def fill_buf(buf, i, img, shape):
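    # Descriptive note (added): copies one grayscale image into tile slot i of the
    # larger RGB canvas `buf`, filling all three colour channels; tiles are laid out
    # left-to-right, top-to-bottom and `shape` holds the per-tile dimensions.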
n = buf.shape[0]//shape[1]
m = buf.shape[1]//shape[0]
sx = (i%m)*shape[0]
sy = (i//m)*shape[1]
buf[sy:sy+shape[1], sx:sx+shape[0], 0] = img
buf[sy:sy+shape[1], sx:sx+shape[0], 1] = img
buf[sy:sy+shape[1], sx:sx+shape[0], 2] = img
return None
def visual(title, X, name):
print(X.shape)
X = X.reshape((-1, 28, 28))
X = np.clip((X - np.min(X))*(255.0/(np.max(X) - np.min(X))), 0, 255).astype(np.uint8)
n = np.ceil(np.sqrt(X.shape[0]))
buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), 3), dtype=np.uint8)
for i, img in enumerate(X):
fill_buf(buff, i, img, X.shape[1:3])
buff = buff[:,:,::-1]
plt.imshow(buff)
plt.title(title)
plt.savefig(name)
class netG(nn.Block):
def __init__(self, **kwargs):
super(netG, self).__init__(**kwargs)
with self.name_scope():
self.fcz = nn.HybridSequential()
with self.fcz.name_scope():
self.fcz.add(nn.Dense(256),
nn.BatchNorm(),
nn.LeakyReLU(0.0))
self.fcy = nn.HybridSequential()
with self.fcy.name_scope():
self.fcy.add(nn.Dense(256),
nn.BatchNorm(),
nn.LeakyReLU(0.0))
self.rest = nn.Sequential()
with self.rest.name_scope():
self.rest.add(nn.Dense(512),
nn.BatchNorm(),
nn.LeakyReLU(0.0),
nn.Dense(1024),
nn.BatchNorm(),
nn.LeakyReLU(0.0),
nn.Dense(784),
nn.Lambda('tanh'))
def forward(self, z, y):
fcz_outputs = self.fcz(z)
fcy_outputs = self.fcy(y)
rest_inputs = mx.nd.concat(fcz_outputs, fcy_outputs, dim=1)
output = self.rest(rest_inputs)
return output
class netD(nn.Block):
def __init__(self, **kwargs):
super(netD, self).__init__(**kwargs)
self.fcX = nn.HybridSequential()
with self.fcX.name_scope():
self.fcX.add(nn.Dense(1024),
nn.LeakyReLU(0.2))
self.fcy = nn.HybridSequential()
with self.fcy.name_scope():
self.fcy.add(nn.Dense(1024),
nn.LeakyReLU(0.2))
self.final = nn.HybridSequential()
with self.final.name_scope():
self.final.add(nn.Dense(512),
nn.BatchNorm(),
nn.LeakyReLU(0.2),
nn.Dense(256),
nn.BatchNorm(),
nn.LeakyReLU(0.2),
nn.Dense(1, activation='sigmoid'))
def forward(self, X, y):
fcX_outputs = self.fcX(X)
fcy_outputs = self.fcy(y)
final_inputs = mx.nd.concat(fcX_outputs, fcy_outputs, dim=1)
final_outputs = self.final(final_inputs)
return final_outputs
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=64, help='input batch size')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--nepoch', type=int, default=10, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda',action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='./results', help='folder to output images and model checkpoints')
parser.add_argument('--check-point', default=True, help="save results at each epoch or not")
opt = parser.parse_args()
print(opt)
logging.basicConfig(level=logging.DEBUG)
nz = opt.nz
if opt.cuda:
ctx = mx.gpu(0)
else:
ctx = mx.cpu()
def transformer(data, label):
data = data.reshape((1, 784))
one_hot = mx.nd.zeros((10, ))
one_hot[label] = 1
# normalize to [-1, 1]
data = data.astype(np.float32)/128 - 1
return data, one_hot
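# Descriptive note (added): each sample is flattened to shape (1, 784), its pixel
# values are mapped from [0, 255] to roughly [-1, 1], and the label is one-hot encoded
# into a length-10 vector used to condition the GAN.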
train_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=True, transform=transformer),
batch_size=opt.batch_size, shuffle=True, last_batch='discard')
val_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=False, transform=transformer),
batch_size=opt.batch_size, shuffle=False)
real_label = mx.nd.ones((opt.batch_size, ), ctx=ctx)
fake_label = mx.nd.zeros((opt.batch_size, ), ctx=ctx)
metric = mx.metric.Accuracy()
GNet = netG()
DNet = netD()
outf = opt.outf
# loss
loss = gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=True)
# initialize the generator and the discriminator
GNet.initialize(mx.init.Normal(0.02), ctx=ctx)
DNet.initialize(mx.init.Normal(0.02), ctx=ctx)
# trainer for the generator and the discriminator
trainerG = gluon.Trainer(GNet.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
trainerD = gluon.Trainer(DNet.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
print("Training...")
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
iter = 0
for epoch in range(opt.nepoch):
tic = time.time()
btic = time.time()
for data, label in train_data:
if iter == 1:
mx.profiler.set_config(profile_all=True, filename='cgan_profile_output.json')
mx.profiler.set_state('run')
data = data.as_in_context(ctx)
label = label.as_in_context(ctx)
noise = mx.nd.random.normal(0, 1, shape=(opt.batch_size, 1, nz), ctx=ctx)
noise_label = label.copy()
with autograd.record():
output = DNet(data, label)
errD_real = loss(output, real_label)
metric.update([real_label, ], [output, ])
fake = GNet(noise, noise_label)
output = DNet(fake.detach(), label)
errD_fake = loss(output, fake_label)
errD = errD_real + errD_fake
metric.update([fake_label, ], [output, ])
errD.backward()
trainerD.step(opt.batch_size)
with autograd.record():
output = DNet(fake, noise_label)
errG = loss(output, real_label)
errG.backward()
trainerG.step(opt.batch_size)
name, acc = metric.get()
logging.info('discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d' %(mx.nd.mean(errD).asscalar(), mx.nd.mean(errG).asscalar(), acc, iter, epoch))
if iter % 1 == 0:
visual('gout', fake.asnumpy(), name=os.path.join(outf,'fake_img_iter_%d.png' %iter))
visual('data', data.asnumpy(), name=os.path.join(outf,'real_img_iter_%d.png' %iter))
if iter == 3:
mx.profiler.set_state('stop')
break
iter = iter + 1
btic = time.time()
name, acc = metric.get()
metric.reset()
logging.info('\nbinary training acc at epoch %d: %s=%f' % (epoch, name, acc))
logging.info('time: %f' % (time.time() - tic))
break
|
"""
montecarloDistribution.py
Created by Luca Camerani at 02/09/2020, University of Milano-Bicocca.
(l.camerani@campus.unimib.it)
All rights reserved.
This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library),
and is released under the "BSD Open Source License".
"""
from collections import namedtuple
import numpy as np
from sklearn.neighbors import KernelDensity
class MonteCarlo():
def __init__(self, simulation):
self.simulation = simulation
def getIndexSpace(self):
return np.arange(0, self.simulation.shape[1])
def getProcessList(self):
return self.simulation
def getData(self, index=None):
if index is None: index = self.getIndexSpace()[-1]
return self.simulation[:, index]
def getValues(self, step=1, index=None):
if index is None:
bounds = {'min': [], 'max': []}
for k in self.getIndexSpace():
data = self.getData(index=k)
bounds['min'].append(np.amin(data))
bounds['max'].append(np.amax(data))
bounds = {'min': min(bounds['min']), 'max': max(bounds['max'])}
else:
hist = self.getData(index=index)
bounds = {'min': np.amin(hist), 'max': np.amax(hist)}
values = np.asarray([value for value in np.arange(int(bounds['min']), int(bounds['max']) + 1, step)])
values = values.reshape((len(values), 1))
return values
def getEmpiricalDist(self, index=None, step=1, bandwidth=5, fullSpace=False):
model = KernelDensity(bandwidth=bandwidth, kernel='gaussian')
hist = self.getData(index=index)
sample = hist.reshape((len(hist), 1))
model.fit(sample)
if fullSpace:
values = self.getValues(step=step)
else:
values = self.getValues(step=step, index=index)
probabilities = np.exp(model.score_samples(values))
return namedtuple('Density', ['values', 'probabilities'])(**{
"values": values,
"probabilities": probabilities
})
def getEmpiricalCumulativeDist(self, index=None, step=1, bandwidth=5, fullSpace=False):
PDF = self.getEmpiricalDist(index=index, step=step, bandwidth=bandwidth, fullSpace=fullSpace)
return namedtuple('Density', ['values', 'probabilities'])(**{
"values": PDF.values,
"probabilities": np.cumsum(PDF.probabilities)
})
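# Hedged usage sketch (not part of the original library): the 500x50 path matrix,
# the drift-free random walk and the bandwidth below are illustrative assumptions
# only, chosen just to show how the class is queried.
if __name__ == '__main__':
    np.random.seed(0)
    toy_paths = 100 + np.cumsum(np.random.normal(0, 1, size=(500, 50)), axis=1)
    mc = MonteCarlo(toy_paths)
    density = mc.getEmpiricalDist(bandwidth=5)
    print(density.values.shape, density.probabilities.shape)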
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 2 20:08:59 2019
@author: admin
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import Axes3D
### Machine learning with the scikit-learn library ###
## The iris dataset ##
from sklearn import datasets
iris = datasets.load_iris()
iris.data
# Flower species
iris.target
# Flower category names
iris.target_names
# Columns 1 and 2 of the dataset are sepal length and width; columns 3 and 4 are petal length and width
x = iris.data[:, 2]
y = iris.data[:, 3]
species = iris.target
x_min, x_max = x.min() - .5, x.max() + .5
y_min, y_max = y.min() - .5, y.max() + .5
plt.figure()
plt.scatter(x, y, c=species)
plt.xlabel("Sepal length")
plt.ylabel("Sepal width")
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# Principal component analysis (PCA) #
from sklearn.decomposition import PCA
# fit_transform() performs the dimensionality reduction; n_components is the number of principal components to keep
x_reduced = PCA(n_components=3).fit_transform(iris.data)
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(x_reduced[:, 0], x_reduced[:,1], x_reduced[:,2], c=species)
ax.set_xlabel("First eigenvector")
ax.set_ylabel("Second eigenvector")
ax.set_zlabel("Third eigenvector")
ax.w_xaxis.set_ticklabels(())
ax.w_yaxis.set_ticklabels(())
ax.w_zaxis.set_ticklabels(())
## K-nearest neighbors classifier ##
from sklearn.neighbors import KNeighborsClassifier
np.random.seed(0)
iris = datasets.load_iris()
x = iris.data
y = iris.target
# permutation() shuffles all elements of the dataset; the first 140 records form the training set and the last 10 the test set
i = np.random.permutation(len(iris.data))
x_train = x[i[: -10]]
y_train = y[i[: -10]]
x_test = x[i[-10: ]]
y_test = y[i[-10: ]]
# Call the classifier constructor
knn = KNeighborsClassifier()
# Train it with fit()
knn.fit(x_train, y_train)
knn.predict(x_test)
y_test
# Plot the decision boundary
# The scatter points are the actual samples; the drawn boundary is the predicted one
import matplotlib as mat
x = iris.data[:, :2]
y = iris.target
x_min, x_max = x[:,0].min() - .5, x[:,0].max() + .5
y_min, y_max = x[:,1].min() - .5, x[:,1].max() + .5
cmap_light = mat.colors.ListedColormap(['#AAAAFF', '#AAFFAA', '#FFAAAA'])
h = .02
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
knn = KNeighborsClassifier()
knn.fit(x, y)
# np.c_ concatenates its arguments column-wise, np.r_ row-wise
Z = knn.predict(np.c_[xx.ravel(),yy.ravel()])
Z = Z.reshape(xx.shape)
plt.figure()
# Z holds the classification result for each grid point
plt.pcolormesh(xx,yy,Z,cmap=cmap_light)
plt.scatter(x[:, 0], x[:, 1], c=y)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
## The Diabetes dataset ##
diabetes = datasets.load_diabetes()
diabetes.data[0]
# Each value has been mean-centered and its range scaled by the standard deviation times the number of samples
np.sum(diabetes.data[:,0]**2)
diabetes.target
## Linear regression: least-squares regression ##
from sklearn import linear_model
linreg = linear_model.LinearRegression()
x_train = diabetes.data[:-20]
y_train = diabetes.target[:-20]
x_test = diabetes.data[-20:]
y_test = diabetes.target[-20:]
linreg.fit(x_train, y_train)
linreg.coef_
linreg.predict(x_test)
# Uses variance as the evaluation metric: the closer it is to 1, the more accurate the prediction
linreg.score(x_test, y_test)
# Linear regression between age and disease progression
x0_test = x_test[:, 0]
x0_train = x_train[:, 0]
# np.newaxis inserts a new axis of size 1 at its position,
# turning a one-dimensional row into a column vector
x0_test = x0_test[:, np.newaxis]
x0_train = x0_train[:, np.newaxis]
linreg.fit(x0_train, y_train)
y = linreg.predict(x0_test)
plt.scatter(x0_test, y_test, color='k')
plt.plot(x0_test, y, color='b', linewidth=3)
plt.figure(figsize=(8,12))
for f in range(0,10):
xi_test = x_test[:,f]
xi_train = x_train[:, f]
xi_test = xi_test[:, np.newaxis]
xi_train = xi_train[:, np.newaxis]
linreg.fit(xi_train, y_train)
y = linreg.predict(xi_test)
plt.subplot(5, 2, f+1)
plt.scatter(xi_test, y_test, color='k')
plt.plot(xi_test, y, color='b', linewidth=3)
## Support vector machines ##
from sklearn import svm
x = np.array([[1,3], [1,2], [1,1.5], [1.5,2], [2,3], [2.5,1.5], [2,1], [3,1], [3,2], [3.5,1], [3.5,3]])
y = [0]*6 + [1]*5
plt.scatter(x[:,0], x[:,1], c=y, s=50, alpha=0.9)
svc = svm.SVC(kernel='linear').fit(x,y)
X,Y = np.mgrid[0:4:200j,0:4:200j]
Z = svc.decision_function(np.c_[X.ravel(),Y.ravel()])
Z = Z.reshape(X.shape)
plt.contourf(X,Y,Z>0, alpha=0.4)
plt.contour(X,Y,Z,colors=['k'], linestyles=['-'],levels=[0])
plt.scatter(x[:,0], x[:,1], c=y, s=50, alpha=0.9)
|
try:
from ysharp.ysharp_lexer import YsharpLexer
from ysharp.errors import syntax_error
except ModuleNotFoundError:
from ysharp_lexer import YsharpLexer
from errors import syntax_error
from sly import Parser
import pprint
import logging
class YsharpParser(Parser):
tokens = YsharpLexer.tokens
debugfile = "parser.out"
log = logging.getLogger()
log.setLevel(logging.ERROR)
syntax_error_obj = syntax_error()
precedence = (
("left", EMPTY),
("left", ","),
("right", "="),
("left", "|"),
("left", "&"),
("left", EQEQ, NOT_EQEQ),
("left", EQ_LESS, EQ_GREATER, "<", ">"),
("left", "+", "-"),
("left", "*", "/", "%"),
("right", UMINUS, UPLUS),
("right", "!"),
("left", COLON_COLON),
)
# Program START
@_("program statement")
def program(self, p):
return p.program + (p.statement,)
@_("statement")
def program(self, p):
return (p.statement,)
@_("empty")
def program(self, p):
return ()
# Program END
###########################################################################
# Statements START
@_("function_declaration")
def statement(self, p):
        return p.function_declaration
@_("class_declaration")
def statement(self, p):
return p.class_declaration
@_("function_call_statement")
def statement(self, p):
return p.function_call_statement
@_("class_attribute_assignment")
def statement(self, p):
return p.class_attribute_assignment
@_("conditional")
def statement(self, p):
return p.conditional
@_("while_loop")
def statement(self, p):
return p.while_loop
@_("python_code_statement")
def statement(self, p):
return p.python_code_statement
@_("variable_assignment")
def statement(self, p):
return p.variable_assignment
@_("break_statement")
def statement(self, p):
return p.break_statement
@_("for_loop")
def statement(self, p):
return p.for_loop
@_("delete_statement")
def statement(self, p):
return p.delete_statement
@_("return_statement")
def statement(self, p):
return p.return_statement
@_("variable_operation")
def statement(self, p):
return p.variable_operation
@_("import_statement")
def statement(self, p):
return p.import_statement
@_("sandbox")
def statement(self, p):
return p.sandbox
# Statements END
###########################################################################
    # Statement syntax START
@_("LIMPORT expression ';'")
def sandbox(self, p):
return ("LIMPORT", {"EXPRESSION": p.expression}, p.lineno)
@_("SANDBOX '{' program '}'")
def sandbox(self, p):
return ("SANDBOX", {"PROGRAM": p.program}, p.lineno)
@_("function_call ';'")
def function_call_statement(self, p):
return p.function_call
@_("python_code ';'")
def python_code_statement(self, p):
return p.python_code
@_("BREAK ';'")
def break_statement(self, p):
return ("BREAK", p.lineno)
@_("RETURN expression ';'")
def return_statement(self, p):
return ("RETURN", {"EXPRESSION": p.expression}, p.lineno)
@_("expression '(' function_arguments ')'")
def function_call(self, p):
return (
"FUNCTION_CALL",
{"FUNCTION_ARGUMENTS": p.function_arguments, "ID": p.expression},
p.lineno,
)
@_("expression '(' empty ')'")
def function_call(self, p):
return (
"FUNCTION_CALL",
{"FUNCTION_ARGUMENTS": {}, "ID": p.expression},
p.lineno,
)
@_("FUNC ID '(' function_arguments ')' '{' program '}'")
def function_declaration(self, p):
return (
"FUNCTION_DECLARATION",
{
"FUNCTION_ARGUMENTS": p.function_arguments,
"ID": p.ID,
"PROGRAM": p.program,
},
p.lineno,
)
@_("FUNC ID '(' empty ')' '{' program '}'")
def function_declaration(self, p):
return (
"FUNCTION_DECLARATION",
{"FUNCTION_ARGUMENTS": {}, "ID": p.ID, "PROGRAM": p.program},
p.lineno,
)
@_("positional_args")
def function_arguments(self, p):
return {"POSITIONAL_ARGS": p.positional_args}
@_("positional_args ',' kwargs")
def function_arguments(self, p):
return {"POSITIONAL_ARGS": p.positional_args, "KWARGS": p.kwargs}
@_("kwargs")
def function_arguments(self, p):
return {"KWARGS": p.kwargs}
@_("CLASS ID '{' program '}'")
def class_declaration(self, p):
return ("CLASS_DECLARATION", {"ID": p.ID, "PROGRAM": p.program}, p.lineno)
@_("FOR expression IN expression '{' program '}'")
def for_loop(self, p):
return (
"FOR",
{
"PROGRAM": p.program,
"VARIABLE": p.expression0,
"ITERABLE": p.expression1,
},
p.lineno,
)
@_("WHILE '(' expression ')' '{' program '}'")
def while_loop(self, p):
return ("WHILE", {"PROGRAM": p.program, "CONDITION": p.expression}, p.lineno)
@_("positional_args ',' expression")
def positional_args(self, p):
return p.positional_args + (p.expression,)
@_("expression")
def positional_args(self, p):
return (p.expression,)
@_("kwargs ',' id '=' expression")
def kwargs(self, p):
return p.kwargs + ({"ID": p.id, "EXPRESSION": p.expression},)
@_("ID '=' expression")
def kwargs(self, p):
return ({"ID": p.ID, "EXPRESSION": p.expression},)
@_("ID '=' expression ';'")
def variable_assignment(self, p):
return (
"VARIABLE_ASSIGNMENT",
{"ID": p.ID, "EXPRESSION": p.expression},
p.lineno,
)
@_("get_index '=' expression ';'")
def variable_assignment(self, p):
return (
"VARIABLE_ASSIGNMENT",
{"ID": p.get_index, "EXPRESSION": p.expression},
p.lineno,
)
@_("ID EQ_ADD expression ';'")
def variable_operation(self, p):
return (
"VARIABLE_OPERATION",
{"ID": p.ID, "EXPRESSION": p.expression, "OPERATION": "ADD"},
p.lineno,
)
@_("get_index EQ_ADD expression ';'")
def variable_operation(self, p):
return (
"VARIABLE_OPERATION",
{"ID": p.get_index, "EXPRESSION": p.expression, "OPERATION": "ADD"},
p.lineno,
)
@_("ID EQ_SUB expression ';'")
def variable_operation(self, p):
return (
"VARIABLE_OPERATION",
{"ID": p.ID, "EXPRESSION": p.expression, "OPERATION": "SUB"},
p.lineno,
)
@_("get_index EQ_SUB expression ';'")
def variable_operation(self, p):
return (
"VARIABLE_OPERATION",
{"ID": p.get_index, "EXPRESSION": p.expression, "OPERATION": "SUB"},
p.lineno,
)
@_("ID EQ_MUL expression ';'")
def variable_operation(self, p):
return (
"VARIABLE_OPERATION",
{"ID": p.ID, "EXPRESSION": p.expression, "OPERATION": "MUL"},
p.lineno,
)
@_("get_index EQ_MUL expression ';'")
def variable_operation(self, p):
return (
"VARIABLE_OPERATION",
{"ID": p.get_index, "EXPRESSION": p.expression, "OPERATION": "MUL"},
p.lineno,
)
@_("ID EQ_MOD expression ';'")
def variable_operation(self, p):
return (
"VARIABLE_OPERATION",
{"ID": p.ID, "EXPRESSION": p.expression, "OPERATION": "MOD"},
p.lineno,
)
@_("get_index EQ_MOD expression ';'")
def variable_operation(self, p):
return (
"VARIABLE_OPERATION",
{"ID": p.get_index, "EXPRESSION": p.expression, "OPERATION": "MOD"},
p.lineno,
)
@_("ID EQ_DIV expression ';'")
def variable_operation(self, p):
return (
"VARIABLE_OPERATION",
{"ID": p.ID, "EXPRESSION": p.expression, "OPERATION": "DIV"},
p.lineno,
)
@_("get_index EQ_DIV expression ';'")
def variable_operation(self, p):
return (
"VARIABLE_OPERATION",
{"ID": p.get_index, "EXPRESSION": p.expression, "OPERATION": "DIV"},
p.lineno,
)
@_("class_attribute '=' expression ';'")
def class_attribute_assignment(self, p):
return (
"CLASS_ATTRIBUTE_ASSIGNMENT",
{"CLASS_ATTRIBUTE": p.class_attribute, "EXPRESSION": p.expression},
p.lineno,
)
@_("if_statement")
def conditional(self, p):
return (
"CONDITIONAL",
{"IF": p.if_statement, "ELSE_IF": (None, None), "ELSE": (None, None)},
p.if_statement[2],
)
@_("if_statement else_if_loop")
def conditional(self, p):
return (
"CONDITIONAL",
{"IF": p.if_statement, "ELSE_IF": p.else_if_loop, "ELSE": (None, None)},
p.if_statement[2],
)
@_("if_statement else_if_loop else_statement")
def conditional(self, p):
return (
"CONDITIONAL",
{"IF": p.if_statement, "ELSE_IF": p.else_if_loop, "ELSE": p.else_statement},
p.if_statement[2],
)
@_("if_statement else_statement")
def conditional(self, p):
return (
"CONDITIONAL",
{"IF": p.if_statement, "ELSE_IF": (None, None), "ELSE": p.else_statement},
p.if_statement[2],
)
@_("IF '(' expression ')' '{' program '}'")
def if_statement(self, p):
return ("IF", {"CODE": p.program, "CONDITION": p.expression}, p.lineno)
@_("else_if_loop else_if_statement")
def else_if_loop(self, p):
return p.else_if_loop + (p.else_if_statement,)
@_("else_if_statement")
def else_if_loop(self, p):
return ("ELSE_IF", p.else_if_statement)
@_("ELSE IF '(' expression ')' '{' program '}'")
def else_if_statement(self, p):
return ({"CODE": p.program, "CONDITION": p.expression}, p.lineno)
@_("ELSE '{' program '}'")
def else_statement(self, p):
return ("ELSE", {"CODE": p.program}, p.lineno)
@_("DEL ID ';'")
def delete_statement(self, p):
return ("DEL", {"ID": p.ID}, p.lineno)
@_("IMPORT expression ';'")
def import_statement(self, p):
return ("IMPORT", {"EXPRESSION": p.expression}, p.lineno)
    # Statement syntax END
###########################################################################
# Expression START
@_("'-' expression %prec UMINUS")
def expression(self, p):
return ("NEG", p.expression)
@_("'+' expression %prec UPLUS")
def expression(self, p):
return ("POS", p.expression)
@_("expression '+' expression")
def expression(self, p):
return ("ADD", p[0], p[2])
@_("expression '-' expression")
def expression(self, p):
return ("SUB", p[0], p[2])
@_("expression '/' expression")
def expression(self, p):
return ("DIV", p[0], p[2])
@_("expression '*' expression")
def expression(self, p):
return ("MUL", p[0], p[2])
@_("expression '%' expression")
def expression(self, p):
return ("MOD", p[0], p[2])
@_("expression EQEQ expression")
def expression(self, p):
return ("EQEQ", p[0], p[2])
@_("expression NOT_EQEQ expression")
def expression(self, p):
return ("NOT_EQEQ", p[0], p[2])
@_("expression EQ_LESS expression")
def expression(self, p):
return ("EQ_LESS", p[0], p[2])
@_("expression EQ_GREATER expression")
def expression(self, p):
return ("EQ_GREATER", p[0], p[2])
@_("expression '|' expression")
def expression(self, p):
return ("OR", p[0], p[2])
@_("expression '&' expression")
def expression(self, p):
return ("AND", p[0], p[2])
@_("'!' expression")
def expression(self, p):
return ("NOT", p.expression)
@_("expression '<' expression")
def expression(self, p):
return ("LESS", p[0], p[2])
@_("expression '>' expression")
def expression(self, p):
return ("GREATER", p[0], p[2])
@_("'(' expression ')'")
def expression(self, p):
return p.expression
@_("python_code")
def expression(self, p):
return p.python_code
@_("function_call")
def expression(self, p):
return p.function_call
@_("get_index")
def expression(self, p):
return p.get_index
@_("null")
def expression(self, p):
return p.null
@_("int")
def expression(self, p):
return p.int
@_("float")
def expression(self, p):
return p.float
@_("bool")
def expression(self, p):
return p.bool
@_("string")
def expression(self, p):
return p.string
@_("id")
def expression(self, p):
return p.id
@_("class_attribute")
def expression(self, p):
return p.class_attribute
@_("_tuple")
def expression(self, p):
return p._tuple
@_("_list")
def expression(self, p):
return p._list
@_("_numpy")
def expression(self, p):
return p._numpy
@_("assoc_array")
def expression(self, p):
return p.assoc_array
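    # Illustrative note (assumed input, not from the original test suite): with the
    # precedence table above, a source expression such as `1 + 2 * 3` reduces to the
    # nested tuple
    #     ("ADD", ("INT", {"VALUE": 1}), ("MUL", ("INT", {"VALUE": 2}), ("INT", {"VALUE": 3})))
    # since '*' binds tighter than '+'; the literal values shown assume the lexer
    # yields integer token values.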
# Expression END
###########################################################################
# Intermediate expression START
@_("NULL")
def null(self, p):
return ("NULL", "NULL")
@_("expression '[' expression ']'")
def get_index(self, p):
return ("GET_INDEX", {"EXPRESSION": p.expression0, "INDEX": p.expression1}, p.lineno)
@_("'{' positional_args '}'")
def _tuple(self, p):
return ("TUPLE", {"ITEMS": p.positional_args})
@_("'{' positional_args ',' '}'")
def _tuple(self, p):
return ("TUPLE", {"ITEMS": p.positional_args})
@_("'[' positional_args ']'")
def _list(self, p):
return ("LIST", {"ITEMS": p.positional_args})
@_("'[' positional_args ',' ']'")
def _list(self, p):
return ("LIST", {"ITEMS": p.positional_args})
@_("'(' items ')'")
def _numpy(self, p):
return ("NUMPY", {"ITEMS": p.items})
@_("'(' items ',' ')'")
def _numpy(self, p):
return ("NUMPY", {"ITEMS": p.items})
@_("'(' expression ',' ')'")
def _numpy(self, p):
return ("NUMPY", {"ITEMS": (p.expression,)})
@_("'(' ')'")
def _numpy(self, p):
return ("NUMPY", {"ITEMS": ()})
@_("'(' ',' ')'")
def _numpy(self, p):
return ("NUMPY", {"ITEMS": ()})
@_("items ',' expression")
def items(self, p):
return p.items + (p.expression,)
@_("expression ',' expression")
def items(self, p):
        return (p.expression0, p.expression1)
@_("INT")
def int(self, p):
return ("INT", {"VALUE": p.INT})
@_("STRING")
def string(self, p):
return ("STRING", {"VALUE": p.STRING[1:-1]})
@_("FLOAT")
def float(self, p):
return ("FLOAT", {"VALUE": p.FLOAT})
@_("TRUE")
def bool(self, p):
return ("BOOL", {"VALUE": p.TRUE})
@_("FALSE")
def bool(self, p):
return ("BOOL", {"VALUE": p.FALSE})
@_("expression COLON_COLON ID")
def class_attribute(self, p):
return ("CLASS_ATTRIBUTE", {"CLASS": p[0], "ATTRIBUTE": p[2]}, p.lineno)
@_("ID")
def id(self, p):
return ("ID", {"VALUE": p.ID}, p.lineno)
@_(r"'\' assoc_array_items '\'")
def assoc_array(self, p):
return ("ASSOC_ARRAY", {"ITEMS": p.assoc_array_items})
@_("assoc_array_items ',' expression ':' expression")
def assoc_array_items(self, p):
return p.assoc_array_items + ((p.expression0, p.expression1),)
@_("expression ':' expression")
def assoc_array_items(self, p):
return ((p.expression0, p.expression1),)
@_("PYTHON_CODE")
def python_code(self, p):
return ("PYTHON_CODE", {"CODE": p.PYTHON_CODE[1:-1]})
@_("%prec EMPTY")
def empty(self, p):
pass
# Intermediate expression END
###########################################################################
# Syntax error START
|
import pytest
from deal_solver import Conclusion
from ..helpers import prove_f
@pytest.mark.parametrize('check', [
# compare
'{1, 2, 3} == {1, 2, 3}',
'{1, 2, 3} != {1, 2, 3, 4}',
'{1, 2, 3} == {3, 1, 2}',
'{1, 2, 3} == {3, 2, 1, 1, 2}',
'set() != {1}',
'{1} != set()',
'set() == set()',
# compare mismatching types
'{1} != [1]',
'{1} != {1: 2}',
'{1} != "hi"',
'{1} != 1',
'{1} != True',
# 'len({7, 9, 9, 9, 11}) == 3',
# operations
'10 in {3, 6, 10, 17}',
'2 not in {3, 6, 10, 17}',
'{1, 2} | {2, 3} == {1, 2, 3}',
'{1, 2} ^ {2, 3} == {1, 3}',
'{1, 2} & {2, 3} == {2}',
# methods
'{1, 2}.union({2, 3}) == {1, 2, 3}',
'{1, 2}.intersection({2, 3}) == {2}',
'{1, 2}.symmetric_difference({2, 3}) == {1, 3}',
'{1, 2}.difference({2, 3}) == {1}',
'{1, 2}.copy() == {1, 2}',
# is* methods
'{1, 2}.issubset({1, 2, 3})',
'not {1, 2, 3}.issubset({1, 2})',
'{1, 2, 3}.issuperset({1, 2})',
'not {1, 2}.issuperset({1, 2, 3})',
'{1, 2}.isdisjoint({3, 4})',
'not {1, 2}.isdisjoint({2, 3})',
# functions
'len({True}) >= 1',
'len({4}) >= 1',
'len({4, 5, 5, 6}) >= 3',
'len(set()) == 0',
'set({1, 2}) == {1, 2}',
])
def test_expr_asserts_ok(check: str) -> None:
assert eval(check)
text = """
from typing import List
def f():
assert {}
"""
text = text.format(check)
theorem = prove_f(text)
assert theorem.conclusion is Conclusion.OK
def test_set_clear():
theorem = prove_f("""
def f():
a = {1, 2, 3}
a.clear()
assert a == set()
a.add(1)
assert a == {1}
""")
assert theorem.conclusion is Conclusion.OK
def test_set_add():
theorem = prove_f("""
def f():
a = set()
a.add(1)
assert a == {1}
a.add(2)
assert a == {1, 2}
a.add(2)
assert a == {1, 2}
""")
assert theorem.conclusion is Conclusion.OK
def test_set_update():
theorem = prove_f("""
def f():
a = {1, 2}
a.update({2, 3})
assert a == {1, 2, 3}
""")
assert theorem.conclusion is Conclusion.OK
def test_set_discard():
theorem = prove_f("""
def f():
a = {1, 2, 3}
a.discard(2)
assert a == {1, 3}
a.discard(2)
assert a == {1, 3}
""")
assert theorem.conclusion is Conclusion.OK
def test_set_remove():
theorem = prove_f("""
def f():
a = {1, 2, 3}
a.remove(2)
assert a == {1, 3}
""")
assert theorem.conclusion is Conclusion.OK
def test_set_remove_fails():
theorem = prove_f("""
def f():
a = {1, 2, 3}
a.remove(2)
a.remove(2)
""")
assert theorem.conclusion is Conclusion.FAIL
assert str(theorem.description) == 'KeyError'
def test_set_pop():
theorem = prove_f("""
def f():
a = {1}
v = a.pop()
assert v == 1
assert a == set()
""")
assert theorem.conclusion is Conclusion.OK
def test_set_pop_two():
theorem = prove_f("""
def f():
a = {1, 2}
v = a.pop()
assert (v == 1 and a == {2}) or (v == 2 and a == {1})
""")
assert theorem.conclusion is Conclusion.OK
def test_set_pop_empty():
theorem = prove_f("""
def f():
a = set()
a.pop()
""")
assert theorem.conclusion is Conclusion.FAIL
assert str(theorem.description) == 'KeyError: pop from an empty set'
|
import argparse
class GshCommandParser(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
self.errored = False
        super().__init__(*args, **kwargs)
def exit(self, *args, **kwargs):
self.errored = True
def parse_args(self, *args, **kwargs):
res = super().parse_args(*args, **kwargs)
if self.errored:
self.errored = False
return None
return res
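# Illustrative note (hypothetical session, not from the original code): because exit()
# only sets a flag instead of terminating, a failed parse returns None rather than
# killing the interactive shell, e.g.
#   demo = GshCommandParser(prog="demo")
#   demo.add_argument("name")
#   demo.parse_args([])        # prints the usage error, returns None
#   demo.parse_args(["node"])  # returns Namespace(name='node')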
# Endpoint
endpoint_parser = GshCommandParser(
prog="endpoint",
description='Query current node endpoints.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
endpoint_parser.add_argument(
'name', default='', nargs='?',
    help='Endpoint name, if not provided will display all available endpoints '
'with descriptions')
endpoint_parser.add_argument(
'-m', '--http-method', choices=['get', 'post', 'patch', 'delete'],
default='get', help='HTTP method used to query endpoint')
endpoint_parser.add_argument(
'data', type=str, default='', nargs='?',
help='Data sent to endpoints')
# cd
cd_parser = GshCommandParser(
prog="cd",
description='Change directory by navigating thru the resource tree',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
cd_parser.add_argument('path', help='Requested path')
# ls
ls_parser = GshCommandParser(
prog="ls",
description='Display current resource tree node',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
ls_parser.add_argument('path', default='', nargs='?', help='Requested path')
# create
create_parser = GshCommandParser(
prog="create",
description='Create a new child resource by providing json as argument',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
create_parser.add_argument(
'path', type=str, default='', nargs='?',
help='Alternative path, could be relative or absolute')
create_parser.add_argument('data', help='JSON data')
# delete
delete_parser = GshCommandParser(
prog="delete",
description='Delete current resource and "cd" to parent',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
delete_parser.add_argument(
'path', type=str, default='', nargs='?',
help='Alternative path, could be relative or absolute')
# update
update_parser = GshCommandParser(
prog="update",
description='Update current resource by providing json as argument',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
update_parser.add_argument(
'path', type=str, default='', nargs='?',
help='Alternative path, could be relative or absolute')
update_parser.add_argument('data', help='JSON data')
|
class MoveFilter(object):
def _common_handle(self, moves, rival_move):
new_moves = list()
for move in moves:
if move[0] > rival_move[0]:
new_moves.append(move)
return new_moves
def filter_type_1_single(self, moves, rival_move):
return self._common_handle(moves, rival_move)
def filter_type_2_pair(self, moves, rival_move):
return self._common_handle(moves, rival_move)
def filter_type_3_triple(self, moves, rival_move):
return self._common_handle(moves, rival_move)
def filter_type_4_bomb(self, moves, rival_move):
return self._common_handle(moves, rival_move)
# No need to filter for type_5_king_bomb
def filter_type_6_3_1(self, moves, rival_move):
rival_card_dict = dict()
target_rival_card = -1
for card in rival_move:
if card not in rival_card_dict:
rival_card_dict[card] = 1
else:
target_rival_card = card
break
new_moves = list()
for move in moves:
card_dict = dict()
for card in move:
if card not in card_dict:
card_dict[card] = 1
else:
if card > target_rival_card:
new_moves.append(move)
break
return new_moves
def filter_type_7_3_2(self, moves, rival_move):
rival_card_dict = dict()
target_rival_card = -1
for card in rival_move:
if card not in rival_card_dict:
rival_card_dict[card] = 1
else:
rival_card_dict[card] += 1
if rival_card_dict[card] == 3:
target_rival_card = card
break
new_moves = list()
for move in moves:
card_dict = dict()
for card in move:
if card not in card_dict:
card_dict[card] = 1
else:
card_dict[card] += 1
if card_dict[card] == 3 and \
card > target_rival_card:
new_moves.append(move)
break
return new_moves
def filter_type_8_serial_single(self, moves, rival_move):
return self._common_handle(moves, rival_move)
def filter_type_9_serial_pair(self, moves, rival_move):
return self._common_handle(moves, rival_move)
def filter_type_10_serial_triple(self, moves, rival_move):
return self._common_handle(moves, rival_move)
def filter_type_11_serial_3_1(self, moves, rival_move):
rival_triple_list = list()
rival_dict = dict()
for card in rival_move:
if card not in rival_dict:
rival_dict[card] = 1
else:
rival_dict[card] += 1
if rival_dict[card] == 3:
rival_triple_list.append(card)
rival_triple_list = sorted(rival_triple_list)
new_moves = list()
for move in moves:
move_dict = dict()
move_triple_list = list()
for card in move:
if card not in move_dict:
move_dict[card] = 1
else:
move_dict[card] += 1
if move_dict[card] == 3:
move_triple_list.append(card)
move_triple_list = sorted(move_triple_list)
if move_triple_list[0] > rival_triple_list[0]:
new_moves.append(move)
return new_moves
def filter_type_12_serial_3_2(self, moves, rival_move):
rival_triple_list = list()
rival_dict = dict()
for card in rival_move:
if card not in rival_dict:
rival_dict[card] = 1
else:
rival_dict[card] += 1
if rival_dict[card] == 3:
rival_triple_list.append(card)
rival_triple_list = sorted(rival_triple_list)
new_moves = list()
for move in moves:
move_dict = dict()
move_triple_list = list()
for card in move:
if card not in move_dict:
move_dict[card] = 1
else:
move_dict[card] += 1
if move_dict[card] == 3:
move_triple_list.append(card)
move_triple_list = sorted(move_triple_list)
if move_triple_list[0] > rival_triple_list[0]:
new_moves.append(move)
return new_moves
def filter_type_13_4_2(self, moves, rival_move):
rival_card_dict = dict()
target_rival_card = -1
for card in rival_move:
if card not in rival_card_dict:
rival_card_dict[card] = 1
else:
rival_card_dict[card] += 1
if rival_card_dict[card] == 4:
target_rival_card = card
break
new_moves = list()
for move in moves:
card_dict = dict()
for card in move:
if card not in card_dict:
card_dict[card] = 1
else:
card_dict[card] += 1
if card_dict[card] == 4 and \
card > target_rival_card:
new_moves.append(move)
break
return new_moves
def filter_type_14_4_4(self, moves, rival_move):
rival_card_dict = dict()
target_rival_card = -1
for card in rival_move:
if card not in rival_card_dict:
rival_card_dict[card] = 1
else:
rival_card_dict[card] += 1
if rival_card_dict[card] == 4:
target_rival_card = card
break
new_moves = list()
for move in moves:
card_dict = dict()
for card in move:
if card not in card_dict:
card_dict[card] = 1
else:
card_dict[card] += 1
if card_dict[card] == 4 and \
card > target_rival_card:
new_moves.append(move)
break
return new_moves
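# Hedged usage sketch (illustrative only): cards are assumed to be encoded as integers
# where a larger value beats a smaller one, which is what the filters above rely on.
if __name__ == '__main__':
    move_filter = MoveFilter()
    candidate_singles = [[3], [7], [11]]
    rival_single = [7]
    print(move_filter.filter_type_1_single(candidate_singles, rival_single))  # [[11]]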
|
# profile settings elements
ELEMENTS = [
## Settings ##
## Account
# cover image enter
{
"name": "coverImage",
"classes": ["g-btn.m-rounded.m-sm.m-border"],
"text": ["Upload cover image"],
"id": []
},
# cover image cancel button
{
"name": "coverImageCancel",
"classes": ["b-user-panel__del-btn.m-cover"],
"text": [],
"id": []
},
# profile photo
{
"name": "profilePhoto",
"classes": ["g-btn.m-rounded.m-sm.m-border"],
"text": ["Upload profile photo"],
"id": []
},
# profile photo cancel button
{
"name": "profilePhotoCancel",
"classes": ["b-user-panel__del-btn.m-avatar"],
"text": [],
"id": []
},
# username
{
"name": "username",
"classes": [],
"text": [],
"id": ["input-login"]
},
# display name
{
"name": "displayName",
"classes": [],
"text": [],
"id": ["input-name"]
},
# subscription price
{
"name": "subscriptionPrice",
"classes": ["form-control.g-input"],
"text": ["Free"],
"id": []
},
# subscription bundle
# TODO
{
"name": "subscriptionBundle",
"classes": [None],
"text": [],
"id": []
},
# referral award enabled / disabled
# TODO
{
"name": "referralReward",
"classes": [None],
"text": [],
"id": []
},
# ADD reward for subscriber referrals
# about
{
"name": "about",
"classes": [],
"text": [],
"id": ["input-about"]
},
# location
{
"name": "location",
"classes": [],
"text": [],
"id": ["input-location"]
},
# website url
{
"name": "websiteURL",
"classes": [],
"text": [],
"id": ["input-website"]
},
## Advanced
# username
# BLANK
# username
# {
# "name": "username",
# "classes": ["form-control.g-input"],
# "text": [],
# "id": []
# },
# email
{
"name": "email",
"classes": ["form-control.g-input"],
"text": [],
"id": []
},
# connect other onlyfans accounts username enter area
# BLANK
# password
{
"name": "password",
"classes": ["form-control.g-input"],
"text": [],
"id": []
},
# password 2x
{
"name": "newPassword",
"classes": ["form-control.g-input"],
"text": [],
"id": []
},
# confirm new password
{
"name": "confirmPassword",
"classes": ["form-control.g-input"],
"text": [],
"id": []
},
## Messaging
# all TODO
{
"name": "welcomeMessageToggle",
"classes": [None],
"text": [],
"id": []
},
{
"name": "welcomeMessageText",
"classes": [None],
"text": [],
"id": []
},
{
"name": "welcomeMessageUpload",
"classes": [None],
"text": [],
"id": []
},
{
"name": "welcomeMessageVoice",
"classes": [None],
"text": [],
"id": []
},
{
"name": "welcomeMessageVideo",
"classes": [None],
"text": [],
"id": []
},
{
"name": "welcomeMessagePrice",
"classes": [None],
"text": [],
"id": []
},
{
"name": "welcomeMessageSave",
"classes": [None],
"text": [],
"id": []
},
{
"name": "welcomeMessageHideToggle",
"classes": [None],
"text": [],
"id": []
},
{
"name": "showFullTextInEmailToggle",
"classes": [None],
"text": [],
"id": []
},
## Notifications
# push notifications
{
"name": "pushNotifs",
"classes": ["g-input__wrapper.m-checkbox__toggle"],
"text": [],
"id": ["push-notifications"]
},
# email notifications
{
"name": "emailNotifs",
"classes": ["g-input__wrapper.m-checkbox__toggle"],
"text": [],
"id": ["email-notifications"]
},
# new referral email
{
"name": "emailNotifsReferral",
"classes": ["b-input-radio"],
"text": ["New Referral"],
"id": []
},
# new stream email
{
"name": "emailNotifsStream",
"classes": ["b-input-radio"],
"text": ["New Stream"],
"id": []
},
# new subscriber email
{
"name": "emailNotifsSubscriber",
"classes": ["b-input-radio"],
"text": ["New Subscriber"],
"id": []
},
# new tip email
{
"name": "emailNotifsSubscriber",
"classes": ["b-input-radio"],
"text": ["New Tip"],
"id": []
},
# new renewal email
{
"name": "emailNotifsSubscriber",
"classes": ["b-input-radio"],
"text": ["Renewal"],
"id": []
},
{
"name": "emailNotifsTip",
"classes": ["b-input-radio"],
"text": [],
"id": []
},
#
{
"name": "emailNotifsRenewal",
"classes": ["b-input-radio"],
"text": [],
"id": []
},
# new likes summary
{
"name": "emailNotifsLikes",
"classes": [None],
"text": [],
"id": []
},
# new posts summary
{
"name": "emailNotifsPosts",
"classes": [None],
"text": [],
"id": []
},
# new private message summary
{
"name": "emailNotifsPrivMessages",
"classes": [None],
"text": [],
"id": []
},
# telegram bot button
# BLANK
# site notifications
{
"name": "siteNotifs",
"classes": [None],
"text": [],
"id": []
},
# new comment notification
{
"name": "siteNotifsComment",
"classes": [],
"text": ["New comment"],
"id": []
},
# new favorite notification
{
"name": "siteNotifsFavorite",
"classes": [],
"text": ["New favorite (like)"],
"id": []
},
    # discounts from users I used to follow notification
{
"name": "siteNotifsDiscounts",
"classes": [],
"text": ["Discounts from users I used to follow"],
"id": []
},
# new subscriber notification
{
"name": "siteNotifsSubscriber",
"classes": [],
"text": ["New Subscriber"],
"id": []
},
# new tip notification
{
"name": "siteNotifsTip",
"classes": [],
"text": ["New Tip"],
"id": []
},
# toast notification new comment
{
"name": "toastNotifsComment",
"classes": [],
"text": ["New comment"],
"id": []
},
# toast notification new favorite
{
"name": "toastNotifsFavorite",
"classes": [],
"text": ["New favorite (like)"],
"id": []
},
# toast notification new subscriber
{
"name": "toastNotifsSubscriber",
"classes": [],
"text": ["New Subscriber"],
"id": []
},
# toast notification new tip
{
"name": "toastNotifsTip",
"classes": [],
"text": ["New Tip"],
"id": []
},
## Security
# two step toggle
# BLANK
# fully private profile
{
"name": "fullyPrivate",
"classes": [],
"text": [],
"id": ["is_private"]
},
# enable comments
{
"name": "enableComments",
"classes": [],
"text": [],
"id": ["is_want_comments"]
},
# show fans count on profile
{
"name": "showFansCount",
"classes": [],
"text": [],
"id": ["show_subscribers_count"]
},
# show posts tips summary
{
"name": "showPostsTip",
"classes": [],
"text": [],
"id": ["show_posts_tips"]
},
# public friends list
{
"name": "publicFriendsList",
"classes": [],
"text": [],
"id": ["show_friends_list"]
},
# geo blocking
{
"name": "ipCountry",
"classes": ["multiselect__input"],
"text": [],
"id": []
},
# ip blocking
{
"name": "ipIP",
"classes": [],
"text": [],
"id": ["input-blocked-ips"]
},
# watermarks photos
{
"name": "watermarkPhoto",
"classes": [],
"text": [],
"id": ["hasWatermarkPhoto"]
},
# watermarks video
{
"name": "watermarkVideo",
"classes": [],
"text": [],
"id": ["hasWatermarkVideo"]
},
# watermarks text
{
"name": "watermarkText",
"classes": ["form-control.g-input"],
"text": [],
"id": []
},
####### save changes may be the same for each
## Story
# allow message replies - nobody
{
"name": "storyAllowRepliesNobody",
"classes": [],
"text": [],
"id": ["allowNobody"]
},
# allow message replies - subscribers
{
"name": "storyAllowRepliesSubscribers",
"classes": [],
"text": [],
"id": ["allowSubscribers"]
},
## Other
# obs server
{
"name": "liveServer",
"classes": [],
"text": [],
"id": ["obsstreamingserver"]
},
# obs key
{
"name": "liveServerKey",
"classes": [],
"text": [],
"id": ["streamingobskey"]
},
# welcome chat message toggle
{
"name": "welcomeMessageToggle",
"classes": [],
"text": [],
"id": ["autoMessage"]
},
# then same pattern for message enter text or add stuff and price
{
"name": "welcomeMessageText",
"classes": ["form-control.b-chat__message-input"],
"text": [],
"id": []
},
# save button for welcome chat message
{
"name": "welcomeMessageSave",
"classes": ["g-btn.m-rounded.b-chat__btn-submit"],
"text": [],
"id": []
},
{
"name": "profileSave",
"classes": ["g-btn.m-rounded"],
"text": ["Save changes"],
"id": [],
}
]
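# Hedged helper sketch (not part of the original module): one possible way to consume
# the ELEMENTS table. Returning the first entry whose "name" matches is an assumption
# for illustration; several entries above intentionally share a name.
def find_element(name):
    """Return the first settings element whose "name" matches, or None."""
    for element in ELEMENTS:
        if element["name"] == name:
            return element
    return None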
# # working
########################
# username
# displayName
# about
# location
# websiteURL
## security
# fullyPrivate
# enableComments
# showFansCount
# showPostsTip
# publicFriendsList
# ipCountry
# ipIP
# watermarkPhoto
# watermarkVideo
# watermarkText
# welcomeMessageToggle
## other
# liveServer
# liveServerKey
# # sorta working
########################
# coverImage
# profilePhoto
# password
# newPassword
# confirmPassword
# # all the notifs are probably false positives
# # they all use the b-input-radio class, so maybe the nth match found should be used
# emailNotifsReferral
# emailNotifsStream
# emailNotifsSubscriber
# emailNotifsTip
# emailNotifsRenewal
# # not working
# ########################
# email
# emailNotifs
# emailNotifsPosts
# emailNotifsPrivMessages
# siteNotifs
# siteNotifsComment
# siteNotifsFavorite
# siteNotifsDiscounts
# siteNotifsSubscriber
# siteNotifsTip
# toastNotifsComment
# toastNotifsSubscriber
# toastNotifsTip
# welcomeMessageText
|
import glob
import numpy as np
import torch
from ignite.contrib.handlers import param_scheduler
from ignite.contrib.handlers import ProgressBar
import monai
from monai.apps import load_from_mmar
from monai.transforms import (
CastToTyped,
ToTensord
)
from utils.logger import log
def _get_transforms(transforms, dtype=(np.float32, np.uint8), keys=("image", "label")):
"""Returns a composed transform.
Args:
transforms (list): list containing all transforms specified on config file (cfg).
dtype (tuple, optional): dtypes used on CastToTyped MONAI transform. Defaults to (np.float32, np.uint8).
keys (tuple, optional): keys used as params for MONAI transforms . Defaults to ("image", "label").
Returns:
monai.transforms: returns MONAI transforms composed.
"""
def get_object(transform):
if hasattr(monai.transforms, transform.name):
return getattr(monai.transforms, transform.name)(**transform.params)
else:
return eval(transform.name)
xforms = [get_object(transform) for transform in transforms]
xforms.extend(
[
CastToTyped(keys=keys, dtype=dtype),
ToTensord(keys=keys),
]
)
return monai.transforms.Compose(xforms)
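# Illustrative note (assumed config shape, not taken from the original project): each
# entry in `transforms` must expose `.name` and `.params` attributes, e.g. an
# OmegaConf-style config such as
#   transforms:
#     - name: LoadImaged
#       params: {keys: [image, label]}
#     - name: ScaleIntensityd
#       params: {keys: [image]}
# so that get_object() can resolve every name against monai.transforms.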
def get_dataloader(cfg, data):
"""Apply the transforms and create a DataLoader.
Args:
cfg (dict): config file.
data (list): list containing all the files (in this case the MRIs).
Returns:
monai.data.DataLoader: returns a DataLoader.
"""
transforms = _get_transforms(cfg.transforms)
dataset = monai.data.CacheDataset(
data=data,
transform=transforms
)
return monai.data.DataLoader(
dataset,
# if == 1 ==> image-level batch to the sliding window method, not the window-level batch
batch_size=cfg.batch_size,
shuffle=cfg.loader.shuffle,
num_workers=cfg.loader.num_workers,
pin_memory=torch.cuda.is_available(),
)
def get_post_transforms(transforms):
"""Returns MONAI post transforms composed.
Args:
transforms (dict): python dict containing all transforms and its parameters.
"""
def get_object(post_transform):
if hasattr(monai.transforms, post_transform.name):
return getattr(monai.transforms, post_transform.name)(**post_transform.params)
else:
return eval(post_transform.name)
post_transforms = [get_object(post_transform)
for post_transform in transforms]
return monai.transforms.Compose(post_transforms)
def get_model(cfg):
"""Instantiates the model.
Args:
cfg (dict): config file.
Returns:
Pytorch (MONAI) model: returns a model instance.
"""
if cfg.model.name == "DynUNet":
raise ValueError(f"Not supporting {cfg.model.name} anymore.")
    if cfg.model.pretrained:
        log("Loading pretrained model from NVIDIA Clara")
        return load_from_mmar(**cfg.model.mmar)
    try:
        return getattr(monai.networks.nets, cfg.model.name)(**cfg.model.params)
    except Exception:
        log(f"Failed to load model. Model: {cfg.model.name}")
def get_loss(cfg):
"""Instantiate the loss function.
Args:
cfg (dict): config file.
Returns:
monai.losses: returns a monai instance loss.
"""
log(f"Criterion: {cfg.loss.name}")
try:
return getattr(monai.losses, cfg.loss.name)(**cfg.loss.params)
    except Exception:
log(
f"Failed to import and load the loss function. Loss Function {cfg.loss.name}"
)
def get_optimizer(cfg, parameters):
"""Get the optimizer.
Args:
cfg (dict): config file.
parameters (model.params): params from the model.
Returns:
torch.optim: returns a optimizer (pytorch).
"""
optimizer = getattr(torch.optim, cfg.optimizer.name)(
parameters, **cfg.optimizer.params)
log(f"Optimizer: {cfg.optimizer.name}")
return optimizer
def get_scheduler(cfg, optimizer, len_loader):
"""Get scheduler.
Args:
cfg (dict): config file.
optimizer (torch.optim): optimizer.
len_loader (int): len of the DataLoader.
Returns:
lr_scheduler (ignite): returns a learning rate scheduler.
"""
log(f"LR Scheduler: {cfg.scheduler.name}")
try:
if cfg.scheduler.name == "CosineAnnealingScheduler":
return getattr(param_scheduler, cfg.scheduler.name)(
optimizer, cycle_size=len_loader, **cfg.scheduler.params)
else:
return getattr(
torch.optim.lr_scheduler,
cfg.scheduler.name
)(optimizer, **cfg.scheduler.params)
    except Exception:
log(f"Failed to load the scheduler. Scheduler: {cfg.scheduler.name}")
def get_inferer(cfg):
"""Returns a sliding window inference instance
Args:
cfg (dict): config file.
Returns:
monai.inferer: returns a MONAI inferer.
"""
try:
return getattr(monai.inferers, cfg.inferer.name)(**cfg.inferer.params)
    except Exception:
        log(
            f"Failed to import and load the inferer. Inferer: {cfg.inferer.name}"
        )
def get_handlers(cfg, handler, model=None, fold=None, evaluator=None, scheduler=None):
"""Returns the handlers specified on config file (cfg).
Args:
cfg (dict): config file.
handler (list): list of all handlers and its parameters.
model (monai.networks.nets, optional): architecture used for training. Defaults to None.
fold (int, optional): current fold. Defaults to None.
evaluator (monai.engines.SupervisedEvaluator, optional): evaluator used for validation. Defaults to None.
scheduler (torch.optim.lr_scheduler, optional): lr scheduler used for training. Defaults to None.
Returns:
handlers (list): list containing all handlers loaded.
"""
def get_object(handler):
if hasattr(monai.handlers, handler.name):
return getattr(monai.handlers, handler.name)
else:
return eval(handler.name)
handlers = [get_object(_handler)(**_handler.params)
for _handler in handler.handlers]
if handler.name == "validation":
handlers.extend([
monai.handlers.CheckpointSaver(
save_dir=cfg.workdir,
file_prefix=f"model_fold{fold}",
save_dict={
"model": model
},
save_key_metric=True,
key_metric_n_saved=5)
])
else:
handlers.extend([
monai.handlers.ValidationHandler(
validator=evaluator,
interval=5,
epoch_level=True
),
monai.handlers.LrScheduleHandler(
lr_scheduler=scheduler, print_lr=True,)
])
return handlers
def get_models(cfg, device):
"""Load models for testing.
Args:
cfg (dict): config file.
device (str): device used. (eg.:. 'cpu' or 'cuda')
Returns:
list: return all the models loaded.
"""
    if not isinstance(cfg.checkpoints, list):
        model_paths = glob.glob(cfg.checkpoints)
    else:
        model_paths = cfg.checkpoints
    models = []
for model_path in model_paths:
model = get_model(cfg).to(device)
model.load_state_dict(torch.load(model_path))
models.append(model)
log(f"Total models successfully loaded: {len(models)}")
return models
|
import logging.config
import os
import uuid
from datetime import datetime
import pytz
from pynwb import NWBHDF5IO, NWBFile
from pynwb.file import Subject
from rec_to_nwb.processing.builder.originators.associated_files_originator import AssociatedFilesOriginator
from rec_to_nwb.processing.builder.originators.camera_device_originator import CameraDeviceOriginator
from rec_to_nwb.processing.builder.originators.camera_sample_frame_counts_originator import \
CameraSampleFrameCountsOriginator
from rec_to_nwb.processing.builder.originators.data_acq_device_originator import DataAcqDeviceOriginator
from rec_to_nwb.processing.builder.originators.electrode_group_originator import ElectrodeGroupOriginator
from rec_to_nwb.processing.builder.originators.electrodes_extension_originator import ElectrodesExtensionOriginator
from rec_to_nwb.processing.builder.originators.electrodes_originator import ElectrodesOriginator
from rec_to_nwb.processing.builder.originators.epochs_originator import EpochsOriginator
from rec_to_nwb.processing.builder.originators.header_device_originator import HeaderDeviceOriginator
from rec_to_nwb.processing.builder.originators.mda_invalid_time_originator import MdaInvalidTimeOriginator
from rec_to_nwb.processing.builder.originators.mda_valid_time_originator import MdaValidTimeOriginator
from rec_to_nwb.processing.builder.originators.old_analog_originator import OldAnalogOriginator
from rec_to_nwb.processing.builder.originators.old_dio_originator import OldDioOriginator
from rec_to_nwb.processing.builder.originators.old_mda_originator import OldMdaOriginator
from rec_to_nwb.processing.builder.originators.old_position_originator import OldPositionOriginator
from rec_to_nwb.processing.builder.originators.old_video_files_originator import OldVideoFilesOriginator
from rec_to_nwb.processing.builder.originators.pos_invalid_originator import PosInvalidTimeOriginator
from rec_to_nwb.processing.builder.originators.pos_valid_time_originator import PosValidTimeOriginator
from rec_to_nwb.processing.builder.originators.probe_originator import ProbeOriginator
from rec_to_nwb.processing.builder.originators.processing_module_originator import ProcessingModuleOriginator
from rec_to_nwb.processing.builder.originators.sample_count_timestamp_corespondence_originator import \
SampleCountTimestampCorespondenceOriginator
from rec_to_nwb.processing.builder.originators.shanks_electrodes_originator import ShanksElectrodeOriginator
from rec_to_nwb.processing.builder.originators.shanks_originator import ShanksOriginator
from rec_to_nwb.processing.builder.originators.task_originator import TaskOriginator
from rec_to_nwb.processing.header.header_checker.header_processor import HeaderProcessor
from rec_to_nwb.processing.header.header_checker.rec_file_finder import RecFileFinder
from rec_to_nwb.processing.header.module.header import Header
from rec_to_nwb.processing.metadata.corrupted_data_manager import CorruptedDataManager
from rec_to_nwb.processing.metadata.metadata_manager import MetadataManager
from rec_to_nwb.processing.nwb.components.device.device_factory import DeviceFactory
from rec_to_nwb.processing.nwb.components.device.device_injector import DeviceInjector
from rec_to_nwb.processing.nwb.components.device.probe.fl_probe_manager import FlProbeManager
from rec_to_nwb.processing.tools.beartype.beartype import beartype
from rec_to_nwb.processing.tools.data_scanner import DataScanner
from rec_to_nwb.processing.validation.associated_files_validator import AssociatedFilesExistanceValidator
from rec_to_nwb.processing.validation.metadata_section_validator import MetadataSectionValidator
from rec_to_nwb.processing.validation.ntrode_validator import NTrodeValidator
from rec_to_nwb.processing.validation.path_validator import PathValidator
from rec_to_nwb.processing.validation.preprocessing_validator import PreprocessingValidator
from rec_to_nwb.processing.validation.task_validator import TaskValidator
from rec_to_nwb.processing.validation.validation_registrator import ValidationRegistrator
path = os.path.dirname(os.path.abspath(__file__))
logging.config.fileConfig(fname=str(path) + '/../../logging.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
class OldNWBFileBuilder:
"""Unpack data from preprocessing folder specified by arguments, and write those data into NWB file format
Args:
data_path (string): path to directory containing all experiments data
animal_name (string): directory name which represents animal subject of experiment
date (string): date of experiment
nwb_metadata (MetadataManager): object contains metadata about experiment
process_dio (boolean): flag if dio data should be processed
process_mda (boolean): flag if mda data should be processed
process_analog (boolean): flag if analog data should be processed
video_path (string): path to directory with video files associated to nwb file
        output_file (string): path and name specifying where the .nwb file is going to be written
Methods:
build()
write()
build_and_append_to_nwb()
"""
@beartype
def __init__(
self,
data_path: str,
animal_name: str,
date: str,
session_start_time,
nwb_metadata: MetadataManager,
process_dio: bool = True,
process_mda: bool = True,
process_analog: bool = True,
process_pos_timestamps: bool = True,
video_path: str = '',
output_file: str = 'output.nwb',
reconfig_header: str = ''
):
logger.info('NWBFileBuilder initialization')
logger.info(
'NWB builder initialization parameters: \n'
+ 'data_path = ' + str(data_path) + '\n'
+ 'animal_name = ' + str(animal_name) + '\n'
+ 'date = ' + str(date) + '\n'
+ 'nwb_metadata = ' + str(nwb_metadata) + '\n'
+ 'process_dio = ' + str(process_dio) + '\n'
+ 'process_mda = ' + str(process_mda) + '\n'
+ 'process_analog = ' + str(process_analog) + '\n'
+ 'output_file = ' + str(output_file) + '\n'
)
validation_registrator = ValidationRegistrator()
validation_registrator.register(PathValidator(data_path))
validation_registrator.validate()
self.animal_name = animal_name
self.date = date
self.data_path = data_path
self.metadata = nwb_metadata.metadata
metadata_section_validator = MetadataSectionValidator(self.metadata)
metadata_section_validator.validate_sections()
if self.metadata.get('associated_files', []):
associated_files_existance_validator = AssociatedFilesExistanceValidator(self.metadata['associated_files'])
if associated_files_existance_validator.files_exist():
pass
else:
                raise Exception("one or more associated files listed in the metadata.yaml file do not exist")
self.probes = nwb_metadata.probes
self.process_dio = process_dio
self.process_mda = process_mda
self.process_analog = process_analog
self.process_pos_timestamps = process_pos_timestamps
self.output_file = output_file
self.video_path = video_path
self.link_to_notes = self.metadata.get('link to notes', None)
data_types_for_scanning = {'pos': True,
'time': True,
'mda': process_mda,
'DIO': process_dio,
'analog': process_analog}
rec_files_list = RecFileFinder().find_rec_files(
path=(self.data_path
+ '/' + self.animal_name
+ '/raw/'
+ self.date)
)
header_file = HeaderProcessor.process_headers(rec_files_list)
if reconfig_header:
self.header = Header(reconfig_header)
else:
self.header = Header(header_file)
self.data_scanner = DataScanner(data_path, animal_name, nwb_metadata)
self.dataset_names = self.data_scanner.get_all_epochs(date)
full_data_path = data_path + '/' + animal_name + '/preprocessing/' + date
validation_registrator = ValidationRegistrator()
validation_registrator.register(NTrodeValidator(self.metadata, self.header, self.probes))
validation_registrator.register(PreprocessingValidator(
full_data_path,
self.dataset_names,
data_types_for_scanning
))
validation_registrator.register(TaskValidator(self.metadata['tasks']))
validation_registrator.validate()
self.__extract_datasets(animal_name, date)
self.corrupted_data_manager = CorruptedDataManager(self.metadata)
self.shanks_electrode_originator = ShanksElectrodeOriginator(self.probes, self.metadata)
self.shanks_originator = ShanksOriginator(self.probes, self.metadata)
self.fl_probe_manager = FlProbeManager(self.probes)
self.device_injector = DeviceInjector()
self.device_factory = DeviceFactory()
self.electrode_group_originator = ElectrodeGroupOriginator(self.metadata)
self.electrodes_originator = ElectrodesOriginator(self.probes, self.metadata)
self.session_start_time = session_start_time
self.mda_valid_time_originator = MdaValidTimeOriginator(self.header, self.metadata)
self.mda_invalid_time_originator = MdaInvalidTimeOriginator(self.header, self.metadata)
self.pos_valid_time_originator = PosValidTimeOriginator(self.metadata)
self.pos_invalid_time_originator = PosInvalidTimeOriginator(self.metadata)
self.epochs_originator = EpochsOriginator(self.datasets)
if 'associated_files' in self.metadata:
self.associated_files_originator = AssociatedFilesOriginator(self.metadata)
self.electrodes_extension_originator = ElectrodesExtensionOriginator(
self.probes,
self.metadata,
self.header
)
self.sample_count_timestamp_corespondence_originator =\
SampleCountTimestampCorespondenceOriginator(self.datasets)
self.processing_module_originator = ProcessingModuleOriginator()
self.task_originator = TaskOriginator(self.metadata)
self.camera_device_originator = CameraDeviceOriginator(self.metadata)
self.header_device_originator = HeaderDeviceOriginator(self.header, self.metadata)
self.probes_originator = ProbeOriginator(self.device_factory, self.device_injector, self.probes)
self.camera_sample_frame_counts_originator = CameraSampleFrameCountsOriginator(
self.data_path + "/" + animal_name + "/raw/" + self.date + "/")
self.old_video_files_originator = OldVideoFilesOriginator(
self.data_path + "/" + animal_name + "/raw/" + self.date + "/",
self.video_path,
self.metadata["associated_video_files"],
)
self.data_acq_device_originator = DataAcqDeviceOriginator(
device_factory=self.device_factory,
device_injector=self.device_injector,
metadata=self.metadata['data acq device']
)
if self.process_mda:
self.old_mda_originator = OldMdaOriginator(self.datasets, self.header, self.metadata)
if self.process_dio:
self.old_dio_originator = OldDioOriginator(self.metadata, self.datasets)
if self.process_analog:
self.old_analog_originator = OldAnalogOriginator(self.datasets, self.metadata)
self.old_position_originator = OldPositionOriginator(self.datasets, self.metadata,
self.dataset_names, self.process_pos_timestamps)
def __extract_datasets(self, animal_name, date):
self.data_scanner.extract_data_from_date_folder(date)
self.datasets = [self.data_scanner.data[animal_name][date][dataset] for dataset in self.dataset_names]
def build(self):
"""Build NWBFile
Returns:
NWBFile: Return NWBFile content
"""
logger.info('Building components for NWB')
nwb_content = NWBFile(
session_description=self.metadata['session description'],
experimenter=self.metadata['experimenter name'],
lab=self.metadata['lab'],
institution=self.metadata['institution'],
session_start_time=self.session_start_time,
timestamps_reference_time=datetime.fromtimestamp(0, pytz.utc),
identifier=str(uuid.uuid1()),
session_id=self.metadata['session_id'],
notes=self.link_to_notes,
experiment_description=self.metadata['experiment description'],
subject=Subject(
description=self.metadata['subject']['description'],
genotype=self.metadata['subject']['genotype'],
sex=self.metadata['subject']['sex'],
species=self.metadata['subject']['species'],
subject_id=self.metadata['subject']['subject id'],
weight=str(self.metadata['subject']['weight']),
),
)
self.processing_module_originator.make(nwb_content)
self.old_video_files_originator.make(nwb_content)
if 'associated_files' in self.metadata:
self.associated_files_originator.make(nwb_content)
self.old_position_originator.make(nwb_content)
valid_map_dict = self.__build_corrupted_data_manager()
shanks_electrodes_dict = self.shanks_electrode_originator.make()
shanks_dict = self.shanks_originator.make(shanks_electrodes_dict)
probes = self.probes_originator.make(nwb_content, shanks_dict, valid_map_dict['probes'])
self.data_acq_device_originator.make(nwb_content)
self.header_device_originator.make(nwb_content)
self.camera_device_originator.make(nwb_content)
electrode_groups = self.electrode_group_originator.make(
nwb_content, probes, valid_map_dict['electrode_groups']
)
self.electrodes_originator.make(
nwb_content, electrode_groups, valid_map_dict['electrodes'], valid_map_dict['electrode_groups']
)
self.electrodes_extension_originator.make(nwb_content, valid_map_dict['electrodes'])
self.epochs_originator.make(nwb_content)
self.sample_count_timestamp_corespondence_originator.make(nwb_content)
self.task_originator.make(nwb_content)
self.camera_sample_frame_counts_originator.make(nwb_content)
if self.process_dio:
self.old_dio_originator.make(nwb_content)
if self.process_analog:
self.old_analog_originator.make(nwb_content)
if self.process_mda:
self.old_mda_originator.make(nwb_content)
return nwb_content
def write(self, content):
"""Write nwb file handler with colected data into actual file"""
logger.info('Writing down content to ' + self.output_file)
with NWBHDF5IO(path=self.output_file, mode='w') as nwb_fileIO:
nwb_fileIO.write(content)
nwb_fileIO.close()
logger.info(self.output_file + ' file has been created.')
return self.output_file
def __build_corrupted_data_manager(self):
logger.info('CorruptedData: Checking')
return self.corrupted_data_manager.get_valid_map_dict()
def build_and_append_to_nwb(self, process_mda_valid_time=True, process_mda_invalid_time=True,
process_pos_valid_time=True, process_pos_invalid_time=True):
"""Create and append to existing nwb. Set flag to add it to nwb
Args:
process_mda_valid_time (boolean): True if the mda valid times should be build and append to nwb.
Need the mda data inside the nwb. (default True)
process_mda_invalid_time (boolean): True if the mda invalid times should be build and append to nwb.
Need the mda data inside the nwb. (default True)
process_pos_valid_time (boolean): True if the pos valid times should be build and append to nwb.
Need the pos data inside the nwb. (default True)
process_pos_invalid_time (boolean): True if the pos invalid times should be build and append to nwb.
Need the pos data inside the nwb. (default True)
Raises:
            ElementExistException: If the element already exists in the NWB
Returns:
NWBFile: Return NWBFile content
"""
with NWBHDF5IO(path=self.output_file, mode='a') as nwb_file_io:
nwb_content = nwb_file_io.read()
if self.process_pos_timestamps:
if process_pos_valid_time:
self.pos_valid_time_originator.make(nwb_content)
if process_pos_invalid_time:
self.pos_invalid_time_originator.make(nwb_content)
if process_mda_valid_time:
self.mda_valid_time_originator.make(nwb_content)
if process_mda_invalid_time:
self.mda_invalid_time_originator.make(nwb_content)
nwb_file_io.write(nwb_content)
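# Hedged usage sketch (not part of the original module), assuming MetadataManager is
# constructed from a metadata yaml path plus a list of probe yaml paths and that the
# preprocessing folder layout described in the class docstring exists:
#
#   metadata = MetadataManager('metadata.yml', ['probe1.yml'])
#   builder = OldNWBFileBuilder(data_path='/data', animal_name='beans', date='20190718',
#                               session_start_time=start_time, nwb_metadata=metadata,
#                               output_file='beans.nwb')
#   content = builder.build()
#   builder.write(content)
#   builder.build_and_append_to_nwb()  # appends valid/invalid time intervals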
|
# ------------------------------------------------------------------------------ #
# Author: Zhenwei Shao (https://github.com/ParadoxZW)
# Description: This is a 2D Cosine Attention Module inspired by
# [cosFormer: Rethinking Softmax in Attention](https://arxiv.org/abs/2202.08791).
# ------------------------------------------------------------------------------ #
from math import pi
import torch
from torch import nn
from torch.nn import functional as F
def linear_attention(q, k, v):
k_cumsum = k.sum(dim=-2)
D = torch.einsum('...nd,...d->...n', q, k_cumsum.type_as(q))
context = torch.einsum('...nd,...ne->...de', k, v)
out = torch.einsum('...de,...nd->...ne', context, q)
return out, D
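# The two einsums above implement linear (kernelized) attention: `context` holds the
# d x e summary K^T V computed once over all positions, and `D` is the per-query
# normalizer q . sum_n(k_n), so the cost is linear in sequence length rather than
# quadratic as in softmax attention. CosAttn2d below re-weights Q and K with four
# cos/sin branches whose products sum to cos(freq*(i_q - i_k)) * cos(freq*(j_q - j_k)),
# a separable 2D locality bias, using cos(a - b) = cos(a)cos(b) + sin(a)sin(b).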
class CosAttn2d(nn.Module):
def __init__(self, N, n_head=8, M=16):
"""
This module implements a cosine attention mechanism designed for
        grid features, e.g. feature maps of images. Note that there are no
        learnable weights in this module. It is only responsible for computing
        attention over Q, K and V with linear time complexity.
Args:
N: edge length of 2d grid
n_head: number of heads
M: a constant, M > N
"""
super().__init__()
self.N = N
self.M = M
self.n_head = n_head
idx = torch.arange(0, N)
freq = pi / (2 * M)
_cos = torch.cos(idx * freq)
_sin = torch.sin(idx * freq)
icos_jcos = (_cos.view(-1, 1) * _cos.view(1, -1)).unsqueeze(0)
icos_jsin = (_cos.view(-1, 1) * _sin.view(1, -1)).unsqueeze(0)
isin_jcos = (_sin.view(-1, 1) * _cos.view(1, -1)).unsqueeze(0)
isin_jsin = (_sin.view(-1, 1) * _sin.view(1, -1)).unsqueeze(0)
attn_coef = torch.cat([icos_jcos, icos_jsin, isin_jcos, isin_jsin], dim=0)
self.register_buffer('attn_coef', attn_coef)
def flatten(self, x):
b, _, H, W = x.shape
x = x.view(b, self.n_head, -1, H, W)
x = x.permute(0, 1, 3, 4, 2).contiguous()
x = x.view(b, self.n_head, H*W, -1)
return x
def forward(self, q, k, v):
"""
Args:
q: [batch_size, channel, height, width], transformed Q
k: [batch_size, channel, height, width], transformed K
v: [batch_size, channel, height, width], transformed V
Returns:
out: [batch_size, channel, height, width]
"""
BS, C, H, W = q.shape
data_normalizer = (C ** -0.25)
v = self.flatten(v)
v = v.unsqueeze(1).repeat(1, 4, 1, 1, 1).view(BS * 4, self.n_head, H*W, -1)
q = F.relu(q * data_normalizer, True) + 1e-5 # (BS, C, H, W)
k = F.relu(k * data_normalizer, True) + 1e-5 # (BS, C, H, W)
q = q[:, None, :, :, :] * self.attn_coef[None, :, None, :, :] # (BS, 4, C, H, W)
k = k[:, None, :, :, :] * self.attn_coef[None, :, None, :, :] # (BS, 4, C, H, W)
q = self.flatten(q.view(BS * 4, C, H, W)) # (BS*4, head, H*W, C//head)
k = self.flatten(k.view(BS * 4, C, H, W)) # (BS*4, head, H*W, C//head)
unnormed, D = linear_attention(q, k, v)
unnormed = unnormed.view(BS, 4, self.n_head, H*W, -1).sum(dim=1)
D_inv = 1. / D.view(BS, 4, self.n_head, -1).sum(dim=1)
out = torch.einsum('...ne,...n->...ne', unnormed, D_inv)
out = out.permute(0, 1, 3, 2).contiguous().view(BS, C, H, W)
return out
if __name__ == '__main__':
attn = CosAttn2d(14).cuda()
x = torch.rand(32, 512, 14, 14).cuda()
y = attn(x, x, x)
print(y.shape)
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import mock
from rally.benchmark.context import users
from rally.benchmark import utils
from tests import test
run_concurrent = (lambda dummy, cls, f, args: list(
itertools.imap(getattr(cls, f), args)))
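# Note: this stub replaces the real run_concurrent helper (via the class-level patch
# below) so the tested calls run serially and deterministically; itertools.imap marks
# this as Python 2 era code.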
@mock.patch.object(utils, "run_concurrent", run_concurrent)
class UserGeneratorTestCase(test.TestCase):
tenants_num = 10
users_per_tenant = 5
users_num = tenants_num * users_per_tenant
concurrent = 10
@property
def context(self):
return {
"config": {
"users": {
"tenants": self.tenants_num,
"users_per_tenant": self.users_per_tenant,
"concurrent": self.concurrent,
}
},
"admin": {"endpoint": mock.MagicMock()},
"task": mock.MagicMock()
}
def setUp(self):
super(UserGeneratorTestCase, self).setUp()
self.osclients_patcher = mock.patch(
"rally.benchmark.context.users.osclients")
self.osclients = self.osclients_patcher.start()
self.keystone_wrapper_patcher = mock.patch(
"rally.benchmark.context.users.keystone")
self.keystone_wrapper = self.keystone_wrapper_patcher.start()
self.wrapped_keystone = self.keystone_wrapper.wrap.return_value
def tearDown(self):
self.keystone_wrapper_patcher.stop()
self.osclients_patcher.stop()
super(UserGeneratorTestCase, self).tearDown()
def test_create_tenant_users(self):
users_num = 5
args = (mock.MagicMock(), users_num, 'default', 'default',
'ad325aec-f7b4-4a62-832a-bb718e465bb7', 1)
result = users.UserGenerator._create_tenant_users(args)
self.assertEqual(len(result), 2)
tenant, users_ = result
self.assertIn("id", tenant)
self.assertIn("name", tenant)
self.assertEqual(len(users_), users_num)
for user in users_:
self.assertIn("id", user)
self.assertIn("endpoint", user)
def test_delete_tenants(self):
tenant1 = mock.MagicMock()
tenant2 = mock.MagicMock()
args = (mock.MagicMock(), [tenant1, tenant2])
users.UserGenerator._delete_tenants(args)
self.keystone_wrapper.wrap.assert_called_once()
self.wrapped_keystone.delete_project.assert_has_calls([
mock.call(tenant1["id"]),
mock.call(tenant2["id"])])
def test_delete_users(self):
user1 = mock.MagicMock()
user2 = mock.MagicMock()
args = (mock.MagicMock(), [user1, user2])
users.UserGenerator._delete_users(args)
self.wrapped_keystone.delete_user.assert_has_calls([
mock.call(user1["id"]),
mock.call(user2["id"])])
def test_setup_and_cleanup(self):
with users.UserGenerator(self.context) as ctx:
self.assertEqual(self.wrapped_keystone.create_user.call_count, 0)
self.assertEqual(self.wrapped_keystone.create_project.call_count,
0)
ctx.setup()
self.assertEqual(len(ctx.context["users"]),
self.users_num)
self.assertEqual(self.wrapped_keystone.create_user.call_count,
self.users_num)
self.assertEqual(len(ctx.context["tenants"]),
self.tenants_num)
self.assertEqual(self.wrapped_keystone.create_project.call_count,
self.tenants_num)
# Assert nothing is deleted yet
self.assertEqual(self.wrapped_keystone.delete_user.call_count,
0)
self.assertEqual(self.wrapped_keystone.delete_project.call_count,
0)
# Cleanup (called by content manager)
self.assertEqual(self.wrapped_keystone.delete_user.call_count,
self.users_num)
self.assertEqual(self.wrapped_keystone.delete_project.call_count,
self.tenants_num)
def test_users_and_tenants_in_context(self):
task = {"uuid": "abcdef"}
config = {
"config": {
"users": {
"tenants": 2,
"users_per_tenant": 2,
"concurrent": 1
}
},
"admin": {"endpoint": mock.MagicMock()},
"task": task
}
user_list = [mock.MagicMock(id='id_%d' % i)
for i in range(self.users_num)]
self.wrapped_keystone.create_user.side_effect = user_list
with users.UserGenerator(config) as ctx:
ctx.setup()
create_tenant_calls = []
for i, t in enumerate(ctx.context["tenants"]):
pattern = users.UserGenerator.PATTERN_TENANT
create_tenant_calls.append(
mock.call(pattern % {"task_id": task["uuid"], "iter": i},
ctx.config["project_domain"]))
self.wrapped_keystone.create_project.assert_has_calls(
create_tenant_calls, any_order=True)
for user in ctx.context["users"]:
self.assertEqual(set(["id", "endpoint", "tenant_id"]),
set(user.keys()))
tenants_ids = []
for t in ctx.context["tenants"]:
tenants_ids.extend([t["id"], t["id"]])
for (user, tenant_id, orig_user) in zip(ctx.context["users"],
tenants_ids, user_list):
self.assertEqual(user["id"], orig_user.id)
self.assertEqual(user["tenant_id"], tenant_id)
|
# Import ROS2 libraries
import rclpy
from rclpy.node import Node
from rclpy.qos import QoSProfile
from rclpy.action import ActionClient
from rclpy.callback_groups import ReentrantCallbackGroup
from rclpy.executors import MultiThreadedExecutor
# Import message files
from geometry_msgs.msg import PoseStamped
from nav_msgs.msg import OccupancyGrid as OccG
from nav_msgs.msg import Odometry
from nav2_msgs.action import FollowWaypoints
from tf2_msgs.msg import TFMessage
from autonomous_exploration_msgs.msg import MapData, PosData, Nav2Waypoints
# Import other libraries
import numpy as np
from scipy.spatial.transform import Rotation
class RosbridgeMsgsPublisher(Node):
def __init__(self):
super().__init__('rosbridge_msgs_publisher')
# Initialize the variables
self.pos = PosData()
self.mp = MapData()
self.mp.origin = [0.0] * 7
self.inPos = []
self.odomReceived = False
qos = QoSProfile(depth=10)
# Create callback group
self.top_callback_group = ReentrantCallbackGroup()
# Setup rate
self.rate = self.create_rate(2)
# Setup subscribers
## /odom
self.create_subscription(Odometry, 'odom', self._odomCallback, qos, callback_group=self.top_callback_group)
## /map
self.create_subscription(OccG, 'map', self._mapCallback, qos, callback_group=self.top_callback_group)
## /rosbridge_msgs_unity/nav_goals
self.create_subscription(Nav2Waypoints, 'rosbridge_msgs_unity/nav_goals', self._navGoalCallback, qos, callback_group=self.top_callback_group)
## /tf
self.create_subscription(TFMessage, 'tf', self._tfCallback, qos, callback_group=self.top_callback_group)
# Setup publishers
## /rosbridge_msgs_publisher/map
self.rosbridgeMap_pub = self.create_publisher(MapData, '/rosbridge_msgs_publisher/map', qos)
## /rosbridge_msgs_publisher/robot_pos
self.rosbridgePos_pub = self.create_publisher(PosData, '/rosbridge_msgs_publisher/robot_pos', qos)
# Create the navigation2 action client
self.nav2ActionClient = ActionClient(self, FollowWaypoints, 'FollowWaypoints')
self.nav2ActionClient.wait_for_server()
# Publish the rosbridge_msgs
self.create_timer(0.50, self.PublishRosbridgeMsgs) # unit: s
self.get_logger().info('Rosbridge_msgs publisher was initiated successfully')
def _tfCallback(self, data:TFMessage):
''' Read the tf data and find the transformation between odom and map '''
for tr in data.transforms:
if tr.header.frame_id == 'map' and tr.child_frame_id == 'odom':
self.mp.origin[2] = tr.transform.translation.x
self.mp.origin[3] = tr.transform.translation.y
def _navGoalResponseCallback(self, future:rclpy.Future):
        ''' Callback to process the request sent to the navigation2 action server '''
goal_handle = future.result()
if not goal_handle.accepted:
self.get_logger().info('Goal rejected :(')
return
self.get_logger().info('Goal accepted :)')
self._get_result_future = goal_handle.get_result_async()
self._get_result_future.add_done_callback(self._navGoalResultCallback)
def _navGoalResultCallback(self, future:rclpy.Future):
result = future.result().result
self.get_logger().info('Result: {0}'.format(result.result))
def _navGoalFeedbackCallback(self, data):
pass
#self.remaining_distance = data.feedback.distance_remaining
def _navGoalCallback(self, data:Nav2Waypoints):
''' Read the target position sent by unity and call the navigation2 action server with this goal'''
self.get_logger().info("In")
goal_msg = FollowWaypoints.Goal()
for wp in data.waypoints:
# Generate the target goal
goal = PoseStamped()
goal.header.frame_id = 'map'
goal.header.stamp = self.get_clock().now().to_msg()
# Position part
goal.pose.position.x = wp.x
goal.pose.position.y = wp.y
# Orientation part
rot = Rotation.from_euler('xyz', [0.0, 0.0, wp.yaw])
quat = rot.as_quat()
goal.pose.orientation.x = quat[0]
goal.pose.orientation.y = quat[1]
goal.pose.orientation.z = quat[2]
goal.pose.orientation.w = quat[3]
goal_msg.poses.append(goal)
#goal_msg.pose = goal
self.get_logger().info("Sending nav2 waypoints")
future = self.nav2ActionClient.send_goal_async(goal_msg, feedback_callback = self._navGoalFeedbackCallback)
future.add_done_callback(self._navGoalResponseCallback)
return future
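    # Hedged usage sketch (not part of this node): another node could request waypoints
    # on 'rosbridge_msgs_unity/nav_goals' roughly like this, assuming the waypoint
    # element type exposes the x, y, yaw fields used above (hypothetical names):
    #
    #   msg = Nav2Waypoints()
    #   wp = Waypoint(); wp.x, wp.y, wp.yaw = 1.0, 2.0, 0.0   # hypothetical element type
    #   msg.waypoints.append(wp)
    #   waypoint_pub.publish(msg)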
def _mapCallback(self, data:OccG):
self.mp.width = data.info.height
self.mp.height = data.info.width
# Rearrange the data to be visible correctly on unity
tmp = np.array(data.data).reshape(data.info.height, data.info.width)
tmp = np.rot90(np.fliplr(tmp), -1)
tmp = np.flip(tmp, 0)
a = tmp.flatten()
map = [int(el) for el in a]
#map.append(a)
#map.data = a
#self.get_logger().info("-----> {}, {} {}".format(type(map[0]), a.shape, type(map)))
self.mp.map = map
self.mp.resolution = data.info.resolution
map_origin = np.array([data.info.origin.position.x, data.info.origin.position.y])
#Publish the map using the rosbridge_msg
self.mp.origin[0:2] = map_origin
def _odomCallback(self, msg:Odometry):
''' Odometry function callback'''
pos = msg.pose.pose.position
self.pos.x = pos.x + self.mp.origin[2]
self.pos.y = pos.y + self.mp.origin[3]
if len(self.inPos) == 0:
self.inPos = [pos.x, pos.y]
# Convert from quaternion to euler angles
orient = msg.pose.pose.orientation
quat_df = [orient.x, orient.y, orient.z, orient.w]
rot = Rotation.from_quat(quat_df)
rot_euler = rot.as_euler('xyz', degrees=True)
self.pos.yaw = rot_euler[2]
def PublishRosbridgeMsgs(self):
''' Publish the rosbridge_msgs every 500ms so that they can be used from Unity'''
# Publish the map using the rosbridge_msg
self.rosbridgeMap_pub.publish(self.mp)
# Publish the robot's position using the rosbridge_msg
self.rosbridgePos_pub.publish(self.pos)
###################################################################################################
def main(args=None):
rclpy.init(args=args)
RBMP = RosbridgeMsgsPublisher()
executor = MultiThreadedExecutor()
try:
rclpy.spin(RBMP, executor)
except KeyboardInterrupt:
pass #rclpy.spin_until_future_complete(SR, )
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
RBMP.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
import asyncio
import json
import traceback
from datetime import datetime, timedelta
from nonebot import require
from nonebot.adapters.cqhttp import Bot
from nonebot.log import logger
from ..bilibili.activity import ActivityList, H5Activity, activity_list
from ..common import CONF, get_bot, send_exception_to_su
from ..database import helper
scheduler = require("nonebot_plugin_apscheduler").scheduler
JOB_ID = "activity_monitor"
LOGNAME = "TASK:ACTIVITY"
INTERVAL = CONF.bam_monitor_task_interval
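# The job below is registered with a 0-second interval but pauses itself on entry and
# resumes itself on exit, so runs never overlap; the effective pacing between checked
# users comes from the INTERVAL sleep inside check_new_activity().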
@scheduler.scheduled_job(
"interval",
seconds=0,
id=JOB_ID,
next_run_time=datetime.now() + timedelta(seconds=INTERVAL / 2.0),
max_instances=1,
coalesce=True,
)
async def task_check_new_activity():
scheduler.pause_job(JOB_ID)
try:
await check_new_activity()
except Exception as e:
logger.warning(f"[{LOGNAME}] Outer Exception {type(e).__name__}: {repr(e)}")
logger.warning(f"[{LOGNAME}] {traceback.format_exc()}")
send_exception_to_su(e)
scheduler.resume_job(JOB_ID)
async def process_user_actlist(user, actlist: ActivityList):
has_new = False
latest = 0
if actlist is None:
return has_new, latest
latest = user.status.newest_activity_id
if actlist.ok:
latest_id = actlist[0].id if len(actlist) > 0 else "no act"
logger.info(
f"[{LOGNAME}] {user.nickname}({user.uid})'s last act id: {latest_id}"
)
if latest == 0: # first fetch, only get latest id
for _, act in zip(range(1), actlist):
if act is not None:
latest = act.id
has_new = True
else: # get new activities
bot = get_bot()
for _, act in reversed(list(zip(range(3), actlist))): # max send 3
if act is None:
continue
if act.id > latest:
has_new = True
latest = act.id
if bot is not None:
group_message = f"叮铃铃铃!{user.nickname} 有新动态!\n{act.display()}"
h5_share_card = None
if isinstance(act, H5Activity):
h5_share_card = act.h5_share_card()
for link in user.groups:
group_id = link.group_id
at_users = link.at_users
the_group_message = group_message
if at_users:
the_group_message += "\n"
for at_user in at_users.split(";"):
the_group_message += f"[CQ:at,qq={at_user}]"
logger.info(f"Send activity message: {the_group_message}")
try:
await bot.send_group_msg(
group_id=group_id,
message=the_group_message,
auto_escape=False,
)
except Exception as e:
send_exception_to_su(e, the_group_message)
if h5_share_card is not None:
try:
await bot.send_group_msg(
group_id=group_id,
message=h5_share_card,
auto_escape=False,
)
except Exception as e:
pass
elif hasattr(actlist, "code"):
logger.info(
f"[{LOGNAME}] check {user.nickname}({user.uid}) failed: {actlist.code} {actlist.message}"
)
return has_new, latest
async def check_new_activity():
logger.info(f"[{LOGNAME}] Start check new activities")
users = helper.get_users_with_linked_groups_and_status()
user_newest_activity_ids = {}
for user in filter(lambda u: len(u.groups) > 0, users.values()):
actlist = None
try:
logger.info(f"[{LOGNAME}] checking {user.nickname} activities...")
actlist = await activity_list(uid=user.uid)
if not actlist.ok and hasattr(actlist, "code"):
logger.warning(
f"[{LOGNAME}] check {user.nickname}({user.uid})'s activities failed: {actlist.code} {actlist.message}"
)
except Exception as e:
logger.warning(
f"[{LOGNAME}] check {user.uid} activity list task failed: {str(e)}"
)
has_new, latest = await process_user_actlist(user, actlist)
if has_new:
user_newest_activity_ids[user] = latest
await asyncio.sleep(INTERVAL)
if user_newest_activity_ids:
try:
helper.update_user_newest_activity_id(user_newest_activity_ids)
except Exception as e:
logger.warning(
f"[{LOGNAME}] Update db exception {type(e).__name__}: {repr(e)}"
)
|
# -*- coding: utf-8 -*-
__version__ = '0.1.6'
__all__ = ['read', 'reads']
__author__ = 'Alex Revetchi <alex.revetchi@gmail.com>'
import os
import io
import re
import sys
import copy
import json
from jcson import jpath
from jcson import jfixer
from collections import defaultdict, OrderedDict
if sys.version_info[0] >= 3:
__str_types__ = (str, bytes)
else:
__str_types__ = (str, unicode)
class CircularReferenceError(Exception):
pass
def strip_comments(content_in):
content_out = ''
for l in content_in.splitlines(True):
## skip empty lines & strip comments
l = l.lstrip()
if not l or l.startswith('#'):
content_out +='\n' ## make error reporting match original file line numbering
else:
p = l.find('#')
if p > -1: l = l[:p] + '\n'
content_out += l
return content_out
re_include = re.compile(r'(#*\s*include\s+\"([\/\w\.]+\w+)\")')
def validate_jcontent(fcontent):
"""
Validates json content ignoring include directives
"""
includes = re_include.finditer(fcontent)
last_end = 0
content = ''
for inc in includes:
content += fcontent[last_end:inc.start()]
last_end = inc.end()
if last_end:
fcontent = content + fcontent[last_end:]
json.loads(fcontent)
def file_content(filename):
with io.open(filename, 'rt', encoding='utf8') as f:
content = f.read()
content = strip_comments(content)
validate_jcontent(content)
return content
def process_includes(fcontent, include_path=None):
path = include_path or os.getcwd()
includes = [i for i in re_include.finditer(fcontent)]
while len(includes):
last_end = 0
content = ''
for inc in includes:
icontent = file_content(os.path.join(path, inc.group(2)))
content += fcontent[last_end:inc.start()] + icontent.strip().strip('{}')
last_end = inc.end()
content += fcontent[last_end:]
content = jfixer.fix_missing_trailing_commas(content)
includes = [i for i in re_include.finditer(content)]
fcontent = content
return fcontent
# ${var.name} substitution within the file or from the environment
re_var = re.compile(r'\${(?P<mvar>[\w\._-]+)}')
def parse_substitutions(path, node):
if not isinstance(node, __str_types__): return
res = re_var.findall(node)
spath = '.'.join(path)
for m in res:
if m == spath:
raise CircularReferenceError('Circular reference detected for: {}'.format(m))
depth = len(m.split('.'))
yield depth, {'path': path, 'var': m}
def resolve_substitution_value(config, subst, path):
v = jpath.find(config, subst)
if v is None:
if subst in os.environ:
v = os.environ[subst]
elif subst.upper() in os.environ:
v = os.environ[subst.upper()]
if v is None:
        raise Exception('{} could not be resolved.'.format(subst))
return v
def expand_node_substitution(config, subst, path, value):
p = path[0]
if len(path) > 1:
expand_node_substitution(config[p], subst, path[1:], value)
else:
var = '${'+subst+'}'
if config[p] == var:
config[p] = copy.deepcopy(value)
else:
config[p] = config[p].replace(var, str(value))
def read(filename, include_path=None):
include_path = include_path or os.path.dirname(os.path.abspath(filename))
content = file_content(filename)
return reads(content, include_path=include_path)
def reads(content, include_path=None):
content = process_includes(content, include_path)
config = json.loads(content, object_pairs_hook=OrderedDict)
while re_var.findall(json.dumps(config)):
substitutions = defaultdict(list)
        ## collect all substitutions sorted by depth
for path, node in jpath.traverse(config):
for depth, s in parse_substitutions(path, node):
substitutions[depth].append(s)
if not len(substitutions): break
for _, subs in substitutions.items():
for s in subs:
v = resolve_substitution_value(config, s['var'], s['path'])
expand_node_substitution(config, s['var'], s['path'], v)
return config
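# Hedged usage sketch (not part of the library API surface): resolving one substitution
# from the document itself and one from the environment, via the reads() helper above.
if __name__ == '__main__':
    os.environ.setdefault('GREETING', 'hello')
    example = '{"name": "world", "message": "${GREETING} ${name}"}'
    print(reads(example))  # expected roughly: {'name': 'world', 'message': 'hello world'}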
|
"""CoreNLP-related utilities."""
def rejoin(tokens, sep=None):
"""Rejoin tokens into the original sentence.
Args:
tokens: a list of dicts containing 'originalText' and 'before' fields.
All other fields will be ignored.
sep: if provided, use the given character as a separator instead of
the 'before' field (e.g. if you want to preserve where tokens are).
Returns: the original sentence that generated this CoreNLP token list.
"""
if sep is None:
return ''.join('%s%s' % (t['before'], t['originalText']) for t in tokens)
else:
# Use the given separator instead
return sep.join(t['originalText'] for t in tokens)
class ConstituencyParse(object):
"""A CoreNLP constituency parse (or a node in a parse tree).
Word-level constituents have |word| and |index| set and no children.
Phrase-level constituents have no |word| or |index| and have at least one child.
"""
def __init__(self, tag, children=None, word=None, index=None):
self.tag = tag
if children:
self.children = children
else:
self.children = None
self.word = word
self.index = index
@classmethod
def _recursive_parse_corenlp(cls, tokens, i, j):
orig_i = i
if tokens[i] == '(':
tag = tokens[i + 1]
children = []
i = i + 2
while True:
child, i, j = cls._recursive_parse_corenlp(tokens, i, j)
if isinstance(child, cls):
children.append(child)
if tokens[i] == ')':
return cls(tag, children), i + 1, j
else:
if tokens[i] != ')':
raise ValueError('Expected ")" following leaf')
return cls(tag, word=child, index=j), i + 1, j + 1
else:
# Only other possibility is it's a word
return tokens[i], i + 1, j
@classmethod
def from_corenlp(cls, s):
"""Parses the "parse" attribute returned by CoreNLP parse annotator."""
# "parse": "(ROOT\n (SBARQ\n (WHNP (WDT What)\n (NP (NN portion)\n (PP (IN of)\n (NP\n (NP (NNS households))\n (PP (IN in)\n (NP (NNP Jacksonville)))))))\n (SQ\n (VP (VBP have)\n (NP (RB only) (CD one) (NN person))))\n (. ? )))",
s_spaced = s.replace('\n', ' ').replace('(', ' ( ').replace(')', ' ) ')
tokens = [t for t in s_spaced.split(' ') if t]
tree, index, num_words = cls._recursive_parse_corenlp(tokens, 0, 0)
if index != len(tokens):
raise ValueError('Only parsed %d of %d tokens' % (index, len(tokens)))
return tree
def is_singleton(self):
if self.word: return True
if len(self.children) > 1: return False
return self.children[0].is_singleton()
def print_tree(self, indent=0):
spaces = ' ' * indent
if self.word:
print(('%s%s: %s (%d)' % (spaces, self.tag, self.word, self.index)).encode('utf-8'))
else:
print('%s%s:' % (spaces, self.tag))
for c in self.children:
c.print_tree(indent=indent + 1)
def get_phrase(self):
if self.word: return self.word
toks = []
for i, c in enumerate(self.children):
p = c.get_phrase()
if i == 0 or p.startswith("'"):
toks.append(p)
else:
toks.append(' ' + p)
return ''.join(toks)
def get_start_index(self):
if self.index is not None: return self.index
return self.children[0].get_start_index()
def get_end_index(self):
if self.index is not None: return self.index + 1
return self.children[-1].get_end_index()
@classmethod
def _recursive_replace_words(cls, tree, new_words, i):
if tree.word:
new_word = new_words[i]
return (cls(tree.tag, word=new_word, index=tree.index), i + 1)
new_children = []
for c in tree.children:
new_child, i = cls._recursive_replace_words(c, new_words, i)
new_children.append(new_child)
return cls(tree.tag, children=new_children), i
@classmethod
def replace_words(cls, tree, new_words):
"""Return a new tree, with new words replacing old ones."""
new_tree, i = cls._recursive_replace_words(tree, new_words, 0)
if i != len(new_words):
raise ValueError('len(new_words) == %d != i == %d' % (len(new_words), i))
return new_tree
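# Hedged usage sketch (not part of the original module): building a tree from a
# CoreNLP-style bracketed parse and reading the phrase back out.
if __name__ == '__main__':
    tree = ConstituencyParse.from_corenlp('(ROOT (NP (DT the) (NN dog)))')
    print(tree.get_phrase())  # -> 'the dog'
    swapped = ConstituencyParse.replace_words(tree, ['a', 'cat'])
    print(swapped.get_phrase())  # -> 'a cat'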
|
from __future__ import annotations
from typing import Tuple, Union
import numpy
from amulet.world_interface.chunk.translators import Translator
from PyMCTranslate.py3.translation_manager import Version
class JavaNumericalTranslator(Translator):
def _translator_key(
self, version_number: int
) -> Tuple[str, Union[int, Tuple[int, int, int]]]:
return "java", version_number
def _unpack_palette(self, version: Version, palette: numpy.ndarray):
"""
Unpacks an int array of block ids and block data values [[1, 0], [2, 0]] into a numpy array of Block objects.
:param version:
:param palette:
:return:
"""
palette = numpy.array([version.ints_to_block(*entry) for entry in palette])
return palette
def _pack_palette(self, version: Version, palette: numpy.ndarray) -> numpy.ndarray:
"""
Packs a numpy array of Block objects into an int array of block ids and block data values [[1, 0], [2, 0]].
:param version:
:param palette:
:return:
"""
palette = [version.block_to_ints(entry) for entry in palette]
for index, value in enumerate(palette):
if value is None:
palette[index] = (
0,
0,
) # TODO: find some way for the user to specify this
return numpy.array(palette)
@staticmethod
def is_valid(key):
if key[0] != "anvil":
return False
if key[1] > 1343:
return False
return True
TRANSLATOR_CLASS = JavaNumericalTranslator
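# Note (added for context, not part of the original module): is_valid() accepts
# ("anvil", data_version) keys with data_version <= 1343, i.e. the numerical pre-1.13
# Java chunk format; newer versions are handled by other translators.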
|
# https://leetcode.com/problems/next-greater-element-i/
# ---------------------------------------------------
from collections import deque
from typing import List
# Runtime Complexity: O(nums2 + nums1)
# Space Complexity: O(nums2), if we don't count res
class Solution:
def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:
stack = deque()
next_greater = {}
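        # Monotonic decreasing stack: while the incoming number exceeds the top,
        # the top has found its next greater element, so pop and record it.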
for num in nums2:
while stack and num > stack[-1]:
next_greater[stack.pop()] = num
stack.append(num)
# We can explicitly set numbers, which are left in the stack to -1
# or just handle it later.
# while stack:
# next_greater[stack.pop()] = -1
res = [0] * len(nums1)
for i, num in enumerate(nums1):
res[i] = next_greater.get(num, -1)
return res
# ---------------------------------------------------
# Test Cases
# ---------------------------------------------------
solution = Solution()
# [-1, 3, -1]
print(solution.nextGreaterElement([4, 1, 2], [1, 3, 4, 2]))
# [3, -1]
print(solution.nextGreaterElement([2, 4], [1, 2, 3, 4]))
|
# This file is part of the P3IV Simulator (https://github.com/fzi-forschungszentrum-informatik/P3IV),
# copyright by FZI Forschungszentrum Informatik, licensed under the BSD-3 license (see LICENSE file in main directory)
import numpy as np
import unittest
import logging
from p3iv_utils_probability.distributions import (
UnivariateNormalMixtureDistribution,
UnivariateNormalSequenceMixtureDistribution,
UnivariateNormalDistributionSequence,
TruncatedUnivariateNormalDistributionSequence,
TruncatedUnivariateNormalSequenceMixtureDistribution,
)
class TestUnivariateNormalMixtureDistribution(unittest.TestCase):
def setUp(self):
m = np.array([10, 20])
v = np.array([1, 2])
w = np.array([0.5, 0.5])
self.distribution = UnivariateNormalMixtureDistribution(weights=w, mean=m, covariance=v)
def test_distribution_range(self):
lw, up = self.distribution.range(2)
print(("Distribution range : \n{}\n".format(lw)))
print(("Distribution range : \n{}\n".format(up)))
def test_init_false_weights(self):
try:
m = np.array([10, 20])
v = np.array([1, 2])
w = np.array([0.3, 0.5]) # must cause error; weights must sum up to 1
distribution = UnivariateNormalMixtureDistribution(weights=w, mean=m, covariance=v)
except:
pass
def test_init_single_weights(self):
m = np.array([10])
v = np.array([12])
w = np.array([1.0])
distribution = UnivariateNormalMixtureDistribution(weights=w, mean=m, covariance=v)
distribution.means = m
print(distribution)
print("Bound ", distribution.range(2))
# plot_gaussians(distribution.components, sigma=3, title='UnivariateNormalMixtureDistribution - single')
def test_init_high_dim(self):
m = np.array([10, 20])
v = np.array([1, 2])
w = np.array([0.5, 0.5])
mog = UnivariateNormalMixtureDistribution(weights=w, mean=m, covariance=v)
print(mog)
print("Bound")
print(mog.range(2))
m[0] = 0
mog.means = m
print(mog)
print("Bound")
print(mog.range(2))
class TestTruncatedUnivariateNormalMixtureDistribution(unittest.TestCase):
def setUp(self):
m = np.array([10, 20])
v = np.array([1, 2])
w = np.array([0.5, 0.5])
self.distribution = UnivariateNormalSequenceMixtureDistribution(weights=w, mean=m, covariance=v)
class TestUnivariateNormalSequenceMixtureDistribution(unittest.TestCase):
def test_init_components(self):
m1 = np.arange(100)
v1 = np.linspace(0.1, 10, 100)
tg1 = UnivariateNormalDistributionSequence()
tg1.resize(100)
tg1.mean = m1
tg1.covariance = v1
w_1 = 0.3
m2 = np.arange(100, 200)
v2 = np.linspace(0.1, 10, 100)
tg2 = UnivariateNormalDistributionSequence()
tg2.resize(100)
tg2.mean = m2
tg2.covariance = v2
w_2 = 0.7
tgs = [tg1, tg2]
ws = [w_1, w_2]
mog = UnivariateNormalSequenceMixtureDistribution(weights=ws, components=tgs)
# plot_gaussians(mog.components, sigma=3, title="Mixture w. init UnivariateNormalSequenceDistribution(s)")
class TestTruncatedUnivariateNormalSequenceMixtureDistribution(unittest.TestCase):
def setUp(self):
pass
"""
def test_truncated_univariate_sequence_init_two_arrays(self):
m = np.array([np.arange(100), np.arange(100) * 2])
v = np.vstack([np.linspace(0.1, 10, 100)]*2)
tr_up_1 = np.ones(100) * 100
tr_up_2 = np.ones(100) * 80
tr_up = np.vstack([tr_up_1, tr_up_2])
tr_lw_1 = np.ones(100) * 30
tr_lw_2 = np.ones(100) * 20
tr_lw = np.vstack([tr_lw_1, tr_lw_2])
w = np.array([0.5, 0.5])
mog = TruncatedUnivariateNormalSequenceMixtureDistribution(tr_up, tr_lw, weights=w, mean=m, covariance=v)
print mog
print mog.distribution_range(2)
plot_gaussians(mog.components, sigma=3, title='TruncatedUnivariateSequence init two arrays')
def test_truncated_univariate_sequence(self):
m = np.arange(15).reshape(3, 5) * 100
v = np.ones((3, 5)) * 20
w = np.ones(3) / 3
# tr = np.ones((3, 5)) * 99
tr_up = np.ones((3, 5))
tr_up[0, :] *= 350
tr_up[1, :] *= 800
tr_up[2, :] *= 1300
tr_lw = np.ones((3, 5))
tr_lw[0, :] *= 250
tr_lw[1, :] *= 500
tr_lw[2, :] *= 1100
mog = TruncatedUnivariateNormalSequenceMixtureDistribution(tr_up, tr_lw, weights=w, mean=m, covariance=v)
print mog
print "truncation_array up:"
print tr_up
print "truncation_array low:"
print tr_lw
plot_gaussians(mog.components, sigma=3, title='TruncatedUnivariateSequence init three arrays')
def test_truncated_univariate_sequence_no_boundaries(self):
m = np.arange(15).reshape(3, 5) * 100
v = np.ones((3, 5)) * 20
w = np.ones(3) / 3
# tr = np.ones((3, 5)) * 99
tr_up = np.asarray([None]*3)
tr_lw = np.asarray([None]*3)
mog = TruncatedUnivariateNormalSequenceMixtureDistribution(tr_up, tr_lw, weights=w, mean=m, covariance=v)
print mog
print "truncation_array up:"
print tr_up
print "truncation_array low:"
print tr_lw
plot_gaussians(mog.components, sigma=3, title='3 (not truncated) Univariate Normal Sequence Mixture')
def test_init_components(self):
m1 = np.arange(100)
v1 = np.linspace(0.1, 10, 100)
tr_upper1 = np.ones(100) * 80
tr_lower1 = np.ones(100) * 30
tg1 = TruncatedUnivariateNormalSequenceDistribution(tr_upper1, tr_lower1, mean=m1, covariance=v1)
w_1 = 0.3
m2 = np.arange(100)
v2 = np.linspace(0.1, 10, 100)
tr_upper2 = np.ones(100) * 70
tr_lower2 = np.ones(100) * 40
tg2 = TruncatedUnivariateNormalSequenceDistribution(tr_upper2, tr_lower2, mean=m2, covariance=v2)
w_2 = 0.7
tgs = [tg1, tg2]
ws = [w_1, w_2]
mog = UnivariateNormalSequenceMixtureDistribution(weights=ws, components=tgs)
plot_gaussians(mog.components, sigma=3, title="Mixture w. init TruncatedUnivariateNormalSequenceDistribution(s)")
"""
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
unittest.main()
|
"""
# trans-tool
# The translation files checker and syncing tool.
#
# Copyright ©2021 Marcin Orlowski <mail [@] MarcinOrlowski.com>
# https://github.com/MarcinOrlowski/trans-tool/
#
"""
from configparser import ConfigParser
from pathlib import Path
from typing import Dict, List
from transtool.config.config import Config
from transtool.log import Log
from transtool.utils import Utils
class ConfigReader(object):
def __init__(self):
self.parser = ConfigParser()
# Prevent keys CaSe from being altered by default implementation.
self.parser.optionxform = str
def abort(self, msg: str):
Log.e(msg)
Utils.abort()
def read(self, config: Config, config_file_name: Path) -> Config:
"""
        Reads and **MERGES** configuration parameters from the given configuration INI file.
        Note that certain types (lists, maps/dicts) are MERGED with existing content!
        :param config: Config to merge loaded config file into.
        :param config_file_name: Path to valid config INI file.
        :return: Instance of Config with fields containing the merged configuration.
"""
if not config_file_name.exists():
self.abort(f'Config file not found: {config_file_name}')
# noinspection PyBroadException
try:
self.parser.read(config_file_name)
except Exception:
# noinspection PyUnresolvedReferences
self.abort(f'Failed parsing config INI file: {config_file_name}')
# Presence of "trans-tool" section is mandatory.
main_section = 'trans-tool'
if not self.parser.has_section(main_section):
self.abort(f'Missing "{main_section}" section.')
# Ensure we know how to read this config file.
config_version = self.parser.getint(main_section, 'version')
if config_version < Config.VERSION:
self.abort(f'Old version ({config_version}) of config INI file. Required {Config.VERSION}')
bool_opts = [
'debug',
'fatal',
'color',
'quiet',
'strict',
'verbose',
]
for single_bool in bool_opts:
if self.parser.has_option(main_section, single_bool):
                config.__setattr__(single_bool, self.parser.getboolean(main_section, single_bool))
self._merge_if_exists(self.parser, config.files, main_section, 'files')
self._merge_if_exists(self.parser, config.languages, main_section, 'languages')
self._merge_if_exists(self.parser, config.checks, main_section, 'checks')
if config.debug:
for attr_name in dir(config):
if attr_name[:2] != '__':
print(f'{attr_name}: {getattr(config, attr_name)}')
# Load checker's configs
for checker_id, checker_info in config.checks.items():
if self.parser.has_section(checker_id):
checker = checker_info.callable(checker_info.config)
checker.load_config_ini(checker_info.config, self.parser, checker_id)
return config
# #################################################################################################
def _merge_if_exists(self, parser: ConfigParser, target_list: List[str], config_section: str, config_option: str) -> None:
if parser.has_option(config_section, config_option):
self._merge_list(target_list, parser, config_section, config_option)
def _merge_list(self, target_list, parser: ConfigParser, section: str, option: str) -> None:
if parser.has_option(section, option):
import json
new_list = json.loads(parser.get(section, option).replace('\n', ''))
Utils.add_if_not_in_list(target_list, Utils.remove_quotes(new_list))
# #################################################################################################
def _merge_dict(self, old_dict: Dict, ini_parser: ConfigParser, section: str):
if ini_parser.has_section(section):
new_dict = dict(ini_parser.items(section))
if new_dict is None:
return
for key, value in new_dict.items():
old_dict[key] = Utils.remove_quotes(value)
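# Hedged example (not shipped with this module): a minimal INI file that read() above
# could merge; the version value must be at least Config.VERSION (not shown here):
#
#   [trans-tool]
#   version = 1
#   verbose = true
#   files = ["messages.properties"]
#   languages = ["pl", "de"]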
|
from lib.sentence2vec import Sentence2Vec
model = Sentence2Vec('./data/job_titles.model')
# turn job title to vector
print(model.get_vector('Uber Driver Partner'))
# not similar job
print(model.similarity('Uber Driver Partner',
'Carpenter/ Modular building installer'))
# a bit similar job
print(model.similarity('Temporary Barista 30 hours per week',
'Waitress / Waiter Part-Timer'))
# similar job
print(model.similarity('Sandwich maker / All rounder',
'Cafe all rounder and Sandwich Hand'))
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import io
import sys
from distutils.version import StrictVersion
try:
try:
import pyvisa
# New style PyVISA
        visa_rm = pyvisa.ResourceManager()
visa_instrument_opener = visa_rm.open_resource
except AttributeError:
import visa
# Old style PyVISA
visa_instrument_opener = visa.instrument
except ImportError:
# PyVISA not installed, pass it up
raise ImportError
except:
# any other error
e = sys.exc_info()[1]
sys.stderr.write("python-ivi: PyVISA is installed, but could not be loaded (%s: %s)\n" %
(e.__class__.__name__, e.args[0]))
raise ImportError
class PyVisaInstrument:
"PyVisa wrapper instrument interface client"
def __init__(self, resource, *args, **kwargs):
if type(resource) is str:
self.instrument = visa_instrument_opener(resource, *args, **kwargs)
# For compatibility with new style PyVISA
if not hasattr(self.instrument, 'trigger'):
self.instrument.trigger = self.instrument.assert_trigger
else:
self.instrument = resource
self.buffer = io.BytesIO()
def write_raw(self, data):
"Write binary data to instrument"
self.instrument.write_raw(data)
def read_raw(self, num=-1):
"Read binary data from instrument"
# PyVISA only supports reading entire buffer
#return self.instrument.read_raw()
data = self.buffer.read(num)
if len(data) == 0:
self.buffer = io.BytesIO(self.instrument.read_raw())
data = self.buffer.read(num)
return data
def ask_raw(self, data, num=-1):
"Write then read binary data"
self.write_raw(data)
return self.read_raw(num)
def write(self, message, encoding = 'utf-8'):
"Write string to instrument"
if type(message) is tuple or type(message) is list:
# recursive call for a list of commands
for message_i in message:
self.write(message_i, encoding)
return
self.write_raw(str(message).encode(encoding))
def read(self, num=-1, encoding = 'utf-8'):
"Read string from instrument"
return self.read_raw(num).decode(encoding).rstrip('\r\n')
def ask(self, message, num=-1, encoding = 'utf-8'):
"Write then read string"
if type(message) is tuple or type(message) is list:
# recursive call for a list of commands
val = list()
for message_i in message:
val.append(self.ask(message_i, num, encoding))
return val
self.write(message, encoding)
return self.read(num, encoding)
def read_stb(self):
"Read status byte"
raise NotImplementedError()
def trigger(self):
"Send trigger command"
self.instrument.trigger()
def clear(self):
"Send clear command"
raise NotImplementedError()
def remote(self):
"Send remote command"
raise NotImplementedError()
def local(self):
"Send local command"
raise NotImplementedError()
def lock(self):
"Send lock command"
raise NotImplementedError()
def unlock(self):
"Send unlock command"
raise NotImplementedError()
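# Hedged usage sketch (not part of the module): wrapping a VISA resource string and
# issuing an identification query; the address is a placeholder and a working VISA
# backend is required.
#
#   inst = PyVisaInstrument("TCPIP0::192.0.2.10::INSTR")
#   print(inst.ask("*IDN?"))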
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SshPublicKeyArgs', 'SshPublicKey']
@pulumi.input_type
class SshPublicKeyArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
user: pulumi.Input[str],
expiration_time_usec: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a SshPublicKey resource.
:param pulumi.Input[str] key: Public key text in SSH format, defined by RFC4253 section 6.6.
:param pulumi.Input[str] user: The user email.
:param pulumi.Input[str] expiration_time_usec: An expiration time in microseconds since epoch.
:param pulumi.Input[str] project: The project ID of the Google Cloud Platform project.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "user", user)
if expiration_time_usec is not None:
pulumi.set(__self__, "expiration_time_usec", expiration_time_usec)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
Public key text in SSH format, defined by RFC4253 section 6.6.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def user(self) -> pulumi.Input[str]:
"""
The user email.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: pulumi.Input[str]):
pulumi.set(self, "user", value)
@property
@pulumi.getter(name="expirationTimeUsec")
def expiration_time_usec(self) -> Optional[pulumi.Input[str]]:
"""
An expiration time in microseconds since epoch.
"""
return pulumi.get(self, "expiration_time_usec")
@expiration_time_usec.setter
def expiration_time_usec(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "expiration_time_usec", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The project ID of the Google Cloud Platform project.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@pulumi.input_type
class _SshPublicKeyState:
def __init__(__self__, *,
expiration_time_usec: Optional[pulumi.Input[str]] = None,
fingerprint: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering SshPublicKey resources.
:param pulumi.Input[str] expiration_time_usec: An expiration time in microseconds since epoch.
:param pulumi.Input[str] fingerprint: The SHA-256 fingerprint of the SSH public key.
:param pulumi.Input[str] key: Public key text in SSH format, defined by RFC4253 section 6.6.
:param pulumi.Input[str] project: The project ID of the Google Cloud Platform project.
:param pulumi.Input[str] user: The user email.
"""
if expiration_time_usec is not None:
pulumi.set(__self__, "expiration_time_usec", expiration_time_usec)
if fingerprint is not None:
pulumi.set(__self__, "fingerprint", fingerprint)
if key is not None:
pulumi.set(__self__, "key", key)
if project is not None:
pulumi.set(__self__, "project", project)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter(name="expirationTimeUsec")
def expiration_time_usec(self) -> Optional[pulumi.Input[str]]:
"""
An expiration time in microseconds since epoch.
"""
return pulumi.get(self, "expiration_time_usec")
@expiration_time_usec.setter
def expiration_time_usec(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "expiration_time_usec", value)
@property
@pulumi.getter
def fingerprint(self) -> Optional[pulumi.Input[str]]:
"""
The SHA-256 fingerprint of the SSH public key.
"""
return pulumi.get(self, "fingerprint")
@fingerprint.setter
def fingerprint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "fingerprint", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Public key text in SSH format, defined by RFC4253 section 6.6.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The project ID of the Google Cloud Platform project.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
The user email.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
class SshPublicKey(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
expiration_time_usec: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The SSH public key information associated with a Google account.
To get more information about SSHPublicKey, see:
* [API documentation](https://cloud.google.com/compute/docs/oslogin/rest/v1/users.sshPublicKeys)
* How-to Guides
* [Official Documentation](https://cloud.google.com/compute/docs/oslogin)
## Example Usage
### Os Login Ssh Key Basic
```python
import pulumi
import pulumi_gcp as gcp
me = gcp.organizations.get_client_open_id_user_info()
cache = gcp.oslogin.SshPublicKey("cache",
user=me.email,
key=(lambda path: open(path).read())("path/to/id_rsa.pub"))
```
## Import
SSHPublicKey can be imported using any of these accepted formats
```sh
$ pulumi import gcp:oslogin/sshPublicKey:SshPublicKey default users/{{user}}/sshPublicKeys/{{fingerprint}}
```
```sh
$ pulumi import gcp:oslogin/sshPublicKey:SshPublicKey default {{user}}/{{fingerprint}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] expiration_time_usec: An expiration time in microseconds since epoch.
:param pulumi.Input[str] key: Public key text in SSH format, defined by RFC4253 section 6.6.
:param pulumi.Input[str] project: The project ID of the Google Cloud Platform project.
:param pulumi.Input[str] user: The user email.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SshPublicKeyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The SSH public key information associated with a Google account.
To get more information about SSHPublicKey, see:
* [API documentation](https://cloud.google.com/compute/docs/oslogin/rest/v1/users.sshPublicKeys)
* How-to Guides
* [Official Documentation](https://cloud.google.com/compute/docs/oslogin)
## Example Usage
### Os Login Ssh Key Basic
```python
import pulumi
import pulumi_gcp as gcp
me = gcp.organizations.get_client_open_id_user_info()
cache = gcp.oslogin.SshPublicKey("cache",
user=me.email,
key=(lambda path: open(path).read())("path/to/id_rsa.pub"))
```
## Import
SSHPublicKey can be imported using any of these accepted formats
```sh
$ pulumi import gcp:oslogin/sshPublicKey:SshPublicKey default users/{{user}}/sshPublicKeys/{{fingerprint}}
```
```sh
$ pulumi import gcp:oslogin/sshPublicKey:SshPublicKey default {{user}}/{{fingerprint}}
```
:param str resource_name: The name of the resource.
:param SshPublicKeyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SshPublicKeyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
expiration_time_usec: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SshPublicKeyArgs.__new__(SshPublicKeyArgs)
__props__.__dict__["expiration_time_usec"] = expiration_time_usec
if key is None and not opts.urn:
raise TypeError("Missing required property 'key'")
__props__.__dict__["key"] = key
__props__.__dict__["project"] = project
if user is None and not opts.urn:
raise TypeError("Missing required property 'user'")
__props__.__dict__["user"] = user
__props__.__dict__["fingerprint"] = None
super(SshPublicKey, __self__).__init__(
'gcp:oslogin/sshPublicKey:SshPublicKey',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
expiration_time_usec: Optional[pulumi.Input[str]] = None,
fingerprint: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None) -> 'SshPublicKey':
"""
Get an existing SshPublicKey resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] expiration_time_usec: An expiration time in microseconds since epoch.
:param pulumi.Input[str] fingerprint: The SHA-256 fingerprint of the SSH public key.
:param pulumi.Input[str] key: Public key text in SSH format, defined by RFC4253 section 6.6.
:param pulumi.Input[str] project: The project ID of the Google Cloud Platform project.
:param pulumi.Input[str] user: The user email.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SshPublicKeyState.__new__(_SshPublicKeyState)
__props__.__dict__["expiration_time_usec"] = expiration_time_usec
__props__.__dict__["fingerprint"] = fingerprint
__props__.__dict__["key"] = key
__props__.__dict__["project"] = project
__props__.__dict__["user"] = user
return SshPublicKey(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="expirationTimeUsec")
def expiration_time_usec(self) -> pulumi.Output[Optional[str]]:
"""
An expiration time in microseconds since epoch.
"""
return pulumi.get(self, "expiration_time_usec")
@property
@pulumi.getter
def fingerprint(self) -> pulumi.Output[str]:
"""
The SHA-256 fingerprint of the SSH public key.
"""
return pulumi.get(self, "fingerprint")
@property
@pulumi.getter
def key(self) -> pulumi.Output[str]:
"""
Public key text in SSH format, defined by RFC4253 section 6.6.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def project(self) -> pulumi.Output[Optional[str]]:
"""
The project ID of the Google Cloud Platform project.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter
def user(self) -> pulumi.Output[str]:
"""
The user email.
"""
return pulumi.get(self, "user")
|
from flask import url_for
from flask_wtf import FlaskForm
from wtforms import ValidationError
from wtforms.fields import (
SubmitField,
IntegerField,
DateField,
StringField,
HiddenField
)
from wtforms.fields.html5 import EmailField
from wtforms.validators import Email, EqualTo, InputRequired, Length
from wtforms.widgets import TextArea
import datetime
from app.models import User
def IsInFuture(form, field):
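    # Custom WTForms validator: rejects start dates that lie in the past.
    # The Dutch error message below reads "Cannot start a game in the past."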
if datetime.datetime(field.data.year, field.data.month, field.data.day) < datetime.datetime.now():
raise ValidationError('Kan geen spel starten in het verleden.')
class CreateGameForm(FlaskForm):
name = StringField('Spelnaam', [InputRequired(), Length(max=32, min=4)])
player_amount = IntegerField('Aantal spelers', [InputRequired()])
start_date = DateField('Start datum', [InputRequired(), IsInFuture], format="%d-%m-%Y")
submit = SubmitField('Maak aan')
class InvitePlayersForm(FlaskForm):
email = EmailField('Emailadres', [InputRequired(), Email()])
submit = SubmitField('Verstuur uitnodiging')
class NewMessageForm(FlaskForm):
reply_to = HiddenField()
text = StringField('', [InputRequired(), Length(max=500)], widget=TextArea())
submit = SubmitField('Verstuur bericht')
|
import os, os.path
import contextlib
from . import verbose
from . import descr
from . import revision_sql
from . import receivers
from . import pg_role_path
from . import scr_env
from . import init_sql
def init_cmd(args_ctx, print_func, err_print_func):
verb = verbose.make_verbose(print_func, err_print_func, args_ctx.verbose)
verb.prepare_init()
hosts_descr = descr.HostsDescr()
if args_ctx.hosts is not None:
hosts_path = os.path.realpath(args_ctx.hosts)
hosts_descr.load(hosts_path)
include_list = []
include_ref_map = {}
for include in args_ctx.include_list:
include_list.append(os.path.realpath(include))
for include_ref_name in args_ctx.include_ref_map:
include_ref_map[include_ref_name] = \
os.path.realpath(args_ctx.include_ref_map[include_ref_name])
source_code_file_path = os.path.realpath(os.path.join(
args_ctx.source_code,
descr.ClusterDescr.file_name,
))
source_code_include_list = include_list + [os.path.dirname(source_code_file_path)]
source_code_cluster_descr = descr.ClusterDescr()
source_code_cluster_descr.load(
source_code_file_path, source_code_include_list, include_ref_map)
if args_ctx.hosts is None:
hosts_descr.load_pseudo(source_code_cluster_descr)
rev_sql = revision_sql.RevisionSql(source_code_cluster_descr.application)
verb.source_code_revision(
source_code_cluster_descr.application,
source_code_cluster_descr.revision,
None,
)
with contextlib.closing(
receivers.Receivers(
args_ctx.execute,
args_ctx.pretend,
args_ctx.output,
),
) as recv:
for host in hosts_descr.host_list:
host_name = host['name']
host_type = host['type']
verb.begin_host(host_name)
recv.begin_host(hosts_descr, host)
recv.execute(host_name, pg_role_path.pg_role_path(None, None))
verb.scr_env(host_name, recv.look_fragment_i(host_name))
recv.execute(host_name, scr_env.scr_env(hosts_descr, host_name))
verb.ensure_revision_structs(host_name, recv.look_fragment_i(host_name))
recv.execute(host_name, rev_sql.ensure_revision_structs(host_type))
for i, sql in enumerate(
init_sql.read_init_sql(source_code_cluster_descr, host_type),
):
if not i:
verb.execute_sql(
host_name, 'init_sql', recv.look_fragment_i(host_name))
recv.execute(
host_name, '{}\n\n{}\n\n;'.format(
pg_role_path.pg_role_path(None, None),
sql.rstrip(),
),
)
recv.execute(host_name, pg_role_path.pg_role_path(None, None))
verb.clean_scr_env(host_name, recv.look_fragment_i(host_name))
recv.execute(host_name, scr_env.clean_scr_env())
verb.finish_host(host_name)
recv.finish_host(hosts_descr, host)
# vi:ts=4:sw=4:et
|
import Quandl
def economic_indicator(source, country, indicator, **kwargs):
dataset = "{source}/{country}_{indicator}".format(
source=source.upper(),
country=country.upper(),
indicator=indicator.upper()
)
return Quandl.get(dataset, **kwargs)
class Fundamentals(object):
'''
Wrapper for the stock fundamentals portion
of the Quandl API. Initialize with a ticker symbol and
call .get('some ratio code') with a code or list of codes.
'''
def __init__(self, symbol):
self.symbol = symbol.upper()
def dataset_code(self, ratios):
code = 'DMDRN/' + self.symbol
codes = []
        if isinstance(ratios, list):
for r in ratios:
codes.append(code + '_' + r.upper())
return codes
else:
return code + '_' + ratios.upper()
def get(self, ratios, **kwargs):
dataset_code = self.dataset_code(ratios)
return Quandl.get(dataset_code, **kwargs)
def all_stats(self, **kwargs):
return self.get('ALLFINANCIALRATIOS', **kwargs)
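
# A hypothetical usage sketch (assumes a configured Quandl API key and access to the
# DMDRN fundamentals dataset; the ratio codes below are illustrative, not verified):
#
#     aapl = Fundamentals("AAPL")
#     single = aapl.get("PE_CURR")              # one ratio code
#     several = aapl.get(["PE_CURR", "ROA"])    # a list of ratio codes
#     everything = aapl.all_stats()             # all financial ratios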
|
import numpy as np
import os
import torch
from isaacgym import gymutil, gymtorch, gymapi
from isaacgym.torch_utils import *
from tasks.base.vec_task import VecTask
from isaacgymenvs.utils.torch_jit_utils import *
import time
class CubeBot_TargPos(VecTask):
def __init__(self, cfg, sim_device, graphics_device_id, headless):
self.cfg = cfg
self.dt = self.cfg["sim"]["dt"]
self.reset_dist = self.cfg["env"]["resetDist"]
self.max_episode_length = self.cfg["env"]["maxEpisodeLength"]
self.control_mode = self.cfg["env"]["controlMode"]
self.stiffness = self.cfg["env"]["stiffness"] * self.control_mode
self.damping = self.cfg["env"]["damping"] * self.control_mode
self.maxSpeed = self.cfg["env"]["maxSpeed"]
self.maxTorque = self.cfg["env"]["maxTorque"]
self.friction = self.cfg["env"]["friction"]
self.angularDamping = self.cfg["env"]["angularDamping"]
self.goal_dist = self.cfg["env"]["goalDist"]
# cube root state (13) pos(3),ori(4),linvel(3),angvel(3)
# wheel velocities (6)
# Drive signal for each of the three primary axis reaction wheels
self.cfg["env"]["numActions"] = 3
# goal position (3) probably will ignore the z though... keep it in for now
self.cfg["env"]["numObservations"] = 31+self.cfg["env"]["numActions"]
super().__init__(config=self.cfg, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless)
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.dof_pos = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 0]
self.dof_vel = self.dof_state.view(self.num_envs, self.num_dof, 2)[..., 1]
actor_root_state = self.gym.acquire_actor_root_state_tensor(self.sim)
self.root_states = gymtorch.wrap_tensor(actor_root_state)
self.cube_pos = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0, 0:3] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
self.cube_ori = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0, 3:7] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
self.cube_linvel = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0, 7:10] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
self.cube_angvel = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 0, 10:13] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
self.goal_pos = self.root_states.view(self.num_envs, self.num_actors, 13)[..., 1, 0:3] #num_envs, num_actors, 13 (pos,ori,Lvel,Avel)
rb_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
self.rb_state = gymtorch.wrap_tensor(rb_state_tensor)
self.corner1_pos = self.rb_state.view(self.num_envs, self.num_bodies, 13)[..., self.body_dict['CornerBumper_1'], 0:3] #num_envs, num_rigid_bodies, 13 (pos,ori,Lvel,Avel)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
print('self.dof_pos')
print(self.dof_pos)
print(self.dof_pos.shape)
print('self.dof_vel')
print(self.dof_vel)
print(self.dof_vel.shape)
print('self.cube_pos')
print(self.cube_pos)
print(self.cube_pos.shape)
print('self.cube_ori')
print(self.cube_ori)
print(self.cube_ori.shape)
print('self.cube_linvel')
print(self.cube_linvel)
print(self.cube_linvel.shape)
print('self.cube_angvel')
print(self.cube_angvel)
print(self.cube_angvel.shape)
print('self.corner1_pos')
print(self.corner1_pos)
print(self.corner1_pos.shape)
self.x_unit_tensor = to_torch([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.y_unit_tensor = to_torch([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.z_unit_tensor = to_torch([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
# Used for rewarding moving towards a target
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
to_target = self.goal_pos - self.cube_pos
to_target[:, 2] = 0.0
self.potentials = -torch.norm(to_target, p=2, dim=-1) / self.dt
self.prev_potentials = self.potentials.clone()
# self.potentials = to_torch([-1000./self.dt], device=self.device).repeat(self.num_envs)
# self.prev_potentials = self.potentials.clone()
self.goal_reset = torch.ones(self.num_envs, device=self.device, dtype=torch.long)
goal_ids = self.goal_reset.nonzero(as_tuple=False).squeeze(-1)
if len(goal_ids) > 0:
self.reset_goal(goal_ids)
# Measurements for rewards
self.up_vec = to_torch(get_axis_params(1., self.up_axis_idx), device=self.device).repeat((self.num_envs, 1))
self.heading_vec = to_torch([0, 1, 0], device=self.device).repeat((self.num_envs, 1))
self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
self.reward_total = torch.zeros((self.num_envs), device=self.device)
self.torques = torch.zeros((self.num_envs, self.num_dof), device=self.device)
def create_sim(self):
# set the up axis to be z-up given that assets are y-up by default
self.up_axis_idx = self.set_sim_params_up_axis(self.sim_params, 'z')
# self.sim_params.gravity = gymapi.Vec3(0.0, 0.0, -9.81/2)
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs)))
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
        # set the ground-plane normal to the z axis
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
# plane_params.static_friction = 0.0
# plane_params.dynamic_friction = 0.0
# plane_params.restitution = 0.1
# print('{} : {} : {}'.format(plane_params.static_friction, plane_params.dynamic_friction, plane_params.restitution))
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
# define plane on which environments are initialized
lower = gymapi.Vec3(0.5 * -spacing, -spacing, 0.0)
upper = gymapi.Vec3(0.5 * spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../assets")
asset_file = "urdf/CubeBot.urdf"
if "asset" in self.cfg["env"]:
asset_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.cfg["env"]["asset"].get("assetRoot", asset_root))
asset_file = self.cfg["env"]["asset"].get("assetFileName", asset_file)
asset_path = os.path.join(asset_root, asset_file)
asset_root = os.path.dirname(asset_path)
asset_file = os.path.basename(asset_path)
asset_options = gymapi.AssetOptions()
asset_options.fix_base_link = False
asset_options.angular_damping = self.angularDamping
asset_options.max_angular_velocity = self.maxSpeed
cubebot_asset = self.gym.load_asset(self.sim, asset_root, asset_file, asset_options)
self.num_dof = self.gym.get_asset_dof_count(cubebot_asset)
# self.num_actor = get_sim_actor_count
# self.num_rb = get_actor_rigid_body_count(cubebot_asset)
goal_asset = self.gym.create_sphere(self.sim, 0.05)
self.num_bodies = self.gym.get_asset_rigid_body_count(cubebot_asset) + self.gym.get_asset_rigid_body_count(goal_asset)
pose = gymapi.Transform()
pose.p.z = 1.0
# asset is rotated z-up by default, no additional rotations needed
pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1.0)
self.start_rotation = torch.tensor([pose.r.x, pose.r.y, pose.r.z, pose.r.w], device=self.device)
self.cubebot_handles = []
self.goal_handles = []
self.envs = []
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(
self.sim, lower, upper, num_per_row
)
cubebot_handle = self.gym.create_actor(env_ptr, cubebot_asset, pose, "cubebot", 0, 0, 0)
dof_props = self.gym.get_actor_dof_properties(env_ptr, cubebot_handle)
if(self.control_mode):
dof_props['driveMode'][:] = gymapi.DOF_MODE_VEL
else:
dof_props['driveMode'][:] = gymapi.DOF_MODE_EFFORT
dof_props['stiffness'][:] = self.stiffness
dof_props['damping'][:] = self.damping
dof_props['velocity'][:] = self.maxSpeed
dof_props['effort'][:] = self.maxTorque
dof_props['friction'][:] = self.friction
self.gym.set_actor_dof_properties(env_ptr, cubebot_handle, dof_props)
self.envs.append(env_ptr)
self.cubebot_handles.append(cubebot_handle)
goal_pose = gymapi.Transform()
goal_pose.p.y = self.goal_dist
goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_pose, "goal", 0, 0, 1)
self.gym.set_rigid_body_color(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.2, 0.8, 0.2))
self.goal_handles.append(goal_handle)
self.num_actors = self.gym.get_actor_count(self.envs[0])
self.body_dict = self.gym.get_actor_rigid_body_dict(env_ptr, cubebot_handle)
for b in self.body_dict:
print(b)
def compute_reward(self):
# retrieve environment observations from buffer
# box_pos = self.obs_buf[:, 0:2]
# box_ori = self.obs_buf[:, 3:7]
# box_lin_vel = self.obs_buf[:, 7:10]
# box_ang_vel = self.obs_buf[:, 10:13]
# print(self.corner1_pos)
# print(self.corner1_pos.shape)
# distance_to_goal = torch.norm(self.cube_pos - self.goal_pos, dim=-1)
# goal_reward = torch.where(distance_to_goal<1, 1, 0)
# print(goal_reward)
# progress_reward = self.potentials - self.prev_potentials
# print(progress_reward)
# print('progress_reward')
# # print(progress_reward)
# self.reward_total += progress_reward
# print('self.reward_total')
# print(self.reward_total)
self.rew_buf[:], self.reset_buf[:], self.goal_reset = compute_cubebot_reward(
self.corner1_pos[:, 2],
self.obs_buf[:,17:23], #obs_old [13:19] obs_new[17:23]
self.cube_pos,
self.goal_pos,
self.potentials,
self.prev_potentials,
self.reset_buf,
self.progress_buf,
self.max_episode_length
)
# print('{} : {} '.format(self.potentials, self.prev_potentials))
# if(torch.abs(self.rew_buf[0]) > 2 or torch.abs(self.rew_buf[1]) > 2):
# print('self.rew_buf')
# print(self.rew_buf)
# print(self.rew_buf.shape)
# time.sleep(1)
# if(torch.abs(self.reset_buf[0]) == 1 or torch.abs(self.reset_buf[1]) == 1):
# print('self.reset_buf')
# print(self.reset_buf)
# print(self.reset_buf.shape)
# time.sleep(1)
# if(torch.abs(self.goal_reset[0]) == 1 or torch.abs(self.goal_reset[1]) == 1):
# print('self.goal_reset')
# print(self.goal_reset)
# print(self.goal_reset.shape)
# time.sleep(1)
def compute_observations(self, env_ids=None):
if env_ids is None:
env_ids = np.arange(self.num_envs)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
self.obs_buf, self.potentials, self.prev_potentials = compute_cubebot_observations(
self.cube_pos,
self.cube_ori,
self.cube_linvel,
self.cube_angvel,
self.dof_vel,
self.goal_pos,
self.potentials,
self.inv_start_rot,
self.basis_vec0,
self.basis_vec1,
self.actions,
self.torques,
self.maxSpeed,
self.dt)
# print('Potential = {}. Previous_Potential = {}. Diff = {}'.format(self.potentials[0], self.prev_potentials[0], self.potentials[0] - self.prev_potentials[0]))
# print('actions = {}'.format(self.obs_buf[:, 25:28]))
# print('torques = {}'.format(self.obs_buf[:, 28:34]))
# print('dof_vel = {}'.format(self.obs_buf[:, 19:25]))
# print()
return self.obs_buf
def reset_idx(self, env_ids):
positions = 0.2 * (torch.rand((len(env_ids), self.num_dof), device=self.device) - 0.5)
velocities = 0.5 * (torch.rand((len(env_ids), self.num_dof), device=self.device) - 0.5)
self.dof_pos[env_ids, :] = positions[:]
self.dof_vel[env_ids, :] = velocities[:]
env_ids_int32 = env_ids.to(dtype=torch.int32)*self.num_actors
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
cube_pos_update = torch.zeros((len(env_ids), 3), device=self.device)
cube_pos_update[:,2] = 0.3
rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device)
cube_ori_update = quat_mul(quat_from_angle_axis(rand_floats[:,0] * np.pi, self.x_unit_tensor[env_ids]),
quat_from_angle_axis(rand_floats[:,1] * np.pi, self.y_unit_tensor[env_ids]))
cube_linvel_update = torch.zeros((len(env_ids), 3), device=self.device)
cube_angvel_update = torch.zeros((len(env_ids), 3), device=self.device)
self.cube_pos[env_ids, :] = cube_pos_update
self.cube_ori[env_ids, :] = cube_ori_update
self.cube_linvel[env_ids, :] = cube_linvel_update
self.cube_angvel[env_ids, :] = cube_angvel_update
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_states),
gymtorch.unwrap_tensor(env_ids_int32), len(env_ids_int32))
self.reset_goal(env_ids)
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def reset_goal(self, env_ids):
self.gym.refresh_actor_root_state_tensor(self.sim)
# print('reset_goals')
# print('Old Goal Position = {}'.format(self.goal_pos))
env_ids_int32 = env_ids.to(dtype=torch.int32)*self.num_actors
goal_pos_update = torch_rand_float(-10.0, 10.0, (len(env_ids), 3), device=self.device)
# goal_pos_update[:,0] = 0
# goal_pos_update[:,1] = self.goal_dist
goal_pos_update[:,2] = 0.1
self.goal_pos[env_ids, :] = goal_pos_update
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_states),
gymtorch.unwrap_tensor(env_ids_int32+1), len(env_ids_int32))
self.gym.refresh_actor_root_state_tensor(self.sim)
to_target = self.goal_pos[env_ids] - self.cube_pos[env_ids]
to_target[:, 2] = 0.0
self.prev_potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.dt
self.potentials[env_ids] = self.prev_potentials[env_ids].clone()
self.goal_reset[env_ids] = 0
# print('New Goal Position = {}'.format(self.goal_pos))
def pre_physics_step(self, actions):
# print(actions)
# print(actions.shape)
# print(actions.to(self.device).squeeze() * self.max_push_effort)
# print(actions.to(self.device).squeeze().shape())
self.actions = actions.clone().to(self.device)
if(self.control_mode):
# Vel Control
self.set_motor_velocitys(self.actions)
else:
# Torque Control
self.set_motor_torques(self.actions)
def set_motor_velocitys(self, targets):
        # Mirror set_motor_torques: each action drives one pair of reaction wheels.
        target_vels = torch.zeros((self.num_envs, self.num_dof), device=self.device)
        target_vels[:, 0:2] = targets[:, 0:1]*self.maxSpeed
        target_vels[:, 2:4] = targets[:, 1:2]*self.maxSpeed
        target_vels[:, 4:6] = targets[:, 2:3]*self.maxSpeed
        self.gym.set_dof_velocity_target_tensor(self.sim, gymtorch.unwrap_tensor(target_vels))
def set_motor_torques(self, targets):
target_torques = torch.zeros((self.num_envs, self.num_dof), device=self.device)
target_torques[:, 0] = targets[:, 0]*self.maxTorque
target_torques[:, 1] = targets[:, 0]*self.maxTorque
target_torques[:, 2] = targets[:, 1]*self.maxTorque
target_torques[:, 3] = targets[:, 1]*self.maxTorque
target_torques[:, 4] = targets[:, 2]*self.maxTorque
target_torques[:, 5] = targets[:, 2]*self.maxTorque
# print('target_torques = {}'.format(target_torques))
offset = 2
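        # The two clips below approximate a simple torque-speed curve: the torque a wheel
        # motor can still deliver shrinks linearly as its speed approaches maxSpeed, so the
        # commanded torques are clamped to that available range before being applied.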
max_available_torque = torch.clip(self.maxTorque - (offset*self.dof_vel/self.maxSpeed + (1-offset))*self.maxTorque, -self.maxTorque, self.maxTorque)
min_available_torque = torch.clip(-self.maxTorque - (offset*self.dof_vel/self.maxSpeed - (1-offset))*self.maxTorque, -self.maxTorque, self.maxTorque)
self.torques = torch.clip(target_torques, min_available_torque, max_available_torque)
self.gym.set_dof_actuation_force_tensor(self.sim, gymtorch.unwrap_tensor(self.torques))
def post_physics_step(self):
self.progress_buf += 1
goal_ids = self.goal_reset.nonzero(as_tuple=False).squeeze(-1)
if len(goal_ids) > 0:
self.reset_goal(goal_ids)
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.compute_observations()
self.compute_reward()
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def compute_cubebot_reward(corner_height, wheel_speeds, cube_pos, goal_pos, potentials, prev_potentials,
reset_buf, progress_buf, max_episode_length):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float) -> Tuple[Tensor, Tensor, Tensor]
    penalty = torch.square((torch.sum(torch.abs(wheel_speeds), dim=1)/6)) # Wheel velocity observation is scaled between -1 and 1
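    # Progress reward: potentials store the negative goal distance scaled by 1/dt, so the
    # difference below is positive whenever the cube moved closer to the goal this step.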
progress_reward = potentials - prev_potentials
distance_to_goal = torch.norm(cube_pos - goal_pos, dim=-1)
goal_reached = torch.where(distance_to_goal < 0.5, 1, 0)
# reward = corner_height + goal_reward - torch.square(distance_to_goal/5.0)
reward = progress_reward+goal_reached
# reward = corner_height
reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset_buf)
goal_reset = torch.where(goal_reached==1, 1, 0)
# goal_reset = torch.zeros_like(reset)
return reward, reset, goal_reset
@torch.jit.script
def compute_cubebot_observations(cube_pos, cube_ori, cube_linvel, cube_angvel, dof_vel, goal_pos,
potentials, inv_start_rot, basis_vec0, basis_vec1, actions, torques, maxSpeed, dt):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, float) -> Tuple[Tensor, Tensor, Tensor]
to_target = goal_pos - cube_pos
to_target[:, 2] = 0.0
prev_potentials_new = potentials.clone()
potentials = -torch.norm(to_target, p=2, dim=-1) / dt
torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up(
cube_ori, inv_start_rot, to_target, basis_vec0, basis_vec1, 2)
vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot(
torso_quat, cube_linvel, cube_angvel, goal_pos, cube_pos)
dof_vel_scaled = dof_vel[:, 0:6]/maxSpeed
# obs_buf shapes: 3, 4, 3, 3, 6, 3 = 22
# obs = torch.cat((cube_pos/100,
# cube_ori,
# cube_linvel/100,
# cube_angvel/100,
# dof_vel_scaled,
# goal_pos/100), dim=-1)
# obs_buf shapes: 3, 4, 3, 3, (cube_pos, cube_ori, vel_loc, angvel_loc)
# 3, 1, 1, 1 (goal_pos, angle_to_target, up_proj, heading_proj)
# 6, 3, 6 (dof_vel_scaled, actions, torques)
# total = 34
# obs = torch.cat((cube_pos, cube_ori, vel_loc, angvel_loc,
# dof_vel_scaled, actions), dim=-1)
obs = torch.cat((cube_pos, cube_ori, vel_loc, angvel_loc, goal_pos,
angle_to_target.unsqueeze(-1), up_proj.unsqueeze(-1), heading_proj.unsqueeze(-1),
dof_vel_scaled, actions, torques), dim=-1)
return obs, potentials, prev_potentials_new
|
from dataclasses import dataclass
from enum import Enum
from typing import Any
class TokenType(Enum):
NUMBER = 0
PLUS = 1
MINUS = 2
MULTIPLY = 3
DIVIDE = 4
LPAREN = 5
RPAREN = 6
POWER = 7
MOD = 8
INTDIV = 9
LS = 10
RS = 11
GT = 12
ST = 13
EQU = 14
@dataclass
class Token:
type: TokenType
    value: Any = None
def __repr__(self):
        return self.type.name + (f":{self.value}" if self.value is not None else "")
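
# Illustrative example: with a suitable lexer (not shown here), the expression "3 + 4"
# would produce [Token(TokenType.NUMBER, 3), Token(TokenType.PLUS), Token(TokenType.NUMBER, 4)]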
|
"""
Functions for generating bootstrapped error bars
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
from pygsti.drivers import longsequence as _longseq
from pygsti import algorithms as _alg
from pygsti.data.dataset import DataSet as _DataSet
def create_bootstrap_dataset(input_data_set, generation_method, input_model=None,
seed=None, outcome_labels=None, verbosity=1):
"""
Creates a DataSet used for generating bootstrapped error bars.
Parameters
----------
input_data_set : DataSet
The data set to use for generating the "bootstrapped" data set.
generation_method : { 'nonparametric', 'parametric' }
The type of dataset to generate. 'parametric' generates a DataSet
with the same circuits and sample counts as input_data_set but
using the probabilities in input_model (which must be provided).
'nonparametric' generates a DataSet with the same circuits
and sample counts as input_data_set using the count frequencies of
input_data_set as probabilities.
input_model : Model, optional
The model used to compute the probabilities for circuits when
generation_method is set to 'parametric'. If 'nonparametric' is selected,
this argument must be set to None (the default).
seed : int, optional
A seed value for numpy's random number generator.
outcome_labels : list, optional
The list of outcome labels to include in the output dataset. If None
are specified, defaults to the spam labels of input_data_set.
verbosity : int, optional
How verbose the function output is. If 0, then printing is suppressed.
If 1 (or greater), then printing is not suppressed.
Returns
-------
DataSet
"""
if generation_method not in ['nonparametric', 'parametric']:
raise ValueError("generation_method must be 'parametric' or 'nonparametric'!")
if outcome_labels is None:
outcome_labels = input_data_set.outcome_labels
rndm = seed if isinstance(seed, _np.random.RandomState) \
else _np.random.RandomState(seed)
if input_model is None:
if generation_method == 'nonparametric':
print("Generating non-parametric dataset.")
elif generation_method == 'parametric':
raise ValueError("For 'parmametric', must specify input_model")
else:
if generation_method == 'parametric':
print("Generating parametric dataset.")
elif generation_method == 'nonparametric':
raise ValueError("For 'nonparametric', input_model must be None")
firstPOVMLbl = list(input_model.povms.keys())[0]
# TODO: allow outcomes from multiple POVMS? (now just consider *first* POVM)
possibleOutcomeLabels = [(eLbl,) for eLbl in input_model.povms[firstPOVMLbl].keys()]
assert(all([ol in possibleOutcomeLabels for ol in outcome_labels]))
possibleOutcomeLabels = input_data_set.outcome_labels
assert(all([ol in possibleOutcomeLabels for ol in outcome_labels]))
#create new dataset
simDS = _DataSet(outcome_labels=outcome_labels,
collision_action=input_data_set.collisionAction)
circuit_list = list(input_data_set.keys())
probs = input_model.sim.bulk_probs(circuit_list) \
if generation_method == 'parametric' else None
for s in circuit_list:
nSamples = input_data_set[s].total
if generation_method == 'parametric':
ps = probs[s] # SLOW: input_model.probabilities(s)
elif generation_method == 'nonparametric':
dsRow_fractions = input_data_set[s].fractions
ps = {ol: dsRow_fractions[ol] for ol in outcome_labels}
pList = _np.array([_np.clip(ps[outcomeLabel], 0, 1) for outcomeLabel in outcome_labels])
#Truncate before normalization; bad extremal values shouldn't
# screw up not-bad values, yes?
pList = pList / sum(pList)
countsArray = rndm.multinomial(nSamples, pList, 1)
counts = {ol: countsArray[0, i] for i, ol in enumerate(outcome_labels)}
simDS.add_count_dict(s, counts)
simDS.done_adding_data()
return simDS
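
# A minimal usage sketch (illustrative only; `ds` stands in for a real DataSet and `mdl`
# for a fitted Model):
#
#     boot_ds_np = create_bootstrap_dataset(ds, 'nonparametric', seed=0)
#     boot_ds_p = create_bootstrap_dataset(ds, 'parametric', input_model=mdl, seed=0)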
def create_bootstrap_models(num_models, input_data_set, generation_method,
fiducial_prep, fiducial_measure, germs, max_lengths,
input_model=None, target_model=None, start_seed=0,
outcome_labels=None, lsgst_lists=None,
return_data=False, verbosity=2):
"""
Creates a series of "bootstrapped" Models.
Models are created from a single DataSet (and possibly Model) and are
typically used for generating bootstrapped error bars. The resulting Models
are obtained by performing MLGST on data generated by repeatedly calling
    :func:`create_bootstrap_dataset` with consecutive integer seed values.
Parameters
----------
num_models : int
The number of models to create.
input_data_set : DataSet
The data set to use for generating the "bootstrapped" data set.
generation_method : { 'nonparametric', 'parametric' }
The type of data to generate. 'parametric' generates DataSets
with the same circuits and sample counts as input_data_set but
using the probabilities in input_model (which must be provided).
'nonparametric' generates DataSets with the same circuits
and sample counts as input_data_set using the count frequencies of
input_data_set as probabilities.
fiducial_prep : list of Circuits
The state preparation fiducial circuits used by MLGST.
fiducial_measure : list of Circuits
The measurement fiducial circuits used by MLGST.
germs : list of Circuits
The germ circuits used by MLGST.
max_lengths : list of ints
List of integers, one per MLGST iteration, which set truncation lengths
for repeated germ strings. The list of circuits for the i-th LSGST
iteration includes the repeated germs truncated to the L-values *up to*
and including the i-th one.
input_model : Model, optional
The model used to compute the probabilities for circuits when
generation_method is set to 'parametric'. If 'nonparametric' is selected,
this argument must be set to None (the default).
target_model : Model, optional
        Mandatory model to use as the target model for MLGST when
generation_method is set to 'nonparametric'. When 'parametric'
is selected, input_model is used as the target.
start_seed : int, optional
The initial seed value for numpy's random number generator when
        generating data sets. For each successive dataset (and model)
        that is generated, the seed is incremented by one.
outcome_labels : list, optional
The list of Outcome labels to include in the output dataset. If None
are specified, defaults to the effect labels of `input_data_set`.
lsgst_lists : list of circuit lists, optional
Provides explicit list of circuit lists to be used in analysis;
to be given if the dataset uses "incomplete" or "reduced" sets of
        circuits. Default is None.
return_data : bool
Whether generated data sets should be returned in addition to
models.
verbosity : int
Level of detail printed to stdout.
Returns
-------
models : list
The list of generated Model objects.
data : list
The list of generated DataSet objects, only returned when
return_data == True.
"""
if max_lengths is None:
print("No max_lengths value specified; using [0,1,2,4,...,1024]")
max_lengths = [0] + [2**k for k in range(10)]
if (input_model is None and target_model is None):
raise ValueError("Must supply either input_model or target_model!")
if (input_model is not None and target_model is not None):
raise ValueError("Cannot supply both input_model and target_model!")
if generation_method == 'parametric':
target_model = input_model
datasetList = []
print("Creating DataSets: ")
for run in range(num_models):
print("%d " % run, end='')
datasetList.append(
create_bootstrap_dataset(input_data_set, generation_method,
input_model, start_seed + run,
outcome_labels)
)
modelList = []
print("Creating Models: ")
for run in range(num_models):
print("Running MLGST Iteration %d " % run)
if lsgst_lists is not None:
results = _longseq.run_long_sequence_gst_base(
datasetList[run], target_model, lsgst_lists, verbosity=verbosity)
else:
results = _longseq.run_long_sequence_gst(
datasetList[run], target_model,
fiducial_prep, fiducial_measure, germs, max_lengths,
verbosity=verbosity)
modelList.append(results.estimates.get('default', next(iter(results.estimates.values()))).models['go0'])
if not return_data:
return modelList
else:
return modelList, datasetList
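
# A usage sketch under assumed inputs (prep_fids, meas_fids, germs, and the max-length list
# would come from a standard GST experiment design; the names here are placeholders):
#
#     boot_models = create_bootstrap_models(10, ds, 'nonparametric',
#                                           prep_fids, meas_fids, germs, [1, 2, 4, 8],
#                                           target_model=target_mdl, start_seed=0)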
def gauge_optimize_models(gs_list, target_model,
gate_metric='frobenius', spam_metric='frobenius',
plot=True):
"""
Optimizes the "spam weight" parameter used when gauge optimizing a set of models.
This function gauge optimizes multiple times using a range of spam weights
    and takes the one that minimizes the average spam error multiplied by the
average gate error (with respect to a target model).
Parameters
----------
gs_list : list
The list of Model objects to gauge optimize (simultaneously).
target_model : Model
The model to compare the gauge-optimized gates with, and also
to gauge-optimize them to.
gate_metric : { "frobenius", "fidelity", "tracedist" }, optional
        The metric used within the gauge optimization to determine error
in the gates.
spam_metric : { "frobenius", "fidelity", "tracedist" }, optional
        The metric used within the gauge optimization to determine error
in the state preparation and measurement.
plot : bool, optional
Whether to create a plot of the model-target discrepancy
as a function of spam weight (figure displayed interactively).
Returns
-------
list
The list of Models gauge-optimized using the best spamWeight.
"""
listOfBootStrapEstsNoOpt = list(gs_list)
numResamples = len(listOfBootStrapEstsNoOpt)
ddof = 1
SPAMMin = []
SPAMMax = []
SPAMMean = []
gateMin = []
gateMax = []
gateMean = []
for spWind, spW in enumerate(_np.logspace(-4, 0, 13)): # try spam weights
print("Spam weight %s" % spWind)
listOfBootStrapEstsNoOptG0toTargetVarSpam = []
for mdl in listOfBootStrapEstsNoOpt:
listOfBootStrapEstsNoOptG0toTargetVarSpam.append(
_alg.gaugeopt_to_target(mdl, target_model,
item_weights={'spam': spW},
gates_metric=gate_metric,
spam_metric=spam_metric))
ModelGOtoTargetVarSpamVecArray = _np.zeros([numResamples],
dtype='object')
for i in range(numResamples):
ModelGOtoTargetVarSpamVecArray[i] = \
listOfBootStrapEstsNoOptG0toTargetVarSpam[i].to_vector()
mdlStdevVec = _np.std(ModelGOtoTargetVarSpamVecArray, ddof=ddof)
gsStdevVecSPAM = mdlStdevVec[:8]
mdlStdevVecOps = mdlStdevVec[8:]
SPAMMin.append(_np.min(gsStdevVecSPAM))
SPAMMax.append(_np.max(gsStdevVecSPAM))
SPAMMean.append(_np.mean(gsStdevVecSPAM))
gateMin.append(_np.min(mdlStdevVecOps))
gateMax.append(_np.max(mdlStdevVecOps))
gateMean.append(_np.mean(mdlStdevVecOps))
if plot:
raise NotImplementedError("plot removed b/c matplotlib support dropped")
#_mpl.pyplot.loglog(_np.logspace(-4,0,13),SPAMMean,'b-o')
#_mpl.pyplot.loglog(_np.logspace(-4,0,13),SPAMMin,'b--+')
#_mpl.pyplot.loglog(_np.logspace(-4,0,13),SPAMMax,'b--x')
#
#_mpl.pyplot.loglog(_np.logspace(-4,0,13),gateMean,'r-o')
#_mpl.pyplot.loglog(_np.logspace(-4,0,13),gateMin,'r--+')
#_mpl.pyplot.loglog(_np.logspace(-4,0,13),gateMax,'r--x')
#
#_mpl.pyplot.xlabel('SPAM weight in gauge optimization')
#_mpl.pyplot.ylabel('Per element error bar size')
#_mpl.pyplot.title('Per element error bar size vs. ${\\tt spamWeight}$')
#_mpl.pyplot.xlim(1e-4,1)
#_mpl.pyplot.legend(['SPAM-mean','SPAM-min','SPAM-max',
# 'gates-mean','gates-min','gates-max'],
# bbox_to_anchor=(1.4, 1.))
# gateTimesSPAMMean = _np.array(SPAMMean) * _np.array(gateMean)
bestSPAMWeight = _np.logspace(-4, 0, 13)[_np.argmin(
_np.array(SPAMMean) * _np.array(gateMean))]
print("Best SPAM weight is %s" % bestSPAMWeight)
listOfBootStrapEstsG0toTargetSmallSpam = []
for mdl in listOfBootStrapEstsNoOpt:
listOfBootStrapEstsG0toTargetSmallSpam.append(
_alg.gaugeopt_to_target(mdl, target_model,
item_weights={'spam': bestSPAMWeight},
gates_metric=gate_metric,
spam_metric=spam_metric))
return listOfBootStrapEstsG0toTargetSmallSpam
################################################################################
# Utility functions (perhaps relocate?)
################################################################################
#For metrics that evaluate model with single scalar:
def _model_stdev(gs_func, gs_ensemble, ddof=1, axis=None, **kwargs):
"""
Standard deviation of `gs_func` over an ensemble of models.
Parameters
----------
gs_func : function
A function that takes a :class:`Model` as its first argument, and
whose additional arguments may be given by keyword arguments.
gs_ensemble : list
A list of `Model` objects.
ddof : int, optional
As in numpy.std
axis : int or None, optional
As in numpy.std
Returns
-------
numpy.ndarray
The output of numpy.std
"""
return _np.std([gs_func(mdl, **kwargs) for mdl in gs_ensemble], axis=axis, ddof=ddof)
def _model_mean(gs_func, gs_ensemble, axis=None, **kwargs):
"""
Mean of `gs_func` over an ensemble of models.
Parameters
----------
gs_func : function
A function that takes a :class:`Model` as its first argument, and
whose additional arguments may be given by keyword arguments.
gs_ensemble : list
A list of `Model` objects.
axis : int or None, optional
As in numpy.mean
Returns
-------
numpy.ndarray
The output of numpy.mean
"""
return _np.mean([gs_func(mdl, **kwargs) for mdl in gs_ensemble], axis=axis)
#Note: for metrics that evaluate model with scalar for each gate, use axis=0
# argument to above functions
def _to_mean_model(gs_list, target_gs):
"""
Take the per-gate-element mean of a set of models.
Return the :class:`Model` constructed from the mean parameter
vector of the models in `gs_list`, that is, the mean of the
parameter vectors of each model in `gs_list`.
Parameters
----------
gs_list : list
A list of :class:`Model` objects.
target_gs : Model
A template model used to specify the parameterization
of the returned `Model`.
Returns
-------
Model
"""
numResamples = len(gs_list)
gsVecArray = _np.zeros([numResamples], dtype='object')
for i in range(numResamples):
gsVecArray[i] = gs_list[i].to_vector()
output_gs = target_gs.copy()
output_gs.from_vector(_np.mean(gsVecArray))
return output_gs
def _to_std_model(gs_list, target_gs, ddof=1):
"""
Take the per-gate-element standard deviation of a list of models.
Return the :class:`Model` constructed from the standard-deviation
    parameter vector of the models in `gs_list`, that is, the standard
    deviation of the parameter vectors of each model in `gs_list`.
Parameters
----------
gs_list : list
A list of :class:`Model` objects.
target_gs : Model
A template model used to specify the parameterization
of the returned `Model`.
ddof : int, optional
As in numpy.std
Returns
-------
Model
"""
numResamples = len(gs_list)
gsVecArray = _np.zeros([numResamples], dtype='object')
for i in range(numResamples):
gsVecArray[i] = gs_list[i].to_vector()
output_gs = target_gs.copy()
output_gs.from_vector(_np.std(gsVecArray, ddof=ddof))
return output_gs
def _to_rms_model(gs_list, target_gs):
"""
Take the per-gate-element RMS of a set of models.
Return the :class:`Model` constructed from the root-mean-squared
parameter vector of the models in `gs_list`, that is, the RMS
of the parameter vectors of each model in `gs_list`.
Parameters
----------
gs_list : list
A list of :class:`Model` objects.
target_gs : Model
A template model used to specify the parameterization
of the returned `Model`.
Returns
-------
Model
"""
numResamples = len(gs_list)
gsVecArray = _np.zeros([numResamples], dtype='object')
for i in range(numResamples):
gsVecArray[i] = _np.sqrt(gs_list[i].to_vector()**2)
output_gs = target_gs.copy()
output_gs.from_vector(_np.mean(gsVecArray))
return output_gs
#Unused?
#def gateset_jtracedist(mdl,target_model,mx_basis="gm"):
# output = _np.zeros(3,dtype=float)
# for i, gate in enumerate(target_model.operations.keys()):
# output[i] = _tools.jtracedist(mdl.operations[gate],target_model.operations[gate],mx_basis=mx_basis)
## print output
# return output
#
#def gateset_entanglement_fidelity(mdl,target_model):
# output = _np.zeros(3,dtype=float)
# for i, gate in enumerate(target_model.operations.keys()):
# output[i] = _tools.entanglement_fidelity(mdl.operations[gate],target_model.operations[gate])
# return output
#
#def gateset_decomp_angle(mdl):
# output = _np.zeros(3,dtype=float)
# for i, gate in enumerate(mdl.operations.keys()):
# output[i] = _tools.decompose_gate_matrix(mdl.operations[gate]).get('pi rotations',0)
# return output
#
#def gateset_decomp_decay_diag(mdl):
# output = _np.zeros(3,dtype=float)
# for i, gate in enumerate(mdl.operations.keys()):
# output[i] = _tools.decompose_gate_matrix(mdl.operations[gate]).get('decay of diagonal rotation terms',0)
# return output
#
#def gateset_decomp_decay_offdiag(mdl):
# output = _np.zeros(3,dtype=float)
# for i, gate in enumerate(mdl.operations.keys()):
# output[i] = _tools.decompose_gate_matrix(mdl.operations[gate]).get('decay of off diagonal rotation terms',0)
# return output
#
##def gateset_fidelity(mdl,target_model,mx_basis="gm"):
## output = _np.zeros(3,dtype=float)
## for i, gate in enumerate(target_model.operations.keys()):
## output[i] = _tools.fidelity(mdl.operations[gate],target_model.operations[gate])
## return output
#
#def gateset_diamonddist(mdl,target_model,mx_basis="gm"):
# output = _np.zeros(3,dtype=float)
# for i, gate in enumerate(target_model.operations.keys()):
# output[i] = _tools.diamonddist(mdl.operations[gate],target_model.operations[gate],mx_basis=mx_basis)
# return output
#
#def spamrameter(mdl):
# firstRho = list(mdl.preps.keys())[0]
# firstE = list(mdl.effects.keys())[0]
# return _np.dot(mdl.preps[firstRho].T,mdl.effects[firstE])[0,0]
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Enumerate Hidden Markov Model
=============================
This example is ported from [1], which shows how to marginalize out
discrete model variables in Pyro.
This combines MCMC with a variable elimination algorithm, where we
use enumeration to exactly marginalize out some variables from the
joint density.
To marginalize out discrete variables ``x``:
1. Verify that the variable dependency structure in your model
admits tractable inference, i.e. the dependency graph among
enumerated variables should have narrow treewidth.
2. Ensure your model can handle broadcasting of the sample values
of those variables.
Note that, unlike [1], which uses a Python loop, here we use
:func:`~numpyro.contrib.control_flow.scan` to reduce the compilation
time of the model (only one step needs to be compiled). Under the
hood, `scan` stacks all the priors' parameters and values into
an additional time dimension. This allows us to compute the joint
density in parallel. In addition, the stacked form allows us
to use the parallel-scan algorithm in [2], which reduces parallel
complexity from O(length) to O(log(length)).
Data are taken from [3]. However, the original source of the data
seems to be the Institut fuer Algorithmen und Kognitive Systeme
at Universitaet Karlsruhe.
**References:**
1. *Pyro's Hidden Markov Model example*,
(https://pyro.ai/examples/hmm.html)
2. *Temporal Parallelization of Bayesian Smoothers*,
Simo Sarkka, Angel F. Garcia-Fernandez
(https://arxiv.org/abs/1905.13002)
3. *Modeling Temporal Dependencies in High-Dimensional Sequences:
Application to Polyphonic Music Generation and Transcription*,
Boulanger-Lewandowski, N., Bengio, Y. and Vincent, P.
"""
import argparse
import logging
import os
import time
from jax import random
import jax.numpy as jnp
import numpyro
from numpyro.contrib.control_flow import scan
from numpyro.contrib.indexing import Vindex
import numpyro.distributions as dist
from numpyro.examples.datasets import JSB_CHORALES, load_dataset
from numpyro.handlers import mask
from numpyro.infer import HMC, MCMC, NUTS
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Let's start with a simple Hidden Markov Model.
#
# x[t-1] --> x[t] --> x[t+1]
# | | |
# V V V
# y[t-1] y[t] y[t+1]
#
# This model includes a plate for the data_dim = 44 keys on the piano. This
# model has two "style" parameters probs_x and probs_y that we'll draw from a
# prior. The latent state is x, and the observed state is y.
def model_1(sequences, lengths, args, include_prior=True):
num_sequences, max_length, data_dim = sequences.shape
with mask(mask=include_prior):
probs_x = numpyro.sample("probs_x",
dist.Dirichlet(0.9 * jnp.eye(args.hidden_dim) + 0.1)
.to_event(1))
probs_y = numpyro.sample("probs_y",
dist.Beta(0.1, 0.9)
.expand([args.hidden_dim, data_dim])
.to_event(2))
def transition_fn(carry, y):
x_prev, t = carry
with numpyro.plate("sequences", num_sequences, dim=-2):
with mask(mask=(t < lengths)[..., None]):
x = numpyro.sample("x", dist.Categorical(probs_x[x_prev]))
with numpyro.plate("tones", data_dim, dim=-1):
numpyro.sample("y", dist.Bernoulli(probs_y[x.squeeze(-1)]), obs=y)
return (x, t + 1), None
x_init = jnp.zeros((num_sequences, 1), dtype=jnp.int32)
# NB swapaxes: we move time dimension of `sequences` to the front to scan over it
scan(transition_fn, (x_init, 0), jnp.swapaxes(sequences, 0, 1))
# Next let's add a dependency of y[t] on y[t-1].
#
# x[t-1] --> x[t] --> x[t+1]
# | | |
# V V V
# y[t-1] --> y[t] --> y[t+1]
def model_2(sequences, lengths, args, include_prior=True):
num_sequences, max_length, data_dim = sequences.shape
with mask(mask=include_prior):
probs_x = numpyro.sample("probs_x",
dist.Dirichlet(0.9 * jnp.eye(args.hidden_dim) + 0.1)
.to_event(1))
probs_y = numpyro.sample("probs_y",
dist.Beta(0.1, 0.9)
.expand([args.hidden_dim, 2, data_dim])
.to_event(3))
def transition_fn(carry, y):
x_prev, y_prev, t = carry
with numpyro.plate("sequences", num_sequences, dim=-2):
with mask(mask=(t < lengths)[..., None]):
x = numpyro.sample("x", dist.Categorical(probs_x[x_prev]))
# Note the broadcasting tricks here: to index probs_y on tensors x and y,
# we also need a final tensor for the tones dimension. This is conveniently
# provided by the plate associated with that dimension.
with numpyro.plate("tones", data_dim, dim=-1) as tones:
y = numpyro.sample("y",
dist.Bernoulli(probs_y[x, y_prev, tones]),
obs=y)
return (x, y, t + 1), None
x_init = jnp.zeros((num_sequences, 1), dtype=jnp.int32)
y_init = jnp.zeros((num_sequences, data_dim), dtype=jnp.int32)
scan(transition_fn, (x_init, y_init, 0), jnp.swapaxes(sequences, 0, 1))
# Next consider a Factorial HMM with two hidden states.
#
# w[t-1] ----> w[t] ---> w[t+1]
# \ x[t-1] --\-> x[t] --\-> x[t+1]
# \ / \ / \ /
# \/ \/ \/
# y[t-1] y[t] y[t+1]
#
# Note that since the joint distribution of each y[t] depends on two variables,
# those two variables become dependent. Therefore during enumeration, the
# entire joint space of these variables w[t],x[t] needs to be enumerated.
# For that reason, we set the dimension of each to the square root of the
# target hidden dimension.
def model_3(sequences, lengths, args, include_prior=True):
num_sequences, max_length, data_dim = sequences.shape
hidden_dim = int(args.hidden_dim ** 0.5) # split between w and x
with mask(mask=include_prior):
probs_w = numpyro.sample("probs_w",
dist.Dirichlet(0.9 * jnp.eye(hidden_dim) + 0.1)
.to_event(1))
probs_x = numpyro.sample("probs_x",
dist.Dirichlet(0.9 * jnp.eye(hidden_dim) + 0.1)
.to_event(1))
probs_y = numpyro.sample("probs_y",
dist.Beta(0.1, 0.9)
                                 .expand([hidden_dim, hidden_dim, data_dim])
.to_event(3))
def transition_fn(carry, y):
w_prev, x_prev, t = carry
with numpyro.plate("sequences", num_sequences, dim=-2):
with mask(mask=(t < lengths)[..., None]):
w = numpyro.sample("w", dist.Categorical(probs_w[w_prev]))
x = numpyro.sample("x", dist.Categorical(probs_x[x_prev]))
                # Note the broadcasting tricks here: to index probs_y on tensors w and x,
# we also need a final tensor for the tones dimension. This is conveniently
# provided by the plate associated with that dimension.
with numpyro.plate("tones", data_dim, dim=-1) as tones:
numpyro.sample("y",
dist.Bernoulli(probs_y[w, x, tones]),
obs=y)
return (w, x, t + 1), None
w_init = jnp.zeros((num_sequences, 1), dtype=jnp.int32)
x_init = jnp.zeros((num_sequences, 1), dtype=jnp.int32)
scan(transition_fn, (w_init, x_init, 0), jnp.swapaxes(sequences, 0, 1))
# By adding a dependency of x on w, we generalize to a
# Dynamic Bayesian Network.
#
# w[t-1] ----> w[t] ---> w[t+1]
# | \ | \ | \
# | x[t-1] ----> x[t] ----> x[t+1]
# | / | / | /
# V / V / V /
# y[t-1] y[t] y[t+1]
#
# Note that message passing here has roughly the same cost as with the
# Factorial HMM, but this model has more parameters.
def model_4(sequences, lengths, args, include_prior=True):
num_sequences, max_length, data_dim = sequences.shape
hidden_dim = int(args.hidden_dim ** 0.5) # split between w and x
with mask(mask=include_prior):
probs_w = numpyro.sample("probs_w",
dist.Dirichlet(0.9 * jnp.eye(hidden_dim) + 0.1)
.to_event(1))
probs_x = numpyro.sample("probs_x",
dist.Dirichlet(0.9 * jnp.eye(hidden_dim) + 0.1)
.expand_by([hidden_dim])
.to_event(2))
probs_y = numpyro.sample("probs_y",
dist.Beta(0.1, 0.9)
.expand([hidden_dim, hidden_dim, data_dim])
.to_event(3))
def transition_fn(carry, y):
w_prev, x_prev, t = carry
with numpyro.plate("sequences", num_sequences, dim=-2):
with mask(mask=(t < lengths)[..., None]):
w = numpyro.sample("w", dist.Categorical(probs_w[w_prev]))
x = numpyro.sample("x", dist.Categorical(Vindex(probs_x)[w, x_prev]))
with numpyro.plate("tones", data_dim, dim=-1) as tones:
numpyro.sample("y",
dist.Bernoulli(probs_y[w, x, tones]),
obs=y)
return (w, x, t + 1), None
w_init = jnp.zeros((num_sequences, 1), dtype=jnp.int32)
x_init = jnp.zeros((num_sequences, 1), dtype=jnp.int32)
scan(transition_fn, (w_init, x_init, 0), jnp.swapaxes(sequences, 0, 1))
models = {name[len('model_'):]: model
for name, model in globals().items()
if name.startswith('model_')}
def main(args):
model = models[args.model]
_, fetch = load_dataset(JSB_CHORALES, split='train', shuffle=False)
lengths, sequences = fetch()
if args.num_sequences:
sequences = sequences[0:args.num_sequences]
lengths = lengths[0:args.num_sequences]
logger.info('-' * 40)
logger.info('Training {} on {} sequences'.format(
model.__name__, len(sequences)))
# find all the notes that are present at least once in the training set
present_notes = ((sequences == 1).sum(0).sum(0) > 0)
# remove notes that are never played (we remove 37/88 notes with default args)
sequences = sequences[..., present_notes]
if args.truncate:
lengths = lengths.clip(0, args.truncate)
sequences = sequences[:, :args.truncate]
logger.info('Each sequence has shape {}'.format(sequences[0].shape))
logger.info('Starting inference...')
rng_key = random.PRNGKey(2)
start = time.time()
kernel = {'nuts': NUTS, 'hmc': HMC}[args.kernel](model)
mcmc = MCMC(kernel, args.num_warmup, args.num_samples, args.num_chains,
progress_bar=False if "NUMPYRO_SPHINXBUILD" in os.environ else True)
mcmc.run(rng_key, sequences, lengths, args=args)
mcmc.print_summary()
logger.info('\nMCMC elapsed time: {}'.format(time.time() - start))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="HMC for HMMs")
parser.add_argument("-m", "--model", default="1", type=str,
help="one of: {}".format(", ".join(sorted(models.keys()))))
parser.add_argument('-n', '--num-samples', nargs='?', default=1000, type=int)
parser.add_argument("-d", "--hidden-dim", default=16, type=int)
parser.add_argument('-t', "--truncate", type=int)
parser.add_argument("--num-sequences", type=int)
parser.add_argument("--kernel", default='nuts', type=str)
parser.add_argument('--num-warmup', nargs='?', default=500, type=int)
parser.add_argument("--num-chains", nargs='?', default=1, type=int)
parser.add_argument('--device', default='cpu', type=str, help='use "cpu" or "gpu".')
args = parser.parse_args()
numpyro.set_platform(args.device)
numpyro.set_host_device_count(args.num_chains)
main(args)
|
#!/usr/bin/env python
# coding: utf-8
# # QUESTION ONE (1)
# # DATA LOADING
# In[1]:
# If the dataset were much larger, I would have used dask, a parallel data-loading library,
# to improve time efficiency and avoid loading all the data into memory at once.
# An alternative is to read the data in chunks, but it is comparatively less efficient
# because of the concatenation required at the end of the chunking process.
import pandas as pd
data=pd.read_csv('realestate.csv',sep=';',index_col=False)
data.head(10)
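# In[ ]:
# A hedged illustration, not part of the original notebook, of the chunked alternative mentioned
# above: read the file in fixed-size chunks and concatenate at the end. Only worthwhile when the
# file is too large to load comfortably in one pass; the chunk size here is arbitrary.
chunks = pd.read_csv('realestate.csv', sep=';', index_col=False, chunksize=100)
data_chunked = pd.concat(chunks, ignore_index=True)
print(data_chunked.shape)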
# .
# # QUESTION TWO (2)
# # DATA CLEANING
# The data cleaning steps would be done in three phases as:
#
#
# # PHASE 1: THE GENERAL OUTLOOK AND PROFILE OF THE DATASET
# # (a) Statistical Description
# The "describe" method of a pandas DataFrame gives a statistical description of the dataset. This helps to see the count of unique values, the most frequent value, how the values deviate or vary from one another, the percentiles, among others.
#
# In[2]:
data.describe(include='all')
# In[3]:
data.info()
# # (b) Data Type Formats
# When trying to convert to specific datatypes, the rows that do not comply with the rules of that datatype are flagged as errors. This helps in making suitable corrections to the identified observations.
#
#
# Also, possible operations on the columns depend on the datatype. The correct datatypes would also help to identify errors in the columns. In this section, emphasis is placed on the numeric columns, while the non-numeric features form the basis for the inconsistency check in phase two.
# The above information could help determine the need for type conversion
# The columns with 'object' datatypes need to be investigated to determine which ones would require conversion
# # i) The 'city','state', 'street' and 'type' object columns are non-numeric values
# The 'city', 'state', and 'type' columns look tempting to convert to the category dtype for memory efficiency and optimization.
# However, they are left as object because the dataset is not large enough to cause memory issues. Also, if converted to the category dtype, adding a new distinct value to a column would generate a 'NaN' error.
# # ii) The 'sale_date' column being a date would be converted to date datatype.
#
# data['sale_date'] = pd.to_datetime(data.sale_date, format='%Y-%m-%d %H:%M:%S')
#
# data['sale_date'] = pd.to_datetime(data.sale_date, format='%Y-%m-%d %H:%M:%S')
#
#
# Running the above line gives errors such as the one identified below
#
#
# ValueError: time data 1917-07-24 08:12:24% doesn't match format specified
# In[4]:
# The error causing rows were identified and corrected as follows
data["sale_date"].replace({"2013-12-19 04:05:22A": "2013-12-19 04:05:22", "1917-07-24 08:12:24%":"1917-07-24 08:12:24","1918-02-25 20:36:13&":"1918-02-25 20:36:13"}, inplace=True)
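# In[ ]:
# A hedged aside, not part of the original notebook: parsing with errors='coerce' is an
# alternative way to surface malformed dates, and after the replacements above it also
# lists any remaining unparseable or missing entries.
bad_dates = data.loc[pd.to_datetime(data['sale_date'], format='%Y-%m-%d %H:%M:%S', errors='coerce').isna(), 'sale_date']
print(bad_dates)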
# # iii) The 'zip' and 'price' object columns have numeric values. These are supposed to be integer values.This is checked and the rows with errors are identified
# In[5]:
# The error causing rows were identified in the zip column
for j, value in enumerate(data['zip']):
try:
int(value)
except ValueError:
print('The identified error index {}: {!r}'.format(j, value))
# In[6]:
# The error causing rows were identified in the price column
for j, value in enumerate(data['price']):
try:
int(value)
except ValueError:
print('The identified error index {}: {!r}'.format(j, value))
# In[7]:
# The typographical errors were corrected intuitively as follows
data["zip"].replace({"957f58": "95758"}, inplace=True)
data["price"].replace({"298000D": "298000"}, inplace=True)
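# In[ ]:
# A hedged alternative, not part of the original notebook: pd.to_numeric with errors='coerce'
# flags the same problem rows in one pass instead of looping; any genuinely missing values
# would also show up here.
print(data.loc[pd.to_numeric(data['zip'], errors='coerce').isna(), 'zip'])
print(data.loc[pd.to_numeric(data['price'], errors='coerce').isna(), 'price'])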
# # iv) The 'longitude' and 'latitude' object columns have floating values. These are checked and the rows with errors identified
# In[8]:
for j, value in enumerate(data['longitude']):
try:
float(value)
except ValueError:
print('Index error for Longitude {}: {!r}'.format(j, value))
# In[9]:
for j, value in enumerate(data['latitude']):
try:
float(value)
except ValueError:
print('Index error for Latitude {}: {!r}'.format(j, value))
# In[10]:
# The typographical errors were replaced intuitively as follows
data["longitude"].replace({"-121.2286RT": "-121.228678","-121.363757$": "-121.363757"}, inplace=True)
data["latitude"].replace({"38.410992C": "38.410992"}, inplace=True)
# In[11]:
#data = data.astype({'longitude': 'float64', 'latitude': 'float64','price':'int64','zip':'int64'})
data.info()
# # PHASE 2: THE INCONSISTENCY CHECK
# The data consistency check is used for the following:
#
# Redundancy such as duplicates, irrelevant data points, format errors, among others, in both the columns and the rows
#
#
# To do this, we check the consistency of non-numeric features (type, state, city and street) by:
# (i) Capitalization Consistency Check
# In[12]:
# The inconsistency in letter case (mixed lower and upper cases) can be solved by converting all the letters to the same case, either lower or upper.
# The upper case is used in this case
data= data.apply(lambda x: x.astype(str).str.upper() if x.name in ['street', 'type','city','state'] else x)
data
# (ii) Duplicate Row Check
# In[13]:
# Duplicate rows are repetitions that add no new information to the dataset.
# Therefore, observations that have already been recorded should be deleted. Duplicates can arise from double submission,
# file merging, among others
data.drop_duplicates(inplace=True)
data
# (iii) Irrelevant/Redundant Row Check
# In[14]:
# Since this is real estate sales data, some columns can be seen as unique identifiers.
# Unavailability or missingness of these identifiers would render the observation (row) redundant.
# The identifiers here are the longitude and latitude.
# This is because the house/beds/baths sold could not be identified without this information.
# Therefore, rows with these values missing should be removed
import numpy as np
data = data.dropna(axis=0, subset=['longitude','latitude'])
data
# (iv) Typographical and Format Errors
# The unique values of the non-numeric columns ('type','state','city', and 'street') as shown in Out[2]: above are free text, which is prone to typographical errors and to variation in the format used at the writer's discretion. A look at the unique values shows these errors.
#
#
# As can be seen in the state column, there are typographical errors: 'CA', 'CA3', and 'CA-' all point to the single state 'CA'.
# [1] The solution to the 'states' column can be either of:
#
#
# a) Delete the column since it is a single-valued column and would not help in any ML modelling task.
#
#
# b) Correct the spelling and typo-errors.
#
#
# For completeness of the dataset, I will just replace the values with 'CA'
# In[15]:
# Check the unique values in the 'state' column and also save a copy of the data with a new name
print(data.state.unique())
new_data=data.copy()
# In[16]:
#new_data.loc[new_data['state'] == 'CA']
new_data=data.loc[data['state'] == 'CA']
new_data.state.unique()
# [2] The solution to the 'type' column:
# In[31]:
#The unique values in the type column are replaced appropriately
new_data.type.unique()
new_data["type"].replace({"RESIDENTIAL%": "RESIDENTIAL","RESIDEN_TIAL": "RESIDENTIAL","RESIDENTIAL)": "RESIDENTIAL"}, inplace=True)
new_data.type.unique()
# [3] The solution to the 'city' column:
# In[18]:
# To check the count and unique values in the column
print(new_data.city.nunique())
new_data.city.unique()
# In[19]:
# One way to do this is to create a list of valid cities in California
# Then, check the "city" column with this list.
# Any value that is present in the 'city' column but not available in the actual
# city list would be investigated
actual_city=['SACRAMENTO', 'RANCHO CORDOVA', 'RIO LINDA', 'CITRUS HEIGHTS',
'NORTH HIGHLANDS', 'ANTELOPE', 'ELK GROVE',
'ELVERTA', 'GALT', 'CARMICHAEL', 'ORANGEVALE', 'FOLSOM',
'ELK GROVE', 'MATHER', 'POLLOCK PINES', 'GOLD RIVER',
'EL DORADO HILLS', 'RANCHO MURIETA', 'WILTON', 'GREENWOOD',
'FAIR OAKS', 'CAMERON PARK', 'LINCOLN', 'PLACERVILLE',
'MEADOW VISTA', 'ROSEVILLE', 'ROCKLIN', 'AUBURN', 'LOOMIS',
'EL DORADO', 'PENRYN', 'GRANITE BAY', 'FORESTHILL',
'DIAMOND SPRINGS', 'SHINGLE SPRINGS', 'COOL', 'WALNUT GROVE',
'GARDEN VALLEY', 'SLOUGHHOUSE', 'WEST SACRAMENTO']
check_this= new_data[~new_data.city.isin(actual_city)].city
check_this
# In[20]:
#The unique values in the type column are replaced appropriately
new_data["city"].replace({"SACRAMENTO@": "SACRAMENTO","ELK GROVE<>": "ELK GROVE"}, inplace=True)
print(new_data.city.nunique())
new_data.city.unique()
# In[32]:
# Other possible typo errors that can be checked are whitespace, full stops, among others
new_data['city'] = new_data['city'].str.strip() # delete whitespace.
new_data['city'] = new_data['city'].str.replace('\\.', '') # delete dot/full stop.
print(new_data.city.nunique())
new_data.city.unique()
# [4] The solution to the 'street' column:
# In[22]:
# To check the count of unique values in the column
new_data.street.nunique()
# In[33]:
# There is actually less to be done here because the number of unique values almost equals the number of observations
# Therefore, one way to clean the data is to remove blanks and dots, abbreviate some words, etc.
new_data['street'] = new_data['street'].str.strip() # delete blankspaces
new_data['street'] = new_data['street'].str.replace('\\.', '') # delete dot/full stop.
print(new_data.street.nunique())
# In[24]:
#changing the datatypes after the corrections have been effected
datatype= {'price': int, 'zip': int,'longitude':float,'latitude':float}
new_data = new_data.astype(datatype)
new_data['sale_date'] = pd.to_datetime(new_data.sale_date, format='%Y-%m-%d %H:%M:%S')
print(new_data.dtypes)
# # PHASE 3: HANDLING THE MISSING VALUES
# In[25]:
new_data.isnull().values.any()
# There are no missing values in the refined data. However, there are 'zero' valued cells, which could also mean that missing values have been replaced with zero. If the zero values actually represent missing values, then there are a number of ways to handle this:
#
#
# (i) Single-Value Imputation (SI), which involves replacing the missing cells with a single value. It could be the mean, the most frequently occurring value, among others.
#
#
# (ii) Multiple/Multivariate Imputation (MI), which involves the use of different values to replace the missing cells based on the distribution of the data. There are several state-of-the-art methods to do this.
#
#
# My master's thesis research was on classification with data irregularities (missing values and class imbalance). I implemented and compared different state-of-the-art imputation algorithms, such as a Generative Adversarial Network (GAN), for building prediction. These could be good alternatives for handling the missing values.
# The link to my thesis can be found here https://github.com/busarieedris/Classification-with-Data-Irregularities
# (There may be some restrictions on some data due to privacy concerns. It is collaborative research with a foremost research institute in Germany)
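# In[ ]:
# A hedged illustration, not part of the original notebook: if the zero values really do stand
# in for missing data, single-value imputation can be sketched by converting the zeros back to
# NaN and filling with the column median. The 'beds' column is used purely as an example here.
example_cols = ['beds']
imputed = new_data.copy()
imputed[example_cols] = imputed[example_cols].replace(0, np.nan)
imputed[example_cols] = imputed[example_cols].fillna(imputed[example_cols].median())
print(imputed[example_cols].describe())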
# .
# # QUESTION THREE (3)
# # DATA SAVING
# In[26]:
# Save the cleaned data under a more descriptive name. This can be done with the '.to_csv' command.
# But the instruction says 'write a new csv with a similar name with the cleaned data'. That is the reason for giving the cleaned data
# a better name first.
clean_realestate=new_data.copy()
clean_realestate.to_csv('clean_realestate.csv',index=False,sep=';')
# In[27]:
clean_realestate.info()
# .
# # QUESTION FOUR (4)
# (A) what is the distance (in meters) between the cheapest sale and the most recent sale?
#
#
# SOLUTION / APPROACH
#
#
# To do this:
#
#
# STEP 1: You need the location (Longitude and Latitude) of the two points (The cheapest sale and the most recent sale).
#
#
# In[28]:
# LET X BE THE CHEAPEST SALE (i.e The least value in the 'price' column)
lon_x=new_data.loc[new_data['price'].idxmin()]['longitude'] # The corresponding longitude for X
lat_x=new_data.loc[new_data['price'].idxmin()]['latitude'] # The corresponding latitude for X
# In[29]:
# LET Y REPRESENT THE MOST RECENT SALE (i.e The most recent date in the 'sale_date' column)
lon_y=new_data.loc[new_data.sale_date.idxmax()]['longitude'] # The corresponding longitude for the most recent sale
lat_y=new_data.loc[new_data.sale_date.idxmax()]['latitude'] # The corresponding latitude for the most recent sale
# STEP 2: Calculate the difference in distance between these two points
# In order to get the distance between two coordinate points, there are quite a few formulas for such calculations, with varying degrees of accuracy.
#
# Some of the methods are:
#
# 1) Haversine formula: it determines the great-circle distance between two points based on the law of haversines.
#
#
# 2) Vincenty formula: a distance calculation based on the fact that the earth is an oblate spheroid. It has an accuracy of about 1 mm.
#
#
# Step (i): Convert the longitude and latitude values from degrees to radians.
#
#
# Step (ii): Find the difference in the coordinates.
#
#
# Step (iii): Use one of the formulas above to calculate the distance between the two points.
#
# In[34]:
import math
from math import sin, cos, sqrt, atan2, radians
R = 6373.0 # Mean radius of the Earth in kilometres
# Step(i) Convert the longitude and latitude values from degrees to radians.
lat_x_rad = math.radians(lat_x)
lon_x_rad= math.radians(lon_x)
lat_y_rad = math.radians(lat_y)
lon_y_rad= math.radians(lon_y)
# Step(ii) Find the difference in the coordinates.
diff_lon = lon_y_rad - lon_x_rad
diff_lat = lat_y_rad - lat_x_rad
# Step(iii) For the purpose of this assignment, the Haversine formula would be used.
# Using Haversine formula to calculate the distance between two points.
a = math.sin(diff_lat / 2)**2 + math.cos(lat_x_rad) * math.cos(lat_y_rad) * math.sin(diff_lon / 2)**2
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
dist = R * c
print("The distance (in meters) between the cheapest sale and the most recent sale:", dist * 1000, 'meters')
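# In[ ]:
# A hedged cross-check, not part of the original notebook: the geopy package (if installed)
# provides a geodesic (Vincenty-style) distance on the oblate-spheroid model mentioned above,
# which can be used to sanity-check the Haversine result. Guarded in case geopy is unavailable.
try:
    from geopy.distance import geodesic
    geo_dist = geodesic((lat_x, lon_x), (lat_y, lon_y)).meters
    print("Geodesic distance for comparison:", geo_dist, 'meters')
except ImportError:
    print("geopy is not installed; skipping the geodesic cross-check")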
# .
# (B) what is the median street number, in multi-family houses, sold between 05/11/1933 and 03/12/1998 , in Sacramento?
#
#
# SOLUTION / APPROACH
#
#
# To do this:
#
#
# STEP 1: Filter out the rows with 'city= SACRAMENTO' and 'type= MULTI-FAMILY'
# In[35]:
# Filter out the rows with 'city= SACRAMENTO' and 'type= MULTI-FAMILY' ,
data_add=new_data[(new_data['type']=='MULTI-FAMILY') & (new_data['city']=='SACRAMENTO')]
data_add
#
#
#
# STEP 2: Filter the date that falls between '05/11/1933' and '03/12/1998' in step 1
# In[36]:
# From the data_add obtained above, filter the dates that fall between 05/11/1933 and 03/12/1998
date_filter = (data_add['sale_date'] > '1933-11-05 00:00:00') & (data_add['sale_date'] <= '1998-12-03 00:00:00') # Filter date 05/11/1933 and 03/12/1998
data_ctd= data_add.loc[date_filter] # data with filtered city= SACRAMENTO, type=MULTI-FAMILY and date=05/11/1933 and 03/12/1998.
data_ctd
# STEP 3: From the 'street' column, extract the characters before the first blank space. These correspond to the street numbers. Then, find the median of these numbers.
# In[37]:
# Extract street numbers from the street column (by splitting the content of the column by blank spaces and taking the first token)
# The tokens are cast to integers so that the median can be computed
street_num = data_ctd['street'].apply(lambda x: int(x.split()[0])).median()
print('The median street number, in multi-family houses, sold between 05/11/1933 and 03/12/1998 , in Sacramento is: ',street_num)
# .
# (C) What is the city name, and its 3 most common zip codes, that has the 2nd highest amount of beds sold?
# SOLUTION / APPROACH
#
#
# To do this:
#
#
# STEP 1: Get the name of the city that has the 2nd highest amount of beds sold
# This is achieved by summing the number of beds per city.
#
# The name of the city with the second highest number of beds sold is then obtained.
# In[38]:
# Step 1: Get the name of the city that has the 2nd highest amount of beds sold
# This is achieved by summing the beds per city and taking the second largest total
k=new_data.groupby('city')['beds'].sum()
k.nlargest(2).iloc[[-1]]
# STEP 2: Find the three (3) most common zip codes of the city(ELK GROVE) gotten in step 1
#
# In[39]:
# Filter out the ELK GROVE rows from the original data since we established that ELK GROVE is the city of interest.
data_elk=new_data[(new_data['city']=='ELK GROVE')]
data_elk
# STEP 3: Group the ELK GROVE rows by zip code. This gives all the unique zip codes belonging to ELK GROVE
#
#
# Then, count the number of occurrences (frequency) of ELK GROVE's unique zip codes and rename the resulting column as frequency
#
# Rearrange the table in descending order
# In[40]:
data_elk.groupby(['city','zip']).size().reset_index(name='frequency').sort_values(['frequency','zip'],ascending=[0,1]).groupby('city').head(3)
# In[41]:
stg='''Therefore, the city name, and the 3 most common zip codes, that has the 2nd highest amount of beds sold: \n
city name: ELK GROVE \n
Zip codes: 95758,95757 and 95624'''
print(stg)
|
import pytest
from protoseg import Config
configs = {'run1':{},
'run2':{}}
def test_len():
config = Config(configs=configs)
    assert len(config) == 2
def test_index():
config = Config(configs=configs)
    assert config[0] == 'run1'
    assert config[1] == 'run2'
def test_iterator():
count = 0
config = Config(configs=configs)
for _ in config:
count += 1
    assert count == 2
|
from AbstractClasses.AbstractController import AbstractController
class ConsoleController(AbstractController):
def __init__(self, io_controller=None):
super(ConsoleController, self).__init__(io_controller)
def make_guess(self, line_number=None):
return super(ConsoleController, self).make_guess(line_number=line_number)
def play(self):
super(ConsoleController, self).play()
if self.game_ready:
# If we're here, then the game was successfully created and
# the next step in the console model is to ask the user to
# choose a mode.
mode, error_detail = self.io_controller.choose_a_mode(
available_modes=self.available_modes,
default_choice=self.game.default_mode
)
if not mode:
# This should never be reachable, but just in case :)
self.io_controller.report_error(error_detail)
return
# Now create a game with the requested mode.
self.game.get_game(mode=mode)
            # If the AbstractView has a start message, tell it to show it
            # before the guessing loop begins.
self.io_controller.start("Okay, the game is about to begin.")
# Setup the header and screen based on the mode (the number of
# digits and the number of guesses) of the game.
self.io_controller.setup(game_tries=self.game.game_tries, game_digits=self.game.game_digits)
# Initialize a counter to track the number of guesses which have
# been made on the game. Note, the user can quit out of the game
# at any point and control then returns to app.py.
counter = 1
# Draw the screen
self.io_controller.draw_screen(current_try=counter)
# Set a default finish message.
finish_message = "Okay, thanks for playing!"
# Loop, ask the user for their guess, and then analyze the guess
# using the Game model.
while True:
return_signal, output = self.make_guess(line_number=counter)
if return_signal == self.SIGNAL_BREAK:
break
if return_signal == self.SIGNAL_FINISH:
# Regardless of win or loss, the game is over and the message
# returned by the Game model needs to be delivered to the
# user. The finish_message is updated and the loop is broken.
self.io_controller.report_status(message=output["outcome"]["status"])
self.io_controller.draw_screen(current_try=counter)
self.io_controller.finish(finish_message=output["outcome"]["status"])
break
elif return_signal == self.SIGNAL_ERROR:
continue
# Increment the guess counter.
counter += 1
# Draw the screen
self.io_controller.report_status(message=output["outcome"]["status"])
self.io_controller.draw_screen(current_try=counter)
# Check if the user has exceeded their guesses. If they have,
# break the loop.
if counter > self.game.game_tries:
break
|
# -*- coding: utf-8 -*-
import os
import pytest
from scout.utils.scout_requests import fetch_refseq_version, get_request
TRAVIS = os.getenv("TRAVIS")
def test_get_request_bad_url():
"""Test functions that accepts an url and returns decoded data from it"""
# test function with a url that is not valid
url = "fakeyurl"
with pytest.raises(ValueError) as err:
# function should raise error
assert get_request(url)
@pytest.mark.skipif(TRAVIS, reason="Requests seems to be problematic on travis")
def test_get_request():
"""Test functions that accepts an url and returns decoded data from it"""
# test function with url that exists
url = "http://www.github.com"
decoded_resp = get_request(url)
assert "<!DOCTYPE html>" in decoded_resp
def test_fetch_refseq_version():
"""Test eutils service from entrez that retrieves refseq version"""
# fetch complete refseq version for accession that has version
refseq_acc = "NM_020533"
refseq_version = fetch_refseq_version(refseq_acc)
# entrez eutils might be down the very moment of the test
version_n = refseq_version.split(".")[1]
# make sure that contains version number
assert version_n.isdigit()
# fetch complete refseq version for accession without version
refseq_acc = "NM_000000"
refseq_version = fetch_refseq_version(refseq_acc)
# make sure that contains version number
assert refseq_version == refseq_acc
|
# Exercise 1.8
# Author: Noah Waterfield Price
from math import pi
h = 5.0 # height
b = 2.0 # base
r = 1.5 # radius
area_parallelogram = h * b
print 'The area of the parallelogram is %.3f' % area_parallelogram
area_square = b ** 2
print 'The area of the square is %g' % area_square
area_circle = pi * r ** 2
print 'The area of the circle is %.f' % area_circle
volume_cone = 1.0 / 3 * pi * r ** 2 * h
print 'The volume of the cone is %.3f' % volume_cone
"""
Sample run:
python formulas_shapes.py
The area of the parallelogram is 10.000
The area of the square is 4
The area of the circle is 7
The volume of the cone is 11.781
"""
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import socket
import string
import os
HOST = 'chat.freenode.net'
PORT = 6667
NICK = 'irCri'
IDENT = 'MM'
REALNAME = 'Mihai Maruseac'
OWNER = 'Mihai Maruseac'
CHANNELINIT = '#mm_test'
readbuffer = ''
s=socket.socket()
s.connect((HOST, PORT))
s.send('NICK ' + NICK + '\n')
s.send('USER ' + IDENT + ' ' + HOST + ' bla :' + REALNAME + '\n')
def syscmd(commandline,channel):
cmd=commandline.replace('sys ','')
cmd=cmd.rstrip()
os.system(cmd+' >temp.txt')
a=open('temp.txt')
ot=a.read()
    ot = ot.replace('\n', '|')  # collapse newlines so the output fits on one IRC line
    a.close()
    s.send('PRIVMSG ' + channel + ' :' + ot + '\n')
def parsemsg(msg):
complete = msg[1:].split(':',1)
info = complete[0].split(' ')
msgpart = complete[1]
sender = info[0].split('!')
if msgpart[0] == '!' and sender[0] == OWNER:
cmd = msgpart[1:].split(' ')
        if cmd[0] == 'op':
            s.send('MODE ' + info[2] + ' +o ' + cmd[1] + '\n')
        if cmd[0] == 'deop':
            s.send('MODE ' + info[2] + ' -o ' + cmd[1] + '\n')
        if cmd[0] == 'voice':
            s.send('MODE ' + info[2] + ' +v ' + cmd[1] + '\n')
        if cmd[0] == 'devoice':
            s.send('MODE ' + info[2] + ' -v ' + cmd[1] + '\n')
if cmd[0] == 'sys':
syscmd(msgpart[1:],info[2])
if msgpart[0] == '-' and sender[0] == OWNER:
cmd = msgpart[1:]
        s.send(cmd + '\n')
print 'cmd=' + cmd
while 1:
line=s.recv(500)
if not line:
continue
print line
if line.find('No Ident response')!=-1:
print "JOINING..."
s.send('JOIN ' + CHANNELINIT + '\n')
if line.find('PRIVMSG')!=-1:
parsemsg(line)
line = line.rstrip()
line = line.split()
if(line[0] == 'PING'):
s.send('PONG ' + line[1] + '\n')
|
# Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pinball_ext.common import utils
from pinball_ext.job.basic_jobs import ClusterJob
__author__ = 'Changshu Liu, Mohammad Shahangian, Mao Ye'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
LOG = utils.get_logger('pinball_ext.job.hadoop_jobs')
class HadoopJob(ClusterJob):
"""Base class for actual Hadoop jobs.
App jar and lib jars are configured in executor, please see
Executor.run_hadoop_job() for detailed info.
    Derived classes should at least override _get_class_name() to specify
    the main Java class to execute. They can also optionally override _setup() to
    configure the following parameters to further tune the job config:
- self.jobconf_args
- self.extra_jars
- self.extra_arguments
self.params derived from JobBase will also be passed as job's extra
arguments (together with self.extra_arguments).
"""
def __init__(self, params, settings=None):
super(HadoopJob, self).__init__(params, settings)
self.jobconf_args = {}
self.extra_arguments = []
self.extra_jars = []
def _get_class_name(self):
raise NotImplementedError('No class name specified for this Hadoop Job')
def _execute(self):
param_args = ['-%s %s' % (k, v) for k, v in self.params.iteritems()]
self._job_output, self._job_stderr, self._job_ids = \
self.executor.run_hadoop_job(
self._get_class_name(),
jobconf_args=self.jobconf_args,
extra_args=param_args + self.extra_arguments,
extra_jars=self.extra_jars)
LOG.info('Dump job output ...')
for line in self._job_output:
LOG.info('\t'.join(line))
def __str__(self):
return '(%s): (%s) - (%s)' % (self.job_name,
self.params,
self.jobconf_args)
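# A hedged illustration, not part of the original module, of the subclassing pattern described in
# the HadoopJob docstring above, assuming the base class provides the _setup() hook the docstring
# mentions. The Java class name, jar name and jobconf keys are made up purely for the example.
class ExampleWordCountJob(HadoopJob):
    """Toy HadoopJob that runs a hypothetical WordCount main class."""
    def _get_class_name(self):
        # Fully qualified Java class to execute; purely illustrative.
        return 'com.example.hadoop.WordCount'
    def _setup(self):
        super(ExampleWordCountJob, self)._setup()
        # Optional tuning knobs mentioned in the HadoopJob docstring (values are illustrative).
        self.jobconf_args = {'mapreduce.job.reduces': '4'}
        self.extra_jars = ['example-udfs.jar']
        self.extra_arguments = ['-verbose']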
|
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
import os
import sys
from astrometry.util.fits import fits_table
from astrometry.libkd.spherematch import match_radec
from astrometry.util.plotutils import PlotSequence
from legacyanalysis.ps1cat import ps1cat, ps1_to_decam
from legacypipe.survey import *
'''
pixsc = 0.262
apr = [1.0, 2.0, 3.5] / pixsc
#-> aperture photometry radius in pixels
decstat -- aper, img, ..., apr
-> allmags
mags = reform(allmags[2,ii])
Skyrad_pix -- default 7 to 10 pixel radius in pixels
skyrad_pix = skyrad/pixsc ; sky radii in pixels
image.py -- SE called with PIXEL_SCALE 0 -> determined by SE from header
# corresponding to diameters of [1.5,3,5,7,9,11,13,15] arcsec
# assuming 0.262 arcsec pixel scale
PHOT_APERTURES 5.7251911,11.450382,19.083969,26.717558,34.351147,41.984734,49.618320,57.251911
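(aside, not in the original notes: these PHOT_APERTURES values are simply the aperture diameters
in arcsec divided by the 0.262 arcsec/pixel scale, e.g.
[d / 0.262 for d in (1.5, 3, 5, 7, 9, 11, 13, 15)]
reproduces the eight numbers above)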
-> photutils aperture photometry on PsfEx image -> 1.0 at radius ~ 13;
total ~ 1.05
One of the largest differences:
z band
Photometric diff 0.0249176025391 PSF size 1.07837 expnum 292604
-> Notice that the difference is largest for *small* PSFs.
Could this be brighter-fatter?
-> Check out the region Aaron pointed to with 0.025 errors
-> Is it possible that this is coming from saturation in the zeropoints
computation (decstat)?!
-> Sky estimation?
-> Is PsfEx using any flags (SATUR) to cut candidates?
-> Look at forced phot residuals? Take model & data slices of a whole
pile of stars in expnum 292604 N4
'''
def star_profiles(ps):
# Run an example CCD, 292604-N4, with fairly large difference vs PS1.
# python -c "from astrometry.util.fits import *; T = merge_tables([fits_table('/project/projectdirs/desiproc/dr3/tractor/244/tractor-244%s.fits' % b) for b in ['2p065','4p065', '7p065']]); T.writeto('tst-cat.fits')"
# python legacypipe/forced_photom_decam.py --save-data tst-data.fits --save-model tst-model.fits 292604 N4 tst-cat.fits tst-phot.fits
# -> tst-{model,data,phot}.fits
datafn = 'tst-data.fits'
modfn = 'tst-model.fits'
photfn = 'tst-phot.fits'
catfn = 'tst-cat.fits'
img = fitsio.read(datafn)
mod = fitsio.read(modfn)
phot = fits_table(photfn)
cat = fits_table(catfn)
print(len(phot), 'forced-photometry results')
margin = 25
phot.cut((phot.x > 0+margin) * (phot.x < 2046-margin) *
(phot.y > 0+margin) * (phot.y < 4096-margin))
print(len(phot), 'in bounds')
cmap = dict([((b,o),i) for i,(b,o) in enumerate(zip(cat.brickname, cat.objid))])
I = np.array([cmap.get((b,o), -1) for b,o in zip(phot.brickname, phot.objid)])
print(np.sum(I >= 0), 'forced-phot matched cat')
phot.type = cat.type[I]
wcs = Sip(datafn)
phot.ra,phot.dec = wcs.pixelxy2radec(phot.x+1, phot.y+1)
phot.cut(np.argsort(phot.flux))
phot.sn = phot.flux * np.sqrt(phot.flux_ivar)
phot.cut(phot.sn > 5)
print(len(phot), 'with S/N > 5')
ps1 = ps1cat(ccdwcs=wcs)
stars = ps1.get_stars()
print(len(stars), 'PS1 sources')
# Now cut to just *stars* with good colors
stars.gicolor = stars.median[:,0] - stars.median[:,2]
keep = (stars.gicolor > 0.4) * (stars.gicolor < 2.7)
stars.cut(keep)
print(len(stars), 'PS1 stars with good colors')
stars.cut(np.minimum(stars.stdev[:,1], stars.stdev[:,2]) < 0.05)
print(len(stars), 'PS1 stars with min stdev(r,i) < 0.05')
I,J,d = match_radec(phot.ra, phot.dec, stars.ra, stars.dec, 1./3600.)
print(len(I), 'matches')
plt.clf()
ha=dict(histtype='step', bins=20, range=(0,100), normed=True)
plt.hist(phot.flux, color='b', **ha)
plt.hist(phot.flux[I], color='r', **ha)
ps.savefig()
plt.clf()
plt.hist(phot.flux * np.sqrt(phot.flux_ivar), bins=100,
range=(-10, 50))
plt.xlabel('Flux S/N')
ps.savefig()
K = np.argsort(phot.flux[I])
I = I[K]
J = J[K]
ix = np.round(phot.x).astype(int)
iy = np.round(phot.y).astype(int)
sz = 10
P = np.flatnonzero(phot.type == 'PSF ')
print(len(P), 'PSFs')
imed = len(P)/2
i1 = int(len(P) * 0.75)
i2 = int(len(P) * 0.25)
N = 401
allmods = []
allimgs = []
for II,tt in [#(I[:len(I)/2], 'faint matches to PS1'),
#(I[len(I)/2:], 'bright matches to PS1'),
#(P[i2: i2+N], '25th pct PSFs'),
#(P[imed: imed+N], 'median PSFs'),
#(P[i1: i1+N], '75th pct PSFs'),
#(P[-25:], 'brightest PSFs'),
(P[i2:imed], '2nd quartile of PSFs'),
(P[imed:i1], '3rd quartile of PSFs'),
#(P[:len(P)/2], 'faint half of PSFs'),
#(P[len(P)/2:], 'bright half of PSFs'),
]:
imgs = []
mods = []
shimgs = []
shmods = []
imgsum = modsum = 0
#plt.clf()
for i in II:
from astrometry.util.util import lanczos_shift_image
dy = phot.y[i] - iy[i]
dx = phot.x[i] - ix[i]
sub = img[iy[i]-sz : iy[i]+sz+1, ix[i]-sz : ix[i]+sz+1]
shimg = lanczos_shift_image(sub, -dx, -dy)
sub = mod[iy[i]-sz : iy[i]+sz+1, ix[i]-sz : ix[i]+sz+1]
shmod = lanczos_shift_image(sub, -dx, -dy)
iyslice = img[iy[i], ix[i]-sz : ix[i]+sz+1]
myslice = mod[iy[i], ix[i]-sz : ix[i]+sz+1]
ixslice = img[iy[i]-sz : iy[i]+sz+1, ix[i]]
mxslice = mod[iy[i]-sz : iy[i]+sz+1, ix[i]]
mx = iyslice.max()
# plt.plot(iyslice/mx, 'b-', alpha=0.1)
# plt.plot(myslice/mx, 'r-', alpha=0.1)
# plt.plot(ixslice/mx, 'b-', alpha=0.1)
# plt.plot(mxslice/mx, 'r-', alpha=0.1)
siyslice = shimg[sz, :]
sixslice = shimg[:, sz]
smyslice = shmod[sz, :]
smxslice = shmod[:, sz]
shimgs.append(siyslice/mx)
shimgs.append(sixslice/mx)
shmods.append(smyslice/mx)
shmods.append(smxslice/mx)
imgs.append(iyslice/mx)
imgs.append(ixslice/mx)
mods.append(myslice/mx)
mods.append(mxslice/mx)
imgsum = imgsum + ixslice + iyslice
modsum = modsum + mxslice + myslice
# plt.ylim(-0.1, 1.1)
# plt.title(tt)
# ps.savefig()
mimg = np.median(np.array(imgs), axis=0)
mmod = np.median(np.array(mods), axis=0)
mshim = np.median(np.array(shimgs), axis=0)
mshmo = np.median(np.array(shmods), axis=0)
allmods.append(mshmo)
allimgs.append(mshim)
plt.clf()
# plt.plot(mimg, 'b-')
# plt.plot(mmod, 'r-')
plt.plot(mshim, 'g-')
plt.plot(mshmo, 'm-')
plt.ylim(-0.1, 1.1)
plt.title(tt + ': median; sums %.3f/%.3f' % (np.sum(mimg), np.sum(mmod)))
ps.savefig()
# plt.clf()
# mx = imgsum.max()
# plt.plot(imgsum/mx, 'b-')
# plt.plot(modsum/mx, 'r-')
# plt.ylim(-0.1, 1.1)
# plt.title(tt + ': sum')
# ps.savefig()
plt.clf()
plt.plot((mimg + 0.01) / (mmod + 0.01), 'k-')
plt.plot((imgsum/mx + 0.01) / (modsum/mx + 0.01), 'g-')
plt.plot((mshim + 0.01) / (mshmo + 0.01), 'm-')
plt.ylabel('(img + 0.01) / (mod + 0.01)')
plt.title(tt)
ps.savefig()
iq2,iq3 = allimgs
mq2,mq3 = allmods
plt.clf()
plt.plot(iq2, 'r-')
plt.plot(mq2, 'm-')
plt.plot(iq3, 'b-')
plt.plot(mq3, 'g-')
plt.title('Q2 vs Q3')
ps.savefig()
def main():
# ps = PlotSequence('pro')
# star_profiles(ps)
# sys.exit(0)
#survey_dir = '/project/projectdirs/desiproc/dr3'
#survey = LegacySurveyData(survey_dir=survey_dir)
survey = LegacySurveyData()
ralo,rahi = 240,245
declo,dechi = 5, 12
ps = PlotSequence('comp')
bands = 'grz'
ccdfn = 'ccds-forced.fits'
if not os.path.exists(ccdfn):
ccds = survey.get_annotated_ccds()
ccds.cut((ccds.ra > ralo) * (ccds.ra < rahi) *
(ccds.dec > declo) * (ccds.dec < dechi))
print(len(ccds), 'CCDs')
ccds.path = np.array([os.path.join(#'dr3',
'forced', ('%08i' % e)[:5], '%08i' % e, 'decam-%08i-%s-forced.fits' % (e, n.strip()))
for e,n in zip(ccds.expnum, ccds.ccdname)])
I, = np.nonzero([os.path.exists(fn) for fn in ccds.path])
print(len(I), 'CCDs with forced photometry')
ccds.cut(I)
#ccds = ccds[:500]
#e,I = np.unique(ccds.expnum, return_index=True)
#print(len(I), 'unique exposures')
#ccds.cut(I)
FF = read_forcedphot_ccds(ccds, survey)
FF.writeto('forced-all-matches.fits')
# - z band -- no trend w/ PS1 mag (brighter-fatter)
ccds.writeto(ccdfn)
ccdfn2 = 'ccds-forced-2.fits'
if not os.path.exists(ccdfn2):
ccds = fits_table(ccdfn)
# Split into brighter/fainter halves
FF = fits_table('forced-all-matches.fits')
print(len(FF), 'forced measurements')
FF.cut(FF.masked == False)
print(len(FF), 'forced measurements not masked')
ccds.brightest_mdiff = np.zeros(len(ccds))
ccds.brightest_mscatter = np.zeros(len(ccds))
ccds.bright_mdiff = np.zeros(len(ccds))
ccds.bright_mscatter = np.zeros(len(ccds))
ccds.faint_mdiff = np.zeros(len(ccds))
ccds.faint_mscatter = np.zeros(len(ccds))
for iccd in range(len(ccds)):
I = np.flatnonzero(FF.iforced == iccd)
if len(I) == 0:
continue
if len(I) < 10:
continue
F = FF[I]
b = np.percentile(F.psmag, 10)
m = np.median(F.psmag)
print(len(F), 'matches for CCD', iccd, 'median mag', m, '10th pct', b)
J = np.flatnonzero(F.psmag < b)
diff = F.mag[J] - F.psmag[J]
ccds.brightest_mdiff[iccd] = np.median(diff)
ccds.brightest_mscatter[iccd] = (np.percentile(diff, 84) -
np.percentile(diff, 16))/2.
J = np.flatnonzero(F.psmag < m)
diff = F.mag[J] - F.psmag[J]
ccds.bright_mdiff[iccd] = np.median(diff)
ccds.bright_mscatter[iccd] = (np.percentile(diff, 84) -
np.percentile(diff, 16))/2.
J = np.flatnonzero(F.psmag > m)
diff = F.mag[J] - F.psmag[J]
ccds.faint_mdiff[iccd] = np.median(diff)
ccds.faint_mscatter[iccd] = (np.percentile(diff, 84) -
np.percentile(diff, 16))/2.
ccds.writeto(ccdfn2)
ccds = fits_table(ccdfn2)
plt.clf()
plt.hist(ccds.nforced, bins=100)
plt.title('nforced')
ps.savefig()
plt.clf()
plt.hist(ccds.nmatched, bins=100)
plt.title('nmatched')
ps.savefig()
#ccds.cut(ccds.nmatched >= 150)
ccds.cut(ccds.nmatched >= 50)
print('Cut to', len(ccds), 'with >50 matched')
ccds.cut(ccds.photometric)
print('Cut to', len(ccds), 'photometric')
neff = 1. / ccds.psfnorm_mean**2
# Narcsec is in arcsec**2
narcsec = neff * ccds.pixscale_mean**2
# to arcsec
narcsec = np.sqrt(narcsec)
# Correction factor to get back to equivalent of Gaussian sigma
narcsec /= (2. * np.sqrt(np.pi))
# Conversion factor to FWHM (2.35)
narcsec *= 2. * np.sqrt(2. * np.log(2.))
ccds.psfsize = narcsec
for band in bands:
I = np.flatnonzero((ccds.filter == band)
* (ccds.photometric) * (ccds.blacklist_ok))
mlo,mhi = -0.01, 0.05
plt.clf()
plt.plot(ccds.ccdzpt[I],
ccds.exptime[I], 'k.', alpha=0.1)
J = np.flatnonzero((ccds.filter == band) * (ccds.photometric == False))
plt.plot(ccds.ccdzpt[J],
ccds.exptime[J], 'r.', alpha=0.1)
plt.xlabel('Zeropoint (mag)')
plt.ylabel('exptime')
plt.title('DR3: EDR region, Forced phot: %s band' % band)
ps.savefig()
plt.clf()
plt.plot(ccds.ccdzpt[I],
np.clip(ccds.mdiff[I], mlo,mhi), 'k.', alpha=0.1)
plt.xlabel('Zeropoint (mag)')
plt.ylabel('DECaLS PSF - PS1 (mag)')
plt.axhline(0, color='k', alpha=0.2)
#plt.axis([0, mxsee, mlo,mhi])
plt.title('DR3: EDR region, Forced phot: %s band' % band)
ps.savefig()
plt.clf()
plt.plot(ccds.ccdzpt[I], ccds.psfsize[I], 'k.', alpha=0.1)
plt.xlabel('Zeropoint (mag)')
plt.ylabel('PSF size (arcsec)')
plt.title('DR3: EDR region, Forced phot: %s band' % band)
ps.savefig()
# plt.clf()
# plt.plot(ccds.avsky[I],
# np.clip(ccds.mdiff[I], mlo,mhi), 'k.', alpha=0.1)
# plt.xlabel('avsky')
# plt.ylabel('DECaLS PSF - PS1 (mag)')
# plt.axhline(0, color='k', alpha=0.2)
# plt.title('DR3: EDR region, Forced phot: %s band' % band)
# ps.savefig()
#
# plt.clf()
# plt.plot(ccds.meansky[I],
# np.clip(ccds.mdiff[I], mlo,mhi), 'k.', alpha=0.1)
# plt.xlabel('meansky')
# plt.ylabel('DECaLS PSF - PS1 (mag)')
# plt.axhline(0, color='k', alpha=0.2)
# plt.title('DR3: EDR region, Forced phot: %s band' % band)
# ps.savefig()
# plt.clf()
# plt.plot(ccds.avsky[I],
# np.clip(ccds.mdiff[I], mlo,mhi), 'k.', alpha=0.1)
# plt.xlabel('avsky')
# plt.ylabel('DECaLS PSF - PS1 (mag)')
# plt.axhline(0, color='k', alpha=0.2)
# plt.title('DR3: EDR region, Forced phot: %s band' % band)
# ps.savefig()
#
# plt.clf()
# plt.plot(ccds.meansky[I],
# np.clip(ccds.mdiff[I], mlo,mhi), 'k.', alpha=0.1)
# plt.xlabel('meansky')
# plt.ylabel('DECaLS PSF - PS1 (mag)')
# plt.axhline(0, color='k', alpha=0.2)
# plt.title('DR3: EDR region, Forced phot: %s band' % band)
# ps.savefig()
plt.clf()
lo,hi = (-0.02, 0.05)
ha = dict(bins=50, histtype='step', range=(lo,hi))
n,b,p1 = plt.hist(ccds.brightest_mdiff[I], color='r', **ha)
n,b,p2 = plt.hist(ccds.bright_mdiff[I], color='g', **ha)
n,b,p3 = plt.hist(ccds.faint_mdiff[I], color='b', **ha)
plt.legend((p1[0],p2[0],p3[0]), ('Brightest 10%', 'Brightest 50%',
'Faintest 50%'))
plt.xlabel('DECaLS PSF - PS1 (mag)')
plt.ylabel('Number of CCDs')
plt.title('DR3: EDR region, Forced phot: %s band' % band)
plt.xlim(lo,hi)
ps.savefig()
for band in bands:
I = np.flatnonzero(ccds.filter == band)
mxsee = 4.
mlo,mhi = -0.01, 0.05
plt.clf()
plt.plot(np.clip(ccds.psfsize[I], 0, mxsee),
np.clip(ccds.mdiff[I], mlo,mhi), 'k.', alpha=0.1)
# for p in [1,2,3]:
# J = np.flatnonzero(ccds.tilepass[I] == p)
# if len(J):
# plt.plot(np.clip(ccds.psfsize[I[J]], 0, mxsee),
# np.clip(ccds.mdiff[I[J]], mlo,mhi), '.', color='rgb'[p-1], alpha=0.2)
#plt.plot(ccds.seeing[I], ccds.mdiff[I], 'b.')
plt.xlabel('PSF size (arcsec)')
plt.ylabel('DECaLS PSF - PS1 (mag)')
plt.axhline(0, color='k', alpha=0.2)
plt.axis([0, mxsee, mlo,mhi])
plt.title('DR3: EDR region, Forced phot: %s band' % band)
ps.savefig()
# Group by exposure
for band in bands:
I = np.flatnonzero((ccds.filter == band)
* (ccds.photometric) * (ccds.blacklist_ok))
E,J = np.unique(ccds.expnum[I], return_index=True)
print(len(E), 'unique exposures in', band)
exps = ccds[I[J]]
print(len(exps), 'unique exposures in', band)
assert(len(np.unique(exps.expnum)) == len(exps))
exps.ddiff = np.zeros(len(exps))
exps.dsize = np.zeros(len(exps))
exps.nccds = np.zeros(len(exps), int)
exps.brightest_ddiff = np.zeros(len(exps))
exps.bright_ddiff = np.zeros(len(exps))
exps.faint_ddiff = np.zeros(len(exps))
for iexp,exp in enumerate(exps):
J = np.flatnonzero(ccds.expnum[I] == exp.expnum)
J = I[J]
print(len(J), 'CCDs in exposure', exp.expnum)
exps.brightest_mdiff[iexp] = np.median(ccds.brightest_mdiff[J])
exps.bright_mdiff[iexp] = np.median(ccds.bright_mdiff[J])
exps.faint_mdiff[iexp] = np.median(ccds.faint_mdiff[J])
exps.brightest_ddiff[iexp] = (
np.percentile(ccds.brightest_mdiff[J], 84) -
np.percentile(ccds.brightest_mdiff[J], 16))/2.
exps.bright_ddiff[iexp] = (
np.percentile(ccds.bright_mdiff[J], 84) -
np.percentile(ccds.bright_mdiff[J], 16))/2.
exps.faint_ddiff[iexp] = (
np.percentile(ccds.faint_mdiff[J], 84) -
np.percentile(ccds.faint_mdiff[J], 16))/2.
exps.mdiff[iexp] = np.median(ccds.mdiff[J])
exps.ddiff[iexp] = (np.percentile(ccds.mdiff[J], 84) - np.percentile(ccds.mdiff[J], 16))/2.
exps.psfsize[iexp] = np.median(ccds.psfsize[J])
exps.dsize[iexp] = (np.percentile(ccds.psfsize[J], 84) - np.percentile(ccds.psfsize[J], 16))/2.
exps.nccds[iexp] = len(J)
mxsee = 4.
mlo,mhi = -0.01, 0.05
exps.cut(exps.nccds >= 10)
plt.clf()
plt.errorbar(np.clip(exps.psfsize, 0, mxsee),
np.clip(exps.mdiff, mlo,mhi), yerr=exps.ddiff,
#xerr=exps.dsize,
fmt='.', color='k')
# plt.errorbar(np.clip(exps.psfsize, 0, mxsee),
# np.clip(exps.brightest_mdiff, mlo,mhi),
# yerr=exps.brightest_ddiff, fmt='r.')
# plt.errorbar(np.clip(exps.psfsize, 0, mxsee),
# np.clip(exps.bright_mdiff, mlo,mhi),
# yerr=exps.bright_ddiff, fmt='g.')
# plt.errorbar(np.clip(exps.psfsize, 0, mxsee),
# np.clip(exps.faint_mdiff, mlo,mhi),
# yerr=exps.faint_ddiff, fmt='b.')
# plt.plot(np.clip(exps.psfsize, 0, mxsee),
# np.clip(exps.brightest_mdiff, mlo,mhi), 'r.')
# plt.plot(np.clip(exps.psfsize, 0, mxsee),
# np.clip(exps.bright_mdiff, mlo,mhi), 'g.')
# plt.plot(np.clip(exps.psfsize, 0, mxsee),
# np.clip(exps.faint_mdiff, mlo,mhi), 'b.')
#plt.plot(ccds.seeing[I], ccds.mdiff[I], 'b.')
plt.xlabel('PSF size (arcsec)')
plt.ylabel('DECaLS PSF - PS1 (mag)')
plt.axhline(0, color='k', alpha=0.2)
plt.axis([0, mxsee, mlo,mhi])
plt.title('DR3: EDR region, Forced phot: %s band' % band)
ps.savefig()
plt.clf()
p1 = plt.plot(np.clip(exps.psfsize, 0, mxsee),
np.clip(exps.brightest_mdiff, mlo,mhi), 'r.', alpha=0.5)
p2 = plt.plot(np.clip(exps.psfsize, 0, mxsee),
np.clip(exps.bright_mdiff, mlo,mhi), 'g.', alpha=0.5)
p3 = plt.plot(np.clip(exps.psfsize, 0, mxsee),
np.clip(exps.faint_mdiff, mlo,mhi), 'b.', alpha=0.5)
plt.legend((p1[0],p2[0],p3[0]), ('Brightest 10%', 'Brightest 50%',
'Faintest 50%'))
plt.xlabel('PSF size (arcsec)')
plt.ylabel('DECaLS PSF - PS1 (mag)')
plt.axhline(0, color='k', alpha=0.2)
plt.axis([0, mxsee, mlo,mhi])
plt.title('DR3: EDR region, Forced phot: %s band' % band)
ps.savefig()
J = np.argsort(-exps.mdiff)
for j in J:
print(' Photometric diff', exps.mdiff[j], 'PSF size', exps.psfsize[j], 'expnum', exps.expnum[j])
sys.exit(0)
def read_forcedphot_ccds(ccds, survey):
ccds.mdiff = np.zeros(len(ccds))
ccds.mscatter = np.zeros(len(ccds))
Nap = 8
ccds.apdiff = np.zeros((len(ccds), Nap))
ccds.apscatter = np.zeros((len(ccds), Nap))
ccds.nforced = np.zeros(len(ccds), np.int16)
ccds.nunmasked = np.zeros(len(ccds), np.int16)
ccds.nmatched = np.zeros(len(ccds), np.int16)
ccds.nps1 = np.zeros(len(ccds), np.int16)
brickcache = {}
FF = []
for iccd,ccd in enumerate(ccds):
print('CCD', iccd, 'of', len(ccds))
F = fits_table(ccd.path)
print(len(F), 'sources in', ccd.path)
ccds.nforced[iccd] = len(F)
# arr, have to match with brick sources to get RA,Dec.
F.ra = np.zeros(len(F))
F.dec = np.zeros(len(F))
F.masked = np.zeros(len(F), bool)
maglo,maghi = 14.,21.
maxdmag = 1.
F.mag = -2.5 * (np.log10(F.flux) - 9)
F.cut((F.flux > 0) * (F.mag > maglo-maxdmag) * (F.mag < maghi+maxdmag))
print(len(F), 'sources between', (maglo-maxdmag), 'and', (maghi+maxdmag), 'mag')
im = survey.get_image_object(ccd)
print('Reading DQ image for', im)
dq = im.read_dq()
H,W = dq.shape
ix = np.clip(np.round(F.x), 0, W-1).astype(int)
iy = np.clip(np.round(F.y), 0, H-1).astype(int)
F.mask = dq[iy,ix]
print(np.sum(F.mask != 0), 'sources are masked')
for brickname in np.unique(F.brickname):
if not brickname in brickcache:
brickcache[brickname] = fits_table(survey.find_file('tractor', brick=brickname))
T = brickcache[brickname]
idtoindex = np.zeros(T.objid.max()+1, int) - 1
idtoindex[T.objid] = np.arange(len(T))
I = np.flatnonzero(F.brickname == brickname)
J = idtoindex[F.objid[I]]
assert(np.all(J >= 0))
F.ra [I] = T.ra [J]
F.dec[I] = T.dec[J]
F.masked[I] = (T.decam_anymask[J,:].max(axis=1) > 0)
#F.cut(F.masked == False)
#print(len(F), 'not masked')
print(np.sum(F.masked), 'masked in ANYMASK')
ccds.nunmasked[iccd] = len(F)
wcs = Tan(*[float(x) for x in [ccd.crval1, ccd.crval2, ccd.crpix1, ccd.crpix2,
ccd.cd1_1, ccd.cd1_2, ccd.cd2_1, ccd.cd2_2,
ccd.width, ccd.height]])
ps1 = ps1cat(ccdwcs=wcs)
stars = ps1.get_stars()
print(len(stars), 'PS1 sources')
ccds.nps1[iccd] = len(stars)
# Now cut to just *stars* with good colors
stars.gicolor = stars.median[:,0] - stars.median[:,2]
keep = (stars.gicolor > 0.4) * (stars.gicolor < 2.7)
stars.cut(keep)
print(len(stars), 'PS1 stars with good colors')
stars.cut(np.minimum(stars.stdev[:,1], stars.stdev[:,2]) < 0.05)
print(len(stars), 'PS1 stars with min stdev(r,i) < 0.05')
I,J,d = match_radec(F.ra, F.dec, stars.ra, stars.dec, 1./3600.)
print(len(I), 'matches')
band = ccd.filter
colorterm = ps1_to_decam(stars.median[J], band)
F.cut(I)
F.psmag = stars.median[J, ps1.ps1band[band]] + colorterm
K = np.flatnonzero((F.psmag > maglo) * (F.psmag < maghi))
print(len(K), 'with mag', maglo, 'to', maghi)
F.cut(K)
K = np.flatnonzero(np.abs(F.mag - F.psmag) < maxdmag)
print(len(K), 'with good mag matches (<', maxdmag, 'mag difference)')
ccds.nmatched[iccd] = len(K)
if len(K) == 0:
continue
F.cut(K)
ccds.mdiff[iccd] = np.median(F.mag - F.psmag)
ccds.mscatter[iccd] = (np.percentile(F.mag - F.psmag, 84) -
np.percentile(F.mag - F.psmag, 16))/2.
for i in range(Nap):
apmag = -2.5 * (np.log10(F.apflux[:, i]) - 9)
ccds.apdiff[iccd,i] = np.median(apmag - F.psmag)
ccds.apscatter[iccd,i] = (np.percentile(apmag - F.psmag, 84) -
np.percentile(apmag - F.psmag, 16))/2.
#F.about()
for c in ['apflux_ivar', 'brickid', 'flux_ivar',
'mjd', 'objid', 'fracflux', 'rchi2', 'x','y']:
F.delete_column(c)
F.expnum = np.zeros(len(F), np.int32) + ccd.expnum
F.ccdname = np.array([ccd.ccdname] * len(F))
F.iforced = np.zeros(len(F), np.int32) + iccd
FF.append(F)
FF = merge_tables(FF)
return FF
bricks = survey.get_bricks_readonly()
bricks = bricks[(bricks.ra > ralo) * (bricks.ra < rahi) *
(bricks.dec > declo) * (bricks.dec < dechi)]
print(len(bricks), 'bricks')
I, = np.nonzero([os.path.exists(survey.find_file('tractor', brick=b.brickname))
for b in bricks])
print(len(I), 'bricks with catalogs')
bricks.cut(I)
for band in bands:
bricks.set('diff_%s' % band, np.zeros(len(bricks), np.float32))
bricks.set('psfsize_%s' % band, np.zeros(len(bricks), np.float32))
diffs = dict([(b,[]) for b in bands])
for ibrick,b in enumerate(bricks):
fn = survey.find_file('tractor', brick=b.brickname)
T = fits_table(fn)
print(len(T), 'sources in', b.brickname)
brickwcs = wcs_for_brick(b)
ps1 = ps1cat(ccdwcs=brickwcs)
stars = ps1.get_stars()
print(len(stars), 'PS1 sources')
# Now cut to just *stars* with good colors
stars.gicolor = stars.median[:,0] - stars.median[:,2]
keep = (stars.gicolor > 0.4) * (stars.gicolor < 2.7)
stars.cut(keep)
print(len(stars), 'PS1 stars with good colors')
I,J,d = match_radec(T.ra, T.dec, stars.ra, stars.dec, 1./3600.)
print(len(I), 'matches')
for band in bands:
bricks.get('psfsize_%s' % band)[ibrick] = np.median(
T.decam_psfsize[:, survey.index_of_band(band)])
colorterm = ps1_to_decam(stars.median[J], band)
psmag = stars.median[J, ps1.ps1band[band]]
psmag += colorterm
decflux = T.decam_flux[I, survey.index_of_band(band)]
decmag = -2.5 * (np.log10(decflux) - 9)
#K = np.flatnonzero((psmag > 14) * (psmag < 24))
#print(len(K), 'with mag 14 to 24')
K = np.flatnonzero((psmag > 14) * (psmag < 21))
print(len(K), 'with mag 14 to 21')
decmag = decmag[K]
psmag = psmag [K]
K = np.flatnonzero(np.abs(decmag - psmag) < 1)
print(len(K), 'with good mag matches (< 1 mag difference)')
decmag = decmag[K]
psmag = psmag [K]
if False and ibrick == 0:
plt.clf()
#plt.plot(psmag, decmag, 'b.')
plt.plot(psmag, decmag - psmag, 'b.')
plt.xlabel('PS1 mag')
plt.xlabel('DECam - PS1 mag')
plt.title('PS1 matches for %s band, brick %s' % (band, b.brickname))
ps.savefig()
mdiff = np.median(decmag - psmag)
diffs[band].append(mdiff)
print('Median difference:', mdiff)
bricks.get('diff_%s' % band)[ibrick] = mdiff
for band in bands:
d = diffs[band]
plt.clf()
plt.hist(d, bins=20, range=(-0.02, 0.02), histtype='step')
plt.xlabel('Median mag difference per brick')
plt.title('DR3 EDR PS1 vs DECaLS: %s band' % band)
ps.savefig()
print('Median differences in', band, 'band:', np.median(d))
if False:
plt.clf()
plt.hist(diffs['g'], bins=20, range=(-0.02, 0.02), histtype='step', color='g')
plt.hist(diffs['r'], bins=20, range=(-0.02, 0.02), histtype='step', color='r')
plt.hist(diffs['z'], bins=20, range=(-0.02, 0.02), histtype='step', color='m')
plt.xlabel('Median mag difference per brick')
plt.title('DR3 EDR PS1 vs DECaLS')
ps.savefig()
rr,dd = np.meshgrid(np.linspace(ralo,rahi, 400), np.linspace(declo,dechi, 400))
I,J,d = match_radec(rr.ravel(), dd.ravel(), bricks.ra, bricks.dec, 0.18, nearest=True)
print(len(I), 'matches')
for band in bands:
plt.clf()
dmag = np.zeros_like(rr) - 1.
dmag.ravel()[I] = bricks.get('diff_%s' % band)[J]
plt.imshow(dmag, interpolation='nearest', origin='lower',
vmin=-0.01, vmax=0.01, cmap='hot',
extent=(ralo,rahi,declo,dechi))
plt.colorbar()
plt.title('DR3 EDR PS1 vs DECaLS: %s band' % band)
plt.xlabel('RA (deg)')
plt.ylabel('Dec (deg)')
plt.axis([ralo,rahi,declo,dechi])
ps.savefig()
plt.clf()
# reuse 'dmag' map...
dmag = np.zeros_like(rr)
dmag.ravel()[I] = bricks.get('psfsize_%s' % band)[J]
plt.imshow(dmag, interpolation='nearest', origin='lower',
cmap='hot', extent=(ralo,rahi,declo,dechi))
plt.colorbar()
plt.title('DR3 EDR: DECaLS PSF size: %s band' % band)
plt.xlabel('RA (deg)')
plt.ylabel('Dec (deg)')
plt.axis([ralo,rahi,declo,dechi])
ps.savefig()
if False:
for band in bands:
plt.clf()
plt.scatter(bricks.ra, bricks.dec, c=bricks.get('diff_%s' % band), vmin=-0.01, vmax=0.01,
edgecolors='face', s=200)
plt.colorbar()
plt.title('DR3 EDR PS1 vs DECaLS: %s band' % band)
plt.xlabel('RA (deg)')
plt.ylabel('Dec (deg)')
plt.axis('scaled')
plt.axis([ralo,rahi,declo,dechi])
ps.savefig()
plt.clf()
plt.plot(bricks.psfsize_g, bricks.diff_g, 'g.')
plt.plot(bricks.psfsize_r, bricks.diff_r, 'r.')
plt.plot(bricks.psfsize_z, bricks.diff_z, 'm.')
plt.xlabel('PSF size (arcsec)')
plt.ylabel('DECaLS PSF - PS1 (mag)')
plt.title('DR3 EDR')
ps.savefig()
if __name__ == '__main__':
main()
|
import numpy as np
import matplotlib.pylab as plt
from scipy import interpolate
fig= plt.figure()
fig.suptitle('Estimated SOC and Real SOC Comparison', fontsize=14, fontweight='bold')
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('Time(s)')
ax.set_ylabel('SOC')
calSOC=[]
dischargeSOC=[]
x=[]
y=[]
x1=[]
y1=[]
error=[]
dischargeSOCValue=[]
calSOC=np.loadtxt('calSOC.txt',dtype=np.float)
dischargeSOC=np.loadtxt('dischargeData.txt',dtype=np.float)
x=calSOC[:,0]
y=calSOC[:,1]
x1=dischargeSOC[:,0]
y1=dischargeSOC[:,1]
# for index in range(len(y)):
# error.append(y1[index]-y[index])
# print error
dischargeSOCdict={}
for index in range(len(y1)):
dischargeSOCdict[x1[index]] = y1[index]
for element in x:
dischargeSOCValue.append(dischargeSOCdict[element])
print dischargeSOCValue
for index in range(len(y)):
error.append(y[index] - dischargeSOCValue[index])
ax.plot(x,y,'g',label='Estimated SOC')
ax.plot(x1,y1,'b',label='Real SOC')
ax.plot(x,error,'r--',label='Error')
ax.legend()
plt.show()
|
import pymysql as mdb
import traceback
class MyDB():
def __init__(self):
self.connection = None
self.cursor = None
def connect(self):
self.connection = mdb.connect('192.168.100.93', 'username', 'password', 'gestioip')
self.cursor = self.connection.cursor(mdb.cursors.DictCursor)
return self
    def execute(self, sql, *args):
        # self.cursor = self.connection.cursor(mdb.cursors.DictCursor)
        result = None
        try:
            self.connect()
            # rowcount of the executed statement
            result = self.cursor.execute(sql, *args)
            self.connection.commit()
            self.cursor.close()
        except mdb.Error:
            print(traceback.format_exc())
            self.connection.rollback()
            self.cursor.close()
        return result
def queryone(self, sql, *args):
try:
self.connect()
d = self.cursor
d.execute(sql, *args)
tmp = d.fetchone()
d.close()
return tmp
# Reopen database connection
except (AttributeError, mdb.OperationalError ):
self.connect()
self.cursor.execute(sql, *args)
return self.cursor.fetchone()
def queryall(self, sql, *args):
try:
self.connect()
d = self.cursor
d.execute(sql, *args)
tmp = d.fetchall()
d.close()
return tmp
# Reopen database connection
except (AttributeError, mdb.OperationalError ):
self.connect()
self.cursor.execute(sql, *args)
return self.cursor.fetchall()
def lastrowid(self, sql, *args):
try:
self.cursor.execute(sql, *args)
return self.cursor.lastrowid
# Reopen database connection
except (AttributeError, mdb.OperationalError ):
self.connect()
self.cursor.execute(sql, *args)
return self.cursor.lastrowid
def disconnect(self):
self.connection.close()
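# A hedged usage sketch, not part of the original module: the table and column names below are
# invented for illustration, and the connection parameters are the placeholders hard-coded in
# connect() above.
if __name__ == '__main__':
    db = MyDB()
    # queryall/queryone open their own connection, run the statement and return dict rows
    rows = db.queryall("SELECT id, name FROM example_table WHERE name LIKE %s", ('%net%',))
    one = db.queryone("SELECT COUNT(*) AS n FROM example_table")
    # execute() commits and returns the affected row count
    affected = db.execute("UPDATE example_table SET name = %s WHERE id = %s", ('renamed', 1))
    print(rows, one, affected)
    db.disconnect()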
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_leaflet as dl
import dash_leaflet.express as dlx
import db
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from dash.dependencies import Output, Input
from dash_extensions.javascript import assign
from plotly.subplots import make_subplots
TITLE = 'Ribbit Network'
REFRESH_MS = 60 * 1000
chroma = 'https://cdnjs.cloudflare.com/ajax/libs/chroma-js/2.1.0/chroma.min.js'
colorscale = ['lightgreen', 'green', 'darkgreen', 'black']
# Dash App
app = dash.Dash(__name__, title=TITLE, update_title=None, external_scripts=[chroma])
server = app.server
sensor_data = pd.DataFrame(columns=['Time', 'CO₂ (PPM)', 'Temperature (°C)', 'Barometric Pressure (mBar)', 'Humidity (%)'])
def serve_layout():
df = db.get_map_data()
return html.Div([
html.Div(id='onload', hidden=True),
dcc.Interval(id='interval', interval=REFRESH_MS, n_intervals=0),
html.Div([
html.Img(src='assets/frog.svg'),
html.H1(TITLE),
html.A(html.H3('Learn'), href='https://ribbitnetwork.org/',
style={'margin-left': 'auto', 'text-decoration': 'underline', 'color': 'black'}),
html.A(html.H3('Build'),
href='https://github.com/Ribbit-Network/ribbit-network-frog-sensor#build-a-frog',
style={'margin-left': '2em', 'text-decoration': 'underline', 'color': 'black'}),
html.A(html.H3('Order'),
href='https://ribbitnetwork.org/#buy',
style={'margin-left': '2em', 'text-decoration': 'underline', 'color': 'black'}),
html.A(html.H3('Support'), href='https://ko-fi.com/keenanjohnson',
style={'margin-left': '2em', 'text-decoration': 'underline', 'color': 'black'}),
], id='nav'),
html.Div([
dl.Map(
[
dl.TileLayer(url='https://cartodb-basemaps-{s}.global.ssl.fastly.net/light_all/{z}/{x}/{y}.png',
attribution='Map tiles by Carto, under CC BY 3.0. Data by OpenStreetMap, under ODbL.'),
dl.LocateControl(startDirectly=True, options=dict(keepCurrentZoomLevel=True, drawCircle=False, drawMarker=False)),
dl.GeoJSON(id='geojson'),
dl.Colorbar(colorscale=colorscale, width=20, height=200, min=300, max=600, unit='PPM'),
dl.GestureHandling(),
],
id='map',
zoom=3,
minZoom=3,
maxBounds=[[-75, -180],[75, 200]],
),
], id='map-container'),
html.Div([
dcc.Dropdown(id='duration', clearable=False, searchable=False, value='24h', options=[
{'label': '10 minutes', 'value': '10m'},
{'label': '30 minutes', 'value': '30m'},
{'label': '1 hour', 'value': '1h'},
{'label': '1 day', 'value': '24h'},
{'label': '7 days', 'value': '7d'},
{'label': '30 days', 'value': '30d'},
]),
html.Div([
html.Button(html.Div([
html.Img(src='assets/download.svg'),
'Export as CSV',
]), id='export'),
dcc.Download(id='download'),
]),
], id='controls'),
html.Div([
dcc.Graph(id='timeseries'),
html.Div(id='timezone', hidden=True),
], id='graphs'),
])
app.layout = serve_layout
# Get browser timezone
app.clientside_callback(
'''
function(n_intervals) {
return Intl.DateTimeFormat().resolvedOptions().timeZone
}
''',
Output('timezone', 'children'),
Input('onload', 'children'),
)
point_to_layer = assign('''function(feature, latlng, context) {
const {min, max, colorscale, circleOptions, colorProp} = context.props.hideout;
const csc = chroma.scale(colorscale).domain([min, max]);
circleOptions.fillColor = csc(feature.properties[colorProp]);
return L.circleMarker(latlng, circleOptions);
}''')
cluster_to_layer = assign('''function(feature, latlng, index, context) {
const {min, max, colorscale, circleOptions, colorProp} = context.props.hideout;
const csc = chroma.scale(colorscale).domain([min, max]);
// Set color based on mean value of leaves.
const leaves = index.getLeaves(feature.properties.cluster_id);
let valueSum = 0;
for (let i = 0; i < leaves.length; ++i) {
valueSum += leaves[i].properties[colorProp]
}
const valueMean = valueSum / leaves.length;
// Render a circle with the number of leaves written in the center.
const icon = L.divIcon.scatter({
html: '<div style="background-color:white;"><span>' + feature.properties.point_count_abbreviated + '</span></div>',
className: "marker-cluster",
iconSize: L.point(40, 40),
color: csc(valueMean)
});
return L.marker(latlng, {icon : icon})
}''')
# Update the Map
@app.callback(
Output('geojson', 'children'),
[
Input('onload', 'children'),
Input('interval', 'n_intervals'),
],
)
def update_map(_children, _n_intervals):
df = db.get_map_data()
df['tooltip'] = df['co2'].round(decimals=2).astype(str) + ' PPM'
return dl.GeoJSON(
id='geojson',
data=dlx.dicts_to_geojson(df.to_dict('records')),
options=dict(pointToLayer=point_to_layer),
cluster=True,
clusterToLayer=cluster_to_layer,
zoomToBoundsOnClick=True,
superClusterOptions=dict(radius=100),
hideout=dict(colorProp='co2', circleOptions=dict(fillOpacity=1, stroke=False, radius=8), min=300, max=600,
colorscale=colorscale),
)
# Update Data Plots
@app.callback(
Output('timeseries', 'figure'),
[
Input('timezone', 'children'),
Input('duration', 'value'),
Input('geojson', 'click_feature'),
Input('interval', 'n_intervals'),
],
)
def update_graphs(timezone, duration, click_feature, _n_intervals):
global sensor_data
if click_feature is not None:
sensor = click_feature.get('properties', {}).get('host', None)
if sensor is not None:
sensor_data = db.get_sensor_data(sensor, duration)
sensor_data.rename(
columns={'_time': 'Time', 'co2': 'CO₂ (PPM)', 'humidity': 'Humidity (%)', 'lat': 'Latitude', 'lon': 'Longitude',
'alt': 'Altitude (m)', 'temperature': 'Temperature (°C)',
'baro_pressure': 'Barometric Pressure (mBar)'}, inplace=True)
sensor_data['Time'] = sensor_data['Time'].dt.tz_convert(timezone)
columns_to_plot = ['CO₂ (PPM)', 'Temperature (°C)', 'Barometric Pressure (mBar)', 'Humidity (%)']
fig = make_subplots(rows=4, cols=1, shared_xaxes=True)
for ind, col in enumerate(columns_to_plot):
        fig.add_scatter(
            x=sensor_data["Time"],
            y=sensor_data[col],
            mode="lines",
            line=go.scatter.Line(color="black"),
            showlegend=False,
            row=ind + 1,
            col=1,
            hovertemplate="Time: %{x}<br>%{text}: %{y:.2f}<extra></extra>",
            text=[col] * len(sensor_data[col]),
        )
fig.update_yaxes(title_text=col, row=ind+1, col=1)
fig.update_layout(template="plotly_white", height=1200)
return fig
# Export data as CSV
@app.callback(
Output('download', 'data'),
Input('export', 'n_clicks'),
)
def export_data(n_clicks):
if n_clicks is None or sensor_data.empty:
return
return dcc.send_data_frame(sensor_data.to_csv, index=False, filename='data.csv')
if __name__ == '__main__':
app.run_server(debug=True)
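# Deployment note (a hedged sketch, not required by the app itself): the `server`
# object exposed near the top is the underlying Flask/WSGI application, so a
# production setup could point a WSGI server at it instead of the Dash dev server,
# e.g.
#   gunicorn app:server
# where the module name `app` is an assumption about how this file is saved.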
|
# coding=utf-8
try:
from urllib2 import Request, urlopen, URLError, HTTPError
except ImportError:
from urllib.request import Request, urlopen, URLError, HTTPError
import os
import netifaces
deviceId = None
groupId = None
device_id_file_path = os.environ.get('DEVICE_UUID_FILEPATH', '/dev/ro_serialno')
group_id_file_path = os.environ.get('DEVICE_GROUP_ID_FILEPATH', '/data/usr/com.deep.workai/cache/groupid.txt')
def getUUID():
interfaces = netifaces.interfaces()
    for interface in interfaces:
        if interface in ('wlan0', 'eth0', 'en0'):
            # use the interface MAC address, with the colons removed, as the UUID
            return netifaces.ifaddresses(interface)[netifaces.AF_LINK][0]['addr'].replace(':', '')
    return None
def get_deviceid():
global deviceId
if deviceId is not None:
return deviceId
if os.path.exists(device_id_file_path):
with open(device_id_file_path) as f:
deviceId = f.readline()
if deviceId is None or len(deviceId)<1:
deviceId = getUUID()
# print('>>> no file found, use MAC as deviceId %s' %(deviceId))
if deviceId is not None and len(deviceId) > 1:
deviceId = deviceId.strip('\n')
#deviceId = deviceId.upper()
# print("get deviceId: %s" %(deviceId))
return deviceId
def get_deviceid_old():
return get_deviceid()
def save_groupid_to_file(group_id):
try:
with open(group_id_file_path, "w") as group_id_file:
group_id_file.write(group_id)
except IOError:
pass
def get_groupid_from_file():
try:
with open(group_id_file_path, 'r') as group_id_file:
data=group_id_file.read().replace('\n', '')
except IOError:
return None
if data is not None and data != '':
return data
return None
def set_groupid(groupid):
    global groupId
    groupId = groupid
def get_groupid(uuid):
    arr = []
    cmd_arr = []
    if len(uuid) < 1:
        return arr, cmd_arr
groupid = get_groupid_from_file()
if groupid is not None:
arr.append("/device/" + groupid)
cmd_arr.append("/msg/g/" + groupid)
return arr, cmd_arr
#url="http://192.168.1.230:9000/restapi/workai-getgroupid?uuid=" + uuid
API_SERVER_ADDRESS = os.getenv('API_SERVER_ADDRESS','workaihost.tiegushi.com')
API_SERVER_PORT = os.getenv('API_SERVER_PORT','80')
url='http://'+API_SERVER_ADDRESS+':'+API_SERVER_PORT+'/restapi/workai-getgroupid?uuid=' + uuid
print('get group id:', url)
#url = "http://deepeye.tiegushi.com/restapi/workai-getgroupid?uuid=" + uuid
try:
response = urlopen(url, timeout=10)
except HTTPError as e:
print('HTTPError: ', e.code)
except URLError as e:
print('URLError: ', e.reason)
except Exception as e:
print('Error: ', e)
else:
# everything is fine
if 200 == response.getcode():
result = response.readline()
groupid=result.decode().split(',')
for i in range(len(groupid)):
if len(groupid[i])>0:
arr.append("/device/" + groupid[i])
cmd_arr.append("/msg/g/" + groupid[i])
# Currently we only allow tablet to join one group.
save_groupid_to_file(groupid[i])
else:
print('response code != 200')
return arr, cmd_arr
return arr, cmd_arr
# print get_groupid('7YRBBDB722002717')
# print get_deviceid()
# using a global here can cause bugs
def get_deviceid2():
return get_deviceid()
def get_current_groupid(uuid=None):
    # Evaluate the default lazily: a default of get_deviceid() in the signature
    # would be computed once at import time, not on every call.
    if uuid is None:
        uuid = get_deviceid() or ''
    groupid, _ = get_groupid(uuid)
    if groupid:
        return groupid[0].replace('/device/', '')
    else:
        return None
def check_groupid_changed():
save_groupid_to_file('')
get_current_groupid(get_deviceid())
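# Illustrative usage sketch: how the helpers above might be exercised together.
# It assumes either the serial-number file or a known network interface exists
# and that the group-id API server is reachable; nothing here is required by
# the module itself.
if __name__ == '__main__':
    device_id = get_deviceid()
    print('device id:', device_id)
    # topic lists for the device's group(s); empty if no group could be resolved
    topics, cmd_topics = get_groupid(device_id or '')
    print('device topics:', topics)
    print('command topics:', cmd_topics)
    print('current group id:', get_current_groupid(device_id))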
|
import os
import random
import dialogflow_v2 as dialogflow
# path to the Google API credentials file
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "path to your Google API credentials file"
# Google project ID from the agent settings page
PROJECT_ID = "your dialogflow project ID"
# the language your agent is trained on
LANGUAGE_CODE = "en-US"
session_client = dialogflow.SessionsClient()
def get_suggestions(text, session_id=None):
# if no session, create a random session id (default behaviour)
if session_id is None:
session_id = str(random.randint(0, 10000))
# get the dialogflow session
session = session_client.session_path(PROJECT_ID, session_id)
# create a new input using the text description
text_input = dialogflow.types.TextInput(text=text, language_code=LANGUAGE_CODE)
query_input = dialogflow.types.QueryInput(text=text_input)
# call the Dialogflow API to ask for suggested articles given text in input
response = session_client.detect_intent(session=session, query_input=query_input)
result = response.query_result
# if the matching intent is fallback, no suggested articles were found
if result.intent.is_fallback or len(result.fulfillment_messages) == 0:
return None
# return the list of suggested articles as a list of dict
articles = []
for msg in result.fulfillment_messages:
fields = msg.payload.fields
articles.append({"url": fields["url"].string_value,
"title": fields["title"].string_value,
"confidence": result.intent_detection_confidence})
return articles if len(articles)>0 else None
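# Illustrative usage sketch: calling get_suggestions once the credentials path
# and PROJECT_ID above are filled in. The question text is a made-up example.
if __name__ == "__main__":
    suggestions = get_suggestions("How do I reset my password?")
    if suggestions is None:
        print("No suggested articles found.")
    else:
        for article in suggestions:
            print("{title} ({confidence:.2f}): {url}".format(**article))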
|
import csv, argparse, os, re
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_file", default=None, type=str, required=True, help="The input data file (a text file)."
)
parser.add_argument("--output_dir", default=None, type=str, required=True)
args = parser.parse_args()
with open(args.data_file,'r') as in_file:
with open(os.path.join(args.output_dir,args.data_file.split('/')[-1]+'.proc'),'w') as out_file:
reader = csv.reader(in_file, delimiter='\t')
next(reader)
sentences = []
for row in reader:
if row[1] != 'misc':
speaker = row[1].split('_')[1]
if speaker == 'int':
continue
if row[3][:6] == '(pause':
continue
#print(row[3])
s = re.sub(r'<(.*?)>', '', row[3])
s = re.sub(r'\((.*?)\)', '', s)
s = re.sub(r'/\?(.*?)/', '', s)
                s = s.replace('[', '').replace(']', '').replace('/unintelligible/', '').replace('/', '').replace('  ', ' ').strip()
if not s:
continue
if sentences and s[0].islower():
sentences[-1] += ' ' + s
elif sentences and sentences[-1][-1] in ',-':
sentences[-1] += ' ' + s
else:
sentences.append(s)
for s in sentences:
if len(s.split()) > 3:
out_file.write(s + '\n')
|
import datetime
from abc import ABC
from fedot.core.log import Log, default_log
class Timer(ABC):
def __init__(self, max_lead_time: datetime.timedelta = None, log: Log = None):
self.process_terminated = False
if not log:
self.log = default_log(__name__)
else:
self.log = log
self.max_lead_time = max_lead_time
def __enter__(self):
self.start = datetime.datetime.now()
return self
@property
def start_time(self):
return self.start
@property
def spent_time(self) -> datetime.timedelta:
return datetime.datetime.now() - self.start
@property
def minutes_from_start(self) -> float:
return self.spent_time.total_seconds() / 60.
@property
def seconds_from_start(self) -> float:
return self.spent_time.total_seconds()
def is_time_limit_reached(self) -> bool:
self.process_terminated = False
if self.max_lead_time is not None:
if datetime.datetime.now() - self.start >= self.max_lead_time:
self.process_terminated = True
return self.process_terminated
def __exit__(self, *args):
return self.process_terminated
class CompositionTimer(Timer):
def __init__(self, max_lead_time: datetime.timedelta = None, log: Log = None):
super().__init__(max_lead_time=max_lead_time, log=log)
self.init_time = 0
def _is_next_iteration_possible(self, time_constraint: float, generation_num: int = None) -> bool:
minutes = self.minutes_from_start
if generation_num is not None:
evo_proc_minutes = minutes - self.init_time
possible = time_constraint > (minutes + (evo_proc_minutes / (generation_num + 1)))
else:
possible = time_constraint > minutes
if not possible:
self.process_terminated = True
return possible
def is_time_limit_reached(self, generation_num: int = None) -> bool:
if self.max_lead_time:
max_lead_time = 0 if self.max_lead_time.total_seconds() < 0 else self.max_lead_time.total_seconds() / 60.
if max_lead_time:
reached = not self._is_next_iteration_possible(generation_num=generation_num,
time_constraint=max_lead_time)
else:
self.process_terminated = True
reached = True
else:
reached = False
return reached
def set_init_time(self, init_time: float):
self.init_time = init_time
def __exit__(self, *args):
self.log.info(f'Composition time: {round(self.minutes_from_start, 3)} min')
if self.process_terminated:
self.log.info('Algorithm was terminated due to processing time limit')
class TunerTimer(Timer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def is_time_limit_reached(self) -> bool:
super().is_time_limit_reached()
if self.process_terminated:
self.log.info('Tuning completed because of the time limit reached')
return self.process_terminated
def __exit__(self, *args):
return self.process_terminated
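# Illustrative usage sketch: both timers are context managers, so a typical loop
# checks the limit on each iteration. The half-minute budget and the sleep that
# stands in for real work are made-up values, not part of the library.
if __name__ == '__main__':
    import time
    with CompositionTimer(max_lead_time=datetime.timedelta(minutes=0.5)) as timer:
        for generation in range(10000):
            time.sleep(0.01)  # placeholder for one generation of work
            if timer.is_time_limit_reached(generation_num=generation):
                break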
|
"""Implementations of grading abstract base class managers."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class GradingProfile:
"""The ``GradingProfile`` describes the interoperability among grading services."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def supports_visible_federation(self):
"""Tests if federation is visible.
        :return: ``true`` if visible federation is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_grade_system_lookup(self):
"""Tests if a grade system lookup service is supported.
        :return: ``true`` if grade system lookup is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_grade_system_query(self):
"""Tests if a grade system query service is supported.
:return: ``true`` if grade system query is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_grade_system_search(self):
"""Tests if a grade system search service is supported.
:return: ``true`` if grade system search is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_grade_system_admin(self):
"""Tests if a grade system administrative service is supported.
:return: ``true`` if grade system admin is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_grade_system_notification(self):
"""Tests if grade system notification is supported.
Messages may be sent when grade entries are created, modified,
or deleted.
:return: ``true`` if grade system notification is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_grade_system_gradebook(self):
"""Tests if a grade system to gradebook lookup session is available.
:return: ``true`` if grade system gradebook lookup session is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_grade_system_gradebook_assignment(self):
"""Tests if a grade system to gradebook assignment session is available.
:return: ``true`` if grade system gradebook assignment is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_grade_system_smart_gradebook(self):
"""Tests if a grade system smart gradebook session is available.
:return: ``true`` if grade system smart gradebook is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_grade_entry_lookup(self):
"""Tests if a grade entry lookup service is supported.
        :return: ``true`` if grade entry lookup is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_grade_entry_query(self):
"""Tests if a grade entry query service is supported.
        :return: ``true`` if grade entry query is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_grade_entry_search(self):
"""Tests if a grade entry search service is supported.
:return: ``true`` if grade entry search is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_grade_entry_admin(self):
"""Tests if a grade entry administrative service is supported.
:return: ``true`` if grade entry admin is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_grade_entry_notification(self):
"""Tests if grade entry notification is supported.
:return: ``true`` if grade entry notification is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_gradebook_column_lookup(self):
"""Tests if a gradebook column lookup service is supported.
        :return: ``true`` if gradebook column lookup is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_gradebook_column_query(self):
"""Tests if a gradebook column query service is supported.
        :return: ``true`` if gradebook column query is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_gradebook_column_search(self):
"""Tests if a gradebook column search service is supported.
        :return: ``true`` if gradebook column search is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_gradebook_column_admin(self):
"""Tests if a gradebook column administrative service is supported.
:return: ``true`` if gradebook column admin is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_gradebook_column_notification(self):
"""Tests if gradebook column notification is supported.
Messages may be sent when grade entries are created, modified,
or deleted.
:return: ``true`` if gradebook column notification is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_gradebook_column_gradebook(self):
"""Tests if a gradebook column to gradebook lookup session is available.
:return: ``true`` if gradebook column gradebook lookup session is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_gradebook_column_gradebook_assignment(self):
"""Tests if a gradebook column to gradebook assignment session is available.
:return: ``true`` if gradebook column gradebook assignment is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
    def supports_gradebook_column_smart_gradebook(self):
        """Tests if a gradebook column smart gradebook session is available.
        :return: ``true`` if gradebook column smart gradebook is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_gradebook_lookup(self):
"""Tests if a gradebook lookup service is supported.
:return: ``true`` if gradebook lookup is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_gradebook_query(self):
"""Tests if a gradebook query service is supported.
:return: ``true`` if gradebook query is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_gradebook_search(self):
"""Tests if a gradebook search service is supported.
:return: ``true`` if gradebook search is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_gradebook_admin(self):
"""Tests if a gradebook administrative service is supported.
:return: ``true`` if gradebook admin is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_gradebook_notification(self):
"""Tests if gradebook notification is supported.
Messages may be sent when gradebooks are created, modified, or
deleted.
        :return: ``true`` if gradebook notification is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_gradebook_hierarchy(self):
"""Tests if a gradebook hierarchy traversal is supported.
:return: ``true`` if a gradebook hierarchy traversal is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_gradebook_hierarchy_design(self):
"""Tests if gradebook hierarchy design is supported.
:return: ``true`` if a gradebook hierarchy design is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_grading_batch(self):
"""Tests if a grading batch service is supported.
:return: ``true`` if a grading batch service is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_grading_calculation(self):
"""Tests if a grading calculation service is supported.
:return: ``true`` if a grading calculation service is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def supports_grading_transform(self):
"""Tests if a grade system transform service is supported.
:return: ``true`` if a grading transform service is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_grade_record_types(self):
"""Gets the supported ``Grade`` record types.
:return: a list containing the supported ``Grade`` record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
grade_record_types = property(fget=get_grade_record_types)
@abc.abstractmethod
def supports_grade_record_type(self, grade_record_type):
"""Tests if the given ``Grade`` record type is supported.
:param grade_record_type: a ``Type`` indicating a ``Grade`` record type
:type grade_record_type: ``osid.type.Type``
:return: ``true`` if the given Type is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``grade_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_grade_system_record_types(self):
"""Gets the supported ``GradeSystem`` record types.
:return: a list containing the supported ``GradeSystem`` record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
grade_system_record_types = property(fget=get_grade_system_record_types)
@abc.abstractmethod
def supports_grade_system_record_type(self, grade_system_record_type):
"""Tests if the given ``GradeSystem`` record type is supported.
:param grade_system_record_type: a ``Type`` indicating a ``GradeSystem`` record type
:type grade_system_record_type: ``osid.type.Type``
:return: ``true`` if the given Type is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``grade_system_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_grade_system_search_record_types(self):
"""Gets the supported ``GradeSystem`` search record types.
:return: a list containing the supported ``GradeSystem`` search record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
grade_system_search_record_types = property(fget=get_grade_system_search_record_types)
@abc.abstractmethod
def supports_grade_system_search_record_type(self, grade_system_search_record_type):
"""Tests if the given ``GradeSystem`` search record type is supported.
:param grade_system_search_record_type: a ``Type`` indicating a ``GradeSystem`` search record type
:type grade_system_search_record_type: ``osid.type.Type``
:return: ``true`` if the given Type is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``grade_system_search_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_grade_entry_record_types(self):
"""Gets the supported ``GradeEntry`` record types.
:return: a list containing the supported ``GradeEntry`` record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
grade_entry_record_types = property(fget=get_grade_entry_record_types)
@abc.abstractmethod
def supports_grade_entry_record_type(self, grade_entry_record_type):
"""Tests if the given ``GradeEntry`` record type is supported.
:param grade_entry_record_type: a ``Type`` indicating a ``GradeEntry`` record type
:type grade_entry_record_type: ``osid.type.Type``
:return: ``true`` if the given Type is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``grade_entry_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_grade_entry_search_record_types(self):
"""Gets the supported ``GradeEntry`` search record types.
:return: a list containing the supported ``GradeEntry`` search record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
grade_entry_search_record_types = property(fget=get_grade_entry_search_record_types)
@abc.abstractmethod
def supports_grade_entry_search_record_type(self, grade_entry_search_record_type):
"""Tests if the given ``GradeEntry`` search record type is supported.
:param grade_entry_search_record_type: a ``Type`` indicating a ``GradeEntry`` search record type
:type grade_entry_search_record_type: ``osid.type.Type``
:return: ``true`` if the given Type is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``grade_entry_search_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_gradebook_column_record_types(self):
"""Gets the supported ``GradebookColumn`` record types.
:return: a list containing the supported ``GradebookColumn`` record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
gradebook_column_record_types = property(fget=get_gradebook_column_record_types)
@abc.abstractmethod
def supports_gradebook_column_record_type(self, gradebook_column_record_type):
"""Tests if the given ``GradebookColumn`` record type is supported.
:param gradebook_column_record_type: a ``Type`` indicating a ``GradebookColumn`` type
:type gradebook_column_record_type: ``osid.type.Type``
:return: ``true`` if the given gradebook column record ``Type`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``gradebook_column_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_gradebook_column_search_record_types(self):
"""Gets the supported gradebook column search record types.
:return: a list containing the supported ``GradebookColumn`` search record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
gradebook_column_search_record_types = property(fget=get_gradebook_column_search_record_types)
@abc.abstractmethod
def supports_gradebook_column_search_record_type(self, gradebook_column_search_record_type):
"""Tests if the given gradebook column search record type is supported.
:param gradebook_column_search_record_type: a ``Type`` indicating a ``GradebookColumn`` search record type
:type gradebook_column_search_record_type: ``osid.type.Type``
:return: ``true`` if the given search record ``Type`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``gradebook_column_search_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_gradebook_column_summary_record_types(self):
"""Gets the supported ``GradebookColumnSummary`` record types.
:return: a list containing the supported ``GradebookColumnSummary`` record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
gradebook_column_summary_record_types = property(fget=get_gradebook_column_summary_record_types)
@abc.abstractmethod
def supports_gradebook_column_summary_record_type(self, gradebook_column_summary_record_type):
"""Tests if the given ``GradebookColumnSummary`` record type is supported.
:param gradebook_column_summary_record_type: a ``Type`` indicating a ``GradebookColumnSummary`` type
:type gradebook_column_summary_record_type: ``osid.type.Type``
:return: ``true`` if the given gradebook column summary record ``Type`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``gradebook_column_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_gradebook_record_types(self):
"""Gets the supported ``Gradebook`` record types.
:return: a list containing the supported ``Gradebook`` record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
gradebook_record_types = property(fget=get_gradebook_record_types)
@abc.abstractmethod
def supports_gradebook_record_type(self, gradebook_record_type):
"""Tests if the given ``Gradebook`` record type is supported.
:param gradebook_record_type: a ``Type`` indicating a ``Gradebook`` type
:type gradebook_record_type: ``osid.type.Type``
:return: ``true`` if the given gradebook record ``Type`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``gradebook_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
@abc.abstractmethod
def get_gradebook_search_record_types(self):
"""Gets the supported gradebook search record types.
:return: a list containing the supported ``Gradebook`` search record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
gradebook_search_record_types = property(fget=get_gradebook_search_record_types)
@abc.abstractmethod
def supports_gradebook_search_record_type(self, gradebook_search_record_type):
"""Tests if the given gradebook search record type is supported.
:param gradebook_search_record_type: a ``Type`` indicating a ``Gradebook`` search record type
:type gradebook_search_record_type: ``osid.type.Type``
:return: ``true`` if the given search record ``Type`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``gradebook_search_record_type`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
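# Illustrative sketch (not part of the OSID specification): a concrete provider
# profile would typically hard-code which services it actually offers. The class
# name and the particular capabilities below are hypothetical.
#
# class MyGradingProfile(GradingProfile):
#     def supports_grade_system_lookup(self):
#         return True   # this provider ships a GradeSystemLookupSession
#     def supports_grade_system_query(self):
#         return False  # querying is not offered
#     def supports_visible_federation(self):
#         return False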
class GradingManager:
"""The grading manager provides access to grading sessions and provides interoperability tests for various aspects of this service.
The sessions included in this manager are:
* ``GradeSystemLookupSession:`` a session to look up grades and
grade systems
    * ``GradeSystemQuerySession:`` a session to query grade systems
* ``GradeSystemSearchSession:`` a session to search grade systems
* ``GradeSystemAdminSession:`` a session to manage grade systems
* ``GradeSystemNotificationSession`` a session for subscribing to
new or deleted grades or grade systems
* ``GradeSystemGradebookSession:`` a session for retrieving grade
system to gradebook mappings
* ``GradeSystemGradebookAssignmentSession:`` a session for
managing grade system to gradebook mappings
* ``GradeSystemSmartGradebookSession:`` a session for managing
smart gradebooks of grade systems
* ``GradeEntryLookupSession:`` a session to look up grade entries
    * ``GradeEntryQuerySession:`` a session to query grade entries
* ``GradeEntrySearchSession:`` a session to search grade entries
    * ``GradeEntryAdminSession:`` a session to create, modify and
      delete grade entries
    * ``GradeEntryNotificationSession:`` a session to receive messages
      pertaining to grade entry changes
* ``GradebookColumnLookupSession:`` a session to look up gradebook
columns
    * ``GradebookColumnQuerySession:`` a session to query gradebook
      columns
* ``GradebookColumnSearchSession:`` a session to search gradebook
columns
* ``GradebookColumnAdminSession:`` a session to manage gradebook
columns
* ``GradebookColumnNotificationSession`` a session for subscribing
to new or deleted gradebook columns
* ``GradebookColumnGradebookSession:`` a session for retrieving
gradebook column to gradebook mappings
* ``GradebookColumnGradebookAssignmentSession:`` a session for
managing gradebook column to gradebook mappings
* ``GradebookColumnSmartGradebookSession:`` a session for managing
smart gradebooks of gradebook columns
* ``GradebookLookupSession:`` a session to lookup gradebooks
* ``GradebookQuerySession:`` a session to query gradebooks
* ``GradebookSearchSession`` : a session to search gradebooks
* ``GradebookAdminSession`` : a session to create, modify and
delete gradebooks
* ``GradebookNotificationSession`` : a session to receive messages
pertaining to gradebook changes
* ``GradebookHierarchySession:`` a session to traverse the
gradebook hierarchy
* ``GradebookHierarchyDesignSession:`` a session to manage the
gradebook hierarchy
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_grade_system_lookup_session(self):
"""Gets the ``OsidSession`` associated with the grade system lookup service.
:return: a ``GradeSystemLookupSession``
:rtype: ``osid.grading.GradeSystemLookupSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_system_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_lookup()`` is ``true``.*
"""
return # osid.grading.GradeSystemLookupSession
grade_system_lookup_session = property(fget=get_grade_system_lookup_session)
@abc.abstractmethod
def get_grade_system_lookup_session_for_gradebook(self, gradebook_id):
"""Gets the ``OsidSession`` associated with the grade system lookup service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:return: ``a GradeSystemLookupSession``
:rtype: ``osid.grading.GradeSystemLookupSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_system_lookup()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_lookup()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeSystemLookupSession
@abc.abstractmethod
def get_grade_system_query_session(self):
"""Gets the ``OsidSession`` associated with the grade system query service.
:return: a ``GradeSystemQuerySession``
:rtype: ``osid.grading.GradeSystemQuerySession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_system_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_query()`` is ``true``.*
"""
return # osid.grading.GradeSystemQuerySession
grade_system_query_session = property(fget=get_grade_system_query_session)
@abc.abstractmethod
def get_grade_system_query_session_for_gradebook(self, gradebook_id):
"""Gets the ``OsidSession`` associated with the grade system query service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:return: ``a GradeSystemQuerySession``
:rtype: ``osid.grading.GradeSystemQuerySession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_system_query()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeSystemQuerySession
@abc.abstractmethod
def get_grade_system_search_session(self):
"""Gets the ``OsidSession`` associated with the grade system search service.
:return: a ``GradeSystemSearchSession``
:rtype: ``osid.grading.GradeSystemSearchSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_system_search()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_search()`` is ``true``.*
"""
return # osid.grading.GradeSystemSearchSession
grade_system_search_session = property(fget=get_grade_system_search_session)
@abc.abstractmethod
def get_grade_system_search_session_for_gradebook(self, gradebook_id):
"""Gets the ``OsidSession`` associated with the grade system search service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:return: ``a GradeSystemSearchSession``
:rtype: ``osid.grading.GradeSystemSearchSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_system_search()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_search()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeSystemSearchSession
@abc.abstractmethod
def get_grade_system_admin_session(self):
"""Gets the ``OsidSession`` associated with the grade system administration service.
:return: a ``GradeSystemAdminSession``
:rtype: ``osid.grading.GradeSystemAdminSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_system_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_admin()`` is ``true``.*
"""
return # osid.grading.GradeSystemAdminSession
grade_system_admin_session = property(fget=get_grade_system_admin_session)
@abc.abstractmethod
def get_grade_system_admin_session_for_gradebook(self, gradebook_id):
"""Gets the ``OsidSession`` associated with the grade system admin service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:return: ``a GradeSystemAdminSession``
:rtype: ``osid.grading.GradeSystemAdminSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_system_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeSystemAdminSession
@abc.abstractmethod
def get_grade_system_notification_session(self, grade_system_receiver):
"""Gets the notification session for notifications pertaining to grade system changes.
:param grade_system_receiver: the grade system receiver
:type grade_system_receiver: ``osid.grading.GradeSystemReceiver``
:return: a ``GradeSystemNotificationSession``
:rtype: ``osid.grading.GradeSystemNotificationSession``
:raise: ``NullArgument`` -- ``grade_system_receiver`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_system_notification()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_notification()`` is ``true``.*
"""
return # osid.grading.GradeSystemNotificationSession
@abc.abstractmethod
def get_grade_system_notification_session_for_gradebook(self, grade_system_receiver, gradebook_id):
"""Gets the ``OsidSession`` associated with the grade system notification service for the given gradebook.
:param grade_system_receiver: the grade system receiver
:type grade_system_receiver: ``osid.grading.GradeSystemReceiver``
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:return: ``a _grade_system_notification_session``
:rtype: ``osid.grading.GradeSystemNotificationSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``grade_system_receiver`` or ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_system_notification()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeSystemNotificationSession
@abc.abstractmethod
def get_grade_system_gradebook_session(self):
"""Gets the session for retrieving grade system to gradebook mappings.
:return: a ``GradeSystemGradebookSession``
:rtype: ``osid.grading.GradeSystemGradebookSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_system_gradebook()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_gradebook()`` is ``true``.*
"""
return # osid.grading.GradeSystemGradebookSession
grade_system_gradebook_session = property(fget=get_grade_system_gradebook_session)
@abc.abstractmethod
def get_grade_system_gradebook_assignment_session(self):
"""Gets the session for assigning grade system to gradebook mappings.
:return: a ``GradeSystemGradebookAssignmentSession``
:rtype: ``osid.grading.GradeSystemGradebookSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_system_gradebook_assignment()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_gradebook_assignment()`` is ``true``.*
"""
return # osid.grading.GradeSystemGradebookAssignmentSession
grade_system_gradebook_assignment_session = property(fget=get_grade_system_gradebook_assignment_session)
@abc.abstractmethod
def get_grade_system_smart_gradebook_session(self, gradebook_id):
"""Gets the session for managing smart gradebooks of grade systems.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:return: a ``GradeSystemSmartGradebookSession``
:rtype: ``osid.grading.GradeSystemSmartGradebookSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_system_smart_gradebook()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_smart_gradebook()`` is ``true``.*
"""
return # osid.grading.GradeSystemSmartGradebookSession
@abc.abstractmethod
def get_grade_entry_lookup_session(self):
"""Gets the ``OsidSession`` associated with the grade entry lookup service.
:return: a ``GradeEntryLookupSession``
:rtype: ``osid.grading.GradeEntryLookupSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_entry_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_lookup()`` is ``true``.*
"""
return # osid.grading.GradeEntryLookupSession
grade_entry_lookup_session = property(fget=get_grade_entry_lookup_session)
@abc.abstractmethod
def get_grade_entry_lookup_session_for_gradebook(self, gradebook_id):
"""Gets the ``OsidSession`` associated with the grade entry lookup service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:return: ``a GradeEntryLookupSession``
:rtype: ``osid.grading.GradeEntryLookupSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_entry_lookup()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_lookup()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeEntryLookupSession
@abc.abstractmethod
def get_grade_entry_query_session(self):
"""Gets the ``OsidSession`` associated with the grade entry query service.
:return: a ``GradeEntryQuerySession``
:rtype: ``osid.grading.GradeEntryQuerySession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_entry_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_query()`` is ``true``.*
"""
return # osid.grading.GradeEntryQuerySession
grade_entry_query_session = property(fget=get_grade_entry_query_session)
@abc.abstractmethod
def get_grade_entry_query_session_for_gradebook(self, gradebook_id):
"""Gets the ``OsidSession`` associated with the grade entry query service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:return: ``a GradeEntryQuerySession``
:rtype: ``osid.grading.GradeEntryQuerySession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_entry_query()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeEntryQuerySession
@abc.abstractmethod
def get_grade_entry_search_session(self):
"""Gets the ``OsidSession`` associated with the grade entry search service.
:return: a ``GradeEntrySearchSession``
:rtype: ``osid.grading.GradeEntrySearchSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_entry_search()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_search()`` is ``true``.*
"""
return # osid.grading.GradeEntrySearchSession
grade_entry_search_session = property(fget=get_grade_entry_search_session)
@abc.abstractmethod
def get_grade_entry_search_session_for_gradebook(self, gradebook_id):
"""Gets the ``OsidSession`` associated with the grade entry search service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:return: ``a GradeEntrySearchSession``
:rtype: ``osid.grading.GradeEntrySearchSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_entry_search()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_search()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeEntrySearchSession
@abc.abstractmethod
def get_grade_entry_admin_session(self):
"""Gets the ``OsidSession`` associated with the grade entry administration service.
:return: a ``GradeEntryAdminSession``
:rtype: ``osid.grading.GradeEntryAdminSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_entry_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_admin()`` is ``true``.*
"""
return # osid.grading.GradeEntryAdminSession
grade_entry_admin_session = property(fget=get_grade_entry_admin_session)
@abc.abstractmethod
def get_grade_entry_admin_session_for_gradebook(self, gradebook_id):
"""Gets the ``OsidSession`` associated with the grade entry admin service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:return: ``a GradeEntryAdminSession``
:rtype: ``osid.grading.GradeEntryAdminSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_entry_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeEntryAdminSession
@abc.abstractmethod
def get_grade_entry_notification_session(self, receiver):
"""Gets the notification session for notifications pertaining to grade entry changes.
:param receiver: the grade entry receiver
:type receiver: ``osid.grading.GradeEntryReceiver``
:return: a ``GradeEntryNotificationSession``
:rtype: ``osid.grading.GradeEntryNotificationSession``
:raise: ``NullArgument`` -- ``receiver`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_entry_notification()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_notification()`` is ``true``.*
"""
return # osid.grading.GradeEntryNotificationSession
@abc.abstractmethod
def get_grade_entry_notification_session_for_gradebook(self, receiver, gradebook_id):
"""Gets the ``OsidSession`` associated with the grade entry notification service for the given gradebook.
:param receiver: the grade entry receiver
:type receiver: ``osid.grading.GradeEntryReceiver``
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:return: ``a _grade_entry_notification_session``
:rtype: ``osid.grading.GradeEntryNotificationSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``receiver`` or ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_entry_notification()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeEntryNotificationSession
@abc.abstractmethod
def get_gradebook_column_lookup_session(self):
"""Gets the ``OsidSession`` associated with the gradebook column lookup service.
:return: a ``GradebookColumnLookupSession``
:rtype: ``osid.grading.GradebookColumnLookupSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_column_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_lookup()`` is ``true``.*
"""
return # osid.grading.GradebookColumnLookupSession
gradebook_column_lookup_session = property(fget=get_gradebook_column_lookup_session)
@abc.abstractmethod
def get_gradebook_column_lookup_session_for_gradebook(self, gradebook_id):
"""Gets the ``OsidSession`` associated with the gradebook column lookup service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:return: ``a _gradebook_column_lookup_session``
:rtype: ``osid.grading.GradebookColumnLookupSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_gradebook_column_lookup()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_lookup()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradebookColumnLookupSession
@abc.abstractmethod
def get_gradebook_column_query_session(self):
"""Gets the ``OsidSession`` associated with the gradebook column query service.
:return: a ``GradebookColumnQuerySession``
:rtype: ``osid.grading.GradebookColumnQuerySession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_column_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_query()`` is ``true``.*
"""
return # osid.grading.GradebookColumnQuerySession
gradebook_column_query_session = property(fget=get_gradebook_column_query_session)
@abc.abstractmethod
def get_gradebook_column_query_session_for_gradebook(self, gradebook_id):
"""Gets the ``OsidSession`` associated with the gradebook column query service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:return: ``a GradebookColumnQuerySession``
:rtype: ``osid.grading.GradebookColumnQuerySession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_gradebook_column_query()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradebookColumnQuerySession
@abc.abstractmethod
def get_gradebook_column_search_session(self):
"""Gets the ``OsidSession`` associated with the gradebook column search service.
:return: a ``GradebookColumnSearchSession``
:rtype: ``osid.grading.GradebookColumnSearchSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_column_search()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_search()`` is ``true``.*
"""
return # osid.grading.GradebookColumnSearchSession
gradebook_column_search_session = property(fget=get_gradebook_column_search_session)
@abc.abstractmethod
def get_gradebook_column_search_session_for_gradebook(self, gradebook_id):
"""Gets the ``OsidSession`` associated with the gradebook column search service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:return: ``a _gradebook_column_search_session``
:rtype: ``osid.grading.GradebookColumnSearchSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_gradebook_column_search()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_search()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradebookColumnSearchSession
@abc.abstractmethod
def get_gradebook_column_admin_session(self):
"""Gets the ``OsidSession`` associated with the gradebook column administration service.
:return: a ``GradebookColumnAdminSession``
:rtype: ``osid.grading.GradebookColumnAdminSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_column_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_admin()`` is ``true``.*
"""
return # osid.grading.GradebookColumnAdminSession
gradebook_column_admin_session = property(fget=get_gradebook_column_admin_session)
@abc.abstractmethod
def get_gradebook_column_admin_session_for_gradebook(self, gradebook_id):
"""Gets the ``OsidSession`` associated with the gradebook column admin service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:return: ``a GradebookColumnAdminSession``
:rtype: ``osid.grading.GradebookColumnAdminSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_gradebook_column_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradebookColumnAdminSession
@abc.abstractmethod
def get_gradebook_column_notification_session(self, gradebook_column_receiver):
"""Gets the notification session for notifications pertaining to gradebook column changes.
        :param gradebook_column_receiver: the gradebook column receiver
:type gradebook_column_receiver: ``osid.grading.GradebookColumnReceiver``
:return: a ``GradebookColumnNotificationSession``
:rtype: ``osid.grading.GradebookColumnNotificationSession``
:raise: ``NullArgument`` -- ``gradebook_column_receiver`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_column_notification()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_notification()`` is ``true``.*
"""
return # osid.grading.GradebookColumnNotificationSession
@abc.abstractmethod
def get_gradebook_column_notification_session_for_gradebook(self, gradebook_column_receiver, gradebook_id):
"""Gets the ``OsidSession`` associated with the gradebook column notification service for the given gradebook.
:param gradebook_column_receiver: the gradebook column receiver
:type gradebook_column_receiver: ``osid.grading.GradebookColumnReceiver``
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
        :return: a ``GradebookColumnNotificationSession``
:rtype: ``osid.grading.GradebookColumnNotificationSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_column_receiver`` or ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_gradebook_column_notification()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradebookColumnNotificationSession
@abc.abstractmethod
def get_gradebook_column_gradebook_session(self):
"""Gets the session for retrieving gradebook column to gradebook mappings.
:return: a ``GradebookColumnGradebookSession``
:rtype: ``osid.grading.GradebookColumnGradebookSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_column_gradebook()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_gradebook()`` is ``true``.*
"""
return # osid.grading.GradebookColumnGradebookSession
gradebook_column_gradebook_session = property(fget=get_gradebook_column_gradebook_session)
@abc.abstractmethod
def get_gradebook_column_gradebook_assignment_session(self):
"""Gets the session for assigning gradebook column to gradebook mappings.
:return: a ``GradebookColumnGradebookAssignmentSession``
:rtype: ``osid.grading.GradebookColumnGradebookAssignmentSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_column_gradebook_assignment()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_gradebook_assignment()`` is
``true``.*
"""
return # osid.grading.GradebookColumnGradebookAssignmentSession
gradebook_column_gradebook_assignment_session = property(fget=get_gradebook_column_gradebook_assignment_session)
@abc.abstractmethod
def get_gradebook_column_smart_gradebook_session(self, gradebook_id):
"""Gets the session for managing smart gradebooks of gradebook columns.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:return: a ``GradebookColumnSmartGradebookSession``
:rtype: ``osid.grading.GradebookColumnSmartGradebookSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_column_smart_gradebook()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_smart_gradebook()`` is ``true``.*
"""
return # osid.grading.GradebookColumnSmartGradebookSession
@abc.abstractmethod
def get_gradebook_lookup_session(self):
"""Gets the OsidSession associated with the gradebook lookup service.
:return: a ``GradebookLookupSession``
:rtype: ``osid.grading.GradebookLookupSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_lookup() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_lookup()`` is true.*
"""
return # osid.grading.GradebookLookupSession
gradebook_lookup_session = property(fget=get_gradebook_lookup_session)
@abc.abstractmethod
def get_gradebook_query_session(self):
"""Gets the OsidSession associated with the gradebook query service.
:return: a ``GradebookQuerySession``
:rtype: ``osid.grading.GradebookQuerySession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_query() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_query()`` is true.*
"""
return # osid.grading.GradebookQuerySession
gradebook_query_session = property(fget=get_gradebook_query_session)
@abc.abstractmethod
def get_gradebook_search_session(self):
"""Gets the OsidSession associated with the gradebook search service.
:return: a ``GradebookSearchSession``
:rtype: ``osid.grading.GradebookSearchSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_search() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_search()`` is true.*
"""
return # osid.grading.GradebookSearchSession
gradebook_search_session = property(fget=get_gradebook_search_session)
@abc.abstractmethod
def get_gradebook_admin_session(self):
"""Gets the OsidSession associated with the gradebook administration service.
:return: a ``GradebookAdminSession``
:rtype: ``osid.grading.GradebookAdminSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_admin() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_admin()`` is true.*
"""
return # osid.grading.GradebookAdminSession
gradebook_admin_session = property(fget=get_gradebook_admin_session)
@abc.abstractmethod
def get_gradebook_notification_session(self, gradebook_receiver):
"""Gets the notification session for notifications pertaining to gradebook service changes.
:param gradebook_receiver: the gradebook receiver
:type gradebook_receiver: ``osid.grading.GradebookReceiver``
:return: a ``GradebookNotificationSession``
:rtype: ``osid.grading.GradebookNotificationSession``
:raise: ``NullArgument`` -- ``gradebook_receiver`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_notification() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_notification()`` is true.*
"""
return # osid.grading.GradebookNotificationSession
@abc.abstractmethod
def get_gradebook_hierarchy_session(self):
"""Gets the session traversing gradebook hierarchies.
:return: a ``GradebookHierarchySession``
:rtype: ``osid.grading.GradebookHierarchySession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_hierarchy() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_hierarchy()`` is true.*
"""
return # osid.grading.GradebookHierarchySession
gradebook_hierarchy_session = property(fget=get_gradebook_hierarchy_session)
@abc.abstractmethod
def get_gradebook_hierarchy_design_session(self):
"""Gets the session designing gradebook hierarchies.
:return: a ``GradebookHierarchyDesignSession``
:rtype: ``osid.grading.GradebookHierarchyDesignSession``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_hierarchy_design() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_hierarchy_design()`` is true.*
"""
return # osid.grading.GradebookHierarchyDesignSession
gradebook_hierarchy_design_session = property(fget=get_gradebook_hierarchy_design_session)
@abc.abstractmethod
def get_grading_batch_manager(self):
"""Gets the ``GradingBatchManager``.
:return: a ``GradingBatchManager``
:rtype: ``osid.grading.batch.GradingBatchManager``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grading_batch() is false``
*compliance: optional -- This method must be implemented if
``supports_grading_batch()`` is true.*
"""
return # osid.grading.batch.GradingBatchManager
grading_batch_manager = property(fget=get_grading_batch_manager)
@abc.abstractmethod
def get_grading_calculation_manager(self):
"""Gets the ``GradingCalculationManager``.
:return: a ``GradingCalculationManager``
:rtype: ``osid.grading.calculation.GradingCalculationManager``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grading_calculation() is false``
*compliance: optional -- This method must be implemented if
``supports_grading_calculation()`` is true.*
"""
return # osid.grading.calculation.GradingCalculationManager
grading_calculation_manager = property(fget=get_grading_calculation_manager)
@abc.abstractmethod
def get_grading_transform_manager(self):
"""Gets the ``GradingTransformManager``.
:return: a ``GradingTransformManager``
:rtype: ``osid.grading.transform.GradingTransformManager``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grading_transform() is false``
*compliance: optional -- This method must be implemented if
``supports_grading_transform()`` is true.*
"""
return # osid.grading.transform.GradingTransformManager
grading_transform_manager = property(fget=get_grading_transform_manager)
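# --- Illustrative usage sketch (not part of the OSID specification) ---
# Assuming some concrete provider implementation of the abstract GradingManager
# above, a consumer asks the manager for sessions instead of constructing them
# directly; the manager instance passed in here is a hypothetical placeholder.
def _example_gradebook_lookup_session(grading_manager):
    """Sketch: obtain a GradebookLookupSession if the provider supports it."""
    if grading_manager.supports_gradebook_lookup():
        # May raise OperationFailed, per the contract documented above.
        return grading_manager.get_gradebook_lookup_session()
    return None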
class GradingProxyManager:
"""The grading manager provides access to grading sessions and provides interoperability tests for various aspects of this service.
Methods in this manager accept a ``Proxy`` for passing information
    from server environments. The sessions included in this manager are:
* ``GradeSystemLookupSession:`` a session to look up grades and
grade systems
      * ``GradeSystemQuerySession:`` a session to query grade systems
* ``GradeSystemSearchSession:`` a session to search grade systems
* ``GradeSystemAdminSession:`` a session to manage grade systems
* ``GradeSystemNotificationSession`` a session for subscribing to
new or deleted grades or grade systems
* ``GradeSystemGradebookSession:`` a session for retrieving grade
system to gradebook mappings
* ``GradeSystemGradebookAssignmentSession:`` a session for
managing grade system to gradebook mappings
* ``GradeSystemSmartGradebookSession:`` a session for managing
smart gradebooks of grade systems
* ``GradeEntryLookupSession:`` a session to look up grade entries
      * ``GradeEntryQuerySession:`` a session to query grade entries
* ``GradeEntrySearchSession:`` a session to search grade entries
      * ``GradeEntryAdminSession:`` a session to create, modify and
        delete grade entries
      * ``GradeEntryNotificationSession:`` a session to receive messages
        pertaining to grade entry changes
* ``GradebookColumnLookupSession:`` a session to look up gradebook
columns
      * ``GradebookColumnQuerySession:`` a session to query gradebook
        columns
* ``GradebookColumnSearchSession:`` a session to search gradebook
columns
* ``GradebookColumnAdminSession:`` a session to manage gradebook
columns
* ``GradebookColumnDerivationSession:`` a session to manage
derived gradebook columns
* ``GradebookColumnNotificationSession`` a session for subscribing
to new or deleted gradebook columns
* ``GradebookColumnGradebookSession:`` a session for retrieving
gradebook column to gradebook mappings
* ``GradebookColumnGradebookAssignmentSession:`` a session for
managing gradebook column to gradebook mappings
* ``GradebookColumnSmartGradebookSession:`` a session for managing
smart gradebooks of gradebook columns
* ``GradebookLookupSession:`` a session to lookup gradebooks
* ``GradebookQuerySession:`` a session to query gradebooks
* ``GradebookSearchSession`` : a session to search gradebooks
* ``GradebookAdminSession`` : a session to create, modify and
delete gradebooks
* ``GradebookNotificationSession`` : a session to receive messages
pertaining to gradebook changes
* ``GradebookHierarchySession:`` a session to traverse the
gradebook hierarchy
* ``GradebookHierarchyDesignSession:`` a session to manage the
gradebook hierarchy
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_grade_system_lookup_session(self, proxy):
"""Gets the ``OsidSession`` associated with the grade system lookup service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradeSystemLookupSession``
:rtype: ``osid.grading.GradeSystemLookupSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_system_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_lookup()`` is ``true``.*
"""
return # osid.grading.GradeSystemLookupSession
@abc.abstractmethod
def get_grade_system_lookup_session_for_gradebook(self, gradebook_id, proxy):
"""Gets the ``OsidSession`` associated with the grade system lookup service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: ``a GradeSystemLookupSession``
:rtype: ``osid.grading.GradeSystemLookupSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_system_lookup()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_lookup()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeSystemLookupSession
@abc.abstractmethod
def get_grade_system_query_session(self, proxy):
"""Gets the ``OsidSession`` associated with the grade system query service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradeSystemQuerySession``
:rtype: ``osid.grading.GradeSystemQuerySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_system_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_query()`` is ``true``.*
"""
return # osid.grading.GradeSystemQuerySession
@abc.abstractmethod
def get_grade_system_query_session_for_gradebook(self, gradebook_id, proxy):
"""Gets the ``OsidSession`` associated with the grade system query service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: ``a GradeSystemQuerySession``
:rtype: ``osid.grading.GradeSystemQuerySession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_system_query()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeSystemQuerySession
@abc.abstractmethod
def get_grade_system_search_session(self, proxy):
"""Gets the ``OsidSession`` associated with the grade system search service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradeSystemSearchSession``
:rtype: ``osid.grading.GradeSystemSearchSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_system_search()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_search()`` is ``true``.*
"""
return # osid.grading.GradeSystemSearchSession
@abc.abstractmethod
def get_grade_system_search_session_for_gradebook(self, gradebook_id, proxy):
"""Gets the ``OsidSession`` associated with the grade system search service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: ``a GradeSystemSearchSession``
:rtype: ``osid.grading.GradeSystemSearchSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_system_search()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_search()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeSystemSearchSession
@abc.abstractmethod
def get_grade_system_admin_session(self, proxy):
"""Gets the ``OsidSession`` associated with the grade system administration service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradeSystemAdminSession``
:rtype: ``osid.grading.GradeSystemAdminSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_system_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_admin()`` is ``true``.*
"""
return # osid.grading.GradeSystemAdminSession
@abc.abstractmethod
def get_grade_system_admin_session_for_gradebook(self, gradebook_id, proxy):
"""Gets the ``OsidSession`` associated with the grade system admin service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: ``a GradeSystemAdminSession``
:rtype: ``osid.grading.GradeSystemAdminSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_system_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeSystemAdminSession
@abc.abstractmethod
def get_grade_system_notification_session(self, grade_system_receiver, proxy):
"""Gets the notification session for notifications pertaining to grade system changes.
:param grade_system_receiver: the grade system receiver
:type grade_system_receiver: ``osid.grading.GradeSystemReceiver``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradeSystemNotificationSession``
:rtype: ``osid.grading.GradeSystemNotificationSession``
:raise: ``NullArgument`` -- ``grade_system_receiver`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_system_notification()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_notification()`` is ``true``.*
"""
return # osid.grading.GradeSystemNotificationSession
@abc.abstractmethod
def get_grade_system_notification_session_for_gradebook(self, grade_system_receiver, gradebook_id, proxy):
"""Gets the ``OsidSession`` associated with the grade system notification service for the given gradebook.
:param grade_system_receiver: the grade system receiver
:type grade_system_receiver: ``osid.grading.GradeSystemReceiver``
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
        :return: a ``GradeSystemNotificationSession``
:rtype: ``osid.grading.GradeSystemNotificationSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
        :raise: ``NullArgument`` -- ``grade_system_receiver, gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_system_notification()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeSystemNotificationSession
@abc.abstractmethod
def get_grade_system_gradebook_session(self, proxy):
"""Gets the session for retrieving grade system to gradebook mappings.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradeSystemGradebookSession``
:rtype: ``osid.grading.GradeSystemGradebookSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_system_gradebook()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_gradebook()`` is ``true``.*
"""
return # osid.grading.GradeSystemGradebookSession
@abc.abstractmethod
def get_grade_system_gradebook_assignment_session(self, proxy):
"""Gets the session for assigning grade system to gradebook mappings.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradeSystemGradebookAssignmentSession``
        :rtype: ``osid.grading.GradeSystemGradebookAssignmentSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_system_gradebook_assignment()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_gradebook_assignment()`` is ``true``.*
"""
return # osid.grading.GradeSystemGradebookAssignmentSession
@abc.abstractmethod
def get_grade_system_smart_gradebook_session(self, gradebook_id, proxy):
"""Gets the session for managing smart gradebooks of grade systems.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradeSystemSmartGradebookSession``
:rtype: ``osid.grading.GradeSystemSmartGradebookSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_system_smart_gradebook()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_smart_gradebook()`` is ``true``.*
"""
return # osid.grading.GradeSystemSmartGradebookSession
@abc.abstractmethod
def get_grade_entry_lookup_session(self, proxy):
"""Gets the ``OsidSession`` associated with the grade entry lookup service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradeEntryLookupSession``
:rtype: ``osid.grading.GradeEntryLookupSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_entry_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_lookup()`` is ``true``.*
"""
return # osid.grading.GradeEntryLookupSession
@abc.abstractmethod
def get_grade_entry_lookup_session_for_gradebook(self, gradebook_id, proxy):
"""Gets the ``OsidSession`` associated with the grade entry lookup service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: ``a GradeEntryLookupSession``
:rtype: ``osid.grading.GradeEntryLookupSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_entry_lookup()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_lookup()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeEntryLookupSession
@abc.abstractmethod
def get_grade_entry_query_session(self, proxy):
"""Gets the ``OsidSession`` associated with the grade entry query service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradeEntryQuerySession``
:rtype: ``osid.grading.GradeEntryQuerySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
        :raise: ``Unimplemented`` -- ``supports_grade_entry_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_query()`` is ``true``.*
"""
return # osid.grading.GradeEntryQuerySession
@abc.abstractmethod
def get_grade_entry_query_session_for_gradebook(self, gradebook_id, proxy):
"""Gets the ``OsidSession`` associated with the grade entry query service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: ``a GradeEntryQuerySession``
:rtype: ``osid.grading.GradeEntryQuerySession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_entry_query()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeEntryQuerySession
@abc.abstractmethod
def get_grade_entry_search_session(self, proxy):
"""Gets the ``OsidSession`` associated with the grade entry search service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradeEntrySearchSession``
:rtype: ``osid.grading.GradeEntrySearchSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_entry_search()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_search()`` is ``true``.*
"""
return # osid.grading.GradeEntrySearchSession
@abc.abstractmethod
def get_grade_entry_search_session_for_gradebook(self, gradebook_id, proxy):
"""Gets the ``OsidSession`` associated with the grade entry search service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: ``a GradeEntrySearchSession``
:rtype: ``osid.grading.GradeEntrySearchSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_entry_search()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_search()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeEntrySearchSession
@abc.abstractmethod
def get_grade_entry_admin_session(self, proxy):
"""Gets the ``OsidSession`` associated with the grade entry administration service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradeEntryAdminSession``
:rtype: ``osid.grading.GradeEntryAdminSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_entry_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_admin()`` is ``true``.*
"""
return # osid.grading.GradeEntryAdminSession
@abc.abstractmethod
def get_grade_entry_admin_session_for_gradebook(self, gradebook_id, proxy):
"""Gets the ``OsidSession`` associated with the grade entry admin service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: ``a GradeEntryAdminSession``
:rtype: ``osid.grading.GradeEntryAdminSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_entry_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeEntryAdminSession
@abc.abstractmethod
def get_grade_entry_notification_session(self, grade_entry_receiver, proxy):
"""Gets the notification session for notifications pertaining to grade entry changes.
:param grade_entry_receiver: the grade entry receiver
:type grade_entry_receiver: ``osid.grading.GradeEntryReceiver``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradeEntryNotificationSession``
:rtype: ``osid.grading.GradeEntryNotificationSession``
:raise: ``NullArgument`` -- ``grade_entry_receiver`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grade_entry_notification()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_notification()`` is ``true``.*
"""
return # osid.grading.GradeEntryNotificationSession
@abc.abstractmethod
def get_grade_entry_notification_session_for_gradebook(self, grade_entry_receiver, gradebook_id, proxy):
"""Gets the ``OsidSession`` associated with the grade entry notification service for the given gradebook.
:param grade_entry_receiver: the grade entry receiver
:type grade_entry_receiver: ``osid.grading.GradeEntryReceiver``
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
        :return: a ``GradeEntryNotificationSession``
:rtype: ``osid.grading.GradeEntryNotificationSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
        :raise: ``NullArgument`` -- ``grade_entry_receiver, gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_grade_entry_notification()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradeEntryNotificationSession
@abc.abstractmethod
def get_gradebook_column_lookup_session(self, proxy):
"""Gets the ``OsidSession`` associated with the gradebook column lookup service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradebookColumnLookupSession``
:rtype: ``osid.grading.GradebookColumnLookupSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_column_lookup()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_lookup()`` is ``true``.*
"""
return # osid.grading.GradebookColumnLookupSession
@abc.abstractmethod
def get_gradebook_column_lookup_session_for_gradebook(self, gradebook_id, proxy):
"""Gets the ``OsidSession`` associated with the gradebook column lookup service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
        :return: a ``GradebookColumnLookupSession``
:rtype: ``osid.grading.GradebookColumnLookupSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_gradebook_column_lookup()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_lookup()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradebookColumnLookupSession
@abc.abstractmethod
def get_gradebook_column_query_session(self, proxy):
"""Gets the ``OsidSession`` associated with the gradebook column query service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradebookColumnQuerySession``
:rtype: ``osid.grading.GradebookColumnQuerySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_column_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_query()`` is ``true``.*
"""
return # osid.grading.GradebookColumnQuerySession
@abc.abstractmethod
def get_gradebook_column_query_session_for_gradebook(self, gradebook_id, proxy):
"""Gets the ``OsidSession`` associated with the gradebook column query service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradebookColumnQuerySession``
:rtype: ``osid.grading.GradebookColumnQuerySession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_gradebook_column_query()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradebookColumnQuerySession
@abc.abstractmethod
def get_gradebook_column_search_session(self, proxy):
"""Gets the ``OsidSession`` associated with the gradebook column search service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradebookColumnSearchSession``
:rtype: ``osid.grading.GradebookColumnSearchSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_column_search()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_search()`` is ``true``.*
"""
return # osid.grading.GradebookColumnSearchSession
@abc.abstractmethod
def get_gradebook_column_search_session_for_gradebook(self, gradebook_id, proxy):
"""Gets the ``OsidSession`` associated with the gradebook column search service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
        :return: a ``GradebookColumnSearchSession``
:rtype: ``osid.grading.GradebookColumnSearchSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_gradebook_column_search()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_search()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradebookColumnSearchSession
@abc.abstractmethod
def get_gradebook_column_admin_session(self, proxy):
"""Gets the ``OsidSession`` associated with the gradebook column administration service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradebookColumnAdminSession``
:rtype: ``osid.grading.GradebookColumnAdminSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_column_admin()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_admin()`` is ``true``.*
"""
return # osid.grading.GradebookColumnAdminSession
@abc.abstractmethod
def get_gradebook_column_admin_session_for_gradebook(self, gradebook_id, proxy):
"""Gets the ``OsidSession`` associated with the gradebook column admin service for the given gradebook.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: ``a GradebookColumnAdminSession``
:rtype: ``osid.grading.GradebookColumnAdminSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_gradebook_column_admin()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradebookColumnAdminSession
@abc.abstractmethod
def get_gradebook_column_notification_session(self, gradebook_column_receiver, proxy):
"""Gets the notification session for notifications pertaining to gradebook column changes.
        :param gradebook_column_receiver: the gradebook column receiver
:type gradebook_column_receiver: ``osid.grading.GradebookColumnReceiver``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradebookColumnNotificationSession``
:rtype: ``osid.grading.GradebookColumnNotificationSession``
:raise: ``NullArgument`` -- ``gradebook_column_receiver`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_column_notification()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_notification()`` is ``true``.*
"""
return # osid.grading.GradebookColumnNotificationSession
@abc.abstractmethod
def get_gradebook_column_notification_session_for_gradebook(self, gradebook_column_receiver, gradebook_id, proxy):
"""Gets the ``OsidSession`` associated with the gradebook column notification service for the given gradebook.
:param gradebook_column_receiver: the gradebook column receiver
:type gradebook_column_receiver: ``osid.grading.GradebookColumnReceiver``
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
        :return: a ``GradebookColumnNotificationSession``
:rtype: ``osid.grading.GradebookColumnNotificationSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_column_receiver, gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_gradebook_column_notification()`` or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
return # osid.grading.GradebookColumnNotificationSession
@abc.abstractmethod
def get_gradebook_column_gradebook_session(self, proxy):
"""Gets the session for retrieving gradebook column to gradebook mappings.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradebookColumnGradebookSession``
:rtype: ``osid.grading.GradebookColumnGradebookSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_column_gradebook()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_gradebook()`` is ``true``.*
"""
return # osid.grading.GradebookColumnGradebookSession
@abc.abstractmethod
def get_gradebook_column_gradebook_assignment_session(self, proxy):
"""Gets the session for assigning gradebook column to gradebook mappings.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradebookColumnGradebookAssignmentSession``
:rtype: ``osid.grading.GradebookColumnGradebookAssignmentSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_column_gradebook_assignment()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_gradebook_assignment()`` is
``true``.*
"""
return # osid.grading.GradebookColumnGradebookAssignmentSession
@abc.abstractmethod
def get_gradebook_column_smart_gradebook_session(self, gradebook_id, proxy):
"""Gets the session for managing smart gradebooks of gradebook columns.
:param gradebook_id: the ``Id`` of the gradebook
:type gradebook_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradebookColumnSmartGradebookSession``
:rtype: ``osid.grading.GradebookColumnSmartGradebookSession``
:raise: ``NotFound`` -- ``gradebook_id`` not found
:raise: ``NullArgument`` -- ``gradebook_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_column_smart_gradebook()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_smart_gradebook()`` is ``true``.*
"""
return # osid.grading.GradebookColumnSmartGradebookSession
@abc.abstractmethod
def get_gradebook_lookup_session(self, proxy):
"""Gets the OsidSession associated with the gradebook lookup service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradebookLookupSession``
:rtype: ``osid.grading.GradebookLookupSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_lookup() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_lookup()`` is true.*
"""
return # osid.grading.GradebookLookupSession
@abc.abstractmethod
def get_gradebook_query_session(self, proxy):
"""Gets the OsidSession associated with the gradebook query service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradebookQuerySession``
:rtype: ``osid.grading.GradebookQuerySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_query() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_query()`` is true.*
"""
return # osid.grading.GradebookQuerySession
@abc.abstractmethod
def get_gradebook_search_session(self, proxy):
"""Gets the OsidSession associated with the gradebook search service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradebookSearchSession``
:rtype: ``osid.grading.GradebookSearchSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_search() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_search()`` is true.*
"""
return # osid.grading.GradebookSearchSession
@abc.abstractmethod
def get_gradebook_admin_session(self, proxy):
"""Gets the OsidSession associated with the gradebook administration service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradebookAdminSession``
:rtype: ``osid.grading.GradebookAdminSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_admin() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_admin()`` is true.*
"""
return # osid.grading.GradebookAdminSession
@abc.abstractmethod
def get_gradebook_notification_session(self, gradebook_receiver, proxy):
"""Gets the notification session for notifications pertaining to gradebook service changes.
:param gradebook_receiver: the gradebook receiver
:type gradebook_receiver: ``osid.grading.GradebookReceiver``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradebookNotificationSession``
:rtype: ``osid.grading.GradebookNotificationSession``
:raise: ``NullArgument`` -- ``gradebook_receiver`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_notification() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_notification()`` is true.*
"""
return # osid.grading.GradebookNotificationSession
@abc.abstractmethod
def get_gradebook_hierarchy_session(self, proxy):
"""Gets the session traversing gradebook hierarchies.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradebookHierarchySession``
:rtype: ``osid.grading.GradebookHierarchySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_hierarchy() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_hierarchy()`` is true.*
"""
return # osid.grading.GradebookHierarchySession
@abc.abstractmethod
def get_gradebook_hierarchy_design_session(self, proxy):
"""Gets the session designing gradebook hierarchies.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``GradebookHierarchyDesignSession``
:rtype: ``osid.grading.GradebookHierarchyDesignSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_gradebook_hierarchy_design() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_hierarchy_design()`` is true.*
"""
return # osid.grading.GradebookHierarchyDesignSession
@abc.abstractmethod
def get_grading_batch_proxy_manager(self):
"""Gets the ``GradingBatchProxyManager``.
:return: a ``GradingBatchProxyManager``
:rtype: ``osid.grading.batch.GradingBatchProxyManager``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grading_batch() is false``
*compliance: optional -- This method must be implemented if
``supports_grading_batch()`` is true.*
"""
return # osid.grading.batch.GradingBatchProxyManager
grading_batch_proxy_manager = property(fget=get_grading_batch_proxy_manager)
@abc.abstractmethod
def get_grading_calculation_proxy_manager(self):
"""Gets the ``GradingCalculationProxyManager``.
:return: a ``GradingCalculationProxyManager``
:rtype: ``osid.grading.calculation.GradingCalculationProxyManager``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grading_calculation() is false``
*compliance: optional -- This method must be implemented if
``supports_grading_calculation()`` is true.*
"""
return # osid.grading.calculation.GradingCalculationProxyManager
grading_calculation_proxy_manager = property(fget=get_grading_calculation_proxy_manager)
@abc.abstractmethod
def get_grading_transform_proxy_manager(self):
"""Gets the ``GradingTransformProxyManager``.
        :return: a ``GradingTransformProxyManager``
:rtype: ``osid.grading.transform.GradingTransformProxyManager``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_grading_transform() is false``
*compliance: optional -- This method must be implemented if
``supports_grading_transform()`` is true.*
"""
return # osid.grading.transform.GradingTransformProxyManager
grading_transform_proxy_manager = property(fget=get_grading_transform_proxy_manager)
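# --- Illustrative usage sketch (not part of the OSID specification) ---
# With a concrete GradingProxyManager implementation, every session getter also
# takes an ``osid.proxy.Proxy`` carrying caller context from server environments;
# both arguments below are hypothetical placeholders.
def _example_proxy_gradebook_lookup_session(grading_proxy_manager, proxy):
    """Sketch: obtain a GradebookLookupSession through the proxy manager."""
    if grading_proxy_manager.supports_gradebook_lookup():
        # NullArgument is raised if ``proxy`` is null (see the contract above).
        return grading_proxy_manager.get_gradebook_lookup_session(proxy)
    return None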
|
from setuptools import setup
setup(
name="nnhealpix",
version="0.3.0",
description="",
url="",
author="Nicoletta Krachmalnicoff, Maurizio Tomasi",
author_email="nkrach@sissa.it, maurizio.tomasi@unimi.it",
license="MIT",
packages=["nnhealpix", "nnhealpix.layers"],
package_dir={"nnhealpix": "nnhealpix", "nnhealpix.layers": "nnhealpix/layers"},
package_data={"nnhealpix": ["ancillary_files/*"]},
include_package_data=True,
zip_safe=False,
)
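# Illustrative install commands (assumptions, not part of the package metadata):
# from the directory containing this setup.py one would typically run, e.g.
#     pip install .       # regular install
#     pip install -e .    # editable/development install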
|
import os, sys
import subprocess
from time import sleep
usage_string = """
Usage: python3 control_chrome.py <COMMAND>
<COMMAND>s available:
>> goto URL - Navigates the tab to 'URL' specified
>> previous - Go back in history
>> forward - Go forward in history
>> reload - Reloads current tab
>> close - Closes current tab
>> pin - Pins current tab
>> unpin - Unpins current tab
>> scrollDown N, smoothScrollDown N - Scrolls down by 'N' pixels
>> scrollUp N, smoothScrollUp N - Scrolls up by 'N' pixels
>> duplicate - Duplicates current tab and switches to it
>> bookmark - Bookmarks current tab
>> openLink N - Clicks on the 'N'th link of Google Search Results page
>> details - Displays some details about the current tab (tid, title, url)
>> speak - Speaks the title of the current tab
"""
def convert(url):
if url.startswith('www.'):
return 'https://' + url[len('www.'):]
if not url.startswith('http://') and not url.startswith('https://'):
return 'https://' + url
return url
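# Expected behaviour of convert(), derived from the branches above (illustrative):
#     convert("www.example.com")    -> "https://example.com"   (the "www." prefix is dropped)
#     convert("example.com")        -> "https://example.com"
#     convert("http://example.com") -> "http://example.com"    (scheme already present, unchanged)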
args = sys.argv
del args[0]
try:
command = args[0]
except IndexError:
command = "iDontKnowWhatIAmDoing"
print("-" * 40)
print("No. of Arguments:", len(args))
print("Args:", str(args))
print("-" * 40)
current_tid = int(subprocess.getoutput("""chromix-too raw chrome.tabs.getSelected null | grep -Po '"id":.*?[^\\\],"' """)[5:-2])
current_title = subprocess.getoutput("""chromix-too raw chrome.tabs.getSelected null | grep -Po '"title":.*?[^\\\],"' """)[9:-3]
current_url = subprocess.getoutput("""chromix-too raw chrome.tabs.getSelected null | grep -Po '"url":.*?[^\\\],"' """)[7:-3]
if command == "iDontKnowWhatIAmDoing":
print(usage_string)
exit()
elif command == "back" or command == "previous":
os.system("""chromix-too raw chrome.tabs.executeScript null '{"code":"window.history.back()"}' """)
elif command == "forward" or command == "next":
os.system("""chromix-too raw chrome.tabs.executeScript null '{"code":"window.history.forward()"}' """)
elif command == "pin":
os.system("""chromix-too raw chrome.tabs.update """ + str(current_tid) + """ '{"pinned":true}' """)
elif command == "unpin":
os.system("""chromix-too raw chrome.tabs.update """ + str(current_tid) + """ '{"pinned":false}' """)
elif command == "openLink":
try:
n = int(args[1])
except IndexError:
n = 1
cStr = """chromix-too raw chrome.tabs.executeScript null '{"code":"document.querySelectorAll(\\".g > div > .rc > .r > a\\")[""" + str(n-1) + """].click()"}' """
os.system(cStr)
elif command == "scrollDown" or command == "down":
try:
n = int(args[1])
except IndexError:
n = 50
os.system("""chromix-too raw chrome.tabs.executeScript null '{"code":"window:scrollBy(0,""" + str(n) + """)"}' """)
elif command == "scrollUp" or command == "up":
try:
n = int(args[1])
except IndexError:
n = 50
os.system("""chromix-too raw chrome.tabs.executeScript null '{"code":"window:scrollBy(0,-""" + str(n) + """)"}' """)
elif command == "smoothScrollDown" or command == "sDown":
try:
n = int(args[1])
except IndexError:
n = 50
for i in range(n//4):
os.system("""chromix-too raw chrome.tabs.executeScript null '{"code":"window:scrollBy(0,4)"}' """)
sleep(0.001)
elif command == "smoothScrollUp" or command == "sUp":
try:
n = int(args[1])
except IndexError:
n = 50
for i in range(n//4):
os.system("""chromix-too raw chrome.tabs.executeScript null '{"code":"window:scrollBy(0,-4)"}' """)
sleep(0.001)
elif command == "bookmark":
os.system("chromix-too raw chrome.bookmarks.create '{\"title\":\"" + str(current_title) + "\", \"url\":\"" + str(current_url) + "\"}'")
elif command == "reload":
os.system("chromix-too raw chrome.tabs.reload")
elif command == "remove" or command == "close":
os.system("chromix-too raw chrome.tabs.remove " + str(current_tid))
elif command == "details":
print("-" * 40)
print("Current tab ID:", current_tid)
print("Title:", current_title)
print("URL:", current_url)
print("-" * 40)
elif command == "remove" or command == "close":
os.system("chromix-too raw chrome.tabs.remove " + str(current_tid))
elif command == "duplicate" or command == "clone":
os.system("chromix-too raw chrome.tabs.duplicate " + str(current_tid))
elif command == "say" or command == "speak":
print("Title:", current_title)
os.system("chromix-too raw chrome.tabs.executeScript null '{\"code\":\"window.speechSynthesis.speak(new SpeechSynthesisUtterance(\\\"" + str(current_title) + "\\\"))\"}'")
elif command == "goto":
try:
goto_url = convert(str(args[1]))
except IndexError:
goto_url = "https://www.google.co.in"
os.system("chromix-too raw chrome.tabs.update " + str(current_tid) + " '{\"url\":\"" + goto_url + "\"}'")
else:
print("No such command available.")
print(usage_string)
exit()
|
# -*- coding: utf-8 -*-
import logging
from os import makedirs
from os.path import join, exists
from textlytics.sentiment.document_preprocessing import \
DocumentPreprocessor
from textlytics.sentiment.io_sentiment import Dataset
from textlytics.sentiment.io_sentiment import to_pickle
from textlytics.sentiment.lexicons import SentimentLexicons
from textlytics.sentiment.sentiment import Sentiment
log = logging.getLogger()
log.setLevel(logging.DEBUG)
def sentiment_lexicons_run(lexs_names=None, lex_path=None, output_folder=None,
evaluate=True):
"""
    Run lexicon-based sentiment analysis on the SemEval dataset.
Parameters
----------
lexs_names : list
List of path/file names for lexicons loading.
lex_path: str
Path to the directory with lexicon's files.
output_folder : str
Path where we want to save our results.
    evaluate : bool, True by default
        If True, evaluation metrics are computed; otherwise only the
        predictions are saved.
Returns
----------
Nothing, all necessary files will be saved automatically.
"""
dataset_name = 'semeval'
results = []
predictions = []
predictions_directory = join(output_folder, 'predictions')
if not exists(output_folder):
makedirs(output_folder)
log.info('New directory has been created in: {}'.format(output_folder))
if not exists(predictions_directory):
makedirs(predictions_directory)
log.info('Directory for predictions has been created: {}'.format(predictions_directory))
dataset = Dataset()
# df = dataset.load_semeval_2014_sentiment()
df = dataset.load_semeval_sentiment()
log.info('Sentiment distribution: {}'.format(df.Sentiment.value_counts()))
log.info('Pre-processing phase starts!')
dp = DocumentPreprocessor()
df.Document = [dp.remove_numbers(doc) for doc in df.Document]
sent_lex = SentimentLexicons(stemmed=False,
lexicons_path=lex_path)
lexicons = sent_lex.load_lexicons(lexicons_file_names=lexs_names)
s = Sentiment(n_jobs=len(lexs_names), output_results=output_folder)
_, lexicon_prediction, lexicon_result, classes = \
s.lex_sent_batch(
df=df,
dataset_name=dataset_name,
lexicons=lexicons)
results.append(lexicon_result)
predictions.append(lexicon_prediction)
to_pickle(f_path=join(output_folder, '{}-{}.pkl'.format(dataset_name,
'results')),
obj=results)
# ############################# exemplary run ##############################
lexicons_files = [
'AFINN-96.txt',
'AFINN-111.txt',
'Bing-Liu.txt',
'enchantedlearning.com.txt',
# 'past_future_list.txt',
# 'past_future_list_plus.txt',
'simple_list.txt',
'simple_list_plus.txt',
'simplest.txt',
'nrcEmotion.txt',
'mpaa.txt',
# 'nrcHashtag.txt',
# 'nrcHashtagBigrams.txt',
# 'sentiment140.txt',
# 'sentiment140Bigrams.txt',
'MSOL-lexicon.txt',
# 'Amazon-laptops-electronics-reviews-unigrams.txt',
# 'Amazon-laptops-electronics-reviews-bigrams.txt',
# 'Yelp-restaurant-reviews-unigrams.txt',
# 'Yelp-restaurant-reviews-bigrams.txt',
]
sentiment_lexicons_run(lexs_names=lexicons_files,
output_folder='results/semeval')
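# A hedged variant of the exemplary run above: ``lex_path`` would point at the
# directory holding the lexicon files listed in ``lexicons_files``; the path below
# is a hypothetical placeholder, not one defined in this repository.
#
# sentiment_lexicons_run(lexs_names=lexicons_files,
#                        lex_path='lexicons/',
#                        output_folder='results/semeval')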
|