hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
35deafd975317c421134eb40936909b54e0afa4b | 1,701 | py | Python | server/swagger_server/test/test_statements_controller.py | lhannest/pharos-beacon | 101da505203622a76882c1b21dc861a8143ed0d1 | [
"MIT"
] | null | null | null | server/swagger_server/test/test_statements_controller.py | lhannest/pharos-beacon | 101da505203622a76882c1b21dc861a8143ed0d1 | [
"MIT"
] | null | null | null | server/swagger_server/test/test_statements_controller.py | lhannest/pharos-beacon | 101da505203622a76882c1b21dc861a8143ed0d1 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from flask import json
from six import BytesIO
from swagger_server.models.beacon_annotation import BeaconAnnotation # noqa: E501
from swagger_server.models.beacon_statement import BeaconStatement # noqa: E501
from swagger_server.test import BaseTestCase
class TestStatementsController(BaseTestCase):
    """StatementsController integration test stubs"""

    def test_get_evidence(self):
        """Test case for get_evidence
        """
        # Query parameters as (name, value) pairs, matching the API contract.
        params = [
            ('keywords', 'keywords_example'),
            ('pageNumber', 56),
            ('pageSize', 56),
        ]
        url = '//evidence/{statementId}'.format(statementId='statementId_example')
        response = self.client.open(url, method='GET', query_string=params)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))

    def test_get_statements(self):
        """Test case for get_statements
        """
        params = [
            ('s', 's_example'),
            ('relations', 'relations_example'),
            ('t', 't_example'),
            ('keywords', 'keywords_example'),
            ('types', 'types_example'),
            ('pageNumber', 56),
            ('pageSize', 56),
        ]
        response = self.client.open('//statements', method='GET', query_string=params)
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
    # Allow running the integration test stubs directly from the command line.
    import unittest
    unittest.main()
| 31.5 | 82 | 0.562022 | 1,315 | 0.773075 | 0 | 0 | 0 | 0 | 0 | 0 | 509 | 0.299236 |
35df05205df26d9b8705fd6ac8ad49277f753502 | 752 | py | Python | mailmerge/smtp_dummy.py | Denzeldeveloper/Python-Auto-MailMerge- | 4df95a67ef1b1c0f79a09f6c1c373b81d4220822 | [
"MIT"
] | null | null | null | mailmerge/smtp_dummy.py | Denzeldeveloper/Python-Auto-MailMerge- | 4df95a67ef1b1c0f79a09f6c1c373b81d4220822 | [
"MIT"
] | null | null | null | mailmerge/smtp_dummy.py | Denzeldeveloper/Python-Auto-MailMerge- | 4df95a67ef1b1c0f79a09f6c1c373b81d4220822 | [
"MIT"
] | null | null | null | """Dummy SMTP API."""
class SMTP_dummy(object):  # pylint: disable=useless-object-inheritance
    # pylint: disable=invalid-name, no-self-use
    """Stand-in for an SMTP connection that records calls instead of sending."""

    # Class-level state: the most recent message handed to sendmail(),
    # inspectable after the fact from test code.
    msg_from = None
    msg_to = None
    msg = None

    def login(self, login, password):
        """Accept any credentials; there is nothing to authenticate against."""

    def sendmail(self, msg_from, msg_to, msg):
        """Record sender, recipients and body on the class for later checking."""
        SMTP_dummy.msg_from = msg_from
        SMTP_dummy.msg_to = msg_to
        SMTP_dummy.msg = msg

    def close(self):
        """No real connection exists, so closing is a no-op."""

    def clear(self):
        """Reset the recorded message state."""
        SMTP_dummy.msg_from = None
        SMTP_dummy.msg_to = []
        SMTP_dummy.msg = None
| 25.066667 | 71 | 0.606383 | 727 | 0.966755 | 0 | 0 | 0 | 0 | 0 | 0 | 286 | 0.380319 |
35dfaf06e8a5f5aaf7ae1089fe23e645c5b81780 | 3,395 | py | Python | steepshot_bot/steepshot_api.py | weyoume/wetelegrambot | fcee44a701319b190484e91b3ce444571a5bdd90 | [
"MIT"
] | null | null | null | steepshot_bot/steepshot_api.py | weyoume/wetelegrambot | fcee44a701319b190484e91b3ce444571a5bdd90 | [
"MIT"
] | null | null | null | steepshot_bot/steepshot_api.py | weyoume/wetelegrambot | fcee44a701319b190484e91b3ce444571a5bdd90 | [
"MIT"
] | null | null | null | import json
import logging
import requests
from requests.exceptions import RequestException
from steepshot_bot import settings
from steepshot_bot.exceptions import SteepshotServerError
from steepshot_bot.steem import get_signed_transaction
logger = logging.getLogger(__name__)
# Map of logical endpoint names to fully-qualified Steepshot API URLs.
# '%s' in 'log_upvote' is filled with the post identifier at call time.
API_URLS = {
    'posts_recent': settings.STEEPSHOT_API + '/v1_1/recent',
    'posts_new': settings.STEEPSHOT_API + '/v1_1/posts/new',
    'posts_hot': settings.STEEPSHOT_API + '/v1_1/posts/hot',
    'posts_top': settings.STEEPSHOT_API + '/v1_1/posts/top',
    'post_prepare': settings.STEEPSHOT_API + '/v1/post/prepare',
    'log_post': settings.STEEPSHOT_API + '/v1/log/post',
    'log_upvote': settings.STEEPSHOT_API + '/v1/log/post/%s/upvote'
}
def get_recent_posts(username: str) -> list:
    """Return the user's recent posts; an empty list on a network error."""
    params = {'username': username}
    try:
        payload = requests.get(API_URLS['posts_recent'], params=params).json()
    except RequestException as error:
        logger.error('Failed to retrieve data from api: {error}'.format(error=error))
        return []
    return payload.get('results', [])
def get_new_posts(username: str) -> list:
    """Return the user's new posts; an empty list on a network error."""
    params = {'username': username}
    try:
        payload = requests.get(API_URLS['posts_new'], params=params).json()
    except RequestException as error:
        logger.error('Failed to retrieve data from api: {error}'.format(error=error))
        return []
    return payload.get('results', [])
def get_hot_posts(username: str) -> list:
    """Return the user's hot posts; an empty list on a network error."""
    params = {'username': username}
    try:
        payload = requests.get(API_URLS['posts_hot'], params=params).json()
    except RequestException as error:
        logger.error('Failed to retrieve data from api: {error}'.format(error=error))
        return []
    return payload.get('results', [])
def get_top_posts(username: str) -> list:
    """Return the user's top posts; an empty list on a network error."""
    params = {'username': username}
    try:
        payload = requests.get(API_URLS['posts_top'], params=params).json()
    except RequestException as error:
        logger.error('Failed to retrieve data from api: {error}'.format(error=error))
        return []
    return payload.get('results', [])
def post_prepare(photo, title: str, username: str, tags: list = None):
    """Sign and submit a new post preparation request.

    Returns the server's JSON response, or {} on a network error.
    """
    try:
        signed = get_signed_transaction(username)
        form_fields = [
            ('title', title),
            ('username', username),
            ('trx', json.dumps(signed.json())),
        ]
        # Repeat the 'tags' field once per tag, as the API expects.
        for tag in (tags or []):
            form_fields.append(('tags', tag))
        resp = requests.post(API_URLS['post_prepare'],
                             data=form_fields,
                             files={'photo': photo})
        return resp.json()
    except RequestException as e:
        logger.error('Failed to retrieve data from api: %s', e)
        return {}
def log_new_post(username: str, error_occured: str = None):
    """Report a new-post event (optionally with an error) to the log endpoint.

    Returns the server's JSON response, or [] on a network error.
    """
    data = {
        'username': username,
        'error': error_occured,
    }
    try:
        return requests.post(API_URLS['log_post'], data=data).json()
    except RequestException as error:
        logger.error('Failed to retrieve data from api: {error}'.format(error=error))
        return []
def log_upvote_post(identifier: str, username: str, error_occured: str = None):
    """Report an upvote on the post `identifier` to the log endpoint.

    Unlike log_new_post, a network failure is escalated as
    SteepshotServerError after being logged.
    """
    data = {
        'username': username,
        'error': error_occured,
    }
    try:
        return requests.post(API_URLS['log_upvote'] % identifier, data=data).json()
    except RequestException as error:
        logger.error('Failed to retrieve data from api: {error}'.format(error=error))
        raise SteepshotServerError(error)
| 34.642857 | 110 | 0.648895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 728 | 0.214433 |
35e0f0d3502703e87d1e48941fca0756c0155a59 | 10,315 | py | Python | kernelized_correlation_filter.py | ElnuraMusaoglu/KernelizedCorrelationFilter | 78eab4297218b107cf7688e7e7c76d79b5609893 | [
"MIT"
] | null | null | null | kernelized_correlation_filter.py | ElnuraMusaoglu/KernelizedCorrelationFilter | 78eab4297218b107cf7688e7e7c76d79b5609893 | [
"MIT"
] | null | null | null | kernelized_correlation_filter.py | ElnuraMusaoglu/KernelizedCorrelationFilter | 78eab4297218b107cf7688e7e7c76d79b5609893 | [
"MIT"
] | null | null | null | '''
Elnura Musaoglu
2021
'''
import numpy as np
import cv2
from numpy.fft import fftn, ifftn, fft2, ifft2, fftshift
from numpy import conj, real
from utils import gaussian2d_rolled_labels, cos_window
from hog_cpp.fhog.get_hog import get_hog
# Path to the pre-trained VGG19 weights stored in MATLAB (.mat) format.
vgg_path = 'model/imagenet-vgg-verydeep-19.mat'
def create_model():
    """Build a VGG19-based feature extractor exposing five conv-layer outputs."""
    from scipy import io
    from keras.applications.vgg19 import VGG19
    from keras.models import Model
    mat = io.loadmat(vgg_path)
    # NOTE(review): VGG19's first positional parameter is `include_top`, not a
    # weights dict — passing the loaded .mat here looks suspicious; confirm
    # the intended way of loading these weights.
    model = VGG19(mat)
    # Layer indices whose activations are exposed as model outputs.
    ixs = [2, 5, 10, 15, 20]
    outputs = [model.layers[i].output for i in ixs]
    model = Model(inputs=model.inputs, outputs=outputs)
    # model.summary()
    return model
# Build the deep-feature extractor once at import time; reused by the tracker.
vgg_model = create_model()
class KernelizedCorrelationFilter:
    """Visual object tracker based on Kernelized Correlation Filters (KCF).

    Supports three feature channels ('gray', 'hog', 'deep') and a Gaussian
    correlation kernel. State (learned model, target position/size) is kept
    on the instance between frames.
    """

    def __init__(self, correlation_type='gaussian', feature='hog'):
        self.padding = 1.5  # extra area surrounding the target #padding = 2 #extra area surrounding the target
        self.lambda_ = 1e-4  # regularization
        self.output_sigma_factor = 0.1  # spatial bandwidth (proportional to target)
        self.correlation_type = correlation_type
        self.feature = feature
        self.resize = False  # set True in init() for large targets
        # GRAY
        if feature == 'gray':
            self.interp_factor = 0.075  # linear interpolation factor for adaptation
            self.sigma = 0.2  # gaussian kernel bandwidth
            self.poly_a = 1  # polynomial kernel additive term
            self.poly_b = 7  # polynomial kernel exponent
            self.gray = True
            self.cell_size = 1
        # HOG
        elif feature == 'hog':
            self.interp_factor = 0.02  # linear interpolation factor for adaptation
            self.sigma = 0.5  # gaussian kernel bandwidth
            self.poly_a = 1  # polynomial kernel additive term
            self.poly_b = 9  # polynomial kernel exponent
            self.hog = True
            self.hog_orientations = 9
            self.cell_size = 4
        # DEEP
        elif feature == 'deep':
            self.interp_factor = 0.02  # linear interpolation factor for adaptation
            self.sigma = 0.5  # gaussian kernel bandwidth
            self.poly_a = 1  # polynomial kernel additive term
            self.poly_b = 9  # polynomial kernel exponent
            self.deep = True
            self.cell_size = 4  # 8

    def start(self, init_gt, show, frame_list):
        """Track through `frame_list` starting from ground-truth box `init_gt`.

        init_gt is (x, y, w, h); returns an array of per-frame boxes.
        When `show` is True each frame is displayed with the tracked box.
        """
        poses = []
        poses.append(init_gt)
        init_frame = cv2.imread(frame_list[0])
        x1, y1, w, h = init_gt
        init_gt = tuple(init_gt)
        self.init(init_frame, init_gt)
        for idx in range(len(frame_list)):
            if idx != 0:
                current_frame = cv2.imread(frame_list[idx])
                bbox = self.update(current_frame)
                if bbox is not None:
                    x1, y1, w, h = bbox
                    if show is True:
                        if len(current_frame.shape) == 2:
                            current_frame = cv2.cvtColor(current_frame, cv2.COLOR_GRAY2BGR)
                        show_frame = cv2.rectangle(current_frame, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)),
                                                   (255, 0, 0), 1)
                        cv2.imshow('demo', show_frame)
                        cv2.waitKey(1)
                else:
                    print('bbox is None')
                # On a failed update the previous box is re-appended.
                poses.append(np.array([int(x1), int(y1), int(w), int(h)]))
        return np.array(poses)

    def init(self, image, roi):
        """Initialize the tracker model from the first frame and roi=(x, y, w, h)."""
        # Get image size and search window size
        x, y, w, h = roi
        self.image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        self.target_sz = np.array([h, w])
        self.target_sz_real = np.array([h, w])
        # Position is stored as (row, col) of the target center.
        self.pos = np.array([y + np.floor(h/2), x + np.floor(w/2)])
        if np.sqrt(h * w) >= 100:  # diagonal size >= threshold
            # Large targets: work at half resolution for speed.
            self.resize = True
            self.pos = np.floor(self.pos / 2)
            self.target_sz = np.floor(self.target_sz / 2)
        if self.resize:
            self.image = cv2.resize(self.image, (self.image.shape[1] // 2, self.image.shape[0] // 2))
        # window size, taking padding into account
        self.window_sz = np.floor(np.multiply(self.target_sz, (1 + self.padding)))
        self.output_sigma = round(round(np.sqrt(self.target_sz[0]*self.target_sz[1]), 4) * self.output_sigma_factor / self.cell_size, 4)
        # Desired Gaussian response in the Fourier domain; note (h, w) axes
        # are swapped to build yf_sz.
        yf_sz = np.floor(self.window_sz / self.cell_size)
        yf_sz[0] = np.floor(self.window_sz / self.cell_size)[1]
        yf_sz[1] = np.floor(self.window_sz / self.cell_size)[0]
        gauss = gaussian2d_rolled_labels(yf_sz, self.output_sigma)
        self.yf = fft2(gauss)
        # store pre-computed cosine window
        self.cos_window = cos_window([self.yf.shape[1], self.yf.shape[0]])
        # obtain a subwindow for training at newly estimated target position
        patch = self.get_subwindow(self.image, self.pos, self.window_sz)
        feat = self.get_features(patch)
        xf = fftn(feat, axes=(0, 1))
        kf = []
        if self.correlation_type == 'gaussian':
            kf = self.gaussian_correlation(xf, xf)
        # Kernel Ridge Regression solution in the Fourier domain.
        alphaf = np.divide(self.yf, (kf + self.lambda_))
        self.model_alphaf = alphaf
        self.model_xf = xf

    def update(self, image):
        """Locate the target in a new frame, update the model, return (x, y, w, h)."""
        self.image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.resize:
            self.image = cv2.resize(self.image, (self.image.shape[1] // 2, self.image.shape[0] // 2))
        patch = self.get_subwindow(self.image, self.pos, self.window_sz)
        zf = fftn(self.get_features(patch), axes=(0, 1))
        if self.correlation_type == 'gaussian':
            kzf = self.gaussian_correlation(zf, self.model_xf)
        response = real(ifftn(self.model_alphaf * kzf, axes=(0, 1)))  # equation for fast detection
        # Find indices and values of nonzero elements curr = np.unravel_index(np.argmax(gi, axis=None), gi.shape)
        delta = np.unravel_index(np.argmax(response, axis=None), response.shape)
        vert_delta, horiz_delta = delta[0], delta[1]
        if vert_delta > np.size(zf, 0) / 2:  # wrap around to negative half-space of vertical axis
            vert_delta = vert_delta - np.size(zf, 0)
        if horiz_delta > np.size(zf, 1) / 2:  # same for horizontal axis
            horiz_delta = horiz_delta - np.size(zf, 1)
        self.pos = self.pos + self.cell_size * np.array([vert_delta, horiz_delta])
        # obtain a subwindow for training at newly estimated target position
        patch = self.get_subwindow(self.image, self.pos, self.window_sz)
        feat = self.get_features(patch)
        xf = fftn(feat, axes=(0, 1))
        # Kernel Ridge Regression, calculate alphas (in Fourier domain)
        if self.correlation_type == 'gaussian':
            kf = self.gaussian_correlation(xf, xf)
        alphaf = np.divide(self.yf, (kf + self.lambda_))
        # subsequent frames, interpolate model
        self.model_alphaf = (1 - self.interp_factor) * self.model_alphaf + self.interp_factor * alphaf
        self.model_xf = (1 - self.interp_factor) * self.model_xf + self.interp_factor * xf
        if self.resize:
            # Map the half-resolution position back to full-image coordinates.
            pos_real = np.multiply(self.pos, 2)
        else:
            pos_real = self.pos
        box = [pos_real[1] - self.target_sz_real[1] / 2,
               pos_real[0] - self.target_sz_real[0] / 2,
               self.target_sz_real[1],
               self.target_sz_real[0]]
        return box[0], box[1], box[2], box[3]

    def get_subwindow(self, im, pos, sz):
        """Extract a sz=(h, w) patch centered at pos, replicating border pixels."""
        _p1 = np.array(range(0, int(sz[0]))).reshape([1, int(sz[0])])
        _p2 = np.array(range(0, int(sz[1]))).reshape([1, int(sz[1])])
        ys = np.floor(pos[0]) + _p1 - np.floor(sz[0]/2)
        xs = np.floor(pos[1]) + _p2 - np.floor(sz[1]/2)
        # Check for out-of-bounds coordinates, and set them to the values at the borders
        xs[xs < 0] = 0
        ys[ys < 0] = 0
        xs[xs > np.size(im, 1) - 1] = np.size(im, 1) - 1
        ys[ys > np.size(im, 0) - 1] = np.size(im, 0) - 1
        xs = xs.astype(int)
        ys = ys.astype(int)
        # extract image
        out1 = im[list(ys[0, :]), :, :]
        out = out1[:, list(xs[0, :]), :]
        return out

    def get_features(self, im):
        """Compute the configured feature map for a patch, windowed by cos_window."""
        if self.feature == 'hog':
            # HOG features, from Piotr's Toolbox
            x = np.double(self.get_fhog(im))
            return x * self.cos_window[:, :, None]
        if self.feature == 'gray':
            # Zero-mean grayscale intensities scaled to [0, 1].
            x = np.double(im) / 255
            x = x - np.mean(x)
            return x * self.cos_window[:, :, None]
        if self.feature == 'deep':
            # VGG19 activations normalized by their maximum.
            x = self.get_deep_feature(im)
            x = x / np.max(x)
            return x * self.cos_window[:, :, None]

    def get_fhog(self, im_patch):
        """Felzenszwalb HOG of a patch (input scaled to [0, 1])."""
        H = get_hog(im_patch/255)
        return H

    def gaussian_correlation(self, xf, yf):
        """Gaussian kernel correlation of two Fourier-domain feature maps.

        Implements the fast multi-channel kernel of Henriques et al.;
        returns the kernel correlation in the Fourier domain.
        """
        N = xf.shape[0] * xf.shape[1]
        xff = xf.reshape([xf.shape[0] * xf.shape[1] * xf.shape[2], 1], order='F')
        xff_T = xff.conj().T
        yff = yf.reshape([yf.shape[0] * yf.shape[1] * yf.shape[2], 1], order='F')
        yff_T = yff.conj().T
        xx = np.dot(xff_T, xff).real / N  # squared norm of x
        yy = np.dot(yff_T, yff).real / N  # squared norm of y
        # cross-correlation term in Fourier domain
        xyf = xf * conj(yf)
        ixyf = ifftn(xyf, axes=(0, 1))
        rxyf = real(ixyf)
        xy = np.sum(rxyf, 2)  # to spatial domain
        # calculate gaussian response for all positions, then go back to the Fourier domain
        sz = xf.shape[0] * xf.shape[1] * xf.shape[2]
        mltp = (xx + yy - 2 * xy) / sz
        crpm = -1 / (self.sigma * self.sigma)
        expe = crpm * np.maximum(0, mltp)
        expx = np.exp(expe)
        kf = fftn(expx, axes=(0, 1))
        return kf

    def get_deep_feature(self, im):
        """VGG19 conv features of a patch, resized to the tracker's window grid."""
        # Preprocessing
        from numpy import expand_dims
        #img = im.astype('float32') # note: [0, 255] range
        img = im  # note: [0, 255] range
        img = cv2.resize(img, (224, 224))
        img = expand_dims(img, axis=0)
        feature_maps = vgg_model.predict(img)
        # Use the 4th exposed layer's activations for the first sample.
        f_map = feature_maps[3][0][:][:][:]
        feature_map_n = cv2.resize(f_map, (self.cos_window.shape[1], self.cos_window.shape[0]),
                                   interpolation=cv2.INTER_LINEAR)
        return feature_map_n
| 40.93254 | 136 | 0.575279 | 9,630 | 0.933592 | 0 | 0 | 0 | 0 | 0 | 0 | 1,742 | 0.16888 |
35e24c24b699b1a26e6f2ffbc1f1f23fc306cb6b | 3,839 | py | Python | sketches/demo_02a/editor.py | heerdyes/raspi-art | 2e38c1926b6a6f4c745e0629b193d9c3c15acc22 | [
"MIT"
] | 1 | 2021-02-02T12:36:07.000Z | 2021-02-02T12:36:07.000Z | sketches/demo_02a/editor.py | heerdyes/raspi-art | 2e38c1926b6a6f4c745e0629b193d9c3c15acc22 | [
"MIT"
] | null | null | null | sketches/demo_02a/editor.py | heerdyes/raspi-art | 2e38c1926b6a6f4c745e0629b193d9c3c15acc22 | [
"MIT"
] | null | null | null | from arch import *
from wnds import *
from helper import *
import os
class Ed(Wnd, Pub):
    """A minimal line-based text-editor window for a Processing sketch.

    Inherits window geometry/drawing from Wnd and event publishing from Pub.
    The buffer is a list of lines in self.txt; (self.r, self.c) is the cursor
    position as (row, column).
    """

    def __init__(self, x, y, w, h, nm):
        Wnd.__init__(self, x, y, w, h, nm)
        Pub.__init__(self)
        self.txt = ['']  # buffer contents, one string per line
        self.r = 0       # cursor row (index into self.txt)
        self.c = 0       # cursor column within the current line
        self.mt = 24     # top margin reserved for the title bar (pixels)

    def rendercursor(self, c):
        """Draw the text cursor at (self.r, self.c) using color c."""
        rowht = 20  # pixel height of one text row
        colwt = 8   # pixel width of one character column
        y = self.y + self.mt + 10 + self.r * rowht
        x = self.x + 26 + self.c * colwt
        drawcursor(x, y, colwt, rowht, 4, c)

    def renderhighlighter(self):
        """Highlight the line the cursor is currently on."""
        rowht = 20
        y = self.y + self.mt + 10 + self.r * rowht
        drawhighlighter(self.x, y, self.w, rowht, color(0, 92, 0))

    def renderstatus(self):
        """Draw the status bar showing the current cursor position."""
        sy = self.y + self.h - 20
        stroke(0, 144, 0)
        line(self.x, sy, self.x + self.w, sy)
        fill(0, 255, 0)
        text('| %03d:%03d |' % (self.r, self.c), self.x + 5, sy)
        noFill()

    def render(self, c):
        """Draw the whole editor: frame, title, text, highlight, cursor, status."""
        Wnd.render(self, c)
        line(self.x, self.y + self.mt, self.x + self.w, self.y + self.mt)
        fill(0, 255, 0)
        text(self.nm, self.x + 5, self.y + 2)
        drawlines(self.x, self.y, self.mt, self.txt, color(0, 144, 0), color(0, 255, 0))
        self.renderhighlighter()
        self.rendercursor(color(0, 192, 0))
        self.renderstatus()

    def edithandler(self, k, kc):
        """Handle a key event: k is the typed character, kc the key code."""
        currow = self.r
        curcol = self.c
        curln = self.txt[currow]
        if kc == 10:
            # Enter: open a new (empty) line and move the cursor onto it.
            # NOTE(review): the new line is always appended at the end of the
            # buffer, not inserted after the current row — confirm intent.
            self.txt.append('')
            self.r = currow + 1
            self.c = 0
        elif kc == 8:
            # Delete the character under the cursor. BUGFIX: the edited line
            # was previously computed and printed but never stored back, so
            # the key had no visible effect.
            self.txt[currow] = curln[0:curcol] + curln[curcol + 1:]
        elif kc == 16:
            pass  # shift: no special handling
        elif kc == 20:
            pass  # caps lock: no special handling
        elif kc == 37:
            self.handleleft()
        elif kc == 38:
            self.handletop()
        elif kc == 39:
            self.handleright()
        elif kc == 40:
            self.handledown()
        elif kc == 35:
            self.handleend()
        elif kc == 36:
            self.handlehome()
        else:
            # Printable character: insert at the cursor and advance.
            self.txt[currow] = curln[0:curcol] + k + curln[curcol:]
            self.c = curcol + 1
        self.wipe()
        self.render(color(0, 255, 0))

    def handleend(self):
        """Move the cursor to the end of the current line."""
        self.c = len(self.txt[self.r])

    def handlehome(self):
        """Move the cursor to the start of the current line."""
        self.c = 0

    def handleleft(self):
        """Move the cursor one position left, wrapping to the previous line."""
        if self.r == 0 and self.c == 0:
            self.publish(['stat', 'already at beginning'])
        elif self.c > 0:
            self.c -= 1
        elif self.c == 0 and self.r > 0:
            rownum = self.r - 1
            self.c = len(self.txt[rownum])
            self.r = rownum

    def handletop(self):
        """Move the cursor one line up, clamping the column to the line length."""
        if self.r == 0:
            self.publish(['stat', 'already topmost line'])
        else:
            prevlinum = self.r - 1
            prevln = self.txt[prevlinum]
            # Clamp to the end of the shorter line. FIX: was len(prevln)-1,
            # which went negative on empty lines and disagreed with
            # handledown()'s clamping rule.
            if self.c > len(prevln):
                self.c = len(prevln)
            self.r = prevlinum

    def handleright(self):
        """Move the cursor one position right, wrapping to the next line."""
        currow = self.r
        curcol = self.c
        curln = self.txt[currow]
        if currow == len(self.txt) - 1 and curcol == len(curln):
            # Consistency fix: report through publish() like every other
            # handler (self.stat() is not defined on this class).
            self.publish(['stat', 'already at the end'])
        elif curcol == len(curln) and currow < len(self.txt) - 1:
            self.c = 0
            self.r += 1
        else:
            self.c += 1

    def handledown(self):
        """Move the cursor one line down, clamping the column to the line length."""
        currow = self.r
        curcol = self.c
        if currow == len(self.txt) - 1:
            self.publish(['stat', 'already last line'])
        else:
            nxtrow = currow + 1
            nxtln = self.txt[nxtrow]
            if curcol > len(nxtln):
                self.c = len(nxtln)
            self.r = nxtrow

    def savebuffer(self):
        """Write the buffer to fs/<window name> under the working directory."""
        cwd = os.getcwd()
        afp = os.path.join(cwd, 'fs', self.nm)
        self.publish(['stat', 'saving to file: %s' % afp])
        with open(afp, 'w') as f:
            for ln in self.txt:
                f.write('%s\n' % ln)
| 28.021898 | 79 | 0.505079 | 3,768 | 0.981506 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.058609 |
35e6fd9082eae1ca801cddf99ab1c81fb1bb1309 | 23,626 | py | Python | app.py | Ator97/dataMiningDataviz | 0cf78b93c45ee08f73045b057f95290f8bf7d26e | [
"Apache-2.0"
] | null | null | null | app.py | Ator97/dataMiningDataviz | 0cf78b93c45ee08f73045b057f95290f8bf7d26e | [
"Apache-2.0"
] | null | null | null | app.py | Ator97/dataMiningDataviz | 0cf78b93c45ee08f73045b057f95290f8bf7d26e | [
"Apache-2.0"
] | null | null | null | import base64
import datetime
import io
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import pandas as pd
import plotly.express as px
from apyori import apriori
from scipy.spatial import distance
import plotly.graph_objects as go
import numpy as np
from sklearn import linear_model
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
from kneed import KneeLocator
# Control variables: each n* counter gates its callback so the expensive work
# runs only once per button click (the callbacks also fire on other inputs).
nut = 1  # guard counter for the data-upload callback
ncd = 1  # guard counter for the cross/correlation-matrix callback
ndm = 1  # guard counter for the distance-matrix callback
nam = 1  # guard counter for the apriori callback
nc = 1   # guard counter for the clustering-classification callback
ns = 1   # guard counter for the sigmoid-classification callback
mensaje = ""  # message shown with the sigmoid classification result

external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# System user interface
app.layout = html.Div([
    # Title
    html.H6("Data Minning Crawler"),
    # Configuration menu for uploading files
    dcc.Upload(
        id='upload-data',
        children=html.Div(['Toma y suelta o ', html.A('seleciona el archivo')]),
        style={
            'width': '98%',
            'height': '60px',
            'lineHeight': '60px',
            'borderWidth': '1px',
            'borderStyle': 'dashed',
            'borderRadius': '5px',
            'textAlign': 'center',
            'margin': '10px'
        },
        # In case we want to analyse more files in the same session
        multiple=True
    ),
    html.Div([
        "Separador: ", dcc.Input(id='separador', value=',', type='text'),
        " Decimal: ", dcc.Input(id='decimal', value='.', type='text'),
        html.Button('Cargar Archivo', id='loadFile', n_clicks=0, style={'width': '25%', 'margin': '3%'}),
    ]),
    # Tabs section
    dcc.Tabs(id='tabsControlInput', value='tab-1',
        children=[
            # Tab with the loaded data set
            dcc.Tab(label='Set de datos', value='tab-1', children=[
                html.Div(id="output-data-upload"),
            ]),
            # Tab with the correlation matrix and correlation plot
            dcc.Tab(label='Correlación', value='tab-2', children=[
                dcc.Dropdown(
                    id='correlationMethod',
                    options=[
                        {'label': 'Pearson', 'value': 'pearson'},
                        {'label': 'Kendall', 'value': 'kendall'},
                        {'label': 'Spearman', 'value': 'spearman'}
                    ],
                    value='pearson', style={'width': '50%', 'margin': '2%'}),
                html.Button('Ejecutar', id='executeCorr', n_clicks=0, style={'width': '25%', 'margin': '3%'}),
                dcc.Tabs(id="subtabs-1", value="subtab-1", children=[
                    dcc.Tab(label='Matriz de correlación', value='subtab-5', children=[
                        html.Div(id="crossMatrix"), ]),
                    dcc.Tab(label='Grafica', value='subtab-2', children=[
                        html.Div(id="graphCrossMatrix"), ]),
                ])
            ]),
            # Tab with the results of the apriori algorithm
            dcc.Tab(label='Apriori', value='tab-3', children=[
                html.Div([
                    "Soporte mínimo ", dcc.Input(
                        id="soporteMinimo", type="number", placeholder="Valor de soporte mínimo",
                        min=0, max=100, step=0.0001, value=0.003, style={'width': '6%', 'margin': '2%'}),
                    " Confidencia mínima ", dcc.Input(
                        id="confidenciaMinima", type="number", placeholder="Valor de confidencia mínimo",
                        min=0, max=100, step=0.01, value=0.2, style={'width': '6%', 'margin': '2%'}),
                    " Elevación mínima ", dcc.Input(
                        id="elevacionMinima", type="number", placeholder="Valor de elevacion mínimo",
                        min=0, max=100, step=0.01, value=3, style={'width': '6%', 'margin': '2%'}),
                    " Tamaño mínimo ", dcc.Input(
                        id="tamañoMinimo", type="number", placeholder="Valor de tamaño mínimo",
                        min=0, max=100, step=0.01, value=2, style={'width': '6%', 'margin': '2%'}),
                    html.Button(' Ejecutar', id='executeAprori', n_clicks=0, style={'margin': '2%'}),
                    html.Div(id="aprioriMatrix")
                ])
            ]),
            # Tab with the distance-matrix results
            dcc.Tab(label='Distancias', value='tab-4', children=[
                dcc.Dropdown(
                    id='distance',
                    options=[
                        {'label': 'Chebyshev', 'value': 'chebyshev'},
                        {'label': 'Cityblock', 'value': 'cityblock'},
                        {'label': 'Euclidean', 'value': 'euclidean'}
                    ],
                    value='euclidean', style={'width': '50%', 'margin': '2%'}),
                html.Button('Ejecutar', id='executeDis', n_clicks=0, style={'width': '25%', 'margin': '3%'}),
                html.Div(id="distanceMatrix"),
            ]),
            # Tab with the partitional clustering result
            dcc.Tab(label='Clustering Particional', value='tab-5', children=[
                html.Button('Ejecutar', id='executeCluster', n_clicks=0, style={'width': '25%', 'margin': '3%'}),
                dcc.Tabs(id="subtabs-2", value="subtab-2", children=[
                    dcc.Tab(label='Grafica del Codo', value='subtab-5', children=[
                        html.Div(id="elbow"),
                    ]),
                    dcc.Tab(label='Gráfica del Cluster', value='subtab-2', children=[
                        html.Div(id="cluster"),
                    ]),
                ])
            ]),
            # Cancer-detection menu tab. Explicitly requested.
            dcc.Tab(label='Clasificación Sigmoide', value='tab-6', children=[
                html.Button(' Ejecutar', id='executeSigmoide', n_clicks=0,
                    style={'margin': '2%', 'textAlign': 'center'}),
                html.Div([
                    html.Div([
                        "Compactividad",
                        dcc.Input(
                            id="compactividad", type="number", value= 0.04362,
                            placeholder="Compactividad", style={'margin': '5%', 'textAlign': 'center'}),
                    ], className="six columns"),
                    html.Div([
                        "Textura",
                        dcc.Input(
                            id="textura", type="number", value=24.54,
                            placeholder="Textura", style={'margin': '5%', 'textAlign': 'center'}),
                    ], className="six columns"),
                ], className="row"),
                html.Div([
                    html.Div([
                        "Area",
                        dcc.Input(
                            id="area", type="number", value=181.0,
                            placeholder="Area", style={'margin': '5%', 'textAlign': 'center'}),
                    ], className="six columns"),
                    html.Div([
                        "Concavidad",
                        dcc.Input(
                            id="concavidad", type="number", value = 0,
                            placeholder="Concavidad", style={'margin': '5%', 'textAlign': 'center'}),
                    ], className="six columns"),
                ], className="row"),
                html.Div([
                    html.Div([
                        "Simetria",
                        dcc.Input(
                            id="simetria", type="number", value=0.1587,
                            placeholder="Simetria", style={'margin': '5%', 'textAlign': 'center'}),
                    ], className="six columns"),
                    html.Div([
                        "Dimensión fractal",
                        dcc.Input(
                            id="dimensionFractal", type="number", value=1.0,
                            placeholder="Dimensión Fractal", style={'margin': '5%', 'textAlign': 'center'}),
                    ], className="six columns"),
                ], className="row"),
                html.Div(id='sigmoide', style={'textAlign': 'center'}),
            ]),
        ]),
    # Containers for the content built above
    html.Div(id='tabsControl'),
    html.Div(id='subtabsControl')
])
def parse_data(contents, filename, separador, decimal):
    """Decode a Dash upload into a pandas DataFrame.

    Adapted from the Dash documentation example.

    Parameters
    ----------
    contents : str
        Dash upload payload of the form ``"data:<mime>;base64,<data>"``.
    filename : str
        Original file name; its extension selects the parser.
    separador : str
        Column separator for CSV files.
    decimal : str
        Decimal-point character for CSV files.

    Returns
    -------
    pandas.DataFrame on success, or an html.Div with an error message.
    """
    # The upload payload is always "<content type>,<base64 data>", so it must
    # be split on ',' regardless of the CSV column separator. BUGFIX: the
    # original split on `separador`, which broke uploads whenever the
    # separator was not a comma.
    content_type, content_string = contents.split(',', 1)
    decoded = base64.b64decode(content_string)
    try:
        if "csv" in filename:
            # Honour the user-supplied separator and decimal mark (the
            # separator was previously accepted but never passed to pandas).
            df = pd.read_csv(io.StringIO(decoded.decode("utf-8")),
                             sep=separador, decimal=decimal)
        elif "xls" in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded))
        elif "txt" in filename or "tsv" in filename:
            # Whitespace-delimited text. BUGFIX: the original condition
            # `"txt" or "tsv" in filename` was always true.
            df = pd.read_csv(io.StringIO(decoded.decode("utf-8")), delimiter=r"\s+")
        else:
            raise ValueError("Unsupported file type: %s" % filename)
    except Exception as e:
        print(e)
        return html.Div(["There was an error processing this file."])
    return df
# File-upload rendering callback, adapted from the Dash documentation.
@app.callback(
    Output("output-data-upload", "children"),
    [
        Input("upload-data", "contents"),
        Input("upload-data", "filename"),
        Input("separador", "value"),
        Input("decimal", "value"),
        Input("loadFile", "n_clicks")
    ]
)
def update_table(contents, filename, separador, decimal, n_clicks):
    """Render the uploaded data set as a DataTable.

    Uses the module-level counter `nut` as a guard so the table is rebuilt
    only once per click of the load button, even though the other inputs
    also trigger this callback.
    """
    table = html.Div()
    global nut
    if nut == n_clicks:
        nut = nut + 1
        if contents:
            # dcc.Upload(multiple=True) delivers lists; only the first
            # uploaded file is shown.
            contents = contents[0]
            filename = filename[0]
            df = parse_data(contents, filename, separador, decimal)
            table = html.Div(
                [
                    html.H5(filename),
                    dash_table.DataTable(
                        data=df.to_dict("rows"),
                        columns=[{"name": i, "id": i} for i in df.columns],
                        fixed_rows={'headers': True},
                    ),
                ]
            )
    return table
# Callback: compute the correlation matrix and its heat-map figure.
@app.callback(
    [
        Output('graphCrossMatrix', 'children'),
        Output('crossMatrix', 'children'),
    ], [
        Input('decimal', 'value'),
        Input('separador', 'value'),
        Input('upload-data', 'contents'),
        Input('upload-data', 'filename'),
        Input('correlationMethod', 'value'),
        Input('executeCorr', 'n_clicks')
    ]
)
def crossData(decimal, separador, contents, filename, correlationMethod, n_clicks):
    """Build the correlation matrix table and heat map for the uploaded data.

    Guarded by the module-level counter `ncd` so the analysis runs once per
    click of the 'Ejecutar' button.
    """
    # Objects returned when nothing is computed yet.
    table = html.Div()
    figure = dcc.Graph()
    # Run only after validating that input data is present.
    global ncd
    if ncd == n_clicks:
        if contents:
            ncd = ncd + 1
            contents = contents[0]
            filename = filename[0]
            # Compute the correlation matrix with the selected method.
            df = parse_data(contents, filename, separador, decimal)
            df = df.set_index(df.columns[0])
            df = df.corr(method=correlationMethod)
            # Table built from the correlation matrix.
            table = html.Div(
                [
                    dash_table.DataTable(
                        data=df.to_dict("rows"),
                        columns=[{"name": i, "id": i} for i in df.columns],
                    ),
                ]
            ),  # NOTE(review): this trailing comma makes `table` a 1-tuple — confirm Dash renders it as intended
            # Heat-map figure of the same matrix.
            fig = px.imshow(df)
            figure = html.Div(
                [
                    dcc.Graph(
                        id='kind',
                        figure=fig
                    ),
                ]
            )
    return figure, table
# Callback: compute the pairwise distance matrix between data rows.
@app.callback(
    Output('distanceMatrix', 'children'),
    [
        Input('decimal', 'value'),
        Input('separador', 'value'),
        Input('upload-data', 'contents'),
        Input('upload-data', 'filename'),
        Input('distance', 'value'),
        Input('executeDis', 'n_clicks')
    ]
)
def distanceMatrix(decimal, separador, contents, filename, correlationMethod, n_clicks):
    """Render the row-to-row distance matrix using the selected metric.

    `correlationMethod` carries the metric name ('euclidean', 'cityblock'
    or 'chebyshev'). Guarded by the module-level counter `ndm`.
    """
    # Object returned when nothing is computed yet.
    table = html.Div()
    global ndm
    if ndm == n_clicks:
        if contents:
            ndm = ndm + 1
            # Load the uploaded data.
            contents = contents[0]
            filename = filename[0]
            df = parse_data(contents, filename, separador, decimal)
            df = df.set_index(df.columns[0])
            index = df.index[:].tolist()
            df = df.values.tolist()
            # NOTE(review): the row label is appended to each feature vector,
            # so it participates in the distance computation — confirm this is
            # intended (a non-numeric index would make scipy raise here).
            df = [df[i] + [index[i]] for i in range(0, len(df))]
            l = []
            # Compute the full pairwise distance matrix.
            for i in df:
                ll = []
                for j in df:
                    if correlationMethod == 'euclidean':
                        ll.append(round(distance.euclidean(i, j), 2))
                    elif correlationMethod == 'cityblock':
                        ll.append(round(distance.cityblock(i, j), 2))
                    elif correlationMethod == 'chebyshev':
                        ll.append(round(distance.chebyshev(i, j), 2))
                l.append(ll)
            # Wrap results in a DataFrame for display styling.
            df = pd.DataFrame(l)
            # Returned table, adaptive to the (potentially large) column count.
            table = html.Div(
                [
                    dash_table.DataTable(
                        data=df.to_dict("rows"),
                        columns=[{"name": str(i), "id": str(i), "type": "numeric"} for i in df.columns],
                        fixed_rows={'headers': True},
                        style_table={'overflowX': 'auto', 'overflowY': 'auto'},
                        style_cell={
                            'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
                            'overflow': 'scroll'}
                    ),
                ]
            )
    return table
def inspect(results):
    """Flatten apyori records into (rhs, lhs, support, confidence, lift) rows.

    Each record is expected to look like apyori's RelationRecord: index 1 is
    the support and index 2 holds the ordered statistics, whose first entry
    carries (items_base, items_add, confidence, lift).
    """
    rows = []
    for record in results:
        stat = record[2][0]
        rows.append((tuple(stat[0]), tuple(stat[1]), record[1], stat[2], stat[3]))
    return rows
# Callback: run the apriori association-rule analysis and show its table.
@app.callback(
    Output('aprioriMatrix', 'children'),
    [
        Input('decimal', 'value'),
        Input('separador', 'value'),
        Input('upload-data', 'contents'),
        Input('upload-data', 'filename'),
        Input('soporteMinimo', 'value'),
        Input('confidenciaMinima', 'value'),
        Input('elevacionMinima', 'value'),
        Input('tamañoMinimo', 'value'),
        Input('executeAprori', 'n_clicks')
    ]
)
def aprioriMatrix(decimal, separador, contents, filename, soporteMinimo, confidenciaMinima, elevacionMinima, tamañoMinimo, n_clicks):
    """Mine association rules with apriori and render them as a table.

    The four threshold inputs map directly onto apyori's min_support,
    min_confidence, min_lift and min_length. Guarded by the module-level
    counter `nam`.
    """
    # Object returned when nothing is computed yet.
    table = html.Div()
    # Begin analysis.
    global nam
    if nam == n_clicks:
        if contents :
            nam = nam + 1
            # Load the uploaded data.
            contents = contents[0]
            filename = filename[0]
            df = parse_data(contents, filename, separador, decimal)
            df = df.set_index(df.columns[0])
            # The first column was taken as the index; rebuild row-wise
            # transactions of string items for apriori.
            transactions = []
            for i in range(0, len(df.index)):
                transactions.append([str(df.values[i, j]) for j in range(0, len(df.columns) )])
            # Run the algorithm.
            from apyori import apriori
            rules = apriori(transactions, min_support = soporteMinimo, min_confidence = confidenciaMinima, min_lift = elevacionMinima, min_length = tamañoMinimo)
            # Materialize the results.
            results = list(rules)
            # Build a frame to display the mined rules.
            df = pd.DataFrame(inspect(results),
                columns=['rhs', 'lhs', 'Soporte', 'Confidencia', 'Elevación'])
            # Returned table object.
            table = html.Div([
                dash_table.DataTable(
                    data=df.to_dict("rows"),
                    columns=[{"name": str(i), "id": str(i)} for i in df.columns],
                    fixed_rows={'headers': True},
                    style_table={'overflowX': 'auto', 'overflowY': 'auto'},
                    style_cell={
                        'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
                        'overflow': 'scroll'}
                ),
            ])
    return table
#Callback: K-means clustering analysis (runs when "executeCluster" is clicked).
@app.callback(
    [
        Output('elbow', 'children'),
        Output('cluster','children')
    ],[
        Input('decimal','value'),
        Input('separador','value'),
        Input('upload-data', 'contents'),
        Input('upload-data', 'filename'),
        Input('executeCluster', 'n_clicks')
    ]
)
def clustering(decimal,separador,contents, filename,n_clicks):
    """Cluster the uploaded dataset with K-means.

    Returns a pair of html.Divs: the elbow chart (SSE vs. number of clusters)
    and a 3-D scatter of the data colored by cluster label. Both stay as empty
    dcc.Graphs until a file is uploaded and the button receives a fresh click
    (tracked through the global counter nc).
    """
    #Objects to return
    figure1 = dcc.Graph()
    figure2 = dcc.Graph()
    #Start the analysis
    global nc
    if nc == n_clicks:
        if contents :
            nc = nc+1
            #Load the data
            contents = contents[0]
            filename = filename[0]
            df = parse_data(contents, filename,separador,decimal)
            #Model variables: every column of the dataframe
            VariablesModelo = df.iloc[:,:].values
            #Sum of squared errors (inertia) for k = 2..15
            SSE = []
            for i in range(2, 16):
                km = KMeans(n_clusters=i)
                km.fit(VariablesModelo)
                SSE.append(km.inertia_)
            #Elbow chart: plot SSE against the actual k values.
            #BUGFIX: previously x = np.arange(len(SSE)) started the axis at 0,
            #while SSE[0] corresponds to k=2 — the chart mislabeled the number
            #of clusters and disagreed with the KneeLocator range below.
            x = np.arange(2, 2 + len(SSE))
            fig = go.Figure( data= go.Scatter(x=x,y=SSE))
            #Locate the optimal number of clusters on the elbow curve
            kl = KneeLocator(range(2, 16), SSE, curve="convex", direction="decreasing")
            #Build the model with the k found above
            #(a redundant, unused duplicate KMeans fit was removed here)
            model = KMeans(n_clusters = kl.elbow, init = "k-means++", max_iter = 300, n_init = 10, random_state = 0)
            y_clusters = model.fit_predict(VariablesModelo)
            #3-D scatter of the data, colored by cluster label.
            # NOTE(review): only the first three columns are plotted; assumes the
            # dataset has at least 3 numeric columns — confirm upstream.
            labels = model.labels_
            trace = go.Scatter3d(x=VariablesModelo[:, 0], y=VariablesModelo[:, 1], z=VariablesModelo[:, 2], mode='markers',marker=dict(color = labels, size= 3, line=dict(color= 'black',width = 3)))
            layout = go.Layout(margin=dict(l=0,r=0))
            data = [trace]
            fig2 = go.Figure(data = data, layout = layout)
            #Elbow chart to return
            figure1 = html.Div(
                [
                    dcc.Graph(
                        id='kind',
                        figure=fig
                    ),
                ]
            )
            #Cluster chart to return
            figure2 = html.Div(
                [
                    dcc.Graph(
                        id='kind2',
                        figure=fig2
                    ),
                ]
            )
    return figure1,figure2
#Callback: logistic-regression ("sigmoid") diagnosis prediction.
@app.callback(
    Output('sigmoide', 'children'),
    [
        Input('compactividad','value'),
        Input('textura','value'),
        Input('area','value'),
        Input('concavidad','value'),
        Input('simetria','value'),
        Input('dimensionFractal','value'),
        Input('decimal','value'),
        Input('separador','value'),
        Input('upload-data', 'contents'),
        Input('upload-data', 'filename'),
        Input('executeSigmoide', 'n_clicks')
    ]
)
def sigmoide(compactividad,textura,area,concavidad,simetria,dimensionFractal,decimal,separador,contents, filename,n_clicks):
    """Train a logistic regression on the uploaded dataset and predict the
    diagnosis for the feature values entered by the user.

    The predictor columns ('Texture', 'Area', 'Compactness', 'Concavity',
    'Symmetry', 'FractalDimension') and the target ('Diagnosis') are
    hard-coded, so the uploaded file must provide exactly those columns.
    Returns an html.Div with the prediction message and the validation
    accuracy (empty until a file is uploaded and the button clicked).
    """
    #Object to return
    mensaje = html.Div()
    #Start the analysis
    global ns
    if ns == n_clicks:
        if contents :
            #BUGFIX: this previously read `ns = nc+1`, incrementing from the
            #unrelated clustering counter — the click-guard could re-run or
            #never run depending on nc. Increment ns itself, matching every
            #other callback's pattern.
            ns = ns + 1
            #Load the data
            contents = contents[0]
            filename = filename[0]
            df = parse_data(contents, filename,separador,decimal)
            df = df.set_index(df.columns[0])
            #Select the (hard-coded) predictor features and the target
            X = np.array(df[['Texture', 'Area', 'Compactness','Concavity', 'Symmetry', 'FractalDimension']])
            Y = np.array(df[['Diagnosis']])
            #Model setup
            Clasificacion = linear_model.LogisticRegression()
            validation_size = 0.2
            seed = 1234
            #Train / validation split (fixed seed for reproducibility)
            X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(
                    X, Y, test_size=validation_size, random_state=seed, shuffle = True)
            Clasificacion.fit(X_train, Y_train)
            #Training-set diagnostics (computed for inspection; not displayed)
            Probabilidad = Clasificacion.predict_proba(X_train)
            Predicciones = Clasificacion.predict(X_train)
            Clasificacion.score(X_train, Y_train)
            #Evaluate on the held-out validation set
            PrediccionesNuevas = Clasificacion.predict(X_validation)
            confusion_matrix = pd.crosstab(Y_validation.ravel(), PrediccionesNuevas,
                               rownames=['Real'], colnames= ['Predicción'])
            v = Clasificacion.score(X_validation, Y_validation)
            #Predict the class for the user-entered feature values
            NuevoPaciente = pd.DataFrame({ 'Texture': [textura], 'Area': [area],
                        'Compactness': [compactividad], 'Concavity': [concavidad],
                        'Symmetry': [simetria], 'FractalDimension': [dimensionFractal]})
            print(Clasificacion.predict(NuevoPaciente))
            #Return the prediction together with the validation accuracy.
            # NOTE(review): class "B" is reported as POSITIVE for cancer; if "B"
            # stands for "benign" (as in the WDBC dataset) this mapping is
            # inverted — confirm the label semantics of the training data.
            if Clasificacion.predict(NuevoPaciente) == "B":
                mensaje = html.Div(
                    html.H5("Con una certeza del " + str(format(v*100, '.2f') ) +"% se pronostica POSITIVO a Cancer ")
                )
            else:
                mensaje = html.Div(
                    html.H5("Con una certeza del " + str(format(v*100, '.2f')) +"% se pronostica NEGATIVO a Cancer "))
    return mensaje
# Entry point: start the Dash development server (debug=True enables the
# reloader and in-browser error reporting; not for production use).
if __name__ == '__main__':
    app.run_server(debug=True)
| 41.017361 | 206 | 0.526496 | 0 | 0 | 0 | 0 | 12,767 | 0.539717 | 0 | 0 | 7,107 | 0.300444 |
35e7e19298ec398add76703d93ea4b308eefc9cc | 3,394 | py | Python | pepys_import/file/highlighter/support/export.py | debrief/pepys-import | 12d29c0e0f69e1119400334983947893e7679b6b | [
"Apache-2.0"
] | 4 | 2021-05-14T08:22:47.000Z | 2022-02-04T19:48:25.000Z | pepys_import/file/highlighter/support/export.py | debrief/pepys-import | 12d29c0e0f69e1119400334983947893e7679b6b | [
"Apache-2.0"
] | 1,083 | 2019-11-06T17:01:07.000Z | 2022-03-25T10:26:51.000Z | pepys_import/file/highlighter/support/export.py | debrief/pepys-import | 12d29c0e0f69e1119400334983947893e7679b6b | [
"Apache-2.0"
] | 4 | 2019-11-06T12:00:45.000Z | 2021-06-09T04:18:28.000Z | import html
from tqdm import tqdm
from .color_picker import color_for, html_color_for, mean_color_for
def export_report(filename, chars, dict_colors, include_key=False):
"""
Export a HTML report showing all the extraction usages for the file.
:param filename: Output filename
:param chars: Characters array (should be HighlightedFile.chars)
:param dict_colors: Dictionary specifying colors to use (should be HighlightedFile.dict_colors)
:param include_key: Whether to include a key at the bottom defining the usages of the colors
This basically loops through all of the characters in the characters array, and then creates
the relevant <span> tags for each character based on the usages stored for that character.
"""
output_strings = []
html_header = """<html>
<head>
</head>
<body style="font-family: Courier">
"""
output_strings.append(html_header)
last_hash = ""
for char in tqdm(chars):
letter = char.letter
this_hash = ""
this_message = ""
colors = []
multi_usages = len(char.usages) > 1
for usage in char.usages:
this_hash += usage.tool_field
needs_new_line = this_message != ""
colors.append(color_for(usage.tool_field, dict_colors))
if needs_new_line:
this_message += "
"
if multi_usages:
this_message += "-"
this_message += usage.tool_field + ", " + usage.message
# do we have anything to shade?
if this_hash != "":
# generate/retrieve a color for this hash
new_color = mean_color_for(colors)
hex_color = html_color_for(new_color)
# are we already in hash?
if last_hash != "":
# is it the different to this one?
if last_hash != this_hash:
# ok, close the span
output_strings.append("</span>")
# start a new span
output_strings.append(
f"<span title='{this_message}' style=\"background-color:{hex_color}\">"
)
else:
output_strings.append(
f"<span title='{this_message}' style=\"background-color:{hex_color}\">"
)
elif last_hash != "":
output_strings.append("</span>")
# just check if it's newline
if letter == "\n":
output_strings.append("<br>")
else:
# Escape the letter as otherwise the XML from XML files gets
# interpreted by browsers as (invalid) HTML
output_strings.append(html.escape(letter))
last_hash = this_hash
if last_hash != "":
output_strings.append("</span>")
# also provide a key
if include_key:
output_strings.append("<hr/><h3>Color Key</h3><ul>")
for key in dict_colors:
color = dict_colors[key]
hex_color = html_color_for(color)
output_strings.append(
f'<li><span style="background-color:{hex_color}">{key}</span></li>'
)
output_strings.append("</ul>")
html_footer = """</body>
</html>"""
output_strings.append(html_footer)
with open(filename, "w") as f:
f.write("".join(output_strings))
| 32.634615 | 99 | 0.578079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,324 | 0.3901 |
35e8080312909f6045b82180fc2168f39798312c | 4,694 | py | Python | Task 4/scripts/utilities_activity7.py | gourab337/Technocolab_DL_Internship | 269c578b9eab5a02f65d9c0952e19db69bf9a1b0 | [
"MIT"
] | 1 | 2020-11-29T20:09:46.000Z | 2020-11-29T20:09:46.000Z | Task 4/scripts/utilities_activity7.py | gourab337/Technocolab_DL_Internship | 269c578b9eab5a02f65d9c0952e19db69bf9a1b0 | [
"MIT"
] | null | null | null | Task 4/scripts/utilities_activity7.py | gourab337/Technocolab_DL_Internship | 269c578b9eab5a02f65d9c0952e19db69bf9a1b0 | [
"MIT"
] | 1 | 2020-12-01T05:39:11.000Z | 2020-12-01T05:39:11.000Z | """Utility functions used in Activity 7."""
import random
import numpy as np
from matplotlib import pyplot as plt
from keras.callbacks import TensorBoard
def create_groups(data, group_size=7):
    """Create distinct groups from a continuous series.

    Splits `data` into consecutive, non-overlapping chunks of `group_size`
    observations, drops a trailing incomplete chunk, and returns the chunks
    stacked as a Numpy array of shape (1, n_groups, group_size).

    Parameters
    ----------
    data: np.array
        Series of continuous observations.
    group_size: int, default 7
        Number of observations per group.

    Returns
    -------
    A Numpy array object.
    """
    chunks = [list(data[start:start + group_size])
              for start in range(0, len(data), group_size)]
    complete = [np.array(chunk).reshape(1, group_size).tolist()
                for chunk in chunks if len(chunk) == group_size]
    stacked = np.array(complete)
    return stacked.reshape(1, stacked.shape[0], group_size)
def split_lstm_input(groups):
    """Split groups in a format expected by the LSTM layer.

    Given `groups` of shape (1, a, b), returns X of shape (1, a - 1, b)
    (every group except the last) and Y of shape (1, b) (the last group,
    i.e. the prediction target).

    Parameters
    ----------
    groups: np.array
        Numpy array with the organized sequences.

    Returns
    -------
    X, Y: np.array
        Arrays in the shapes required by the LSTM layer.
    """
    n_groups, group_size = groups.shape[1], groups.shape[2]
    X = groups[0:, :-1].reshape(1, n_groups - 1, group_size)
    Y = groups[0:, -1:][0]
    return X, Y
def mape(A, B):
    """Mean absolute percentage error between series A (reference) and B."""
    relative_errors = np.abs((A - B) / A)
    return np.mean(relative_errors) * 100
def rmse(A, B):
    """Root mean square error between two series."""
    squared_diff = np.square(np.subtract(A, B))
    return np.sqrt(squared_diff.mean())
def train_model(model, X, Y, epochs=100, version=0, run_number=0):
    """Shorthand function for training a new model.

    This function names each run of the model
    using the TensorBoard naming conventions.

    Parameters
    ----------
    model: Keras model instance
        Compiled Keras model.

    X, Y: np.array
        Series of observations to be used in
        the training process.

    version: int
        Version of the model to run.

    run_number: int
        The number of the run. Used in case
        the same model version is run again.

    Returns
    -------
    The Keras History object returned by `model.fit`.
    """
    # Random 128-bit run id; renamed from `hash`, which shadowed the builtin.
    run_id = random.getrandbits(128)
    hex_code = '%032x' % run_id
    model_name = f'bitcoin_lstm_v{version}_run_{run_number}_{hex_code[:6]}'
    # One TensorBoard log directory per uniquely-named run.
    tensorboard = TensorBoard(log_dir=f'./logs/{model_name}')
    model_history = model.fit(
        x=X, y=Y,
        batch_size=1, epochs=epochs,
        callbacks=[tensorboard],
        shuffle=False)
    return model_history
def plot_two_series(A, B, variable, title):
    """Plot two series using the same `date` index.

    Parameters
    ----------
    A, B: pd.DataFrame
        Dataframe with a `date` key and a variable
        passed in the `variable` parameter. Parameter A
        represents the "Observed" series and B the "Predicted"
        series. These will be labelled respectivelly.

    variable: str
        Variable to use in plot.

    title: str
        Plot title.
    """
    plt.figure(figsize=(14, 4))
    plt.xlabel('Observed and predicted')
    # Both series draw onto the same current axes; the legend below picks up
    # both labelled lines (ax2 is never used directly).
    ax1 = A.set_index('date')[variable].plot(
        color='#d35400', grid=True, label='Observed', title=title)
    ax2 = B.set_index('date')[variable].plot(
        color='grey', grid=True, label='Predicted')
    ax1.set_xlabel("Predicted Week")
    ax1.set_ylabel("Predicted Values")
    plt.legend()
    # Side effect only: renders the figure; nothing is returned.
    plt.show()
def denormalize(reference, series,
                normalized_variable='close_point_relative_normalization',
                denormalized_variable='close'):
    """Denormalize `series` against the matching week of `reference`.

    Looks up the row of `reference` whose 'iso_week' equals the first
    'iso_week' of `series`, takes its `denormalized_variable` value as the
    base, and rescales the normalized column back to absolute values with
    base * (normalized + 1).

    Parameters
    ----------
    reference: pd.DataFrame
        DataFrame holding the week index and the price reference.
    series: pd.DataFrame
        DataFrame with the predicted series; must share the columns of
        `reference`. Mutated in place (the `denormalized_variable` column is
        added/overwritten) and returned.
    normalized_variable: str, default 'close_point_relative_normalization'
        Column holding normalized values.
    denormalized_variable: str, default 'close'
        Column to write the denormalized values into.

    Returns
    -------
    The (mutated) `series` DataFrame.
    """
    week = series['iso_week'].values[0]
    matching_week = reference[reference['iso_week'] == week]
    base_value = matching_week[denormalized_variable].values[0]
    series[denormalized_variable] = base_value * (series[normalized_variable] + 1)
    return series
| 27.290698 | 82 | 0.639966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,881 | 0.613762 |
ea0058bf191d7578f0be7e70bc2cea8837da26ba | 10,577 | py | Python | pygenenet/CausalNetwork.py | ttdtrang/pygenenet | 49cc0c088e4f633f18e0294e42a0603a7027f06d | [
"MIT"
] | null | null | null | pygenenet/CausalNetwork.py | ttdtrang/pygenenet | 49cc0c088e4f633f18e0294e42a0603a7027f06d | [
"MIT"
] | null | null | null | pygenenet/CausalNetwork.py | ttdtrang/pygenenet | 49cc0c088e4f633f18e0294e42a0603a7027f06d | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import itertools
__metaclass__ = type
def prob_incr(species, proj_compressed_data, min_occurences = 10):
    """Per-state probability of increase: count_incr / count.

    States observed fewer than `min_occurences` times get the sentinel
    value -1 (too little data to estimate a probability). The `species`
    argument is unused but kept for interface compatibility.
    """
    ratio = proj_compressed_data['count_incr'] / proj_compressed_data['count']
    too_rare = proj_compressed_data['count'] < min_occurences
    ratio[too_rare] = -1
    return ratio
def score(species,IV, G, exp_digitized, bins, thresholds): # G: control species set
(v_f, v_a, v_n) = (0, 0, 0)
IV[IV.isnull()] = 0
if (IV == 0).all(): return thresholds['Ti']
n_repressors = IV[IV == -1].count()
n_activators = IV[IV == 1].count()
# G.extend(parents(IV) )
GG = G[:]
GG.extend(parents(IV))
GG = np.unique(GG)
pcd = exp_digitized.project(species,GG)
pcd['prob_incr'] = prob_incr(species, pcd, min_occurences = 1)
# if (GG == ['CI','LacI']).all(): print pcd
query_parents= ""
if n_repressors > n_activators:
query_parents = " & ".join(['%s == %s' %(sp, bins[sp][0] ) for sp in IV[IV == -1].index ] ) # lowest level for repressors
query_act = " & ".join(['%s == %s' %(sp, bins[sp][-2]) for sp in IV[IV == 1].index ] ) # highest level for activators
if query_act != "": query_parents += (" & " + query_act )
else:
query_parents = " & ".join(['%s == %s' %(sp, bins[sp][0] ) for sp in IV[IV == 1].index ] ) # lowest level for activators
query_rep = " & ".join(['%s == %s' %(sp, bins[sp][-1]) for sp in IV[IV == -1].index ] ) # highest level for repressors
if query_rep != "": query_parents += (" & " + query_rep)
for g in G:
if (len(parents(IV) == 1) and g == parents(IV)[0]): # single-influence and self-regulating
idx_base = pcd.query(query_parents).index
p_base = pcd.at[idx_base[0], 'prob_incr']
idx_test = np.setdiff1d(pcd.index, idx_base)
if p_base != -1:
for i in idx_test:
p_a = pcd.loc[i,'prob_incr']
# print "p_a / p_base = %s / %s" % (p_a, p_base)
if p_a != -1 :
if n_repressors < n_activators:
if (p_a / p_base) > thresholds['Ta']: v_f += 1; # print "Voted for"
elif (p_a / p_base) < thresholds['Tr']: v_a +=1; # print "Voted against"
else: v_n += 1; # print "Voted neutral"
else:
if (p_a / p_base) < thresholds['Tr']: v_f += 1; # print "Voted for"
elif (p_a / p_base) > thresholds['Ta']: v_a +=1; # print "Voted against"
else: v_n += 1; # print "Voted neutral"
else:
for b in bins[g]:
query_cntl = '%s == %s' % (g,b)
if ( g in parents(IV)):
query_str = query_cntl
else:
#p_base = float(pcd.query(query_parents+ ' & ' + query_cntl )['prob_incr'])
query_str = (query_parents+ ' & ' + query_cntl, query_cntl)[query_parents == ""]
idx_base = pcd.query(query_str).index
p_base = pcd.at[idx_base[0], 'prob_incr']
if p_base != -1:
# if p_base == 0: p_base += pseudo_count
idx_test = np.setdiff1d(pcd.query(query_cntl).index, idx_base)
for i in idx_test:
# pcd.loc[i, 'ratio'] = pcd.loc[i,'prob_incr'] / p_base
p_a = pcd.loc[i,'prob_incr']
# print "p_a / p_base = %s / %s" % (p_a, p_base)
if p_a != -1 :
# print pcd.loc[idx, 'prob_incr']/ p_base
if n_repressors < n_activators:
if (p_a / p_base) > thresholds['Ta']: v_f += 1; # print "Voted for"
elif (p_a / p_base) < thresholds['Tr']: v_a +=1; # print "Voted against"
else: v_n += 1; # print "Voted neutral"
else:
if (p_a / p_base) < thresholds['Tr']: v_f += 1; # print "Voted for"
elif (p_a / p_base) > thresholds['Ta']: v_a +=1; # print "Voted against"
else: v_n += 1; # print "Voted neutral"
# print "IV: %s" % IV
# print (v_f, v_a, v_n)
if (v_f + v_a + v_n == 0): return 0.
score = (v_f - v_a + 0.) / (v_f + v_a + v_n )
if (len(parents(IV) == 1) and g == parents(IV)[0]): score *= 0.75 # down weight single-influence and self-regulating
return score
def parents(infl):
    """Return the index labels of known (non-null), non-zero influences."""
    known_and_active = infl.notnull() & (infl != 0)
    return infl[known_and_active].index
def createIVSet(species, exp_digitized,IV0, bins, thresholds):
I = []
scores = []
idx_unknown = IV0[IV0.isnull()].index # species name
iv = IV0.copy()
iv[idx_unknown] = 0
G = [species]
score_zero = score(species,iv, G, exp_digitized, bins, thresholds)
# print "%s \t Background score: %s" % (list(iv), score_zero)
for u in idx_unknown:
iv1 = iv.copy()
iv1.loc[u] = 1 # set activators
# print "scoring %s" % iv1
score_a = score(species ,iv1, G, exp_digitized, bins, thresholds)
# print "%s \t Activator score: %s" % (list(iv1), score_a)
if score_a >= score_zero:
I.append(iv1)
scores.append(score_a)
else:
iv1.loc[u] = -1
# print "scoring %s" % iv1
score_r = score(species ,iv1, G, exp_digitized, bins, thresholds)
# print "%s \t Repressor score: %s" % (list(iv1), score_r)
if score_r >= score_zero:
I.append(iv1)
scores.append(score_r)
return (I, scores)
# IV[IV.isnull()] = 0
def combineIVs(species, IVs, IVscores, IV0,exp_digitized, bins, thresholds):
'''score every possible combination of IV in input IVs'''
I = []
scores = []
to_remove = []
tj = len(IV0[IV0.notnull()])
bg_score = 0.
bg_iv = IV0.copy()
bg_iv[IV0.isnull()] = 0
bg_score = score(species, bg_iv, [species], exp_digitized, bins, thresholds)
for i in range(2, min(thresholds['Tj'], len(IV0)- tj+1)):
K = itertools.combinations(range(len(IVs)), i)
for k in K:
old_scores = np.zeros((len(k),))
added = IVs[0][IV0.isnull()]; added[:] = 0 # combined vector
for j in range(len(k)):
added += IVs[k[j]][IV0.isnull()]
old_scores[j] = IVscores[k[j]]
new_iv = pd.concat((added , IV0[IV0.notnull()]))
if (max(old_scores) - min(old_scores)) <= thresholds['Tm']:
new_score = score(species, new_iv, [species] ,exp_digitized, bins, thresholds)
if ((new_score >= old_scores).all() and (new_score > bg_score)):
I.append(new_iv)
scores.append(new_score)
to_remove.extend(k)
return (I, scores, set(to_remove))
def competeIVs(species, iv1, iv2, exp_digitized, bins, thresholds):
    """Let two candidate influence vectors for `species` compete.

    Each vector is scored while controlling for the parents proposed only by
    the *other* vector. Returns (winner_index, winner_score), where the index
    is 0 for iv1 and 1 for iv2; on a tie, returns ([0, 1], [s1, s2]).
    """
    # Score iv1, controlling for parents unique to iv2
    G = [species]; G.extend(np.setdiff1d(parents(iv2), parents(iv1)) )
    s1 = score(species, iv1, G, exp_digitized, bins, thresholds)
    # Score iv2, controlling for parents unique to iv1
    G = [species]; G.extend(np.setdiff1d(parents(iv1), parents(iv2)) )
    s2 = score(species, iv2, G, exp_digitized, bins, thresholds)
    if s1 > s2: return (0, s1)
    elif s1 < s2: return (1, s2)
    else: return ([0, 1], [s1, s2] )
def learn(experiments, initialNetwork, thresholds = { 'Tr': 0.75, 'Ta': 1.15, 'Tj': 2, 'Ti': 0.5, 'Tm': 0.} , nbins=4, bin_assignment = 1):
'''Learning of causal network from a set of time series data, each resulted from an independent experiment
The algorithm learn influence vectors for one gene at a time.
For each gene, there are 3 main stages of learning:
(1) Adding single influence to create set of new influence vectors
(2) Combining influence vectors from stage (1) to create new influence vectors (with more than 1 parents)
(3) competing between influence vectors to determine the best one
'''
cnet = initialNetwork.copy()
binned = experiments.digitize(nbins=nbins, bin_assignment = 1)
bins = { sp: np.unique(binned[sp]) for sp in initialNetwork }
for sp in initialNetwork:
# print "----------------------------\nLearning influence vector for %s" % sp
initial = initialNetwork.influences(sp)
(IVs, scores) = createIVSet(sp,binned, initial, bins, thresholds)
# if sp == 'LacI':
# print "Initial IVs"
# print IVs
# print scores
(cIVs, cScores, to_remove) = combineIVs(sp, IVs, scores, initial,binned, bins, thresholds)
# if sp == 'LacI':
# print "Combined IVs"
# print cIVs
# print cScores
for i in np.setdiff1d(range(len(IVs)), to_remove):
cIVs.append(IVs[i])
cScores.append(scores[i])
while len(cIVs) > 1:
sorted_idx = np.argsort(-np.array(cScores)) # ranking IVs from highest scores
winnerId, wScore = competeIVs(sp, cIVs[0], cIVs[-1], binned, bins, thresholds)
if winnerId == 1:
cIVs[0] = cIVs[-1]
cScores[0] = cScores[-1]
cIVs = cIVs[:-1]
cScores = cScores[:-1]
if len(cIVs) > 0: cnet.loc[sp] = cIVs[0]
else:
cnet.loc[sp] = initial.copy()
cnet.loc[sp][initial.isnull()] = 0
return cnet
class CausalNetwork(pd.DataFrame):
    """Causal influence matrix between species, stored as a DataFrame.

    Row i holds the influence vector of gene i: entry (row, col) encodes the
    effect of the column gene on the row gene (0 none, 1 activates,
    -1 represses, NaN unknown).
    """
    def __init__(self, species):
        '''store influence vectors of each gene in a row, with value indicating relationship of gene in the column --> gene in the row. Example
        n = CausalNetwork(...)
           A     B   C
        A  0    -1   0
        B  0     1   1
        C -1  None   0
        0: no relation ship
        1: activate
        -1: repress
        None: unknown
        '''
        # Start with every relationship unknown (NaN).
        # BUGFIX: previously built as np.zeros((n, n), dtype=int) / 0., which
        # produces the same NaN matrix but through an invalid 0/0 division that
        # emits a RuntimeWarning; np.full is the direct equivalent.
        super(CausalNetwork, self).__init__(
            np.full((len(species), len(species)), np.nan),
            columns=species, index=species)
    def activators(self,i):
        ''' return the activators of i'''
        # TODO: not implemented (returns None)
        pass
    def repressors(self,i):
        ''' return the repressors of i'''
        # TODO: not implemented (returns None)
        pass
    def influences(self,i):
        '''return the influence vector of i'''
        return self.loc[i]
    def __getitem__(self, i):
        # Row (influence-vector) access: net['A'] returns the row for 'A',
        # not the column as a plain DataFrame would.
        return self.loc[i]
ea008637c73dda8e84514900695de29b3ed914c6 | 14,069 | py | Python | animation_retarget/animation_retarget_mh.py | curmil/makehuman-utils | 1e1a56479bc1deac613802e891abf440cbeb342e | [
"CC0-1.0"
] | 3 | 2018-04-16T15:14:54.000Z | 2021-08-11T16:00:58.000Z | animation_retarget/animation_retarget_mh.py | curmil/makehuman-utils | 1e1a56479bc1deac613802e891abf440cbeb342e | [
"CC0-1.0"
] | 1 | 2020-10-29T07:53:51.000Z | 2020-10-29T07:53:51.000Z | animation_retarget/animation_retarget_mh.py | curmil/makehuman-utils | 1e1a56479bc1deac613802e891abf440cbeb342e | [
"CC0-1.0"
] | 5 | 2019-08-09T15:21:50.000Z | 2022-02-21T14:02:45.000Z | #!/usr/bin/python
"""
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehuman.org/
**Code Home Page:** https://bitbucket.org/MakeHuman/makehuman/
**Author:** Jonas Hauquier, Thomas Larsson
**Copyright(c):** MakeHuman Team 2001-2015
**Licensing:** AGPL3
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Abstract
--------
Transfer an animation or pose from one skeleton to another by copying each
bone's relative poses, and compensating for differences in bind pose.
Allows transferring the animation from a BVH file imported in Blender to a MH
(or other) skeleton.
Bone names between the two skeletons are matched using fuzzy string matching,
allowing it to automatically find combinations if bone names are similar.
"""
import bpy
import mathutils
from difflib import SequenceMatcher
BONE_NAME_SIMILARITY_THRESHOLD = 0.7
# Credit goes to Thomas Larsson for these derivations
#
# M_b = global bone matrix, relative world (PoseBone.matrix)
# L_b = local bone matrix, relative parent and rest (PoseBone.matrix_local)
# R_b = bone rest matrix, relative armature (Bone.matrix_local)
# T_b = global T-pose marix, relative world
#
#
# M_p = parent global bone matrix
# R_p = parent rest matrix
#
# A_b = A bone matrix, A-pose rest matrix, converts M'_b in A pose to M_b in T pose
# M'_b= bone matrix for the mesh in A pose
#
# T_b = T bone matrix, converts bone matrix from T pose into A pose
#
#
# M_b = M_p R_p^-1 R_b L_b
# M_b = A_b M'_b
# T_b = A_b T'_b
# A_b = T_b T'^-1_b
# B_b = R^-1_b R_p
#
# L_b = R^-1_b R_p M^-1_p A_b M'_b
# L_b = B_b M^-1_p A_b M'_b
#
def _get_bone_matrix(bone):
"""bone should be a Bone
B_b
"""
if bone.parent:
b_mat = bone.matrix_local.inverted() * bone.parent.matrix_local
else:
b_mat = bone.matrix_local.inverted()
return b_mat
def _get_rest_pose_compensation_matrix(src_pbone, trg_pbone):
"""Bind pose compensation matrix
bones are expected to be of type PoseBone and be in rest pose
A_b
"""
a_mat = src_pbone.matrix.inverted() * trg_pbone.matrix
return a_mat
def set_rotation(pose_bone, rot, frame_idx, group=None):
    """Apply rotation to PoseBone and insert a keyframe.

    Rotation can be a matrix, a quaternion or a tuple of euler angles: the
    form matching the bone's rotation_mode is derived via
    to_quaternion()/to_euler() when available, otherwise `rot` is used as-is.
    `group` names the F-curve group and defaults to the bone's name.
    """
    if not group:
        group = pose_bone.name
    if pose_bone.rotation_mode == 'QUATERNION':
        try:
            quat = rot.to_quaternion()
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. rot is already quaternion-like.
            quat = rot
        pose_bone.rotation_quaternion = quat
        pose_bone.keyframe_insert('rotation_quaternion', frame=frame_idx, group=group)
    else:
        try:
            euler = rot.to_euler(pose_bone.rotation_mode)
        except Exception:
            # rot is already an euler (or a tuple of angles)
            euler = rot
        pose_bone.rotation_euler = euler
        pose_bone.keyframe_insert('rotation_euler', frame=frame_idx, group=group)
def set_translation(pose_bone, trans, frame_idx, group=None):
    """Insert a translation keyframe for a pose bone.

    `trans` may be a matrix (converted via to_translation()) or a
    translation vector used as-is. `group` names the F-curve group and
    defaults to the bone's name.
    """
    if not group:
        group = pose_bone.name
    try:
        trans = trans.to_translation()
    except Exception:
        # BUGFIX: was a bare `except: pass`, which also swallowed
        # KeyboardInterrupt/SystemExit. trans is already a vector.
        pass
    pose_bone.location = trans
    pose_bone.keyframe_insert("location", frame=frame_idx, group=group)
def fuzzy_stringmatch_ratio(str1, str2):
    """Similarity of two strings as a float in [0, 1].

    1 means an identical match, 0 means no similarity at all.
    """
    return SequenceMatcher(None, str1, str2).ratio()
def select_and_set_rest_pose(rig, scn):
    """Select the rig, go into pose mode and clear all rotations (sets to rest
    pose)
    """
    # Make the rig the active object so the pose operators act on it.
    scn.objects.active = rig
    bpy.ops.object.mode_set(mode='POSE')
    bpy.ops.pose.select_all(action='SELECT')
    # Clears rotation, location AND scale on every selected pose bone.
    bpy.ops.pose.rot_clear()
    bpy.ops.pose.loc_clear()
    bpy.ops.pose.scale_clear()
def sort_by_depth(bonemaplist):
    """Sort bone mappings breadth-first: by depth of the target bone.

    Retargeting must process parents before their children so that the
    _trg_mat/_src_mat caches are built top to bottom.
    """
    def _depth(bonemap):
        """Number of ancestors of the mapping's target bone (0 for a root)."""
        return len(bonemap.trg_bone.parent_recursive)
    return sorted(bonemaplist, key=_depth)
class AnimationRetarget(object):
    """Manages the retargetting operation between two armatures.
    """
    def __init__(self, src_amt, trg_amt):
        # src_amt / trg_amt are the source and target armature objects.
        self.src_amt = src_amt
        self.trg_amt = trg_amt
        self.bone_mappings = []
        self.trg_bone_lookup = {} # Lookup a mapping by target bone name
        self.src_bone_lookup = {} # Lookup a mapping by source bone name
        # Automatically map source bones to target bones using fuzzy matching
        self.find_bone_mapping()
        # Parents must precede children so matrices are built top-down.
        self.bone_mappings = sort_by_depth(self.bone_mappings)
        self._init_lookup_structures()

    def _init_lookup_structures(self):
        """Create lookup dicts that allow quick access to the mappings by
        source or target bone name.
        """
        for bm in self.bone_mappings:
            self.trg_bone_lookup[bm.trg_bone.name] = bm
            self.src_bone_lookup[bm.src_bone.name] = bm

    def find_bone_mapping(self):
        """Find combination of source and target bones by comparing the bones
        from both armatures with a fuzzy string matching algorithm.
        """
        # TODO allow more complicated remappings by allowing to specify a mapping file
        not_mapped_trg = {}
        mapped_src = {}
        # First pass: exact name matches.
        for trg_bone in self.trg_amt.pose.bones:
            if trg_bone.name in self.src_amt.pose.bones:
                src_bone = self.src_amt.pose.bones[trg_bone.name]
                self.bone_mappings.append(BoneMapping(src_bone, trg_bone, self))
                print ("Bone mapped: %s -> %s" % (src_bone.name, trg_bone.name))
                mapped_src[src_bone.name] = True
            else:
                not_mapped_trg[trg_bone.name] = trg_bone
        # Second pass: fuzzy matching of the leftovers against unmapped
        # source bones; only accept matches above the similarity threshold.
        for trg_bone in not_mapped_trg.values():
            src_candidates = [b for b in self.src_amt.pose.bones if b.name not in mapped_src]
            best_candidate = None
            score = -1
            for b_idx, src_bone in enumerate(src_candidates):
                ratio = fuzzy_stringmatch_ratio(src_bone.name, trg_bone.name)
                if ratio > score:
                    score = ratio
                    best_candidate = b_idx
            if best_candidate is not None and score > BONE_NAME_SIMILARITY_THRESHOLD:
                src_bone = src_candidates[best_candidate]
                self.bone_mappings.append(BoneMapping(src_bone, trg_bone, self))
                print ("Bone mapped: %s -> %s" % (src_bone.name, trg_bone.name))
                del src_candidates[best_candidate]
            else:
                print ("Could not find an approriate source bone for %s" % trg_bone.name)

    def _retarget_frame(self, scn, frame_idx, target_frame, in_place=False):
        # Move the scene to the source frame, then retarget every mapping
        # onto the given target keyframe index.
        scn.frame_set(frame_idx)
        for b_map in self.bone_mappings:
            b_map.retarget(target_frame, in_place)

    def _set_rest_frame(self, target_frame, in_place=False):
        # Keyframe an identity pose (rest pose) on all mapped target bones.
        pose_mat = mathutils.Matrix()
        pose_mat.identity()
        for b_map in self.bone_mappings:
            b_map.insert_keyframe(target_frame, pose_mat, in_place)

    def retarget(self, scn, frames, insert_restframes=False, in_place=False):
        """Start the retarget operation for specified frames.

        When insert_restframes is True, a rest-pose keyframe is inserted
        before each retargeted frame (past frame 2), interleaving poses with
        rest poses on the target timeline.
        """
        # Put both rigs in rest pose before caching the static matrices.
        scn.frame_set(0)
        select_and_set_rest_pose(self.src_amt, scn)
        select_and_set_rest_pose(self.trg_amt, scn)
        for bm in self.bone_mappings:
            bm.update_matrices()
        if insert_restframes:
            print ("Rest keyframe insertion is enabled")
        tf_idx = 1
        for c, frame_idx in enumerate(frames):
            print ("Retargetting frame %s/%s" % (c, len(frames)))
            if insert_restframes and frame_idx > 2:
                self._set_rest_frame(tf_idx, in_place)
                tf_idx += 1
            self._retarget_frame(scn, frame_idx, tf_idx, in_place)
            tf_idx += 1
class BoneMapping(object):
    def __init__(self, src_pbone, trg_pbone, container):
        """A mapping of a source bone to a target bone. Retargetting will
        transfer the pose from the source bone, compensate it for the difference
        in bind pose between source and target bone, and apply a corresponding
        pose matrix on the target bone.
        src_pbone and trg_pbone are expected to be PoseBones
        """
        # Back-reference to the owning retarget object; used to resolve
        # parent mappings via its src/trg bone lookup tables.
        self.container = container
        self.src_bone = src_pbone.bone
        self.trg_bone = trg_pbone.bone
        self.src_pbone = src_pbone
        self.trg_pbone = trg_pbone
        self.src_mat = None
        self.trg_mat = None
        # a_mat: rest-pose compensation matrix, b_mat: target bone matrix.
        # Both are filled in by update_matrices() while the rigs are at rest.
        self.a_mat = None
        self.b_mat = None
    @property
    def src_parent(self):
        """Return the bone mapping for the parent of the source bone.

        Returns None for a root bone (no parent).
        """
        if not self.src_bone.parent:
            return None
        return self.container.src_bone_lookup[self.src_bone.parent.name]
    @property
    def trg_parent(self):
        """Return the bone mapping for the parent of the target bone.

        Returns None for a root bone (no parent).
        """
        if not self.trg_bone.parent:
            return None
        # TODO guard against unmapped bones
        return self.container.trg_bone_lookup[self.trg_bone.parent.name]
    def update_matrices(self):
        """Update static matrices. These change only if the rest poses or structure
        of one of the two rigs changes.
        Should be called when both rigs are in rest pose.
        """
        self.a_mat = _get_rest_pose_compensation_matrix(self.src_pbone, self.trg_pbone)
        self.b_mat = _get_bone_matrix(self.trg_bone)
        #self.src_mat = _get_bone_matrix(self.src_pbone)
        #self.b_mat =
    def __repr__(self):
        return self.__unicode__()
    def __str__(self):
        return self.__unicode__()
    def __unicode__(self):
        return '<BoneMapping %s -> %s>' % (self.src_bone.name, self.trg_bone.name)
    def insert_keyframe(self, frame_idx, pose_mat, in_place=False):
        """Insert the specified matrix as a keyframe for the target bone.

        Translation is only keyed on root bones, and only when not
        retargeting in place.
        """
        set_rotation(self.trg_pbone, pose_mat, frame_idx)
        if not in_place and not self.trg_bone.parent:
            set_translation(self.trg_pbone, pose_mat, frame_idx)
    def retarget(self, frame_idx, in_place=False):
        """Retarget the current pose of the source bone to the target bone, and
        apply it as keyframe with specified index.
        """
        frame_mat = self.src_pbone.matrix.to_4x4()
        pose_mat = self.retarget_frame(frame_mat)
        self.insert_keyframe(frame_idx, pose_mat, in_place)
    def retarget_frame(self, frame_mat):
        """Calculate a pose matrix for the target bone by retargeting the
        specified frame_mat, which is a pose on the source bone.

        Side effect: caches _src_mat/_trg_mat on self so child bone mappings
        (processed later in the same frame) can read their parent's matrices.
        """
        # Store these for reuse in child bones, should be recalculated for every frame
        self._src_mat = frame_mat
        self._trg_mat = self._src_mat * self.a_mat.to_4x4()
        # Keep the source translation column unchanged.
        self._trg_mat.col[3] = frame_mat.col[3]
        trg_parent = self.trg_parent
        if trg_parent:
            # Express the pose relative to the parent's retargeted matrix.
            mat = trg_parent._trg_mat.inverted() * self._trg_mat
        else:
            mat = self._trg_mat
        mat = self.b_mat * mat
        # TODO apply rotation locks and corrections
        #mat = correctMatrixForLocks(mat, self.order, self.locks, self.trgBone, self.useLimits)
        # Don't know why, but apparently we need to modify _trg_mat another time
        mat_ = self.b_mat.inverted() * mat
        if trg_parent:
            self._trg_mat = trg_parent._trg_mat * mat_
        else:
            self._trg_mat = mat_
        return mat
def get_armatures(context):
    """Return (source, target) armatures from the current selection.

    The active object is taken as the target rig; the remaining selected
    object is the source. Raises Exception unless exactly two armature
    objects are selected.
    """
    trg_rig = context.active_object
    remaining = list(context.selected_objects)
    if not trg_rig or len(remaining) != 2 or trg_rig.type != "ARMATURE":
        raise Exception("Exactly two armatures must be selected. This Addon copies the current animation/pose the selected armature to the active armature.")
    remaining.remove(trg_rig)
    src_rig = remaining[0]
    if src_rig.type != "ARMATURE":
        raise Exception("Exactly two armatures must be selected. This Addon copies the current animation/pose the selected armature to the active armature.")
    return (src_rig, trg_rig)
def retarget_animation(src_rig, trg_rig, insert_restframes=False, in_place=False, frames=None):
    """With insert_restframes == True the first frame, which is supposed to contain the
    rest pose, is copied in between every two frames. This makes it possible to
    blend in each pose using action constraints.
    If in_place == True translations of the root bone are ignored.

    frames: optional iterable of source frame indices to retarget. Defaults
    to frames 1..500, the range that was previously hard-coded here;
    passing it explicitly lets callers copy exactly the frames they need.
    """
    if frames is None:
        frames = range(1, 500 + 1)
    r = AnimationRetarget(src_rig, trg_rig)
    r.retarget(bpy.context.scene, frames, insert_restframes, in_place)
def main():
    """Entry point: retarget the animation between the two selected rigs."""
    src_rig, trg_rig = get_armatures(bpy.context)
    print("Retarget animation from {} to {}".format(src_rig.name, trg_rig.name))
    retarget_animation(src_rig, trg_rig)
if __name__ == '__main__':
    main()
| 36.074359 | 157 | 0.665435 | 7,555 | 0.536996 | 0 | 0 | 546 | 0.038809 | 0 | 0 | 6,022 | 0.428033 |
ea01af5836b19c682f107e67a34eda620b66f4f3 | 1,376 | py | Python | Python/Config.py | mariocar/Nerf_Aimbot | 5afe56c9904b9facc59342e316e126dc11a1157b | [
"MIT"
] | 13 | 2020-10-19T15:16:34.000Z | 2021-12-13T19:20:17.000Z | Python/Config.py | mariocar/Nerf_Aimbot | 5afe56c9904b9facc59342e316e126dc11a1157b | [
"MIT"
] | 1 | 2022-03-14T18:45:56.000Z | 2022-03-20T00:04:04.000Z | Python/Config.py | DDeGonge/Nerf_Aimbot | 5afe56c9904b9facc59342e316e126dc11a1157b | [
"MIT"
] | 5 | 2020-10-16T17:27:12.000Z | 2022-03-18T17:52:39.000Z | """ MECHANICAL PARAMETERS """
# Steps per full revolution for the two stepper axes.
s0_step_per_rev = 27106
s1_step_per_rev = 27106
# Mechanical travel limits and centering offsets, in radians.
pitch_travel_rads = 0.5
yaw_travel_rads = 1.2
pitch_center_rads = 0.21
yaw_center_rads = 0.59
# Default motion profile (rad/s and rad/s^2).
default_vel_radps = 2.5
default_accel_radps2 = 20
# Trigger servo PWM range and hold duration (seconds).
trigger_min_pwm = 40
trigger_max_pwm = 120
trigger_hold_s = 0.5
""" PI PINOUTS """
# GPIO indices for the trigger half/full press outputs and the laser.
half_press_index = 14
full_press_index = 15
laser_index = 17
""" OPERATION PARAMETERS """
gcode_folder = 'gcode'
audio_path = 'audio'
saveimg_path = '/home/pi/imgs'
loser_mode_bump_pixels = 20
loser_mode_delay_s = 0.5
normal_mode_vertical_bump = 8
normal_mode_horiz_bump = 0
face_mode_close_enough_pixels = 10
aim_lock_fade_s = 0.5  # Soft lock on and fade into full tracking
""" CAMERA PARAMETERS """
# (width, height) in pixels; laser_center is its calibrated pixel position.
video_resolution = (640,480)
laser_center = (269,305)
""" OPENCV PARAMETERS """
tracking_mode = "mosse" # NOT IMPLEMENTED
# PID gains for the tracking loop.
track_kp = 1500
track_ki = 350
track_kd = 5
lock_on_size_px = (40,40)
lead_ahead_constant = 15 # pixels lead multiplier. Guess and check fam
# Tuning for face finder only
# track_kp = 500
# track_ki = 200
# track_kd = 2
""" FEATHER COMM PARAMETERS """
# Chars used for setting parameters on feather. All vars here must be int
# NOTE(review): default_vel_radps / trigger_hold_s above are floats, so the
# "must be int" claim looks stale — confirm what the feather side expects.
Feather_Parameter_Chars = {
    'a': s0_step_per_rev,
    'b': s1_step_per_rev,
    'c': default_vel_radps,
    'd': default_accel_radps2
}
""" DEBUG PARAMS """
DEBUG_MODE = True
SAVE_FRAMES = True
| 18.849315 | 73 | 0.734012 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 476 | 0.34593 |
ea030c574075dd05328271b1ccc630cdf7f9c443 | 1,303 | py | Python | solum-6.0.0/solum/objects/sqlalchemy/execution.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 39 | 2015-09-26T01:30:52.000Z | 2021-05-20T23:37:43.000Z | solum-6.0.0/solum/objects/sqlalchemy/execution.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | solum-6.0.0/solum/objects/sqlalchemy/execution.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 30 | 2015-10-25T18:06:39.000Z | 2020-01-14T12:14:06.000Z | # Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from solum.objects import execution as abstract
from solum.objects.sqlalchemy import models as sql
class Execution(sql.Base, abstract.Execution):
    """Represent an execution in sqlalchemy."""
    __tablename__ = 'execution'
    __resource__ = 'executions'
    __table_args__ = sql.table_args()
    # Surrogate primary key.
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)
    # External identifier (UUID string, 36 chars).
    uuid = sa.Column(sa.String(36))
    # Owning pipeline (foreign key to pipeline.id).
    pipeline_id = sa.Column(sa.Integer, sa.ForeignKey('pipeline.id'))
class ExecutionList(abstract.ExecutionList):
    """Represent a list of executions in sqlalchemy."""
    @classmethod
    def get_all(cls, context):
        # Query every Execution row visible to the given request context.
        return ExecutionList(sql.model_query(context, Execution))
| 33.410256 | 75 | 0.744436 | 588 | 0.451266 | 0 | 0 | 109 | 0.083653 | 0 | 0 | 701 | 0.537989 |
ea045fe7b9c17066011316e1d2c7191ab7f63a8f | 5,143 | py | Python | cryptopunks/utils.py | tesla809/colorpunx | bee4557ab87a3804b249eaad4fb83fc96942fad7 | [
"Apache-2.0"
] | 1 | 2021-11-19T23:56:15.000Z | 2021-11-19T23:56:15.000Z | cryptopunks/utils.py | tesla809/colorpunx | bee4557ab87a3804b249eaad4fb83fc96942fad7 | [
"Apache-2.0"
] | null | null | null | cryptopunks/utils.py | tesla809/colorpunx | bee4557ab87a3804b249eaad4fb83fc96942fad7 | [
"Apache-2.0"
] | null | null | null | """Tools for working with Cryptopunk NFTs; this includes utilities for data analysis and image preparation for training machine learning models using Cryptopunks as training data.
Functions:
get_punk(id)
pixel_to_img(pixel_str, dim)
flatten(img)
unflatten(img)
sort_dict_by_function_of_value(d, f)
add_index_to_colors(colors)
"""
import os
import time
import requests
from collections import OrderedDict
from bs4 import BeautifulSoup
from re import sub
import numpy as np
import pandas as pd
from matplotlib.colors import rgb2hex
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
__ROOT_DIR__ = os.path.dirname(os.path.abspath(__file__))
__PUNK_DIR__ = f"{__ROOT_DIR__}/images/training";
def camel_case(string):
    '''
    Convert string to camelCase.

    Hyphens, underscores and spaces are treated as word separators;
    the first word is lower-cased, the rest are capitalized.
    Returns "" for input that is empty after stripping newlines
    (the previous version raised IndexError on empty input).
    '''
    string = string.strip("\n")
    string = sub(r"(_|-)+", " ", string).title().replace(" ", "")
    if not string:
        return ""
    return string[0].lower() + string[1:]
def color_str_to_hex(s):
    '''
    Convert the string form of a numpy pixel array, e.g. "[0.1 0.2 0.3]",
    into a hex colour string via matplotlib's rgb2hex.
    '''
    tokens = s[1:-1].split(' ')
    channels = [float(token) for token in tokens if token != '']
    return rgb2hex(channels)
def get_punk(id):
    '''
    Load the training image for punk `id` (zero-padded to 4 digits)
    and return it as an ndarray.
    '''
    filename = "punk%04d.png" % id
    return mpimg.imread(f"{__PUNK_DIR__}/{filename}")
def pixel_to_img(pixel_str, dim = (24,24)):
    '''
    Take pixel of format "[r g b a]"
    and return an image of size `dim` containing
    only the pixel's color.
    '''
    (x,y) = dim
    # np.fromstring(text, sep=' ') is deprecated; parse the tokens explicitly.
    c = np.array(pixel_str[1:-1].split(), dtype=float)
    return np.full((x, y, 4), c)
def pixel_to_ximg(pixel_strs, dim = (24,24), n=3 ):
    '''
    Take n*n pixels of format "[r g b a]"
    and return an image of size `dim` tiled as an
    n*n grid of solid-colour cells (row-major order).
    '''
    # Cell size; dim must be divisible by n for the grid to fill dim exactly.
    (x,y) = (dim[0]//n, dim[1]//n)
    rows = []
    for i in range(0,n):
        row = []
        for j in range(0,n):
            # np.fromstring(text, sep=' ') is deprecated; parse tokens explicitly.
            color = np.array(pixel_strs[i*n + j][1:-1].split(), dtype=float)
            row.append(np.full((x, y, 4), color))
        rows.append(np.concatenate(row, axis=1))
    return np.concatenate(rows, axis=0)
def flatten(img):
'''
Convert (x,y,z) array containing a pixel in z-dimension
to an (x,y) array with str values for each (i,j)
the intention is to make this easier to work with in ML
training.
'''
return np.array([[str(c) for c in row]
for row in img])
def unflatten(img):
    '''
    Return a flattened image (strings like "[r g b a]") to a numeric
    ndarray suitable for .png display.
    '''
    # np.fromstring(text, sep=' ') is deprecated; parse the tokens explicitly.
    return np.array([[np.array(c[1:-1].split(), dtype=float)
                      for c in row] for row in img])
def sort_dict_by_function_of_value(d, f = len):
    """Return a new dict with d's items ordered by f(value), ascending.

    Bug fix: the sort key previously hard-coded len(item[1]), silently
    ignoring the supplied function f; it now applies f as documented.
    """
    sorted_tuples = sorted(d.items(),
                    key=lambda item: f(item[1]))
    return {k: v for k, v in sorted_tuples}
def add_index_to_colors(colors):
    '''
    Add a unique, sequential index to the entry for
    each color. returned dictionary will be of form
    {`color_string`: { `"id": `int`, "punkIds" : `list[int`}}
    '''
    return {
        color: {'id': index, 'punkIds': punk_ids}
        for index, (color, punk_ids) in enumerate(colors.items())
    }
def get_attr_dict():
    '''
    Read the attribute CSV header and return an OrderedDict mapping
    every attribute name to the default value -1.
    '''
    attrs = OrderedDict()
    with open(f"{__ROOT_DIR__}/data/list_attr_punx.csv") as csv_file:
        header = csv_file.read().strip('\n').split(',')
    for attr in header:
        attrs[attr] = -1
    return attrs
def get_punk_attrs(id):
    '''
    Retrieve `id` cryptopunk from larvalabs.com,
    parse HTML to extract type and attribute list
    to return list of attributes.

    Returns an empty list when the page cannot be fetched (the previous
    version returned {} here, inconsistent with the list return type the
    docstring promises and the success path produces).
    '''
    typeClass="col-md-10 col-md-offset-1 col-xs-12"
    punk_page=requests.get(f"https://www.larvalabs.com/cryptopunks/details/{id}")
    if(punk_page.status_code != 200):
        print(punk_page.status_code)
        return []
    punk_html=punk_page.text
    soup = BeautifulSoup(punk_html, 'html.parser')
    details = soup.find(id="punkDetails")
    # First entry is the punk's type (e.g. alien/ape/human variant).
    punkType = camel_case(details.find(class_=typeClass).find('a').contents[0])
    attrs=[punkType]
    attrTags = details.find(class_ = "row detail-row")
    for attrTag in attrTags.find_all('a'):
        attrs.append(camel_case(attrTag.contents[0]))
    return attrs
def get_punk_dict(id):
    '''
    Retrieve a punk page, pull type and attributes
    from HTML and return a dictionary of attribute to
    (-1,1) mapping where 1 is truthy for existence of
    attribute.
    '''
    # Bug fix: the original copied a module global __ATTR_DICT__ that is
    # never defined anywhere in this file (NameError at runtime); build a
    # fresh attribute template from the CSV via get_attr_dict() instead.
    od = get_attr_dict()
    attrs = get_punk_attrs(id)
    for attr in attrs:
        od[attr]=1
    return od
def get_punks(start, end):
    '''
    Retrieve punks in range `start` to `end` (end exclusive) and return a
    dict mapping punk id to its attribute dict.
    '''
    punks={}
    for id in range(start, end):
        print(id)
        # Pause between requests so larvalabs.com is not hammered.
        time.sleep(3.3)
        punks[id] = get_punk_dict(id)
    return punks
def plot_in_grid(n, images, predictions, labels):
    '''
    Plot `images` in an n*n grid with
    prediction and labels as header.

    Cleanup: removed the dead `i=0` initialisation and the trailing
    `i=i+1`, both no-ops because the for-loop rebinds i each iteration.
    '''
    (x,y) = (n,n)
    fig = plt.figure(figsize=(9,14))
    # NOTE(review): indexing starts at 1, so images[0] is never plotted and
    # the inputs need at least n*n + 1 entries — confirm this is intended.
    for i in range(1,(x*y)+1):
        fig.add_subplot(x, y, i)
        plt.imshow(images[i])
        plt.title(f"{predictions[i][0]},{labels[i][0]}")
        plt.axis('off')
    return fig
| 27.356383 | 180 | 0.614816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,163 | 0.420572 |
ea058d42499e1a5f03be44819e5b160692dcae59 | 307 | py | Python | newspaper/articles/admin.py | Krishna-Patil/Newspaper_app | 2cf209899c00bb6a086aacb0dff4767f75226cb7 | [
"MIT"
] | null | null | null | newspaper/articles/admin.py | Krishna-Patil/Newspaper_app | 2cf209899c00bb6a086aacb0dff4767f75226cb7 | [
"MIT"
] | null | null | null | newspaper/articles/admin.py | Krishna-Patil/Newspaper_app | 2cf209899c00bb6a086aacb0dff4767f75226cb7 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import *
# Register your models here.
class CommentInLine(admin.TabularInline):
    """Tabular inline so comments can be edited on their article's admin page."""
    model = Comment
class ArticleAdmin(admin.ModelAdmin):
    """Admin for Article with its comments shown inline."""
    inlines = [
        CommentInLine,
    ]
# Expose Article (with inline comments) and Comment in the Django admin.
admin.site.register(Article, ArticleAdmin)
admin.site.register(Comment)
| 17.055556 | 42 | 0.736156 | 143 | 0.465798 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.091205 |
ea06238c6c6f8de7ed088df9c80dadbb3372a8f9 | 1,277 | py | Python | GUI/Generic/GTasks.py | gcewing/PyGUI | 58c6c38ccb8e66acdf98dea6b24bef1d9a03147c | [
"MIT"
] | 9 | 2019-07-15T19:03:27.000Z | 2021-11-24T19:50:02.000Z | GUI/Generic/GTasks.py | mnabeelp/PyGUI | 58c6c38ccb8e66acdf98dea6b24bef1d9a03147c | [
"MIT"
] | 3 | 2019-09-11T13:22:10.000Z | 2020-08-19T20:13:00.000Z | GUI/Generic/GTasks.py | mnabeelp/PyGUI | 58c6c38ccb8e66acdf98dea6b24bef1d9a03147c | [
"MIT"
] | 4 | 2020-02-23T16:50:06.000Z | 2022-02-10T07:15:35.000Z | #
# PyGUI - Tasks - Generic
#
from GUI.Properties import Properties, overridable_property
class Task(Properties):
"""A Task represents an action to be performed after a specified
time interval, either once or repeatedly.
Constructor:
Task(proc, interval, repeat = False, start = True)
Creates a task to call the given proc, which should be
a callable object of no arguments, after the specified
interval in seconds from the time the task is scheduled.
If repeat is true, the task will be automatically re-scheduled
each time the proc is called. If start is true, the task will be
automatically scheduled upon creation; otherwise the start()
method must be called to schedule the task.
"""
interval = overridable_property('interval', "Time in seconds between firings")
repeat = overridable_property('repeat', "Whether to fire repeatedly or once only")
def __del__(self):
self.stop()
scheduled = overridable_property('scheduled',
"True if the task is currently scheduled. Read-only.")
def start(self):
"""Schedule the task if it is not already scheduled."""
raise NotImplementedError("GUI.Task.start")
def stop(self):
"""Unschedules the task if it is currently scheduled."""
raise NotImplementedError("GUI.Task.stop")
| 33.605263 | 83 | 0.740799 | 1,182 | 0.925607 | 0 | 0 | 0 | 0 | 0 | 0 | 930 | 0.728269 |
ea06d13a5da8e06d3e01f08a6a3425f88f182886 | 755 | py | Python | presentingfeatures/forms.py | fahimfarhan/cancer-web-app | 6c5d8c5c90b0909cbd161d2ae87b4f12549bdfef | [
"MIT"
] | null | null | null | presentingfeatures/forms.py | fahimfarhan/cancer-web-app | 6c5d8c5c90b0909cbd161d2ae87b4f12549bdfef | [
"MIT"
] | 5 | 2021-03-18T20:13:38.000Z | 2022-01-13T00:35:37.000Z | presentingfeatures/forms.py | fahimfarhan/cancer-web-app | 6c5d8c5c90b0909cbd161d2ae87b4f12549bdfef | [
"MIT"
] | null | null | null | from django import forms
from presentingfeatures.models import Status, Investigation
class StatusForm(forms.ModelForm):
class Meta:
model = Status
fields = ('details', 'advice')
class UploadForm(forms.ModelForm):
type_choice = [
('Others', 'Others'), ('Marker', 'Marker'),
('X-ray', 'X-ray'), ('USG', 'USG'),
('CT-Scan', 'CT-Scan'), ('MRI', 'MRI'),
('MRS', 'MRS'), ('PET', 'PET'),
('Echo', 'Echo'),
('CBC', 'CBC'), ('RBS', 'RBS'), ('LFT', 'LFT'), ('KFT', 'KFT'),
('Serum-Electrolytes', 'Serum-Electrolytes'),
]
type = forms.ChoiceField(widget=forms.Select, choices=type_choice)
class Meta:
model = Investigation
fields = ('type', 'file',)
| 26.034483 | 71 | 0.545695 | 663 | 0.878146 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.298013 |
ea088f92e58e2a58e35be9d342ed82158fd80454 | 209 | wsgi | Python | generators/app/templates/settings/apache/backend.wsgi | hallucino5105/generator-my-react-project | 18a7fbe70d1c847df8a043e1f8845e63938dadd4 | [
"Apache-2.0"
] | null | null | null | generators/app/templates/settings/apache/backend.wsgi | hallucino5105/generator-my-react-project | 18a7fbe70d1c847df8a043e1f8845e63938dadd4 | [
"Apache-2.0"
] | 5 | 2020-02-26T11:29:36.000Z | 2020-02-26T11:30:05.000Z | generators/app/templates/settings/apache/backend.wsgi | hallucino5105/generator-my-react-project | 18a7fbe70d1c847df8a043e1f8845e63938dadd4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import os
import sys
import logging
# Send log output to stderr so the WSGI server (e.g. Apache) captures it.
logging.basicConfig(stream=sys.stderr)
# Make this file's directory importable so `app` can be found.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from app.app import application
| 16.076923 | 59 | 0.77512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.172249 |
ea08f7de993fe4b0b5eb995cb3bc781d63e74534 | 582 | py | Python | mundo2/parte3/partea/ex058.py | fcdennis/CursoPython | 485ef7e706af74eae9ee336714ddd8b493bd8e5d | [
"MIT"
] | null | null | null | mundo2/parte3/partea/ex058.py | fcdennis/CursoPython | 485ef7e706af74eae9ee336714ddd8b493bd8e5d | [
"MIT"
] | null | null | null | mundo2/parte3/partea/ex058.py | fcdennis/CursoPython | 485ef7e706af74eae9ee336714ddd8b493bd8e5d | [
"MIT"
] | null | null | null | from random import randint
# Number-guessing game: the computer picks an integer in [0, 10] and the
# user guesses with higher/lower hints until correct.
computador = randint(0, 10)
print('Sou seu computador...\nAcabei de pensar e um número entre 0 e 10.')
print('Será que você consegue adivinhar qual foi?')
palpite = int(input('Qual é o seu palpite? '))
# Count attempts, starting with the first guess above.
contador = 1
while palpite != computador:
    contador += 1
    if palpite > computador:
        # Guess too high: prompt for a lower number.
        print('Menos... Tente mais uma vez.')
        palpite = int(input('Qual é o seu palpite? '))
    else:
        # Guess too low: prompt for a higher number.
        print('Mais... Tente mais uma vez.')
        palpite = int(input('Qual é o seu palpite? '))
print(f'Acertou em {contador} tentativa. Parabéns!')
ea0bc106c32b9bce7d4aafa4b320fbbe8499e3bd | 3,215 | py | Python | June-17-2020/Managing-the-Full-API-Lifecycle/servers/production.py | Nirothipan/labs | 1cdd27dae1b03b78686037ed1bbc69ade3b44f33 | [
"Apache-2.0"
] | 2 | 2020-06-25T08:28:33.000Z | 2022-02-17T14:12:43.000Z | June-17-2020/Managing-the-Full-API-Lifecycle/servers/production.py | Nirothipan/labs | 1cdd27dae1b03b78686037ed1bbc69ade3b44f33 | [
"Apache-2.0"
] | 4 | 2020-04-30T18:25:31.000Z | 2020-07-15T21:36:41.000Z | June-17-2020/Managing-the-Full-API-Lifecycle/servers/production.py | Nirothipan/labs | 1cdd27dae1b03b78686037ed1bbc69ade3b44f33 | [
"Apache-2.0"
] | 13 | 2020-04-30T18:19:53.000Z | 2020-07-21T16:27:51.000Z | #!/usr/bin/env python3
import json
from http import server, HTTPStatus
import socketserver
import ssl
import datetime
import uuid
import time
class EndpointHandler(server.BaseHTTPRequestHandler):
    """Mock production endpoint: serves a canned JSON product list for any
    GET/POST request, with an alternate payload for paths containing
    "accessories". Each response is delayed one second to simulate latency."""
    def do_GET(self):
        self.common_handler()
    def do_POST(self):
        self.common_handler()
    def common_handler(self):
        # Artificial latency so the mock behaves like a slow production service.
        time.sleep(1)
        response = {
            "count": 2,
            "list": [
                {
                    "id": str(uuid.uuid4()),
                    "name": "Handset",
                    "manufacturer": "Samsung Inc",
                    "model": "QQAR1266",
                    "price": "$10",
                    "status": "available"
                },
                {
                    "id": str(uuid.uuid4()),
                    "name": "Charger",
                    "manufacturer": "Samsung Inc",
                    "model": "QGGTW24",
                    "price": "$15",
                    "status": "available"
                }
            ]
        }
        # Any path containing "accessories" gets the accessories payload instead.
        if "accessories" in self.path:
            response = {
                "count": 2,
                "list": [
                    {
                        "id": str(uuid.uuid4()),
                        "name": "Charger Unit",
                        "manufacturer": "Power Gator",
                        "model": "PA-200mah",
                        "price": "$5",
                        "status": "available"
                    },
                    {
                        "id": str(uuid.uuid4()),
                        "name": "USB Cable",
                        "manufacturer": "Generic RPC",
                        "model": "USBCA124",
                        "price": "$1",
                        "status": "available"
                    }
                ]
            }
        # response = {"uuid": str(uuid.uuid4()), "time": datetime.datetime.now(
        # ).strftime("%A, %d. %B %Y %I:%M:%S %p")}
        wire_data_byte = json.dumps(response).encode()
        self.send_response(HTTPStatus.OK)
        self.send_header("Content-type", "application/json")
        self.send_header("Content-length", len(wire_data_byte))
        self.end_headers()
        self.wfile.write(wire_data_byte)
    @staticmethod
    def run():
        # Start a (re-usable address) TCP server bound to all interfaces,
        # optionally wrapped in TLS when EndpointHandler.secured is set.
        port = EndpointHandler.port
        print('INFO: (Secured: {})Sample Server listening on localhost:{}...'.format(EndpointHandler.secured,
                                                                                     port))
        socketserver.TCPServer.allow_reuse_address = True
        httpd = socketserver.TCPServer(('', port), EndpointHandler)
        # NOTE(review): placeholder certificate path — must exist when secured=True.
        cert_path = 'yourpemfile.pem'
        print("DEBUG: cert_path = " + cert_path)
        if EndpointHandler.secured:
            httpd.socket = ssl.wrap_socket(
                httpd.socket, server_side=True, certfile=cert_path)
        httpd.serve_forever()
    # Class-level configuration for the mock server.
    port = 9000
    protocol_version = 'HTTP/1.1'
    secured = False
def main():
    """
    Run as a standalone server if needed
    """
    # Blocks forever serving requests on EndpointHandler.port.
    EndpointHandler.run()
if __name__ == '__main__':
    main()
| 31.519608 | 109 | 0.434215 | 2,930 | 0.911353 | 0 | 0 | 657 | 0.204355 | 0 | 0 | 782 | 0.243235 |
ea0d7555db6e36c6d337cec2eb924498e3225c71 | 259 | py | Python | tilemapbase/__init__.py | Simage/TileMapBase | 85665c24a372028e327396a4b0feaca22c67d3ce | [
"MIT"
] | null | null | null | tilemapbase/__init__.py | Simage/TileMapBase | 85665c24a372028e327396a4b0feaca22c67d3ce | [
"MIT"
] | null | null | null | tilemapbase/__init__.py | Simage/TileMapBase | 85665c24a372028e327396a4b0feaca22c67d3ce | [
"MIT"
] | null | null | null | __version__ = "0.4.6"
from .tiles import init, get_cache
from .mapping import project, to_lonlat, Extent, Plotter, extent_from_frame
from .utils import start_logging
from . import tiles
from . import mapping
from . import utils
from . import ordnancesurvey
| 23.545455 | 75 | 0.787645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.027027 |
ea0e70ed7f48b9ea77840bcc6953a91b092a58f3 | 1,166 | py | Python | src/bpp/migrations/0232_auto_20210101_1751.py | iplweb/django-bpp | 85f183a99d8d5027ae4772efac1e4a9f21675849 | [
"BSD-3-Clause"
] | 1 | 2017-04-27T19:50:02.000Z | 2017-04-27T19:50:02.000Z | src/bpp/migrations/0232_auto_20210101_1751.py | mpasternak/django-bpp | 434338821d5ad1aaee598f6327151aba0af66f5e | [
"BSD-3-Clause"
] | 41 | 2019-11-07T00:07:02.000Z | 2022-02-27T22:09:39.000Z | src/bpp/migrations/0232_auto_20210101_1751.py | iplweb/bpp | f027415cc3faf1ca79082bf7bacd4be35b1a6fdf | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.0.11 on 2021-01-01 16:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("bpp", "0231_ukryj_status_korekty"),
]
operations = [
migrations.AlterField(
model_name="autor",
name="pseudonim",
field=models.CharField(
blank=True,
help_text="\n Jeżeli w bazie danych znajdują się autorzy o zbliżonych imionach, nazwiskach i tytułach naukowych,\n skorzystaj z tego pola aby ułatwić ich rozróżnienie. Pseudonim pokaże się w polach wyszukiwania\n oraz na podstronie autora, po nazwisku i tytule naukowym.",
max_length=300,
null=True,
),
),
migrations.AlterField(
model_name="uczelnia",
name="sortuj_jednostki_alfabetycznie",
field=models.BooleanField(
default=True,
help_text="Jeżeli ustawione na 'FAŁSZ', sortowanie jednostek będzie odbywało się ręcznie\n tzn za pomocą ustalonej przez administratora systemu kolejności. ",
),
),
]
| 36.4375 | 297 | 0.608919 | 1,091 | 0.920675 | 0 | 0 | 0 | 0 | 0 | 0 | 583 | 0.491983 |
ea112a6e9b0c0b4d5227f718d1442eae696135fa | 2,251 | py | Python | Tensorflow_2X_PythonFiles/demo51_texttokenizer.py | mahnooranjum/Tensorflow_DeepLearning | 65ab178d4c17efad01de827062d5c85bdfb9b1ca | [
"MIT"
] | null | null | null | Tensorflow_2X_PythonFiles/demo51_texttokenizer.py | mahnooranjum/Tensorflow_DeepLearning | 65ab178d4c17efad01de827062d5c85bdfb9b1ca | [
"MIT"
] | null | null | null | Tensorflow_2X_PythonFiles/demo51_texttokenizer.py | mahnooranjum/Tensorflow_DeepLearning | 65ab178d4c17efad01de827062d5c85bdfb9b1ca | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Demo51_TextTokenizer.ipynb
# **Spit some [tensor] flow**
We need to learn the intricacies of tensorflow to master deep learning
`Let's get this over with`
## So we OHE the last NLP problem, why not do the same and feed it to the neural network? Well because, features in a language, are not independent.
Let's explore this:
The quick brown fox jumps over __________________
See you know the end of this sentence because you know the words right?
well wb this:
over _____________________
Now we don't know the end of this sentence.
So in tensorflow, to save computations, we have the embedding layer:
### Step 1: Words to ints
Nothing deep about deep learning ----> 13 43 32 43 98
### Step 2: Ints to word vector
13 43 32 43 98 ------> [0.9, 1.2] [-0.4, 0.2] [0.3, 0.3] [-0.4, 0.2] [0.2, 0.5]
T -----> T x D
### We can use word2vec to make sure the embedding layer has similar words close to each other
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import cv2
print(tf.__version__)
from tensorflow.keras.layers import Input, Dropout, Dense, Flatten, SimpleRNN, GRU, LSTM
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD, Adam, Adamax
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Get the dataset
data = ["Hello world", "I ain't saying hello to you", "what's up with all the hellos"]
# Vocabulary cap: keep at most this many distinct tokens.
MAX_SIZE = 10000
tokenizer = Tokenizer(num_words=MAX_SIZE)
tokenizer.fit_on_texts(data)
# Each sentence becomes a list of integer word ids.
sequences = tokenizer.texts_to_sequences(data)
print(sequences)
print(tokenizer.word_index)
# Pad/truncate every sequence to a fixed length T.
T = 4
# Zeros appended after the tokens.
data = pad_sequences(sequences,
                     maxlen = T,
                     padding = 'post')
print(data)
# Zeros prepended before the tokens.
data = pad_sequences(sequences,
                     maxlen = T,
                     padding = 'pre')
print(data)
# Over-long sequences lose tokens from the front, padding goes at the end.
data = pad_sequences(sequences,
                     maxlen = T,
                     truncating = 'pre',
                     padding = 'post')
print(data)
# Over-long sequences lose tokens from the back, padding goes at the end.
data = pad_sequences(sequences,
                     maxlen = T,
                     truncating = 'post',
                     padding = 'post')
print(data)
ea12cb6de419df1a315ae612d46dcba363f94b68 | 5,472 | py | Python | floodsystem/flood.py | Manz2TheMax/1A_FloodWarning_Group2 | cce498c69daf7ea409463e39aa6beda19203c155 | [
"MIT"
] | null | null | null | floodsystem/flood.py | Manz2TheMax/1A_FloodWarning_Group2 | cce498c69daf7ea409463e39aa6beda19203c155 | [
"MIT"
] | null | null | null | floodsystem/flood.py | Manz2TheMax/1A_FloodWarning_Group2 | cce498c69daf7ea409463e39aa6beda19203c155 | [
"MIT"
] | null | null | null | """This module provides tools for assessing flood risk
"""
from datetime import timedelta
from floodsystem.datafetcher import fetch_measure_levels
import numpy as np
from floodsystem.analysis import polyfit
from matplotlib import dates as date
def stations_level_over_threshold(stations, tol):
    """For a list of MonitoringStation objects (stations) and a tolerance value (tol),
    returns a list of (station, relative water level) tuples for every station
    whose relative level is known and exceeds tol, sorted by relative level in
    descending order.

    Note: "update_water_levels" needs to have been called at least once for
    this function to return anything useful."""
    levels = [(station, station.relative_water_level()) for station in stations]
    over_threshold = [
        (station, level)
        for station, level in levels
        if level is not None and level > tol
    ]
    return sorted(over_threshold, key=lambda pair: pair[1], reverse=True)
def stations_highest_rel_level(stations, N):
    """For a list of MonitoringStation objects (stations), returns the N stations
    with the highest water level relative to the typical range; stations with
    no known relative level are excluded."""
    known = (s for s in stations if s.relative_water_level() is not None)
    ranked = sorted(known, key=lambda s: s.relative_water_level(), reverse=True)
    return ranked[:N]
def get_station_flood_risk(station):
    """For a MonitoringStation object (station), returns a flood risk rating
    between 0 and 4, based on the current relative water level and the recent
    rate of level rise; returns None when either piece of data is missing."""
    REL_LEVEL_THRESHOLD = 2
    RISE_THRESHOLD = 0.1
    # Base the initial risk on the current relative water level.
    rel_level = station.relative_water_level()
    if rel_level is None:
        return None
    risk = 3 if rel_level > REL_LEVEL_THRESHOLD else 1
    # Adjust the risk using the recent rate of change of the level.
    rise = get_level_rise(station)
    if rise is None:
        return None
    if rise < 0:
        risk -= 1
    if rise > RISE_THRESHOLD:
        risk += 1
    return risk
def get_level_rise(station):
    """For a MonitoringStation object (station), returns the rate of water
    level rise, specifically the average gradient of a degree-4 polynomial
    fit to the last 2 days of level data; returns None if data is missing."""
    #Fetch data (if no data available, return None)
    times, values = fetch_measure_levels(station.measure_id, timedelta(days=2))
    #Only continue if data available, otherwise return None
    if times and values and (None in times or None in values) == False:
        #Fit a degree-4 polynomial to the level history
        poly, d0 = polyfit(times, values, p=4)
        #Find derivative polynomial
        level_der = np.polyder(poly)
        #Obtain list of gradients over last 2 days using the derivative polynomial
        #(d0 is the time offset used by polyfit to keep the fit well-conditioned)
        grads = []
        for t in times:
            grads.append(level_der(date.date2num(t) - d0))
        #Return average of gradient values
        return np.average(grads)
    else:
        return None
def get_town_flood_risk(town, stations_by_town):
    """Obtains the flood risk for a town, based on the flood risks of the
    town's stations, using the same rating system - the returned value is the
    highest flood risk among the town's stations (None if no station has data).

    Bug fix: a station with no data (risk None) previously aborted the scan
    of the remaining stations (``break``); it is now skipped so every station
    is considered, matching the documented "highest risk" behaviour.
    """
    #Get stations for town
    stations_in_town = stations_by_town[town]
    flood_risk = get_station_flood_risk(stations_in_town[0])
    #Find highest flood risk value from the town's stations
    for station in stations_in_town[1:]:
        new_flood_risk = get_station_flood_risk(station)
        if new_flood_risk is None:
            continue
        if flood_risk is None or new_flood_risk > flood_risk:
            flood_risk = new_flood_risk
    #Return highest value
    return flood_risk
def get_flood_risk_rating(num):
    """Converts an integer value of a flood risk rating to the rating it
    represents - low (0/1), moderate (2), high (3), severe (4); any other
    value maps to None."""
    labels = {0: "Low", 1: "Low", 2: "Moderate", 3: "High", 4: "Severe"}
    try:
        return labels.get(num)
    except TypeError:
        # Unhashable input: the original equality checks simply fell through.
        return None
| 38 | 126 | 0.701389 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,819 | 0.515168 |
ea13103cadb7d3efae73e9bac04af504790aa183 | 3,349 | py | Python | Visualization_with_PyGal.py | rocketpy/data_profiling | 83ac32c50a5d5589769a342b945673e8403f3fed | [
"MIT"
] | null | null | null | Visualization_with_PyGal.py | rocketpy/data_profiling | 83ac32c50a5d5589769a342b945673e8403f3fed | [
"MIT"
] | null | null | null | Visualization_with_PyGal.py | rocketpy/data_profiling | 83ac32c50a5d5589769a342b945673e8403f3fed | [
"MIT"
] | 1 | 2021-09-04T13:58:53.000Z | 2021-09-04T13:58:53.000Z | # A python svg graph plotting library and creating interactive charts !
# PyPi: https://pypi.org/project/pygal/
# Docs: http://www.pygal.org/en/stable/index.html
# Chart types: http://www.pygal.org/en/stable/documentation/types/index.html
# Maps: http://www.pygal.org/en/stable/documentation/types/maps/pygal_maps_world.html
# pip install pygal
# pip install pygal_maps_world
import pygal
import seaborn as sns  # just for datasets
from pygal.style import Style
# Loading the seaborn "tips" example dataset; its numeric columns are
# 'total_bill' and 'tip' (there is no 'total' column).
df = sns.load_dataset('tips')
# Simple Bar Chart
bar_chart = pygal.Bar()
bar_chart.add('Tip', df['tip'])
bar_chart.title = "Bla bla"
bar_chart.render_to_file('bar_chart.svg')
# bar_chart.render_in_browser()
# Customizing the graph and using a Style
custom_style = Style(colors=('#E80080', '#404040', '#9BC850'))
bar_chart = pygal.Bar(style=custom_style)
bar_chart.title = "Some text"
bar_chart.add("A", [0.95])
bar_chart.add("B", [1.25])
bar_chart.add("C", [1])
bar_chart.render_in_browser()
# Double Bar Chart
# NOTE(review): this appends to the customized A/B/C chart created above
# rather than a fresh pygal.Bar(); kept as-is — confirm intent.
bar_chart.add('Tip', df['tip'][:10])
# BUG FIX: the tips dataset has no 'total' column — the correct name is
# 'total_bill' ('total' raised a KeyError here).
bar_chart.add('Total Bill', df['total_bill'][:10])
bar_chart.render_to_file('bar_chart_2.svg')
# Horizontal bar diagram
line_chart = pygal.HorizontalBar()
line_chart.title = 'Browser usage in February 2012 (in %)'
line_chart.add('IE', 19.5)
line_chart.add('Firefox', 36.6)
line_chart.add('Chrome', 36.3)
line_chart.add('Safari', 4.5)
line_chart.add('Opera', 2.3)
line_chart.render()
# Line Chart
# BUG FIX: 'total' -> 'total_bill' (same KeyError as above).
line_chart = pygal.Line()
line_chart.add('Total', df['total_bill'][:15])
line_chart.render_to_file('line.svg')
# Double Line Chart
line_chart.add('Total', df['total_bill'][:15])
line_chart.add('Tip', df['tip'][:15])
line_chart.render_to_file('line_2.svg')
# Box Plot
box_plot = pygal.Box()
box_plot.title = 'Tips'
box_plot.add('Tip', df['tip'])
box_plot.render_to_file('box1.svg')
# Funnel Chart
funnel_chart = pygal.Funnel()
funnel_chart.title = 'Total'
funnel_chart.add('Total', df['total_bill'][:15])
funnel_chart.add('Tip', df['tip'][:15])
funnel_chart.render_to_file('funnel.svg')
# Working with maps (requires the pygal_maps_world plugin)
worldmap_chart = pygal.maps.world.World()
worldmap_chart.title = 'Some countries'
worldmap_chart.add('F countries', ['fr', 'fi'])
worldmap_chart.add('M countries', ['ma', 'mc', 'md', 'me', 'mg',
                                   'mk', 'ml', 'mm', 'mn', 'mo',
                                   'mr', 'mt', 'mu', 'mv', 'mw',
                                   'mx', 'my', 'mz'])
worldmap_chart.add('U countries', ['ua', 'ug', 'us', 'uy', 'uz'])
worldmap_chart.render()
# specify a value for a country
worldmap_chart = pygal.maps.world.World()
worldmap_chart.title = 'Minimum deaths by capital punishement (source: Amnesty International)'
worldmap_chart.add('In 2012', {
    'af': 14,
    'bd': 1,
    'by': 3,
    'cn': 1000,
    'gm': 9,
    'in': 1,
    'ir': 314,
    'iq': 129,
    'jp': 7,
    'kp': 6,
    'pk': 1,
    'ps': 6,
    'sa': 79,
    'so': 6,
    'sd': 5,
    'tw': 6,
    'ae': 1,
    'us': 43,
    'ye': 28
})
worldmap_chart.render()
# access to continents
supra = pygal.maps.world.SupranationalWorld()
supra.add('Asia', [('asia', 1)])
supra.add('Europe', [('europe', 1)])
supra.add('Africa', [('africa', 1)])
supra.add('North america', [('north_america', 1)])
supra.add('South america', [('south_america', 1)])
supra.add('Oceania', [('oceania', 1)])
supra.add('Antartica', [('antartica', 1)])
supra.render()
| 25.761538 | 94 | 0.654225 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,469 | 0.438638 |
ea13756a0ffd8a68a14fc4db2d7499ba93d9435e | 17,663 | py | Python | robogen/rgkit/backup bots/Dulladob01.py | andrewgailey/robogen | 7e96cfa26d2e6dc383c5d205816ddd98f8f100d7 | [
"Unlicense"
] | null | null | null | robogen/rgkit/backup bots/Dulladob01.py | andrewgailey/robogen | 7e96cfa26d2e6dc383c5d205816ddd98f8f100d7 | [
"Unlicense"
] | null | null | null | robogen/rgkit/backup bots/Dulladob01.py | andrewgailey/robogen | 7e96cfa26d2e6dc383c5d205816ddd98f8f100d7 | [
"Unlicense"
] | null | null | null | # Dulladob 0.1 by Camelot Chess
# http://robotgame.net/viewrobot/7641
import rg
# --- Tunable strategy parameters (score units unless noted) ---
spawn_param = 8 #turn on which we begin bouncing
suicide_param = 6 #self-destruct when param*surrounders > our hp
suicide_fear_param = 6 #assume an enemy blows up when param*surrounders > its hp
staying_still_bonus = 0.34 #points for staying put
best_center_distance_param = 6 #ideal distance from the center
best_center_distance_weight = 1.01 #points lost for every square off the ideal ring
spawn_weight = 0.34 #points lost being on spawn; multiplied by turns since death
adjacent_robot_penalty = 1 #NO NEED TO CHANGE - points lost per adjacent enemy robot
adjacent_friendly_penalty = 0.51 #NO NEED TO CHANGE - points lost per adjacent friendly robot
main_axis_weight = 0.5  # currently unused in this file
#parameters controlling how much hp we need to pin an enemy to spawn. Arguably
#should depend on their hp, and/or happen before running, particularly on first
#turn.
# hp_to_pin[turns_left_until_next_spawn_wave] -> minimum hp needed to hold a pin
hp_to_pin = {}
hp_to_pin[0] = 6
hp_to_pin[1] = 11
hp_to_pin[2] = 51
hp_to_pin[3] = 51
for x in range(4,11):
    hp_to_pin[x] = 51
# Spawn squares of one board quadrant; distance_from_spawn() folds the other
# quadrants onto this canonical set via the board's symmetry.
canonical_spawn_locs = []
for x in range(10):
    for y in range(10):
        if 'spawn' in rg.loc_types((x,y)):
            canonical_spawn_locs.append((x,y))
#TTD:
# - vary some priorities
# Per-acting-robot caches of all robots (both sides) at walking distance 1
# and 2; refreshed at the top of tentative_act().
one_robots = []
two_robots = []
verbose = 0
def urgency(bot1, game):
    # Priority score used to order our robots' moves: distance from the board
    # centre dominates, then hp, then x-coordinate as a deterministic tie-breaker.
    return 100000 * rg.dist(bot1.location, rg.CENTER_POINT) + 100 * bot1.hp + bot1.location[0]
def greater(bot1, bot2, game):
    # Strict urgency comparison: 1 when bot1 outranks bot2, 0 when bot2
    # outranks bot1. A tie falls off the end (returns None); the original
    # author deliberately left that case unhandled as "impossible".
    u1 = urgency(bot1, game)
    u2 = urgency(bot2, game)
    if u1 > u2:
        return 1
    if u2 > u1:
        return 0
def valid(move):
    # A square is usable unless the arena marks it off-board or an obstacle.
    types = rg.loc_types(move)
    if 'invalid' in types or 'obstacle' in types:
        return 0
    return 1
def not_spawn(move):
    # 1 if the square is not a spawn tile, else 0.
    return 0 if 'spawn' in rg.loc_types(move) else 1
def spawn(move):
    # Complement of not_spawn(): 1 on spawn tiles, 0 elsewhere.
    return 0 if not_spawn(move) else 1
def surrounded_spawn(move):
    # 1 when the square has no reachable non-spawn neighbour (a robot here
    # is boxed in by walls / off-board squares / spawn tiles), else 0.
    open_neighbours = list(rg.locs_around(move, filter_out=('obstacle', 'invalid', 'spawn')))
    return 0 if open_neighbours else 1
def equal(bot1, bot2):
    # Two robots are considered the same when they occupy the same square.
    return 1 if bot1.location == bot2.location else 0
def surrounders(this_robot, game, loc):
    # Count enemy robots standing on squares orthogonally adjacent to loc.
    number_found = 0
    for loc2 in rg.locs_around(loc):
        if(loc2 in game.robots):
            bot2 = game.robots[loc2]
            if bot2.player_id != this_robot.player_id: number_found += 1
    return number_found
def distance_from_spawn(square):
    # Walking distance from `square` to the nearest spawn tile, exploiting
    # the board's symmetry: mirror coordinates > 9 into the lower quadrant
    # (assumes a 19x19 board mirrored at 9 — TODO confirm), then order the
    # pair so x <= y and compare against the canonical spawn list.
    #canonise the square
    canonical_x = square[0]
    canonical_y = square[1]
    if(canonical_x > 9): canonical_x = 18-canonical_x
    if(canonical_y > 9): canonical_y = 18-canonical_y
    if(canonical_x > canonical_y):
        canonical_square = (canonical_y, canonical_x)
    else:
        canonical_square = (canonical_x, canonical_y)
    distance = 10  # upper bound on the minimum distance being searched for
    for loc in canonical_spawn_locs:
        if rg.wdist(loc, canonical_square) < distance:
            distance = rg.wdist(loc,canonical_square)
    return distance
def move_towards_if_no_ally(loc, dest, game, illegals):
    # One step from loc toward dest (x-axis preferred) onto a square that is
    # unoccupied and not already claimed this turn; 'no_action' otherwise.
    xmove = towardsx_if_not_spawn(loc, dest)
    ymove = towardsy_if_not_spawn(loc, dest)
    if(xmove != 'no_move' and (not xmove in illegals) and (not xmove in game.robots)): xvalid = 1
    else: xvalid = 0
    if(ymove != 'no_move' and (not ymove in illegals) and (not ymove in game.robots)): yvalid = 1
    else: yvalid = 0
    if(xvalid == 1): return xmove
    if(yvalid == 1): return ymove
    return 'no_action'
def towardsy_if_not_spawn(loc, dest):
    # One vertical step from loc toward dest; refuses to step onto a spawn
    # tile unless loc itself is already spawn. Returns 'no_move' when the
    # step is blocked or loc is already vertically aligned with dest.
    if(dest[1] > loc[1] ):
        tentative_move = (loc[0], loc[1]+1)
        if valid(tentative_move):
            if not_spawn(tentative_move) or spawn(loc):
                return tentative_move
    if(dest[1] < loc[1]):
        tentative_move = (loc[0], loc[1]-1)
        if valid(tentative_move):
            if not_spawn(tentative_move) or spawn(loc):
                return tentative_move
    return 'no_move'
def towardsx_if_not_spawn(loc, dest):
    # One horizontal step from loc toward dest; refuses to step onto a spawn
    # tile unless loc itself is already spawn (mirror of the y-axis helper).
    # Returns 'no_move' when blocked or already horizontally aligned.
    if dest[0] > loc[0]:
        candidate = (loc[0] + 1, loc[1])
        if valid(candidate) and (not_spawn(candidate) or spawn(loc)):
            return candidate
    if dest[0] < loc[0]:
        candidate = (loc[0] - 1, loc[1])
        if valid(candidate) and (not_spawn(candidate) or spawn(loc)):
            return candidate
    return 'no_move'
def towardy(loc, dest):
    # One vertical step from loc toward dest with no spawn restriction;
    # 'no_move' when already aligned or the step is off-board/blocked.
    step = 0
    if dest[1] > loc[1]:
        step = 1
    elif dest[1] < loc[1]:
        step = -1
    if step:
        candidate = (loc[0], loc[1] + step)
        if valid(candidate):
            return candidate
    return 'no_move'
def towardx(loc, dest):
    # One horizontal step from loc toward dest with no spawn restriction;
    # 'no_move' when already aligned or the step is off-board/blocked.
    step = 0
    if dest[0] > loc[0]:
        step = 1
    elif dest[0] < loc[0]:
        step = -1
    if step:
        candidate = (loc[0] + step, loc[1])
        if valid(candidate):
            return candidate
    return 'no_move'
def move_towards_either_axis(loc, dest, turn):
    # Prefer the x-step on even turns and the y-step on odd turns, falling
    # back to whichever axis is available when the other is blocked.
    x_step = towardsx_if_not_spawn(loc, dest)
    y_step = towardsy_if_not_spawn(loc, dest)
    if x_step == 'no_move':
        return y_step
    if y_step == 'no_move':
        return x_step
    return x_step if turn % 2 == 0 else y_step
def destruct_if_doomed_us(this_robot, game, illegals):
    # Self-destruct when the adjacent enemies would out-damage our remaining
    # hp (threshold tuned by suicide_param); otherwise defer to later rules.
    if (this_robot.location in illegals): return 'no_action'
    if (surrounders(this_robot, game, this_robot.location)*suicide_param > this_robot.hp): return ['suicide']
    return 'no_action'
def destruct_if_doomed_enemy(this_robot, game):
    # Predict whether a robot (used on enemies) would choose to self-destruct,
    # using the more cautious suicide_fear_param threshold.
    if (surrounders(this_robot, game, this_robot.location)*suicide_fear_param > this_robot.hp): return ['suicide']
    return 'no_action'
def attack_moving_enemy(this_robot, game, illegals):
    # Pre-emptively attack the adjacent square an enemy two steps away is
    # likely to move onto, scoring each candidate square by the weakness and
    # centrality of the enemies that could step there.
    # NOTE(review): towardx/towardy return squares adjacent to us that passed
    # valid(); this assumes they always match the keys seeded from
    # rg.locs_around below — confirm rg.locs_around's default filtering.
    if (this_robot.location in illegals): return 'no_action'
    square_dictionary = {}
    for square in rg.locs_around(this_robot.location):
        square_dictionary[square] = 0
        if square in game.robots:
            square_dictionary[square] -= 40 #don't fire if our robot is there, they probably won't move there
    for bot in two_robots:
        if bot.player_id != this_robot.player_id:
            loc = bot.location
            targetx = towardx(this_robot.location, loc)
            targety = towardy(this_robot.location, loc)
            if targetx != 'no_move':
                # Weaker and more central targets score higher.
                square_dictionary[targetx] += 70 - bot.hp - rg.dist(rg.CENTER_POINT, targetx)
            if targety != 'no_move':
                square_dictionary[targety] += 70 - bot.hp - rg.dist(rg.CENTER_POINT, targety)
    best_score = 0
    best_move = 'no_action'
    for square in rg.locs_around(this_robot.location):
        if square_dictionary[square] > best_score:
            best_score = square_dictionary[square]
            best_move = ['attack', square]
    return best_move
def attack_if_possible(this_robot, game, illegals):
    # Attack the weakest adjacent enemy, provided our own square has not
    # already been claimed by a higher-priority ally this turn.
    if this_robot.location in illegals:
        return 'no_action'
    enemies = [bot for bot in one_robots if bot.player_id != this_robot.player_id]
    if not enemies:
        return 'no_action'
    # min() keeps the first of equally-weak enemies, matching the original scan.
    weakest = min(enemies, key=lambda bot: bot.hp)
    if weakest.hp < 1000:
        return ['attack', weakest.location]
    return 'no_action'
def strong_hunt_the_weak(this_robot, game, illegals):
    # When healthy (hp >= 30), chase the weakest nearby enemy below 20 hp:
    # adjacent near-dead enemies (hp <= 5) are stepped onto (they are
    # expected to die or flee), other adjacent ones are attacked, and weak
    # enemies two squares away are approached via a non-spawn step.
    if(this_robot.hp < 30): return 'no_action'
    weakest_enemy = 20
    best_move = 'no_action'
    for bot in one_robots:
        if bot.player_id != this_robot.player_id:
            if bot.hp < weakest_enemy:
                weakest_enemy = bot.hp
                # Move onto a near-dead enemy only if the square is free of
                # claims and not covered by a second enemy.
                if bot.hp <= 5 and (not bot.location in illegals) and (not surrounders(this_robot, game, bot.location) > 1):
                    best_move = ['move', bot.location]
                    weakest_enemy = bot.hp
                elif not this_robot.location in illegals:
                    best_move = ['attack', bot.location]
                    weakest_enemy = bot.hp
    for bot in two_robots:
        if bot.player_id != this_robot.player_id:
            if bot.hp < weakest_enemy:
                targetx = towardsx_if_not_spawn(this_robot.location, bot.location)
                targety = towardsy_if_not_spawn(this_robot.location, bot.location)
                if not (targetx == 'no_move'):
                    if not (targetx in illegals or surrounders(this_robot, game, targetx) > 1):
                        best_move = ['move', targetx]
                        weakest_enemy = bot.hp
                if not (targety == 'no_move'):
                    if not (targety in illegals or surrounders(this_robot, game, targety) > 1):
                        # Prefer the y-step only when it is strictly more central.
                        if targetx == 'no_move' or rg.dist(targetx, rg.CENTER_POINT) > rg.dist(targety, rg.CENTER_POINT):
                            best_move = ['move', targety]
                            weakest_enemy = bot.hp
    return best_move
def safe(this_robot, loc, game):
    # 1 if loc looks safe for this_robot to stand on next turn: not a spawn
    # tile just before a spawn wave, not occupied by an adjacent enemy, and
    # not within striking distance of an enemy currently two squares away.
    turns_left = 10 - game.turn % 10  # turns until the next spawn wave
    if(turns_left == 10): turns_left = 0
    if(turns_left <= 2 and spawn(loc)): return 0
    for bot in one_robots:
        if(loc == bot.location and bot.player_id != this_robot.player_id):
            return 0
    for bot in two_robots:
        if bot.player_id != this_robot.player_id:
            if rg.wdist(loc, bot.location) == 1:
                return 0
    return 1
def scared(this_robot, game):
    # 1 when this_robot should consider fleeing: an adjacent enemy looks
    # likely to self-destruct, more than one enemy is adjacent, or the last
    # scanned enemy outguns us while unsupported (or we are below 16 hp).
    num_surrounders = 0
    scared = 0
    hp = 0
    for bot in one_robots:
        if bot.player_id != this_robot.player_id:
            num_surrounders += 1
            hp = bot.hp  # hp of the last adjacent enemy seen
            last_found = bot  # NOTE(review): assigned but never read
            if(destruct_if_doomed_enemy(bot, game) != 'no_action'):
                scared = 1
    if (num_surrounders > 1):
        scared = 1
    # NOTE(review): `bot` here is the loop variable leaking out of the loop
    # above (the last robot scanned, possibly an ally); hp > 0 implies the
    # loop ran at least once, so no NameError in practice — confirm intent.
    if (hp > this_robot.hp):
        if (surrounders(bot, game, bot.location) == 1) or this_robot.hp < 16:
            scared = 1
    return scared
def run_if_scared_and_safe(this_robot, game, illegals):
    # When scared(), flee to the safest unclaimed non-spawn neighbour,
    # preferring the one closest to the board centre; 'no_action' when calm
    # or when no safe neighbour exists.
    if not scared(this_robot, game):
        return 'no_action'
    best_distance = 1000
    move = 'no_action'
    for loc in rg.locs_around(this_robot.location, filter_out=('obstacle', 'invalid', 'spawn')):
        if ((not loc in illegals) and safe(this_robot, loc, game) == 1):
            if rg.dist(loc, rg.CENTER_POINT) < best_distance:
                best_distance = rg.dist(loc, rg.CENTER_POINT)
                move = ['move', loc]
    return move
def empty_score(this_robot, loc, game):
    """Desirability score for this_robot standing on `loc` (higher is better).

    Combines: distance from the ideal centre ring (healthy robots) or from
    the centre itself (weak robots), crowding penalties for adjacent robots,
    and spawn-area bonuses/penalties that grow as the next spawn wave nears.
    """
    score = 0
    # Healthy robots hover on a ring around the centre; weak ones head
    # straight for the centre.
    if this_robot.hp > 25:
        score -= abs(rg.dist(loc, rg.CENTER_POINT) - best_center_distance_param) * best_center_distance_weight
    else:
        score -= rg.dist(loc, rg.CENTER_POINT) * best_center_distance_weight
    # Penalize crowding: enemies more than allies.
    for loc2 in rg.locs_around(loc, filter_out=('obstacle', 'invalid')):
        if loc2 in game.robots:
            if game.robots[loc2].player_id != this_robot.player_id:
                score -= adjacent_robot_penalty
            else:
                score -= adjacent_friendly_penalty
    # BUG FIX: the original wrote `spawn(loc) & game.turn < 91`, which Python
    # parses as `(spawn(loc) & game.turn) < 91` — always true, since both
    # operands of `&` are small non-negative ints — so the spawn terms
    # applied to EVERY square. The intended test is a logical `and`.
    if spawn(loc) and game.turn < 91:
        # If we are trying to run away from spawn, neighbours with no enemy
        # covering their exits are good, because we need to move through them.
        for loc2 in rg.locs_around(loc, filter_out=('obstacle', 'invalid')):
            clear_square = 1
            for loc3 in rg.locs_around(loc2, filter_out=('obstacle', 'invalid', 'spawn')):
                if loc3 in game.robots and game.robots[loc3].player_id != this_robot.player_id:
                    clear_square = 0
            score += ((game.turn + 1) % 10) * spawn_weight * clear_square / 2
        # Being on spawn itself gets worse the closer the next wave is.
        score -= ((game.turn + 1) % 10) * spawn_weight
    if surrounded_spawn(loc) and game.turn < 91:
        score -= (game.turn % 10) * spawn_weight
    return score
def find_empty_space(this_robot, game, illegals):
    # Pick the best-scoring option among guarding in place (with a small
    # bonus for not moving) and stepping to a neighbour; claimed squares are
    # pushed far below any real score.
    loc = this_robot.location
    best_score = empty_score(this_robot, loc, game) + staying_still_bonus
    move = ['guard']
    if(loc in illegals): best_score = -10000
    for loc2 in rg.locs_around(loc, filter_out=('obstacle', 'invalid')):
        score = empty_score(this_robot, loc2, game)
        if(loc2 in illegals): score -= 10000
        if(score > best_score):
            best_score = score
            move = ['move', loc2]
    return move
def pin_to_spawn(this_robot, game, illegals):
    # Trap an enemy on a spawn tile so it dies to the next spawn wave:
    # guard if the enemy is already adjacent, otherwise step to a blocking
    # square. Requires enough hp (hp_to_pin, keyed by turns until the wave)
    # and is skipped near game end.
    if(game.turn > 95): return 'no_action'
    turns_left = 10 - game.turn % 10
    if(turns_left == 10): turns_left = 0
    if(this_robot.hp < hp_to_pin[turns_left]): return 'no_action'
    loc = this_robot.location
    for bot in one_robots:
        if(bot.player_id != this_robot.player_id):
            loc2 = bot.location
            if spawn(loc2):
                if(not_spawn(loc) and (not loc in illegals)):
                    return ['guard']
    for bot in two_robots:
        if(bot.player_id != this_robot.player_id):
            loc2 = bot.location
            if spawn(loc2):
                block_square = move_towards_either_axis(loc, loc2, game.turn)
                if(block_square == 'no_move'): return 'no_action'
                if(not_spawn(block_square) and (not block_square in illegals)):
                    return ['move', block_square]
    return 'no_action'
def tentative_act(this_robot, game, illegals):
    # Core decision chain for one robot. First refresh the module-level
    # caches of robots at walking distance 1 (one_robots) and 2 (two_robots),
    # then try each tactic in priority order, returning the first that
    # produces an action: hunt weak enemies, flee, self-destruct, pin to
    # spawn, attack, escape spawn, pre-emptive attack, reposition.
    global one_robots
    global two_robots
    one_robots = []
    two_robots = []
    locx = this_robot.location[0]
    locy = this_robot.location[1]
    for x in range(-2, 3):
        for y in range(-2, 3):
            if (abs(x) + abs(y) in range (1,3)):
                checkx = locx + x
                checky = locy + y
                if ((checkx, checky) in game.robots):
                    bot = game.robots[(checkx,checky)]
                    if (abs(x) + abs(y) == 1):
                        one_robots.append(bot)
                    else: two_robots.append(bot)
    possible_move = strong_hunt_the_weak(this_robot, game, illegals)
    if (possible_move != 'no_action'): return possible_move
    possible_move = run_if_scared_and_safe(this_robot, game, illegals)
    if (possible_move != 'no_action'): return possible_move
    possible_move = destruct_if_doomed_us(this_robot, game, illegals)
    if (possible_move != 'no_action'): return possible_move
    possible_move = pin_to_spawn(this_robot, game, illegals)
    if (possible_move != 'no_action'): return possible_move
    possible_move = attack_if_possible(this_robot, game, illegals)
    if (possible_move != 'no_action'): return possible_move
    # On a spawn tile, prefer any actual move over guarding in place.
    if(spawn(this_robot.location)):
        possible_move = find_empty_space(this_robot, game, illegals)
        if(possible_move[0] != 'guard'): return possible_move
    possible_move = attack_moving_enemy(this_robot, game, illegals)
    if (possible_move != 'no_action'): return possible_move
    actual_move = find_empty_space(this_robot, game, illegals)
    return actual_move
def empty_score_punish_spawn(this_robot, loc, game):
    # Same desirability measure as empty_score(), with heavy flat penalties
    # for spawn tiles and spawn-boxed tiles, so robots clear the spawn area
    # before the next wave lands.
    penalty = 0
    if spawn(loc):
        penalty += 100
    if surrounded_spawn(loc):
        penalty += 100
    return empty_score(this_robot, loc, game) - penalty
def find_empty_space_punish_spawn(this_robot, game, illegals):
    # Spawn-averse variant of find_empty_space(): same guard-or-move search,
    # but scored with empty_score_punish_spawn().
    loc = this_robot.location
    best_score = empty_score_punish_spawn(this_robot, loc, game) + staying_still_bonus
    move = ['guard']
    if(loc in illegals): best_score = -10000
    for loc2 in rg.locs_around(loc, filter_out=('obstacle', 'invalid')):
        score = empty_score_punish_spawn(this_robot, loc2, game)
        # NOTE(review): `score = -10000` here vs `score -= 10000` in
        # find_empty_space — practically equivalent, but confirm which the
        # author intended.
        if(loc2 in illegals): score = -10000
        if(score > best_score):
            best_score = score
            move = ['move', loc2]
    return move
def run_from_spawn(this_robot, game, illegals):
    # Escape routine used just before a spawn wave: choose the best square
    # with spawn tiles heavily penalized.
    return find_empty_space_punish_spawn(this_robot, game, illegals)
def at_spawn_after_move(this_robot, move):
    # Will the robot end the turn on a spawn tile, given its chosen move?
    final_loc = move[1] if move[0] == 'move' else this_robot.location
    return spawn(final_loc)
def act_with_illegals(this_robot, game, illegals):
    # Wrap tentative_act(): accept its move except on the turns just before
    # a spawn wave (turn % 10 in {9, 0}) when the move would leave us on a
    # spawn tile — then run an explicit spawn escape instead. Disabled near
    # game end (turn > 95) when no further wave will spawn.
    tentative_move = tentative_act(this_robot, game, illegals)
    if(game.turn % 10 < 9 and game.turn % 10 > 0): return tentative_move
    if(game.turn > 95): return tentative_move
    if(not at_spawn_after_move(this_robot, tentative_move)): return tentative_move
    return run_from_spawn(this_robot, game, illegals)
def destination_square(bot, move):
    # Square the robot will occupy after `move` (its own square for
    # guard/attack/suicide actions).
    return move[1] if move[0] == 'move' else bot.location
def act_with_consideration(this_robot, game, illegals):
    # Coordinate this robot with nearby allies. Breadth-first, collect every
    # friendly robot reachable through chains of distance-1/2 neighbours that
    # has strictly higher urgency, then resolve moves in descending urgency,
    # claiming each destination square in `illegals` so no two allies pick
    # the same square. Returns this robot's own move when its turn comes.
    bots_to_consider = []
    bots_to_consider.append(this_robot)
    new_bots = []
    new_bots.append(this_robot)
    while(len(new_bots)):
        last_bots = new_bots
        new_bots = []
        for bot in last_bots:
            locx = bot.location[0]
            locy = bot.location[1]
            for x in range(-2, 3):
                for y in range(-2, 3):
                    if (abs(x) + abs(y) in range (1,3)):
                        checkx = locx + x
                        checky = locy + y
                        if ((checkx, checky) in game.robots):
                            cand = game.robots[(checkx,checky)]
                            if(cand.player_id == this_robot.player_id and greater(cand, bot, game) and (not cand in bots_to_consider)):
                                new_bots.append(cand)
                                bots_to_consider.append(cand)
    sorted_bots = sorted(bots_to_consider, key= lambda bot: -urgency(bot, game))
    for bot in sorted_bots:
        move = act_with_illegals(bot, game, illegals)
        square = destination_square(bot, move)
        illegals.add(square)
        if(bot == this_robot):
            return move
class Robot:
    # rgkit entry point: the framework instantiates Robot and calls act()
    # once per turn for each of our robots.
    def act(self, game):
        # Start with an empty set of claimed squares; act_with_consideration
        # fills it while resolving nearby allies in urgency order.
        return act_with_consideration(self, game, set())
| 39.16408 | 135 | 0.628885 | 96 | 0.005435 | 0 | 0 | 0 | 0 | 0 | 0 | 1,627 | 0.092113 |
ea1590334af24435c30185a2cb73b1bcb47990a6 | 12,795 | py | Python | telestream_cloud_qc_sdk/test/test_video_config.py | pandastream/telestream-cloud-python-sdk | ce0ad503299661a0f622661359367173c06889fc | [
"MIT"
] | null | null | null | telestream_cloud_qc_sdk/test/test_video_config.py | pandastream/telestream-cloud-python-sdk | ce0ad503299661a0f622661359367173c06889fc | [
"MIT"
] | 2 | 2016-07-06T14:13:31.000Z | 2018-03-07T12:54:58.000Z | telestream_cloud_qc_sdk/test/test_video_config.py | Telestream/telestream-cloud-python-sdk | ce0ad503299661a0f622661359367173c06889fc | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import telestream_cloud_qc
from telestream_cloud_qc.models.video_config import VideoConfig # noqa: E501
from telestream_cloud_qc.rest import ApiException
class TestVideoConfig(unittest.TestCase):
    """VideoConfig unit test stubs"""
    # NOTE: auto-generated skeleton (OpenAPI Generator). The large literal in
    # make_instance is generator-produced example data covering every optional
    # sub-test model of VideoConfig.
    def setUp(self):
        # No fixtures required; kept for unittest symmetry.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def make_instance(self, include_optional):
        """Test VideoConfig
            include_optional is a boolean, when False only required
            params are included, when True both required and
            optional params are included """
        # model = telestream_cloud_qc.models.video_config.VideoConfig()  # noqa: E501
        if include_optional :
            return VideoConfig(
                track_select_test = telestream_cloud_qc.models.track_select_test.track_select_test(
                    selector = 56,
                    selector_type = 'TrackIndex',
                    checked = True, ),
                track_id_test = telestream_cloud_qc.models.track_id_test.track_id_test(
                    track_id = 56,
                    reject_on_error = True,
                    checked = True, ),
                ignore_vbi_test = telestream_cloud_qc.models.ignore_vbi_test.ignore_vbi_test(
                    reject_on_error = True,
                    checked = True, ),
                force_color_space_test = telestream_cloud_qc.models.force_color_space_test.force_color_space_test(
                    color_space = 'CSUnknown',
                    checked = True, ),
                video_segment_detection_test = telestream_cloud_qc.models.video_segment_detection_test.video_segment_detection_test(
                    black_level_default_or_custom = 'Default',
                    black_level = 56,
                    percentage_of_frame = 56,
                    min_duration_required = 1.337,
                    min_duration_required_secs_or_frames = 'Seconds',
                    require_digital_silence = True,
                    reject_on_error = True,
                    checked = True, ),
                video_layout_test = telestream_cloud_qc.models.layout_test.layout_test(
                    layout_type = 'LayoutTypeFixedIgnoreStartAndEnd',
                    start_duration = 1.337,
                    start_duration_secs_or_frames = 'Seconds',
                    end_duration = 1.337,
                    end_duration_secs_or_frames = 'Seconds',
                    start_enabled = True,
                    start_hours = 56,
                    start_minutes = 56,
                    start_seconds = 56,
                    start_frames = 56,
                    end_enabled = True,
                    end_hours = 56,
                    end_minutes = 56,
                    end_seconds = 56,
                    end_frames = 56,
                    checked = True, ),
                letterboxing_test = telestream_cloud_qc.models.letterboxing_test.letterboxing_test(
                    ratio_or_lines = 'Ratio',
                    ratio_horizontal = 56,
                    ratio_vertical = 56,
                    lines_top_and_bottom = 56,
                    lines_left_and_right = 56,
                    tolerance = 56,
                    black_level_default_or_custom = 'Default',
                    black_level = 56,
                    reject_on_error = True,
                    checked = True, ),
                blanking_test = telestream_cloud_qc.models.blanking_test.blanking_test(
                    black_level_default_or_custom = 'Default',
                    black_level = 56,
                    checked = True, ),
                loss_of_chroma_test = telestream_cloud_qc.models.loss_of_chroma_test.loss_of_chroma_test(
                    level_default_or_custom = 'Default',
                    level = 56,
                    tolerance = 56,
                    reject_on_error = True,
                    checked = True, ),
                chroma_level_test = telestream_cloud_qc.models.chroma_level_test.chroma_level_test(
                    y_level_default_or_custom = 'Default',
                    y_level_lower = 56,
                    y_level_upper = 56,
                    y_level_max_outside_range = 1.337,
                    y_level_tolerance_low = 1.337,
                    y_level_tolerance_high = 1.337,
                    u_vlevel_default_or_custom = 'Default',
                    u_vlevel_lower = 56,
                    u_vlevel_upper = 56,
                    u_vlevel_max_outside_range = 1.337,
                    low_pass_filter = 'NoFilter',
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                black_level_test = telestream_cloud_qc.models.black_level_test.black_level_test(
                    level_default_or_custom = 'Default',
                    level = 56,
                    level_max_outside_range = 1.337,
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                rgb_gamut_test = telestream_cloud_qc.models.rgb_gamut_test.rgb_gamut_test(
                    level_default_or_custom = 'Default',
                    level_lower = 56,
                    level_upper = 56,
                    level_max_outside_range = 1.337,
                    level_tolerance = 1.337,
                    low_pass_filter = 'NoFilter',
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                hdr_test = telestream_cloud_qc.models.hdr_test.hdr_test(
                    hdr_standard = 'GenericHdr',
                    max_fall_max_enabled = True,
                    max_fall_max = 56,
                    max_fall_error_enabled = True,
                    max_fall_error = 56,
                    max_cll_max_enabled = True,
                    max_cll_max = 56,
                    max_cll_error_enabled = True,
                    max_cll_error = 56,
                    always_calculate = True,
                    always_report = True,
                    reject_on_error = True,
                    checked = True, ),
                colour_bars_test = telestream_cloud_qc.models.colour_bars_test.colour_bars_test(
                    color_bar_standard = 'AnyColorBars',
                    tolerance = 56,
                    time_range_enabled = True,
                    start_time = 1.337,
                    end_time = 1.337,
                    range_tolerance = 1.337,
                    time_secs_or_frames = 'Seconds',
                    not_at_any_other_time = True,
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                black_frame_test = telestream_cloud_qc.models.black_frame_test.black_frame_test(
                    level_default_or_custom = 'Default',
                    level = 56,
                    percentage_of_frame = 56,
                    start_range_enabled = True,
                    start_time = 1.337,
                    end_time = 1.337,
                    start_range_tolerance = 1.337,
                    time_secs_or_frames = 'Seconds',
                    end_range_enabled = True,
                    end_range = 1.337,
                    end_range_tolerance = 1.337,
                    end_secs_or_frames = 'Seconds',
                    not_at_any_other_time = True,
                    max_time_allowed = 1.337,
                    max_time_allowed_secs_or_frames = 'Seconds',
                    max_time_at_start = True,
                    max_time_allowed_at_start = 1.337,
                    max_time_allowed_at_start_secs_or_frames = 'Seconds',
                    max_time_at_end = True,
                    max_time_allowed_at_end = 1.337,
                    max_time_allowed_at_end_secs_or_frames = 'Seconds',
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                single_color_test = telestream_cloud_qc.models.single_color_test.single_color_test(
                    max_time_allowed = 1.337,
                    time_secs_or_frames = 'Seconds',
                    percentage_of_frame = 1.337,
                    ignore_below = 56,
                    reject_on_error = True,
                    checked = True, ),
                freeze_frame_test = telestream_cloud_qc.models.freeze_frame_test.freeze_frame_test(
                    sensitivity = 'Low',
                    time_range_enabled = True,
                    start_time = 1.337,
                    end_time = 1.337,
                    start_range_tolerance = 1.337,
                    time_secs_or_frames = 'Seconds',
                    end_range_enabled = True,
                    end_range = 1.337,
                    end_range_duration = 1.337,
                    end_range_tolerance = 1.337,
                    end_secs_or_frames = 'Seconds',
                    not_at_any_other_time = True,
                    max_time_allowed = 1.337,
                    max_time_allowed_secs_or_frames = 'Seconds',
                    reject_on_error = True,
                    checked = True, ),
                blockiness_test = telestream_cloud_qc.models.blockiness_test.blockiness_test(
                    quality_level = 56,
                    max_time_below_quality = 1.337,
                    max_time_below_quality_secs_or_frames = 'Seconds',
                    reject_on_error = True,
                    checked = True, ),
                field_order_test = telestream_cloud_qc.models.field_order_test.field_order_test(
                    flagged_field_order = 'UnknownFieldOrder',
                    baseband_enabled = True,
                    simple = True,
                    baseband_field_order = 'UnknownFieldOrder',
                    reject_on_error = True,
                    checked = True, ),
                cadence_test = telestream_cloud_qc.models.cadence_test.cadence_test(
                    check_cadence = True,
                    cadence_required = 'CadenceUnknown',
                    check_cadence_breaks = True,
                    report_cadence = True,
                    check_for_poor_cadence = True,
                    reject_on_error = True,
                    checked = True, ),
                dropout_test = telestream_cloud_qc.models.dropout_test.dropout_test(
                    sensitivity = 'Low',
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                digital_dropout_test = telestream_cloud_qc.models.digital_dropout_test.digital_dropout_test(
                    sensitivity = 'Low',
                    reject_on_error = True,
                    checked = True, ),
                stripe_test = telestream_cloud_qc.models.stripe_test.stripe_test(
                    sensitivity = 'Low',
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                corrupt_frame_test = telestream_cloud_qc.models.corrupt_frame_test.corrupt_frame_test(
                    sensitivity = 'Low',
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                flash_test = telestream_cloud_qc.models.flash_test.flash_test(
                    check_type = 'PSEStandard',
                    check_for_extended = True,
                    check_for_red = True,
                    check_for_patterns = True,
                    reject_on_error = True,
                    do_correction = True,
                    checked = True, ),
                media_offline_test = telestream_cloud_qc.models.media_offline_test.media_offline_test(
                    reject_on_error = True,
                    checked = True, )
            )
        else :
            return VideoConfig(
        )
    def testVideoConfig(self):
        """Test VideoConfig"""
        # Smoke-test construction both with and without optional params.
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
    # Allow running this test module directly: `python test_video_config.py`.
    unittest.main()
| 47.040441 | 132 | 0.511059 | 12,314 | 0.962407 | 0 | 0 | 0 | 0 | 0 | 0 | 951 | 0.074326 |
ea181d806b5b9dbcd288d38db58d0fa940f79080 | 223 | py | Python | topics/topic_1/9square_of_number.py | VladBaryliuk/my_trainings | 10c4bf2147c361ab918c591577a076b0d276ede0 | [
"Apache-2.0"
] | null | null | null | topics/topic_1/9square_of_number.py | VladBaryliuk/my_trainings | 10c4bf2147c361ab918c591577a076b0d276ede0 | [
"Apache-2.0"
] | null | null | null | topics/topic_1/9square_of_number.py | VladBaryliuk/my_trainings | 10c4bf2147c361ab918c591577a076b0d276ede0 | [
"Apache-2.0"
def square_of_number():
    """Print (and return) the square of the 150-digit number formed by
    repeating the digit string "179" fifty times.

    Returns:
        int: the computed square (also printed to stdout; the added return
        value is backward-compatible — the existing caller ignores it).
    """
    # "179" * 50 replaces the original string-append loop (one initial copy
    # plus 49 appended copies = 50 repetitions), which was quadratic.
    number = int("179" * 50)
    result = number ** 2
    print(result)
    return result
square_of_number()
| 22.3 | 40 | 0.623318 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.044843 |
ea1a1e4df5ca384ebf5252f38a1f95b8bff9c295 | 6,275 | py | Python | scannerProyecto2Tejada.py | tej17584/proyecto2DisenoLenguajes | a8aead2908eb4e87c8b7d9d373690766d6dfcd9c | [
"MIT"
] | null | null | null | scannerProyecto2Tejada.py | tej17584/proyecto2DisenoLenguajes | a8aead2908eb4e87c8b7d9d373690766d6dfcd9c | [
"MIT"
] | null | null | null | scannerProyecto2Tejada.py | tej17584/proyecto2DisenoLenguajes | a8aead2908eb4e87c8b7d9d373690766d6dfcd9c | [
"MIT"
] | null | null | null |
# Nombre: Alejandro Tejada
# Curso: Diseño lenguajes de programacion
# Fecha: Abril 2021
# Programa: scannerProyecto2Tejada.py
# Propósito: Este programa tiene como fin leer el file de entrada
# V 1.0
# imports
import pickle
from pprint import pprint as pp
class Scanner():
def __init__(self) -> None:
self.diccionarioSiguientePos = {}
self.AFDConstruidoFinal = []
self.nodosAceptacion = {}
self.stringPrueba = ""
self.abrirFiles()
self.abrirArchivoPrueba()
self.simular()
def abrirFiles(self):
# abrimos todos los files y asignamos
infile = open("dicionarioAFDFinal", 'rb')
self.AFDConstruidoFinal = pickle.load(infile)
infile.close()
infile = open("diccionarioSiguientePos", 'rb')
self.diccionarioSiguientePos = pickle.load(infile)
infile.close()
infile = open("diccionarioEstadosAceptacion", 'rb')
self.nodosAceptacion = pickle.load(infile)
infile.close()
def abrirArchivoPrueba(self):
with open('pruebas.txt', 'r') as f:
self.stringPrueba = f.read()
f.close()
def getStateNumberForArray(self, array):
for valor in self.AFDConstruidoFinal:
if(valor[1] == array):
return valor[0]
def mover(self, estado, caracter):
# Esta funcion retorna el siguiente estado
arrayEvaluar = self.AFDConstruidoFinal
arrayMover = []
for estados in estado:
for x in arrayEvaluar:
variableIn = (ord(caracter)) in x[2]
if(variableIn and len(x[3]) > 0 and estados == x[0]):
estadoSiguiente = self.getStateNumberForArray(x[3])
if(estadoSiguiente not in arrayMover):
arrayMover.append(estadoSiguiente)
return arrayMover
def getFinalStateAFN(self):
arrayValores = []
estadosFinales = self.getFinalStateNumber()
for valor in self.AFDConstruidoFinal:
for x in estadosFinales:
if(str(x) in valor[1]):
arrayValores.append(valor[0])
arrayValores = list(
dict.fromkeys(arrayValores))
return arrayValores
def getFinalStateAFNV2(self):
arrayValores = []
estadosFinales = self.getFinalStateNumber()
for valor in self.AFDConstruidoFinal:
for x in estadosFinales:
if(str(x) in valor[1]):
if(valor[1] not in arrayValores):
arrayValores.append(valor[1])
return arrayValores
def getFinalToken(self, tokenArray):
arrayValores = []
valorRetornar = ""
estadosFinales = self.getFinalStateNumber()
for valor in self.AFDConstruidoFinal:
for x in estadosFinales:
if(str(x) in valor[1]):
for w in tokenArray:
if(w == valor[0]):
if(valor[1] not in arrayValores):
arrayValores.append(valor[1])
dictAceptacion = {}
arrayNumeros = []
# ahora, miramos cual token es
for z in arrayValores:
for estadoPosibles in z:
for llave, valor in self.nodosAceptacion.items():
if(int(estadoPosibles) == llave):
dictAceptacion[llave] = valor
arrayNumeros.append(llave)
if(len(dictAceptacion) > 1):
valorMinimo = min(arrayNumeros)
for llave, valor in dictAceptacion.items():
if(valorMinimo == llave):
valorRetornar = valor
else:
for llave, valor in dictAceptacion.items():
valorRetornar = valor
return valorRetornar
def getFinalStateNumber(self):
array = []
for numero, valor in self.diccionarioSiguientePos.items():
if len(valor) == 0:
array.append(numero)
return array
def simular(self):
    # Simulate tokenization of self.stringPrueba against the constructed DFA:
    # advance greedily; when the lookahead character would kill the match
    # (S2 empty) emit the token accumulated so far and restart from state 0.
    # NOTE(review): `pp` is presumably a pprint alias imported elsewhere in
    # the file — verify; `EstadoACeptacion` is assigned but never used.
    print("------------------SIMULACION TOKENS INICIADA-------------------")
    S = [0]
    S2 = [0]
    acumulador = ""
    SAcumulado = []
    EstadoACeptacion = []
    for w in self.stringPrueba:
        SAcumulado.append(w)
    SAcumulado.append(" ")
    contador = 0
    while len(SAcumulado) > 0:
        # Last character of the input: classify whatever is accumulated.
        if(contador == len(self.stringPrueba)-1):
            caracterValuar = self.stringPrueba[contador]
            acumulador += caracterValuar
            S = self.mover(S, caracterValuar)
            token = self.getFinalToken(S)
            if(len(token) == 0):
                print("TOKEN INVALIDO del valor ", acumulador)
                break
            else:
                pp("El token del valor ----> " +
                   acumulador + " <--- es: " + token)
                break
        caracterValuar = self.stringPrueba[contador]
        caracterValuar2 = self.stringPrueba[contador+1]
        acumulador += caracterValuar
        S = self.mover(S, caracterValuar)
        # One-character lookahead decides whether the token ends here.
        S2 = self.mover(S, caracterValuar2)
        if(len(S2) == 0 and len(S) > 0):
            token = self.getFinalToken(S)
            if(len(token) == 0):
                print("TOKEN INVALIDO del valor: ")
                print(acumulador)
                S = [0]
                S2 = [0]
                acumulador = ""
                contador -= 1
            else:
                pp("El token del valor ----> " +
                   acumulador + " <---- es: " + token)
                S = [0]
                S2 = [0]
                acumulador = ""
                # contador += 1
        elif(len(S) == 0):
            print("TOKEN INVALIDO del valor: ")
            print(acumulador)
            S = [0]
            S2 = [0]
            acumulador = ""
        contador += 1
        popCharacter = SAcumulado.pop()
    print("---------------------------------------------------------------")
    print("")
# Script-style entry point: build the scanner at import/run time.
objeSCanner = Scanner()
| 33.55615 | 80 | 0.508526 | 5,988 | 0.953807 | 0 | 0 | 0 | 0 | 0 | 0 | 764 | 0.121695 |
ea1c12db4d5af227141198911161b74bbd00e24e | 1,271 | py | Python | .tox/scenario/lib/python2.7/site-packages/oslo_middleware/__init__.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | .tox/scenario/lib/python2.7/site-packages/oslo_middleware/__init__.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | .tox/scenario/lib/python2.7/site-packages/oslo_middleware/__init__.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Public API of the oslo_middleware package: each name is re-exported from
# its implementation module below.
__all__ = ['CatchErrors',
           'CorrelationId',
           'CORS',
           'Debug',
           'Healthcheck',
           'HTTPProxyToWSGI',
           'RequestId',
           'RequestBodySizeLimiter',
           'SSLMiddleware']
from oslo_middleware.catch_errors import CatchErrors
from oslo_middleware.correlation_id import CorrelationId
from oslo_middleware.cors import CORS
from oslo_middleware.debug import Debug
from oslo_middleware.healthcheck import Healthcheck
from oslo_middleware.http_proxy_to_wsgi import HTTPProxyToWSGI
from oslo_middleware.request_id import RequestId
from oslo_middleware.sizelimit import RequestBodySizeLimiter
from oslo_middleware.ssl import SSLMiddleware
| 39.71875 | 78 | 0.739575 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 682 | 0.536585 |
ea1c349d5a962884cd1e7e0a9b53304ae80ef64b | 648 | py | Python | programmers/lv2_review/openchat.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | programmers/lv2_review/openchat.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | programmers/lv2_review/openchat.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | # [카카오] 오픈채팅방
def solution(record):
    """Kakao open-chat problem: render the Enter/Leave feed using each
    user's FINAL nickname (the last one ever assigned to their uid)."""
    parsed = [entry.split(" ") for entry in record]
    # Pass 1 — the last nickname seen for a uid wins (Enter/Change both
    # carry a nickname as the third field).
    nickname = {}
    for fields in parsed:
        if len(fields) >= 3:
            nickname[fields[1]] = fields[2]
    # Pass 2 — one message per Enter/Leave event; Change emits nothing.
    feed = []
    for fields in parsed:
        action, uid = fields[0], fields[1]
        if action == "Enter":
            feed.append(f"{nickname[uid]}님이 들어왔습니다.")
        elif action == "Leave":
            feed.append(f"{nickname[uid]}님이 나갔습니다.")
    return feed
if __name__ == "__main__":
    # Sample input from the problem statement; expected output uses the
    # final nicknames (Prodo for uid1234, Ryan for uid4567).
    record = [
        "Enter uid1234 Muzi",
        "Enter uid4567 Prodo",
        "Leave uid1234",
        "Enter uid1234 Prodo",
        "Change uid4567 Ryan",
    ]
    print(solution(record))
ea1c8dc482231f31a61b11ef7f70060a085b4cca | 227 | py | Python | chanjet_openapi_python_sdk/exception/serialize_response_exception.py | Chanjet/chanjet-openapi-python-sdk- | a076ce11d6d1789e657b96d72bbbcff594dbb4e9 | [
"MIT"
] | 2 | 2021-08-12T05:22:56.000Z | 2021-09-08T09:03:38.000Z | chanjet_openapi_python_sdk/exception/serialize_response_exception.py | Chanjet/chanjet-openapi-python-sdk | a076ce11d6d1789e657b96d72bbbcff594dbb4e9 | [
"MIT"
] | null | null | null | chanjet_openapi_python_sdk/exception/serialize_response_exception.py | Chanjet/chanjet-openapi-python-sdk | a076ce11d6d1789e657b96d72bbbcff594dbb4e9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2021/8/6 15:01
# @Author : zc
# @Desc : Raised when a JSON-format response fails to deserialize into an instance object
class SerializeResponseException(Exception):
    """Raised when a JSON response cannot be deserialized into an instance."""

    def __init__(self, err_msg):
        # Bug fix: the original called super().__init__(self, err_msg),
        # which stored the exception instance itself in `args` and garbled
        # str(e). Exception.__init__ should receive only the message.
        super().__init__(err_msg)
| 22.7 | 44 | 0.647577 | 117 | 0.462451 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.509881 |
ea1cac9b8b5465978251233d73081760528adcd8 | 17,699 | py | Python | scrud_django/decorators.py | Django-Stack-Backend/Django-backend-React-frontend | 4c814ab9b97d70a259d4b93e30d118deba9831fd | [
"BSD-3-Clause"
] | 1 | 2021-11-22T20:39:26.000Z | 2021-11-22T20:39:26.000Z | scrud_django/decorators.py | Django-Stack-Backend/Django-backend-React-frontend | 4c814ab9b97d70a259d4b93e30d118deba9831fd | [
"BSD-3-Clause"
] | null | null | null | scrud_django/decorators.py | Django-Stack-Backend/Django-backend-React-frontend | 4c814ab9b97d70a259d4b93e30d118deba9831fd | [
"BSD-3-Clause"
] | null | null | null | from datetime import timezone
from functools import partial, update_wrapper
from django.utils.cache import get_conditional_response
from django.utils.http import http_date, quote_etag
from rest_framework import status
from rest_framework.metadata import BaseMetadata
from rest_framework.response import Response
from rest_framework.views import APIView
from scrud_django import __version__
from scrud_django.utils import get_string_or_evaluate, link_content
class ScrudfulViewFunc:
    """Callable wrapper that makes a DRF view method "SCRUDful".

    Adds HTTP conditional-request handling (ETag / Last-Modified), enforces
    If-Match / If-Unmodified-Since on PUT and DELETE, and decorates responses
    with schema/context ``Link`` headers plus the CORS expose header.
    Each ``*_or_func`` argument may be a literal string or a callable
    evaluated with the view args (see get_string_or_evaluate).
    """
    def __init__(
        self,
        view_method,
        view_is_class_method=True,
        etag_func=None,
        last_modified_func=None,
        schema_link_or_func=None,
        schema_rel_or_func=None,
        schema_type_or_func=None,
        context_link_or_func=None,
        context_rel_or_func=None,
        context_type_or_func=None,
        http_schema_or_func=None,
    ):
        self.view_is_class_method = view_is_class_method
        self.view_method = view_method
        self.etag_func = etag_func
        self.last_modified_func = last_modified_func
        self.schema_link_or_func = schema_link_or_func
        self.schema_rel_or_func = schema_rel_or_func
        self.schema_type_or_func = schema_type_or_func
        self.context_link_or_func = context_link_or_func
        self.context_rel_or_func = context_rel_or_func
        self.context_type_or_func = context_type_or_func
        self.http_schema_or_func = http_schema_or_func
        # Preserve the wrapped view's metadata (__name__, __doc__, ...).
        update_wrapper(self, self.view_method)
    def __get__(self, obj, objtype):
        # Descriptor protocol: bind the owning instance as the first arg so
        # the wrapper works when assigned as a class attribute.
        return partial(self.__call__, obj)
    def __call__(self, *args, **kwargs):
        """Run the SCRUD protocol around the wrapped view."""
        # args[0] is `self` of the owning view class when wrapping a method.
        if self.view_is_class_method:
            request = args[1]
        else:
            request = args[0]
        # Unsafe mutating verbs must carry the conditional headers that the
        # resource supports, otherwise answer 400 with the missing names.
        if request.method in ("PUT", "DELETE"):
            missing_required_headers = []
            if self.etag_func and not request.META.get("HTTP_IF_MATCH"):
                missing_required_headers.append("If-Match")
            if self.last_modified_func and not request.META.get(
                "HTTP_IF_UNMODIFIED_SINCE"
            ):
                missing_required_headers.append("If-Unmodified-Since")
            if missing_required_headers:
                # TODO Define standard error json
                response = Response(
                    {"missing-required-headers": missing_required_headers}, status=400,
                )
                return response
        # Compute values (if any) for the requested resource.
        def get_last_modified():
            # Format the resource's last-modified datetime (assumed naive
            # UTC — note the tzinfo replace) as an HTTP date string.
            if self.last_modified_func:
                last_modified = self.last_modified_func(*args, **kwargs)
                if last_modified:
                    return http_date(
                        last_modified.replace(tzinfo=timezone.utc).timestamp()
                    )
            return None
        etag = None
        last_modified = None
        if request.method not in ("POST", "OPTIONS"):
            if self.etag_func:
                etag = self.etag_func(*args, **kwargs)
                # Package version is folded into the ETag so deployments
                # invalidate client caches.
                etag = etag + __version__ if etag else None
                etag = quote_etag(etag) if etag else None
            last_modified = get_last_modified()
        else:
            etag = None
            last_modified = None
        # Returns 304/412 when the preconditions say so, else None.
        response = get_conditional_response(
            request, etag=etag, last_modified=last_modified
        )
        if response is None:
            response = self.view_method(*args, **kwargs)
            schema_link = self.schema_link_header(*args, **kwargs) or ""
            context_link = self.context_link_header(*args, **kwargs) or ""
            join_links = ", " if schema_link and context_link else ""
            link_content = schema_link + join_links + context_link
            if etag:
                response["ETag"] = etag
            if last_modified:
                response["Last-Modified"] = last_modified
            if link_content:
                response["Link"] = link_content
        self.add_expose_headers(response)
        return response
    def add_expose_headers(self, response):
        """If the Link and/or Location header are provided on the response add the
        'Access-Control-Expose-Headers` header to expose them over CORS requests.
        """
        expose_headers = ""
        if "Link" in response:
            expose_headers = "Link"
        if "Location" in response:
            if expose_headers:
                expose_headers = expose_headers + ", "
            expose_headers = expose_headers + "Location"
        if expose_headers:
            response["Access-Control-Expose-Headers"] = expose_headers
    def schema_link(self, *args, **kwargs):
        # Resolve the schema URL (string or callable of the view args).
        return get_string_or_evaluate(self.schema_link_or_func, *args, **kwargs)
    def schema_link_header(self, *args, **kwargs):
        """Build the schema ``Link`` header value, or None when no schema."""
        link = self.schema_link(*args, **kwargs)
        if link:
            link_rel = (
                get_string_or_evaluate(self.schema_rel_or_func, *args, **kwargs,)
                or "describedBy"
            )
            link_type = (
                get_string_or_evaluate(self.schema_type_or_func, *args, **kwargs,)
                or "application/json"
            )
            return link_content(link, link_rel, link_type)
        return None
    def context_link(self, *args, **kwargs):
        # Resolve the JSON-LD context URL (string or callable).
        return get_string_or_evaluate(self.context_link_or_func, *args, **kwargs)
    def context_link_header(self, *args, **kwargs):
        """Build the JSON-LD context ``Link`` header value, or None."""
        link = self.context_link(*args, **kwargs)
        if link:
            link_rel = (
                get_string_or_evaluate(self.context_rel_or_func, *args, **kwargs,)
                or "http://www.w3.org/ns/json-ld#context"
            )
            link_type = (
                get_string_or_evaluate(self.context_type_or_func, *args, **kwargs,)
                or "application/ld+json"
            )
            return link_content(link, link_rel, link_type)
        return None
def scrudful(
    etag_func=None,
    last_modified_func=None,
    schema_link_or_func=None,
    schema_rel_or_func=None,
    schema_type_or_func=None,
    context_link_or_func=None,
    context_rel_or_func=None,
    context_type_or_func=None,
    http_schema_or_func=None,
):
    """Decorator to make a view method SCRUDful.

    Wraps the view in a ScrudfulViewFunc, forwarding every configuration
    argument unchanged. Each ``*_or_func`` may be a literal string or a
    callable evaluated with the view's arguments.
    """
    # TODO what about 400 Bad Request context and schema?
    def decorator(view_method):
        return ScrudfulViewFunc(
            view_method,
            etag_func=etag_func,
            last_modified_func=last_modified_func,
            schema_link_or_func=schema_link_or_func,
            schema_rel_or_func=schema_rel_or_func,
            schema_type_or_func=schema_type_or_func,
            context_link_or_func=context_link_or_func,
            # Bug fix: these two previously forwarded the *schema* rel/type
            # (copy-paste error), so callers could never customize the
            # context link's rel/type. scrudful_api_view wires them correctly.
            context_rel_or_func=context_rel_or_func,
            context_type_or_func=context_type_or_func,
            http_schema_or_func=http_schema_or_func,
        )
    return decorator
def scrudful_api_view(
    etag_func=None,
    last_modified_func=None,
    schema_link_or_func=None,
    schema_rel_or_func=None,
    schema_type_or_func=None,
    context_link_or_func=None,
    context_rel_or_func=None,
    context_type_or_func=None,
    http_schema_or_func=['GET'],
):
    """Decorator turning a plain function into a SCRUDful DRF class-based view.

    Builds an APIView subclass on the fly whose listed HTTP methods all
    dispatch to the wrapped function via a single ScrudfulViewFunc handler.
    NOTE: ``http_schema_or_func=['GET']`` is a mutable default — safe here
    because it is only read, never mutated.
    """
    def decorator(view_method, *args, **kwargs):
        http_method_names = http_schema_or_func
        # OPTIONS is always allowed so metadata introspection works.
        allowed_methods = set(http_method_names) | {'options'}
        cls_attr = {
            '__doc__': view_method.__doc__,
            'metadata_class': ScrudfulAPIViewMetadata,
        }
        # The lambda drops the bound `self` that APIView passes in, so the
        # wrapped function keeps its original (self-less) signature.
        handler = ScrudfulViewFunc(
            lambda self, *args, **kwargs: view_method(*args, **kwargs),
            etag_func=etag_func,
            last_modified_func=last_modified_func,
            schema_link_or_func=schema_link_or_func,
            schema_rel_or_func=schema_rel_or_func,
            schema_type_or_func=schema_type_or_func,
            context_link_or_func=context_link_or_func,
            context_rel_or_func=context_rel_or_func,
            context_type_or_func=context_type_or_func,
        )
        # The same handler serves every declared verb.
        for method in http_method_names:
            cls_attr[method.lower()] = handler
        ScrudAPIView = type('ScrudAPIView', (APIView,), cls_attr)
        ScrudAPIView.http_method_names = [method.lower() for method in allowed_methods]
        # Masquerade as the wrapped function for URL/reverse/debug purposes.
        ScrudAPIView.__name__ = view_method.__name__
        ScrudAPIView.__module__ = view_method.__module__
        ScrudAPIView.permission_classes = getattr(
            view_method, 'permission_classes', APIView.permission_classes
        )
        ScrudAPIView.schema = getattr(view_method, 'schema', APIView.schema)
        ScrudAPIView.schema_link_or_func = schema_link_or_func
        ScrudAPIView.context_link_or_func = context_link_or_func
        # ScrudAPIView.options = options
        new_view_method = ScrudAPIView.as_view()
        return new_view_method
    return decorator
class ScrudfulMetadata(BaseMetadata):
    """DRF metadata class producing an OpenAPI-flavoured OPTIONS document.

    Detail requests (any extra args/kwargs, i.e. a pk in the URL) report
    get/put/delete; list requests report get/post. Verbs whose handler is
    absent on the viewset are omitted from the document.
    """
    def determine_metadata(self, request, view, *args, **kwargs):
        if len(args) > 0 or len(kwargs) > 0: # this is a detail request
            return self.determine_metadata_for_detail(request, view)
        return self.determine_metadata_for_list(request, view)
    def determine_metadata_for_detail(self, request, view):
        """Metadata for a single-resource URL: retrieve/update/destroy."""
        metadata = dict()
        metadata.update(
            {
                key: value
                for key, value in {
                    "get": self.determine_metadata_for_get(request, view, "retrieve"),
                    "put": self.determine_metadata_for_put(request, view),
                    "delete": self.determine_metadata_for_delete(request, view),
                }.items()
                if value is not None
            }
        )
        return metadata
    def determine_metadata_for_list(self, request, view):
        """Metadata for a collection URL: create/list."""
        metadata = dict()
        metadata.update(
            {
                key: value
                for key, value in {
                    "post": self.determine_metadata_for_post(request, view),
                    "get": self.determine_metadata_for_get(request, view, "list"),
                }.items()
                if value is not None
            }
        )
        return metadata
    def get_method(self, view, name):
        """Return the ScrudfulViewFunc behind the named view action, or None.

        Actions on the view are partials bound by ScrudfulViewFunc.__get__,
        so the wrapper is recovered via .func.__self__.
        """
        method_partial = getattr(view, name, None)
        if method_partial:
            return method_partial.func.__self__
        return None
    def determine_metadata_for_post(self, request, view, name="create"):
        """Describe the create action: request body plus a 201 response."""
        create_method = self.get_method(view, name)
        if create_method is None:
            return None
        schema_link = create_method.schema_link(view, request)
        context_link = create_method.context_link(view, request)
        request_body = {
            "description": "The content for the resource to be created.",
            "required": True,
        }
        if schema_link or context_link:
            json_content = {}
            if schema_link:
                json_content["schema"] = schema_link
            if context_link:
                json_content["context"] = context_link
            request_body["content"] = {
                "application/json": json_content,
            }
        metadata = {
            "requestBody": request_body,
            "responses": {"201": {"description": "CREATED"}},
        }
        return metadata
    def determine_metadata_for_get(self, request, view, name):
        """Describe a read action (retrieve or list): a 200 response with
        optional schema/context content links."""
        list_method = self.get_method(view, name)
        if list_method is None:
            return
        schema_link = list_method.schema_link(view, request)
        context_link = list_method.context_link(view, request)
        json_content = None
        if schema_link or context_link:
            json_content = {}
            if schema_link:
                json_content["schema"] = schema_link
            if context_link:
                json_content["context"] = context_link
        responses = {
            "200": {"description": "OK"},
        }
        if json_content:
            responses["200"]["content"] = {
                "application/json": json_content,
            }
        return {
            "responses": responses,
        }
    def required_conditional_headers(self, method):
        """OpenAPI parameter specs for the conditional headers the wrapped
        method enforces (If-Match / If-Unmodified-Since), or None."""
        supports_etag = method.etag_func is not None
        supports_last_modified = method.last_modified_func is not None
        parameters = None
        if supports_etag or supports_last_modified:
            parameters = []
            if supports_etag:
                parameters.append(
                    {
                        "in": "header",
                        "name": "If-Match",
                        "schema": {"type": "string"},
                        "required": True,
                    }
                )
            if supports_last_modified:
                parameters.append(
                    {
                        "in": "header",
                        "name": "If-Unmodified-Since",
                        "schema": {"type": "string"},
                        "required": True,
                    }
                )
        return parameters
    def determine_metadata_for_put(self, request, view, name="update"):
        """Describe the update action: body, 200 response, required headers.

        NOTE(review): the body description says "created" — likely a
        copy-paste from the POST branch; confirm before changing the text.
        """
        update_method = self.get_method(view, name)
        if update_method is None:
            return
        schema_link = update_method.schema_link(view, request)
        context_link = update_method.context_link(view, request)
        request_body = {
            "description": "The content for the resource to be created.",
            "required": True,
        }
        if schema_link or context_link:
            json_content = {}
            if schema_link:
                json_content["schema"] = schema_link
            if context_link:
                json_content["context"] = context_link
            request_body["content"] = {
                "application/json": json_content,
            }
        metadata = {
            "requestBody": request_body,
            "responses": {"200": {"description": "OK"}},
        }
        parameters = self.required_conditional_headers(update_method)
        if parameters:
            metadata["parameters"] = parameters
        return metadata
    def determine_metadata_for_delete(self, request, view, name="destroy"):
        """Describe the destroy action: 200 response plus required headers."""
        delete_method = self.get_method(view, name)
        if delete_method is None:
            return None
        metadata = {
            "responses": {"200": {"description": "OK"}},
        }
        parameters = self.required_conditional_headers(delete_method)
        if parameters:
            metadata["parameters"] = parameters
        return metadata
class ScrudfulAPIViewMetadata(ScrudfulMetadata):
    """Metadata for function-based SCRUD views, where every verb handler
    lives directly on the view under its own lowercase name."""

    def determine_metadata(self, request, view, *args, **kwargs):
        candidates = {
            "get": self.determine_metadata_for_get(request, view, "get"),
            "post": self.determine_metadata_for_post(request, view, "post"),
            "put": self.determine_metadata_for_put(request, view, "put"),
            "delete": self.determine_metadata_for_delete(
                request, view, "delete"
            ),
        }
        # Drop verbs the view does not implement.
        return {verb: spec for verb, spec in candidates.items() if spec is not None}
def options(view_instance, request, *args, **kwargs):
    """Handle OPTIONS by returning the SCRUD metadata document (HTTP 200)."""
    metadata = ScrudfulMetadata().determine_metadata(
        request, view_instance, *args, **kwargs
    )
    return Response(metadata, status=status.HTTP_200_OK)
def scrudful_viewset(cls):
    """Class decorator that makes every CRUD action of a viewset SCRUDful.

    Configuration is read from the viewset's inner ``Meta`` class; item
    actions (create/retrieve/update/destroy plus Meta.extra_view_methods)
    share one configuration, while ``list`` has its own ``list_*`` keys.
    Also installs the module-level ``options`` handler on the class.
    """
    setattr(cls, "options", options)
    meta = getattr(cls, "Meta", None)
    etag_func = getattr(meta, "etag_func", None)
    last_modified_func = getattr(meta, "last_modified_func", None)
    schema_link_or_func = getattr(meta, "schema_link_or_func", None)
    schema_rel_or_func = getattr(meta, "schema_rel_or_func", None)
    schema_type_or_func = getattr(meta, "schema_type_or_func", None)
    context_link_or_func = getattr(meta, "context_link_or_func", None)
    context_rel_or_func = getattr(meta, "context_rel_or_func", None)
    context_type_or_func = getattr(meta, "context_type_or_func", None)
    extra_view_methods = getattr(meta, "extra_view_methods", [])
    scrudful_item = scrudful(
        etag_func=etag_func,
        last_modified_func=last_modified_func,
        schema_link_or_func=schema_link_or_func,
        schema_rel_or_func=schema_rel_or_func,
        schema_type_or_func=schema_type_or_func,
        context_link_or_func=context_link_or_func,
        context_rel_or_func=context_rel_or_func,
        context_type_or_func=context_type_or_func,
    )
    view_methods = ["create", "retrieve", "update", "destroy"]
    view_methods.extend(extra_view_methods)
    # NOTE(review): a missing action yields method=None here and would be
    # wrapped anyway — assumes all four actions exist on the viewset.
    for method_name in view_methods:
        method = getattr(cls, method_name, None)
        setattr(cls, method_name, scrudful_item(method))
    if hasattr(cls, "list"):
        # list gets a separate wrapper driven by the Meta ``list_*`` keys.
        scrudful_list = scrudful(
            etag_func=getattr(meta, "list_etag_func", None),
            last_modified_func=getattr(meta, "list_last_modified_func", None),
            schema_link_or_func=getattr(meta, "list_schema_link_or_func", None),
            schema_rel_or_func=getattr(meta, "list_schema_rel_or_func", None),
            schema_type_or_func=getattr(meta, "list_schema_type_or_func", None),
            context_link_or_func=getattr(meta, "list_context_link_or_func", None),
            context_rel_or_func=getattr(meta, "list_context_rel_or_func", None),
            context_type_or_func=getattr(meta, "list_context_type_or_func", None),
        )
        list_method = getattr(cls, "list")
        setattr(cls, "list", scrudful_list(list_method))
    return cls
| 37.818376 | 87 | 0.611277 | 11,928 | 0.673936 | 0 | 0 | 0 | 0 | 0 | 0 | 1,967 | 0.111136 |
ea1ddd5fb3b7764b25bce86e343d6fc2d860a274 | 670 | py | Python | cloud/scripts/estudos/fiis_ifix_gestores.py | codennine/trendfii | d13ee68ddcf80b0d58384c2c1f5972f210ae9d04 | [
"MIT"
] | null | null | null | cloud/scripts/estudos/fiis_ifix_gestores.py | codennine/trendfii | d13ee68ddcf80b0d58384c2c1f5972f210ae9d04 | [
"MIT"
] | 4 | 2019-06-19T20:25:43.000Z | 2021-06-02T03:43:05.000Z | cloud/scripts/estudos/fiis_ifix_gestores.py | codennine/trendfii | d13ee68ddcf80b0d58384c2c1f5972f210ae9d04 | [
"MIT"
] | null | null | null | #coding=utf-8
import os
from bs4 import BeautifulSoup
files = []
gestores = {
'EDY11': {'fiis': [], 'vacancia':0}
}
for r,d,f in os.walk('../gestores_ifix'):
for file in f:
handle = open(os.path.join(r, file), 'r')
html = handle.read()
soup = BeautifulSoup(html, 'html.parser')
handle.close()
gestor = u'%s'%(soup.find('div').text)
print(gestor)
if(not gestor in gestores):
gestores[gestor] = {
'fiis': [],
'vacancia': 0
}
gestores[gestor]['fiis'].append(file.replace('.html', ''))
files.append(file)
print(gestores) | 23.103448 | 66 | 0.51194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.165672 |
ea1e3a374da6546ba9cfee034f40a9ff5ef3cbde | 86 | py | Python | RL/Custom-env/Custom-env/envs/__init__.py | Poppins001/Projects | 386b741c2a8361289d73af658c8c6779ec8315b2 | [
"MIT"
] | 1 | 2018-06-01T14:40:49.000Z | 2018-06-01T14:40:49.000Z | RL/Custom-env/Custom-env/envs/__init__.py | Poppins001/Projects | 386b741c2a8361289d73af658c8c6779ec8315b2 | [
"MIT"
] | null | null | null | RL/Custom-env/Custom-env/envs/__init__.py | Poppins001/Projects | 386b741c2a8361289d73af658c8c6779ec8315b2 | [
"MIT"
] | null | null | null | from Custom-env.envs.Env1 import ENV1_NAME
from Custom-env.envs.Env2 import ENV2_NAME
| 28.666667 | 42 | 0.837209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ea1fff46a82c83c0a760acd9e59925eaa45ebb42 | 1,176 | py | Python | Trie/14. Longest Common Prefix/solution.py | Shalini180/LeetCode-Solutions | 0ea364fa40b69dd129230903a0bdbe0fc116db08 | [
"Unlicense"
] | 9 | 2021-03-24T11:21:03.000Z | 2022-02-14T05:05:48.000Z | Trie/14. Longest Common Prefix/solution.py | Shalini180/LeetCode-Solutions | 0ea364fa40b69dd129230903a0bdbe0fc116db08 | [
"Unlicense"
] | 38 | 2021-10-07T18:04:12.000Z | 2021-12-05T05:53:27.000Z | Trie/14. Longest Common Prefix/solution.py | Shalini180/LeetCode-Solutions | 0ea364fa40b69dd129230903a0bdbe0fc116db08 | [
"Unlicense"
] | 27 | 2021-10-06T19:55:48.000Z | 2021-11-18T16:53:20.000Z | '''
Write a function to find the longest common prefix string amongst an array of strings.
If there is no common prefix, return an empty string "".
Example 1:
Input: strs = ["flower","flow","flight"]
Output: "fl"
Constraints:
1 <= strs.length <= 200
0 <= strs[i].length <= 200
strs[i] consists of only lower-case English letters.
'''
class Node:
    """Trie node: a pass-through counter plus lazily-created children."""

    def __init__(self):
        self.count = 0
        # defaultdict(Node) materializes a fresh child on first lookup, so
        # insertion never needs an explicit existence check.
        self.children = defaultdict(Node)
class Trie:
    """Prefix tree counting how many inserted words pass through each node."""

    def __init__(self):
        self.t = Node()

    def add(self, st):
        """Insert *st*, incrementing the counter of every node on its path;
        return the counter at the final node."""
        node = self.t
        for ch in st:
            node = node.children[ch]
            node.count += 1
        return node.count

    def traverse(self, st, n):
        """Count nodes along *st* (root included) whose counter equals *n*."""
        node = self.t
        matches = 1 if node.count == n else 0
        for ch in st:
            node = node.children[ch]
            if node.count == n:
                matches += 1
        return matches
class Solution:
    """LeetCode 14 — longest common prefix via a counting trie."""

    def longestCommonPrefix(self, strs: List[str]) -> str:
        """Return the longest prefix shared by every string in *strs*.

        Every word is inserted into a trie whose nodes count pass-throughs;
        walking the first word, the nodes crossed by all len(strs) words
        form exactly the common prefix.
        """
        head = Trie()
        for st in strs:
            head.add(st)
        # Cleanup: the original kept an unused `ma = head.add(st)` binding,
        # a dead `ans = 0` initializer, and a leftover debug print(ans).
        ans = head.traverse(strs[0], len(strs))
        return strs[0][:ans]
| 23.52 | 86 | 0.547619 | 824 | 0.70068 | 0 | 0 | 0 | 0 | 0 | 0 | 357 | 0.303571 |
ea20af329182c07294d2c1bbed18aee79997d85a | 32 | py | Python | uresnet/iotools/__init__.py | NuTufts/uresnet_pytorch | 3a05f2349ae1e9601d05a80384920d8a22b4bc34 | [
"MIT"
] | null | null | null | uresnet/iotools/__init__.py | NuTufts/uresnet_pytorch | 3a05f2349ae1e9601d05a80384920d8a22b4bc34 | [
"MIT"
] | null | null | null | uresnet/iotools/__init__.py | NuTufts/uresnet_pytorch | 3a05f2349ae1e9601d05a80384920d8a22b4bc34 | [
"MIT"
] | null | null | null | from .iotools import io_factory
| 16 | 31 | 0.84375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ea210e1e988d4d420b55731b7d3f040fb88524e4 | 11,867 | py | Python | model_selection.py | elalamik/Regression_Project_Predicting_Age_of_Abalones | f3123ee7ae96fa963d7db7c75699779fd03cd05f | [
"MIT"
] | null | null | null | model_selection.py | elalamik/Regression_Project_Predicting_Age_of_Abalones | f3123ee7ae96fa963d7db7c75699779fd03cd05f | [
"MIT"
] | null | null | null | model_selection.py | elalamik/Regression_Project_Predicting_Age_of_Abalones | f3123ee7ae96fa963d7db7c75699779fd03cd05f | [
"MIT"
] | null | null | null | import statsmodels.api as sm
import statsmodels.formula.api as smf
import numpy as np
import pandas as pd
from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS
from sklearn.linear_model import LinearRegression
import sys; import re
def AIC(data, model, model_type, k=2):
    """Akaike information criterion of a fitted model.

    linear: n*log(SSR/n) + k*(p+1), where p = model.df_model; passing
    k=log(n) turns this into the BIC penalty. logistic: delegates to the
    criterion statsmodels already computed (model.aic, k ignored).
    Returns None for any other model_type (mirrors the original contract).
    """
    if model_type == 'logistic':
        return model.aic
    if model_type == 'linear':
        n = len(data)
        n_params = model.df_model + 1
        return n * np.log(model.ssr / n) + k * n_params
def Cp(data, model, sigma2):
    """Mallows' Cp of a fitted OLS result: SSR/sigma2 - (n - 2p - 1),
    with p = model.df_model and sigma2 the full model's error variance."""
    n = len(data)
    adjustment = n - 2. * model.df_model - 1
    return model.ssr / sigma2 - adjustment
def BIC(data, model, model_type='linear'):
    """Bayesian information criterion of a fitted model.

    linear: n*log(SSR/TSS) + (p+1)*log(n). logistic: delegates to
    model.bicllf — NOTE(review): recent statsmodels spells this attribute
    ``bic_llf``; confirm it exists on the results object in use.
    Returns None for any other model_type.
    """
    if model_type == 'logistic':
        return model.bicllf
    if model_type == 'linear':
        n = len(data)
        goodness = np.log(model.ssr / model.centered_tss) * n
        penalty = (model.df_model + 1) * np.log(n)
        return goodness + penalty
def regressor(y,X, model_type):
    """Fit and return a statsmodels results object for y ~ X.

    'linear'  -> OLS; 'logistic' -> binomial GLM.
    NOTE(review): any other model_type leaves ``regressor_fitted`` unbound
    and raises UnboundLocalError at the return — consider an explicit
    ValueError instead.
    """
    if model_type =="linear":
        regressor = sm.OLS(y, X)
        regressor_fitted = regressor.fit()
    elif model_type == 'logistic':
        regressor = sm.GLM(y, X,family=sm.families.Binomial())
        regressor_fitted = regressor.fit()
    return regressor_fitted
def criterion_f(X, model, model_type, elimination_criterion):
    """Dispatch to the requested selection criterion.

    'aic' uses the standard penalty k=2; 'bic' reuses the AIC formula with
    k=log(n), which equals the BIC in the linear case. Any other value
    yields None.
    """
    if elimination_criterion == 'aic':
        penalty = 2
    elif elimination_criterion == 'bic':
        penalty = np.log(len(X))
    else:
        return None
    return AIC(X, model, model_type, k=penalty)
def detect_dummies(X, variable):
    '''
    Return the list of columns that must enter/leave the model together
    with *variable*.

    Dummy groups are recognized two ways: a 0/1 two-valued column, and a
    name prefix before '[' or '_' (patsy-style ``Sex[T.M]`` or pandas-style
    ``Sex_M``), in which case every sibling column sharing the prefix is
    included. A plain variable is returned alone.

    Bug fix: a 0/1 column whose name has no '['/'_' used to be appended by
    BOTH branches, so callers added/removed it twice and crashed on the
    second ``list.remove``; the result is now de-duplicated (order kept).
    '''
    cols = X.columns.tolist()
    dummy_cols = []
    # A two-valued column ranging over {0, 1} is itself a dummy.
    if (len(X[variable].value_counts())==2) and (X[variable].min()==0) and (X[variable].max()==1):
        cols.remove(variable)
        dummy_cols.append(variable)
    # Group siblings by the prefix before '[' or '_' (raw string: the old
    # non-raw '\[' escape is deprecated syntax).
    prefix_match = re.search(r'^([a-zA-Z0-9]+)[\[_]', variable)
    if prefix_match:
        prefix = prefix_match.group(1)
        for var in cols:
            if prefix in var:
                dummy_cols.append(var)
    else:
        dummy_cols.append(variable)
    return list(dict.fromkeys(dummy_cols))
def forwardSelection(X, y, model_type ="linear",elimination_criterion = "aic",verbose=False):
    '''
    Stepwise model selection by the given elimination_criterion ('aic' or
    'bic'), adding one variable (or dummy group) at a time.

    Forward selection: starts from the intercept-only model and grows
    toward the full model over all columns of X.

    X : predictors, pandas DataFrame (n x p), must contain an "Intercept" column
    y : output, pandas Series / one-column DataFrame or 1d array of length n
    model_type : 'linear' (OLS) or 'logistic' (binomial GLM)
    verbose : print the criterion table at each step
    ----
    returns the final fitted model on the selected variables
    '''
    return __forwardSelectionRaw__(X, y, model_type = model_type,elimination_criterion = elimination_criterion,verbose=verbose)
def backwardSelection(X, y, model_type ="linear",elimination_criterion = "aic",verbose=False):
    '''
    Stepwise model selection by the given elimination_criterion ('aic' or
    'bic'), removing one variable (or dummy group) at a time.

    Backward selection: starts from the full model over all columns of X
    and shrinks toward the intercept-only model.

    X : predictors, pandas DataFrame (n x p), must contain an "Intercept" column
    y : output, pandas Series / one-column DataFrame or 1d array of length n
    model_type : 'linear' (OLS) or 'logistic' (binomial GLM)
    verbose : print the criterion table at each step
    ----
    returns the final fitted model on the selected variables
    '''
    return __backwardSelectionRaw__(X, y, model_type = model_type,elimination_criterion = elimination_criterion,verbose=verbose )
def bothSelection(X, y, model_type ="linear",elimination_criterion = "aic",start='full',verbose=False):
    # Bidirectional stepwise selection: at every step tries both adding a
    # previously removed variable and deleting a selected one, keeping the
    # move that most improves the criterion. start='full' begins from the
    # complete model, anything else from the intercept-only model.
    return __bothSelectionRaw__(X, y, model_type = model_type,elimination_criterion = elimination_criterion,start=start,verbose=verbose)
def __forwardSelectionRaw__(X, y, model_type ="linear",elimination_criterion = "aic",verbose=False):
    """Greedy forward search: at each step fit one candidate model per
    remaining variable (dummy groups enter as a block via detect_dummies)
    and keep the best addition while it improves the criterion.

    NOTE(review): the inner ``for i in cols_to_add`` shadows the outer step
    counter ``i`` (harmless but fragile); ``DataFrame.append`` was removed
    in pandas 2.0 — this code requires pandas < 2.
    """
    cols = X.columns.tolist()
    ## Begin from a simple model with only intercept
    selected_cols = ["Intercept"]
    other_cols = cols.copy()
    other_cols.remove("Intercept")
    model = regressor(y, X[selected_cols],model_type)
    criterion = criterion_f(X,model,model_type,elimination_criterion)
    # At most one variable/group can enter per iteration, so p steps suffice.
    for i in range(X.shape[1]):
        aicvals = pd.DataFrame(columns = ["Cols","aic"])
        for j in other_cols:
            cols_to_add = detect_dummies(X,j)
            model = regressor(y, X[selected_cols+cols_to_add],model_type)
            aicvals = aicvals.append(pd.DataFrame([[j, criterion_f(X,model,model_type,elimination_criterion)]],columns = ["Cols","aic"]),ignore_index=True)
        # Best (lowest-criterion) candidate first.
        aicvals = aicvals.sort_values(by = ["aic"]).reset_index(drop=True)
        if verbose :
            print(aicvals)
        if aicvals.shape[0] > 0:
            new_criterion = aicvals["aic"][0]
            if new_criterion < criterion:
                cols_to_add = detect_dummies(X,aicvals["Cols"][0])
                print("Entered :", aicvals["Cols"][0], "\tCriterion :", aicvals["aic"][0])
                for i in cols_to_add:
                    selected_cols.append(i)
                    other_cols.remove(i)
                criterion = new_criterion
            else:
                # No candidate improves the criterion: stop.
                print("break : criterion")
                break
    model = regressor(y,X[selected_cols],model_type)
    print(model.summary())
    print("Criterion: "+str(criterion_f(X,model,model_type,elimination_criterion)))
    print("Final Variables:", selected_cols)
    return model
def __backwardSelectionRaw__(X, y, model_type ="linear",elimination_criterion = "aic",verbose=False):
    """Greedy backward search: starting from the full model, at each step
    refit without each selected variable (dummy groups leave as a block)
    and keep the best deletion while it improves the criterion.

    NOTE(review): the inner ``for i in cols_to_remove`` shadows the outer
    step counter ``i``; requires pandas < 2 (DataFrame.append).
    """
    selected_cols = X.columns.tolist()
    selected_cols.remove('Intercept')
    model = regressor(y,X,model_type)
    criterion = criterion_f(X,model,model_type,elimination_criterion)
    for i in range(X.shape[1]):
        aicvals = pd.DataFrame(columns = ["Cols","aic"])
        if len(selected_cols)==0:
            print("break : Only Intercept left")
            break
        else :
            for j in selected_cols:
                temp_cols = selected_cols.copy()
                ### Detect dummies and remove several columns if necessary
                cols_to_remove = detect_dummies(X,j)
                for i in cols_to_remove:
                    temp_cols.remove(i)
                model = regressor(y, X[['Intercept']+temp_cols],model_type)
                aicvals = aicvals.append(pd.DataFrame([[j, criterion_f(X,model,model_type,elimination_criterion)]],columns = ["Cols","aic"]),ignore_index=True)
            # Best (lowest-criterion) deletion first.
            aicvals = aicvals.sort_values(by = ["aic"]).reset_index(drop=True)
            if verbose :
                print(aicvals)
            new_criterion = aicvals["aic"][0]
            if new_criterion < criterion:
                print("Eliminated :" ,aicvals["Cols"][0],"\tCriterion :", aicvals["aic"][0])
                cols_removed = detect_dummies(X,aicvals["Cols"][0])
                for i in cols_removed:
                    selected_cols.remove(i)
                criterion = new_criterion
            else:
                # No deletion improves the criterion: stop.
                print("break : criterion")
                break
    model = regressor(y,X[['Intercept']+selected_cols],model_type)
    print(str(model.summary())+"\nCriterion: "+ str(criterion_f(X,model,model_type,elimination_criterion)))
    print("Final Variables:", selected_cols)
    return model
def __bothSelectionRaw__(X, y, model_type ="linear",elimination_criterion = "aic",start='full',verbose=False):
    '''
    Bidirectional stepwise selection driven by elimination_criterion
    ('aic' or 'bic'): every iteration scores BOTH deleting each selected
    variable and adding each removed one (dummy groups move as a block),
    applies the single best move while it improves the criterion, and
    stops otherwise.

    X : predictors, pandas DataFrame (n x p), must contain "Intercept"
    y : output, pandas Series / one-column DataFrame or 1d array of length n
    start : 'full' begins from the complete model; anything else begins
            from the intercept-only model
    ----
    returns the final fitted model on the selected variables

    NOTE(review): when selected_cols empties the loop executes ``continue``
    without re-scoring additions, which loops forever; requires pandas < 2
    (DataFrame.append).
    '''
    cols = X.columns.tolist()
    if start=='full':
        removed_cols = []
        selected_cols = cols.copy()
        selected_cols.remove("Intercept")
    else :
        selected_cols = []
        removed_cols = cols.copy()
        removed_cols.remove("Intercept")
    model = regressor(y,X[['Intercept']+selected_cols],model_type)
    criterion = criterion_f(X,model,model_type,elimination_criterion)
    while True :
        aicvals = pd.DataFrame(columns = ["Cols","aic",'way'])
        ###### Try to remove variables still present in the model
        if len(selected_cols)==0:
            continue
        else :
            for j in selected_cols:
                temp_cols = selected_cols.copy()
                ### Detect dummies and remove several columns if necessary
                cols_to_remove = detect_dummies(X,j)
                for i in cols_to_remove:
                    temp_cols.remove(i)
                model = regressor(y, X[['Intercept']+temp_cols],model_type)
                aicvals = aicvals.append(pd.DataFrame([[j, criterion_f(X,model,model_type,elimination_criterion),'delete']],columns = ["Cols","aic",'way']),ignore_index=True)
        ###### Try to add previously removed variables
        for j in removed_cols:
            cols_to_add = detect_dummies(X,j)
            model = regressor(y, X[['Intercept']+selected_cols+cols_to_add],model_type)
            aicvals = aicvals.append(pd.DataFrame([[j, criterion_f(X,model,model_type,elimination_criterion),'add']],columns = ["Cols","aic",'way']),ignore_index=True)
        # Best move (add or delete) is the lowest-criterion row.
        aicvals = aicvals.sort_values(by = ["aic"]).reset_index(drop=True)
        if verbose :
            print(aicvals)
        if aicvals.shape[0] > 0:
            new_criterion = aicvals["aic"][0]
            if new_criterion < criterion:
                cols_concerned = detect_dummies(X,aicvals["Cols"][0])
                if aicvals["way"][0]=='delete':
                    print("Eliminated :" ,aicvals["Cols"][0],"\tCriterion :", aicvals["aic"][0])
                    criterion = new_criterion
                    for i in cols_concerned:
                        selected_cols.remove(i)
                        removed_cols.append(i)
                    # removed_cols.append(aicvals["Cols"][0])
                    # selected_cols.remove(aicvals["Cols"][0])
                elif aicvals["way"][0]=='add':
                    print("Entered :", aicvals["Cols"][0], "\tCriterion :", aicvals["aic"][0])
                    for i in cols_concerned:
                        selected_cols.append(i)
                        removed_cols.remove(i)
                    # selected_cols.append(aicvals["Cols"][0])
                    # removed_cols.remove(aicvals["Cols"][0])
                    criterion = new_criterion
            else:
                # No move improves the criterion: stop.
                print("break : criterion")
                break
    model = regressor(y,X[['Intercept']+selected_cols],model_type)
    print(str(model.summary())+"\nCriterion: "+ str(criterion_f(X,model,model_type,elimination_criterion)))
    print("Final Variables:", selected_cols)
    return model
def exhaustivesearch_selectionmodel(X,y,vmin=1,vmax=10):
    '''
    Exhaustive-search model selection for LINEAR regression y ~ X: fit every
    model built from p features of X, for p between vmin and vmax, and for
    each size p keep the best model in terms of MSE.
    R2, adjusted R2, Mallows' Cp and BIC are then computed on the kept models.
    X : DataFrame of explanatory variables, WITHOUT intercept column, n x p
    y : DataFrame of the output variable
    ---------
    Returns these different criteria in a DataFrame.
    '''
    # NOTE(review): vmin is accepted for interface compatibility but the
    # search below always starts at min_features=1 - confirm intended.
    if ('const' in X.columns.tolist()) or ('Intercept' in X.columns.tolist()):
        raise SystemExit('Delete Intercept column in X before to pass it to this function')
    ### Exhaustive search with LinearRegression() from sklearn and EFS() from
    ### mlxtend: efs1.subsets_ maps an index to every estimated model.
    lm = LinearRegression(fit_intercept=True)
    efs1 = EFS(lm, min_features=1, max_features=vmax,
               scoring='neg_mean_squared_error', print_progress=True, cv=False)
    efs1 = efs1.fit(X, y)
    #### For each model size k, keep the best model in terms of (neg) MSE.
    best_idxs_all = []
    for k in range(1, vmax + 1):
        best_score = -np.inf  # np.infty was removed in NumPy 2.0; np.inf is equivalent
        best_idx = 0
        for i in efs1.subsets_:
            subset = efs1.subsets_[i]
            if len(subset['feature_idx']) == k:
                if subset['avg_score'] > best_score:
                    best_score = subset['avg_score']
                    best_idx = i
        best_idxs_all.append(best_idx)
    df_subsets = pd.DataFrame(index=best_idxs_all,
                              columns=['Variables','R2','R2_adj','Cp','BIC','Number of variables (except intercept)'])
    # 'Variables' holds Python lists, so the column must be dtype=object.
    # (Hoisted out of the loop below: the cast is loop-invariant.)
    df_subsets['Variables'] = df_subsets['Variables'].astype(object)
    X_copy = X.copy()
    X_copy = sm.add_constant(X_copy)
    full_model = sm.OLS(y, X_copy).fit()
    # Residual variance of the full model, used by Mallows' Cp.
    sigma2 = (full_model.ssr)/(len(X_copy)-full_model.df_model-1)
    for index in best_idxs_all:
        variables = (efs1.subsets_[index]['feature_names'])
        variables = np.array(variables).tolist()
        df_subsets.loc[index,'Number of variables (except intercept)'] = len(variables)
        model = sm.OLS(y,X_copy[['const']+variables]).fit()
        df_subsets.loc[index,'R2'] = model.rsquared
        df_subsets.loc[index,'R2_adj'] = model.rsquared_adj
        df_subsets.loc[index,'BIC'] = BIC(X_copy,model)
        df_subsets.loc[index,'Cp'] = Cp(X_copy,model,sigma2)
        df_subsets.loc[index,'Variables'] = variables
    return df_subsets
| 34.397101 | 162 | 0.720317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,768 | 0.317519 |
ea2175fd92a81a7aa44dc7fd222cd723428cd81c | 1,972 | py | Python | play.py | amm042/pyMonty | eb4400d179a73571c02c5bf2b0793bff4a488092 | [
"MIT"
] | null | null | null | play.py | amm042/pyMonty | eb4400d179a73571c02c5bf2b0793bff4a488092 | [
"MIT"
] | null | null | null | play.py | amm042/pyMonty | eb4400d179a73571c02c5bf2b0793bff4a488092 | [
"MIT"
] | null | null | null | """
Monty Hall client
Alan Marchiori 2019
"""
import logging
import socket
import time
import argparse
def main(addr, port, delay):
    """Repeatedly play the Monty Hall game against a server.

    Parameters
    ----------
    addr : str
        IP address of the game server.
    port : int
        TCP port the server listens on.
    delay : int
        Seconds to wait before retrying when the server answers WAIT.
        (Fix: this CLI option was previously parsed but never used; the
        retry back-off was hard-coded to 0.1 s.)
    """
    log = logging.getLogger()
    server_port = (addr, port)
    log.info("Starting game with {}".format(server_port))
    while True:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as skt:
            # socket options defined here https://linux.die.net/man/3/setsockopt
            if skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1):
                log.error("setsockopt failed.")
                exit(-1)
            skt.connect(server_port)
            log.info("Game started with {}".format(
                server_port
            ))
            skt.send(b"PLAYALAN")
            rsp = skt.recv(4096).decode('utf-8').strip()
            log.info("Got: {} {}".format(rsp, rsp=='WAIT'))
            if rsp == 'WAIT':
                # Server is not ready for a new game; back off and retry.
                log.info("ABORT")
                time.sleep(delay)
                continue
            # Always open door 0, then end the round.
            skt.send(b"OPEN0")
            rsp = skt.recv(4096).decode('utf-8')
            log.info("Got: {}".format(rsp))
            skt.send(b"DONE")
            rsp = skt.recv(4096).decode('utf-8')
            log.info("Got: {}".format(rsp))
if __name__ == "__main__":
    # Command-line interface: every option is optional and defaults to a
    # local test server.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-a', '--addr', type=str, default="127.0.0.1",
                        required=False, help='ip address of the server')
    parser.add_argument('-p', '--port', type=int, default=8888,
                        required=False, help='port the server listens on')
    parser.add_argument('-d', '--delay', type=int, default=1,
                        required=False, help='delay in seconds')
    FORMAT = '%(asctime)-15s %(levelname)-6s: %(message)s'
    logging.basicConfig(format=FORMAT, level=logging.DEBUG)
    args = parser.parse_args()
    # Option names line up with main()'s parameter names on purpose.
    main(**vars(args))
ea21bdaf4495b9f91ab1a9b78ddc86efc5c91b1c | 687 | py | Python | publishing_boy/tests/utils.py | przemekkot/publishing-boy | 7ed069ab2393d684057ddad196c49a58101e9fda | [
"MIT"
] | null | null | null | publishing_boy/tests/utils.py | przemekkot/publishing-boy | 7ed069ab2393d684057ddad196c49a58101e9fda | [
"MIT"
] | 7 | 2020-02-12T02:27:28.000Z | 2022-02-10T08:54:02.000Z | publishing_boy/tests/utils.py | przemekkot/publishing-boy | 7ed069ab2393d684057ddad196c49a58101e9fda | [
"MIT"
] | null | null | null | import os
import tempfile
from django.core.files.storage import FileSystemStorage
import django.core.files.storage
# dummy django.conf.settings
class Settings():
    """Minimal stand-in for django.conf.settings used by the storage tests."""
    USE_TZ = False
    MEDIA_URL = 'http://local/'
    MEDIA_ROOT = os.path.dirname(os.path.abspath(__file__))
    # World-writable so temp test directories never hit permission errors.
    FILE_UPLOAD_PERMISSIONS = 0o777
    FILE_UPLOAD_DIRECTORY_PERMISSIONS = 0o777
# Monkey-patch Django's storage module to use the dummy Settings above, so
# FileSystemStorage works without a fully configured Django project.
django.core.files.storage.settings = Settings()
def get_test_storage():
    """Create a FileSystemStorage rooted at a fresh temporary directory.

    Returns a (directory, storage) tuple so the caller can clean the
    directory up afterwards.
    """
    tmp_dir = tempfile.mkdtemp()
    return tmp_dir, FileSystemStorage(location=tmp_dir, base_url='/')
def get_storage(folder):
    """Return a FileSystemStorage serving *folder* under base URL '/'."""
    return FileSystemStorage(location=folder, base_url='/')
| 24.535714 | 64 | 0.751092 | 210 | 0.305677 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.09607 |
ea2206dfae42e6d124e7e16bb3567b4adca04132 | 1,070 | py | Python | app/report/models/call_table_model.py | michaelscales88/mWreporting_final | b0399fb32fd594c2f5a20d47c2c0dceaecb6f326 | [
"MIT"
] | 2 | 2019-06-10T21:15:03.000Z | 2020-01-02T13:12:45.000Z | app/report/models/call_table_model.py | michaelscales88/python-reporting-app | b0399fb32fd594c2f5a20d47c2c0dceaecb6f326 | [
"MIT"
] | 14 | 2018-01-18T19:07:15.000Z | 2018-05-16T18:44:55.000Z | app/report/models/call_table_model.py | michaelscales88/mWreporting_final | b0399fb32fd594c2f5a20d47c2c0dceaecb6f326 | [
"MIT"
] | null | null | null | # data/models.py
import datetime
from sqlalchemy.ext.hybrid import hybrid_property
from app.extensions import db
class CallTableModel(db.Model):
    """ORM model for the ``c_call`` table: one row per phone call."""
    __tablename__ = 'c_call'
    # Attributes shown in the generated repr (project mixin convention).
    __repr_attrs__ = ['call_id', 'calling_party_number', 'dialed_party_number',
                      'start_time', 'end_time', 'caller_id']
    call_id = db.Column(db.Integer, primary_key=True)
    call_direction = db.Column(db.Integer)  # NOTE(review): direction code semantics not defined here - confirm
    calling_party_number = db.Column(db.String)
    dialed_party_number = db.Column(db.String)
    account_code = db.Column(db.String)
    start_time = db.Column(db.DateTime)
    end_time = db.Column(db.DateTime)
    system_id = db.Column(db.Integer)
    caller_id = db.Column(db.String)
    inbound_route = db.Column(db.String)
    # One-to-many link to the call's events; lazy="dynamic" returns a query.
    events = db.relationship("EventTableModel", lazy="dynamic")
    @hybrid_property
    def length(self):
        """Call duration (end_time - start_time), truncated to whole seconds."""
        delta = self.end_time - self.start_time
        # Strip the sub-second remainder so the duration formats cleanly.
        return delta - datetime.timedelta(microseconds=delta.microseconds)
    @classmethod
    def set_empty(cls, model):
        """Attach an empty ``data`` dict to *model* and return it."""
        model.data = {}
        return model
| 31.470588 | 79 | 0.694393 | 953 | 0.890654 | 0 | 0 | 249 | 0.23271 | 0 | 0 | 135 | 0.126168 |
ea22bbec328c45633bc90472df085445b7749a42 | 342 | py | Python | dataentry/models.py | abrehman90/Integrate-SummerNote-in-Django | a588578f007e153b85e1b18e71fa37d05bdef7ef | [
"MIT"
] | 1 | 2021-12-15T03:47:19.000Z | 2021-12-15T03:47:19.000Z | dataentry/models.py | abrehman90/Integrate-SummerNote-in-Django | a588578f007e153b85e1b18e71fa37d05bdef7ef | [
"MIT"
] | null | null | null | dataentry/models.py | abrehman90/Integrate-SummerNote-in-Django | a588578f007e153b85e1b18e71fa37d05bdef7ef | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class user(models.Model):
    """Data-entry contact record captured from the form."""
    # NOTE(review): class name breaks PascalCase convention ("User"), but
    # renaming would require a migration and updating all references.
    name = models.CharField(max_length=25)
    phone = models.CharField(max_length=25,default='+92')  # defaults to Pakistan's dialing code
    email = models.EmailField()
    city = models.CharField(max_length=20)
    content = models.TextField()
    def __str__(self):
        # Display name in the Django admin and shell.
        return self.name
| 24.428571 | 57 | 0.695906 | 283 | 0.827485 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.090643 |
ea230ac82e5bb4386e749cb1be502c5bdba91b6d | 76 | py | Python | dotfiles/common/.gdbinit.d/eigen.py | HaoZeke/Dotfiles | f4ac24b0d7e08d87b1f402af67e463c528b1b69d | [
"Unlicense"
] | 14 | 2018-10-29T18:54:25.000Z | 2021-12-21T00:22:52.000Z | dotfiles/common/.gdbinit.d/eigen.py | HaoZeke/Dotfiles | f4ac24b0d7e08d87b1f402af67e463c528b1b69d | [
"Unlicense"
] | 1 | 2018-08-20T17:41:10.000Z | 2018-08-20T17:42:23.000Z | dotfiles/common/.gdbinit.d/eigen.py | HaoZeke/Dotfiles | f4ac24b0d7e08d87b1f402af67e463c528b1b69d | [
"Unlicense"
] | 3 | 2018-08-20T17:36:29.000Z | 2021-01-23T05:18:30.000Z | # Eigen pretty printer
__import__('eigengdb').register_eigen_printers(None)
| 25.333333 | 52 | 0.828947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.421053 |
ea23411116c11712b8563c0700e8c3b9b69e0e40 | 1,055 | py | Python | sv/loading.py | thalesians/bayestsa | d54ea04ffa9903473b11c906545e95b2666afb88 | [
"Apache-2.0"
] | 18 | 2017-03-07T19:13:18.000Z | 2021-01-05T00:35:30.000Z | sv/loading.py | HanMeh/bayestsa | d54ea04ffa9903473b11c906545e95b2666afb88 | [
"Apache-2.0"
] | null | null | null | sv/loading.py | HanMeh/bayestsa | d54ea04ffa9903473b11c906545e95b2666afb88 | [
"Apache-2.0"
] | 14 | 2016-12-27T00:09:40.000Z | 2020-12-27T19:23:53.000Z | from collections import OrderedDict
import numpy as np
from pandas import DataFrame
from sv import SVData, CorTiming
def loadSVDataFromBUGSDataset(filepath, logreturnforward, logreturnscale, dtfilepath=None):
    """Load an SVData object from a BUGS-format dataset file.

    The BUGS dump holds one value per line between a header and a footer
    line; both wrapper lines are discarded.
    """
    def _read_values(path):
        # One float per line; drop the first and last (wrapper) lines.
        with open(path) as fh:
            lines = fh.readlines()
        return np.array([float(v) for v in lines[1:-1]])

    dts = _read_values(dtfilepath) if dtfilepath is not None else None
    logreturns = _read_values(filepath)
    times = range(len(logreturns))
    # Column order matters downstream: logreturn first, then (optionally) dt.
    if dts is not None:
        svdf = DataFrame(OrderedDict((('logreturn', logreturns), ('dt', dts))), index=times)
    else:
        svdf = DataFrame(OrderedDict((('logreturn', logreturns),)), index=times)
    return SVData(
        sourcekind='loader',
        source=loadSVDataFromBUGSDataset,
        svdf=svdf,
        params=None,
        cortiming=CorTiming.unknown,
        logreturnforward=logreturnforward,
        logreturnscale=logreturnscale)
| 31.029412 | 92 | 0.629384 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.032227 |
ea26327ae6b47dd4a986055221217b74fe00a65e | 3,741 | py | Python | sdk/network/azure-mgmt-privatedns/azure/mgmt/privatedns/models/virtual_network_link.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/network/azure-mgmt-privatedns/azure/mgmt/privatedns/models/virtual_network_link.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/network/azure-mgmt-privatedns/azure/mgmt/privatedns/models/virtual_network_link.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .tracked_resource import TrackedResource
class VirtualNetworkLink(TrackedResource):
"""Describes a link to virtual network for a Private DNS zone.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource. Example -
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateDnsZones/{privateDnsZoneName}'.
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Example -
'Microsoft.Network/privateDnsZones'.
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param location: The Azure Region where the resource lives
:type location: str
:param etag: The ETag of the virtual network link.
:type etag: str
:param virtual_network: The reference of the virtual network.
:type virtual_network: ~azure.mgmt.privatedns.models.SubResource
:param registration_enabled: Is auto-registration of virtual machine
records in the virtual network in the Private DNS zone enabled?
:type registration_enabled: bool
:ivar virtual_network_link_state: The status of the virtual network link
to the Private DNS zone. Possible values are 'InProgress' and 'Done'. This
is a read-only property and any attempt to set this value will be ignored.
Possible values include: 'InProgress', 'Completed'
:vartype virtual_network_link_state: str or
~azure.mgmt.privatedns.models.VirtualNetworkLinkState
:ivar provisioning_state: The provisioning state of the resource. This is
a read-only property and any attempt to set this value will be ignored.
Possible values include: 'Creating', 'Updating', 'Deleting', 'Succeeded',
'Failed', 'Canceled'
:vartype provisioning_state: str or
~azure.mgmt.privatedns.models.ProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'virtual_network_link_state': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'virtual_network': {'key': 'properties.virtualNetwork', 'type': 'SubResource'},
'registration_enabled': {'key': 'properties.registrationEnabled', 'type': 'bool'},
'virtual_network_link_state': {'key': 'properties.virtualNetworkLinkState', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualNetworkLink, self).__init__(**kwargs)
self.etag = kwargs.get('etag', None)
self.virtual_network = kwargs.get('virtual_network', None)
self.registration_enabled = kwargs.get('registration_enabled', None)
self.virtual_network_link_state = None
self.provisioning_state = None
| 45.621951 | 139 | 0.654103 | 3,218 | 0.860198 | 0 | 0 | 0 | 0 | 0 | 0 | 2,953 | 0.789361 |
ea26c82baaca0ecaf568c61b139222fcd9a4d7cf | 2,986 | py | Python | 13_while_loops_challenges/58_guess_my_word.py | r7asmu7s/art_of_doing_python | 62a03bcca084046c319976fc308bf3de3a2d412d | [
"Unlicense"
] | null | null | null | 13_while_loops_challenges/58_guess_my_word.py | r7asmu7s/art_of_doing_python | 62a03bcca084046c319976fc308bf3de3a2d412d | [
"Unlicense"
] | null | null | null | 13_while_loops_challenges/58_guess_my_word.py | r7asmu7s/art_of_doing_python | 62a03bcca084046c319976fc308bf3de3a2d412d | [
"Unlicense"
] | null | null | null | import random
print('Welcome to the GUESS MY WORD APP.')
game_dict = {"sports": ['basketball', 'baseball', 'soccer', 'football', 'tennis',
'curling'],
"colors": ['orange', 'yellow', 'purple', 'aquamarine', 'violet', 'gold'],
"fruits": ['apple', 'banana', 'watermelon', 'peach', 'mango', 'strawberry'],
"classes": ['english', 'history', 'science', 'mathematics', 'art', 'health'],
}
game_keys = []
for i in game_dict.keys():
game_keys.append(i)
# or we can use list comprehension
# game_keys = [i for i in game_dict.keys()]
playing = True
while playing:
# choosing game category randomly
game_category = random.choice(game_keys)
# or
# game_category = game_keys[random.randint(0, len(game_keys) - 1)]
# choosing a random game word according to the random game category
game_word = random.choice(list(game_dict[game_category]))
# or:
# game_word = game_dict[game_category][random.randint(0, len(game_dict) - 1)]
# creating a list based on the string of game word
game_word_list = list(game_word)
# creating a blank word, which it's components are dashes, unless changed because of an incorrect guess for the game word
blank_word = ['-' for i in game_word]
print('\nGuess a ' + str(len(game_word)) + ' letter word from the following category: ' + game_category)
# using list comprehension, printing the basic blank word
print(''.join([str(i) for i in blank_word]))
guess_count = 0
# a flag allowing to guess
is_allowed_to_guess = True
# number of the allowed guesses based on the number of the letters of the game word
allowed_guesses = len(game_word)
# while loop for guessing
while guess_count < allowed_guesses and is_allowed_to_guess:
guess = input('\nEnter you guess: ').lower().strip()
# correct guess
if guess == game_word:
guess_count += 1
print('\nCorrect. You guessed the word in ' + str(guess_count) + ' guesses.')
is_allowed_to_guess = False
break
# incorrect guess
else:
guess_count += 1
print('That is not correct. Let us reveal a letter to help you.')
# a flag to check if the a dash to fill is available in the blank word
dash_flag = True
# while loop to fill the dashes in the blank word because of a wrong guess
while dash_flag:
# choosing a random dash to fill with a letter
index = random.randint(0, len(blank_word) - 1)
if blank_word[index] == '-':
# filling the dash with the corresponding letter from the correct game word from it's list of letters
blank_word[index] = game_word_list[index]
print(''.join([str(i) for i in blank_word]))
dash_flag = False
# losing the game
else:
print('\nYou did not guess the word correctly with your ' + str(guess_count) + ' guesses. The word was: ' + game_word)
# play again?
choice = input('\nWould you like to play again? (YES/NO): ').lower().strip()
if not choice.startswith('y'):
playing = False | 35.129412 | 123 | 0.673141 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,643 | 0.550234 |
ea26f9c2bf7b019ef8aa7a75cd7953f9dff2b3d1 | 5,247 | py | Python | crunch-shake/parse.py | zhiyanfoo/crunch-shake | ff2bc24615ad037601e912b1b8b3905e4f9d3540 | [
"MIT"
] | 1 | 2019-02-22T19:30:42.000Z | 2019-02-22T19:30:42.000Z | crunch-shake/parse.py | zhiyanfoo/crunch-shake | ff2bc24615ad037601e912b1b8b3905e4f9d3540 | [
"MIT"
] | null | null | null | crunch-shake/parse.py | zhiyanfoo/crunch-shake | ff2bc24615ad037601e912b1b8b3905e4f9d3540 | [
"MIT"
] | null | null | null | from utils import get_matcher
from lookup import ROMAN_TO_INT
from lines import Dialogue, Character, Instruction, Act, Scene
def get_speaking_characters(raw_play_lines, character_matcher):
""" Return a set of all character names
Parameters
----------
raw_play_lines : list of str
lines of the play.
character_matcher : compiled regex expression
used to extract character names from raw_play_lines, regex must include
group called 'name'a.
"""
return { matched_line.group('name').upper() for matched_line in
( character_matcher.search(line) for line in raw_play_lines )
if matched_line }
def parse_raw_text(raw_play_lines, speaking_characters, matcher):
""" Parse the lines of the play which is in HTML
Each line is either ignored or putting into a class derived from a
namedtuple.
Parameters
----------
raw_play_lines : list of str
lines of the play
speaking_characters : set of str
names of characters who speak
matcher : namedtuple
matcher must contain the following the following compiled regex
matchers, with the following groups.
MATCHER : GROUP NAMES
---------------------
dialogue : 'act' , 'scene', 'dialogue' ; opt : 'instruction'
character : 'name'
stage_direction : 'stage_direction'
instruction : no name, uses index 0
act : 'act'
scene : 'scene'
Notes
-----
character_chain
A list of the characters who speak in turn, all capitalized
Example
-------
>>> PLAY_NAME
alls_well_that_ends_well
>>> character_chain
['COUNTESS', 'BERTRAM', 'LAFEU', 'COUNTESS', 'BERTRAM', ...]
"""
known_characters_matcher = get_matcher(speaking_characters, "character")
parsed_lines = []
character_chain = []
for i, line in enumerate(raw_play_lines):
d_match = matcher.dialogue.search(line)
# d has 3-4 groups : act, scene, dialogue, optional instruction
if d_match:
try:
instruction = d_match.group('instruction')
except IndexError:
instruction = None
dialogue = Dialogue(
d_match.group('dialogue'),
process_instructions(
d_match.group('instruction'),
known_characters_matcher,
matcher.instruction,
character_chain[-1]),
character_chain[-1],
d_match.group('act'),
d_match.group('scene'))
parsed_lines.append(dialogue)
continue
c_match = matcher.character.search(line)
if c_match:
name = c_match.group('name').upper()
character_chain.append(name)
parsed_lines.append(Character(name))
continue
sd_match = matcher.stage_direction.search(line)
if sd_match:
stage_direction = sd_match.group('stage_direction')
prev_character = character_chain[-1] if character_chain else None
instruction = process_instructions(
stage_direction,
known_characters_matcher,
matcher.instruction,
prev_character)
parsed_lines.append(instruction)
continue
act_match = matcher.act.search(line)
if act_match:
act_roman = act_match.group('act')
act = ROMAN_TO_INT[act_roman]
parsed_lines.append(Act(act))
prev_character = None
continue
scene_match = matcher.scene.search(line)
if scene_match:
scene_roman = scene_match.group('scene')
scene = ROMAN_TO_INT[scene_roman]
parsed_lines.append(Scene(scene))
prev_character = None
continue
return parsed_lines
def process_instructions(instruction, known_characters_matcher,
instruction_matcher, default_character):
"""
For each sentence only one action (the first) is matched, but a single
instruction can contain multiple sentences, which is why action are
returned as a list. Each action can be applied to multiple characters. Note
that all character names are shifted to uppercase
"""
if instruction is None:
return None
instruction_lines = instruction.split(".")
actions = [ match.group(0) if match else None for match in
( instruction_matcher.search(line)
for line in instruction_lines ) ]
characters = [
[ character.upper()
for character in known_characters]
for known_characters in
( known_characters_matcher.findall(line)
for line in instruction_lines) ]
return Instruction(instruction, actions, characters, default_character)
def preprocess(raw_play_lines, matcher):
speaking_characters = get_speaking_characters(raw_play_lines,
matcher.character)
play_lines = parse_raw_text(raw_play_lines, speaking_characters, matcher)
return speaking_characters, play_lines
| 37.212766 | 79 | 0.61559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,784 | 0.340004 |
ea281895334d9f6eb2e1d0053bb87208fe4b79cf | 428 | py | Python | backend/schemas/comment.py | golani04/bug-tracker | a3f91fe6a2202bd60721ab0b0f4bf46b0fb4d872 | [
"MIT"
] | null | null | null | backend/schemas/comment.py | golani04/bug-tracker | a3f91fe6a2202bd60721ab0b0f4bf46b0fb4d872 | [
"MIT"
] | 1 | 2021-05-21T16:15:52.000Z | 2021-05-21T16:15:52.000Z | backend/schemas/comment.py | golani04/bug-tracker | a3f91fe6a2202bd60721ab0b0f4bf46b0fb4d872 | [
"MIT"
] | null | null | null | from datetime import datetime
from uuid import UUID
from typing import Optional
from pydantic import BaseModel, Field
class CommentBase(BaseModel):
text: str
commenter: UUID
reply_to: Optional[int] = Field(None, description="Replying to the previous comment")
class CommentCreate(CommentBase):
pass
class Comment(CommentBase):
id: int
created_at: datetime
updated_at: Optional[datetime] = None
| 20.380952 | 89 | 0.75 | 301 | 0.703271 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.079439 |
ea2a96d721e0464fc48d0be7b3b409eb02575d4c | 1,216 | py | Python | videos_to_images.py | hikaruya8/dimotion | 68c1e561e2d26f49ada59a82ed9faae373718033 | [
"MIT"
] | null | null | null | videos_to_images.py | hikaruya8/dimotion | 68c1e561e2d26f49ada59a82ed9faae373718033 | [
"MIT"
] | null | null | null | videos_to_images.py | hikaruya8/dimotion | 68c1e561e2d26f49ada59a82ed9faae373718033 | [
"MIT"
] | null | null | null | import os
import subprocess # ターミナルで実行するコマンドを実行できる
# 動画が保存された「MELD.Raw」内にある、train_splits, test_splits, dev_splitsそれぞれで実行
dir_path = './MELD/MELD.Raw/dev_splits/'
# class_list = os.listdir(path=dir_path)
# print(class_list)
# 各クラスの動画ファイルを画像ファイルに変換する
# for class_list_i in (class_list): # クラスごとのループ
# クラスのフォルダへのパスを取得
# class_path = os.path.join(dir_path, class_list_i)
# 各クラスのフォルダ内の動画ファイルをひとつずつ処理するループ
for file_name in os.listdir(dir_path):
# ファイル名と拡張子に分割
name, ext = os.path.splitext(file_name)
# mp4ファイルでない、フォルダなどは処理しない
if ext != '.mp4':
continue
# 動画ファイルを画像に分割して保存するフォルダ名を取得
dst_directory_path = os.path.join(dir_path, name)
# 上記の画像保存フォルダがなければ作成
if not os.path.exists(dst_directory_path):
os.mkdir(dst_directory_path)
# 動画ファイルへのパスを取得
video_file_path = os.path.join(dir_path, file_name)
# ffmpegを実行させ、動画ファイルをjpgにする (高さは256ピクセルで幅はアスペクト比を変えない)
# kineticsの動画の場合10秒になっており、大体300ファイルになる(30 frames /sec)
cmd = 'ffmpeg -i \"{}\" -vcodec mjpeg -vf scale=-1:256 \"{}/image_%05d.jpg\"'.format(
video_file_path, dst_directory_path)
print(cmd)
subprocess.call(cmd, shell=True)
print('\n')
print("動画ファイルを画像ファイルに変換しました。") | 27.636364 | 89 | 0.71875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,252 | 0.694784 |
ea2b625a6e5ae7073ed5a6fd9af95fcd3ef81b59 | 15,396 | py | Python | ur_driver/src/ur_driver/deserializeRT.py | Hugal31/universal_robot | 06d8b9e2f5f86aa54f9f2845f11edbc84e2f951e | [
"Apache-2.0",
"BSD-3-Clause"
] | 749 | 2015-01-20T22:14:20.000Z | 2022-03-31T07:31:51.000Z | ur_driver/src/ur_driver/deserializeRT.py | Hugal31/universal_robot | 06d8b9e2f5f86aa54f9f2845f11edbc84e2f951e | [
"Apache-2.0",
"BSD-3-Clause"
] | 431 | 2015-01-05T02:02:45.000Z | 2022-03-07T15:18:51.000Z | ur_driver/src/ur_driver/deserializeRT.py | Hugal31/universal_robot | 06d8b9e2f5f86aa54f9f2845f11edbc84e2f951e | [
"Apache-2.0",
"BSD-3-Clause"
] | 788 | 2015-01-12T08:11:21.000Z | 2022-03-30T11:53:01.000Z | from __future__ import print_function
import struct
import copy
#this class handles different protocol versions
class RobotStateRT(object):
    """Dispatcher that picks the protocol-specific parser from the packet length."""
    @staticmethod
    def unpack(buf):
        # The 4-byte length prefix identifies the controller software
        # version of the sender; ptype is decoded but not needed here.
        (plen, ptype) = struct.unpack_from("!IB", buf)
        if plen == 756:
            return RobotStateRT_V15.unpack(buf)
        if plen == 812:
            return RobotStateRT_V18.unpack(buf)
        if plen == 1044:
            return RobotStateRT_V30.unpack(buf)
        # Unknown length: report it and hand back an empty base object.
        print("RobotStateRT has wrong length: " + str(plen))
        return RobotStateRT()
#this parses RobotStateRT for versions = v1.5
#http://wiki03.lynero.net/Technical/RealTimeClientInterface?foswiki_redirect_cache=9b4574b30760f720c6f79c5f1f2203dd
class RobotStateRT_V15(object):
    """Real-time robot state packet for controller version 1.5.

    The 756-byte payload is a 4-byte big-endian length prefix followed by
    94 big-endian doubles, handed out field by field in ``unpack``.
    """
    __slots__ = ['time',
                 'q_target', 'qd_target', 'qdd_target', 'i_target', 'm_target',
                 'q_actual', 'qd_actual', 'i_actual', 'tool_acc_values',
                 'unused',
                 'tcp_force', 'tool_vector', 'tcp_speed',
                 'digital_input_bits', 'motor_temperatures', 'controller_timer',
                 'test_value']

    @staticmethod
    def unpack(buf):
        """Decode *buf* into a populated RobotStateRT_V15.

        Raises Exception when the embedded length field does not match the
        buffer size.
        """
        message_size = struct.unpack_from("!i", buf, 0)[0]
        if message_size != len(buf):
            print(("MessageSize: ", message_size, "; BufferSize: ", len(buf)))
            raise Exception("Could not unpack RobotStateRT packet: length field is incorrect")

        # Everything after the size prefix is doubles: decode them in one
        # shot and consume them in wire order.
        values = iter(struct.unpack_from("!94d", buf, 4))

        def take(count):
            # Consume `count` doubles and return them as a fresh list.
            return [next(values) for _ in range(count)]

        rs = RobotStateRT_V15()
        rs.time = next(values)
        rs.q_target = take(6)
        rs.qd_target = take(6)
        rs.qdd_target = take(6)
        rs.i_target = take(6)
        rs.m_target = take(6)
        rs.q_actual = take(6)
        rs.qd_actual = take(6)
        rs.i_actual = take(6)
        rs.tool_acc_values = take(3)
        take(15)  # reserved/unused padding in the wire format
        rs.unused = []
        rs.tcp_force = take(6)
        rs.tool_vector = take(6)
        rs.tcp_speed = take(6)
        rs.digital_input_bits = next(values)
        rs.motor_temperatures = take(6)
        rs.controller_timer = next(values)
        rs.test_value = next(values)
        return rs
#this parses RobotStateRT for versions <= v1.8 (i.e. 1.6, 1.7, 1.8)
class RobotStateRT_V18(object):
    """Real-time robot state packet for controller versions 1.6-1.8.

    The 812-byte payload is a 4-byte big-endian length prefix followed by
    101 big-endian doubles: the V1.5 layout plus robot_mode and joint_modes.
    """
    __slots__ = ['time',
                 'q_target', 'qd_target', 'qdd_target', 'i_target', 'm_target',
                 'q_actual', 'qd_actual', 'i_actual', 'tool_acc_values',
                 'unused',
                 'tcp_force', 'tool_vector', 'tcp_speed',
                 'digital_input_bits', 'motor_temperatures', 'controller_timer',
                 'test_value',
                 'robot_mode', 'joint_modes']

    @staticmethod
    def unpack(buf):
        """Decode *buf* into a populated RobotStateRT_V18.

        Raises Exception when the embedded length field does not match the
        buffer size.
        """
        message_size = struct.unpack_from("!i", buf, 0)[0]
        if message_size != len(buf):
            print(("MessageSize: ", message_size, "; BufferSize: ", len(buf)))
            raise Exception("Could not unpack RobotStateRT packet: length field is incorrect")

        # Everything after the size prefix is doubles: decode them in one
        # shot and consume them in wire order.
        values = iter(struct.unpack_from("!101d", buf, 4))

        def take(count):
            # Consume `count` doubles and return them as a fresh list.
            return [next(values) for _ in range(count)]

        rs = RobotStateRT_V18()
        rs.time = next(values)
        rs.q_target = take(6)
        rs.qd_target = take(6)
        rs.qdd_target = take(6)
        rs.i_target = take(6)
        rs.m_target = take(6)
        rs.q_actual = take(6)
        rs.qd_actual = take(6)
        rs.i_actual = take(6)
        rs.tool_acc_values = take(3)
        take(15)  # reserved/unused padding in the wire format
        rs.unused = []
        rs.tcp_force = take(6)
        rs.tool_vector = take(6)
        rs.tcp_speed = take(6)
        rs.digital_input_bits = next(values)
        rs.motor_temperatures = take(6)
        rs.controller_timer = next(values)
        rs.test_value = next(values)
        rs.robot_mode = next(values)
        rs.joint_modes = take(6)
        return rs
#this parses RobotStateRT for versions >=3.0 (i.e. 3.0)
class RobotStateRT_V30(object):
    """Decoded real-time robot state packet for controller versions >= 3.0.

    The wire format is a 4-byte big-endian length prefix followed by 130
    network-order (big-endian) doubles, 1044 bytes in total.  Multi-element
    fields are exposed as plain lists of floats, single-element fields as
    floats.  Padding runs that the packet carries but the driver does not
    use are skipped.
    """

    __slots__ = ['time',
            'q_target', 'qd_target', 'qdd_target', 'i_target', 'm_target',
            'q_actual', 'qd_actual', 'i_actual', 'i_control',
            'tool_vector_actual', 'tcp_speed_actual', 'tcp_force',
            'tool_vector_target', 'tcp_speed_target',
            'digital_input_bits', 'motor_temperatures', 'controller_timer',
            'test_value',
            'robot_mode', 'joint_modes', 'safety_mode',
            #6xd: unused
            'tool_acc_values',
            #6xd: unused
            'speed_scaling', 'linear_momentum_norm',
            #2xd: unused
            'v_main', 'v_robot', 'i_robot', 'v_actual']

    # (attribute name, number of doubles) in wire order.  ``None`` marks
    # padding the packet carries but the class does not expose.
    _FIELDS = (
        ('time', 1),
        ('q_target', 6), ('qd_target', 6), ('qdd_target', 6),
        ('i_target', 6), ('m_target', 6),
        ('q_actual', 6), ('qd_actual', 6), ('i_actual', 6), ('i_control', 6),
        ('tool_vector_actual', 6), ('tcp_speed_actual', 6), ('tcp_force', 6),
        ('tool_vector_target', 6), ('tcp_speed_target', 6),
        ('digital_input_bits', 1),    # transmitted as a double on the wire
        ('motor_temperatures', 6),
        ('controller_timer', 1), ('test_value', 1), ('robot_mode', 1),
        ('joint_modes', 6), ('safety_mode', 1),
        (None, 6),                    # unused
        ('tool_acc_values', 3),
        (None, 6),                    # unused
        ('speed_scaling', 1), ('linear_momentum_norm', 1),
        (None, 2),                    # unused
        ('v_main', 1), ('v_robot', 1), ('i_robot', 1),
        ('v_actual', 6),
    )

    @staticmethod
    def unpack(buf):
        """Parse ``buf`` (bytes) into a populated RobotStateRT_V30 instance.

        Raises Exception when the embedded length field disagrees with
        ``len(buf)`` and ``struct.error`` when the buffer is truncated.
        """
        offset = 0
        message_size = struct.unpack_from("!i", buf, offset)[0]
        offset += 4
        if message_size != len(buf):
            print(("MessageSize: ", message_size, "; BufferSize: ", len(buf)))
            raise Exception("Could not unpack RobotStateRT packet: length field is incorrect")

        layout = RobotStateRT_V30._FIELDS
        total_doubles = sum(count for _, count in layout)
        # The payload is one flat run of big-endian doubles: decode it with
        # a single struct call instead of one unpack_from per field.
        values = struct.unpack_from("!%dd" % total_doubles, buf, offset)

        rs = RobotStateRT_V30()
        index = 0
        for name, count in layout:
            if name is not None:
                if count == 1:
                    setattr(rs, name, values[index])
                else:
                    # A plain slice copy suffices; the original deep-copied
                    # freshly built lists of floats for no benefit.
                    setattr(rs, name, list(values[index:index + count]))
            index += count
        return rs
| 36.657143 | 115 | 0.58067 | 14,987 | 0.973435 | 0 | 0 | 13,304 | 0.864121 | 0 | 0 | 4,390 | 0.285139 |
ea2b8336beb92adf459272aedc193a552076d2fb | 1,457 | py | Python | Data-Science/code.py | shikharratna/ga-learner-dsmp-repo | a852a8759679dddcab3e59930240d22fcab9ba73 | [
"MIT"
] | null | null | null | Data-Science/code.py | shikharratna/ga-learner-dsmp-repo | a852a8759679dddcab3e59930240d22fcab9ba73 | [
"MIT"
] | null | null | null | Data-Science/code.py | shikharratna/ga-learner-dsmp-repo | a852a8759679dddcab3e59930240d22fcab9ba73 | [
"MIT"
] | null | null | null | # --------------
#Header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# ``path`` (location of the data file) is injected by the hosting platform
# before this script runs; it is not defined in this file.
data = pd.read_csv(path)

# --------------
# Task 1: gender distribution.  Use column assignment rather than
# Series.replace(inplace=True): the in-place form relies on chained
# assignment and is unreliable under pandas copy-on-write.
data['Gender'] = data['Gender'].replace('-', 'Agender')
gender_count = data['Gender'].value_counts()
gender_count.plot(kind='bar')

# --------------
# Task 2: alignment distribution as a pie chart.
alignment = data['Alignment'].value_counts()
alignment.plot(kind='pie')

# --------------
# Task 3: spread and association of Strength/Intelligence with Combat.
sc_df = pd.DataFrame(data, columns=['Strength', 'Combat'])
sc_covariance = sc_df.cov()
sc_strength = sc_df['Strength'].std()
sc_combat = sc_df['Combat'].std()
sc_pearson = sc_df.corr(method='pearson', min_periods=1)
# Extract corr/cov of (Strength, Combat).  ``.iloc[0]`` replaces the
# deprecated integer fallback ``Series[0]`` on a string-labelled index.
sc_pearson = sc_pearson['Combat'].iloc[0]
sc_covariance = sc_covariance['Combat'].iloc[0]

ic_df = pd.DataFrame(data, columns=['Intelligence', 'Combat'])
ic_covariance = ic_df.cov()
ic_intelligence = ic_df['Intelligence'].std()
ic_combat = ic_df['Combat'].std()
ic_pearson = ic_df.corr(method='pearson', min_periods=1)
ic_pearson = ic_pearson['Combat'].iloc[0]
ic_covariance = ic_covariance['Combat'].iloc[0]

# --------------
# Task 4: names of heroes whose Total score exceeds the 99th percentile.
total_high = data['Total'].quantile(q=.99)
super_best = data[data['Total'] > total_high]
super_best_names = list(super_best['Name'])
print(super_best_names)

# --------------
# Task 5: box plots of three attributes side by side.
fig, (ax_1, ax_2, ax_3) = plt.subplots(1, 3, figsize=(20, 8))
data['Intelligence'].plot(kind='box', ax=ax_1)
data['Speed'].plot(kind='box', ax=ax_2)
data['Power'].plot(kind='box', ax=ax_3)
# Bug fix: the original wrote ``ax_1.set_title=('Intelligence1')``, which
# rebinds the set_title method to a string instead of calling it.
ax_1.set_title('Intelligence1')
| 21.746269 | 59 | 0.710364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 324 | 0.222375 |
ea2b8e4736bc02950c2c39a75c6d5d79e88d9882 | 14,205 | py | Python | stupidb/core.py | mrcrnkovich/stupidb | 4274f60b7f8f2455c0031c73e053964d4d3e3e1d | [
"Apache-2.0"
] | 43 | 2018-12-29T22:14:55.000Z | 2022-03-17T03:38:16.000Z | stupidb/core.py | mrcrnkovich/stupidb | 4274f60b7f8f2455c0031c73e053964d4d3e3e1d | [
"Apache-2.0"
] | 102 | 2021-07-19T21:20:22.000Z | 2022-03-22T02:57:02.000Z | stupidb/core.py | mrcrnkovich/stupidb | 4274f60b7f8f2455c0031c73e053964d4d3e3e1d | [
"Apache-2.0"
] | 3 | 2021-12-04T19:14:33.000Z | 2022-01-08T17:28:36.000Z | """StupiDB. The stupidest database you'll ever come across.
This is a project designed to illustrate the concepts that underlie a typical
relational database implementation, starting at naive execution of table-stakes
features up to rule-based query optimization.
.. warning::
Please do not use this for any other reason than learning. There are no
guarantees here except that there will be bugs.
"""
from __future__ import annotations
import abc
import collections
import functools
import itertools
import typing
from typing import Any, Generic, Iterable, Iterator, Mapping
import toolz
from .aggregation import (
AggregateSpecification,
Nulls,
WindowAggregateSpecification,
row_key_compare,
)
from .functions.associative.core import AssociativeAggregate
from .row import AbstractRow, JoinedRow, Row
from .typehints import (
JoinPredicate,
OrderBy,
PartitionBy,
PartitionKey,
Predicate,
Projector,
)
class Relation(abc.ABC):
    """Base class for all relational operators.

    Subclasses implement :meth:`_produce`; iteration wraps that raw stream
    in reified rows numbered from zero.
    """

    __slots__ = ("partitioners",)

    def __init__(self) -> None:
        self.partitioners: Mapping[str, PartitionBy] = {}

    def __iter__(self) -> Iterator[AbstractRow]:
        """Yield the rows of this :class:`~stupidb.stupidb.Relation`.

        Falsy (e.g. empty) rows are dropped, and every surviving row is
        reified with a fresh ``_id`` equal to its position in the output.
        """
        row_number = 0
        for raw_row in self._produce():
            if raw_row:
                yield Row.from_mapping(raw_row, _id=row_number)
                row_number += 1

    @abc.abstractmethod
    def _produce(self) -> Iterator[AbstractRow]:
        """Produce the raw rows of this relation.

        Implementations should not reify rows with a row identifier; that
        bookkeeping lives in :meth:`__iter__`.
        """

    def __repr__(self) -> str:
        # Imported lazily to avoid a circular import with stupidb.api.
        from stupidb.api import pretty

        return pretty(self)
class Table(Relation):
    """A relation backed directly by a pre-existing iterable of rows."""

    __slots__ = ("rows",)

    def __init__(self, rows: Iterable[AbstractRow]) -> None:
        super().__init__()
        self.rows = rows

    @classmethod
    def from_iterable(cls, iterable: Iterable[Mapping[str, Any]]) -> Table:
        """Build a :class:`Table` from plain mappings, numbering the rows."""
        numbered = enumerate(iterable)
        return cls(
            Row.from_mapping(mapping, _id=row_id) for row_id, mapping in numbered
        )

    def _produce(self) -> Iterator[AbstractRow]:
        return iter(self.rows)
class Projection(Relation):
    """A relation representing column selection.

    Scalar projections and window aggregations are separated at
    construction time and recombined row by row during production.

    Attributes
    ----------
    child
        The relation whose columns are being selected.
    aggregations
        The subset of the requested projections that are window
        aggregations.
    projections
        The subset of the requested projections that are plain callables
        (scalar column projectors).

    """

    __slots__ = "child", "aggregations", "projections"

    def __init__(
        self,
        child: Relation,
        projections: Mapping[str, Projector | WindowAggregateSpecification],
    ) -> None:
        super().__init__()
        self.child = child
        # Window aggregations are computed over whole partitions and so are
        # handled separately from per-row scalar projections.
        self.aggregations: Mapping[str, WindowAggregateSpecification] = {
            aggname: aggspec
            for aggname, aggspec in projections.items()
            if isinstance(aggspec, WindowAggregateSpecification)
        }
        self.projections: Mapping[str, Projector] = {
            name: projector
            for name, projector in projections.items()
            if callable(projector)
        }

    def _produce(self) -> Iterator[AbstractRow]:
        aggregations = self.aggregations
        # we need a row iterator for every aggregation to be fully generic
        # since they potentially share no structure
        #
        # one child iter for *all* projections
        # one child iter for *each* window aggregation
        child, *rowterators = itertools.tee(self.child, len(aggregations) + 1)
        aggnames = aggregations.keys()
        aggvalues = aggregations.values()

        # The .compute method returns an iterator of aggregation results.
        # Each element of the iterator is the result of a single column in a
        # single row of the corresponding window function, so zipping them
        # produces one tuple of aggregate values per input row.
        aggrows = (
            dict(zip(aggnames, aggrow))
            for aggrow in zip(
                *map(
                    WindowAggregateSpecification.compute,
                    aggvalues,
                    rowterators,
                )
            )
        )

        projections = self.projections
        projnames = projections.keys()
        projvalues = projections.values()
        projrows = (
            dict(zip(projnames, (proj(row) for proj in projvalues))) for row in child
        )

        # Use zip_longest here, because either of aggrows or projrows can be
        # empty (a query may have only scalar projections or only window
        # aggregations); the missing side contributes an empty mapping.
        return (
            Row(toolz.merge(projrow, aggrow), _id=-1)
            for aggrow, projrow in itertools.zip_longest(
                aggrows, projrows, fillvalue={}
            )
        )
class Mutate(Projection):
    """A relation representing appending columns to an existing relation.

    The projected/window columns are computed by :class:`Projection` and
    merged back onto the original columns of each child row.
    """

    __slots__ = ()

    def _produce(self) -> Iterator[AbstractRow]:
        # Re-assign self.child so the child relation can be consumed twice
        # without clobbering its iterator: super()._produce() reads
        # self.child to compute the new columns, while ``child`` here
        # supplies the original columns that are merged with them.
        child, self.child = itertools.tee(self.child)
        return (
            Row.from_mapping(row, _id=-1)
            for row in map(toolz.merge, child, super()._produce())
        )
class Aggregation(Generic[AssociativeAggregate], Relation):
    """A relation representing aggregation of columns."""

    __slots__ = "child", "metrics"

    def __init__(
        self,
        child: Relation,
        metrics: Mapping[str, AggregateSpecification[AssociativeAggregate]],
    ) -> None:
        super().__init__()
        self.child = child
        self.metrics: Mapping[
            str, AggregateSpecification[AssociativeAggregate]
        ] = metrics

    def _produce(self) -> Iterator[AbstractRow]:
        specs = self.metrics

        def fresh_aggregates() -> Mapping[str, AssociativeAggregate]:
            # One aggregate instance per metric, created lazily per group.
            return {name: spec.aggregate_type() for name, spec in specs.items()}

        groups: Mapping[
            PartitionKey, Mapping[str, AssociativeAggregate]
        ] = collections.defaultdict(fresh_aggregates)

        child = typing.cast(Relation, self.child)
        partitioners = child.partitioners
        for row in child:
            group_key = tuple(
                (name, keyfunc(row)) for name, keyfunc in partitioners.items()
            )
            for name, aggregate in groups[group_key].items():
                arguments = (getter(row) for getter in specs[name].getters)
                aggregate.step(*arguments)

        for group_key, aggregates in groups.items():
            result = dict(group_key)
            result.update(
                (name, aggregate.finalize())
                for name, aggregate in aggregates.items()
            )
            yield Row.from_mapping(result)
class Selection(Relation):
    """A relation containing only the child rows that satisfy a predicate.

    Attributes
    ----------
    predicate
        A callable mapping an :class:`~stupidb.row.AbstractRow` to
        :class:`bool`.
    """

    __slots__ = "child", "predicate"

    def __init__(self, child: Relation, predicate: Predicate) -> None:
        super().__init__()
        self.child = child
        self.predicate = predicate

    def _produce(self) -> Iterator[AbstractRow]:
        keep = self.predicate
        return (row for row in self.child if keep(row))
class GroupBy(Relation):
    """A relation that records partitioning keys for later aggregation.

    Grouping is deferred: this operator only remembers the key callables in
    ``partitioners`` (each maps an :class:`~stupidb.row.AbstractRow` to a
    :class:`typing.Hashable`) and streams its child unchanged.
    """

    __slots__ = "child", "group_by", "partitioners"

    def __init__(self, child: Relation, group_by: Mapping[str, PartitionBy]) -> None:
        super().__init__()
        self.child = child
        self.partitioners = group_by

    def _produce(self) -> Iterator[AbstractRow]:
        yield from self.child
class SortBy(Relation):
    """A relation whose child rows are sorted by one or more keys.

    Attributes
    ----------
    order_by
        Callables mapping an :class:`~stupidb.row.AbstractRow` to a
        :class:`~stupidb.protocols.Comparable`.
    null_ordering
        Whether nulls sort before or after non-null values.
    """

    __slots__ = "child", "order_by", "null_ordering"

    def __init__(
        self, child: Relation, order_by: tuple[OrderBy, ...], null_ordering: Nulls
    ) -> None:
        super().__init__()
        self.child = child
        self.order_by = order_by
        self.null_ordering = null_ordering

    def _produce(self) -> Iterator[AbstractRow]:
        # Rows are compared pairwise so that the configured null ordering
        # can be honoured; cmp_to_key adapts the comparator for sorted().
        comparer = functools.partial(
            row_key_compare,
            toolz.juxt(*self.order_by),
            self.null_ordering,
        )
        ordered = sorted(self.child, key=functools.cmp_to_key(comparer))
        return iter(ordered)
class Limit(Relation):
    """A relation that skips ``offset`` rows and keeps at most ``limit``."""

    __slots__ = "child", "offset", "limit"

    def __init__(self, child: Relation, *, offset: int, limit: int | None) -> None:
        super().__init__()
        self.child = child
        self.offset = offset
        self.limit = limit

    def _produce(self) -> Iterator[AbstractRow]:
        start = self.offset
        # ``limit is None`` means "no upper bound".
        stop = None if self.limit is None else start + self.limit
        return itertools.islice(self.child, start, stop)
class Join(Relation):
    """Base class for join operators built on the cartesian product.

    ``grouped`` is the product of ``left`` and ``right`` grouped by
    consecutive equal left rows; because every pairing for a given left row
    is adjacent in the product, :func:`itertools.groupby` yields exactly one
    group per left row.  ``rows`` flattens those groups back into a single
    stream of :class:`~stupidb.row.JoinedRow` instances.

    NOTE(review): ``grouped`` and ``rows`` share the same underlying
    iterator, so a subclass must consume exactly one of them (outer joins
    use ``grouped``; cross/inner joins use ``rows``).
    """

    __slots__ = "grouped", "rows"

    def __init__(self, left: Relation, right: Relation) -> None:
        super().__init__()
        self.grouped = itertools.groupby(
            (
                JoinedRow(left_row, right_row, _id=-1)
                for left_row, right_row in itertools.product(left, right)
            ),
            key=lambda row: row.left,
        )
        self.rows = itertools.chain.from_iterable(rows for _, rows in self.grouped)
class CrossJoin(Join):
    """Cartesian product of two relations: every pairing is emitted."""

    __slots__ = ()

    def _produce(self) -> Iterator[AbstractRow]:
        yield from self.rows
class InnerJoin(Join):
    """Join keeping only the pairings that satisfy ``predicate``."""

    __slots__ = ("predicate",)

    def __init__(
        self, left: Relation, right: Relation, predicate: JoinPredicate
    ) -> None:
        super().__init__(left, right)
        self.predicate = predicate

    def _produce(self) -> Iterator[AbstractRow]:
        matches = self.predicate
        return (pair for pair in self.rows if matches(pair.left, pair.right))
class LeftJoin(Join):
    """Left outer join: every left row appears in the output at least once.

    A left row with no right match under ``predicate`` is emitted once,
    paired with a right side whose values are all ``None``.
    """

    __slots__ = ("predicate",)

    def __init__(
        self, left: Relation, right: Relation, predicate: JoinPredicate
    ) -> None:
        super().__init__(left, right)
        self.predicate = predicate

    def _produce(self) -> Iterator[AbstractRow]:
        # Consume the grouped cartesian product: one group per left row.
        for left_row, joined_rows in self.grouped:
            matched = False
            for joined_row in joined_rows:
                right_row = joined_row.right
                if self.predicate(left_row, right_row):
                    matched = True
                    yield JoinedRow(left_row, right_row, _id=-1)
            if not matched:
                # dict.fromkeys maps every right column name to None,
                # producing the null-padded right side of the outer join.
                yield JoinedRow(left_row, dict.fromkeys(right_row), _id=-1)
class RightJoin(LeftJoin):
    """Right outer join, implemented as a left join with swapped operands."""

    __slots__ = ()

    def __init__(
        self, left: Relation, right: Relation, predicate: JoinPredicate
    ) -> None:
        # Flip the inputs; LeftJoin then preserves every *right* row.
        super().__init__(right, left, predicate)

    def _produce(self) -> Iterator[AbstractRow]:
        # Swap the sides back so callers observe (left, right) order.
        swapped = super()._produce()
        return (JoinedRow(row.right, row.left) for row in swapped)
class SetOperation(Relation):
    """Base class for the relational set operators."""

    __slots__ = "left", "right"

    def __init__(self, left: Relation, right: Relation) -> None:
        super().__init__()
        self.left = left
        self.right = right

    @staticmethod
    def itemize(
        mappings: Iterable[AbstractRow],
    ) -> frozenset[tuple[tuple[str, Any], ...]]:
        """Return a hashable version of `mappings`.

        Each mapping becomes a tuple of its ``(key, value)`` pairs so rows
        can be compared by value and held in sets.
        """
        hashable_rows = set()
        for mapping in mappings:
            hashable_rows.add(tuple(mapping.items()))
        return frozenset(hashable_rows)
class Union(SetOperation):
    """Set union: distinct rows drawn from either input relation."""

    __slots__ = ()

    def _produce(self) -> Iterator[AbstractRow]:
        def row_identity(row: AbstractRow) -> frozenset:
            # Rows compare by value, irrespective of key order.
            return frozenset(row.items())

        combined = itertools.chain(self.left, self.right)
        return toolz.unique(combined, key=row_identity)
class UnionAll(SetOperation):
    """Bag union: every row from both inputs, duplicates preserved."""

    __slots__ = ()

    def _produce(self) -> Iterator[AbstractRow]:
        yield from self.left
        yield from self.right
class IntersectAll(SetOperation):
    """Intersection that emits the matching rows from *both* sides."""

    __slots__ = ()

    def _produce(self) -> Iterator[AbstractRow]:
        left_items = self.itemize(self.left)
        right_items = self.itemize(self.right)
        # First the left rows that also occur on the right, then the right
        # rows that also occur on the left.
        for source, other in (
            (left_items, right_items),
            (right_items, left_items),
        ):
            for row_items in source:
                if row_items in other:
                    yield Row.from_mapping(dict(row_items))
class Intersect(SetOperation):
    """Set intersection of two relations, compared by row value."""

    __slots__ = ()

    def _produce(self) -> Iterator[AbstractRow]:
        common = self.itemize(self.left) & self.itemize(self.right)
        for row_items in common:
            yield Row.from_mapping(dict(row_items))
class Difference(SetOperation):
    """Distinct rows of ``left`` whose values never occur in ``right``."""

    __slots__ = ()

    def _produce(self) -> Iterator[AbstractRow]:
        right_items = self.itemize(self.right)
        surviving = (tuple(row.items()) for row in self.left)
        # toolz.unique deduplicates the surviving left rows by value.
        return toolz.unique(
            Row.from_mapping(dict(items))
            for items in surviving
            if items not in right_items
        )
class DifferenceAll(SetOperation):
    """All rows of ``left`` (duplicates kept) absent from ``right``."""

    __slots__ = ()

    def _produce(self) -> Iterator[AbstractRow]:
        right_items = self.itemize(self.right)
        for row in self.left:
            items = tuple(row.items())
            if items not in right_items:
                yield Row.from_mapping(dict(items))
| 28.930754 | 88 | 0.612672 | 13,192 | 0.928687 | 1,566 | 0.110243 | 812 | 0.057163 | 0 | 0 | 3,191 | 0.224639 |
ea2bb80a4845b58954c6062cd938e68d554b3abd | 169 | py | Python | www/aeki/cgi/aeki_config.py | otuk/aeki | 6de9629a74698737e07ae86650925e3051ead5ee | [
"MIT"
] | null | null | null | www/aeki/cgi/aeki_config.py | otuk/aeki | 6de9629a74698737e07ae86650925e3051ead5ee | [
"MIT"
] | null | null | null | www/aeki/cgi/aeki_config.py | otuk/aeki | 6de9629a74698737e07ae86650925e3051ead5ee | [
"MIT"
# Runtime configuration consumed by the aeki CGI scripts.
aeki_config = {
    # rename to hostname for cgi if you like
    "AEKI_HOST": "localhost",
    # assumes the IoT test device is also on the local host
    "IOT_HOST": "localhost",
}
| 24.142857 | 75 | 0.674556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.781065 |
ea2bf117e4da0f0fabb4774dddd8eef0987316da | 6,540 | py | Python | Baseline/NABA/naba.py | sarthak-chakraborty/PARIMA | c6ceb6e17fc3c934603fa843febc42a8b6ee5bb1 | [
"MIT"
] | 13 | 2021-03-06T16:53:33.000Z | 2022-02-04T20:28:13.000Z | Baseline/NABA/naba.py | sarthak-chakraborty/Adaptive-360-video | c6ceb6e17fc3c934603fa843febc42a8b6ee5bb1 | [
"MIT"
] | 6 | 2021-06-02T08:08:09.000Z | 2022-03-12T00:58:26.000Z | Baseline/NABA/naba.py | sarthak-chakraborty/Adaptive-360-video | c6ceb6e17fc3c934603fa843febc42a8b6ee5bb1 | [
"MIT"
] | 3 | 2021-05-26T03:32:04.000Z | 2021-07-17T14:34:20.000Z | import numpy as np
import math
import pickle
def get_data(data, frame_nos, dataset, topic, usernum, fps, milisec, width, height, view_width, view_height, offset=0):
    """
    Read the recorded viewport trace for one (dataset, topic, user) triple.

    Appends one ``(feature_dict, next_x, next_y)`` sample per frame to
    ``data`` and the matching frame number to ``frame_nos``; both lists are
    mutated in place and also returned.

    Parameters
    ----------
    data, frame_nos : lists extended in place.
    dataset : 1 or 2, selects the trace layout.
    topic, usernum : identify the pickled trace file to load.
    fps, milisec : frame rate and time unit used to turn timestamps into
        frame numbers.
    width, height : pixel dimensions of the full equirectangular frame.
    view_width, view_height : dimensions the viewport was recorded in,
        used to rescale coordinates.
    offset : playback start time in seconds.  Bug fix: the original body
        read a module-level ``offset`` that is never defined in this file,
        raising NameError at runtime; it is now an explicit parameter
        defaulting to 0.

    Returns
    -------
    tuple
        ``(data, frame_nos, max_frame)``.
    """
    VIEW_PATH = '../../Viewport/'
    # Use a context manager so the trace file handle is closed, not leaked.
    with open(VIEW_PATH + 'ds{}/viewport_ds{}_topic{}_user{}'.format(dataset, dataset, topic, usernum), 'rb') as view_file:
        view_info = pickle.load(view_file, encoding='latin1')

    if dataset == 1:
        max_frame = int(view_info[-1][0]*1.0*fps/milisec)

        for i in range(len(view_info)-1):
            frame = int(view_info[i][0]*1.0*fps/milisec)
            frame += int(offset*1.0*fps/milisec)
            frame_nos.append(frame)
            if(frame > max_frame):
                break
            X = {}
            X['VIEWPORT_x'] = int(view_info[i][1][1]*width/view_width)
            X['VIEWPORT_y'] = int(view_info[i][1][0]*height/view_height)
            data.append((X, int(view_info[i+1][1][1]*width/view_width), int(view_info[i+1][1][0]*height/view_height)))

    elif dataset == 2:
        # The usable window is the 60 seconds starting at ``offset``.
        # NOTE(review): if the trace does not span ``offset``/``offset+60``
        # these loops never break and ``max_frame``/``min_index`` stay
        # unbound, so the code below raises -- confirm against the traces.
        for k in range(len(view_info)-1):
            if view_info[k][0] <= offset+60 and view_info[k+1][0] > offset+60:
                max_frame = int(view_info[k][0]*1.0*fps/milisec)
                break

        for k in range(len(view_info)-1):
            if view_info[k][0] <= offset and view_info[k+1][0] > offset:
                min_index = k+1
                break

        prev_frame = 0
        for i in range(min_index, len(view_info)-1):
            frame = int((view_info[i][0])*1.0*fps/milisec)
            if frame == prev_frame:
                # Skip duplicate timestamps that map to the same frame.
                continue
            if(frame > max_frame):
                break
            frame_nos.append(frame)
            X = {}
            X['VIEWPORT_x'] = int(view_info[i][1][1]*width/view_width)
            X['VIEWPORT_y'] = int(view_info[i][1][0]*height/view_height)
            data.append((X, int(view_info[i+1][1][1]*width/view_width), int(view_info[i+1][1][0]*height/view_height)))
            prev_frame = frame

    return data, frame_nos, max_frame
def tiling(data, frame_nos, max_frame, width, height, nrow_tiles, ncol_tiles, fps, pred_nframe):
    """
    Calculate the tiles corresponding to the viewport and segment them into
    chunks of at most ``pred_nframe`` frames.

    Returns ``(act_tiles, chunk_frames)`` where ``act_tiles`` holds one
    ``(tile_row, tile_col)`` per processed frame and ``chunk_frames`` holds
    lists of indices into ``frame_nos`` for each chunk.
    """
    count=0  # unused
    i=0
    act_tiles = []
    chunk_frames = []

    # Leaving the first 5 seconds ( to keep consistent with our model)
    while True:
        curr_frame = frame_nos[i]
        if curr_frame<5*fps:
            i=i+1
            # NOTE(review): indexes ``data`` by *frame number*, not by the
            # loop index ``i`` -- confirm that frame numbers are valid
            # positions in ``data``.  The unpacked values are discarded.
            [inp_i,x,y]=data[curr_frame]
        else:
            break

    # Calculate the tiles and store them in chunks
    while True:
        curr_frame = frame_nos[i]
        # Frames remaining in this chunk; non-positive means we passed
        # ``max_frame`` and are done.
        nframe = min(pred_nframe, max_frame - frame_nos[i])
        if(nframe <= 0):
            break

        # Add the frames that will be in the current chunk
        frames = {i}
        for k in range(i+1, len(frame_nos)):
            if(frame_nos[k] < curr_frame + nframe):
                frames.add(k)
            else:
                i=k
                break

        # If the for-loop ran to completion (no break), advance ``i`` to the
        # last examined index so the outer loop can terminate.
        if(i!=k):
            i=k
        if(i==(len(frame_nos)-1)):
            # NOTE(review): breaking here discards the final chunk before it
            # is appended -- intentional in the original baseline.
            break

        frames = sorted(frames)
        chunk_frames.append(frames)

        # Get the actual tile for every frame of the chunk
        for k in range(len(frames)):
            [inp_k, x_act, y_act] = data[frames[k]]
            # Map viewport-centre pixel coordinates to a tile index.
            actual_tile_col = int(x_act * ncol_tiles / width)
            actual_tile_row = int(y_act * nrow_tiles / height)

            # Wrap out-of-range indices around the sphere (equirectangular
            # layout wraps both horizontally and vertically here).
            actual_tile_row = actual_tile_row-nrow_tiles if(actual_tile_row >= nrow_tiles) else actual_tile_row
            actual_tile_col = actual_tile_col-ncol_tiles if(actual_tile_col >= ncol_tiles) else actual_tile_col
            actual_tile_row = actual_tile_row+nrow_tiles if actual_tile_row < 0 else actual_tile_row
            actual_tile_col = actual_tile_col+ncol_tiles if actual_tile_col < 0 else actual_tile_col

            act_tiles.append((actual_tile_row, actual_tile_col))

    return act_tiles, chunk_frames
def alloc_bitrate(frame_nos, chunk_frames, pref_bitrate, nrow_tiles, ncol_tiles):
    """
    Spread the preferred bitrate uniformly over every tile of each chunk.

    ``frame_nos`` is unused; it is kept so the signature matches the other
    bitrate-allocation baselines.  Returns one ``nrow_tiles x ncol_tiles``
    grid of equal per-tile bitrates per chunk.
    """
    per_tile = pref_bitrate / (nrow_tiles * ncol_tiles)
    return [
        [[per_tile for _ in range(ncol_tiles)] for _ in range(nrow_tiles)]
        for _ in chunk_frames
    ]
def calc_qoe(vid_bitrate, act_tiles, frame_nos, chunk_frames, width, height, nrow_tiles, ncol_tiles, player_width, player_height):
    """
    Calculate QoE from the allocated bitrates over the first 55 chunks.

    QoE per chunk = mean viewport bitrate (qoe_1) minus penalties for
    intra-viewport rate variance (qoe_2), inter-tile rate variance (qoe_3)
    and chunk-to-chunk quality switches (qoe_4), equally weighted.
    ``frame_nos`` is unused; kept for signature parity with the other
    baselines.
    """
    qoe = 0
    prev_qoe_1 = 0
    # Equal weights for the three penalty terms.
    weight_1 = 1
    weight_2 = 1
    weight_3 = 1

    tile_width = width/ncol_tiles
    tile_height = height/nrow_tiles

    # Only the first 55 chunks contribute to the score.
    for i in range(len(chunk_frames[:55])):
        qoe_1, qoe_2, qoe_3, qoe_4 = 0, 0, 0, 0
        tile_count = 0
        rows, cols = set(), set()
        rate = []

        chunk = chunk_frames[i]
        chunk_bitrate = vid_bitrate[i]
        # Slice of actual viewed tiles covered by this chunk, re-based to
        # the first frame index of the first chunk.
        chunk_act = act_tiles[chunk[0]-chunk_frames[0][0] : chunk[-1]-chunk_frames[0][0]]

        for j in range(len(chunk_act)):
            # Count a tile only the first time its row or column appears.
            if(chunk_act[j][0] not in rows or chunk_act[j][1] not in cols):
                tile_count += 1
            rows.add(chunk_act[j][0])
            cols.add(chunk_act[j][1])

            row, col = chunk_act[j][0], chunk_act[j][1]

            # Find the number of tiles that can be accomodated from the center of the viewport
            n_tiles_width = math.ceil((player_width/2 - tile_width/2)/tile_width)
            n_tiles_height = math.ceil((player_height/2 - tile_height/2)/tile_height)
            tot_tiles = (2 * n_tiles_width+1) * (2 * n_tiles_height+1)

            local_qoe = 0
            local_rate = []  # a new metric to get the standard deviation of bitrate within the player view (qoe2)
            for x in range(2*n_tiles_height+1):
                for y in range(2*n_tiles_width+1):
                    sub_row = row - n_tiles_height + x
                    sub_col = col - n_tiles_width + y

                    # Wrap indices that fall off the tile grid.
                    # NOTE(review): the negative-index branches add ``row``/
                    # ``col`` a second time (``nrow_tiles+row+sub_row``),
                    # which looks asymmetric with the >= branches -- confirm
                    # against the reference implementation.
                    sub_row = nrow_tiles+row+sub_row if sub_row < 0 else sub_row
                    sub_col = ncol_tiles+col+sub_col if sub_col < 0 else sub_col
                    sub_row = sub_row-nrow_tiles if sub_row >= nrow_tiles else sub_row
                    sub_col = sub_col-ncol_tiles if sub_col >= ncol_tiles else sub_col

                    local_qoe += chunk_bitrate[sub_row][sub_col]
                    local_rate.append(chunk_bitrate[sub_row][sub_col])

            # Average bitrate inside the player view for this frame.
            qoe_1 += local_qoe / tot_tiles
            if(len(local_rate)>0):
                # Spatial variance inside the player view.
                qoe_2 += np.std(local_rate)

            rate.append(local_qoe / tot_tiles)

        # Avoid division by zero for chunks with no viewed tiles.
        tile_count = 1 if tile_count==0 else tile_count
        qoe_1 /= tile_count
        qoe_2 /= tile_count

        if(len(rate)>0):
            # Variance of the per-frame viewport rates across the chunk.
            qoe_3 = np.std(rate)
        qoe_3 /= tile_count

        if(i>0):
            # Quality-switch penalty relative to the previous chunk.
            qoe_4 = abs(prev_qoe_1 - qoe_1)

        qoe += qoe_1 - weight_1*qoe_2 - weight_2*qoe_3 - weight_3*qoe_4
        prev_qoe_1 = qoe_1

    return qoe
| 29.459459 | 146 | 0.669266 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 831 | 0.127064 |
ea2cea171e2a0a408098703594d6df6015c2788a | 333 | py | Python | python/primes.py | matheuskiser/pdx_code_guild | 49a5c62fb468253eb4d9a1fb11166df79bb10873 | [
"MIT"
] | null | null | null | python/primes.py | matheuskiser/pdx_code_guild | 49a5c62fb468253eb4d9a1fb11166df79bb10873 | [
"MIT"
] | null | null | null | python/primes.py | matheuskiser/pdx_code_guild | 49a5c62fb468253eb4d9a1fb11166df79bb10873 | [
"MIT"
] | null | null | null | """
User picks a number n and the program returns all prime numbers from 0 up to n.
"""
def is_prime(num):
    """Return True if num is prime.

    Bug fix: the original returned True for 0 and 1 (the trial-division
    loop never ran).  Also stops at sqrt(num) instead of num, turning the
    check from O(n) into O(sqrt(n)).
    """
    if num < 2:
        return False
    if num % 2 == 0:
        # 2 is the only even prime.
        return num == 2
    i = 3
    while i * i <= num:
        if num % i == 0:
            return False
        i += 2
    return True
# Python 2 script body: read the upper bound from the user.
number_picked = int(raw_input("Pick a number: "))

# 2 is the only even prime, so print it up front and test odd candidates
# only.  NOTE(review): 2 is printed even when the user enters a number
# less than or equal to 2 -- confirm whether that is intended.
print 2
for i in range(3, number_picked, 2):
    if is_prime(i):
        print i,
ea2d0db0e5013c516e20cfd3f224e6f24b6a4de8 | 9,331 | py | Python | gateway/settings.py | 42cc/dashr-gw | 45ec8f99a730b97b80a4068a0e9ae8991904038e | [
"MIT"
] | 2 | 2018-02-06T12:18:49.000Z | 2018-02-08T08:26:56.000Z | gateway/settings.py | 42cc/dashr-gw | 45ec8f99a730b97b80a4068a0e9ae8991904038e | [
"MIT"
] | null | null | null | gateway/settings.py | 42cc/dashr-gw | 45ec8f99a730b97b80a4068a0e9ae8991904038e | [
"MIT"
] | 3 | 2018-02-12T02:14:11.000Z | 2020-05-21T19:25:41.000Z | """
Django settings for gateway project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import sys
import dj_database_url
import django_cache_url
from kombu import Exchange, Queue
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jksd@$=t_y2_epxck%_^%6mk$l8e6&mq)*++s%q6%yyk3!1v&x'
DASHD_RPCUSER = 'rpcuser'
DASHD_RPCPASSWORD = 'rpcpassword'
DASHD_ACCOUNT_NAME = 'gateway'
DASHD_URL = os.environ.get(
'DASHD_URL',
'http://{}:{}@127.0.0.1:19998'.format(DASHD_RPCUSER, DASHD_RPCPASSWORD),
)
RIPPLE_API_DATA = [
{
'RIPPLE_API_URL': os.environ.get(
'RIPPLED_URL',
'https://s1.ripple.com:51234',
),
},
]
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '0.0.0.0']
# Application definition
sys.path.append(os.path.join(BASE_DIR, 'apps'))
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ripple_api',
'compressor',
'webpack_loader',
'ckeditor',
'solo',
'apps.core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gateway.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'apps.core.context_processors.minimal_amounts',
],
'string_if_invalid': '<< MISSING VARIABLE "%s" >>' if DEBUG else ''
},
},
]
WSGI_APPLICATION = 'gateway.wsgi.application'
CACHES = {'default': django_cache_url.config()}
if os.environ.get('REDIS_URL'):
CACHES['default'] = {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': os.environ.get('REDIS_URL')}
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(
default='postgres://gw_user:gw_pass@localhost:5432/gateway',
conn_max_age=600,
),
}
if 'test' in sys.argv:
DATABASES['default'] = {'ENGINE': 'django.db.backends.sqlite3'}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.'
'UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.'
'NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
DATETIME_FORMAT = 'N j, Y, P e'
USE_I18N = False
USE_L10N = False
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'assets'),
)
# DJANGO COMPRESS
COMPRESS_ENABLED = True
COMPRESS_PRECOMPILERS = (
('text/less', 'lessc {infile} {outfile} --autoprefix=">0%"'),
)
COMPRESS_CSS_FILTERS = (
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.CSSMinFilter'
)
# MEDIA
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# WEBPACK LOADER
WEBPACK_LOADER = {
'DEFAULT': {
'BUNDLE_DIR_NAME': 'bundles/',
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
}
}
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# LOGGING
# LOGGING
# Console output only when DEBUG is on; admin e-mails only when it is off.
# NOTE(review): the rotating file handler expects BASE_DIR/logs/ to exist.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        },
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'file': {
            'level': 'INFO',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(BASE_DIR, 'logs/gateway.log'),
            'maxBytes': 10 * 1024 * 1024,
            'backupCount': 100,
            'formatter': 'simple',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'filters': ['require_debug_true'],
            'formatter': 'simple'
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins', 'file'],
            'level': 'ERROR',
            'propagate': True
        },
        'gateway': {
            'handlers': ['mail_admins', 'file', 'console'],
            'level': 'INFO',
            'propagate': True
        },
        'celery': {
            'handlers': ['mail_admins', 'file', 'console'],
            'level': 'INFO',
            'propagate': True
        },
        'ripple': {
            'handlers': ['mail_admins', 'file', 'console'],
            'level': 'INFO',
            'propagate': True
        },
    }
}
# Key directory for encrypted model fields (presumably django-encrypted-fields
# — TODO confirm which package consumes this).
ENCRYPTED_FIELDS_KEYDIR = os.path.join(BASE_DIR, 'fieldkeys')
# django-solo singleton objects are cached in the default cache.
SOLO_CACHE = 'default'
DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL')
REDIS_PORT = 6379
REDIS_DB = 0
REDIS_HOST = os.environ.get('REDIS_PORT_6379_TCP_ADDR', 'redis')
RABBIT_HOSTNAME = os.environ.get('RABBIT_PORT_5672_TCP', 'rabbit')
if RABBIT_HOSTNAME.startswith('tcp://'):
RABBIT_HOSTNAME = RABBIT_HOSTNAME.split('//')[1]
BROKER_URL = os.environ.get('BROKER_URL', '')
if not BROKER_URL:
BROKER_URL = 'amqp://{user}:{password}@{hostname}/{vhost}/'.format(
user=os.environ.get('RABBIT_ENV_USER', 'admin'),
password=os.environ.get('RABBIT_ENV_RABBITMQ_PASS', 'mypass'),
hostname=RABBIT_HOSTNAME,
vhost=os.environ.get('RABBIT_ENV_VHOST', ''))
# We don't want to have dead connections stored on rabbitmq,
# so we have to negotiate using heartbeats
BROKER_HEARTBEAT = '?heartbeat=30'
if not BROKER_URL.endswith(BROKER_HEARTBEAT):
BROKER_URL += BROKER_HEARTBEAT
BROKER_POOL_LIMIT = 1
BROKER_CONNECTION_TIMEOUT = 10
# Celery configuration
# configure queues, currently we have only one
CELERY_DEFAULT_QUEUE = 'default'
CELERY_QUEUES = (
Queue('default', Exchange('default'), routing_key='default'),
)
# Sensible settings for celery
CELERY_ALWAYS_EAGER = False
CELERY_ACKS_LATE = True
CELERY_TASK_PUBLISH_RETRY = True
CELERY_DISABLE_RATE_LIMITS = False
# By default we will ignore result
# If you want to see results and try out tasks interactively,
# change it to False
# Or change this setting on tasks level
CELERY_IGNORE_RESULT = True
CELERY_SEND_TASK_ERROR_EMAILS = False
CELERY_TASK_RESULT_EXPIRES = 600
# Set redis as celery result backend
CELERY_RESULT_BACKEND = 'redis://%s:%d/%d' % (REDIS_HOST, REDIS_PORT, REDIS_DB)
CELERY_REDIS_MAX_CONNECTIONS = 1
# Don't use pickle as serializer, json is much safer
CELERY_TASK_SERIALIZER = "json"
CELERY_ACCEPT_CONTENT = ['application/json']
CELERYD_HIJACK_ROOT_LOGGER = False
CELERYD_PREFETCH_MULTIPLIER = 1
CELERYD_MAX_TASKS_PER_CHILD = 1000
# Try to load settings from ``settings_local.py`` file
try:
from settings_local import * # NOQA
except ImportError, e:
sys.stderr.write('settings_local.py not found. Using default settings\n')
sys.stderr.write('%s: %s\n\n' % (e.__class__.__name__, e))
| 27.606509 | 79 | 0.647305 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,090 | 0.545494 |
ea2f6cab964b758bde6ba04ace1b9ebcbb9b589e | 1,961 | py | Python | basics-data-structure/array/processQueries.py | nishantml/Data-Structure-And-Algorithms | 1f75bdbddb5fa4bccba762515460c73280cde65d | [
"MIT"
] | null | null | null | basics-data-structure/array/processQueries.py | nishantml/Data-Structure-And-Algorithms | 1f75bdbddb5fa4bccba762515460c73280cde65d | [
"MIT"
] | null | null | null | basics-data-structure/array/processQueries.py | nishantml/Data-Structure-And-Algorithms | 1f75bdbddb5fa4bccba762515460c73280cde65d | [
"MIT"
] | 1 | 2020-08-25T19:05:19.000Z | 2020-08-25T19:05:19.000Z | """
Given the array queries of positive integers between 1 and m, you have to process all queries[i] (from i=0 to i=queries.length-1) according to the following rules:
In the beginning, you have the permutation P=[1,2,3,...,m].
For the current i, find the position of queries[i] in the permutation P (indexing from 0) and then move this at the beginning of the permutation P. Notice that the position of queries[i] in P is the result for queries[i].
Return an array containing the result for the given queries.
Example 1:
Input: queries = [3,1,2,1], m = 5
Output: [2,1,2,1]
Explanation: The queries are processed as follow:
For i=0: queries[i]=3, P=[1,2,3,4,5], position of 3 in P is 2, then we move 3 to the beginning of P resulting in P=[3,1,2,4,5].
For i=1: queries[i]=1, P=[3,1,2,4,5], position of 1 in P is 1, then we move 1 to the beginning of P resulting in P=[1,3,2,4,5].
For i=2: queries[i]=2, P=[1,3,2,4,5], position of 2 in P is 2, then we move 2 to the beginning of P resulting in P=[2,1,3,4,5].
For i=3: queries[i]=1, P=[2,1,3,4,5], position of 1 in P is 1, then we move 1 to the beginning of P resulting in P=[1,2,3,4,5].
Therefore, the array containing the result is [2,1,2,1].
Example 2:
Input: queries = [4,1,2,2], m = 4
Output: [3,1,2,0]
Example 3:
Input: queries = [7,5,5,8,3], m = 8
Output: [6,5,0,7,5]
Constraints:
1 <= m <= 10^3
1 <= queries.length <= m
1 <= queries[i] <= m
"""
from typing import List
class Solution:
    """LeetCode 1409 — "Queries on a Permutation With Key"."""

    def findQueryPosition(self, p: List[int], query: int) -> int:
        """Return the index of ``query`` in ``p``.

        ``p`` is a permutation of 1..m that always contains ``query``
        (per the problem constraints), so the lookup cannot fail.
        """
        return p.index(query)

    def processQueries(self, queries: List[int], m: int) -> List[int]:
        """For each query, record its position in P, then move it to the front.

        P starts as [1, 2, ..., m].  Complexity: O(len(queries) * m).
        Fixes vs. original: removed a leftover debug ``print`` (stdout side
        effect) and a stray semicolon; iterates values instead of indices.
        """
        P = list(range(1, m + 1))
        out = []
        for query in queries:
            position = self.findQueryPosition(P, query)
            out.append(position)
            # Move the queried value to the front of the permutation.
            P.insert(0, P.pop(position))
        return out
| 33.810345 | 221 | 0.63284 | 520 | 0.265171 | 0 | 0 | 0 | 0 | 0 | 0 | 1,412 | 0.720041 |
ea3096bc5a6227f2c2556b76f3b507bcc714068c | 1,876 | py | Python | setup.py | david26694/sktools | 28cb8179b4c895cf00aea1399b2677158bd35bd8 | [
"MIT"
] | 10 | 2020-05-03T09:52:06.000Z | 2021-11-30T07:38:27.000Z | setup.py | david26694/sktools | 28cb8179b4c895cf00aea1399b2677158bd35bd8 | [
"MIT"
] | 7 | 2020-11-19T16:08:09.000Z | 2021-06-17T11:37:31.000Z | setup.py | david26694/sktools | 28cb8179b4c895cf00aea1399b2677158bd35bd8 | [
"MIT"
] | 3 | 2020-06-21T08:46:31.000Z | 2021-08-17T11:32:20.000Z | #!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
# Long-description sources; both files must sit next to setup.py.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read()
# Runtime dependencies (unpinned).
requirements = ['scikit-learn', 'pandas', 'scipy', 'numpy', 'category_encoders',
                'statsmodels']
setup_requirements = []
# Development-only tooling, pinned to exact versions.
misc_requirements = [
    "pip==21.1",
    "bump2version==0.5.11",
    "wheel==0.33.6",
    "watchdog==0.9.0",
    "flake8==3.7.8",
    "tox==3.14.0",
    "coverage==4.5.4",
    "Sphinx==1.8.5",
    "sphinx-rtd-theme==0.4.3",
    "twine==1.14.0",
    "pre-commit==2.6.0",
]
# Tests need the same packages the library itself needs.
test_requirements = requirements
dev_requirements = misc_requirements + requirements
setup(
    author="David Masip Bonet",
    author_email='david26694@gmail.com',
    python_requires='>=3.5',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    description="Tools to extend sklearn",
    install_requires=requirements,
    license="MIT license",
    long_description=readme + '\n\n' + history,
    include_package_data=True,
    keywords='sktools',
    name='sktools',
    packages=find_packages(include=['sktools', 'sktools.*']),
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    # ``pip install sktools[test]`` / ``sktools[dev]`` pull the extras.
    extras_require={
        "test": test_requirements,
        "dev": dev_requirements
    },
    url='https://github.com/david26694/sktools',
    version='0.1.4',
    zip_safe=False,
)
| 26.8 | 80 | 0.624733 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 847 | 0.451493 |
ea319b174e521eedd1eb8788136bfce6b273b847 | 1,982 | py | Python | test_autogalaxy/util/test_error_util.py | caoxiaoyue/PyAutoGalaxy | ad2b4b27404f5bf0f65ba9a0cd7c3ee6570e2d05 | [
"MIT"
] | 7 | 2021-05-29T08:46:29.000Z | 2022-01-23T14:06:20.000Z | test_autogalaxy/util/test_error_util.py | caoxiaoyue/PyAutoGalaxy | ad2b4b27404f5bf0f65ba9a0cd7c3ee6570e2d05 | [
"MIT"
] | 3 | 2021-01-06T09:42:44.000Z | 2022-03-10T15:52:23.000Z | test_autogalaxy/util/test_error_util.py | caoxiaoyue/PyAutoGalaxy | ad2b4b27404f5bf0f65ba9a0cd7c3ee6570e2d05 | [
"MIT"
] | 3 | 2021-02-10T07:45:16.000Z | 2022-01-21T17:36:40.000Z | from autofit.non_linear.samples.pdf import quantile
import autogalaxy as ag
import numpy as np
def test__quantile_1d_profile():
    """Check ag.util.error.quantile_profile_1d against known medians/quantiles."""
    base = np.array([1.0, 2.0, 3.0])
    doubled = np.array([2.0, 4.0, 6.0])
    # The median of two identical profiles is that profile.
    result = ag.util.error.quantile_profile_1d(
        profile_1d_list=[base, np.array([1.0, 2.0, 3.0])], q=0.5
    )
    assert (result == np.array([1.0, 2.0, 3.0])).all()
    # The median of a profile and its double is the element-wise midpoint.
    result = ag.util.error.quantile_profile_1d(
        profile_1d_list=[base, doubled], q=0.5
    )
    assert (result == np.array([1.5, 3.0, 4.5])).all()
    # With nearly all the weight on the first profile, the weighted median
    # collapses onto it.
    weighted_list = [base] * 3 + [doubled] * 4
    weights = np.array([9.9996, 9.9996, 9.9996, 1e-4, 1e-4, 1e-4, 1e-4])
    result = ag.util.error.quantile_profile_1d(
        profile_1d_list=weighted_list, q=0.5, weights=weights
    )
    assert (result == np.array([1.0, 2.0, 3.0])).all()
    # Single-value profiles must reproduce autofit's scalar quantile().
    radial_values = [1.0, 2.0, 3.0, 4.0, 5.0]
    weights = [0.1, 0.3, 0.2, 0.05, 0.35]
    expected = quantile(x=radial_values, q=0.23, weights=weights)
    singles = [np.array([value]) for value in radial_values]
    via_error_util = ag.util.error.quantile_profile_1d(
        profile_1d_list=singles, q=0.23, weights=weights
    )
    assert expected == via_error_util[0]
| 27.527778 | 73 | 0.617558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ea3310d718c746d6f372cb145583373a0ec60324 | 5,967 | py | Python | src/whereshallwemeet/caller.py | bizzinho/whereShallWeMeet | 449cc1aeb9334688392b8b3ffa4b6def1f308bc5 | [
"MIT"
] | null | null | null | src/whereshallwemeet/caller.py | bizzinho/whereShallWeMeet | 449cc1aeb9334688392b8b3ffa4b6def1f308bc5 | [
"MIT"
] | null | null | null | src/whereshallwemeet/caller.py | bizzinho/whereShallWeMeet | 449cc1aeb9334688392b8b3ffa4b6def1f308bc5 | [
"MIT"
] | null | null | null | import os
import pathlib
import sys
import googlemaps
from datetime import datetime as dt
from typing import Union, Tuple, List
class WhereShallWeMeet:
    """Compare friends' travel durations to potential hosts via Google Maps.

    Friends are loaded lazily from ``friendsFile`` (.csv or .yaml/.yml) and
    the Google Maps client is created lazily from ``configPath`` or the
    ``APITOKEN`` environment variable.
    """

    def __init__(self, configPath: str = None, friendsFile: str = None):
        # Optional path to a python module exposing ``apitoken``.
        self.configPath = configPath
        self._gmaps = None
        # Path to the friends description file (.csv, .yaml or .yml).
        self.friendsFile = friendsFile
        self._friends = None

    @property
    def friends(self):
        """Lazily-loaded, name-sorted list of friends that join the party."""
        if self._friends is None:
            self._friends = self._loadFriends(self.friendsFile)
        return self._friends

    @property
    def gmaps(self):
        """Lazily-created googlemaps client."""
        if self._gmaps is None:
            self._gmaps = self._establishConnection(self.configPath)
        return self._gmaps

    def friendStats(self, verbose=True):
        """Build the travel-duration matrix (friend -> potential host) in
        ``self._M``.

        NOTE(review): ``verbose`` is unused and the method looks unfinished
        (see the trailing "XX" marker kept from the original).
        """
        startAddresses = [friend["address"] for friend in self.friends]
        # Only friends available to host are kept as destinations.
        # NOTE(review): for CSV input "availableToHost" is a raw string, so
        # any non-empty value (even "False") is treated as truthy — confirm.
        potentialHosts = [
            addie
            for i, addie in enumerate(startAddresses)
            if self.friends[i]["availableToHost"]
        ]
        DM = self._getDistMatrix(
            startAddresses=startAddresses, destinationAddresses=potentialHosts
        )
        # dist matrix (rows = startpoint, cols = destination)
        self._M = self._json2Matrix(DM)
        # total travel duration to destination
        # XX

    def _loadFriends(self, friendsFile: str):
        """Parse ``friendsFile`` and return friends that join, sorted by name.

        CSV columns are located by keyword ("name", "address", "preferred",
        "host", "joins") in the header row; YAML files must contain a
        top-level ``friends`` list.
        """
        path = pathlib.Path(friendsFile)
        if path.suffix == ".csv":
            import csv
            friends = []
            with open(friendsFile, "r") as f:
                csvfile = list(csv.reader(f))
            # first row is header
            header = csvfile[0]

            def findCol(word):
                # Index of the single header cell whose text contains word.
                hits = [i for i, col in enumerate(header) if word in col.lower()]
                if len(hits) > 1:
                    raise ValueError(
                        f"More than one column header contains {word}."
                    )
                elif len(hits) == 0:
                    raise ValueError(
                        f"No column header contains keyword {word}"
                    )
                return hits[0]

            nameCol = findCol("name")
            addressCol = findCol("address")
            transitCol = findCol("preferred")
            hostCol = findCol("host")
            joinsCol = findCol("joins")
            for row in csvfile[1:]:
                friends.append(
                    {
                        "name": row[nameCol],
                        "address": row[addressCol],
                        "preferredTransitMode": row[transitCol],
                        "availableToHost": row[hostCol],
                        "joinsParty": row[joinsCol],
                    }
                )
        elif (path.suffix == ".yaml") or (path.suffix == ".yml"):
            import yaml
            with open(friendsFile, "r") as f:
                ff = yaml.load(f, Loader=yaml.FullLoader)
            friends = ff["friends"]
        # remove people that don't join
        friends = list(filter(lambda x: x["joinsParty"], friends))
        # sort by name
        friends = sorted(friends, key=lambda elem: elem["name"])
        # BUG FIX: the original assigned self._friends here and returned None,
        # so the ``friends`` property always evaluated to None.
        return friends

    def _establishConnection(
        self,
        configPath: str = None,
    ) -> "googlemaps.client.Client":
        """Create a googlemaps client from a config module or APITOKEN env var.

        (Return annotation is a string so the class can be defined without
        importing googlemaps at annotation-evaluation time.)
        """
        if (configPath is None) and (os.environ.get("APITOKEN") is None):
            raise ValueError("configPath or APITOKEN env variable must exist.")
        elif configPath is not None:
            # user passes apitoken via config file; make it importable
            path = pathlib.Path(configPath)
            sys.path.append(str(path.parent.absolute()))
            # import module (infer module name from filename)
            cfg = __import__(path.stem)
            apitoken = cfg.apitoken
        elif os.environ.get("APITOKEN") is not None:
            # user passes apitoken via env variable
            apitoken = os.environ.get("APITOKEN")
        # establish connection
        gmaps = googlemaps.Client(key=apitoken)
        return gmaps

    def _getDirections(
        self,
        startAddress: str,
        destinationAddress: str,
        transitMode: str = "transit",
        departureTime: Union[str, dt] = None,
        configPath: str = None,
    ) -> Tuple[dict, int]:
        """Return (directions dict, travel duration in seconds).

        BUG FIX: the default departure time used to be ``dt.now()`` evaluated
        once at import time; ``None`` now means "now, at call time".
        """
        if departureTime is None:
            departureTime = dt.now()
        # use directions api
        dir_results = self.gmaps.directions(
            startAddress,
            destinationAddress,
            mode=transitMode,
            departure_time=departureTime,
        )[0]
        # travel duration in seconds
        duration = dir_results["legs"][0]["duration"]["value"]
        return dir_results, duration

    def _getDistMatrix(
        self,
        startAddresses: Union[str, List[str]],
        destinationAddresses: Union[str, List[str]],
        transitMode: str = "transit",
        departureTime: Union[str, dt] = None,
        configPath: str = None,
    ) -> Tuple[dict, int]:
        """Return the raw distance-matrix response for the given addresses."""
        if departureTime is None:
            # Same call-time default fix as in _getDirections.
            departureTime = dt.now()
        dist_results = self.gmaps.distance_matrix(
            startAddresses,
            destinationAddresses,
            mode=transitMode,
            departure_time=departureTime,
        )
        return dist_results

    def _json2Matrix(self, jsonMatrix: dict) -> List[List[int]]:
        """Flatten a distance-matrix response to a 2D list of durations (s).

        Elements whose status is not "OK" become 0.
        """
        matrix = []
        for row in jsonMatrix["rows"]:
            rowList = []
            for elem in row["elements"]:
                if elem["status"] == "OK":
                    rowList.append(elem["duration"]["value"])
                else:
                    rowList.append(0)
            matrix.append(rowList)
        return matrix
| 28.6875 | 79 | 0.537121 | 5,835 | 0.977878 | 0 | 0 | 321 | 0.053796 | 0 | 0 | 1,016 | 0.17027 |
ea332701c21bd1585589dad2180e6c25dfdb1886 | 4,451 | py | Python | iCount/plotting/plot_rnamap.py | genialis/iCount | 80dba0f7292a364c62843d71e76c1b22e6268e14 | [
"MIT"
] | null | null | null | iCount/plotting/plot_rnamap.py | genialis/iCount | 80dba0f7292a364c62843d71e76c1b22e6268e14 | [
"MIT"
] | 1 | 2021-09-30T12:55:37.000Z | 2021-09-30T12:55:37.000Z | iCount/plotting/plot_rnamap.py | ulelab/iCount | b9dc1b21b80e4dae77b3ac33734514091fbe3151 | [
"MIT"
] | 4 | 2021-03-23T12:38:55.000Z | 2021-05-14T10:10:00.000Z | """.. Line to protect from pydocstyle D205, D400.
Plot distribution RNA-map
-------------------------
Plot distribution of crosslinks relative to landmark of specific type.
"""
import os
import pandas as pd
import iCount
# pylint: disable=wrong-import-order
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
from matplotlib import pyplot as plt # pylint: disable=wrong-import-position
# pylint: enable=wrong-import-order
def normalize_cpm(value, total):
    """Scale a raw score to counts-per-million of ``total``."""
    fraction_of_total = value / total
    return fraction_of_total * 10**6
def parse_results_basic(fname):
    """Read an RNA-maps results file; return (DataFrame, total cDNA count).

    The first line holds the total cDNA count as ``<label>:<number>``; the
    second line is the header of the tab-separated score table.
    """
    with open(fname, 'rt') as handle:
        first_line = handle.readline()
    total_cdna = int(first_line.strip().split(':')[1])
    frame = pd.read_csv(fname, delimiter='\t', header=1, index_col=0)
    return frame, total_cdna
def parse_results(fname):
    """Parse an RNA-maps results file into ({position: CPM score}, landmarks).

    Positions are the table's integer column labels; scores are the CPM-
    normalized per-position column sums; the landmark count is the number
    of table rows.
    """
    frame, total_cdna = parse_results_basic(fname)
    normalized = {}
    for position, score in frame.sum(axis=0).to_dict().items():
        normalized[int(position)] = normalize_cpm(score, total_cdna)
    return normalized, len(frame)
def smooth(list_, half_window=1):
    """Box-average each element with its neighbours within +-half_window.

    The window is clipped at both ends of the list, so edge elements are
    averaged over fewer values.
    """
    n = len(list_)
    smoothed = []
    for center in range(n):
        lo = max(0, center - half_window)
        hi = min(n, center + half_window + 1)
        window = list_[lo:hi]
        smoothed.append(sum(window) / len(window))
    return smoothed
def make_outfile_name(fname, imgfmt):
    """Derive ``<dir>/<base>_distro.<imgfmt>`` from a results filename."""
    stem = iCount.files.remove_extension(fname, ['.tsv'])
    folder = os.path.dirname(fname)
    return os.path.join(folder, '{}_distro.{}'.format(stem, imgfmt))
def guess_maptype(fname):
    """Return the RNA map type whose name suffixes ``fname`` (or None).

    Longest names are tried first so e.g. "noncoding-gene-start" is not
    mistaken for "gene-start".
    """
    by_length = sorted(
        iCount.analysis.rnamaps.MAP_TYPES.keys(), key=len, reverse=True
    )
    for map_name in by_length:
        if fname.endswith('{}.tsv'.format(map_name)):
            return map_name
def plot_rnamap(fnames,
                outfile=None,
                up_limit=100,
                down_limit=100,
                ax=None,
                ylim=None,
                imgfmt='png',
                smoothing=1,
                ):
    """Plot the distribution RNA-map for one or more results files.

    Parameters
    ----------
    fnames : list_str
        Rnamaps result files to plot (a single filename is also accepted).
    outfile : str
        Output image file; derived from the first input when None.
    up_limit : int
        Upstream (negative side) x-axis limit.
    down_limit : int
        Downstream (positive side) x-axis limit.
    ax : matplotlib.axes.Axes
        Existing axes to draw into; when given, no figure is created or
        saved, so the plot can be embedded as a subplot.
    ylim : int
        Upper limit of the y-axis (lower limit is 0) when truthy.
    imgfmt : str
        Image format, used only when ``outfile`` is None.
    smoothing : int
        Half-window of the box-average smoothing applied to each curve.

    Returns
    -------
    None
    """
    # Upstream limit must be negative, downstream positive.
    up_limit = -abs(int(up_limit))
    down_limit = abs(int(down_limit))
    if not isinstance(fnames, list):
        fnames = [fnames]
    if not outfile:
        outfile = make_outfile_name(fnames[0], imgfmt)
    # Stand-alone mode (no axes given): create figure + axes and save at
    # the end; otherwise draw into the caller's axes only.
    is_independent = not ax
    if is_independent:
        fig = plt.figure()
        ax = plt.subplot(1, 1, 1)
    for fname in fnames:
        data, landmark_count = parse_results(fname)
        if not data:
            continue
        positions, scores = zip(*sorted(data.items()))
        label = '{} ({} landmarks)'.format(
            guess_maptype(os.path.basename(fname)), landmark_count)
        ax.plot(positions, smooth(scores, smoothing), label=label)
    ax.set_xlim((up_limit, down_limit))
    ax.set_xlabel('Position')
    if ylim:
        ax.set_ylim((0, ylim))
    ax.set_ylabel('Score [CPM]')
    ax.set_title('RNA-map')
    ax.grid(b=True, which='major', axis='both')
    ax.legend()
    if is_independent:
        fig.savefig(outfile)
| 28.350318 | 109 | 0.625253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,789 | 0.401932 |
ea33e9758376d259b5c2e71586d76551a1685aa4 | 7,772 | py | Python | search_and_change/search_and_change_num.py | physics-sp/frida-tools | 8ae44d041417152f0717f48513043a320649e9e9 | [
"MIT"
] | 5 | 2020-02-08T12:25:40.000Z | 2021-08-25T16:49:59.000Z | search_and_change/search_and_change_num.py | physics-sp/frida-tools | 8ae44d041417152f0717f48513043a320649e9e9 | [
"MIT"
] | null | null | null | search_and_change/search_and_change_num.py | physics-sp/frida-tools | 8ae44d041417152f0717f48513043a320649e9e9 | [
"MIT"
] | 4 | 2020-06-03T04:27:02.000Z | 2021-06-07T15:16:20.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import time
try:
import frida
except ImportError:
sys.exit('install frida\nsudo pip3 install frida')
# Number of times 'old_value' was found in memory; None until the injected
# script reports its count through on_message().
matches = None
def err(msg):
    """Write ``msg`` plus a trailing newline to stderr."""
    print(msg, file=sys.stderr)
def read(msg):  # read input from user
    """Prompt until the user enters '' (just <Enter>) or a valid index.

    Returns '' when the user presses <Enter>, otherwise the chosen index
    as an int in [1, matches].  Invalid input blanks the line and re-prompts.
    """
    def _retry(echoed):
        # Move the cursor up one line and blank the rejected input before
        # prompting again.
        sys.stdout.write('\033[F\r')  # Cursor up one line
        blank = ' ' * len(echoed + msg)
        sys.stdout.write('\r' + blank + '\r')
        return read(msg)

    try:
        leido = input(msg)
    except EOFError:
        # BUG FIX: the original referenced the never-assigned input here,
        # raising NameError; blank just the prompt instead.
        return _retry('')
    if leido == '':
        return ''
    if leido.isdigit() is False:
        return _retry(leido)
    # SECURITY FIX: parse with int() instead of eval() on user input.
    # (str.isdigit accepts e.g. superscript digits that int() rejects.)
    try:
        value = int(leido)
    except ValueError:
        return _retry(leido)
    if value < 1 or value > matches:
        return _retry(leido)
    return value
def on_message(message, data):
    """Callback for messages posted by the injected Frida script."""
    global matches
    msg_type = message['type']
    if msg_type == 'error':
        # Surface script-side errors on stderr.
        err('[!] ' + message['stack'])
    elif msg_type == 'send':
        # The script reports how many memory matches it found.
        matches = message['payload']
    else:
        print(message)
def main(target_process, usb, old_value, new_value, endianness, signed, bits, alignment):
    """Attach to the target process, scan its memory for ``old_value`` and
    let the user overwrite one chosen match with ``new_value``.

    The scan and patch run inside the target via an injected Frida
    JavaScript payload; results flow back through ``on_message``.
    """
    try:
        if usb:
            session = frida.get_usb_device().attach(target_process)
        else:
            session = frida.attach(target_process)
    except:
        # NOTE(review): bare except hides the real attach failure cause
        # (bad PID, permissions, no device); consider narrowing it.
        sys.exit('An error ocurred while attaching with the procces')
    # %d/%s placeholders are filled from the Python arguments below;
    # literal '%' inside the JavaScript is escaped as '%%'.
    script = session.create_script("""
function get_pattern(number, isLittleEndian, bits, signed) {
    var negative = (number < 0 && signed == "s");
    if (number < 0) {
        number *= -1;
    }
    var hex_string = number.toString(16);
    if (hex_string.length %% 2 == 1) {
        hex_string = '0' + hex_string;
    }
    var pattern = "";
    hex_string.match(/.{2}/g).forEach(function(byte) {
        pattern = (isLittleEndian ? byte + " " + pattern : pattern + " " + byte);
    });
    if (isLittleEndian) {
        pattern = pattern.substring(0, pattern.length - 1);
    }
    else {
        pattern = pattern.substring(1, pattern.length);
    }
    var cantBytes = pattern.split(" ").length;
    var bytesReg = Math.floor(bits/8);
    for (i = 0; i < (bytesReg - cantBytes); i++) {
        pattern = (isLittleEndian ? pattern + ' 00' : '00 ' + pattern);
    }
    var lenPattern = pattern.length;
    if (negative) {
        if (isLittleEndian) {
            var prev = pattern.substring(lenPattern-1, lenPattern);
            var nvo = parseInt(prev);
            nvo |= 256;
            nvo = nvo.toString();
            pattern = pattern.substring(0, lenPattern-1) + nvo;
        }
        else {
            var prev = pattern.substring(0, 2);
            var nvo = parseInt(prev);
            nvo |= 256;
            nvo = nvo.toString();
            pattern = nvo + pattern.substring(2);
        }
    }
    return pattern;
}
function get_byte_array(number, isLittleEndian, bits, signed) {
    var pattern = get_pattern(number, isLittleEndian, bits, signed);
    var byte_array = [];
    var bytes = pattern.split(" ");
    for (var i = 0; i < bytes.length; i++) {
        byte_array.push(parseInt("0x" + bytes[i]));
    }
    return byte_array;
}
function isAlligned(pointer, bits) {
    var bytesInPointer = parseInt(pointer);
    var bytesInRegister = bits / 8;
    return bytesInPointer %% bytesInRegister === 0;
}
var old_value = %d;
var new_value = %d;
var isLittleEndian = '%s' == "l";
var signed = '%s';
var bits = %d;
var alignment = %d;
var mustBeAlligned = alignment != 0;
// pattern of bytes that frida will search in memory
var pattern = get_pattern(old_value, isLittleEndian, bits, signed);
// new bytes that will be written
var byte_array = get_byte_array(new_value, isLittleEndian, bits, signed);
console.log("[i] searching for " + pattern);
console.log("");
console.log("List of matches:");
// get array of ranges of memory that are readable and writable
var ranges = Process.enumerateRangesSync({protection: 'rw-', coalesce: true});
var counter = 0;
var addresses = {};
for (var i = 0; i < ranges.length; i++) {
    var range = ranges[i];
    // get array of addresses where 'old_value' was found in this range of memory
    var matches = Memory.scanSync(range.base, range.size, pattern);
    for (var j = 0; j < matches.length; j++) {
        var address = matches[j].address;
        // check if address is alligned in memory if user wants it to be
        if (!mustBeAlligned || (mustBeAlligned && isAlligned(address, alignment))) {
            // save match in array at index counter
            addresses[counter ++] = address;
        }
    }
}
// show all matches found to user
var lenMax = counter.toString().length
for (var i = 0; i < counter; i++) {
    var index = (i + 1).toString();
    var padding = " ".repeat(lenMax - index.length);
    console.log("(" + index + ") " + padding + addresses[i]);
}
// send amount of matches to python
send(counter);
// recive index selected by user from python
recv('input', function(value) {
    Memory.writeByteArray(addresses[value.payload - 1], byte_array);
});
""" % (old_value, new_value, endianness, signed, bits, alignment))
    script.on('message', on_message)
    script.load()
    # wait for scan to finish
    # NOTE(review): busy-wait on the module-level ``matches`` flag set by
    # on_message; spins the CPU until the script reports its match count.
    while matches is None:
        pass
    if matches == 0:
        print('\nNo matches found')
    else:
        print('\nIndicate which address you want to overwrite. Press <Enter> to detach.')
        index = read('index of address:')
        if index != '':
            # send index selected by user to js script
            script.post({'type': 'input', 'payload': int(index)})
            print('address overwritten!')
            time.sleep(1)
    session.detach()
if __name__ == '__main__':
    argc = len(sys.argv)
    # Anything outside "<flags...> <process> <old> <new>" prints usage.
    if argc < 4 or argc > 11:
        usage = 'Usage: {} [-U] [-e little|big] [-b 64|32|16|8] [-a 64|32] <process name or PID> <old value> <new value>\n'.format(__file__)
        usage += 'The \'-U\' option is for mobile instrumentation.\n'
        usage += 'The \'-e\' option is to specify the endianness. Little is the default.\n'
        usage += 'The \'-b\' option is to specify the size of the variable in bits. 32 is the default.\n'
        usage += 'The \'-a\' option is to specify that the variable must be aligned in memory (and not in between registers). This is disabled by default.\n'
        # usage += 'Specify if the variable is signed or unsigned with -s or -u.\n'
        sys.exit(usage)
    # Defaults: local attach, little-endian, 32-bit unsigned, no alignment.
    usb = False
    endianness = 'l'
    bits = 32
    signed = 'u'
    alignment = 0
    # Scan the flag area (everything before the last three positionals).
    for i in range(1, argc - 3):
        if sys.argv[i] == '-U':
            usb = True
        elif sys.argv[i] == '-e':
            endianness = sys.argv[i + 1]
            if endianness not in ['big', 'little']:
                sys.exit('Bad \'-e\' parameter. Specify the endianness (big or little).')
            # Only the first letter ('b'/'l') is passed on to the JS payload.
            endianness = endianness[0]
        elif sys.argv[i] == '-b':
            size = sys.argv[i + 1]
            if size not in ['64', '32', '16', '8']:
                sys.exit('Bad \'-b\' parameter. Specify the size of the variable in bits (64, 32, 16 or 8).')
            bits = int(size)
        elif sys.argv[i] == '-a':
            arch = sys.argv[i + 1]
            if arch not in ['64', '32']:
                sys.exit('Bad \'-a\' parameter. Specify the architecture (32 or 64).')
            alignment = int(arch)
    # A purely numeric target is treated as a PID, otherwise a process name.
    if sys.argv[argc - 3].isdigit():
        target_process = int(sys.argv[argc - 3])
    else:
        target_process = sys.argv[argc - 3]
    if sys.argv[argc - 2].replace('-', '').isdigit() is False:
        sys.exit('<old value> must be a number.')
    if sys.argv[argc - 1].replace('-', '').isdigit() is False:
        sys.exit('<new value> must be a number.')
    old_value = int(sys.argv[argc - 2])
    new_value = int(sys.argv[argc - 1])
    if old_value < 0 or new_value < 0:
        sys.exit('Negative numbers aren\'t suported yet.')
    # Reject values that do not fit in the chosen register width.
    if (old_value > (2 ** (bits - 1)) - 1 and signed == 's') or (old_value > (2 ** bits) - 1 and signed == 'u'):
        sys.exit(str(old_value) + ' is too large')
    if (new_value > (2 ** (bits - 1)) - 1 and signed == 's') or (new_value > (2 ** bits) - 1 and signed == 'u'):
        sys.exit(str(new_value) + ' is too large')
    main(target_process, usb, old_value, new_value, endianness, signed, bits, alignment)
| 30.478431 | 151 | 0.634328 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,067 | 0.651956 |
ea35cc90fef2c59b54455f8db397478309f4335a | 17,435 | py | Python | text_extensions_for_pandas/jupyter/span.py | ZachEichen/text-extensions-for-pandas | b3132c26806d57ec168818e43138e55e2583acdf | [
"Apache-2.0"
] | 193 | 2020-05-11T21:15:57.000Z | 2022-03-23T09:59:32.000Z | text_extensions_for_pandas/jupyter/span.py | ZachEichen/text-extensions-for-pandas | b3132c26806d57ec168818e43138e55e2583acdf | [
"Apache-2.0"
] | 207 | 2020-05-07T17:38:13.000Z | 2022-02-11T18:02:13.000Z | text_extensions_for_pandas/jupyter/span.py | ZachEichen/text-extensions-for-pandas | b3132c26806d57ec168818e43138e55e2583acdf | [
"Apache-2.0"
] | 28 | 2020-05-07T16:43:52.000Z | 2022-02-25T15:21:18.000Z | #
# Copyright (c) 2021 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# span.py
#
# Part of text_extensions_for_pandas
#
# Support for span-centric Jupyter rendering and utilities
#
import textwrap
from typing import *
from enum import Enum
import text_extensions_for_pandas.resources
# TODO: This try/except block is for Python 3.6 support, and should be
# reduced to just importing importlib.resources when 3.6 support is dropped.
try:
import importlib.resources as pkg_resources
except ImportError:
import importlib_resources as pkg_resources
# Limits the max number of displayed documents. Matches Pandas' default display.max_seq_items.
_DOCUMENT_DISPLAY_LIMIT = 100
class SetType(Enum):
    """Relationship between two spans whose character regions intersect."""
    NESTED=1   # one span lies entirely within the other
    OVERLAP=2  # the spans partially overlap
class RegionType(Enum):
    """Classification of a highlighted region of document text."""
    NESTED=1   # root span containing only depth-1 nested spans
    COMPLEX=2  # overlapping spans, or nesting deeper than one level
    SOLO=3     # a single span with no intersecting spans
def pretty_print_html(column: Union["SpanArray", "TokenSpanArray"],
                      show_offsets: bool) -> str:
    """
    HTML pretty-printing of a series of spans for Jupyter notebooks.

    Builds a self-contained HTML fragment: a static fallback rendering plus a
    script block that re-renders the spans interactively when JavaScript is
    available.

    Args:
        column: Span column (either character or token spans).
        show_offsets: True to generate a table of span offsets in addition
            to the marked-up text

    Returns:
        A dedented HTML string ready to be displayed by Jupyter.

    Raises:
        TypeError: if ``column`` is not a SpanArray or TokenSpanArray.
    """
    # Local import to prevent circular dependencies
    from text_extensions_for_pandas.array.span import SpanArray
    from text_extensions_for_pandas.array.token_span import TokenSpanArray
    if not isinstance(column, (SpanArray, TokenSpanArray)):
        raise TypeError(f"Expected SpanArray or TokenSpanArray, but received "
                        f"{column} of type {type(column)}")

    # Gets the main script and stylesheet from the 'resources' sub-package
    style_text: str = pkg_resources.read_text(text_extensions_for_pandas.resources, "span_array.css")
    script_text: str = pkg_resources.read_text(text_extensions_for_pandas.resources, "span_array.js")

    # Declare initial variables common to all render calls
    instance_init_script_list: List[str] = []

    # For each document, pass the array of spans and document text into the script's render function
    document_columns = column.split_by_document()
    for column_index in range(min(_DOCUMENT_DISPLAY_LIMIT, len(document_columns))):
        # Get a javascript representation of the column
        span_array = []
        token_span_array = []
        for e in document_columns[column_index]:
            span_array.append(f"""[{e.begin},{e.end}]""")
            # Token spans additionally carry begin/end token indices.
            if hasattr(e, "tokens"):
                token_span_array.append(f"""[{e.begin_token},{e.end_token}]""")

        document_object_script = f"""
            const doc_spans = [{','.join(span_array)}]
            const doc_text = '{_get_escaped_doctext(document_columns[column_index])}'
        """

        # If the documents are a TokenSpanArray, include the start and end token indices in the document object.
        if len(token_span_array) > 0:
            document_object_script += f"""
                const doc_token_spans = [{','.join(token_span_array)}]
                documents.push({{doc_text: doc_text, doc_spans: doc_spans, doc_token_spans: doc_token_spans}})
            """
        else:
            document_object_script += """
                documents.push({doc_text: doc_text, doc_spans: doc_spans})
            """

        # Braces are doubled because this is an f-string producing a JS block.
        instance_init_script_list.append(f"""
            {{
                {document_object_script}
            }}
        """)

    # Defines a list of DOM strings to be appended to the end of the returned HTML.
    postfix_tags: List[str] = []

    if len(document_columns) > _DOCUMENT_DISPLAY_LIMIT:
        postfix_tags.append(f"""
            <footer>Documents truncated. Showing {_DOCUMENT_DISPLAY_LIMIT} of {len(document_columns)}</footer>
        """)

    # Get the show_offsets parameter as a JavaScript boolean
    show_offset_string = 'true' if show_offsets else 'false'

    # The static rendering from _get_initial_static_html() is shown first; the
    # script that follows replaces it with the interactive widget when it runs.
    return textwrap.dedent(f"""
        <style class="span-array-css">
        {textwrap.indent(style_text, '    ')}
        </style>
        <script>
        {{
        {textwrap.indent(script_text, '    ')}
        }}
        </script>
        <div class="span-array">
        {_get_initial_static_html(column, show_offsets)}
        <span style="font-size: 0.8em;color: #b3b3b3;">Your notebook viewer does not support Javascript execution. The above rendering will not be interactive.</span>
        </div>
        <script>
        {{
            const Span = window.SpanArray.Span
            const script_context = document.currentScript
            const documents = []
            {''.join(instance_init_script_list)}
            const instance = new window.SpanArray.SpanArray(documents, {show_offset_string}, script_context)
            instance.render()
        }}
        </script>
        {''.join(postfix_tags)}
    """)
def _get_escaped_doctext(column: Union["SpanArray", "TokenSpanArray"]) -> List[str]:
# Subroutine of pretty_print_html() above.
# Should only be called for single-document span arrays.
if not column.is_single_document:
raise ValueError("Array contains spans from multiple documents. Can only "
"render one document at a time.")
text = column.document_text
text_pieces = []
for i in range(len(text)):
if text[i] == "'":
text_pieces.append("\\'")
elif text[i] == "\n":
text_pieces.append("\\n")
else:
text_pieces.append(text[i])
return "".join(text_pieces)
def _get_initial_static_html(column: Union["SpanArray", "TokenSpanArray"],
                             show_offsets: bool) -> str:
    """Build the static (JavaScript-free) fallback HTML for ``column``.

    For each document: an offsets table plus the document text with span
    regions highlighted inline. Regions are classified via RegionType
    (SOLO / NESTED / COMPLEX) to pick the markup used.

    Note: ``show_offsets`` is accepted for interface symmetry with
    pretty_print_html() but is not consulted in this body.
    """
    # Subroutine of pretty_print_html above.
    # Gets the initial static html representation of the column for notebook viewers without JavaScript support.
    # Iterates over each document and constructs the DOM string with template literals.
    # ! Text inserted into the DOM as raw HTML should always be sanitized to prevent unintended DOM manipulation
    # and XSS attacks.
    documents = column.split_by_document()
    documents_html = []
    for column_index in range(min(_DOCUMENT_DISPLAY_LIMIT, len(documents))):
        document = documents[column_index]

        # Generate a dictionary to store span information, including relationships with spans occupying the same region.
        spans = {}
        is_token_document = False
        sorted_span_ids = []
        for i in range(len(document)):
            span_data = {}
            span_data["id"] = i
            span_data["begin"] = document[i].begin
            span_data["end"] = document[i].end
            if hasattr(document[i], "tokens"):
                is_token_document = True
                span_data["begin_token"] = document[i].begin_token
                span_data["end_token"] = document[i].end_token
            # "sets" records spans that share this span's text region.
            span_data["sets"] = []
            spans[i] = span_data
            sorted_span_ids.append(i)

        # Sort IDs by begin ascending, then end descending, so that a span
        # precedes anything nested inside it.
        sorted_span_ids.sort(key=lambda id: (spans[id]["begin"], -spans[id]["end"]))

        for i in range(len(sorted_span_ids)):
            span_data = spans[sorted_span_ids[i]]
            for j in range(i+1, len(sorted_span_ids)):
                sub_span_data = spans[sorted_span_ids[j]]
                # If the spans do not overlap, exit the sub-loop
                if(sub_span_data["begin"] >= span_data["end"]):
                    break
                else:
                    if(sub_span_data["end"] <= span_data["end"]):
                        span_data["sets"].append({"type": SetType.NESTED, "id": sub_span_data["id"]})
                    else:
                        span_data["sets"].append({"type": SetType.OVERLAP, "id": sub_span_data["id"]})
            spans[sorted_span_ids[i]] = span_data

        # Generate the table rows DOM string from span data.
        table_rows_html = []
        for i in range(len(spans)):
            span = spans[i]
            table_rows_html.append(f"""
                <tr>
                    <td><b>{span["id"]}</b></td>
                    <td>{span["begin"]}</td>
                    <td>{span["end"]}</td>
            """)
            if is_token_document:
                table_rows_html.append(f"""
                    <td>{span["begin_token"]}</td>
                    <td>{span["end_token"]}</td>
                """)
            table_rows_html.append(f"""
                    <td>{_get_sanitized_text(document.document_text[span["begin"]:span["end"]])}</td>
                </tr>
            """)

        # Generate the regions of the document_text to highlight from span data.
        mark_regions = []
        i = 0
        while i < len(document):
            region = {}
            region["root_id"] = i
            region["begin"] = spans[i]["begin"]
            # _get_set_span returns the furthest end and highest span ID of
            # the whole connected set rooted at i.
            set_span = _get_set_span(spans, i)
            region["end"] = set_span["end"]

            if len(spans[i]["sets"]) > 0:
                # get set span and type
                if(_is_complex(spans, i)):
                    region["type"] = RegionType.COMPLEX
                else:
                    region["type"] = RegionType.NESTED
            else:
                region["type"] = RegionType.SOLO

            mark_regions.append(region)
            # Skip past every span already covered by this region.
            i = set_span["highest_id"] + 1

        # Generate the document_text DOM string from the regions created above.
        context_html = []
        if len(mark_regions) == 0:
            # There are no marked regions. Just append the sanitized text as a raw string.
            context_html.append(_get_sanitized_text(document.document_text))
        else:
            # Iterate over each marked region and contruct the HTML for preceding text and marked text.
            # Then, append that HTML to the list of DOM strings for the document_text.
            snippet_begin = 0
            for region in mark_regions:
                context_html.append(f"""
                    {_get_sanitized_text(document.document_text[snippet_begin:region["begin"]])}
                """)
                if region["type"] == RegionType.COMPLEX:
                    context_html.append(f"""
                        <span class='mark btn-info complex-set' style='
                            padding:0.4em;
                            border-radius:0.35em;
                            background:linear-gradient(to right, #a0c4ff, #ffadad);
                            color: black;
                        '>{_get_sanitized_text(document.document_text[region["begin"]:region["end"]])}
                            <span class='mark-tag' style='
                                font-weight: bolder;
                                font-size: 0.8em;
                                font-variant: small-caps;
                                font-variant-caps: small-caps;
                                font-variant-caps: all-small-caps;
                                margin-left: 8px;
                                text-transform: uppercase;
                                color: black;
                            '>Set</span>
                        </span>
                    """)
                elif region["type"] == RegionType.NESTED:
                    mark_html = []
                    nested_snippet_begin = region["begin"]
                    # Iterate over each span nested within the root span of the marked region
                    for nested_span in map( \
                            lambda set: spans[set["id"]],
                            spans[region["root_id"]]["sets"]):
                        mark_html.append(f"""
                            {_get_sanitized_text(document.document_text[nested_snippet_begin:nested_span["begin"]])}
                            <span class='mark btn-warning' style='
                                padding:0.2em 0.4em;
                                border-radius:0.35em;
                                background-color: #ffadad;
                                color: black;
                            '>{_get_sanitized_text(document.document_text[nested_span["begin"]:nested_span["end"]])}</span>
                        """)
                        nested_snippet_begin = nested_span["end"]
                    mark_html.append(_get_sanitized_text(document.document_text[nested_snippet_begin:region["end"]]))
                    context_html.append(f"""
                        <span class='mark btn-primary' style='padding:0.4em;border-radius:0.35em;background-color: #a0c4ff;color:black;'>{"".join(mark_html)}</span>
                    """)
                elif region["type"] == RegionType.SOLO:
                    context_html.append(f"""
                        <span class='mark btn-primary' style='padding:0.4em;border-radius:0.35em;background-color: #a0c4ff;color:black;'>{_get_sanitized_text(document.document_text[region["begin"]:region["end"]])}</span>
                    """)
                snippet_begin = region["end"]
            # Trailing text after the last marked region.
            context_html.append(_get_sanitized_text(document.document_text[snippet_begin:]))

        # Generate the document's DOM string
        # NOTE(review): the <thead> below closes a <tr> that is never opened —
        # pre-existing markup quirk, preserved as-is.
        documents_html.append(f"""
            <div class='document'>
                <table style='
                    table-layout: auto;
                    overflow: hidden;
                    width: 100%;
                    border-collapse: collapse;
                '>
                    <thead style='font-variant-caps: all-petite-caps;'>
                        <th></th>
                        <th>begin</th>
                        <th>end</th>
                        {"<th>begin token</th><th>end token</th>" if is_token_document else ""}
                        <th style='text-align:right;width:100%'>context</th>
                    </tr></thead>
                    <tbody>
                        {"".join(table_rows_html)}
                    </tbody>
                </table>
                <p style='
                    padding: 1em;
                    line-height: calc(var(--jp-content-line-height, 1.6) * 1.6);
                '>
                    {"".join(context_html)}
                </p>
            </div>
        """)

    # Concat all documents and return the final DOM string
    return "".join(documents_html)
def _get_set_span(spans: Dict, id: int) -> Dict:
# Subroutine of _get_initial_static_html() above.
# Recursive algorithm to get the last end and ID values of the set of spans connected to span with the given ID
# Will raise a KeyError exception if an invalid key is given
end = spans[id]["end"]
highest_id = id
# For each span in the set of spans, get the return values and track the greatest endpoint index and ID values.
for set in spans[id]["sets"]:
other = _get_set_span(spans, set["id"])
if other["end"] > end:
end = other["end"]
if other["highest_id"] > highest_id:
highest_id = other["highest_id"]
return {"end": end, "highest_id": highest_id}
def _is_complex(spans: Dict, id: int) -> bool:
    """Return True when span ``id`` should be rendered as a "Complex" set.

    A span is complex when any of its linked spans overlaps it partially,
    or is nested inside it but itself contains further linked spans
    (nesting deeper than one level).

    Args:
        spans: Mapping of span ID to span-data dict (with "sets").
        id: Span ID to classify; a missing key raises KeyError.
    """
    # any() short-circuits exactly like the original early returns did.
    return any(
        link["type"] == SetType.OVERLAP
        or (link["type"] == SetType.NESTED and len(spans[link["id"]]["sets"]) > 0)
        for link in spans[id]["sets"]
    )
def _get_sanitized_text(text: str) -> str:
# Subroutine of _get_initial_static_html() above.
# Returns a string with HTML reserved character replacements to avoid issues while rendering text as HTML
text_pieces = []
for i in range(len(text)):
if text[i] == "&":
text_pieces.append("&")
elif text[i] == "<":
text_pieces.append("<")
elif text[i] == ">":
text_pieces.append(">")
elif text[i] == "\"":
# Not strictly necessary, but just in case.
text_pieces.append(""")
elif text[i] == "'":
# Not strictly necessary, but just in case.
text_pieces.append("'")
elif text[i] == "$":
# Dollar sign messes up Jupyter's JavaScript UI.
# Place dollar sign in its own sub-span to avoid being misinterpeted as a LaTeX delimiter
text_pieces.append("<span>$</span>")
elif text[i] == "\n" or text[i] == "\r":
# Support for in-document newlines by replacing with line break elements
text_pieces.append("<br>")
else:
text_pieces.append(text[i])
return "".join(text_pieces)
| 40.735981 | 220 | 0.562604 | 108 | 0.006194 | 0 | 0 | 0 | 0 | 0 | 0 | 10,130 | 0.581015 |
ea371b2906f6a90db71a9dd98eef0ab8bb4b4389 | 243 | py | Python | Comprehensions/05. Filter Numbers.py | milenpenev/Python_Advanced | 2f32012dd682fa9541bbf5fa155f6bdbcfa946de | [
"MIT"
] | null | null | null | Comprehensions/05. Filter Numbers.py | milenpenev/Python_Advanced | 2f32012dd682fa9541bbf5fa155f6bdbcfa946de | [
"MIT"
] | null | null | null | Comprehensions/05. Filter Numbers.py | milenpenev/Python_Advanced | 2f32012dd682fa9541bbf5fa155f6bdbcfa946de | [
"MIT"
] | null | null | null | def is_divisible(number):
divisible = [num for num in range(2, 11) if number % num == 0]
return True if divisible else False
# Read an inclusive integer range [start, end] from stdin and print the
# numbers in it that are divisible by at least one integer in 2..10.
start = int(input())
end = int(input())
print([int(_) for _ in range(start, end + 1) if is_divisible(_)])
| 22.090909 | 66 | 0.654321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ea37e30aaa000862cd2a845865431af3031ecbc6 | 188 | py | Python | src/plugins/hk_reporter/platform/__init__.py | panda361/nonebot-hk-reporter | b94f6cc31844e9307e355fc81f387ea42501a014 | [
"MIT"
] | null | null | null | src/plugins/hk_reporter/platform/__init__.py | panda361/nonebot-hk-reporter | b94f6cc31844e9307e355fc81f387ea42501a014 | [
"MIT"
] | null | null | null | src/plugins/hk_reporter/platform/__init__.py | panda361/nonebot-hk-reporter | b94f6cc31844e9307e355fc81f387ea42501a014 | [
"MIT"
] | null | null | null | from .bilibili import Bilibili
from .rss import Rss
from .weibo import Weibo
from .utils import check_sub_target
from .platform import PlatformNoTarget
from .utils import platform_manager
| 26.857143 | 38 | 0.840426 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ea384cdec08dafa6c13c92feb566f42fb716a71c | 13,837 | py | Python | Decision_Maker-Temp_Humid/Temp_Humid_Mqtt_Controller.py | cantiusdeepan/Cadrea | bdd341f8e9ee7a5103611d5bdac1a820ab9fdd81 | [
"MIT"
] | null | null | null | Decision_Maker-Temp_Humid/Temp_Humid_Mqtt_Controller.py | cantiusdeepan/Cadrea | bdd341f8e9ee7a5103611d5bdac1a820ab9fdd81 | [
"MIT"
] | null | null | null | Decision_Maker-Temp_Humid/Temp_Humid_Mqtt_Controller.py | cantiusdeepan/Cadrea | bdd341f8e9ee7a5103611d5bdac1a820ab9fdd81 | [
"MIT"
] | null | null | null | ####### File will publish a score between 0-5 on how good an idea
# (0- Don't open, 5 - Very good conditions for opening window)
# it is to open the window to clear the room given the current internal and external weather conditions
# Factors considered - ________
# Internal temperature
# External Temperature (Effect calculated by adaptive modelling formulation - ASHRAE standard)
# External Weather condition(smog,fog etc)
# External Relative Humidity
# External Wind speed/ Air velocity
import json
import time
import paho.mqtt.client as MQTT
import numpy as np
import random
import fpformat
import requests
import socket
class MyMQTT:
    """Thin wrapper around a paho-mqtt client for the temp/humidity decision maker.

    Forwards every received message to ``notifier.notify(topic, payload)``
    and publishes window-opening decisions under the per-house topic tree.
    """

    def __init__(self, broker, port, notifier):
        # broker/port: MQTT broker endpoint; notifier: object exposing notify().
        self.broker = broker
        self.port = port
        self.notifier = notifier
        # Client id "Temp_Humid_Decision_Maker"; second argument is paho's
        # clean_session flag (False keeps the broker-side session).
        self._paho_mqtt = MQTT.Client("Temp_Humid_Decision_Maker", False)
        self._paho_mqtt.on_connect = self.myOnConnect
        self._paho_mqtt.on_message = self.myOnMessageReceived

    def myOnConnect(self, paho_mqtt, userdata, flags, rc):
        """paho on_connect callback; intentionally a no-op."""
        # print ("Connected to message broker with result code: " + str(rc))
        pass

    def myOnMessageReceived(self, paho_mqtt, userdata, msg):
        """paho on_message callback: relay topic and raw payload to the notifier."""
        self.notifier.notify(msg.topic, msg.payload)

    def myPublish(self, housIDvar, w):
        """Publish window decision *w* for house *housIDvar* at QoS 2."""
        js_pub = {"data": "temp_window", "value": w}
        topic_pub = 'house/' + housIDvar + '/temp_local_controller/temp_window'
        self._paho_mqtt.publish(topic_pub, json.dumps(js_pub), 2)
        print("Publishing TempHumid decision on MQTT")
        # self._paho_mqtt.publish(topic, msg, qos)

    def mySubscribe(self, topicExtTemp, topicExtWind, topicExtWeather, topicExtRH, topicIntTemp, topicIntRH, qos=2):
        """Subscribe to the six external/internal sensor topics at the given QoS."""
        self._paho_mqtt.subscribe(topicExtTemp, qos)
        self._paho_mqtt.subscribe(topicExtWind, qos)
        self._paho_mqtt.subscribe(topicExtWeather, qos)
        self._paho_mqtt.subscribe(topicExtRH, qos)
        self._paho_mqtt.subscribe(topicIntTemp, qos)
        self._paho_mqtt.subscribe(topicIntRH, qos)

    def start(self):
        """Connect to the broker and start the background network loop."""
        self._paho_mqtt.connect(self.broker, self.port)
        self._paho_mqtt.loop_start()

    def stop(self):
        """Stop the background network loop (does not disconnect)."""
        self._paho_mqtt.loop_stop()
class StartTempHumidMqtt():
    """Local temperature/humidity decision maker (Python 2 style code).

    Subscribes to indoor sensor topics and outdoor weather topics, then
    periodically publishes a window-opening score (0 = keep closed, higher =
    better conditions for opening) computed from an ASHRAE-style adaptive
    comfort band plus wind and humidity multipliers.
    """

    def __init__(self):
        #####Values to be fetched from local config
        # resource catalog base url
        self.rc_base_url = ""
        # Central config server base URL
        self.cc_base_url = ""
        self.house_id = 0
        self.mqtt_broker = ""
        self.mqtt_port = 0
        self.getLocalConfig()
        # Running monthly mean of outdoor temperature (adaptive comfort input).
        self.runningMonthMeanOutTemp = 0.0
        ## Values to be used in the logical decision making section
        # setting default initial values
        self.internal_temp = 0.0
        self.external_temp = 20.0
        self.external_wind = 0.0
        self.external_weather = 0
        self.l_threshold_temp = 15.0
        self.u_threshold_temp = 30.0
        self.external_rHumidity = 45.0
        self.window = 0.0
        self.internal_rhumidity = 45.0
        # Rolling buffer of outdoor temperature readings (see running_mean()).
        self.last_month_ext_temp_list = np.array([])
        self.myMqtt = MyMQTT(self.mqtt_broker, self.mqtt_port, self)
        self.myMqtt.start()

    def getLocalConfig(self):
        """Load broker/house settings from local_TH_control_config.json into self."""
        json_file = open('local_TH_control_config.json').read()
        local_config = json.loads(json_file)
        if local_config.get("RC_base_url"):
            self.rc_base_url = local_config["RC_base_url"]
        else:
            print "Problem in local json - Can't get RC url"
        if local_config.get("Central_config_base_url"):
            self.cc_base_url = local_config["Central_config_base_url"]
        else:
            print "Problem in local json - Can't get Central config url"
        if local_config.get("house_id"):
            self.house_id = local_config["house_id"]
        else:
            print "Problem in local json - Can't get house_id"
        if local_config.get("mqtt_broker"):
            self.mqtt_broker = local_config["mqtt_broker"]
        else:
            print "Problem in local json - Can't get mqtt_broker"
        if local_config.get("mqtt_port"):
            self.mqtt_port = local_config["mqtt_port"]
        else:
            print "Problem in local json - Can't get mqtt_port"

    def thresholdValuesFromCentre(self, url, house_ID, reqString='index.html'):
        """GET <url><house_ID>/<reqString> from the central config service.

        Returns the response body as a string, or None on any request error
        (the broad except deliberately makes this best-effort).
        """
        # URL of the GUIWebservice for Central config file
        # url = 'http://192.168.1.71:8081/'
        updated_url = url + house_ID + "/" + reqString
        print "updated_url:", updated_url
        try:
            response = requests.get(updated_url)
            print response.text
            return str(response.text)
        except:
            print("Error in fetching thingspeak ID from resource catalog")
            pass

    # Getting thingspeak ID from RC using rasp pi IP
    def running_mean(self, current_ext_temp, array_size_limit):
        """Append a reading and return the running mean of the rolling buffer.

        12 readings/hour * 24 h * 30 days = 8640 samples ~= one month.
        NOTE(review): when the buffer is full the oldest sample is dropped
        *after* divisor is fixed at the limit, so the mean briefly divides
        the sum of (limit-1) samples by limit — confirm intended.
        """
        self.last_month_ext_temp_list = np.append(self.last_month_ext_temp_list, current_ext_temp)
        self.ext_temp_array_size = self.last_month_ext_temp_list.size
        # 12 readings per hour for 24 h = 288 readings per day
        # 288 readings per day for 30 days = 8640
        if (self.ext_temp_array_size >= array_size_limit):
            divisor = array_size_limit
            # if array size is at limit, remove the oldest value
            self.last_month_ext_temp_list = np.delete(self.last_month_ext_temp_list, 0)
        else:
            divisor = self.ext_temp_array_size
        # DOes cumulative sum - Last value is sum of all values in array
        cumsum = np.cumsum(self.last_month_ext_temp_list)
        cum_sum_last_month_ext_temp = cumsum[-1]
        # print ("Ext Temp Array size:", self.ext_temp_array_size)
        # print (c)
        # print("CUrrent reading external Temp:",current_ext_temp )
        # print("Average monthly mean external temp:", (cum_sum_last_month_ext_temp) / divisor)
        return ((cum_sum_last_month_ext_temp) / divisor)

    def end(self):
        """Stop the MQTT background loop."""
        self.myMqtt.stop()

    # This is just a local temp and humid controller, there is a central controller making
    # decisions based on all input like tmp, humid, wind and dust
    def local_temp_test_controller(self):
        """Main decision loop: every 30 s compute and publish the window score."""
        # house_id = self.getIDfromRC(rc_base_url,'getHID4pi:',local_ip_addr)
        internal_temp_topic = 'house/' + self.house_id + '/sensor/temp/internal'
        internal_RH_topic = 'house/' + self.house_id + '/sensor/rhumidity/internal'
        self.myMqtt.mySubscribe('/wunderground/temp/Turin', '/wunderground/wind/Turin', '/wunderground/weather/Turin',
                                '/wunderground/rhumidity/Turin',
                                internal_temp_topic, internal_RH_topic, 2)
        wind_multiplier = 1.0
        humid_multiplier = 1.0
        RH_lower_limit = 10.0
        RH_upper_limit = 10.0
        comf_temp_range = 7.5
        # Getting INITAL THRESHOLDS FOR TEMP - after entering loop- adaptive modelling kicks in
        l_threshold_temp = float(self.thresholdValuesFromCentre(self.cc_base_url, self.house_id, "init_temp_low"))
        u_threshold_temp = float(self.thresholdValuesFromCentre(self.cc_base_url, self.house_id, "init_temp_high"))
        while True:
            # getting the following thresholds every five mins from centre
            RH_lower_limit = float(self.thresholdValuesFromCentre(self.cc_base_url, self.house_id, "init_RH_low"))
            RH_upper_limit = float(self.thresholdValuesFromCentre(self.cc_base_url, self.house_id, "init_RH_high"))
            comf_temp_range = float(self.thresholdValuesFromCentre(self.cc_base_url, self.house_id, "tempRange"))
            # Higher the window multiplier value, better it is to open window
            # Check if outside conditions(excluding temp) allow opening of window and by how much
            # NOTE(review): original indentation was lost; the wind chain is
            # assumed to be guarded by the weather check — confirm.
            if self.external_weather > 0:
                # Wind speed classification based on : https://www.windows2universe.org/earth/Atmosphere/wind_speeds.html
                if self.external_wind <= 1.0:
                    wind_multiplier = 1
                elif 1.1 <= self.external_wind <= 5.9:
                    wind_multiplier = 2
                elif 6.0 <= self.external_wind <= 11.9:
                    wind_multiplier = 3
                elif 12.0 <= self.external_wind <= 19.9:
                    wind_multiplier = 4
                elif self.external_wind > 20.0:
                    wind_multiplier = 0
            if (40.0 <= self.external_rHumidity <= 50.0):
                humid_multiplier = 1.25
            elif (35.0 <= self.external_rHumidity <= 55.0):
                humid_multiplier = 1.15
            elif (30.0 <= self.external_rHumidity <= 60.0):
                humid_multiplier = 1.0
            elif (25.0 <= self.external_rHumidity <= 65.0):
                humid_multiplier = 0.75
            elif (RH_lower_limit <= self.external_rHumidity <= RH_upper_limit):
                humid_multiplier = 0.5
            else:
                humid_multiplier = 0
            # If internal humidity is very bad, and outdoor RH is not very bad, even better to open window
            # NOTE(review): this chained comparison only tests
            # internal_rhumidity >= 70; likely intended "<= 20 or >= 70" — confirm.
            if (20.0 <= self.internal_rhumidity >= 70.0):
                humid_multiplier = humid_multiplier * 2
            # Impact of outside temperature on inside temperature
            ######### ASHRAE standard for thermal comfort #############
            # http://www.sciencedirect.com/science/article/pii/S2095263513000320
            # 12 readings per hour for 24 h = 288 readings per day
            # 288 readings per day for 30 days = 8640
            # So array size is being set to 8640 for taking monthly mean
            self.runningMonthMeanOutTemp = self.running_mean(self.external_temp, 8640)
            tComf = 0.31 * (self.runningMonthMeanOutTemp) + 17.8
            # Range on both sides from comf temp provided from central config file
            print "tComf:", tComf
            # print "comf_temp_range:",comf_temp_range
            self.l_threshold_temp = float(tComf) - comf_temp_range
            self.u_threshold_temp = float(tComf) + comf_temp_range
            print"int temp:", self.internal_temp
            print"internal_rhumidity:", self.internal_rhumidity
            print"external temp:", self.external_temp
            print"external_rHumidity:", self.external_rHumidity
            print"external_wind:", self.external_wind
            print"runningMonthMeanOutTemp:", self.runningMonthMeanOutTemp
            print"lower threshold temp:", self.l_threshold_temp
            print"higher threshold temp:", self.u_threshold_temp
            print("____________________________________________________")
            if self.l_threshold_temp <= self.external_temp <= self.u_threshold_temp:
                self.window = 1
                print "External temp ok - open window"
            # <editor-fold desc="Description">
            # elif (self.l_threshold_temp > self.internal_temp):
            #     self.window = 0.5
            #
            #     print "EXT and INT temp both not ok-int lower than lower_threshold, opening window doesn't have major negative impact - open window"
            #
            #
            # elif (self.u_threshold_temp < self.internal_temp):
            #     self.window = 0.5
            #
            #     print "EXT and INT temp both not ok-int higher than higher_threshold, opening window doesn't have major negative impact - open window"
            # # </editor-fold>
            else:
                self.window = 0
                print "EXT temp NOT ok,Negative impact if window opened - Close Window"
            print("___________________________________________________")
            print "window value based only on temp:", self.window
            self.window = self.window * wind_multiplier
            print "window value based on temp,wind:", self.window
            self.window = self.window * humid_multiplier
            print "window value based on temp,wind,humidity:", self.window
            print("***************************************************")
            self.myMqtt.myPublish(str(self.house_id), str(self.window))
            print "window: " + str(self.window)
            print("****************************************************")
            time.sleep(30)

    def notify(self, topic, msg):
        """MQTT callback: decode a sensor/weather payload and cache its value.

        Sensors report sentinel values ("-100" for temp, "-1" for RH) when a
        reading failed; those are mapped to 0 here.
        """
        # print msg
        if topic == "/wunderground/temp/Turin":
            self.external_temp = (json.loads(msg)['value'])
        if "/sensor/temp/internal" in topic:
            internal_temp_temporary = (json.loads(msg)['value'])
            if (internal_temp_temporary != "-100"):
                self.internal_temp = internal_temp_temporary
            # checking if we have value from sensor, if not skip and use default value
            else:
                self.internal_temp = 0
        if "/sensor/rhumidity/internal" in topic:
            internal_rhumidity_temporary = (json.loads(msg)['value'])
            # checking if we have value from sensor, if not skip and use default value
            if (internal_rhumidity_temporary != "-1"):
                self.internal_rhumidity = internal_rhumidity_temporary
            else:
                self.internal_rhumidity = 0
        if topic == "/wunderground/wind/Turin":
            self.external_wind = float((json.loads(msg)['value']))
        if topic == "/wunderground/weather/Turin":
            self.external_weather = (json.loads(msg)['value'])
        if topic == "/wunderground/rhumidity/Turin":
            self.external_rHumidity = (json.loads(msg)['value'])
        # print "received under topic %s" % (topic)
# Script entry point: construct the controller (connects to MQTT in
# __init__) and run the blocking decision loop.
if __name__ == "__main__":
    start_TH_control = StartTempHumidMqtt()
    start_TH_control.local_temp_test_controller()
    # time.sleep(30)
    # test.end()
| 42.185976 | 152 | 0.627376 | 13,040 | 0.942401 | 0 | 0 | 0 | 0 | 0 | 0 | 4,949 | 0.357664 |
ea38f8206042b01f8b467e0f3e3183966c12c73d | 6,538 | py | Python | pynagios/perf_data.py | jimbrowne/pynagios | f144b5507b1b3966a8f587bd07d0c7845db90182 | [
"MIT"
] | null | null | null | pynagios/perf_data.py | jimbrowne/pynagios | f144b5507b1b3966a8f587bd07d0c7845db90182 | [
"MIT"
] | null | null | null | pynagios/perf_data.py | jimbrowne/pynagios | f144b5507b1b3966a8f587bd07d0c7845db90182 | [
"MIT"
] | 1 | 2022-02-11T09:27:21.000Z | 2022-02-11T09:27:21.000Z | """
Tools for creating performance data for Nagios plugin responses.
If you're adding performance data to a :py:class:`~pynagios.response.Response`
object, then :py:func:`~pynagios.response.Response.set_perf_data` can be
called instead of having to create an entire :py:class:`PerfData` object.
"""
import re
from pynagios.range import Range
class PerfData(object):
    """
    This class represents performance data for a response. Since
    performance data has a non-trivial response format, this class
    is meant to ease the formation of performance data.
    """

    def __init__(self, label, value, uom=None, warn=None, crit=None,
                 minval=None, maxval=None):
        """Creates a new object representing a single performance data
        item for a Nagios response.

        Performance data is extra key/value data that can be returned
        along with a response. The performance data is not used immediately
        by Nagios itself, but can be extracted by 3rd party tools and can
        often be helpful additional information for system administrators
        to view. The `label` can be any string, but `value` must be a
        numeric value.

        Raises :class:`ValueError` if any of the parameters are invalid.
        The exact nature of the error is in the human readable message
        attribute of the exception.

        :Parameters:
            - `label`: Label for the performance data. This must be a
              string.
            - `value`: Value of the data point. This must be a number whose
              characters are in the class of `[-0-9.]`
            - `uom` (optional): Unit of measure. This must only be `%`, `s`
              for seconds, `c` for continous data, or a unit of bit space
              measurement ('b', 'kb', etc.)
            - `warn` (optional): Warning range for this metric.
            - `crit` (optional): Critical range for this metric.
            - `minval` (optional): Minimum value possible for this metric,
              if one exists.
            - `maxval` (optional): Maximum value possible for this metric,
              if one exists.
        """
        self.label = label
        self.value = value
        self.uom = uom
        self.warn = warn
        self.crit = crit
        self.minval = minval
        self.maxval = maxval

    @property
    def value(self):
        """The value of this metric."""
        return self._value

    @value.setter
    def value(self, value):
        if value is None:
            raise ValueError("value must not be None")
        elif not self._is_valid_value(value):
            raise ValueError("value must be in class [-0-9.]")

        self._value = value

    @property
    def warn(self):
        """
        The warning range of this metric. This return value of this
        will always be a :py:class:`~pynagios.range.Range` object, even
        if it was set with a string.
        """
        return self._warn

    @warn.setter
    def warn(self, value):
        # Coerce strings (e.g. "10:20") into Range objects.
        if value is not None and not isinstance(value, Range):
            value = Range(value)

        self._warn = value

    @property
    def crit(self):
        """
        The critical range of this metric. This return value of this
        will always be a :py:class:`~pynagios.range.Range` object,
        even if it was set with a string.
        """
        return self._crit

    @crit.setter
    def crit(self, value):
        # Coerce strings (e.g. "10:20") into Range objects.
        if value is not None and not isinstance(value, Range):
            value = Range(value)

        self._crit = value

    @property
    def minval(self):
        """
        The minimum value possible for this metric. This doesn't make
        a lot of sense if the `uom` is '%', since that is obviously going
        to be 0, but this will return whatever was set.
        """
        return self._minval

    @minval.setter
    def minval(self, value):
        if not self._is_valid_value(value):
            raise ValueError("minval must be in class [-0-9.]")

        self._minval = value

    @property
    def maxval(self):
        """
        The maximum value possible for this metric. This doesn't make
        a lot of sense if the `uom` is '%', since that is obviously going
        to be 100, but this will return whatever was set.
        """
        return self._maxval

    @maxval.setter
    def maxval(self, value):
        if not self._is_valid_value(value):
            raise ValueError("maxval must be in class [-0-9.]")

        self._maxval = value

    @property
    def uom(self):
        """
        The unit of measure (UOM) for this metric.
        """
        return self._uom

    @uom.setter
    def uom(self, value):
        valids = ['', 's', '%', 'b', 'kb', 'mb', 'gb', 'tb', 'c']
        if value is not None and not str(value).lower() in valids:
            raise ValueError("uom must be in: %s" % valids)

        self._uom = value

    def __str__(self):
        """
        Returns the proper string format that should be outputted
        in the plugin response string. This format is documented in
        depth in the Nagios developer guidelines, but in general looks
        like this:

        | 'label'=value[UOM];[warn];[crit];[min];[max]
        """
        # Quotify the label
        label = self._quote_if_needed(self.label)

        # Render each optional field as the empty string only when it is
        # actually unset. The previous code used ``val or ''``, which also
        # blanked legitimate falsy values such as ``minval=0``.
        def _field(val):
            return '' if val is None else val

        uom = _field(self.uom)
        warn = _field(self.warn)
        crit = _field(self.crit)
        minval = _field(self.minval)
        maxval = _field(self.maxval)

        # Create the proper format and return it
        return "%s=%s%s;%s;%s;%s;%s" % (label, self.value, uom, warn, crit, minval, maxval)

    def _is_valid_value(self, value):
        """
        Returns boolean noting whether a value is in the proper value
        format which certain values for the performance data must adhere to.
        None is accepted here so that optional fields can be unset.
        """
        value_format = re.compile(r"[-0-9.]+$")
        return value is None or value_format.match(str(value))

    def _quote_if_needed(self, value):
        """
        This handles single quoting the label if necessary. The reason that
        this is not done all the time is so that characters can be saved
        since Nagios only reads 80 characters and one line of stdout.
        """
        if '=' in value or ' ' in value or "'" in value:
            # Quote the string and replace single quotes with double single
            # quotes and return that
            return "'%s'" % value.replace("'", "''")
        else:
            return value
| 33.701031 | 91 | 0.599725 | 6,192 | 0.947079 | 0 | 0 | 2,511 | 0.384062 | 0 | 0 | 3,974 | 0.607831 |
ea3a1bdee6e7843b3236745c73bb61d6e0e2dd05 | 354 | py | Python | py/priq.py | frasertweedale/drill | 4e71b5348b633fd9beecb243c046f19ddfe131fe | [
"MIT"
] | 1 | 2020-09-02T17:25:26.000Z | 2020-09-02T17:25:26.000Z | py/priq.py | frasertweedale/drill | 4e71b5348b633fd9beecb243c046f19ddfe131fe | [
"MIT"
] | null | null | null | py/priq.py | frasertweedale/drill | 4e71b5348b633fd9beecb243c046f19ddfe131fe | [
"MIT"
] | null | null | null | import heap
class _PriQ(object):
def insert(self, x):
self.heap.insert(x)
def delete(self):
return self.heap.delete()
def __nonzero__(self):
return bool(self.heap)
class Max(_PriQ):
def __init__(self):
self.heap = heap.Max()
class Min(_PriQ):
def __init__(self):
self.heap = heap.Min()
| 15.391304 | 33 | 0.59322 | 333 | 0.940678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ea3a5b10c9e85e5dacebb77bf4f8cac72e39e843 | 3,249 | py | Python | report/api/app.py | Aaron-DH/openstack_sample_project | 711a56311806d52b632e4394743bd4bdbacb103a | [
"Apache-2.0"
] | null | null | null | report/api/app.py | Aaron-DH/openstack_sample_project | 711a56311806d52b632e4394743bd4bdbacb103a | [
"Apache-2.0"
] | null | null | null | report/api/app.py | Aaron-DH/openstack_sample_project | 711a56311806d52b632e4394743bd4bdbacb103a | [
"Apache-2.0"
] | null | null | null | from wsgiref import simple_server
import os
from oslo_config import cfg
from oslo_log import log as logging
from paste import deploy
import pecan
from report.api import hooks
from report.agent import rpcapi
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
pecan_opts = [
cfg.StrOpt('root',
default='report.api.controller.root.RootController',
help='Pecan root controller'),
cfg.ListOpt('modules',
default=["report.api"],
help='A list of modules where pecan will search for '
'applications.'),
cfg.BoolOpt('debug',
default=True,
help='Enables the ability to display tracebacks in the '
'browser and interactively debug during '
'development.'),
cfg.BoolOpt('auth_enable',
default=True,
help='Enables user authentication in pecan.')
]
api_opts = [
cfg.StrOpt('host',
default='0.0.0.0',
help='api host'),
cfg.IntOpt('port',
default=8899,
help='api port')
]
CONF.register_opts(pecan_opts, group='pecan')
CONF.register_opts(api_opts, group='api')
def get_pecan_config():
# Set up the pecan configuration.
opts = CONF.pecan
cfg_dict = {
"app": {
"root": opts.root,
"modules": opts.modules,
"debug": True,
"auth_enable": opts.auth_enable
}
}
return pecan.configuration.conf_from_dict(cfg_dict)
def setup_app(config=None, extra_hooks=None):
if not config:
config = get_pecan_config()
app_conf = dict(config.app)
rpcclient = rpcapi.AgentAPI()
app_hooks = [hooks.DBHook(),
hooks.RPCHook(rpcclient)]
app = pecan.make_app(
app_conf.pop('root'),
hooks=app_hooks,
logging=getattr(config, 'logging', {}),
**app_conf
)
return app
def load_app():
# Build the WSGI app
cfg_file = None
cfg_path = cfg.CONF.api_paste_config
if not os.path.isabs(cfg_path):
cfg_file = CONF.find_file(cfg_path)
elif os.path.exists(cfg_path):
cfg_file = cfg_path
if not cfg_file:
raise cfg.ConfigFilesNotFoundError([cfg.CONF.api_paste_config])
LOG.info("Full WSGI config used: %s" % cfg_file)
return deploy.loadapp("config:" + cfg_file, name="reportapi")
def build_server():
# Create the WSGI server and start it
app = load_app()
host = CONF.api.host
port = CONF.api.port
LOG.info('Starting server in PID %s', os.getpid())
LOG.info("Configuration:")
cfg.CONF.log_opt_values(LOG, logging.INFO)
if host == '0.0.0.0':
LOG.info('serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%'
'(vport)s', {'sport': port, 'vport': port})
else:
LOG.info("serving on http://%(host)s:%(port)s",
{'host': host, 'port': port})
server_cls = simple_server.WSGIServer
handler_cls = simple_server.WSGIRequestHandler
srv = simple_server.make_server(
host,
port,
app)
srv.serve_forever()
def app_factory(global_config, **local_conf):
return setup_app()
| 25.382813 | 75 | 0.596183 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 749 | 0.230532 |
ea3b4a5064c4a8a783500e634bbd859b7a0ab263 | 2,318 | py | Python | cahoots/parsers/email.py | SerenitySoftwareLLC/cahoots | 866336c51436343ff5e56f83f89dddc82a5693a3 | [
"MIT"
] | 8 | 2015-03-24T15:34:40.000Z | 2016-12-24T22:09:47.000Z | cahoots/parsers/email.py | hickeroar/cahoots | 8fa795d7d933507c6cbf490bd20c1b3562689c5a | [
"MIT"
] | 34 | 2015-03-06T06:27:54.000Z | 2015-05-27T05:23:27.000Z | cahoots/parsers/email.py | hickeroar/cahoots | 8fa795d7d933507c6cbf490bd20c1b3562689c5a | [
"MIT"
] | 4 | 2015-04-05T06:24:50.000Z | 2015-05-30T02:40:21.000Z | """
The MIT License (MIT)
Copyright (c) Serenity Software, LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from cahoots.parsers.base import BaseParser
from SereneRegistry import registry
from validate_email import VALID_ADDRESS_REGEXP
import re
class EmailParser(BaseParser):
'''Determines if given data is an email address'''
def __init__(self, config):
"""
:param config: cahoots config
:type config: cahoots.config.BaseConfig
"""
BaseParser.__init__(self, config, "Email", 100)
@staticmethod
def bootstrap(config):
"""
This method is statically called to bootstrap a parser
:param config: cahoots config
:type config: cahoots.config.BaseConfig
"""
email_regex = re.compile(VALID_ADDRESS_REGEXP)
registry.set('EP_valid_regex', email_regex)
def parse(self, data_string):
"""
parses for email addresses
:param data_string: the string we want to parse
:type data_string: str
:return: yields parse result(s) if there are any
:rtype: ParseResult
"""
if len(data_string) > 254 or '@' not in data_string:
return
if registry.get('EP_valid_regex').match(data_string):
yield self.result("Email Address", self.confidence)
| 35.661538 | 78 | 0.716566 | 1,085 | 0.468076 | 468 | 0.201898 | 321 | 0.138481 | 0 | 0 | 1,687 | 0.727783 |
ea3cf21c67467cbaac5cfdc96125b0af5671f94a | 326 | py | Python | src/praxxis/model/update_model.py | blimongi/praxxis | 4c2496c89d1d26d01b91896496342ca60f3d15ae | [
"MIT"
] | 9 | 2019-07-31T23:50:16.000Z | 2021-08-21T00:43:44.000Z | src/praxxis/model/update_model.py | blimongi/praxxis | 4c2496c89d1d26d01b91896496342ca60f3d15ae | [
"MIT"
] | 22 | 2019-08-01T00:37:53.000Z | 2020-03-31T05:01:57.000Z | src/praxxis/model/update_model.py | blimongi/praxxis | 4c2496c89d1d26d01b91896496342ca60f3d15ae | [
"MIT"
] | 7 | 2020-01-03T02:28:36.000Z | 2021-05-13T20:59:19.000Z | """
This file requests a new model from the storage pool.
"""
import os
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import requests
from requests.auth import HTTPBasicAuth
from src.praxxis.sqlite import sqlite_telemetry
def update_model():
"""TODO: implement this"""
pass
| 16.3 | 67 | 0.776074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.266871 |
ea3d55b836b67869362d50e8f2439e202c28d2d7 | 40,516 | py | Python | Process_Data/audio_processing.py | Wenhao-Yang/DeepSpeaker-pytorch | 99eb8de3357c85e2b7576da2a742be2ffd773ead | [
"MIT"
] | 8 | 2020-08-26T13:32:56.000Z | 2022-01-18T21:05:46.000Z | Process_Data/audio_processing.py | Wenhao-Yang/DeepSpeaker-pytorch | 99eb8de3357c85e2b7576da2a742be2ffd773ead | [
"MIT"
] | 1 | 2020-07-24T17:06:16.000Z | 2020-07-24T17:06:16.000Z | Process_Data/audio_processing.py | Wenhao-Yang/DeepSpeaker-pytorch | 99eb8de3357c85e2b7576da2a742be2ffd773ead | [
"MIT"
] | 5 | 2020-12-11T03:31:15.000Z | 2021-11-23T15:57:55.000Z | #!/usr/bin/env python
# encoding: utf-8
import os
import pathlib
import traceback
<<<<<<< HEAD
=======
import random
>>>>>>> Server/Server
import librosa
import numpy as np
import soundfile as sf
import torch
import torch.nn.utils.rnn as rnn_utils
from pydub import AudioSegment
from python_speech_features import fbank, delta, sigproc
from scipy import signal
from scipy.io import wavfile
from scipy.signal import butter, sosfilt
from speechpy.feature import mfe
from speechpy.processing import cmvn, cmvnw
from Process_Data import constants as c
from Process_Data.Compute_Feat.compute_vad import ComputeVadEnergy
from Process_Data.xfcc.common import local_fbank, local_mfcc
def mk_MFB(filename, sample_rate=c.SAMPLE_RATE, use_delta=c.USE_DELTA, use_scale=c.USE_SCALE, use_logscale=c.USE_LOGSCALE):
    """Compute (log) mel filter-bank features for a wav file and cache them.

    The result is written next to the input, with the '.wav' suffix replaced
    by '.npy'. When ``use_delta`` is set, first- and second-order deltas are
    stacked horizontally after per-feature normalization.
    """
    audio, sr = librosa.load(filename, sr=sample_rate, mono=True)
    fbanks, energies = fbank(audio, samplerate=sample_rate, nfilt=c.FILTER_BANK, winlen=0.025)

    if use_logscale:
        # Clamp before the log so silent frames do not produce -inf.
        fbanks = 20 * np.log10(np.maximum(fbanks, 1e-5))

    if not use_delta:
        feats = normalize_frames(fbanks, Scale=use_scale)
    else:
        d1 = delta(fbanks, N=1)
        d2 = delta(d1, N=1)
        feats = np.hstack([normalize_frames(part, Scale=use_scale)
                           for part in (fbanks, d1, d2)])

    np.save(filename.replace('.wav', '.npy'), feats)
def resample_wav(in_wav, out_wav, sr):
    """Resample ``in_wav`` to ``sr`` Hz and write the result to ``out_wav``.

    Any failure is logged with a full traceback before being re-raised.
    """
    try:
        data, orig_sr = sf.read(in_wav, dtype='float32')
        resampled = librosa.resample(np.asfortranarray(data), orig_sr, sr)
        sf.write(file=out_wav, data=resampled, samplerate=sr, format='WAV')
    except Exception as err:
        traceback.print_exc()
        raise (err)
def butter_bandpass(cutoff, fs, order=15):
    """Design a Butterworth band-pass filter as second-order sections.

    :param cutoff: (low, high) cutoff frequencies in Hz.
    :param fs: sampling rate in Hz (frequencies are normalized by fs / 2).
    :param order: Butterworth filter order.
    :return: SOS coefficient array from ``scipy.signal.butter``.
    """
    nyq = 0.5 * fs
    sos = butter(order, np.array(cutoff) / nyq, btype='bandpass', analog=False, output='sos')
    return sos


def butter_bandpass_filter(data, cutoff, fs, order=15):
    """Band-pass filter a signal with a Butterworth SOS filter.

    Merge conflict resolved in favor of the int16-aware branch: 16-bit PCM
    input is scaled to float in [-1, 1) before filtering and converted back
    to int16 afterwards, so raw wav samples can be filtered directly.

    :param data: 1-D signal, either float samples or ``np.int16`` PCM.
    :param cutoff: (low, high) cutoff frequencies in Hz.
    :param fs: sampling rate in Hz.
    :param order: Butterworth filter order.
    :return: filtered signal; int16 input yields int16 output.
    """
    int2float = False
    if data.dtype == np.int16:
        # Normalize 16-bit PCM to [-1, 1) before filtering.
        data = data / 32768.
        data = data.astype(np.float32)
        int2float = True

    sos = butter_bandpass(cutoff, fs, order=order)
    y = sosfilt(sos, data)

    if int2float:
        # Restore the original 16-bit integer scale.
        y = (y * 32768).astype(np.int16)

    return y
def make_Fbank(filename, write_path,  # sample_rate=c.SAMPLE_RATE,
               use_delta=c.USE_DELTA,
               use_scale=c.USE_SCALE,
               nfilt=c.FILTER_BANK,
               use_logscale=c.USE_LOGSCALE,
               use_energy=c.USE_ENERGY,
               normalize=c.NORMALIZE):
    """Extract (log) mel filter-bank features from a wav and save as .npy.

    Optionally prepends the per-frame energy as an extra coefficient, stacks
    delta / delta-delta features (each normalized per feature), and finally
    normalizes the frames before writing the array to ``write_path``.

    :raises ValueError: if ``filename`` does not exist.
    """
    if not os.path.exists(filename):
        raise ValueError('wav file does not exist.')

    sample_rate, audio = wavfile.read(filename)
    fbanks, energies = fbank(audio,
                             samplerate=sample_rate,
                             nfilt=nfilt,
                             winlen=0.025,
                             winfunc=np.hamming)

    if use_energy:
        # Prepend the raw frame energies as an additional first coefficient.
        fbanks = np.concatenate((energies.reshape(energies.shape[0], 1), fbanks), axis=1)

    if use_logscale:
        # Clamp before the log so silent frames do not produce -inf.
        fbanks = np.log(np.maximum(fbanks, 1e-5))

    if use_delta:
        d1 = delta(fbanks, N=1)
        d2 = delta(d1, N=1)
        fbanks = np.hstack([normalize_frames(part, Scale=use_scale)
                            for part in (fbanks, d1, d2)])

    if normalize:
        fbanks = normalize_frames(fbanks, Scale=use_scale)

    out_path = pathlib.Path(write_path)
    if not out_path.parent.exists():
        os.makedirs(str(out_path.parent))
    np.save(write_path, fbanks)
def compute_fbank_feat(filename, nfilt=c.FILTER_BANK, use_logscale=c.USE_LOGSCALE, use_energy=True, add_energy=True, normalize=c.CMVN, vad=c.VAD):
    """
    Compute Kaldi-style log mel filter-bank features with cepstral
    mean/variance normalization and optional energy-based VAD.

    :param filename: path to the wav file.
    :param nfilt: number of mel filters.
    :param use_logscale: apply a natural log to the filter-bank energies.
    :param use_energy: keep the per-frame energy in the feature matrix.
    :param add_energy: if True, prepend energy as an extra column; otherwise
        overwrite the first filter-bank coefficient with it.
    :param normalize: 'cmvn' for utterance-level or 'cmvnw' for
        sliding-window normalization.
    :param vad: together with ``use_energy``, drop non-speech frames.
    :return: normalized features; when ``use_energy and vad`` is true, a
        tuple of (features, per-frame voiced flags) is returned instead.
    """
    if not os.path.exists(filename):
        raise ValueError('Wav file does not exist.')

    sample_rate, audio = wavfile.read(filename)
    # Pad symmetrically so the final 25 ms window (10 ms hop) is complete.
    pad_size = np.ceil((len(audio) - 0.025 * sample_rate) / (0.01 * sample_rate)) * 0.01 * sample_rate - len(audio) + 0.025 * sample_rate
    audio = np.lib.pad(audio, (0, int(pad_size)), 'symmetric')

    filter_banks, energies = mfe(audio, sample_rate, frame_length=0.025, frame_stride=0.01, num_filters=nfilt, fft_length=512, low_frequency=0, high_frequency=None)

    if use_energy:
        if add_energy:
            # Prepend the frame energy as an additional first column.
            energies = energies.reshape(energies.shape[0], 1)
            filter_banks = np.concatenate((energies, filter_banks), axis=1)
        else:
            # Overwrite the first filter-bank coefficient with the energy.
            energies = energies.reshape(energies.shape[0], 1)
            filter_banks[:, 0]=energies[:, 0]

    if use_logscale:
        # Clamp before the log so silent frames do not produce -inf.
        filter_banks = np.log(np.maximum(filter_banks, 1e-5))

    # NOTE(review): if ``normalize`` is neither 'cmvn' nor 'cmvnw',
    # ``norm_fbank`` is never assigned and the return below raises NameError.
    if normalize=='cmvn':
        # cmvn expects (num_observations, num_features).
        norm_fbank = cmvn(vec=filter_banks, variance_normalization=True)
    elif normalize=='cmvnw':
        norm_fbank = cmvnw(vec=filter_banks, win_size=301, variance_normalization=True)

    if use_energy and vad:
        # Energy-based VAD on the (pre-normalization) filter banks; keep
        # only the frames flagged as voiced.
        voiced = []
        ComputeVadEnergy(filter_banks, voiced)
        voiced = np.array(voiced)
        voiced_index = np.argwhere(voiced==1).squeeze()
        norm_fbank = norm_fbank[voiced_index]
        return norm_fbank, voiced

    return norm_fbank
def GenerateSpect(wav_path, write_path, windowsize=25, stride=10, nfft=c.NUM_FFT):
    """
    Pre-compute a spectrogram for a wav file and save it as .npy.

    :param wav_path: path of the input wav file.
    :param write_path: where to write the spectrogram .npy file.
    :param windowsize: analysis window length in milliseconds.
    :param stride: hop size in milliseconds.
    :param nfft: FFT length.
    :return: None; the spectrogram is written to disk.
    :raises ValueError: if ``wav_path`` does not exist.
    """
    if not os.path.exists(wav_path):
        raise ValueError('wav file does not exist.')

    # Bug fix: soundfile.read returns (samples, samplerate); the original
    # unpacked the tuple in wavfile.read's (rate, data) order, swapping them.
    samples, sample_rate = sf.read(wav_path, dtype='int16')

    # Window and overlap are given to scipy in samples; the parameters above
    # are in milliseconds, so scale by samples-per-millisecond.
    sample_rate_norm = int(sample_rate / 1e3)
    frequencies, times, spectrogram = signal.spectrogram(
        x=samples, fs=sample_rate,
        window=signal.hamming(windowsize * sample_rate_norm),
        noverlap=(windowsize - stride) * sample_rate_norm,
        nfft=nfft)

    file_path = pathlib.Path(write_path)
    if not file_path.parent.exists():
        os.makedirs(str(file_path.parent))
    np.save(write_path, spectrogram)
def Make_Spect(wav_path, windowsize, stride, window=np.hamming,
               bandpass=False, lowfreq=0, highfreq=0, log_scale=True,
               preemph=0.97, duration=False, nfft=None, normalize=False):
    """
    Compute a (log) power spectrogram from a wav file.

    Merge conflict resolved in favor of the Server branch: the wav is read
    as 16-bit PCM with an empty-file guard, and ``normalize`` defaults to
    ``False``.

    :param wav_path: path to the wav file.
    :param windowsize: window length in seconds.
    :param stride: hop length in seconds.
    :param window: window function, default ``np.hamming``.
    :param bandpass: band-pass filter with [lowfreq, highfreq] first.
    :param log_scale: return the log power spectrum when True.
    :param preemph: pre-emphasis coefficient.
    :param duration: when True, also return the clip length in seconds.
    :param nfft: FFT size; defaults to the window length in samples.
    :param normalize: per-frame normalization via ``normalize_frames``.
    :return: array of shape (num_frames, nfft / 2 + 1); with ``duration``
        a (features, seconds) tuple.
    """
    samples, samplerate = sf.read(wav_path, dtype='int16')
    if not len(samples) > 0:
        raise ValueError('wav file is empty?')

    if bandpass and highfreq > lowfreq:
        samples = butter_bandpass_filter(data=samples, cutoff=[lowfreq, highfreq], fs=samplerate)

    # Renamed from ``signal`` to avoid shadowing the scipy.signal module
    # imported at file level.
    emphasized = sigproc.preemphasis(samples, preemph)
    frames = sigproc.framesig(emphasized, windowsize * samplerate, stride * samplerate, winfunc=window)

    if nfft is None:
        nfft = int(windowsize * samplerate)

    pspec = sigproc.powspec(frames, nfft)
    # Replace exact zeros so the log below never sees log(0).
    pspec = np.where(pspec == 0, np.finfo(float).eps, pspec)

    if log_scale:
        feature = np.log(pspec).astype(np.float32)
    else:
        feature = pspec.astype(np.float32)

    if normalize:
        feature = normalize_frames(feature)

    if duration:
        return feature, len(samples) / samplerate

    return feature
def Make_Fbank(filename,  # sample_rate=c.SAMPLE_RATE,
               filtertype='mel', windowsize=0.025, nfft=512, use_delta=c.USE_DELTA, use_scale=c.USE_SCALE,
               lowfreq=0, nfilt=c.FILTER_BANK, log_scale=c.USE_LOGSCALE,
               use_energy=c.USE_ENERGY, normalize=c.NORMALIZE, duration=False, multi_weight=False):
    """
    Compute (log) filter-bank features for a wav file via ``local_fbank``.

    Merge conflict resolved in favor of the Server branch: the wav is read
    as int16 and the log is the natural log (``np.log``) rather than dB.
    The Server branch's ``assert len(audio) > 0, print(...)`` is replaced by
    an explicit ``raise``: asserts are stripped under ``python -O`` and the
    ``print`` call evaluates to ``None`` as the assertion message.

    :param filename: path to the wav file.
    :param duration: when True, also return the clip length in seconds.
    :return: feature matrix; with ``duration`` a (features, seconds) tuple.
    :raises ValueError: if the file is missing or empty.
    """
    if not os.path.exists(filename):
        raise ValueError('wav file does not exist.')

    audio, sample_rate = sf.read(filename, dtype='int16')
    if not len(audio) > 0:
        raise ValueError('wav file is empty?')

    filter_banks, energies = local_fbank(audio, samplerate=sample_rate, nfilt=nfilt, nfft=nfft, lowfreq=lowfreq,
                                         winlen=windowsize, filtertype=filtertype, winfunc=np.hamming,
                                         multi_weight=multi_weight)

    if use_energy:
        # Prepend the per-frame energy as an additional first coefficient.
        energies = energies.reshape(energies.shape[0], 1)
        filter_banks = np.concatenate((energies, filter_banks), axis=1)

    if log_scale:
        filter_banks = np.log(filter_banks)

    if use_delta:
        delta_1 = delta(filter_banks, N=1)
        delta_2 = delta(delta_1, N=1)
        filter_banks = normalize_frames(filter_banks, Scale=use_scale)
        delta_1 = normalize_frames(delta_1, Scale=use_scale)
        delta_2 = normalize_frames(delta_2, Scale=use_scale)
        filter_banks = np.hstack([filter_banks, delta_1, delta_2])

    if normalize:
        filter_banks = normalize_frames(filter_banks, Scale=use_scale)

    if duration:
        return filter_banks, len(audio) / sample_rate
    return filter_banks
def Make_MFCC(filename,
              filtertype='mel', winlen=0.025, winstep=0.01,
              use_delta=c.USE_DELTA, use_scale=c.USE_SCALE,
              nfilt=c.FILTER_BANK, numcep=c.FILTER_BANK,
              use_energy=c.USE_ENERGY, lowfreq=0, nfft=512,
              normalize=c.NORMALIZE,
              duration=False):
    """Compute MFCC features for a wav file via ``local_mfcc``.

    Optionally stacks delta and delta-delta coefficients (each normalized
    per feature) and normalizes the final frames. With ``duration`` the
    clip length in seconds is returned alongside the features.

    :raises ValueError: if the file is missing or empty.
    """
    if not os.path.exists(filename):
        raise ValueError('wav file does not exist.')

    audio, sample_rate = sf.read(filename, dtype='int16')
    if not len(audio) > 0:
        raise ValueError('wav file is empty?')

    feats = local_mfcc(audio, samplerate=sample_rate,
                       nfilt=nfilt, winlen=winlen,
                       winstep=winstep, numcep=numcep,
                       nfft=nfft, lowfreq=lowfreq,
                       highfreq=None, preemph=0.97,
                       ceplifter=0, appendEnergy=use_energy,
                       winfunc=np.hamming, filtertype=filtertype)

    if use_delta:
        first = delta(feats, N=1)
        second = delta(first, N=1)
        feats = np.hstack([normalize_frames(part, Scale=use_scale)
                           for part in (feats, first, second)])

    if normalize:
        feats = normalize_frames(feats, Scale=use_scale)

    if duration:
        return feats, len(audio) / sample_rate
    return feats
def conver_to_wav(filename, write_path, format='m4a'):
    """
    Convert an audio file of another format into wav.

    :param filename: path of the source audio file.
    :param write_path: destination path for the wav output.
    :param format: any input format that ffmpeg supports.
    :return: None; the wav file is written to disk.
    :raises ValueError: if the source file does not exist.
    """
    if not os.path.exists(filename):
        raise ValueError('File may not exist.')

    target_dir = pathlib.Path(write_path).parent
    if not target_dir.exists():
        os.makedirs(str(target_dir))

    AudioSegment.from_file(filename, format=format).export(write_path, format="wav")
def read_MFB(filename):
    """Load pre-computed features cached next to a wav file.

    Expects a .npy file whose path equals the wav path with the '.wav'
    suffix replaced by '.npy'.

    :raises ValueError: if the cached array cannot be loaded.
    """
    npy_path = filename.replace('.wav', '.npy')
    try:
        return np.load(npy_path)
    except Exception:
        raise ValueError("Load {} error!".format(filename))
def read_Waveform(filename):
    """
    Read a wav file as raw 16-bit samples.

    :param filename: path of the wav file.
    :return: float32 array of shape (1, num_samples).
    """
    samples, sample_rate = sf.read(filename, dtype='int16')
    return samples.astype(np.float32).reshape(1, -1)
def read_from_npy(filename):
    """
    Load features from the .npy file paired with a wav path.

    :param filename: the path of the wav file; its '.npy' sibling is loaded.
    :return: the cached feature array.
    """
    return np.load(filename.replace('.wav', '.npy'))
class ConcateVarInput(object):
    """Split a variable-length utterance into fixed-size overlapping chunks.

    The feature matrix is tiled along the time axis until it is at least
    ``num_frames`` long, then cut into windows of ``num_frames`` frames taken
    every ``frame_shift`` frames; the final window is right-aligned so it
    stays in bounds. For ``feat_type='wav'`` time is axis 1 instead of axis 0.

    Merge conflict resolved in favor of the Server branch, which adds the
    ``frame_shift`` / ``feat_type`` options.
    """

    def __init__(self, num_frames=c.NUM_FRAMES_SPECT, frame_shift=c.NUM_SHIFT_SPECT,
                 feat_type='kaldi', remove_vad=False):
        super(ConcateVarInput, self).__init__()
        self.num_frames = num_frames
        self.remove_vad = remove_vad
        self.frame_shift = frame_shift
        # Raw waveforms are (channels, samples): chunk along axis 1.
        self.c_axis = 0 if feat_type != 'wav' else 1

    def __call__(self, frames_features):
        network_inputs = []
        output = frames_features
        # Tile the utterance until at least one full window fits.
        while output.shape[self.c_axis] < self.num_frames:
            output = np.concatenate((output, frames_features), axis=self.c_axis)

        input_this_file = int(np.ceil(output.shape[self.c_axis] / self.frame_shift))
        for i in range(input_this_file):
            start = i * self.frame_shift
            if start < output.shape[self.c_axis] - self.num_frames:
                end = start + self.num_frames
            else:
                # Right-align the final chunk so it stays within bounds.
                start = output.shape[self.c_axis] - self.num_frames
                end = output.shape[self.c_axis]

            if self.c_axis == 0:
                network_inputs.append(output[start:end])
            else:
                network_inputs.append(output[:, start:end])

        network_inputs = torch.tensor(network_inputs, dtype=torch.float32)
        if self.remove_vad:
            # Drop the leading energy/VAD coefficient of each frame.
            network_inputs = network_inputs[:, :, 1:]

        return network_inputs
class ConcateInput(object):
    """Randomly crop fixed-length windows from a feature matrix.

    The matrix is tiled along time (axis 0) until it holds at least
    ``num_frames`` frames, then ``input_per_file`` random windows of
    ``num_frames`` frames are drawn and stacked.

    Merge conflict resolved in favor of the Server branch: the result is a
    squeezed torch tensor, and the ConcateNumInput helpers below are kept.
    """

    def __init__(self, input_per_file=1, num_frames=c.NUM_FRAMES_SPECT, remove_vad=False):
        super(ConcateInput, self).__init__()
        self.input_per_file = input_per_file
        self.num_frames = num_frames
        self.remove_vad = remove_vad

    def __call__(self, frames_features):
        network_inputs = []
        output = frames_features
        while len(output) < self.num_frames:
            output = np.concatenate((output, frames_features), axis=0)

        for i in range(self.input_per_file):
            try:
                start = np.random.randint(low=0, high=len(output) - self.num_frames + 1)
                network_inputs.append(output[start:start + self.num_frames])
            except Exception as e:
                print(len(output))
                raise e

        network_inputs = np.array(network_inputs, dtype=np.float32)
        if self.remove_vad:
            # Drop the leading energy/VAD coefficient of each frame.
            network_inputs = network_inputs[:, :, 1:]

        return torch.tensor(network_inputs.squeeze())


class ConcateNumInput(object):
    """Cut a feature matrix (or waveform) into ``input_per_file`` chunks.

    When the tiled input is long enough, non-overlapping chunks are taken
    back to back; otherwise random (possibly overlapping) windows are
    sampled. ``feat_type='wav'`` chunks along axis 1 (channels, samples)
    instead of axis 0.
    """

    def __init__(self, input_per_file=1, num_frames=c.NUM_FRAMES_SPECT, feat_type='kaldi', remove_vad=False):
        super(ConcateNumInput, self).__init__()
        self.input_per_file = input_per_file
        self.num_frames = num_frames
        self.remove_vad = remove_vad
        # Raw waveforms are (channels, samples): chunk along axis 1.
        self.c_axis = 0 if feat_type != 'wav' else 1

    def __call__(self, frames_features):
        network_inputs = []
        output = frames_features
        while output.shape[self.c_axis] < self.num_frames:
            output = np.concatenate((output, frames_features), axis=self.c_axis)

        # Bug fix: measure along the chunking axis instead of len(output),
        # which was wrong for feat_type='wav' (len gives the channel count).
        if output.shape[self.c_axis] / self.num_frames >= self.input_per_file:
            # Enough material: take consecutive, non-overlapping chunks.
            for i in range(self.input_per_file):
                start = i * self.num_frames
                if self.c_axis == 0:
                    network_inputs.append(output[start:start + self.num_frames])
                else:
                    network_inputs.append(output[:, start:start + self.num_frames])
        else:
            # Not enough material: sample random windows instead.
            for i in range(self.input_per_file):
                try:
                    start = np.random.randint(low=0, high=output.shape[self.c_axis] - self.num_frames + 1)
                    if self.c_axis == 0:
                        network_inputs.append(output[start:start + self.num_frames])
                    else:
                        network_inputs.append(output[:, start:start + self.num_frames])
                except Exception as e:
                    print(len(output))
                    raise e

        network_inputs = np.array(network_inputs, dtype=np.float32)
        if self.remove_vad:
            network_inputs = network_inputs[:, :, 1:]

        # Collapse the batch dimension when only a single chunk was produced
        # (squeeze(0) is a no-op when input_per_file > 1).
        if len(network_inputs.shape) > 2:
            network_inputs = network_inputs.squeeze(0)

        return network_inputs


class ConcateNumInput_Test(object):
    """Sample a random start offset for a fixed-length crop (test helper).

    Tiles the input along time until it reaches ``num_frames`` frames, then
    returns a random valid start index together with the tiled length — the
    caller performs the actual slicing.
    """

    def __init__(self, input_per_file=1, num_frames=c.NUM_FRAMES_SPECT, remove_vad=False):
        super(ConcateNumInput_Test, self).__init__()
        self.input_per_file = input_per_file
        self.num_frames = num_frames
        self.remove_vad = remove_vad

    def __call__(self, frames_features):
        output = frames_features
        while len(output) < self.num_frames:
            output = np.concatenate((output, frames_features), axis=0)

        start = np.random.randint(low=0, high=len(output) - self.num_frames + 1)
        return start, len(output)
class concateinputfromMFB(object):
    """Randomly crop fixed-length windows from a filter-bank feature matrix.

    The matrix is tiled along time until it is at least ``num_frames`` long,
    after which ``input_per_file`` random windows of ``num_frames`` frames
    are stacked into a float32 tensor. With ``remove_vad`` the first
    coefficient of every frame is dropped.
    """

    def __init__(self, input_per_file=1, num_frames=c.NUM_FRAMES_SPECT, remove_vad=False):
        super(concateinputfromMFB, self).__init__()
        self.input_per_file = input_per_file
        self.num_frames = num_frames
        self.remove_vad = remove_vad

    def __call__(self, frames_features):
        tiled = frames_features
        while len(tiled) < self.num_frames:
            tiled = np.concatenate((tiled, frames_features), axis=0)

        slices = []
        for _ in range(self.input_per_file):
            try:
                begin = np.random.randint(low=0, high=len(tiled) - self.num_frames + 1)
                slices.append(tiled[begin:begin + self.num_frames])
            except Exception as e:
                print(len(tiled))
                raise e

        batch = torch.tensor(slices, dtype=torch.float32)
        if self.remove_vad:
            # Drop the leading energy/VAD coefficient of each frame.
            batch = batch[:, :, 1:]
        return batch
class ConcateOrgInput(object):
    """Wrap a full-length feature matrix as a single-item float32 tensor.

    Unlike the fixed-length transforms, the whole utterance is kept; the
    output gains a leading batch dimension of size 1. With ``remove_vad``
    the first coefficient of each frame is dropped.
    """

    def __init__(self, remove_vad=False):
        super(ConcateOrgInput, self).__init__()
        self.remove_vad = remove_vad

    def __call__(self, frames_features):
        feats = np.array(frames_features)
        if self.remove_vad:
            feats = feats[:, 1:]
        return torch.tensor([feats], dtype=torch.float32)
def pad_tensor(vec, pad, dim):
    """
    Tile ``vec`` along ``dim`` until it reaches length ``pad``, then take a
    random window of exactly ``pad`` elements along that dimension.

    args:
        vec - tensor to crop/pad
        pad - the target length along ``dim``
        dim - dimension to crop/pad
    return:
        a tensor whose size along ``dim`` is exactly ``pad``
    """
    tiled = vec
    while tiled.shape[dim] < pad:
        tiled = torch.cat([tiled, tiled], dim=dim)
    offset = np.random.randint(low=0, high=tiled.shape[dim] - pad + 1)
    return torch.Tensor.narrow(tiled, dim=dim, start=offset, length=pad)
class PadCollate:
    """
    Collate function that crops every utterance in a batch to a common,
    randomly chosen frame length.

    With ``fix_len`` a single random length in [min_chunk_size,
    max_chunk_size) is drawn once and reused for all batches; otherwise one
    length per batch is pre-generated (``num_batch`` of them), shifted
    upward until their mean reaches the midpoint of the range, and cycled
    through — with a reshuffle after each full pass (one epoch).
    """

    def __init__(self, dim=0, min_chunk_size=200, max_chunk_size=400, normlize=True,
                 num_batch=0,
                 fix_len=False):
        """
        args:
            dim - the dimension to be padded (dimension of time in sequences)
            min_chunk_size / max_chunk_size - bounds for the random length
            normlize - stored but not used in this class (sic; kept for callers)
            num_batch - number of batches per epoch (required unless fix_len)
            fix_len - use one fixed random length for every batch
        """
        self.dim = dim
        self.min_chunk_size = min_chunk_size
        self.max_chunk_size = max_chunk_size
        self.num_batch = num_batch
        self.fix_len = fix_len
        self.normlize = normlize

        if self.fix_len:
            self.frame_len = np.random.randint(low=self.min_chunk_size, high=self.max_chunk_size)
        else:
            assert num_batch > 0
            batch_len = []
            self.iteration = 0

            print('==> Generating %d different random length...' % (num_batch))
            for i in range(num_batch):
                batch_len.append(np.random.randint(low=self.min_chunk_size, high=self.max_chunk_size))

            self.batch_len = np.array(batch_len)
            # Nudge the drawn lengths upward until their mean reaches the
            # midpoint of the range, then clip back to the maximum.
            while np.mean(self.batch_len[:num_batch]) < int((self.min_chunk_size + self.max_chunk_size) / 2):
                self.batch_len += 1

            self.batch_len = self.batch_len.clip(max=self.max_chunk_size)
            print('==> Average of utterance length is %d. ' % (np.mean(self.batch_len[:num_batch])))

    def pad_collate(self, batch):
        """
        args:
            batch - list of (tensor, label) pairs
        return:
            xs - stacked tensors, randomly cropped along the time axis to
                 the length chosen for this batch
            ys - a LongTensor of all labels in the batch
        """
        if self.fix_len:
            frame_len = self.frame_len
        else:
            frame_len = self.batch_len[self.iteration % self.num_batch]
            self.iteration += 1
            self.iteration %= self.num_batch
            # A full pass over the pre-generated lengths ends an epoch;
            # reshuffle them for the next one.
            if self.iteration == 0:
                np.random.shuffle(self.batch_len)

        xs = torch.stack(list(map(lambda x: x[0], batch)), dim=0)
        if frame_len < batch[0][0].shape[-2]:
            # Random crop along time (the second-to-last tensor dimension);
            # the same crop window is applied to every item in the batch.
            start = np.random.randint(low=0, high=batch[0][0].shape[-2] - frame_len)
            end = start + frame_len
            xs = xs[:, :, start:end, :].contiguous()
        else:
            xs = xs.contiguous()

        ys = torch.LongTensor(list(map(lambda x: x[1], batch)))

        return xs, ys

    def __call__(self, batch):
        return self.pad_collate(batch)
class RNNPadCollate:
    """
    Collate variable-length sequences for an RNN.

    Each batch item is ``((features, ...), label)``; only the first 40
    feature columns are kept. Items are sorted by decreasing length, padded
    to a common length and packed with ``pack_padded_sequence``.
    """

    def __init__(self, dim=0):
        """
        args:
            dim - the dimension to be padded (dimension of time in sequences)
        """
        self.dim = dim

    def pad_collate(self, batch):
        """
        args:
            batch - list of ((tensor, ...), label) items
        return:
            (packed sequences, length-sorted LongTensor labels, sorted lengths)
        """
        feats = [item[0][0][:, :40].float() for item in batch]
        # Descending-length order, as required by pack_padded_sequence.
        order = np.argsort(-np.array([len(f) for f in feats]))

        sorted_feats = [feats[j] for j in range(len(order)) and [order[i] for i in range(len(order))]] if False else [feats[order[i]] for i in range(len(order))]
        sorted_labels = torch.LongTensor([batch[order[i]][1] for i in range(len(order))])
        lengths = [len(f) for f in sorted_feats]

        padded = rnn_utils.pad_sequence(sorted_feats, batch_first=True, padding_value=0)
        packed = rnn_utils.pack_padded_sequence(padded, lengths, batch_first=True)
        return packed, sorted_labels, lengths

    def __call__(self, batch):
        return self.pad_collate(batch)
class TripletPadCollate:
    """Collate (anchor, positive, negative, anchor-label, negative-label)
    tuples, padding all three utterances to one random frame length that is
    drawn once when the collator is constructed.
    """

    def __init__(self, dim=0):
        """
        args:
            dim - the dimension to be padded (dimension of time in sequences)
        """
        self.dim = dim
        self.min_chunk_size = 300
        self.max_chunk_size = 500
        # Single random target length, fixed for the collator's lifetime.
        self.num_chunk = np.random.randint(low=self.min_chunk_size, high=self.max_chunk_size)

    def pad_collate(self, batch):
        """Return stacked (xs_a, xs_p, xs_n, ys_a, ys_n) tensors.

        args:
            batch - list of (anchor, positive, negative, label_a, label_n)
        """
        frame_len = self.num_chunk
        padded = [
            (pad_tensor(anchor, pad=frame_len, dim=self.dim),
             pad_tensor(positive, pad=frame_len, dim=self.dim),
             pad_tensor(negative, pad=frame_len, dim=self.dim),
             label_a,
             label_n)
            for anchor, positive, negative, label_a, label_n in batch
        ]
        xs_a = torch.stack([row[0] for row in padded], dim=0)
        xs_p = torch.stack([row[1] for row in padded], dim=0)
        xs_n = torch.stack([row[2] for row in padded], dim=0)
        ys_a = torch.LongTensor([row[3] for row in padded])
        ys_n = torch.LongTensor([row[4] for row in padded])
        return xs_a, xs_p, xs_n, ys_a, ys_n

    def __call__(self, batch):
        return self.pad_collate(batch)
class ExtractCollate:
    """Collate (tensor, label, utterance-id) triples for feature extraction.

    Tensors are padded to a random frame length drawn once at construction;
    utterance ids are passed through untouched.
    """

    def __init__(self, dim=0):
        """
        args:
            dim - the dimension to be padded (dimension of time in sequences)
        """
        self.dim = dim
        self.min_chunk_size = 300
        self.max_chunk_size = 500
        # Single random target length, fixed for the collator's lifetime.
        self.num_chunk = np.random.randint(low=self.min_chunk_size, high=self.max_chunk_size)

    def extract_collate(self, batch):
        """Return (padded tensors stacked, labels, utterance ids).

        args:
            batch - list of (tensor, label, uid)
        """
        frame_len = self.num_chunk
        xs = torch.stack(
            [pad_tensor(sample[0], pad=frame_len, dim=self.dim) for sample in batch],
            dim=0,
        )
        ys = torch.LongTensor([sample[1] for sample in batch])
        uid = [sample[2] for sample in batch]
        return xs, ys, uid

    def __call__(self, batch):
        return self.extract_collate(batch)
class truncatedinputfromSpectrogram(object):
    """Cut fixed-length windows out of a spectrogram.

    Each call transposes the input to (frames, bins) and extracts
    ``input_per_file`` windows: a random window when the input is long
    enough, otherwise a single zero-padded slice of
    (c.NUM_FRAMES_SPECT, c.NUM_FFT // 2 + 1).
    """

    def __init__(self, input_per_file=1):
        super(truncatedinputfromSpectrogram, self).__init__()
        # Number of windows to extract per spectrogram.
        self.input_per_file = input_per_file

    def __call__(self, frames_features):
        network_inputs = []
        # Transpose to (frames, frequency bins).
        frames_features = np.swapaxes(frames_features, 0, 1)
        num_frames = len(frames_features)
        import random
        for i in range(self.input_per_file):
            j = 0
            if c.NUM_PREVIOUS_FRAME_SPECT <= (num_frames - c.NUM_NEXT_FRAME_SPECT):
                # Input is long enough: choose a random window centre.
                j = random.randrange(c.NUM_PREVIOUS_FRAME_SPECT, num_frames - c.NUM_NEXT_FRAME_SPECT)
            if j == 0:
                # Input too short: zero-pad up to the full window size.
                # Bug fix: NUM_FFT // 2 + 1 — np.zeros rejects a float
                # dimension, so the original `c.NUM_FFT/2+1` raised TypeError
                # on modern NumPy.
                frames_slice = np.zeros((c.NUM_FRAMES_SPECT, c.NUM_FFT // 2 + 1), dtype=np.float32)
                frames_slice[0:(frames_features.shape[0])] = frames_features
            else:
                frames_slice = frames_features[j - c.NUM_PREVIOUS_FRAME_SPECT:j + c.NUM_NEXT_FRAME_SPECT]
            network_inputs.append(frames_slice)
        return np.array(network_inputs)
def read_audio(filename, sample_rate=c.SAMPLE_RATE):
    """Load an audio file as a flat mono float array at `sample_rate`."""
    waveform, _sr = librosa.load(filename, sr=sample_rate, mono=True)
    return waveform.flatten()
# Deprecated per-row variant (normalizes each frame independently rather than
# per feature column); superseded by normalize_frames below:
#def normalize_frames(m):
#    return [(v - np.mean(v)) / (np.std(v) + 2e-12) for v in m]
def normalize_frames(m, Scale=True):
    """Mean-centre each feature column of `m`; optionally divide by the
    per-column standard deviation (with a small epsilon for stability).

    :param m: 2-D array of frames (rows) by features (columns)
    :param Scale: when True, also normalize to unit variance
    :return: normalized array
    """
    centered = m - np.mean(m, axis=0)
    if not Scale:
        return centered
    return centered / (np.std(m, axis=0) + 1e-12)
def pre_process_inputs(signal=np.random.uniform(size=32000), target_sample_rate=8000, use_delta=c.USE_DELTA):
    """Compute normalized filter-bank features (optionally with delta and
    delta-delta), then return one randomly positioned frame window.

    NOTE(review): the `signal` default is evaluated once at import time, so
    every default call reuses the same random array — confirm this is
    intended; `use_delta` likewise freezes c.USE_DELTA at import.
    """
    filter_banks, energies = fbank(signal, samplerate=target_sample_rate, nfilt=c.FILTER_BANK, winlen=0.025)
    delta_1 = delta(filter_banks, N=1)
    delta_2 = delta(delta_1, N=1)
    # Per-column mean/variance normalization of each feature stream.
    filter_banks = normalize_frames(filter_banks)
    delta_1 = normalize_frames(delta_1)
    delta_2 = normalize_frames(delta_2)
    if use_delta:
        frames_features = np.hstack([filter_banks, delta_1, delta_2])
    else:
        frames_features = filter_banks
    num_frames = len(frames_features)
    network_inputs = []
    """Too complicated
    for j in range(c.NUM_PREVIOUS_FRAME, num_frames - c.NUM_NEXT_FRAME):
        frames_slice = frames_features[j - c.NUM_PREVIOUS_FRAME:j + c.NUM_NEXT_FRAME]
        #network_inputs.append(np.reshape(frames_slice, (32, 20, 3)))
        network_inputs.append(frames_slice)
    """
    import random
    # Pick one random window of NUM_PREVIOUS_FRAME + NUM_NEXT_FRAME frames.
    j = random.randrange(c.NUM_PREVIOUS_FRAME, num_frames - c.NUM_NEXT_FRAME)
    frames_slice = frames_features[j - c.NUM_PREVIOUS_FRAME:j + c.NUM_NEXT_FRAME]
    network_inputs.append(frames_slice)
    return np.array(network_inputs)
class truncatedinput(object):
    """Truncate a raw waveform to a fixed number of samples, zero-padding
    inputs that are too short.

    The target length is c.TRUNCATE_SOUND_FIRST_SECONDS * c.SAMPLE_RATE
    samples (i.e. the first few seconds of audio).
    """

    def __call__(self, input):
        #min_existing_frames = min(self.libri_batch['raw_audio'].apply(lambda x: len(x)).values)
        target = int(c.TRUNCATE_SOUND_FIRST_SECONDS * c.SAMPLE_RATE)
        if len(input) >= target:
            return input[0:target]
        # Too short: zero-pad up to the target length.
        padded = np.zeros((target,))
        padded[0:len(input)] = input
        return padded
class toMFB(object):
    """Convert a raw waveform into mel filter-bank features by delegating to
    pre_process_inputs at the configured sample rate."""

    def __call__(self, input):
        return pre_process_inputs(input, target_sample_rate=c.SAMPLE_RATE)
class totensor(object):
    """Convert an array-like of features to a float32 tensor with a leading
    channel dimension of size one."""

    def __call__(self, input):
        """
        Args:
            input (numpy.ndarray or sequence): features to convert.
        Returns:
            Tensor: float32 tensor of shape (1, *input.shape).
        """
        as_tensor = torch.tensor(input, dtype=torch.float32)
        return as_tensor.unsqueeze(0)
class to2tensor(object):
    """Convert an array-like to a float32 tensor without adding any
    extra dimensions."""

    def __call__(self, pic):
        """
        Args:
            pic (numpy.ndarray or sequence): data to be converted to tensor.
        Returns:
            Tensor: float32 tensor with the same shape as the input.
        """
        return torch.tensor(pic, dtype=torch.float32)
class tonormal(object):
    """Subtract the global mean from a tensor (zero-centre), returning float."""

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): tensor to be mean-centred.
        Returns:
            Tensor: float tensor with zero global mean.
        """
        # TODO: make efficient
        centered = tensor - torch.mean(tensor)
        return centered.float()
class mvnormal(object):
    """Mean/variance-normalize a tensor along its second-to-last dimension
    (typically the time axis), with a small epsilon for stability."""

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): tensor to be normalized along dim -2.
        Returns:
            Tensor: float tensor with zero mean and unit variance per column.
        """
        # TODO: make efficient
        centered = tensor - torch.mean(tensor, dim=-2, keepdim=True)
        scale = torch.std(tensor, dim=-2, keepdim=True) + 1e-12
        return (centered / scale).float()
class tolog(object):
    """Apply a natural logarithm element-wise, returning a float tensor."""

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): strictly positive values to be log-transformed.
        Returns:
            Tensor: float tensor of element-wise natural logs.
        """
        return torch.log(tensor).float()
| 34.718081 | 197 | 0.626518 | 17,471 | 0.431212 | 0 | 0 | 0 | 0 | 0 | 0 | 12,473 | 0.307854 |
ea3f6b2815870dddcf136d6b81a2a73d2ad33c33 | 3,822 | py | Python | gamestonk_terminal/stocks/dark_pool_shorts/stockgrid_model.py | minhhoang1023/GamestonkTerminal | 195dc19b491052df080178c0cc6a9d535a91a704 | [
"MIT"
] | 1 | 2022-03-15T13:05:40.000Z | 2022-03-15T13:05:40.000Z | gamestonk_terminal/stocks/dark_pool_shorts/stockgrid_model.py | minhhoang1023/GamestonkTerminal | 195dc19b491052df080178c0cc6a9d535a91a704 | [
"MIT"
] | null | null | null | gamestonk_terminal/stocks/dark_pool_shorts/stockgrid_model.py | minhhoang1023/GamestonkTerminal | 195dc19b491052df080178c0cc6a9d535a91a704 | [
"MIT"
] | null | null | null | """ Stockgrid View """
__docformat__ = "numpy"
import logging
from typing import List, Tuple
import pandas as pd
import requests
from gamestonk_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_dark_pool_short_positions(sort_field: str, ascending: bool) -> pd.DataFrame:
    """Get dark pool short positions. [Source: Stockgrid]

    Parameters
    ----------
    sort_field : str
        Field to sort by: 'sv' (Short Vol. 1M), 'sv_pct' (Short Vol. %),
        'nsv' (Net Short Vol. 1M), 'nsv_dollar' (Net Short Vol. $100M),
        'dpp' (DP Position 1M), 'dpp_dollar' (DP Position $1B)
    ascending : bool
        Return data in ascending order

    Returns
    ----------
    pd.DataFrame
        Dark pool short position data
    """
    # URL-encoded endpoint names for each supported sort field.
    endpoint_by_field = {
        "sv": "Short+Volume",
        "sv_pct": "Short+Volume+%25",
        "nsv": "Net+Short+Volume",
        "nsv_dollar": "Net+Short+Volume+$",
        "dpp": "Dark+Pools+Position",
        "dpp_dollar": "Dark+Pools+Position+$",
    }
    field = endpoint_by_field[sort_field]
    order = "asc" if ascending else "desc"
    link = f"https://stockgridapp.herokuapp.com/get_dark_pool_data?top={field}&minmax={order}"
    payload = requests.get(link).json()
    columns = [
        "Ticker",
        "Date",
        "Short Volume",
        "Short Volume %",
        "Net Short Volume",
        "Net Short Volume $",
        "Dark Pools Position",
        "Dark Pools Position $",
    ]
    return pd.DataFrame(payload["data"])[columns]
@log_start_end(log=logger)
def get_short_interest_days_to_cover(sort_field: str) -> pd.DataFrame:
    """Get short interest and days to cover. [Source: Stockgrid]

    Parameters
    ----------
    sort_field : str
        Field to sort by: 'float' (Float Short %), 'dtc' (Days to Cover),
        'si' (Short Interest)

    Returns
    ----------
    pd.DataFrame
        Short interest and days to cover data
    """
    link = "https://stockgridapp.herokuapp.com/get_short_interest?top=days"
    frame = pd.DataFrame(requests.get(link).json()["data"])
    sort_column = {
        "float": "%Float Short",
        "dtc": "Days To Cover",
        "si": "Short Interest",
    }[sort_field]
    selected = frame[["Ticker", "Date", "%Float Short", "Days To Cover", "Short Interest"]]
    # Only days-to-cover is conventionally shown ascending.
    return selected.sort_values(by=sort_column, ascending=sort_field == "dtc")
@log_start_end(log=logger)
def get_short_interest_volume(ticker: str) -> Tuple[pd.DataFrame, List]:
    """Get price vs short interest volume. [Source: Stockgrid]

    Parameters
    ----------
    ticker : str
        Stock to get data from

    Returns
    ----------
    pd.DataFrame
        Short interest volume data
    List
        Price data
    """
    link = f"https://stockgridapp.herokuapp.com/get_dark_pool_individual_data?ticker={ticker}"
    payload = requests.get(link).json()
    frame = pd.DataFrame(payload["individual_short_volume_table"]["data"])
    frame["date"] = pd.to_datetime(frame["date"])
    return frame, payload["prices"]["prices"]
@log_start_end(log=logger)
def get_net_short_position(ticker: str) -> pd.DataFrame:
    """Get net short position. [Source: Stockgrid]

    Parameters
    ----------
    ticker: str
        Stock to get data from

    Returns
    ----------
    pd.DataFrame
        Net short position
    """
    link = f"https://stockgridapp.herokuapp.com/get_dark_pool_individual_data?ticker={ticker}"
    frame = pd.DataFrame(requests.get(link).json()["individual_dark_pool_position_data"])
    frame["dates"] = pd.to_datetime(frame["dates"])
    return frame
| 25.144737 | 94 | 0.599686 | 0 | 0 | 0 | 0 | 3,584 | 0.937729 | 0 | 0 | 2,163 | 0.565934 |
ea410dc0122ec7353bc207c7a8f3f53b18676252 | 2,918 | py | Python | Communal.Network.v.py | HolocronFoundation/Communal.Network | 41eff8ac3ca54897f19cd584e1cfc58f9326ab80 | [
"CC0-1.0"
] | 1 | 2019-03-08T18:41:26.000Z | 2019-03-08T18:41:26.000Z | Communal.Network.v.py | HolocronFoundation/Communal.Network | 41eff8ac3ca54897f19cd584e1cfc58f9326ab80 | [
"CC0-1.0"
] | null | null | null | Communal.Network.v.py | HolocronFoundation/Communal.Network | 41eff8ac3ca54897f19cd584e1cfc58f9326ab80 | [
"CC0-1.0"
] | null | null | null | item: event({channel_index: indexed(uint256), reply_to_index: indexed(uint256), metadata: indexed(uint256)})
# item_index is the index of the sent item in C.N.
# TODO: Implement channeling
# Channel management
struct channel:
owner: address
name_display: bytes32
name_unique: bytes32 # TODO: finish implementing
about: bytes32
css: bytes32
js: bytes32
channels: map(uint256, channel)
channel_last_opened: public(uint256)
# TODO: Is there functionality which is more core than "reply"
last_item_index: public(uint256)
item_metadata: public(map(uint256, uint256)) # TODO: Check this is implemented correctly
# outer address is the authorizing user
owner: constant(address) = 0xd015FB4e0c7f2D0592fa642189a70ce46C90d612
custom_metadata_mask: constant(uint256) = 2 ** (12*8+1) - 1 - 2 # Fills 12 bytes with F, then sets the second to last bit to 0 TODO: Double check this math
# TODO: Add owner/management functions - most notably update functionality
# Helper methods - used across several disparate paths
@public
@constant
def prep_custom_metadata(custom_metadata: uint256) -> uint256:
    # Mask user-supplied metadata so it cannot set the reserved flag bits.
    return bitwise_and(custom_metadata, custom_metadata_mask)
@public
@constant
def generate_metadata(sender: address, other_metadata: uint256) -> uint256:
    # Default_metadata is 1 bit.
    # The right bit represents if an item is an IPFS hash of form Qm (sha2-256). This bit is set automatically if there is no metadata sent by the user, but must be set manually if the user sends metadata.
    # Should this change in the future or you want to do something
    # different now, you should trigger this flag, but also use custom
    # metadata to indicate this difference.
    # Packs the 160-bit sender address into the upper bits (shifted left by
    # 96), leaving the low 96 bits for flags/custom metadata.
    return shift(convert(sender, uint256), 96) + other_metadata
# Functions which feed into check_item_then_iterate_last_item, log.item, and generate_metadata
@private
def send_item(channel: uint256, reply_to_index: uint256, sender: address, metadata: uint256):
    # Reject replies to items that do not exist yet.
    assert reply_to_index <= self.last_item_index
    self.last_item_index += 1
    self.item_metadata[self.last_item_index] = self.generate_metadata(sender, metadata)
    # Bug fix: storage reads must go through `self.` — the bare
    # `item_metadata[...]` in the original does not resolve in Vyper.
    log.item(channel, reply_to_index, self.item_metadata[self.last_item_index])
# Functions which feed into send_item
# NOTE(review): in all three entry points the `item` argument is only carried
# in calldata; send_item never stores or logs it — confirm this is intended.
@public
@payable
def send_message_user(item: bytes32, channel: uint256, reply_to_index: uint256 = 0):
    # Plain message: metadata flag 0 (item is not an IPFS hash).
    self.send_item(channel, reply_to_index, msg.sender, 0)
@public
@payable
def send_hash_user(item: bytes32, channel: uint256, reply_to_index: uint256 = 0):
    # IPFS content: metadata flag 1 marks `item` as a Qm (sha2-256) hash.
    self.send_item(channel, reply_to_index, msg.sender, 1)
@public
@payable
def send_item_user_with_metadata(item: bytes32, channel: uint256, custom_metadata: uint256, reply_to_index: uint256 = 0):
    # Caller-supplied metadata, masked so reserved bits cannot be spoofed.
    self.send_item(channel, reply_to_index, msg.sender, self.prep_custom_metadata(custom_metadata))
### Withdraw donations functionality ###
@public
def withdraw():
    # Sweeps the entire contract balance to the hard-coded owner address.
    send(owner, self.balance)
| 39.972603 | 207 | 0.765593 | 0 | 0 | 0 | 0 | 1,693 | 0.580192 | 0 | 0 | 1,079 | 0.369774 |
ea4135e0971c3f38be4f8fa5f8203ba013d2457e | 2,414 | py | Python | floodsystem/geo.py | Ericsong2333/partia-flood-warning-system | 545a257f601535c62b3341059fdf2203e06c8e17 | [
"MIT"
] | null | null | null | floodsystem/geo.py | Ericsong2333/partia-flood-warning-system | 545a257f601535c62b3341059fdf2203e06c8e17 | [
"MIT"
] | null | null | null | floodsystem/geo.py | Ericsong2333/partia-flood-warning-system | 545a257f601535c62b3341059fdf2203e06c8e17 | [
"MIT"
] | null | null | null | # Copyright (C) 2018 Garth N. Wells
#
# SPDX-License-Identifier: MIT
"""This module contains a collection of functions related to
geographical data.
"""
from floodsystem.utils import sorted_by_key # noqa
import math
def hav(theta):
    """Haversine of an angle in radians: sin^2(theta / 2)."""
    half_angle = theta * 0.5
    return math.sin(half_angle) ** 2
def r(theta):
    """Convert an angle from degrees to radians."""
    return math.radians(theta)
def dist(coor1, coor2):
    """Great-circle distance in km between two (latitude, longitude) points,
    computed with the haversine formula on a sphere of radius 6371 km.

    Improvements over the original: self-contained (no single-letter helper
    calls) and the haversine term is clamped into [0, 1] so floating-point
    rounding near antipodal points cannot push math.asin out of its domain.
    """
    lat1, lon1 = math.radians(coor1[0]), math.radians(coor1[1])
    lat2, lon2 = math.radians(coor2[0]), math.radians(coor2[1])
    hav_phi = (math.sin((lat2 - lat1) / 2) ** 2
               + math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2) ** 2)
    # Clamp: rounding error can yield values fractionally outside [0, 1].
    hav_phi = min(1.0, max(0.0, hav_phi))
    return 2 * 6371 * math.asin(math.sqrt(hav_phi))
def stations_by_distance(stations, p):
    """Return (station, distance-in-km-from-p) pairs sorted by distance."""
    pairs = [(station, dist(station.coord, p)) for station in stations]
    return sorted_by_key(pairs, 1)
def rivers_with_station(stations):
    """Return the set of river names with at least one monitoring station."""
    rivers = set()
    for station in stations:
        rivers.add(station.river)
    return rivers
def stations_by_river(stations):
    """Map each river name to the list of its stations (in input order).

    Improvement: single pass over the stations — the original iterated the
    full station list once per river (O(rivers x stations)).
    """
    retdict = {}
    for station in stations:
        retdict.setdefault(station.river, []).append(station)
    return retdict
from haversine import haversine,Unit
def stations_within_radius(stations, centre, r):
    """Return an alphabetically sorted list of the NAMES of all stations
    within radius r (km) of the geographic coordinate `centre`.
    """
    names = [
        station.name
        for station in stations
        if haversine(station.coord, centre) < r
    ]
    names.sort()
    return names
def rivers_by_station_number(stations, N):
    """This fuction determines the N rivers with the greatest number of monitoring stations.
    In the case that there are more rivers with the same number of stations as the N th entry, include these rivers in the list.
    """
    # Build (river, station-count) pairs for every river.
    river_number = []
    for key,value in stations_by_river(stations).items():
        river_number.append((key,len(value)))
    # Sort by count, largest first.
    river_number_sorted = sorted_by_key(river_number,1,reverse=True)
    river_final = []
    count = 0
    for river in river_number_sorted:
        if count < N:
            # Still filling the top-N slots.
            river_final.append(river)
            count += 1
        elif count == N:
            # Past N entries: keep including only exact ties with the Nth count.
            if river[1] == river_final[-1][1]:
                river_final.append(river)
            else:
                break
        else:
            break
    return river_final
| 28.4 | 130 | 0.609776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 524 | 0.217067 |
ea4292de7cf5cedb5b3ae53916a825188ffc20c5 | 852 | py | Python | codes/Constant.py | YasserDaho/Saliency-3DSal | 4a8ff399c8b24ccc88bb04311d6f9797d0cae2d1 | [
"MIT"
] | 2 | 2020-04-19T13:25:47.000Z | 2020-05-08T17:14:38.000Z | codes/Constant.py | YasserDaho/Saliency-3DSal | 4a8ff399c8b24ccc88bb04311d6f9797d0cae2d1 | [
"MIT"
] | null | null | null | codes/Constant.py | YasserDaho/Saliency-3DSal | 4a8ff399c8b24ccc88bb04311d6f9797d0cae2d1 | [
"MIT"
] | 1 | 2019-09-24T17:42:08.000Z | 2019-09-24T17:42:08.000Z | """""
Path to the Image Dataset directories
"""""
TR_IMG_DIR = './WORKSPACE/DATASET/annotation/'
GT_IMG_DIR = './WORKSPACE/DATASET/annotation/'
"""""
Path to Numpy Video directories
"""""
TR_VID_DIR = './WORKSPACE/DATA/TR_DATA/'
GT_VID_DIR = './WORKSPACE/DATA/GT_DATA/'
"""""
Path to Numpy batches directories
"""""
TR_VGG_DIR = './WORKSPACE/BATCH/VGG-16/'
TR_BATCH_DIR = './WORKSPACE/BATCH/TR_BATCH/'
GT_BATCH_DIR = './WORKSPACE/BATCH/GT_BATCH/'
"""""
Path to the global test dataset directories
"""""
TEST_DIR = './WORKSPACE/TEST/annotation/'
TEST_RES = './WORKSPACE/TEST/result/'
"""""
Path to the text file, containing the dataset video names
"""""
DATASET_INDEX = './train.txt'
TEST_INDEX = './test.txt'
"""""
The new image size
"""""
IMG_SIZE = 224
""""""
""""
The saved model directory
"""
Model_DIR = './WORKSPACE/TRAINED_MODEL/'
| 17.75 | 57 | 0.676056 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 647 | 0.75939 |
ea436f37e311aec310471106ec69e43fc23e41c4 | 1,152 | py | Python | cannlytics/lims/qc.py | mindthegrow/cannlytics | c266bc1169bef75214985901cd3165f415ad9ba7 | [
"MIT"
] | 7 | 2021-05-31T15:30:22.000Z | 2022-02-05T14:12:31.000Z | cannlytics/lims/qc.py | mindthegrow/cannlytics | c266bc1169bef75214985901cd3165f415ad9ba7 | [
"MIT"
] | 17 | 2021-06-09T01:04:27.000Z | 2022-03-18T14:48:12.000Z | cannlytics/lims/qc.py | mindthegrow/cannlytics | c266bc1169bef75214985901cd3165f415ad9ba7 | [
"MIT"
] | 5 | 2021-06-07T13:52:33.000Z | 2021-08-04T00:09:39.000Z | """
Quality Control Tools | Cannlytics
Author: Keegan Skeate <keegan@cannlytics.com>
Created: 2/6/2021
Updated: 6/23/2021
License: MIT License <https://opensource.org/licenses/MIT>
Perform various quality control checks and analyses to ensure
that your laboratory is operating as desired.
TODO:
- Trend analyte results.
- Create predictions of lab results given available inputs!
- Statistics for internal standards.
"""
def backup_data():
    """Backup data stored in Firestore."""
    # Bug fix (all stubs below): `return NotImplementedError` handed callers
    # the exception CLASS (a truthy object) instead of failing; stubs should
    # raise so unimplemented paths fail loudly.
    raise NotImplementedError


def calculate_relative_percent_diff():
    """Calculate relative perecent difference between two samples."""
    raise NotImplementedError


def plot_area_response():
    """Plot area response over time for a group of samples."""
    raise NotImplementedError


def plot_deviations():
    """Plot deviations in results for a group of samples."""
    raise NotImplementedError


def track_deviations():
    """Track deviations in results for a group of samples."""
    raise NotImplementedError


def metrc_reconciliation():
    """Reconcile Metrc data with Firestore data."""
    raise NotImplementedError
| 24.510638 | 69 | 0.737847 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 764 | 0.663194 |
ea43b6473a80169e05c17d102859f981a7b958d9 | 76 | py | Python | containershare/validator/__init__.py | vsoch/containershare-python | 9db2a4d7c7fcb0c21edd5d2e2b5396d7108fe392 | [
"BSD-3-Clause"
] | null | null | null | containershare/validator/__init__.py | vsoch/containershare-python | 9db2a4d7c7fcb0c21edd5d2e2b5396d7108fe392 | [
"BSD-3-Clause"
] | 1 | 2018-07-30T22:11:56.000Z | 2018-07-30T22:11:56.000Z | containershare/validator/__init__.py | vsoch/containershare-python | 9db2a4d7c7fcb0c21edd5d2e2b5396d7108fe392 | [
"BSD-3-Clause"
] | null | null | null | from .library import LibraryValidator
from .runtime import RuntimeValidator
| 25.333333 | 37 | 0.868421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ea442c2de4ff492dd4374c04a850d9f728259783 | 343 | py | Python | packages/pyright-internal/src/tests/samples/genericTypes1.py | sransara/pyright | 4e117682c946b60f2b24fd75a07736954b21f158 | [
"MIT"
] | 1 | 2020-12-28T16:58:24.000Z | 2020-12-28T16:58:24.000Z | packages/pyright-internal/src/tests/samples/genericTypes1.py | sransara/pyright | 4e117682c946b60f2b24fd75a07736954b21f158 | [
"MIT"
] | 1 | 2021-08-31T20:37:43.000Z | 2021-08-31T20:37:43.000Z | packages/pyright-internal/src/tests/samples/genericTypes1.py | sransara/pyright | 4e117682c946b60f2b24fd75a07736954b21f158 | [
"MIT"
] | null | null | null | # This sample tests that the type analyzer flags as an error
# an attempt to assign to or delete a generic type.
from typing import Dict
# This should generate an error because assignment
# of generic types isn't allowed.
Dict[str, int] = {}
# This should generate an error because deletion
# of generic types isn't allowed.
del Dict[str]
| 24.5 | 60 | 0.755102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 275 | 0.801749 |
ea4430c870df5f7cb35f3b4eb8439be6a855324e | 770 | py | Python | extras/scripts/test-client.py | claudiosv/unisparks | 6215faddbc5a656c7f387c3bea811d435b122042 | [
"Apache-2.0"
] | null | null | null | extras/scripts/test-client.py | claudiosv/unisparks | 6215faddbc5a656c7f387c3bea811d435b122042 | [
"Apache-2.0"
] | 3 | 2022-01-26T22:55:56.000Z | 2022-02-04T18:41:54.000Z | extras/scripts/test-client.py | claudiosv/unisparks | 6215faddbc5a656c7f387c3bea811d435b122042 | [
"Apache-2.0"
] | 1 | 2021-10-05T17:42:55.000Z | 2021-10-05T17:42:55.000Z | #!/usr/bin/python
import socket
import sys
import time
import struct
# NOTE(review): this script is Python 2 (print statements) — it will not run
# under Python 3 without porting.
# Multicast group and port the Unisparks effect broadcasts arrive on.
MCADDR = '239.255.223.01'
PORT = 0xDF0D
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((MCADDR, PORT))
# Join the multicast group on all interfaces.
mreq = struct.pack("4sl", socket.inet_aton(MCADDR), socket.INADDR_ANY)
s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
while 1:
    data, addr = s.recvfrom(1024)
    try:
        # Fixed 42-byte frame: u32 msgcode, 12 reserved bytes, 16-byte effect
        # name, u32 elapsed, u32 beat, u8 hue_med, u8 hue_dev (network order).
        (msgcode, reserved, effect, elapsed, beat, hue_med, hue_dev) = struct.unpack("!I12s16sIIBB", data)
        print "RX %s:%s %-16s elapsed: %04d beat: %04d hue_med: %03d hue_dev: %03d" % (addr[0], addr[1], effect.rstrip('\0'), elapsed, beat, hue_med, hue_dev)
    except Exception as err:
        # Frame did not match the expected layout; report size and error.
        print "RX %d bytes, %s" % (len(data), err)
| 33.478261 | 157 | 0.697403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.187013 |
ea49cec9477540e45ca7a41005246f0770863e18 | 925 | py | Python | algorithms/bfs/levelorder.py | hariharanragothaman/pymaster | b3d033b4d5c75c69f587c94d9d12cd4a349a6a69 | [
"Apache-2.0"
] | 10 | 2020-09-21T22:23:09.000Z | 2022-01-25T16:58:44.000Z | algorithms/bfs/levelorder.py | hariharanragothaman/pymaster | b3d033b4d5c75c69f587c94d9d12cd4a349a6a69 | [
"Apache-2.0"
] | null | null | null | algorithms/bfs/levelorder.py | hariharanragothaman/pymaster | b3d033b4d5c75c69f587c94d9d12cd4a349a6a69 | [
"Apache-2.0"
] | null | null | null | """
Breadth-First Search - Implemented using queues
This can be implemented for both Trees / Graphs
Here, we will use Trees as examples.
"""
from collections import deque
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Tree:
def level_order(self, root):
traverse = []
level = 0
if not root:
return traverse
# Put the entire tree into the Queue
q = deque([root])
while q:
traverse.append([])
for i in range(len(q)):
node = q.popleft()
traverse[level].append(node.val)
if node.left:
q.append(node.left)
if node.right:
q.append(node.right)
level += 1
return traverse
| 23.717949 | 54 | 0.505946 | 734 | 0.793514 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.195676 |
ea4a0594644cef9b0c271ea046c042822efb0f38 | 1,065 | py | Python | setup.py | mariocesar/boot.py | a75098759e91e4fb6be15ccab3745de13840d8d2 | [
"MIT"
] | 2 | 2018-02-16T01:26:50.000Z | 2021-10-31T09:50:50.000Z | setup.py | mariocesar/boot.py | a75098759e91e4fb6be15ccab3745de13840d8d2 | [
"MIT"
] | null | null | null | setup.py | mariocesar/boot.py | a75098759e91e4fb6be15ccab3745de13840d8d2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
from setuptools import find_packages, setup
# Fail fast on unsupported interpreters (f-strings below need 3.6+).
if sys.version_info < (3, 6):
    sys.exit('Python 3.6 is the minimum required version')
# First paragraph of README.rst is the short description; the rest becomes
# the long description shown on PyPI.
description, long_description = (
    open('README.rst', 'rt').read().split('\n\n', 1))
setup(
    name='boot.py',
    author='Mario César Señoranis Ayala',
    author_email='mariocesar.c50@gmail.com',
    version='0.16',
    url='https://github.com/mariocesar/boot.py',
    description=description,
    long_description=f'\n{long_description}',
    # Sources live under src/ (src-layout packaging).
    package_dir={'': 'src'},
    packages=find_packages('src'),
    python_requires='>=3.6',
    setup_requires=['pytest-runner'],
    tests_require=['pytest', 'pytest-cov'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'License :: OSI Approved :: MIT License',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| 30.428571 | 71 | 0.634742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 552 | 0.517338 |
ea4b5a3f51b0afdbeab25267ee595b2878076d5e | 132 | py | Python | salesforce_api/models/__init__.py | octopyth/python-salesforce-api | 3f51995f7dc4ae965cb7a594f6f0fb8fcf35ec5d | [
"MIT"
] | 25 | 2019-05-20T06:38:45.000Z | 2022-02-22T02:10:37.000Z | salesforce_api/models/__init__.py | octopyth/python-salesforce-api | 3f51995f7dc4ae965cb7a594f6f0fb8fcf35ec5d | [
"MIT"
] | 19 | 2019-07-02T10:12:09.000Z | 2022-01-09T23:33:21.000Z | salesforce_api/models/__init__.py | octopyth/python-salesforce-api | 3f51995f7dc4ae965cb7a594f6f0fb8fcf35ec5d | [
"MIT"
] | 16 | 2019-12-04T20:45:16.000Z | 2021-12-17T23:29:29.000Z | from . import bulk, deploy, retrieve, tooling
# Public API of the models package.
# NOTE(review): 'shared' is listed here but not imported above — confirm a
# sibling `shared` module exists, otherwise `from ... import *` will fail.
__all__ = [
    'bulk',
    'deploy',
    'retrieve',
    'tooling',
    'shared'
]
| 12 | 45 | 0.55303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.310606 |
ea4b8753181269244e4522f00e994e35347590a8 | 5,470 | py | Python | radioactivedecay/plots.py | BGameiro2000/radioactivedecay | 98a84345b9f15001dd99b177e131cfa17714804d | [
"MIT"
] | 16 | 2020-09-06T13:33:31.000Z | 2021-10-17T19:05:45.000Z | radioactivedecay/plots.py | BGameiro2000/radioactivedecay | 98a84345b9f15001dd99b177e131cfa17714804d | [
"MIT"
] | 14 | 2020-11-13T18:44:18.000Z | 2021-10-08T23:39:43.000Z | radioactivedecay/plots.py | BGameiro2000/radioactivedecay | 98a84345b9f15001dd99b177e131cfa17714804d | [
"MIT"
] | 5 | 2021-02-16T03:26:45.000Z | 2021-07-20T19:23:04.000Z | """
The plots module defines functions used for creating decay chain diagrams via the Nuclide
class ``plot()`` method, and activity decay graphs via the Inventory class ``plot()`` method.
"""
from typing import List, Set, Optional, Tuple
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# pylint: disable=too-many-arguments, too-many-locals
def _parse_nuclide_label(nuclide: str) -> str:
"""
Format a nuclide string to mass number, meta-stable state character in
superscript, then element symbol. Output is used on node labels in decay
chain plots.
Parameters
----------
nuclide : str
Nuclide string in element-mass format.
Returns
-------
str
Parsed string for node label in ^{mass}element format.
"""
if nuclide == "SF":
return "various"
nuclide_conversion = {
"0": "\N{SUPERSCRIPT ZERO}",
"1": "\N{SUPERSCRIPT ONE}",
"2": "\N{SUPERSCRIPT TWO}",
"3": "\N{SUPERSCRIPT THREE}",
"4": "\N{SUPERSCRIPT FOUR}",
"5": "\N{SUPERSCRIPT FIVE}",
"6": "\N{SUPERSCRIPT SIX}",
"7": "\N{SUPERSCRIPT SEVEN}",
"8": "\N{SUPERSCRIPT EIGHT}",
"9": "\N{SUPERSCRIPT NINE}",
"m": "\N{MODIFIER LETTER SMALL M}",
"n": "\N{SUPERSCRIPT LATIN SMALL LETTER N}",
"o": "\N{MODIFIER LETTER SMALL O}",
}
element, isotope = nuclide.split("-")
return "".join(map(lambda char: nuclide_conversion[char], list(isotope))) + element
def _parse_decay_mode_label(mode: str) -> str:
"""
Format a decay mode string for edge label on decay chain plot.
Parameters
----------
mode : str
Decay mode string.
Returns
-------
str
Formatted decay mode string for use in an edge label.
"""
mode_conversion = {
"α": "\N{GREEK SMALL LETTER ALPHA}",
"β": "\N{GREEK SMALL LETTER BETA}",
"+": "\N{SUPERSCRIPT PLUS SIGN}",
"-": "\N{SUPERSCRIPT MINUS}",
}
for unformatted, formatted in mode_conversion.items():
mode = mode.replace(unformatted, formatted)
return mode
def _check_fig_axes(
    fig_in: Optional[matplotlib.figure.Figure],
    axes_in: Optional[matplotlib.axes.Axes],
    **kwargs,
) -> Tuple[matplotlib.figure.Figure, matplotlib.axes.Axes]:
    """
    Resolve the Matplotlib Figure/Axes pair to draw on, creating whichever
    of the two the caller did not supply.

    Parameters
    ----------
    fig_in : None or matplotlib.figure.Figure
        Figure to use; None means derive it from the axes or create one.
    axes_in : matplotlib.axes.Axes or None
        Axes to use; None means derive them from the figure or create them.
    **kwargs
        Extra keyword arguments forwarded to ``plt.subplots()`` when both
        objects must be created.

    Returns
    -------
    fig : matplotlib.figure.Figure
    axes : matplotlib.axes.Axes
    """
    if fig_in is not None and axes_in is not None:
        return fig_in, axes_in
    if fig_in is None and axes_in is None:
        return plt.subplots(**kwargs)
    if axes_in is not None:
        # Only the axes were given: recover their parent figure.
        return axes_in.get_figure(), axes_in
    # Only the figure was given: use (or create) its current axes.
    return fig_in, fig_in.gca()
def _decay_graph(
    time_points: np.ndarray,
    ydata: np.ndarray,
    nuclides: List[str],
    xunits: str,
    ylabel: str,
    xscale: str,
    yscale: str,
    ylimits: List[float],
    display: Set[str],
    fig_in: Optional[matplotlib.figure.Figure],
    axes_in: Optional[matplotlib.axes.Axes],
    **kwargs,
) -> Tuple[matplotlib.figure.Figure, matplotlib.axes.Axes]:
    """
    Draw an activity-vs-time decay graph for an inventory. Figure and axes
    are created when not supplied; the pair used is returned.

    Parameters
    ----------
    time_points : numpy.ndarray
        x-axis time points.
    ydata : numpy.ndarray
        One row of y-axis data per nuclide, in the order of ``nuclides``.
    nuclides : list
        Nuclide strings (e.g. 'H-3').
    xunits : str
        Time-axis units, shown in the x label.
    ylabel : str
        y-axis label.
    xscale, yscale : str
        Axis scale types ('linear' or 'log').
    ylimits : list
        Two-element y-axis limits.
    display : set of str
        Subset of ``nuclides`` to actually plot.
    fig_in : None or matplotlib.figure.Figure
        Figure to draw on, or None to create one.
    axes_in : matplotlib.axes.Axes or None
        Axes to draw on, or None to create them.
    **kwargs
        Extra keyword arguments forwarded to ``axes.plot()``.

    Returns
    -------
    fig : matplotlib.figure.Figure
    axes : matplotlib.axes.Axes
    """
    fig, axes = _check_fig_axes(fig_in, axes_in)
    for row, nuclide in enumerate(nuclides):
        if nuclide not in display:
            continue
        axes.plot(time_points, ydata[row], label=nuclide, **kwargs)
    axes.legend(loc="upper right")
    axes.set(
        xlabel=f"Time ({xunits})",
        ylabel=ylabel,
        xscale=xscale,
        yscale=yscale,
    )
    axes.set_ylim(ylimits)
    return fig, axes
| 27.766497 | 93 | 0.619378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,386 | 0.618787 |
ea4b8a09282f55e57d81cc0d165e7b33f0d9e90a | 8,006 | py | Python | quests/models.py | donvvo/questr-master | 6363ffb4c11ef61f3b6976e75c86a5cbc7f38590 | [
"MIT"
] | null | null | null | quests/models.py | donvvo/questr-master | 6363ffb4c11ef61f3b6976e75c86a5cbc7f38590 | [
"MIT"
] | null | null | null | quests/models.py | donvvo/questr-master | 6363ffb4c11ef61f3b6976e75c86a5cbc7f38590 | [
"MIT"
] | null | null | null |
#All Django Imports
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
#All local imports (libs, contribs, models)
from users.models import *
#All external imports (libs, packages)
import hashlib
import jsonfield
import logging
import pytz
import uuid
# Module-level logger for the quests app.
logger = logging.getLogger(__name__)
# Vehicle-size choices a quest can require (value, human-readable label).
PACKAGE_SELECTION = (
    ('car', 'Car'),
    ('van', 'Van'),
    ('minivan', 'Minivan'),
)
# Quest lifecycle states.
STATUS_SELECTION = (
    ('new', 'New'),
    ('accepted', 'Accepted'),
    ('completed', 'Completed')
)
# Supported cities for pickup/dropoff.
CITY_SELECTION = (
    ('Toronto', 'Toronto'),
    ('Brampton', 'Brampton'),
    ('Markham', 'Markham'),
    ('Mississauga', 'Mississauga'),
    ('Richmond Hill', 'Richmond Hill'),
    ('Vaughan', 'Vaughan'),
    ('Oakville', 'Oakville')
)
def validate_pickuptime(pickup_time):
    """Django field validator: reject pickup times earlier than "now".

    "Now" is taken in the project's configured time zone
    (``settings.TIME_ZONE``).  Raises ``ValidationError`` when
    *pickup_time* lies in the past.
    """
    local_now = timezone.now().astimezone(pytz.timezone(settings.TIME_ZONE))
    if (pickup_time - local_now).total_seconds() < 0:
        raise ValidationError('Pickup time cannot be before current time!')
class Quests(models.Model):
    """A delivery quest posted by a questr, optionally taken by a shipper.

    Holds item/pickup/dropoff details, the reward, lifecycle flags, and
    auto-generated delivery/tracking codes filled in on first save().
    """
    # Calculating delivery code before hand and inserting
    # it as default so that it won't be tampered with.
    # NOTE(review): these three class attributes are evaluated ONCE at import
    # time and shared by all instances; they are not referenced anywhere in
    # the visible code (save() uses get_delivery_code()/get_tracking_number()
    # instead) -- confirm they are still needed.
    hashstring = hashlib.sha256(
        str(timezone.now()) + str(timezone.now()) + str(uuid.uuid4())
    ).hexdigest()
    calc_delivery_code = hashstring[:3]+hashstring[-2:]
    calc_tracking_number = hashstring[10:15]+hashstring[-15:-10]
    # Callable default for DateTimeFields: evaluated per row, not at import.
    current_time = timezone.now
    questrs = models.ForeignKey(QuestrUserProfile, related_name='quests')
    description = models.TextField(_('description'), blank=True)
    title = models.CharField(
        _('title'),
        max_length=100,
        blank=False
    )
    reward = models.DecimalField(
        _('reward'),
        decimal_places=2,
        max_digits=1000)
    item_images = models.ImageField(
        _('item_images'),
        max_length=9999,
        upload_to='quest-item-cdn',
        blank=True
    )
    map_image = models.URLField(
        _('map_image'),
        max_length=9999,
        default=''
    )
    # NOTE(review): default 'New' does not match the lowercase 'new' value in
    # STATUS_SELECTION -- confirm which casing the rest of the app expects.
    status = models.TextField(
        _('status'),
        choices=STATUS_SELECTION,
        default='New'
    )
    creation_date = models.DateTimeField(
        _('creation_date'),
        default=current_time
    )
    # NOTE(review): default "backpack" is not one of the PACKAGE_SELECTION
    # choices (car/van/minivan) -- confirm intended.
    size = models.TextField(
        _('size'),
        choices=PACKAGE_SELECTION,
        default="backpack"
    )
    shipper = models.TextField(
        _('shipper'),
        blank=True,
        null=True
    )
    # Pickup/dropoff address payloads stored as JSON blobs.
    pickup = jsonfield.JSONField(_('pickup'), default={})
    dropoff = jsonfield.JSONField(_('dropoff'), default={})
    isaccepted = models.BooleanField(_('isaccepted'), default=False)
    isnotified = models.BooleanField(_('isnotified'), default=False)
    is_questr_reviewed = models.BooleanField(
        _('is_questr_reviewed'),
        default=False
    )
    is_shipper_reviewed = models.BooleanField(
        _('is_shipper_reviewed'),
        default=False
    )
    is_complete = models.BooleanField(_('is_complete'), default=False)
    ishidden = models.BooleanField(_('ishidden'), default=False)
    distance = models.DecimalField(
        _('distance'),
        decimal_places=2,
        max_digits=1000,
        default=0
    )
    delivery_date = models.DateTimeField(
        _('delivery_date'),
        blank=True,
        null=True
    )
    available_couriers = jsonfield.JSONField(
        _('available_couriers'),
        default={}
    )
    # Filled in automatically by save() when left blank.
    delivery_code = models.TextField(_('delivery_code'), blank=True)
    tracking_number = models.TextField(_('tracking_number'), blank=True)
    pickup_time = models.DateTimeField(
        _('pickup_time'),
        blank=True,
        validators=[validate_pickuptime]
    )
    # NOTE(review): a list default on a TextField looks unintended (mutable,
    # shared default and a non-string value) -- confirm.
    considered_couriers = models.TextField(_('considered_couriers'), default=[])
    def __unicode__(self):
        return str(self.id)
    def get_delivery_code(self):
        """Return a fresh 5-char delivery code derived from a SHA-256 hash."""
        hashstring = hashlib.sha256(
            str(timezone.now()) + str(timezone.now()) + str(uuid.uuid4())
        ).hexdigest()
        return hashstring[:3]+hashstring[-2:]
    def get_tracking_number(self):
        """Return a fresh 10-char tracking number derived from a SHA-256 hash."""
        hashstring = hashlib.sha256(
            str(timezone.now()) + str(timezone.now()) + str(uuid.uuid4())
        ).hexdigest()
        return hashstring[10:15]+hashstring[-15:-10]
    #Overriding
    def save(self, *args, **kwargs):
        """Populate codes and a fallback pickup_time before saving."""
        if not self.delivery_code:
            self.delivery_code = self.get_delivery_code()
        if not self.tracking_number:
            self.tracking_number = self.get_tracking_number()
        if not self.pickup_time:
            logging.warn("no pickup time")
            # Fall back to the creation timestamp when no pickup time given.
            self.pickup_time = self.creation_date
        super(Quests, self).save(*args, **kwargs)
        # self.create_item_images_normal()
class QuestComments(models.Model):
    """A comment left by a questr on a quest."""
    quest = models.ForeignKey(Quests)
    questr = models.ForeignKey(QuestrUserProfile)
    time = models.DateTimeField(_('time'))
    comment = models.TextField(_('comment'))
    def __unicode__(self):
        # NOTE(review): returns the raw id (likely an int) rather than a
        # string -- __unicode__ is expected to return text; confirm.
        return self.id
class QuestTransactional(models.Model):
    """A payment/transaction record tying a quest to its shipper.

    ``quest_code`` is a unique SHA-256 hash generated on first save().
    """
    quest_code = models.CharField(_('quest_code'), max_length=64, unique=True)
    quest = models.ForeignKey(Quests)
    shipper = models.ForeignKey(QuestrUserProfile)
    transaction_type = models.IntegerField(_('transaction_type'), default=1)
    status = models.BooleanField(_('status'), default=False)
    def generate_hash(self):
        """Return a SHA-256 hex digest of the current time + shipper email."""
        # NOTE(review): on Python 3 hashlib requires bytes -- this str
        # concatenation only works on Python 2 (file uses __unicode__);
        # add .encode() if porting.
        return hashlib.sha256(
            str(timezone.now()) + str(self.shipper.email)
        ).hexdigest()
    def get_truncated_quest_code(self):
        """First 7 characters of the quest code (short display form)."""
        return self.quest_code[:7]
    def get_token_id(self):
        """Last 6 characters of the quest code, used as a token id."""
        return self.quest_code[-6:]
    REQUIRED_FIELDS = [
        'quest_code', 'id', 'quest', 'shipper', 'transaction_type']
    def __unicode__(self):
        return "{0}:{1} {2}".format(self.quest_code, self.quest, self.shipper)
    #Overriding
    def save(self, *args, **kwargs):
        """Assign a unique quest_code on first save, then persist."""
        #check if the row with this hash already exists.
        if not self.quest_code:
            self.quest_code = self.generate_hash()
        # self.my_stuff = 'something I want to save in that field'
        super(QuestTransactional, self).save(*args, **kwargs)
class QuestToken(models.Model):
    """A short-lived verification token, valid for two hours."""
    token_id = models.CharField(_('id'), max_length=20, primary_key=True)
    timeframe = models.DateTimeField(_('create_date'), default=timezone.now)
    def is_alive(self):
        """Return True while the token is younger than two hours."""
        timedelta = timezone.now() - self.timeframe
        hours = 2
        allowable_time = float(hours * 60 * 60)
        return timedelta.total_seconds() < allowable_time
    def __unicode__(self):
        return "Token verifying ..."
    # Overriding
    def save(self, *args, **kwargs):
        """Backfill the creation timestamp when missing, then persist."""
        if not self.timeframe:
            self.timeframe = timezone.now()
        super(QuestToken, self).save(*args, **kwargs)
class QuestEvents(models.Model):
    """An event in a quest's timeline, with an optional JSON payload.

    ``event`` is an integer event code (default 1); ``updated_on`` records
    when the event row was last touched.
    """
    # Callable default for the DateTimeField: evaluated per row at save time.
    current_time = timezone.now
    quest = models.ForeignKey(Quests)
    # NOTE(review): max_length is ignored by IntegerField; harmless but
    # could be dropped.
    event = models.IntegerField(_('event'), max_length=2, default=1)
    updated_on = models.DateTimeField(
        _('updated_on'),
        default=current_time
    )
    extrainfo = jsonfield.JSONField(
        _('extrainfo'),
        default='{}',
        max_length=9999
    )
    def save(self, *args, **kwargs):
        """Backfill ``updated_on`` when missing, then persist.

        Bug fix: the original assigned the bare name ``current_time``,
        which is a class attribute and therefore not in scope inside the
        method (NameError at runtime) -- and even if it resolved, it would
        have stored the callable instead of a timestamp.
        """
        if not self.updated_on:
            self.updated_on = timezone.now()
        super(QuestEvents, self).save(*args, **kwargs)
# class QuestPricing(models.Model):
# """Pricing model for quests"""
# current_time = timezone.now
# pricing = jsonfield.JSONField(_('pricing'), default={})
# questrs = models.ForeignKey(QuestrUserProfile, unique=True)
# updated_on = models.DateTimeField(
# _('updated_on'),
# default=current_time
# )
# def save(self, *args, **kwargs):
# if not self.updated_on:
# self.updated_on = current_time
# super(QuestPricing, self).save(*args, **kwargs)
| 29.433824 | 80 | 0.635648 | 6,357 | 0.794029 | 0 | 0 | 0 | 0 | 0 | 0 | 1,758 | 0.219585 |
ea4b97e6ac91091a8e88abbbe3bf71c11b15a7ef | 9,436 | py | Python | game/base_game.py | hilearn/ai-game | 5eead5964fc9a4481317402374b13109e09f56c2 | [
"MIT"
] | null | null | null | game/base_game.py | hilearn/ai-game | 5eead5964fc9a4481317402374b13109e09f56c2 | [
"MIT"
] | 3 | 2021-10-03T08:46:08.000Z | 2021-10-04T18:14:56.000Z | game/base_game.py | hilearn/ai-game | 5eead5964fc9a4481317402374b13109e09f56c2 | [
"MIT"
] | null | null | null | import time
import random
from enum import Enum
from dataclasses import dataclass
from .gameobject import Action, GameObject, Player, Weapon
from .map import Cell, Map
class Direction(Enum):
    """Cardinal movement directions on the game grid."""

    LEFT = 1
    RIGHT = 2
    UP = 3
    DOWN = 4

    def to_action(self):
        """Translate this direction into the matching movement Action."""
        movement = {
            Direction.UP: Action.MOVE_UP,
            Direction.LEFT: Action.MOVE_LEFT,
            Direction.RIGHT: Action.MOVE_RIGHT,
            Direction.DOWN: Action.MOVE_DOWN,
        }
        # Every member is covered, mirroring the original's assert False
        # for an impossible fall-through.
        assert self in movement
        return movement[self]
@dataclass
class Rect:
    """Axis-aligned rectangle in pixel coordinates (edges inclusive)."""
    top: int
    bottom: int
    left: int
    right: int
@dataclass
class ObjectInGame:
    """A GameObject placed on the map with position, facing and size."""
    gameobject: GameObject
    y: int  # top edge, pixels
    x: int  # left edge, pixels
    direction: Direction
    size: tuple[int, int]  # (height, width) in pixels
    @property
    def rect(self):
        """Bounding Rect derived from the current position and size."""
        return Rect(self.y, self.y + self.size[0],
                    self.x, self.x + self.size[1])
@dataclass
class PlayerInGame(ObjectInGame):
    """An in-game player; tracks when it last fired for reload timing."""
    last_shot: float = None  # timestamp of the last shot, None if never fired
@dataclass
class Observation:
    """What a game object is shown each tick: the map, all objects, cell size."""
    map_: Map
    objects: list[ObjectInGame]
    cell_size: int
class Game:
    """Authoritative game loop.

    Owns the map, the in-game objects, tick timing, movement with
    border/wall clamping, weapon spawning, and collision resolution.
    """
    # NOTE(review): FENCE_SIZE is not referenced in this class -- confirm use.
    FENCE_SIZE = 50
    MAX_NUM_TICKS = 2880  # number of ticks till the end of the game
    def __init__(self, map_: Map, players: list[Player], *, ticks_per_sec=24):
        """Set up the play field and spawn *players* on the map's spawn points.

        Args:
            map_: the game map (cells, spawn points, cell size).
            players: participating players; at most one per spawn point.
            ticks_per_sec: tick rate; 0 means run as fast as possible.
        """
        self.map_ = map_
        self.cell_size = self.map_.cell_size
        self.ticks_per_sec = ticks_per_sec
        # Playable area in pixel coordinates (inclusive edges).
        self.borders = Rect(0, self.map_.size()[0] * self.cell_size - 1,
                            0, self.map_.size()[1] * self.cell_size - 1)
        assert (len(self.map_.spawn_points) >= len(players)
                ), "Can't have more players than spawn points"
        self.clear(players)
    def clear(self, players: list[Player]):
        """Reset game state: shuffle spawn points and place players."""
        spawn_points = self.map_.spawn_points[:]  # copy before shuffling
        random.shuffle(spawn_points)
        half_cell = self.cell_size // 2
        quarter_cell = self.cell_size // 4
        # Players are half-cell sized, centered inside their spawn cell.
        self.objects = [
            PlayerInGame(
                player,
                spawn_point[0] * self.cell_size + quarter_cell,
                spawn_point[1] * self.cell_size + quarter_cell,
                Direction.UP,
                (half_cell, half_cell)
            )
            for player, spawn_point in zip(players, spawn_points)
        ]
        self.tick = 0
        if self.ticks_per_sec == 0:
            self.tick_length = 0
        else:
            self.tick_length = 1 / self.ticks_per_sec
    def run(self):
        """Main loop: connect players, then tick until the game ends."""
        for player_obj in self.objects:
            player_obj.gameobject.connect()
        self.end = time.time()
        for self.tick in range(self.MAX_NUM_TICKS):
            # Sleep off the remainder of the previous tick to keep the rate.
            delta = self.end - time.time()
            time.sleep(max(delta, 0))
            if delta < -2:
                # Fell badly behind schedule: re-anchor the clock rather
                # than trying to catch up tick by tick.
                start = time.time()
            else:
                start = self.end
            self.end = start + self.tick_length
            for object_ in self.objects:
                object_.gameobject.observe(self.sight(object_))
            # Iterate a copy: acting can remove objects (hits, kills).
            for object_ in self.objects[:]:
                self.act(object_, object_.gameobject.decide())
            if self.ended:
                break
    def act(self, object_: ObjectInGame, actions: list[Action]):
        """Apply the object's chosen actions (movement and/or shooting)."""
        if actions is None:
            return
        eighth_cell = self.cell_size // 8
        for action in actions:
            speed = object_.gameobject.stats.speed
            if action == Action.NOTHING:
                continue
            elif action == Action.MOVE_LEFT:
                self.move(object_, Direction.LEFT, speed)
            elif action == Action.MOVE_RIGHT:
                self.move(object_, Direction.RIGHT, speed)
            elif action == Action.MOVE_UP:
                self.move(object_, Direction.UP, speed)
            elif action == Action.MOVE_DOWN:
                self.move(object_, Direction.DOWN, speed)
            elif action == Action.SHOOT:
                # Only fire if the weapon's reload time has elapsed.
                if (object_.last_shot is None
                        or self.end - object_.last_shot
                        >= object_.gameobject.stats.reload_time):
                    # Spawn an eighth-cell projectile near the shooter's
                    # center, traveling in the facing direction.
                    self.objects.append(
                        ObjectInGame(
                            object_.gameobject.create_weapon(
                                object_.direction.to_action()),
                            object_.y + 3 * eighth_cell // 2,
                            object_.x + 3 * eighth_cell // 2,
                            object_.direction,
                            (eighth_cell, eighth_cell)
                        )
                    )
                    object_.last_shot = self.end
    def move(self, object_: ObjectInGame, direction: Direction, speed: int):
        """Move *object_* up to *speed* pixels, clamped by borders and walls.

        Weapons whose movement is clamped to zero (blocked) are removed.
        Triggers collision checks after moving.
        """
        object_.direction = direction
        if direction is Direction.LEFT:
            # Clamp to the map border, then to the nearest wall on the left.
            speed = min(speed, object_.rect.left - self.borders.left)
            if Cell.WALL in {self.get_cell(object_.rect.left - speed,
                                           object_.rect.top),
                             self.get_cell(object_.rect.left - speed,
                                           object_.rect.bottom)}:
                speed2 = object_.rect.left % self.cell_size
                speed = min(speed, speed2)
        elif direction is Direction.RIGHT:
            speed = min(speed, self.borders.right - object_.rect.right)
            if Cell.WALL in {self.get_cell(object_.rect.right + speed,
                                           object_.rect.top),
                             self.get_cell(object_.rect.right + speed,
                                           object_.rect.bottom)}:
                speed2 = self.cell_size - object_.rect.right % self.cell_size
                if speed2 == self.cell_size:
                    # Already flush against the cell boundary.
                    speed2 = 0
                speed = min(speed, speed2)
        elif direction is Direction.UP:
            speed = min(speed, object_.rect.top - self.borders.top)
            if Cell.WALL in {self.get_cell(object_.rect.right,
                                           object_.rect.top - speed),
                             self.get_cell(object_.rect.left,
                                           object_.rect.top - speed)}:
                speed2 = object_.rect.top % self.cell_size
                speed = min(speed, speed2)
        elif direction is Direction.DOWN:
            speed = min(speed, self.borders.bottom - object_.rect.bottom)
            if Cell.WALL in {self.get_cell(object_.rect.right,
                                           object_.rect.bottom + speed),
                             self.get_cell(object_.rect.left,
                                           object_.rect.bottom + speed)}:
                speed2 = self.cell_size - object_.rect.bottom % self.cell_size
                if speed2 == self.cell_size:
                    speed2 = 0
                speed = min(speed, speed2)
        if speed == 0:
            if isinstance(object_.gameobject, Weapon):
                # A projectile stopped by a wall/border disappears.
                self.objects.remove(object_)
            return
        if direction is Direction.LEFT:
            yx = (0, -speed)
        elif direction is Direction.RIGHT:
            yx = (0, speed)
        elif direction is Direction.UP:
            yx = (-speed, 0)
        elif direction is Direction.DOWN:
            yx = (speed, 0)
        object_.x += yx[1]
        object_.y += yx[0]
        self.triggers(object_)
    def get_cell(self, x, y):
        """Return the map cell under pixel coordinate (x, y)."""
        return self.map_[y // self.cell_size][x // self.cell_size]
    def triggers(self, object_: ObjectInGame):
        """Check *object_* against every other object for collisions."""
        for other in self.objects:
            if other is object_:
                continue
            self.collide(object_, other)
    def collide(self, obj1: ObjectInGame, obj2: ObjectInGame):
        """Resolve a weapon/player overlap: apply damage, remove objects.

        Pairs that are not (weapon, player) are ignored, as are weapons
        hitting their own shooter.
        """
        if (isinstance(obj1.gameobject, Weapon) and
                isinstance(obj2.gameobject, Player)):
            player = obj2.gameobject
            weapon = obj1.gameobject
            weapon_obj = obj1
            player_obj = obj2
        elif (isinstance(obj2.gameobject, Weapon) and
                isinstance(obj1.gameobject, Player)):
            player = obj1.gameobject
            weapon = obj2.gameobject
            weapon_obj = obj2
            player_obj = obj1
        else:
            return
        if weapon.player is not player and self.hit(obj1, obj2):
            player.damage(weapon)
            if weapon_obj in self.objects:
                # in case weapon hits two people
                self.objects.remove(weapon_obj)
            if player.health <= 0:
                self.objects.remove(player_obj)
                weapon.player.kill()
            else:
                weapon.player.hit()
    @staticmethod
    def hit(first: ObjectInGame, second: ObjectInGame):
        """Axis-aligned rectangle overlap test between two objects."""
        # If one rectangle is on left side of other
        if (first.rect.left > second.rect.right) or (
                second.rect.left > first.rect.right):
            return False
        # If one rectangle is above other
        if (first.rect.bottom < second.rect.top) or (
                second.rect.bottom < first.rect.top):
            return False
        return True
    def sight(self, object_in_game: ObjectInGame) -> Observation:
        """Observation handed to an object each tick.

        Currently returns full knowledge (whole map and all objects)
        regardless of which object is asking.
        """
        return Observation(self.map_, self.objects, self.cell_size)
    @property
    def ended(self):
        """True once the tick budget is spent or only one object remains."""
        return ((self.tick >= self.MAX_NUM_TICKS)
                or len(self.objects) == 1)
class RemoteGame(Game):
    """Placeholder for a Game variant driven over a remote connection."""
    pass
class TrainableGame(Game):
    """Game variant intended for training runs (no real-time pacing)."""
    # NOTE(review): TICK_SIZE is not referenced by Game (which uses
    # tick_length / ticks_per_sec) -- confirm this constant is wired up.
    TICK_SIZE = 0
| 34.312727 | 78 | 0.533171 | 9,200 | 0.974989 | 0 | 0 | 1,115 | 0.118164 | 0 | 0 | 221 | 0.023421 |
ea4c18689ee08c3f8d8b39e185c87dfd9d7065ed | 10,313 | py | Python | AgendaContato/lib.py | jooaomarcos/Projeto-Agenda-de-Contato-em-Python | 18b4aba4c5b5232951d8861fd67e7562160165c3 | [
"MIT"
] | null | null | null | AgendaContato/lib.py | jooaomarcos/Projeto-Agenda-de-Contato-em-Python | 18b4aba4c5b5232951d8861fd67e7562160165c3 | [
"MIT"
] | null | null | null | AgendaContato/lib.py | jooaomarcos/Projeto-Agenda-de-Contato-em-Python | 18b4aba4c5b5232951d8861fd67e7562160165c3 | [
"MIT"
] | null | null | null | from banco import con
from time import sleep
import os
# Validação de Valor Inteiro
def leiaint(valor):
    """Prompt with *valor* until the user types a valid integer; return it.

    Any failure (non-numeric text, interrupted input) prints a warning in
    yellow and re-prompts, matching the original's catch-all behaviour.
    """
    while True:
        try:
            numero = int(input(valor))
        except:  # noqa: E722 -- deliberately retries on any failure
            print('\033[1;33mDigite um valor inteiro\033[m')
        else:
            return numero
# Validação de String
def leiaTexto(txt):
    """Prompt with *txt* until the user types non-numeric text; return it.

    Bug fix: the original wrapped ``input`` in ``try/except`` and placed
    both the numeric check and the ``break`` inside the ``except`` branch,
    which never runs for normal input -- so the loop never terminated (and
    ``ent`` could be referenced before assignment).  The validation now
    runs on the success path: purely numeric answers print a warning and
    re-prompt; anything else is returned.
    """
    while True:
        ent = str(input(txt))
        if ent.isnumeric():
            print('\033[1;33mDigite um texto válido\033[m')
        else:
            return ent
# Cabecalho
def cabecalho(msg):
    """Print a 40-column header: dashed rule, *msg* centered and
    upper-cased, then another dashed rule."""
    linha = '-' * 40
    print(linha)
    print(msg.center(40).upper())
    print(linha)
# Menu Principal
def menuprincipal():
    """Print the main menu of the contact agenda."""
    opcoes = '''
    [1] - Inserir Contato
    [2] - Listar Contatos
    [3] - Consultar Contato
    [4] - Editar Contato
    [5] - Excluir
    [6] - Sair
    '''
    print(opcoes)
# Inserir Contato
def insertContato():
    """Interactively collect a new contact and insert it into ``contato``.

    Prompts (each loops until valid): registro (exactly 5 chars), nome,
    chapa (5 chars), funcao, periodo (1 or 2), telefone 1 (11-14 chars)
    and an optional telefone 2.  Skips the insert if the registro already
    exists.

    NOTE(review): all SQL here is built by concatenating user input --
    vulnerable to SQL injection; prefer parameterized queries if the
    driver supports them.
    """
    cabecalho('NOVO CONTATO')
    try:
        # Field-by-field validation loops; each re-prompts until valid.
        while True:
            regs = leiaTexto('Nº REGISTRO: ').strip()
            if len(regs) < 5 or len(regs) > 5:
                print('\033[1;33mPor favor, insira um registro válido\033[m')
            else:
                break
        while True:
            nome = leiaTexto('NOME: ').strip().title()
            if len(nome) == 0 or nome.isnumeric():
                print('\033[1;33mPreencha o campo\033[m')
            else:
                break
        while True:
            matr = leiaTexto('CHAPA: ').strip().upper()
            if len(matr) <= 4 or len(matr) > 5:
                print('\033[1;33mPor favor, insira uma matricula válida\033[m')
            else:
                break
        while True:
            func = leiaTexto('FUNÇÃO: ').strip().title()
            if len(func) == 0 or func.isnumeric():
                print('\033[1;33mPreencha o campo\033[m')
            else:
                break
        while True:
            period = leiaint('PERÍODO: ')
            if period < 1 or period > 2:
                print('\033[1;33mPor favor, insira um período corretamente\033[m')
            else:
                break
        while True:
            tel = leiaTexto('TELEFONE 1: ').strip()
            if len(tel) < 11 or len(tel) > 14:
                print('\033[1;33mPor favor, Insira um telefone válido\033[m')
            else:
                break
        while True:
            tel_2 = leiaTexto('TELEFONE 2: ').strip()
            if len(tel_2) > 14:
                print('\033[1;33mTelefone Inválido\033[m')
            else:
                break
    except:
        print('\033[1;31mErro na Inserção de dados\033[m')
    else:
        try:
            c = con.cursor()
        except ConnectionError:
            print('\033[1;31mErro na conexão com o banco de dados\033[m')
        else:
            try:
                # Check whether the registro is already taken.
                ssql = 'SELECT * FROM contato WHERE registro= "'+regs+'"'
                c.execute(ssql)
                inserir = c.fetchall()
            except:
                print('\033[1;33mErro na conferência\033[m')
            else:
                if inserir:
                    print('\033[1;33mCONTATO JÁ EXISTE\033[m')
                else:
                    try:
                        sql = 'INSERT INTO contato(registro, nome, matricula, funcao, periodo, telefone, telefone_2) SELECT "'+regs+'", "'+nome+'", "'+matr+'", "'+func+'", "'+str(period)+'", "'+tel+'", "'+tel_2+'" WHERE NOT EXISTS (SELECT 1 FROM contato WHERE registro = "'+regs+'")'
                        c.execute(sql)
                    except:
                        print(f'Erro ao inserir contato')
                    else:
                        print('\033[1;32mCONTATO INSERIDO COM SUCESSO!\033[m')
                        con.commit()
# Listar Contatos
def listarContatos():
    """List every contact ordered by registro, paginated 30 per screen.

    After the listing, blocks until the user presses 8, then clears the
    console.  Uses Windows-specific ``os.system('pause'/'cls')``.
    """
    cabecalho('LISTAR CONTATOS')
    try:
        c = con.cursor()
    except ConnectionError:
        print('\033[1;31mErro na conexão com o banco de dados\033[m')
    else:
        try:
            lsql = 'SELECT * FROM contato ORDER BY registro asc'
            c.execute(lsql)
        except:
            print('\033[1;33mErro ao listar contatos\033[m')
        else:
            dados = c.fetchall()
            contador = 0
            limite = 30
            for d in dados:
                print(f'\033[1;36mNº REGISTRO:\033[m{d[1]} \033[1;36mNOME:\033[m{d[2]:<32} \033[1;36mCHAPA:\033[m{d[3]} \033[1;36mFUNÇÃO:\033[m{d[4]:<10} \033[1;36mPERÍODO:\033[m{d[5]} \033[1;36mTELEFONE:\033[m{d[6]} \033[1;36mTELEFONE 2:\033[m{d[7]}')
                print()
                contador += 1
                # Pause and clear the screen every `limite` rows.
                if contador > limite:
                    contador = 0
                    os.system('pause')
                    os.system('cls')
            con.commit()
    # Hold the listing on screen until the user asks to go back.
    while True:
        v = leiaint('PRESSIONE 8 PARA VOLTAR AO MENU: ')
        if v < 8 or v > 8 :
            print('\033[1;33mpressione a tecla correta\033[m')
        else:
            break
    os.system('cls')
# Consultar Contato
def consContato():
    """Look up and print a single contact by its 5-char registro.

    NOTE(review): the SELECT is built by string concatenation of user
    input -- vulnerable to SQL injection; prefer a parameterized query.
    """
    cabecalho('CONSULTAR CONTATO')
    try:
        # Re-prompt until the registro is exactly 5 characters.
        while True:
            regs = leiaTexto('Nº REGISTRO: ').strip()
            if len(regs) < 5 or len(regs) > 5:
                print('\033[1;33mPor favor, insira um registro válido\033[m')
            else:
                break
    except:
        print('\033[1;31mErro na consulta do contato\033[m')
    else:
        try:
            c = con.cursor()
        except ConnectionError:
            print('\033[1;31mErro na conexão com o banco de dados\033[m')
        else:
            try:
                csql = 'SELECT * FROM contato WHERE registro = "'+regs+'"'
                c.execute(csql)
                mostra = c.fetchall()
            except:
                print('\033[1;33mErro ao Consultar Contato\033[m')
            else:
                if mostra:
                    for m in mostra:
                        print(f'\033[1;36mNº REGISTRO:\033[m{m[1]} \033[1;36mNOME:\033[m{m[2]} \033[1;36mCHAPA:\033[m{m[3]} \033[1;36mFUNÇÃO:\033[m{m[4]:^<8} \033[1;36mPERÍODO:\033[m{m[5]} \033[1;36mTELEFONE:\033[m{m[6]} \033[1;36mTELEFONE 2:\033[m{m[7]}')
                else:
                    print('\033[1;33mESSE CONTATO NÃO ESTÁ CADASTRADO\033[m')
                con.commit()
# Editar Contato
def editContato():
    """Update a contact's periodo/telefones, selected by registro.

    Only the mutable fields (periodo, telefone, telefone_2) can be
    edited; the registro identifies the row.

    NOTE(review): UPDATE/SELECT are built by string concatenation of
    user input -- vulnerable to SQL injection; prefer parameterized
    queries.
    """
    cabecalho('EDITAR CONTATO')
    try:
        while True:
            regs = leiaTexto('Nº REGISTRO: ').strip()
            if len(regs) < 5 or len(regs) > 5:
                print('\033[1;33mPor favor, digite um registro válido\033[m')
            else:
                break
    except:
        print('\033[1;33mErro no contato\033[m')
    else:
        try:
            c = con.cursor()
        except:
            print('\033[1;31mErro na Conexão com Banco de Dados\033[m')
        else:
            try:
                # Fetch first to confirm the contact exists before editing.
                sql = 'SELECT * FROM contato WHERE registro = "'+regs+'"'
                c.execute(sql)
                mostra = c.fetchall()
            except:
                print('\033[1;33mErro na busca do contato\033[m')
            else:
                if mostra:
                    while True:
                        period = leiaint('PERÍODO: ')
                        if period < 1 or period > 2:
                            print('\033[1;33mPor favor, insira um período corretamente\033[m')
                        else:
                            break
                    while True:
                        tel = leiaTexto('TELEFONE 1: ').strip()
                        if len(tel) < 11 or len(tel) > 14:
                            print('\033[1;33mPor favor, Insira um telefone válido\033[m')
                        else:
                            break
                    while True:
                        tel_2 = leiaTexto('TELEFONE 2: ').strip()
                        if len(tel_2) > 14:
                            print('\033[1;33mTelefone Inválido\033[m')
                        else:
                            break
                    esql = 'UPDATE contato SET periodo="'+str(period)+'", telefone="'+tel+'", telefone_2="'+tel_2+'" WHERE registro= "'+regs+'"'
                    c.execute(esql)
                    con.commit()
                    print('\033[1;32mCONTATO ALTERADO COM SUCESSO!\033[m')
                    sleep(1)
                else:
                    print('\033[1;33mCONTATO NÃO ESTÁ CADASTRADO\033[m')
# Deletar Contato
def apagaContato():
    """Delete a contact by registro after an S/N confirmation prompt.

    Bug fixes over the original:
    * ``.upper()[0]`` raised ``IndexError`` when the user just pressed
      Enter at the confirmation prompt; ``[:1]`` never raises.
    * ``resp not in 'SN'`` accepted the empty string (``'' in 'SN'`` is
      True), which would then fall through ``resp in 'S'`` and delete the
      row; the explicit ``('S', 'N')`` membership test re-asks instead.
    """
    cabecalho('APAGAR CONTATO')
    try:
        while True:
            regs = leiaTexto('Nº Registro que deseja apagar o contato: ').strip()
            if len(regs) < 5 or len(regs) > 5:
                print('\033[1;33mPor favor, digite um registro válido\033[m')
            else:
                break
    except:
        print('\033[1;33mErro na busca do contato\033[m')
    else:
        try:
            c = con.cursor()
        except ConnectionError:
            print('\033[1;31mErro na conexão com o banco de dados\033[m')
        else:
            try:
                # NOTE(review): SQL built by string concatenation of user
                # input -- prefer a parameterized query if supported.
                sql = 'SELECT * FROM contato WHERE registro = "'+regs+'"'
                c.execute(sql)
                mostra = c.fetchall()
            except:
                print('\033[1;33mErro na busca do contato\033[m')
            else:
                while True:
                    # [:1] is safe on empty input, unlike [0].
                    resp = leiaTexto('Tem certeza que deseja apagar o registro [S/N] ?: ').strip().upper()[:1]
                    if resp not in ('S', 'N'):
                        print('Responda')
                    else:
                        break
                if resp == 'S':
                    if mostra:
                        try:
                            dsql = 'DELETE FROM contato WHERE registro = "'+regs+'"'
                            c.execute(dsql)
                        except:
                            print('\033[1;33mErro ao deletar contato\033[m')
                        else:
                            print('\033[1;32mCONTATO DELETADO COM SUCESSO!\033[m')
                            con.commit()
                    else:
                        print('\033[1;33mCONTATO NÃO ESTÁ CADASTRADO\033[m')
                else:
                    print('nada deletado')
| 36.441696 | 283 | 0.456511 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,585 | 0.346042 |
ea4c67653d6bf178d84a235b1ed66aa70ed28be1 | 519 | py | Python | demos/reusable/hello/src/hello/web/tests/test_views.py | akornatskyy/wheezy.web | 417834db697cf1f78f3a60cc880b9fd25d40c6de | [
"MIT"
] | 17 | 2020-08-29T18:45:51.000Z | 2022-03-02T19:37:13.000Z | demos/reusable/hello/src/hello/web/tests/test_views.py | akornatskyy/wheezy.web | 417834db697cf1f78f3a60cc880b9fd25d40c6de | [
"MIT"
] | 29 | 2020-07-18T04:34:03.000Z | 2021-07-06T09:42:36.000Z | demos/reusable/hello/src/hello/web/tests/test_views.py | akornatskyy/wheezy.web | 417834db697cf1f78f3a60cc880b9fd25d40c6de | [
"MIT"
] | 1 | 2022-03-14T08:41:42.000Z | 2022-03-14T08:41:42.000Z | import unittest
from app import main
from wheezy.http.functional import WSGIClient
# Resolve the application's reverse-routing helper once at import time.
path_for = main.options["path_for"]
class HelloTestCase(unittest.TestCase):
    """Functional tests for the ``hello`` web application."""

    def setUp(self):
        self.client = WSGIClient(main)

    def tearDown(self):
        self.client = None

    def path_for(self, name, **kwargs):
        """Absolute URL for a route in the ``hello`` namespace."""
        route = path_for("hello:" + name, **kwargs)
        return "/" + route

    def test_welcome(self):
        """Welcome page responds 200 and greets the world."""
        status = self.client.get(self.path_for("welcome"))
        assert status == 200
        assert "Hello World!" in self.client.content
| 23.590909 | 63 | 0.666667 | 395 | 0.761079 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.084778 |
ea4caab7d834c695bbaddfe9cef2659ee71e7d1c | 713 | py | Python | support/distribute/binbuild/build/make_cert_links.py | rknop/amuse | 85d5bdcc29cfc87dc69d91c264101fafd6658aec | [
"Apache-2.0"
] | 131 | 2015-06-04T09:06:57.000Z | 2022-02-01T12:11:29.000Z | support/distribute/binbuild/build/make_cert_links.py | rknop/amuse | 85d5bdcc29cfc87dc69d91c264101fafd6658aec | [
"Apache-2.0"
] | 690 | 2015-10-17T12:18:08.000Z | 2022-03-31T16:15:58.000Z | support/distribute/binbuild/build/make_cert_links.py | rieder/amuse | 3ac3b6b8f922643657279ddee5c8ab3fc0440d5e | [
"Apache-2.0"
] | 102 | 2015-01-22T10:00:29.000Z | 2022-02-09T13:29:43.000Z | import os
import os.path
import subprocess
import sys
if __name__ == "__main__":
    # Create OpenSSL-style hash symlinks (<subject-hash>.0 -> cert.crt) for
    # every .crt file in the directory given as the first argument.
    dirname = sys.argv[1]
    for x in os.listdir(dirname):
        if not x.endswith('.crt'):
            continue
        filename = os.path.join(dirname, x)
        try:
            # Bug fix: check_output returns bytes on Python 3; the original
            # concatenated bytes + '.0', raising TypeError that the bare
            # except silently swallowed -- so no links were ever created.
            filehash = subprocess.check_output(
                ['openssl', 'x509', '-noout', '-hash', '-in', filename]
            ).decode('ascii').strip() + '.0'
            hash_filename = os.path.join(dirname, filehash)
            if os.path.exists(hash_filename):
                # Replace an existing link so it points at this cert.
                print(x, filehash)
                os.remove(hash_filename)
            os.symlink(x, hash_filename)
        except Exception:
            # Narrowed from a bare except so Ctrl-C still interrupts.
            print("error in handling file:", filename)
| 33.952381 | 115 | 0.530154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.112202 |
ea4cdc13bf9b75390076a5ba97dd800d3216b133 | 2,525 | py | Python | tests/pipeline/nodes/augment/test_contrast.py | ericleehy/PeekingDuck | 8cf1be842235fa60bac13bc466cac09747a780ea | [
"Apache-2.0"
] | 1 | 2021-12-02T05:15:58.000Z | 2021-12-02T05:15:58.000Z | tests/pipeline/nodes/augment/test_contrast.py | ericleehy/PeekingDuck | 8cf1be842235fa60bac13bc466cac09747a780ea | [
"Apache-2.0"
] | null | null | null | tests/pipeline/nodes/augment/test_contrast.py | ericleehy/PeekingDuck | 8cf1be842235fa60bac13bc466cac09747a780ea | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 AI Singapore
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test for augment contrast node
"""
import numpy as np
import pytest
from peekingduck.pipeline.nodes.augment.contrast import Node
@pytest.fixture
def contrast_same():
    """Contrast node with alpha=1.0, i.e. an identity transform."""
    node = Node({"input": ["img"], "output": ["img"], "alpha": 1.0})
    return node
@pytest.fixture
def contrast_increase():
    """Contrast node with alpha=2.0, doubling pixel intensities."""
    node = Node({"input": ["img"], "output": ["img"], "alpha": 2.0})
    return node
class TestContrast:
    """Unit tests for the augment.contrast node."""
    def test_no_change(self, contrast_same, create_image):
        """alpha=1.0 must leave the image untouched."""
        original_img = create_image((28, 28, 3))
        input1 = {"img": original_img}
        results = contrast_same.run(input1)
        np.testing.assert_equal(original_img, results["img"])
    def test_increase_contrast(self, contrast_increase):
        """alpha=2.0 doubles pixel values while preserving shape."""
        original_img = np.ones(shape=(28, 28, 3), dtype=np.uint8)
        input1 = {"img": original_img}
        results = contrast_increase.run(input1)
        assert original_img.shape == results["img"].shape
        # The output must differ from the input...
        with pytest.raises(AssertionError):
            np.testing.assert_equal(original_img, results["img"])
        # ...and specifically be the input scaled by alpha.
        np.testing.assert_equal(results["img"][0][0], original_img[0][0] * 2)
    def test_overflow(self, contrast_increase):
        """Scaled values above 255 must saturate, not wrap around."""
        # Test positive overflow - any values that sum up to higher than 255 will
        # be clipped at 255
        bright_img = np.ones(shape=(28, 28, 3), dtype=np.uint8) * 250
        bright_input = {"img": bright_img}
        results = contrast_increase.run(bright_input)
        np.testing.assert_equal(results["img"][0][0], np.array([255, 255, 255]))
    def test_beta_range(self):
        """alpha outside [0.0, 3.0] is rejected at construction time."""
        with pytest.raises(ValueError) as excinfo:
            Node({"input": ["img"], "output": ["img"], "alpha": -0.5})
        assert str(excinfo.value) == "alpha must be between [0.0, 3.0]"
        with pytest.raises(ValueError) as excinfo:
            Node({"input": ["img"], "output": ["img"], "alpha": 3.1})
        assert str(excinfo.value) == "alpha must be between [0.0, 3.0]"
| 36.071429 | 81 | 0.662574 | 1,557 | 0.616634 | 0 | 0 | 246 | 0.097426 | 0 | 0 | 930 | 0.368317 |
ea4dead28982f46428283499653ee29c5df82988 | 5,343 | py | Python | DeformationLearningSolver/scripts/DLS/core/fnData.py | WebberHuang/DeformationLearningSolver | c58f2c7eb8e4f172948de9acd2b3e6cb39bb8bc2 | [
"BSD-3-Clause"
] | 160 | 2015-12-29T15:11:36.000Z | 2022-02-22T11:40:11.000Z | DeformationLearningSolver/scripts/DLS/core/fnData.py | wentingwei/DeformationLearningSolver | 9c9f42678f2fb1c7a0318527a616b171064130b5 | [
"BSD-3-Clause"
] | 2 | 2016-08-05T10:54:31.000Z | 2017-09-04T21:34:42.000Z | DeformationLearningSolver/scripts/DLS/core/fnData.py | wentingwei/DeformationLearningSolver | 9c9f42678f2fb1c7a0318527a616b171064130b5 | [
"BSD-3-Clause"
] | 51 | 2015-12-29T16:03:38.000Z | 2022-01-03T15:02:24.000Z | __author__ = "Webber Huang"
__contact__ = "xracz.fx@gmail.com"
__website__ = "http://riggingtd.com"
import maya.cmds as cmds
import maya.OpenMaya as om
import maya.OpenMayaAnim as oma
from DLS.core import utils
class FnSkinCluster(object):
    def __init__(self, skinCluster=None):
        """Wrap a Maya skinCluster node with an MFnSkinCluster function set.

        Args:
            skinCluster (str, Optional): Name of the skinCluster node.
                Defaults to None.
        """
        self.skinCluster = skinCluster
        # NOTE(review): self.fn is only created when a cluster is given;
        # other methods assume setSkinCluster() was called first otherwise.
        if skinCluster:
            self.fn = oma.MFnSkinCluster(utils.getDependNode(skinCluster))
    def setSkinCluster(self, skinCluster):
        """Rebind this wrapper to another skinCluster node.

        Args:
            skinCluster (str): Name of the skinCluster node.

        Returns:
            FnSkinCluster: self, to allow chained calls.
        """
        self.skinCluster = skinCluster
        self.fn = oma.MFnSkinCluster(utils.getDependNode(skinCluster))
        return self
    def getLogicalInfluenceIndex(self,influence):
        """Logical index of *influence* in the skinCluster's influence list.

        Args:
            influence (str): Influence (joint) name.

        Returns:
            int: Logical index as reported by MFnSkinCluster.

        Raises:
            utils.UserInputError: When the influence cannot be resolved.
        """
        try:
            dagPath = utils.getDagPath(influence)
        # NOTE(review): bare except hides the underlying Maya error; a
        # narrower exception type would aid debugging.
        except:
            raise utils.UserInputError("Could not find influence '%s' in %s" %
                                       (influence, self.skinCluster))
        return self.fn.indexForInfluenceObject(dagPath)
#----------------------------------------------------------------------
def getPhysicalInfluenceIndex(self, influence):
"""
Args:
influence (str)
Returns:
int
"""
matrices = cmds.listConnections("%s.matrix" % self.skinCluster, s=1, d=0)
return matrices.index(influence)
#----------------------------------------------------------------------
def getInfluenceData(self, influence):
"""
Args:
influence (str)
Returns:
WeightData
"""
try:
dagPath = utils.getDagPath(influence)
except:
raise utils.UserInputError("Could not find influence '%s' in %s" %
(influence, self.skinCluster))
selList = om.MSelectionList()
weights = om.MDoubleArray()
self.fn.getPointsAffectedByInfluence(dagPath, selList, weights)
componentStr = []
selList.getSelectionStrings(componentStr)
componentStr = cmds.ls(componentStr, ap=1, fl=1)
weights = [w for w in weights]
return WeightData(componentStr, weights)
#----------------------------------------------------------------------
def listInfluences(self, asDagPath=True):
"""
Returns:
list
"""
dagPaths = om.MDagPathArray()
self.fn.influenceObjects(dagPaths)
if asDagPath: return dagPaths
else: return [dagPaths[i].partialPathName() for i in xrange(dagPaths.length())]
#----------------------------------------------------------------------
def getWeightData(self, elements):
"""
Args:
elements (list)
Returns:
SkinWeightData
"""
dagPath, components = utils.getDagPathComponents(elements)
# Get all influences
infs = self.listInfluences(asDagPath=False)
influenceIndices = om.MIntArray()
[influenceIndices.append(self.getPhysicalInfluenceIndex(inf)) for inf in infs]
# Get all weights
weights = om.MDoubleArray()
self.fn.getWeights(dagPath, components, influenceIndices, weights)
weights = [w for w in weights]
return SkinWeightData(elements, infs, weights)
#----------------------------------------------------------------------
def setWeightData(self, data, normalize=True):
"""
Args:
data (SkinWeightData)
normalize (bool, Optional): Defaults to True
"""
# Construct dagPath and components
compList = data.getComponents()
dagPath, components = utils.getDagPathComponents(compList)
# Construct influence indices
influenceIndices = om.MIntArray()
[influenceIndices.append(self.getPhysicalInfluenceIndex(inf)) for inf in data.getInfluences()]
# Construct weights
weights = om.MDoubleArray()
[weights.append(w) for w in data.getWeights()]
oldValues = om.MDoubleArray()
self.fn.getWeights(dagPath, components, influenceIndices, oldValues)
self.fn.setWeights(dagPath, components, influenceIndices, weights, normalize, oldValues)
#----------------------------------------------------------------------
def flushWeights(self, influence):
"""
Args:
influence (str)
"""
weightData = self.getInfluenceData(influence)
skinData = SkinWeightData(weightData.getElements(), [influence], weightData.getWeights())
[skinData.addInfluence(comp, influence, 0.0) for comp in skinData.getComponents()]
self.setWeightData(skinData)
#----------------------------------------------------------------------
def getInfluenceTransforms(self, space=om.MSpace.kObject):
infs = self.listInfluences()
if space == om.MSpace.kWorld:
return [infs[i].inclusiveMatrix() for i in xrange(infs.length())]
return [om.MFnTransform(infs[i]).transformation().asMatrix()
for i in xrange(infs.length())] | 32.381818 | 102 | 0.546509 | 5,128 | 0.95976 | 0 | 0 | 0 | 0 | 0 | 0 | 1,553 | 0.290661 |
ea4f5bbe5c9fe40d0892aa0b04a02c7abbbc8a2d | 64 | py | Python | src/dump1090exporter/__init__.py | bgulla/dump1090-exporter | 147f23451a0607b72a320e2901160f3f747b3a56 | [
"MIT"
] | 60 | 2016-10-26T11:11:40.000Z | 2022-03-06T14:32:44.000Z | src/dump1090exporter/__init__.py | bgulla/dump1090-exporter | 147f23451a0607b72a320e2901160f3f747b3a56 | [
"MIT"
] | 20 | 2017-06-14T06:09:17.000Z | 2022-03-16T01:20:59.000Z | src/dump1090exporter/__init__.py | bgulla/dump1090-exporter | 147f23451a0607b72a320e2901160f3f747b3a56 | [
"MIT"
] | 14 | 2017-07-03T19:18:38.000Z | 2022-02-04T14:38:54.000Z | from .exporter import Dump1090Exporter
__version__ = "21.10.0"
| 16 | 38 | 0.78125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.140625 |
ea4f94dc62eb152286ddb45f21d8cbc92ac2d88f | 1,697 | py | Python | sample_data/make_sample_data.py | mhernan88/reshape_tools | e08de72629079457f4194a8a14dbb8641b5b0a13 | [
"MIT"
] | null | null | null | sample_data/make_sample_data.py | mhernan88/reshape_tools | e08de72629079457f4194a8a14dbb8641b5b0a13 | [
"MIT"
] | null | null | null | sample_data/make_sample_data.py | mhernan88/reshape_tools | e08de72629079457f4194a8a14dbb8641b5b0a13 | [
"MIT"
] | null | null | null | import pandas as pd
from datetime import datetime
def sample_data1() -> pd.DataFrame:
    """Return a small daily sample frame.

    Columns:
        times: POSIX epoch seconds for 2020-11-01..05 and 2020-12-06..10.
        val1: integers 1..10.
        val2: val1 scaled by 20.
    """
    stamps = [datetime(2020, 11, day) for day in range(1, 6)]
    stamps += [datetime(2020, 12, day) for day in range(6, 11)]
    first_values = list(range(1, 11))
    second_values = [20 * v for v in first_values]
    frame = pd.DataFrame(
        {"times": stamps, "val1": first_values, "val2": second_values}
    )
    # Convert naive timestamps to seconds since the Unix epoch.
    frame["times"] = (frame["times"] - datetime(1970, 1, 1)).dt.total_seconds()
    return frame
def sample_data2() -> pd.DataFrame:
    """Return a small hourly sample frame.

    Columns:
        times: POSIX epoch seconds for hours 08:00-12:00 on 2020-11-01
            and 2020-11-02.
        val1: integers 1..10.
        val2: val1 scaled by 20.
    """
    stamps = [
        datetime(2020, 11, day, hour)
        for day in (1, 2)
        for hour in range(8, 13)
    ]
    first_values = list(range(1, 11))
    second_values = [20 * v for v in first_values]
    frame = pd.DataFrame(
        {"times": stamps, "val1": first_values, "val2": second_values}
    )
    # Convert naive timestamps to seconds since the Unix epoch.
    frame["times"] = (frame["times"] - datetime(1970, 1, 1)).dt.total_seconds()
    return frame
| 37.711111 | 75 | 0.5769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.038892 |
ea5034df827000a7c021ce8d10922be02ea67910 | 70 | py | Python | chatrender/celery.py | The-Politico/django-politico-slackchat-renderer | adb3ed2ba5039a97ee7b021d39aa40cab11e5661 | [
"MIT"
] | 2 | 2018-07-02T16:49:35.000Z | 2018-07-09T03:52:28.000Z | chatrender/celery.py | The-Politico/django-politico-slackchat-renderer | adb3ed2ba5039a97ee7b021d39aa40cab11e5661 | [
"MIT"
] | 42 | 2018-02-14T21:28:54.000Z | 2022-02-10T18:30:58.000Z | chatrender/celery.py | The-Politico/django-politico-slackchat-renderer | adb3ed2ba5039a97ee7b021d39aa40cab11e5661 | [
"MIT"
] | null | null | null | # flake8: noqa
from chatrender.tasks.publish import publish_slackchat
| 23.333333 | 54 | 0.842857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.2 |
ea507ff5ce0b048fae3891ba72d4dee04d5ab84a | 5,242 | py | Python | disentanglement_lib/methods/unsupervised/unsupervised_train.py | erow/disentanglement_lib | c875207fdeadc44880277542447544941bc0bd0a | [
"Apache-2.0"
] | null | null | null | disentanglement_lib/methods/unsupervised/unsupervised_train.py | erow/disentanglement_lib | c875207fdeadc44880277542447544941bc0bd0a | [
"Apache-2.0"
] | null | null | null | disentanglement_lib/methods/unsupervised/unsupervised_train.py | erow/disentanglement_lib | c875207fdeadc44880277542447544941bc0bd0a | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main training protocol used for unsupervised disentanglement models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from disentanglement_lib.data.ground_truth import named_data
from disentanglement_lib.data.ground_truth import util
from disentanglement_lib.data.ground_truth.ground_truth_data import *
from disentanglement_lib.methods.shared import losses
from disentanglement_lib.methods.unsupervised import gaussian_encoder_model
from disentanglement_lib.methods.unsupervised import model # pylint: disable=unused-import
from disentanglement_lib.methods.unsupervised.gaussian_encoder_model import GaussianModel
from disentanglement_lib.methods.unsupervised.model import gaussian_log_density
from disentanglement_lib.utils import results
from disentanglement_lib.evaluation.metrics import mig
import numpy as np
from argparse import ArgumentParser
import pytorch_lightning as pl
import torch
from torch import nn as nn
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
import gin
import pathlib, shutil
import wandb
from disentanglement_lib.utils.hub import convert_model
from disentanglement_lib.utils.mi_estimators import estimate_entropies
from disentanglement_lib.visualize.visualize_util import plt_sample_traversal
@gin.configurable("train", denylist=[])
class Train(pl.LightningModule):
    """Trains the estimator and exports the snapshot and the gin config.

    The use of this function requires the gin binding 'dataset.name' to be
    specified as that determines the data set used for training.

    Args:
        model: GaussianEncoderModel that should be trained and exported.
        training_steps: Integer with number of training steps.
        random_seed: Integer with random seed used for training.
        batch_size: Integer with the batch size.
        opt_name: Optimizer class used by configure_optimizers.
        lr: Learning rate for the optimizer.
        eval_numbers: How many times evaluate() runs over the whole training.
        name: Optional string with name of the model (can be used to name models).
        model_num: Optional integer with model number (can be used to identify
          models).
    """

    def __init__(self,
                 model=gin.REQUIRED,
                 training_steps=gin.REQUIRED,
                 random_seed=gin.REQUIRED,
                 batch_size=gin.REQUIRED,
                 opt_name=torch.optim.Adam,
                 lr=5e-4,
                 eval_numbers=10,
                 name="",
                 model_num=None):
        super().__init__()
        self.training_steps = training_steps
        self.random_seed = random_seed
        self.batch_size = batch_size
        self.lr = lr
        self.name = name
        self.model_num = model_num
        self.eval_numbers = eval_numbers
        wandb.config['dataset'] = gin.query_parameter('dataset.name')
        self.save_hyperparameters()
        self.opt_name = opt_name

        self.data = named_data.get_named_ground_truth_data()
        # Dataset yields (H, W, C) observations; torch expects (C, H, W).
        img_shape = np.array(self.data.observation_shape)[[2, 0, 1]].tolist()
        self.ae = model(img_shape)

    def training_step(self, batch, batch_idx):
        # Periodically run evaluation; clamp the interval to >= 1 so that
        # training_steps < eval_numbers no longer raises ZeroDivisionError.
        eval_interval = max(1, self.training_steps // self.eval_numbers)
        if (self.global_step + 1) % eval_interval == 0:
            self.evaluate()

        x = batch
        loss, summary = self.ae.model_fn(x.float(), None)
        self.log_dict(summary)
        return loss

    def evaluate(self) -> None:
        """Move the model to CPU, log visualizations, and restore training state."""
        model = self.ae
        # Remember the original device so evaluation also works on CPU-only
        # hosts (previously this method unconditionally called .cuda()).
        device = next(model.parameters()).device
        model.cpu()
        model.eval()
        dic_log = {}
        dic_log.update(self.visualize_model(model))
        wandb.log(dic_log)
        model.to(device)
        model.train()

    def visualize_model(self, model) -> dict:
        """Render a latent-traversal figure for the current decoder."""
        _encoder, _decoder = convert_model(model)
        num_latent = self.ae.num_latent
        mu = torch.zeros(1, num_latent)
        fig = plt_sample_traversal(mu, _decoder, 8, range(num_latent), 2)
        return {'traversal': wandb.Image(fig)}

    def train_dataloader(self) -> DataLoader:
        return DataLoader(self.data,
                          batch_size=self.batch_size,
                          num_workers=4,
                          shuffle=True,
                          pin_memory=True)

    def configure_optimizers(self):
        return self.opt_name(self.parameters(), lr=self.lr)

    def save_model(self, file):
        """Serialize the autoencoder weights and upload them to wandb.

        Uses a unique temporary directory instead of ``/tmp/models/<randint>``
        (which could collide between runs) and no longer shadows the builtin
        ``dir``.
        """
        import tempfile
        save_dir = tempfile.mkdtemp(prefix="models-")
        file_path = os.path.join(save_dir, file)
        torch.save(self.ae.state_dict(), file_path)
        wandb.save(file_path, base_path=save_dir)
| 38.262774 | 91 | 0.689432 | 3,213 | 0.612934 | 0 | 0 | 3,253 | 0.620565 | 0 | 0 | 1,488 | 0.283861 |
ea50fbdd5b47c4790c000e6a763c6ba66c891802 | 763 | py | Python | chat/models.py | horsehair/unimate | 13fd7336307120ea36ff7eb8a28f6d1966222130 | [
"MIT"
] | null | null | null | chat/models.py | horsehair/unimate | 13fd7336307120ea36ff7eb8a28f6d1966222130 | [
"MIT"
] | null | null | null | chat/models.py | horsehair/unimate | 13fd7336307120ea36ff7eb8a28f6d1966222130 | [
"MIT"
] | null | null | null | from django.db import models
from django.db.models import Model, TextField, DateTimeField, ForeignKey, CASCADE
from accounts.models import User
from rooms.models import Room
# Create your models here.
class MessageModel(Model):
    """
    A chat message. Each message has an owner (user), the room it was posted
    to, a creation timestamp, and the message body.
    """
    # Author of the message; deleting the user cascades to their messages.
    user = ForeignKey(User, on_delete=CASCADE, verbose_name='user',
                      related_name='from_user', db_index=True)
    # Destination room; deleting the room cascades to its messages.
    room = ForeignKey(Room, on_delete=CASCADE, verbose_name='room',
                      related_name='to_room', db_index=True)
    # Set once at creation (auto_now_add) and indexed for ordered retrieval.
    timestamp = DateTimeField('timestamp', auto_now_add=True, editable=False,
                              db_index=True)
    # Free-form message text.
    body = TextField('body')
| 40.157895 | 81 | 0.671035 | 560 | 0.733945 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.245085 |
ea528f46ed31b6469c3c5eda3e10a9823f9f1742 | 1,088 | py | Python | tests/test_replica/test_factory.py | maxipavlovic/django-cqrs | d401819b5bca7c2e833d44e8426251fdd4b6b8b9 | [
"Apache-2.0"
] | 52 | 2020-05-28T13:25:36.000Z | 2022-03-05T12:31:45.000Z | tests/test_replica/test_factory.py | maxipavlovic/django-cqrs | d401819b5bca7c2e833d44e8426251fdd4b6b8b9 | [
"Apache-2.0"
] | 26 | 2020-07-30T08:17:13.000Z | 2022-02-18T12:54:38.000Z | tests/test_replica/test_factory.py | maxipavlovic/django-cqrs | d401819b5bca7c2e833d44e8426251fdd4b6b8b9 | [
"Apache-2.0"
] | 15 | 2020-05-28T13:25:40.000Z | 2022-03-07T04:14:36.000Z | # Copyright © 2021 Ingram Micro Inc. All rights reserved.
from dj_cqrs.constants import SignalType
from dj_cqrs.controller.consumer import route_signal_to_replica_model
from dj_cqrs.mixins import ReplicaMixin
import pytest
def test_bad_model(caplog):
    """Routing a signal to an unknown CQRS_ID logs an error and is a no-op."""
    unknown_id = 'invalid'
    route_signal_to_replica_model(SignalType.SAVE, unknown_id, {})

    expected_message = 'No model with such CQRS_ID: invalid.'
    assert expected_message in caplog.text
@pytest.mark.django_db
def test_bad_signal(caplog):
    """An unrecognized signal type is logged against the target CQRS_ID."""
    route_signal_to_replica_model('invalid', 'basic', {})

    expected_message = 'Bad signal type "invalid" for CQRS_ID "basic".'
    assert expected_message in caplog.text
@pytest.mark.django_db
def test_save_model(mocker):
    """SAVE signals are dispatched to ReplicaMixin.cqrs_save with previous data."""
    save_spy = mocker.patch.object(ReplicaMixin, 'cqrs_save')

    route_signal_to_replica_model(SignalType.SAVE, 'basic', {}, {})

    save_spy.assert_called_once_with({}, previous_data={})
@pytest.mark.django_db
def test_delete_model(mocker):
    """DELETE signals are dispatched to ReplicaMixin.cqrs_delete."""
    delete_spy = mocker.patch.object(ReplicaMixin, 'cqrs_delete')

    payload = {'id': 1}
    route_signal_to_replica_model(SignalType.DELETE, 'basic', payload)

    delete_spy.assert_called_once_with({'id': 1})
| 31.085714 | 74 | 0.769301 | 0 | 0 | 0 | 0 | 692 | 0.635445 | 0 | 0 | 216 | 0.198347 |
ea52a83735f5ee01c90e086d751f1daff7428111 | 56,867 | py | Python | openff/interchange/components/smirnoff.py | openforcefield/openff-interchange | 275bd4146dd2724c5eeb2b52d3177b53371edb7c | [
"MIT"
] | 10 | 2021-06-17T20:10:53.000Z | 2022-02-24T15:43:25.000Z | openff/interchange/components/smirnoff.py | openforcefield/openff-interchange | 275bd4146dd2724c5eeb2b52d3177b53371edb7c | [
"MIT"
] | 198 | 2021-06-11T19:49:08.000Z | 2022-03-31T13:33:12.000Z | openff/interchange/components/smirnoff.py | openforcefield/openff-interchange | 275bd4146dd2724c5eeb2b52d3177b53371edb7c | [
"MIT"
] | 7 | 2021-06-18T18:17:32.000Z | 2022-01-25T18:40:52.000Z | """Models and utilities for processing SMIRNOFF data."""
import abc
import copy
import functools
from collections import defaultdict
from typing import (
TYPE_CHECKING,
Any,
DefaultDict,
Dict,
List,
Tuple,
Type,
TypeVar,
Union,
)
import numpy as np
from openff.toolkit.topology import Molecule
from openff.toolkit.typing.engines.smirnoff.parameters import (
AngleHandler,
BondHandler,
ChargeIncrementModelHandler,
ConstraintHandler,
ElectrostaticsHandler,
ImproperTorsionHandler,
LibraryChargeHandler,
ParameterHandler,
ProperTorsionHandler,
ToolkitAM1BCCHandler,
UnassignedProperTorsionParameterException,
UnassignedValenceParameterException,
VirtualSiteHandler,
vdWHandler,
)
from openff.units import unit
from openff.units.openmm import from_openmm
from openmm import unit as omm_unit
from pydantic import Field
from typing_extensions import Literal
from openff.interchange.components.potentials import (
Potential,
PotentialHandler,
WrappedPotential,
)
from openff.interchange.exceptions import (
InvalidParameterHandlerError,
MissingParametersError,
SMIRNOFFParameterAttributeNotImplementedError,
)
from openff.interchange.models import PotentialKey, TopologyKey, VirtualSiteKey
from openff.interchange.types import FloatQuantity
kcal_mol = omm_unit.kilocalorie_per_mole
kcal_mol_angstroms = kcal_mol / omm_unit.angstrom ** 2
kcal_mol_radians = kcal_mol / omm_unit.radian ** 2
if TYPE_CHECKING:
from openff.toolkit.topology import Topology
from openff.interchange.components.mdtraj import _OFFBioTop
ElectrostaticsHandlerType = Union[
ElectrostaticsHandler,
ChargeIncrementModelHandler,
LibraryChargeHandler,
ToolkitAM1BCCHandler,
]
T = TypeVar("T", bound="SMIRNOFFPotentialHandler")
TP = TypeVar("TP", bound="PotentialHandler")
class SMIRNOFFPotentialHandler(PotentialHandler, abc.ABC):
    """Base class for handlers storing potentials produced by SMIRNOFF force fields."""

    @classmethod
    @abc.abstractmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        raise NotImplementedError()

    @classmethod
    @abc.abstractmethod
    def supported_parameters(cls):
        """Return a list of parameter attributes supported by this handler."""
        raise NotImplementedError()

    # @classmethod
    # @abc.abstractmethod
    # def valence_terms(cls, topology):
    #     """Return an interable of all of one type of valence term in this topology."""
    #     raise NotImplementedError()

    @classmethod
    def check_supported_parameters(cls, parameter_handler: ParameterHandler):
        """Verify that a parameter handler is in an allowed list of handlers.

        Raises SMIRNOFFParameterAttributeNotImplementedError for any defined
        parameter attribute this handler does not know how to store.
        """
        for parameter in parameter_handler.parameters:
            for parameter_attribute in parameter._get_defined_parameter_attributes():
                if parameter_attribute not in cls.supported_parameters():
                    raise SMIRNOFFParameterAttributeNotImplementedError(
                        parameter_attribute,
                    )

    def store_matches(
        self,
        parameter_handler: ParameterHandler,
        topology: Union["Topology", "_OFFBioTop"],
    ) -> None:
        """Populate self.slot_map with key-val pairs of [TopologyKey, PotentialKey]."""
        parameter_handler_name = getattr(parameter_handler, "_TAGNAME", None)
        if self.slot_map:
            # TODO: Should the slot_map always be reset, or should we be able to partially
            # update it? Also Note the duplicated code in the child classes
            self.slot_map = dict()
        matches = parameter_handler.find_matches(topology)
        for key, val in matches.items():
            topology_key = TopologyKey(atom_indices=key)
            potential_key = PotentialKey(
                id=val.parameter_type.smirks, associated_handler=parameter_handler_name
            )
            self.slot_map[topology_key] = potential_key
        # Bond and angle handlers additionally verify that every valence term
        # in the topology received a parameter (checked by class name here).
        if self.__class__.__name__ in ["SMIRNOFFBondHandler", "SMIRNOFFAngleHandler"]:
            valence_terms = self.valence_terms(topology)  # type: ignore[attr-defined]
            parameter_handler._check_all_valence_terms_assigned(
                assigned_terms=matches,
                valence_terms=valence_terms,
                exception_cls=UnassignedValenceParameterException,
            )

    @classmethod
    def _from_toolkit(
        cls: Type[T],
        parameter_handler: TP,
        topology: "Topology",
    ) -> T:
        """
        Create a SMIRNOFFPotentialHandler from toolkit data.

        Rejects parameter handlers not listed by allowed_parameter_handlers(),
        then delegates to store_matches/store_potentials.
        """
        if type(parameter_handler) not in cls.allowed_parameter_handlers():
            raise InvalidParameterHandlerError(type(parameter_handler))
        handler = cls()
        if hasattr(handler, "fractional_bond_order_method"):
            # Copy fractional bond order settings from the toolkit handler
            # when both sides support them (bonds and proper torsions).
            if getattr(parameter_handler, "fractional_bondorder_method", None):
                handler.fractional_bond_order_method = (  # type: ignore[attr-defined]
                    parameter_handler.fractional_bondorder_method  # type: ignore[attr-defined]
                )
                handler.fractional_bond_order_interpolation = (  # type: ignore[attr-defined]
                    parameter_handler.fractional_bondorder_interpolation  # type: ignore[attr-defined]
                )
        handler.store_matches(parameter_handler=parameter_handler, topology=topology)
        handler.store_potentials(parameter_handler=parameter_handler)

        return handler
class SMIRNOFFBondHandler(SMIRNOFFPotentialHandler):
    """Handler storing bond potentials as produced by a SMIRNOFF force field."""

    type: Literal["Bonds"] = "Bonds"
    expression: Literal["k/2*(r-length)**2"] = "k/2*(r-length)**2"
    fractional_bond_order_method: Literal["AM1-Wiberg"] = "AM1-Wiberg"
    fractional_bond_order_interpolation: Literal["linear"] = "linear"

    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [BondHandler]

    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attribute names."""
        return ["smirks", "id", "k", "length", "k_bondorder", "length_bondorder"]

    @classmethod
    def valence_terms(cls, topology):
        """Return all bonds in this topology."""
        return [list(b.atoms) for b in topology.topology_bonds]

    def store_matches(
        self,
        parameter_handler: ParameterHandler,
        topology: Union["Topology", "_OFFBioTop"],
    ) -> None:
        """
        Populate self.slot_map with key-val pairs of slots and unique potential identifiers.

        Bond-order-interpolated parameters store the matched bond's fractional
        bond order on both the TopologyKey and the PotentialKey.
        """
        parameter_handler_name = getattr(parameter_handler, "_TAGNAME", None)
        if self.slot_map:
            # TODO: Should the slot_map always be reset, or should we be able to partially
            # update it? Also Note the duplicated code in the child classes
            self.slot_map = dict()
        matches = parameter_handler.find_matches(topology)
        for key, val in matches.items():
            param = val.parameter_type
            if param.k_bondorder or param.length_bondorder:
                top_bond = topology.get_bond_between(*key)
                fractional_bond_order = top_bond.bond.fractional_bond_order
                if not fractional_bond_order:
                    raise RuntimeError(
                        "Bond orders should already be assigned at this point"
                    )
            else:
                fractional_bond_order = None
            topology_key = TopologyKey(
                atom_indices=key, bond_order=fractional_bond_order
            )
            potential_key = PotentialKey(
                id=val.parameter_type.smirks,
                associated_handler=parameter_handler_name,
                bond_order=fractional_bond_order,
            )
            self.slot_map[topology_key] = potential_key

        valence_terms = self.valence_terms(topology)

        parameter_handler._check_all_valence_terms_assigned(
            assigned_terms=matches,
            valence_terms=valence_terms,
            exception_cls=UnassignedValenceParameterException,
        )

    def store_potentials(self, parameter_handler: "BondHandler") -> None:
        """
        Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].

        Bond-order-interpolated parameters are stored as a WrappedPotential
        whose coefficients come from _get_interpolation_coeffs.
        """
        if self.potentials:
            self.potentials = dict()
        for topology_key, potential_key in self.slot_map.items():
            smirks = potential_key.id
            parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
            if topology_key.bond_order:  # type: ignore[union-attr]
                bond_order = topology_key.bond_order  # type: ignore[union-attr]
                if parameter.k_bondorder:
                    data = parameter.k_bondorder
                else:
                    data = parameter.length_bondorder
                coeffs = _get_interpolation_coeffs(
                    fractional_bond_order=bond_order,
                    data=data,
                )
                pots = []
                map_keys = [*data.keys()]
                # NOTE(review): both k_bondorder and length_bondorder are read
                # unconditionally below; this assumes a parameter defining one
                # always defines the other — confirm upstream.
                for map_key in map_keys:
                    pots.append(
                        Potential(
                            parameters={
                                "k": parameter.k_bondorder[map_key],
                                "length": parameter.length_bondorder[map_key],
                            },
                            map_key=map_key,
                        )
                    )
                potential = WrappedPotential(
                    {pot: coeff for pot, coeff in zip(pots, coeffs)}
                )
            else:
                potential = Potential(  # type: ignore[assignment]
                    parameters={
                        "k": parameter.k,
                        "length": parameter.length,
                    },
                )
            self.potentials[potential_key] = potential

    @classmethod
    def _from_toolkit(
        cls: Type[T],
        parameter_handler: "BondHandler",
        topology: "Topology",
    ) -> T:
        """
        Create a SMIRNOFFBondHandler from toolkit data.

        If any parameter is bond-order interpolated, fractional bond orders
        are first assigned to every reference molecule in the topology.
        """
        # TODO: This method overrides SMIRNOFFPotentialHandler.from_toolkit in order to gobble up
        # a ConstraintHandler. This seems like a good solution for the interdependence, but is also
        # not a great practice. A better solution would involve not overriding the method with a
        # different function signature.
        if type(parameter_handler) not in cls.allowed_parameter_handlers():
            raise InvalidParameterHandlerError

        handler: T = cls(type="Bonds", expression="k/2*(r-length)**2")

        if (
            any(
                getattr(p, "k_bondorder", None) is not None
                for p in parameter_handler.parameters
            )
        ) or (
            any(
                getattr(p, "length_bondorder", None) is not None
                for p in parameter_handler.parameters
            )
        ):
            for ref_mol in topology.reference_molecules:
                # TODO: expose conformer generation and fractional bond order assigment
                # knobs to user via API
                ref_mol.generate_conformers(n_conformers=1)
                ref_mol.assign_fractional_bond_orders(
                    bond_order_model=handler.fractional_bond_order_method.lower(),  # type: ignore[attr-defined]
                )

        handler.store_matches(parameter_handler=parameter_handler, topology=topology)
        handler.store_potentials(parameter_handler=parameter_handler)

        return handler
class SMIRNOFFConstraintHandler(SMIRNOFFPotentialHandler):
    """Handler storing constraint potentials as produced by a SMIRNOFF force field."""

    type: Literal["Constraints"] = "Constraints"
    expression: Literal[""] = ""
    # NOTE(review): annotated as Dict[PotentialKey, bool] but store_constraints
    # assigns Potential values (see the type: ignore there) — the annotation
    # looks wrong; confirm before changing the pydantic field type.
    constraints: Dict[
        PotentialKey, bool
    ] = dict()  # should this be named potentials for consistency?

    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [BondHandler, ConstraintHandler]

    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attribute names."""
        return ["smirks", "id", "k", "length", "distance"]

    @classmethod
    def _from_toolkit(  # type: ignore[override]
        cls: Type[T],
        parameter_handler: List,
        topology: "Topology",
    ) -> T:
        """
        Create a SMIRNOFFPotentialHandler from toolkit data.

        Accepts a single handler or a list; constraints without an explicit
        distance require an accompanying BondHandler.
        """
        if isinstance(parameter_handler, list):
            parameter_handlers = parameter_handler
        else:
            parameter_handlers = [parameter_handler]

        for parameter_handler in parameter_handlers:
            if type(parameter_handler) not in cls.allowed_parameter_handlers():
                raise InvalidParameterHandlerError(type(parameter_handler))

        handler = cls()
        handler.store_constraints(  # type: ignore[attr-defined]
            parameter_handlers=parameter_handlers, topology=topology
        )

        return handler

    def store_constraints(
        self,
        parameter_handlers: Any,
        topology: "_OFFBioTop",
    ) -> None:
        """Store constraints, resolving distances from bond parameters when needed."""
        if self.slot_map:
            self.slot_map = dict()

        constraint_handler = [
            p for p in parameter_handlers if type(p) == ConstraintHandler
        ][0]
        constraint_matches = constraint_handler.find_matches(topology)

        if any([type(p) == BondHandler for p in parameter_handlers]):
            bond_handler = [p for p in parameter_handlers if type(p) == BondHandler][0]
            bonds = SMIRNOFFBondHandler._from_toolkit(
                parameter_handler=bond_handler,
                topology=topology,
            )
        else:
            bond_handler = None
            bonds = None

        for key, match in constraint_matches.items():
            topology_key = TopologyKey(atom_indices=key)
            smirks = match.parameter_type.smirks
            distance = match.parameter_type.distance
            if distance is not None:
                # This constraint parameter is fully specified
                # NOTE(review): this branch never writes to self.slot_map,
                # unlike the bond-derived branch below — confirm intended.
                potential_key = PotentialKey(
                    id=smirks, associated_handler="Constraints"
                )
                distance = match.parameter_type.distance
            else:
                # This constraint parameter depends on the BondHandler ...
                if bond_handler is None:
                    raise MissingParametersError(
                        f"Constraint with SMIRKS pattern {smirks} found with no distance "
                        "specified, and no corresponding bond parameters were found. The distance "
                        "of this constraint is not specified."
                    )
                # ... so use the same PotentialKey instance as the BondHandler to look up the distance
                potential_key = bonds.slot_map[topology_key]  # type: ignore[union-attr]
                self.slot_map[topology_key] = potential_key
                distance = bonds.potentials[potential_key].parameters["length"]  # type: ignore[union-attr]
            potential = Potential(
                parameters={
                    "distance": distance,
                }
            )
            self.constraints[potential_key] = potential  # type: ignore[assignment]
class SMIRNOFFAngleHandler(SMIRNOFFPotentialHandler):
    """Handler storing angle potentials as produced by a SMIRNOFF force field."""

    type: Literal["Angles"] = "Angles"
    expression: Literal["k/2*(theta-angle)**2"] = "k/2*(theta-angle)**2"

    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [AngleHandler]

    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attributes."""
        return ["smirks", "id", "k", "angle"]

    @classmethod
    def valence_terms(cls, topology):
        """Return all angles in this topology."""
        return list(topology.angles)

    def store_potentials(self, parameter_handler: "AngleHandler") -> None:
        """
        Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
        """
        for potential_key in self.slot_map.values():
            smirks = potential_key.id
            # ParameterHandler.get_parameter returns a list, although this
            # should only ever be length 1
            parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
            potential = Potential(
                parameters={
                    "k": parameter.k,
                    "angle": parameter.angle,
                },
            )
            self.potentials[potential_key] = potential

    # NOTE(review): "f_from_toolkit" looks like a typo for "_from_toolkit";
    # callers presumably use the inherited base-class implementation, making
    # this method dead code. Confirm before renaming/removing.
    @classmethod
    def f_from_toolkit(
        cls: Type[T],
        parameter_handler: "AngleHandler",
        topology: "Topology",
    ) -> T:
        """
        Create a SMIRNOFFAngleHandler from toolkit data.
        """
        handler = cls()
        handler.store_matches(parameter_handler=parameter_handler, topology=topology)
        handler.store_potentials(parameter_handler=parameter_handler)

        return handler
class SMIRNOFFProperTorsionHandler(SMIRNOFFPotentialHandler):
    """Handler storing proper torsions potentials as produced by a SMIRNOFF force field."""

    type: Literal["ProperTorsions"] = "ProperTorsions"
    expression: Literal[
        "k*(1+cos(periodicity*theta-phase))"
    ] = "k*(1+cos(periodicity*theta-phase))"
    fractional_bond_order_method: Literal["AM1-Wiberg"] = "AM1-Wiberg"
    fractional_bond_order_interpolation: Literal["linear"] = "linear"

    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [ProperTorsionHandler]

    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attribute names."""
        return ["smirks", "id", "k", "periodicity", "phase", "idivf", "k_bondorder"]

    def store_matches(
        self,
        parameter_handler: "ProperTorsionHandler",
        topology: "_OFFBioTop",
    ) -> None:
        """
        Populate self.slot_map with key-val pairs of slots and unique potential identifiers.

        Each periodicity term of a matched torsion gets its own key, indexed
        by `mult`. Bond-order-interpolated torsions also record the central
        bond's fractional bond order on both keys.
        """
        if self.slot_map:
            self.slot_map = dict()
        matches = parameter_handler.find_matches(topology)
        for key, val in matches.items():
            param = val.parameter_type
            n_terms = len(val.parameter_type.phase)
            for n in range(n_terms):
                smirks = param.smirks
                if param.k_bondorder:
                    # The relevant bond order is that of the _central_ bond in the torsion
                    top_bond = topology.get_bond_between(key[1], key[2])
                    fractional_bond_order = top_bond.bond.fractional_bond_order
                    if not fractional_bond_order:
                        raise RuntimeError(
                            "Bond orders should already be assigned at this point"
                        )
                else:
                    fractional_bond_order = None
                topology_key = TopologyKey(
                    atom_indices=key, mult=n, bond_order=fractional_bond_order
                )
                potential_key = PotentialKey(
                    id=smirks,
                    mult=n,
                    associated_handler="ProperTorsions",
                    bond_order=fractional_bond_order,
                )
                self.slot_map[topology_key] = potential_key

        parameter_handler._check_all_valence_terms_assigned(
            assigned_terms=matches,
            valence_terms=list(topology.propers),
            exception_cls=UnassignedProperTorsionParameterException,
        )

    def store_potentials(self, parameter_handler: "ProperTorsionHandler") -> None:
        """
        Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].

        Bond-order-interpolated terms become WrappedPotentials weighted by
        _get_interpolation_coeffs; plain terms become single Potentials.
        """
        for topology_key, potential_key in self.slot_map.items():
            smirks = potential_key.id
            n = potential_key.mult
            parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
            # n_terms = len(parameter.k)
            if topology_key.bond_order:  # type: ignore[union-attr]
                bond_order = topology_key.bond_order  # type: ignore[union-attr]
                data = parameter.k_bondorder[n]
                coeffs = _get_interpolation_coeffs(
                    fractional_bond_order=bond_order,
                    data=data,
                )
                pots = []
                map_keys = [*data.keys()]
                for map_key in map_keys:
                    parameters = {
                        "k": parameter.k_bondorder[n][map_key],
                        "periodicity": parameter.periodicity[n] * unit.dimensionless,
                        "phase": parameter.phase[n],
                        "idivf": parameter.idivf[n] * unit.dimensionless,
                    }
                    pots.append(
                        Potential(
                            parameters=parameters,
                            map_key=map_key,
                        )
                    )
                potential = WrappedPotential(
                    {pot: coeff for pot, coeff in zip(pots, coeffs)}
                )
            else:
                parameters = {
                    "k": parameter.k[n],
                    "periodicity": parameter.periodicity[n] * unit.dimensionless,
                    "phase": parameter.phase[n],
                    "idivf": parameter.idivf[n] * unit.dimensionless,
                }
                potential = Potential(parameters=parameters)  # type: ignore[assignment]
            self.potentials[potential_key] = potential
class SMIRNOFFImproperTorsionHandler(SMIRNOFFPotentialHandler):
    """Handler storing improper torsions potentials as produced by a SMIRNOFF force field."""
    type: Literal["ImproperTorsions"] = "ImproperTorsions"
    expression: Literal[
        "k*(1+cos(periodicity*theta-phase))"
    ] = "k*(1+cos(periodicity*theta-phase))"
    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [ImproperTorsionHandler]
    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attribute names."""
        return ["smirks", "id", "k", "periodicity", "phase", "idivf"]
    def store_matches(
        self, parameter_handler: "ImproperTorsionHandler", topology: "_OFFBioTop"
    ) -> None:
        """
        Populate self.slot_map with key-val pairs of slots and unique potential identifiers.
        """
        if self.slot_map:
            # Start from a clean map when re-matching.
            self.slot_map = dict()
        matches = parameter_handler.find_matches(topology)
        for key, val in matches.items():
            # The central atom of the improper is expected at index 1 of the quartet.
            parameter_handler._assert_correct_connectivity(
                val,
                [
                    (0, 1),
                    (1, 2),
                    (1, 3),
                ],
            )
            n_terms = len(val.parameter_type.k)
            for n in range(n_terms):
                smirks = val.parameter_type.smirks
                non_central_indices = [key[0], key[2], key[3]]
                # Store the three cyclic permutations of the non-central atoms
                # (central atom kept first); idivf=3 below divides k accordingly.
                for permuted_key in [
                    (
                        non_central_indices[i],
                        non_central_indices[j],
                        non_central_indices[k],
                    )
                    for (i, j, k) in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]
                ]:
                    topology_key = TopologyKey(
                        atom_indices=(key[1], *permuted_key), mult=n
                    )
                    potential_key = PotentialKey(
                        id=smirks, mult=n, associated_handler="ImproperTorsions"
                    )
                    self.slot_map[topology_key] = potential_key
    def store_potentials(self, parameter_handler: "ImproperTorsionHandler") -> None:
        """
        Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
        """
        for potential_key in self.slot_map.values():
            smirks = potential_key.id
            n = potential_key.mult
            parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
            parameters = {
                "k": parameter.k[n],
                "periodicity": parameter.periodicity[n] * unit.dimensionless,
                "phase": parameter.phase[n],
                # Energy is split evenly over the three permutations stored in
                # store_matches, hence the fixed divisor of 3.
                "idivf": 3.0 * unit.dimensionless,
            }
            potential = Potential(parameters=parameters)
            self.potentials[potential_key] = potential
class _SMIRNOFFNonbondedHandler(SMIRNOFFPotentialHandler, abc.ABC):
    """Base class for handlers storing non-bonded potentials produced by SMIRNOFF force fields."""
    type: Literal["nonbonded"] = "nonbonded"
    # Shared pydantic fields for the vdW and electrostatics subclasses below.
    cutoff: FloatQuantity["angstrom"] = Field(  # type: ignore
        9.0 * unit.angstrom,
        description="The distance at which pairwise interactions are truncated",
    )
    # Intramolecular scale factors applied to 1-3, 1-4 and 1-5 neighbour pairs.
    scale_13: float = Field(
        0.0, description="The scaling factor applied to 1-3 interactions"
    )
    scale_14: float = Field(
        0.5, description="The scaling factor applied to 1-4 interactions"
    )
    scale_15: float = Field(
        1.0, description="The scaling factor applied to 1-5 interactions"
    )
class SMIRNOFFvdWHandler(_SMIRNOFFNonbondedHandler):
    """Handler storing vdW potentials as produced by a SMIRNOFF force field."""
    type: Literal["vdW"] = "vdW"  # type: ignore[assignment]
    expression: Literal[
        "4*epsilon*((sigma/r)**12-(sigma/r)**6)"
    ] = "4*epsilon*((sigma/r)**12-(sigma/r)**6)"
    method: Literal["cutoff", "pme", "no-cutoff"] = Field("cutoff")
    mixing_rule: Literal["lorentz-berthelot", "geometric"] = Field(
        "lorentz-berthelot",
        description="The mixing rule (combination rule) used in computing pairwise vdW interactions",
    )
    switch_width: FloatQuantity["angstrom"] = Field(  # type: ignore
        1.0 * unit.angstrom,
        description="The width over which the switching function is applied",
    )
    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [vdWHandler]
    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attributes."""
        return ["smirks", "id", "sigma", "epsilon", "rmin_half"]
    def store_potentials(self, parameter_handler: vdWHandler) -> None:
        """
        Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].

        Also mirrors the handler's ``method`` and ``cutoff`` onto this object.
        """
        self.method = parameter_handler.method.lower()
        self.cutoff = parameter_handler.cutoff
        for potential_key in self.slot_map.values():
            smirks = potential_key.id
            parameter = parameter_handler.get_parameter({"smirks": smirks})[0]
            try:
                potential = Potential(
                    parameters={
                        "sigma": parameter.sigma,
                        "epsilon": parameter.epsilon,
                    },
                )
            except AttributeError:
                # Handle rmin_half pending https://github.com/openforcefield/openff-toolkit/pull/750
                # NOTE(review): this branch is currently byte-identical to the
                # ``try`` body, so it only succeeds if ``parameter.sigma`` works
                # on the second attempt — confirm intended rmin_half handling.
                potential = Potential(
                    parameters={
                        "sigma": parameter.sigma,
                        "epsilon": parameter.epsilon,
                    },
                )
            self.potentials[potential_key] = potential
    @classmethod
    def _from_toolkit(
        cls: Type[T],
        parameter_handler: "vdWHandler",
        topology: "Topology",
    ) -> T:
        """
        Create a SMIRNOFFvdWHandler from toolkit data.
        """
        if isinstance(parameter_handler, list):
            parameter_handlers = parameter_handler
        else:
            parameter_handlers = [parameter_handler]
        for parameter_handler in parameter_handlers:
            if type(parameter_handler) not in cls.allowed_parameter_handlers():
                raise InvalidParameterHandlerError(
                    f"Found parameter handler type {type(parameter_handler)}, which is not "
                    f"supported by potential type {type(cls)}"
                )
        # NOTE(review): after the loop, ``parameter_handler`` is the last handler
        # in the list; with a single-element list this is the handler itself.
        handler = cls(
            scale_13=parameter_handler.scale13,
            scale_14=parameter_handler.scale14,
            scale_15=parameter_handler.scale15,
            cutoff=parameter_handler.cutoff,
            mixing_rule=parameter_handler.combining_rules.lower(),
            method=parameter_handler.method.lower(),
            switch_width=parameter_handler.switch_width,
        )
        handler.store_matches(parameter_handler=parameter_handler, topology=topology)
        handler.store_potentials(parameter_handler=parameter_handler)
        return handler
    @classmethod
    def parameter_handler_precedence(cls) -> List[str]:
        """
        Return the order in which parameter handlers take precedence when computing charges.
        """
        return ["vdw", "VirtualSites"]
    def _from_toolkit_virtual_sites(
        self,
        parameter_handler: "VirtualSiteHandler",
        topology: "Topology",
    ):
        # TODO: Merge this logic into _from_toolkit
        # Only the four known virtual site parameter types are supported.
        if not all(
            isinstance(
                p,
                (
                    VirtualSiteHandler.VirtualSiteBondChargeType,
                    VirtualSiteHandler.VirtualSiteMonovalentLonePairType,
                    VirtualSiteHandler.VirtualSiteDivalentLonePairType,
                    VirtualSiteHandler.VirtualSiteTrivalentLonePairType,
                ),
            )
            for p in parameter_handler.parameters
        ):
            raise NotImplementedError("Found unsupported virtual site types")
        matches = parameter_handler.find_matches(topology)
        for atoms, parameter_match in matches.items():
            virtual_site_type = parameter_match[0].parameter_type
            top_key = VirtualSiteKey(
                atom_indices=atoms,
                type=virtual_site_type.type,
                match=virtual_site_type.match,
            )
            pot_key = PotentialKey(
                id=virtual_site_type.smirks, associated_handler=virtual_site_type.type
            )
            # Virtual sites carry their own vdW parameters (sigma/epsilon).
            pot = Potential(
                parameters={
                    "sigma": virtual_site_type.sigma,
                    "epsilon": virtual_site_type.epsilon,
                    # "distance": virtual_site_type.distance,
                }
            )
            # if virtual_site_type.type in {"MonovalentLonePair", "DivalentLonePair"}:
            #     pot.parameters.update(
            #         {
            #             "outOfPlaneAngle": virtual_site_type.outOfPlaneAngle,
            #         }
            #     )
            # if virtual_site_type.type in {"MonovalentLonePair"}:
            #     pot.parameters.update(
            #         {
            #             "inPlaneAngle": virtual_site_type.inPlaneAngle,
            #         }
            #     )
            self.slot_map.update({top_key: pot_key})
            self.potentials.update({pot_key: pot})
class SMIRNOFFElectrostaticsHandler(_SMIRNOFFNonbondedHandler):
    """
    A handler which stores any electrostatic parameters applied to a topology.

    This handler is responsible for grouping together

    * global settings for the electrostatic interactions such as the cutoff distance
      and the intramolecular scale factors.
    * partial charges which have been assigned by a ``ToolkitAM1BCC``,
      ``LibraryCharges``, or a ``ChargeIncrementModel`` parameter
      handler.
    * charge corrections applied by a ``SMIRNOFFChargeIncrementHandler``.

    rather than having each in their own handler.
    """
    type: Literal["Electrostatics"] = "Electrostatics"  # type: ignore[assignment]
    expression: Literal["coul"] = "coul"
    method: Literal["pme", "cutoff", "reaction-field", "no-cutoff"] = Field("pme")
    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [
            LibraryChargeHandler,
            ChargeIncrementModelHandler,
            ToolkitAM1BCCHandler,
            ElectrostaticsHandler,
        ]
    @classmethod
    def supported_parameters(cls):
        """Return a list of supported parameter attribute names."""
        pass
    @property
    def charges(self) -> Dict[Union[TopologyKey, VirtualSiteKey], unit.Quantity]:
        """Get the total partial charge on each atom, excluding virtual sites."""
        return self.get_charges(include_virtual_sites=False)
    @property
    def charges_with_virtual_sites(
        self,
    ) -> Dict[Union[VirtualSiteKey, TopologyKey], unit.Quantity]:
        """Get the total partial charge on each atom, including virtual sites."""
        return self.get_charges(include_virtual_sites=True)
    def get_charges(
        self, include_virtual_sites=False
    ) -> Dict[Union[VirtualSiteKey, TopologyKey], unit.Quantity]:
        """Get the total partial charge on each atom or particle."""
        # Accumulate by raw atom index (int) for real atoms and by
        # VirtualSiteKey for virtual sites; converted to TopologyKeys below.
        charges: DefaultDict[
            Union[TopologyKey, VirtualSiteKey], FloatQuantity
        ] = defaultdict(lambda: 0.0 * unit.e)
        for topology_key, potential_key in self.slot_map.items():
            potential = self.potentials[potential_key]
            for parameter_key, parameter_value in potential.parameters.items():
                if parameter_key == "charge_increments":
                    if type(topology_key) != VirtualSiteKey:
                        raise RuntimeError
                    # A virtual site's charge is minus the sum of the increments
                    # it pulls from its parent atoms.
                    charge = -1.0 * np.sum(parameter_value)
                    # assumes virtual sites can only have charges determined in one step
                    # also, topology_key is actually a VirtualSiteKey
                    charges[topology_key] = charge
                elif parameter_key in ["charge", "charge_increment"]:
                    charge = parameter_value
                    charges[topology_key.atom_indices[0]] += charge  # type: ignore
                else:
                    raise NotImplementedError()
        returned_charges: Dict[
            Union[VirtualSiteKey, TopologyKey], unit.Quantity
        ] = dict()
        for index, charge in charges.items():
            if isinstance(index, int):
                returned_charges[TopologyKey(atom_indices=(index,))] = charge
            else:
                if include_virtual_sites:
                    returned_charges[index] = charge
        return returned_charges
    @classmethod
    def parameter_handler_precedence(cls) -> List[str]:
        """
        Return the order in which parameter handlers take precedence when computing charges.
        """
        return ["LibraryCharges", "ChargeIncrementModel", "ToolkitAM1BCC"]
    @classmethod
    def _from_toolkit(
        cls: Type[T],
        parameter_handler: Any,
        topology: "Topology",
    ) -> T:
        """
        Create a SMIRNOFFElectrostaticsHandler from toolkit data.
        """
        if isinstance(parameter_handler, list):
            parameter_handlers = parameter_handler
        else:
            parameter_handlers = [parameter_handler]
        # The ElectrostaticsHandler instance carries the global settings
        # (cutoff, scales, method); the other handlers only supply charges.
        toolkit_handler_with_metadata = [
            p for p in parameter_handlers if type(p) == ElectrostaticsHandler
        ][0]
        handler = cls(
            type=toolkit_handler_with_metadata._TAGNAME,
            scale_13=toolkit_handler_with_metadata.scale13,
            scale_14=toolkit_handler_with_metadata.scale14,
            scale_15=toolkit_handler_with_metadata.scale15,
            cutoff=toolkit_handler_with_metadata.cutoff,
            method=toolkit_handler_with_metadata.method.lower(),
        )
        handler.store_matches(parameter_handlers, topology)
        return handler
    def _from_toolkit_virtual_sites(
        self,
        parameter_handler: "VirtualSiteHandler",
        topology: "Topology",
    ):
        # TODO: Merge this logic into _from_toolkit
        if not all(
            isinstance(
                p,
                (
                    VirtualSiteHandler.VirtualSiteBondChargeType,
                    VirtualSiteHandler.VirtualSiteMonovalentLonePairType,
                    VirtualSiteHandler.VirtualSiteDivalentLonePairType,
                    VirtualSiteHandler.VirtualSiteTrivalentLonePairType,
                ),
            )
            for p in parameter_handler.parameters
        ):
            raise NotImplementedError("Found unsupported virtual site types")
        matches = parameter_handler.find_matches(topology)
        for atom_indices, parameter_match in matches.items():
            virtual_site_type = parameter_match[0].parameter_type
            virtual_site_key = VirtualSiteKey(
                atom_indices=atom_indices,
                type=virtual_site_type.type,
                match=virtual_site_type.match,
            )
            virtual_site_potential_key = PotentialKey(
                id=virtual_site_type.smirks,
                associated_handler="VirtualSiteHandler",
            )
            # The virtual site itself stores the full increment vector ...
            virtual_site_potential = Potential(
                parameters={
                    "charge_increments": from_openmm(
                        virtual_site_type.charge_increment
                    ),
                }
            )
            matches = {}
            potentials = {}
            self.slot_map.update({virtual_site_key: virtual_site_potential_key})
            self.potentials.update({virtual_site_potential_key: virtual_site_potential})
            # ... while each parent atom gets its own single charge_increment entry.
            # TODO: Counter-intuitive that toolkit regression tests pass by using the counter
            # variable i as if it was the atom index - shouldn't it just use atom_index?
            for i, atom_index in enumerate(atom_indices):  # noqa
                topology_key = TopologyKey(atom_indices=(i,), mult=2)
                potential_key = PotentialKey(
                    id=virtual_site_type.smirks,
                    mult=i,
                    associated_handler="VirtualSiteHandler",
                )
                charge_increment = getattr(
                    virtual_site_type, f"charge_increment{i + 1}"
                )
                potential = Potential(
                    parameters={"charge_increment": from_openmm(charge_increment)}
                )
                matches[topology_key] = potential_key
                potentials[potential_key] = potential
            self.slot_map.update(matches)
            self.potentials.update(potentials)
    @classmethod
    @functools.lru_cache(None)
    def _compute_partial_charges(cls, molecule: Molecule, method: str) -> unit.Quantity:
        """Call out to the toolkit's toolkit wrappers to generate partial charges."""
        # NOTE(review): the unbounded cache keys on the Molecule argument and
        # keeps every molecule ever charged alive — confirm that is acceptable.
        molecule = copy.deepcopy(molecule)
        molecule.assign_partial_charges(method)
        return from_openmm(molecule.partial_charges)
    @classmethod
    def _library_charge_to_potentials(
        cls,
        atom_indices: Tuple[int, ...],
        parameter: LibraryChargeHandler.LibraryChargeType,
    ) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
        """
        Map a matched library charge parameter to a set of potentials.
        """
        matches = {}
        potentials = {}
        for i, (atom_index, charge) in enumerate(zip(atom_indices, parameter.charge)):
            topology_key = TopologyKey(atom_indices=(atom_index,))
            potential_key = PotentialKey(
                id=parameter.smirks, mult=i, associated_handler="LibraryCharges"
            )
            potential = Potential(parameters={"charge": from_openmm(charge)})
            matches[topology_key] = potential_key
            potentials[potential_key] = potential
        return matches, potentials
    @classmethod
    def _charge_increment_to_potentials(
        cls,
        atom_indices: Tuple[int, ...],
        parameter: ChargeIncrementModelHandler.ChargeIncrementType,
    ) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
        """
        Map a matched charge increment parameter to a set of potentials.
        """
        matches = {}
        potentials = {}
        for i, atom_index in enumerate(atom_indices):
            topology_key = TopologyKey(atom_indices=(atom_index,))
            potential_key = PotentialKey(
                id=parameter.smirks, mult=i, associated_handler="ChargeIncrementModel"
            )
            # TODO: Handle the cases where n - 1 charge increments have been defined,
            # maybe by implementing this in the TK?
            charge_increment = getattr(parameter, f"charge_increment{i + 1}")
            potential = Potential(
                parameters={"charge_increment": from_openmm(charge_increment)}
            )
            matches[topology_key] = potential_key
            potentials[potential_key] = potential
        return matches, potentials
    @classmethod
    def _find_slot_matches(
        cls,
        parameter_handler: Union["LibraryChargeHandler", "ChargeIncrementModelHandler"],
        reference_molecule: Molecule,
    ) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
        """
        Construct a slot and potential map for a slot based parameter handler.
        """
        # Ideally this would be made redundant by OpenFF TK #971
        # De-duplicate matches that differ only by atom ordering.
        unique_parameter_matches = {
            tuple(sorted(key)): (key, val)
            for key, val in parameter_handler.find_matches(
                reference_molecule.to_topology()
            ).items()
        }
        parameter_matches = {key: val for key, val in unique_parameter_matches.values()}
        matches, potentials = {}, {}
        # NOTE(review): ``parameter_matches`` is rebound inside the loop below;
        # safe because the iterator snapshot is taken up front, but confusing.
        for key, val in parameter_matches.items():
            parameter = val.parameter_type
            if isinstance(parameter_handler, LibraryChargeHandler):
                (
                    parameter_matches,
                    parameter_potentials,
                ) = cls._library_charge_to_potentials(key, parameter)
            elif isinstance(parameter_handler, ChargeIncrementModelHandler):
                (
                    parameter_matches,
                    parameter_potentials,
                ) = cls._charge_increment_to_potentials(key, parameter)
            else:
                raise NotImplementedError()
            matches.update(parameter_matches)
            potentials.update(parameter_potentials)
        return matches, potentials
    @classmethod
    def _find_am1_matches(
        cls,
        parameter_handler: Union["ToolkitAM1BCCHandler", ChargeIncrementModelHandler],
        reference_molecule: Molecule,
    ) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
        """Construct a slot and potential map for a charge model based parameter handler."""
        reference_molecule = copy.deepcopy(reference_molecule)
        reference_smiles = reference_molecule.to_smiles(
            isomeric=True, explicit_hydrogens=True, mapped=True
        )
        # ToolkitAM1BCCHandler has no partial_charge_method attribute; default to am1bcc.
        method = getattr(parameter_handler, "partial_charge_method", "am1bcc")
        partial_charges = cls._compute_partial_charges(
            reference_molecule, method=method
        )
        matches = {}
        potentials = {}
        for i, partial_charge in enumerate(partial_charges):
            # The mapped SMILES identifies the molecule; mult identifies the atom.
            potential_key = PotentialKey(
                id=reference_smiles, mult=i, associated_handler="ToolkitAM1BCC"
            )
            potentials[potential_key] = Potential(parameters={"charge": partial_charge})
            matches[TopologyKey(atom_indices=(i,))] = potential_key
        return matches, potentials
    @classmethod
    def _find_reference_matches(
        cls,
        parameter_handlers: Dict[str, "ElectrostaticsHandlerType"],
        reference_molecule: Molecule,
    ) -> Tuple[Dict[TopologyKey, PotentialKey], Dict[PotentialKey, Potential]]:
        """
        Construct a slot and potential map for a particular reference molecule and set of parameter handlers.
        """
        matches = {}
        potentials = {}
        expected_matches = {i for i in range(reference_molecule.n_atoms)}
        # Try handlers in precedence order; the first one that covers every
        # atom of the molecule wins.
        for handler_type in cls.parameter_handler_precedence():
            if handler_type not in parameter_handlers:
                continue
            parameter_handler = parameter_handlers[handler_type]
            slot_matches, am1_matches = None, None
            slot_potentials: Dict = {}
            am1_potentials: Dict = {}
            if handler_type in ["LibraryCharges", "ChargeIncrementModel"]:
                slot_matches, slot_potentials = cls._find_slot_matches(
                    parameter_handler, reference_molecule
                )
            if handler_type in ["ToolkitAM1BCC", "ChargeIncrementModel"]:
                am1_matches, am1_potentials = cls._find_am1_matches(
                    parameter_handler, reference_molecule
                )
            if slot_matches is None and am1_matches is None:
                raise NotImplementedError()
            elif slot_matches is not None and am1_matches is not None:
                # ChargeIncrementModel contributes both base AM1 charges (mult=0)
                # and slot-matched increments (mult=1); disambiguate via mult.
                am1_matches = {
                    TopologyKey(
                        atom_indices=topology_key.atom_indices, mult=0
                    ): potential_key
                    for topology_key, potential_key in am1_matches.items()
                }
                slot_matches = {
                    TopologyKey(
                        atom_indices=topology_key.atom_indices, mult=1
                    ): potential_key
                    for topology_key, potential_key in slot_matches.items()
                }
                matched_atom_indices = {
                    index for key in slot_matches for index in key.atom_indices
                }
                matched_atom_indices.intersection_update(
                    {index for key in am1_matches for index in key.atom_indices}
                )
            elif slot_matches is not None:
                matched_atom_indices = {
                    index for key in slot_matches for index in key.atom_indices
                }
            else:
                matched_atom_indices = {
                    index for key in am1_matches for index in key.atom_indices  # type: ignore[union-attr]
                }
            if matched_atom_indices != expected_matches:
                # Handle the case where a handler could not fully assign the charges
                # to the whole molecule.
                continue
            matches.update(slot_matches if slot_matches is not None else {})
            matches.update(am1_matches if am1_matches is not None else {})
            potentials.update(slot_potentials)
            potentials.update(am1_potentials)
            break
        found_matches = {index for key in matches for index in key.atom_indices}
        if found_matches != expected_matches:
            raise RuntimeError(
                f"{reference_molecule.to_smiles(explicit_hydrogens=False)} could "
                f"not be fully assigned charges."
            )
        return matches, potentials
    def store_matches(
        self,
        parameter_handler: Union[
            "ElectrostaticsHandlerType", List["ElectrostaticsHandlerType"]
        ],
        topology: Union["Topology", "_OFFBioTop"],
    ) -> None:
        """
        Populate self.slot_map with key-val pairs of slots and unique potential identifiers.
        """
        # Reshape the parameter handlers into a dictionary for easier referencing.
        parameter_handlers = {
            handler._TAGNAME: handler
            for handler in (
                parameter_handler
                if isinstance(parameter_handler, list)
                else [parameter_handler]
            )
        }
        self.potentials = dict()
        self.slot_map = dict()
        reference_molecules = [*topology.reference_molecules]
        for reference_molecule in reference_molecules:
            # Charges are computed once per unique (reference) molecule ...
            matches, potentials = self._find_reference_matches(
                parameter_handlers, reference_molecule
            )
            match_mults = defaultdict(set)
            for top_key in matches:
                match_mults[top_key.atom_indices].add(top_key.mult)
            self.potentials.update(potentials)
            # ... then propagated to every copy of it in the topology.
            for top_mol in topology._reference_molecule_to_topology_molecules[
                reference_molecule
            ]:
                for topology_particle in top_mol.atoms:
                    reference_index = topology_particle.atom.molecule_particle_index
                    topology_index = topology_particle.topology_particle_index
                    for mult in match_mults[(reference_index,)]:
                        top_key = TopologyKey(atom_indices=(topology_index,), mult=mult)
                        self.slot_map[top_key] = matches[
                            TopologyKey(atom_indices=(reference_index,), mult=mult)
                        ]
    def store_potentials(
        self,
        parameter_handler: Union[
            "ElectrostaticsHandlerType", List["ElectrostaticsHandlerType"]
        ],
    ) -> None:
        """
        Populate self.potentials with key-val pairs of [TopologyKey, PotentialKey].
        """
        # This logic is handled by ``store_matches`` as we may need to create potentials
        # to store depending on the handler type.
        pass
class SMIRNOFFVirtualSiteHandler(SMIRNOFFPotentialHandler):
    """
    A handler which stores the information necessary to construct virtual sites (virtual particles).
    """
    # NOTE(review): the tag is "Bonds" rather than something like "VirtualSites";
    # looks inherited/copy-pasted — confirm this is intentional.
    type: Literal["Bonds"] = "Bonds"
    expression: Literal[""] = ""
    virtual_site_key_topology_index_map: Dict["VirtualSiteKey", int] = Field(
        dict(),
        description="A mapping between VirtualSiteKey objects (stored analogously to TopologyKey objects"
        "in other handlers) and topology indices describing the associated virtual site",
    )
    exclusion_policy: Literal["parents"] = "parents"
    @classmethod
    def allowed_parameter_handlers(cls):
        """Return a list of allowed types of ParameterHandler classes."""
        return [VirtualSiteHandler]
    @classmethod
    def supported_parameters(cls):
        """Return a list of parameter attributes supported by this handler."""
        return ["distance", "outOfPlaneAngle", "inPlaneAngle"]
    def store_matches(
        self,
        parameter_handler: ParameterHandler,
        topology: Union["Topology", "_OFFBioTop"],
    ) -> None:
        """
        Populate self.slot_map with key-val pairs of [TopologyKey, PotentialKey].

        Differs from SMIRNOFFPotentialHandler.store_matches because each key
        can point to multiple potentials (?); each value in the dict is a
        list of parametertypes, whereas conventional handlers don't have lists
        """
        # Virtual sites are appended after all real atoms, so their topology
        # indices start at n_topology_atoms and count upwards.
        virtual_site_index = topology.n_topology_atoms
        parameter_handler_name = getattr(parameter_handler, "_TAGNAME", None)
        if self.slot_map:
            self.slot_map = dict()
        matches = parameter_handler.find_matches(topology)
        for key, val_list in matches.items():
            for val in val_list:
                virtual_site_key = VirtualSiteKey(
                    atom_indices=key,
                    type=val.parameter_type.type,
                    match=val.parameter_type.match,
                )
                potential_key = PotentialKey(
                    id=val.parameter_type.smirks,
                    associated_handler=parameter_handler_name,
                )
                self.slot_map[virtual_site_key] = potential_key
                self.virtual_site_key_topology_index_map[
                    virtual_site_key
                ] = virtual_site_index
                virtual_site_index += 1
    def store_potentials(self, parameter_handler: ParameterHandler) -> None:
        """Store VirtualSite-specific parameter-like data."""
        if self.potentials:
            self.potentials = dict()
        for potential_key in self.slot_map.values():
            smirks = potential_key.id
            parameter_type = parameter_handler.get_parameter({"smirks": smirks})[0]
            potential = Potential(
                parameters={
                    "distance": parameter_type.distance,
                },
            )
            # Angle attributes exist only on some virtual site parameter types.
            for attr in ["outOfPlaneAngle", "inPlaneAngle"]:
                if hasattr(parameter_type, attr):
                    potential.parameters.update(
                        {attr: from_openmm(getattr(parameter_type, attr))}
                    )
            self.potentials[potential_key] = potential
    def _get_local_frame_weights(self, virtual_site_key: "VirtualSiteKey"):
        # Per-parent-atom weights defining the local frame (origin, x, y).
        # NOTE(review): an unrecognised virtual site type leaves these names
        # unbound and the return raises UnboundLocalError — confirm callers only
        # pass the four types handled below.
        if virtual_site_key.type == "BondCharge":
            origin_weight = [1.0, 0.0]
            x_direction = [-1.0, 1.0]
            y_direction = [-1.0, 1.0]
        elif virtual_site_key.type == "MonovalentLonePair":
            origin_weight = [1, 0.0, 0.0]
            x_direction = [-1.0, 1.0, 0.0]
            y_direction = [-1.0, 0.0, 1.0]
        elif virtual_site_key.type == "DivalentLonePair":
            origin_weight = [0.0, 1.0, 0.0]
            x_direction = [0.5, -1.0, 0.5]
            y_direction = [1.0, -1.0, 1.0]
        elif virtual_site_key.type == "TrivalentLonePair":
            origin_weight = [0.0, 1.0, 0.0, 0.0]
            x_direction = [1 / 3, -1.0, 1 / 3, 1 / 3]
            y_direction = [1.0, -1.0, 0.0, 0.0]
        return origin_weight, x_direction, y_direction
    def _get_local_frame_position(self, virtual_site_key: "VirtualSiteKey"):
        # Cartesian offset of the virtual site within its local frame, derived
        # from the stored "distance" (and angles where the type defines them).
        potential_key = self.slot_map[virtual_site_key]
        potential = self.potentials[potential_key]
        if virtual_site_key.type == "BondCharge":
            distance = potential.parameters["distance"]
            local_frame_position = np.asarray([-1.0, 0.0, 0.0]) * distance
        elif virtual_site_key.type == "MonovalentLonePair":
            distance = potential.parameters["distance"]
            theta = potential.parameters["inPlaneAngle"].m_as(unit.radian)  # type: ignore
            psi = potential.parameters["outOfPlaneAngle"].m_as(unit.radian)  # type: ignore
            factor = np.array(
                [np.cos(theta) * np.cos(psi), np.sin(theta) * np.cos(psi), np.sin(psi)]
            )
            local_frame_position = factor * distance
        elif virtual_site_key.type == "DivalentLonePair":
            distance = potential.parameters["distance"]
            # NOTE(review): divalent sites read "inPlaneAngle" here but
            # store_potentials only copies angles the parameter type actually
            # has — confirm divalent types define inPlaneAngle (vs outOfPlaneAngle).
            theta = potential.parameters["inPlaneAngle"].m_as(unit.radian)  # type: ignore
            factor = np.asarray([-1.0 * np.cos(theta), 0.0, np.sin(theta)])
            local_frame_position = factor * distance
        elif virtual_site_key.type == "TrivalentLonePair":
            distance = potential.parameters["distance"]
            local_frame_position = np.asarray([-1.0, 0.0, 0.0]) * distance
        return local_frame_position
def library_charge_from_molecule(
    molecule: "Molecule",
) -> LibraryChargeHandler.LibraryChargeType:
    """Build a LibraryChargeType from an OpenFF Molecule that already carries partial charges."""
    if molecule.partial_charges is None:
        raise ValueError("Input molecule is missing partial charges.")
    # The mapped SMILES pins each charge to a specific atom in the pattern.
    return LibraryChargeHandler.LibraryChargeType(
        smirks=molecule.to_smiles(mapped=True),
        charge=molecule.partial_charges,
    )
def _get_interpolation_coeffs(fractional_bond_order, data):
x1, x2 = data.keys()
coeff1 = (x2 - fractional_bond_order) / (x2 - x1)
coeff2 = (fractional_bond_order - x1) / (x2 - x1)
return coeff1, coeff2
# Registry of the concrete SMIRNOFF potential handler classes in this module.
# NOTE(review): SMIRNOFFVirtualSiteHandler is defined above but not listed here —
# confirm whether its omission is intentional.
SMIRNOFF_POTENTIAL_HANDLERS = [
    SMIRNOFFBondHandler,
    SMIRNOFFConstraintHandler,
    SMIRNOFFAngleHandler,
    SMIRNOFFProperTorsionHandler,
    SMIRNOFFImproperTorsionHandler,
    SMIRNOFFvdWHandler,
    SMIRNOFFElectrostaticsHandler,
]
| 37.760292 | 112 | 0.60441 | 53,922 | 0.948212 | 0 | 0 | 20,036 | 0.352331 | 0 | 0 | 13,019 | 0.228938 |
ea52d5083a4473548f4cfa6f4f7011c183c68791 | 15,926 | py | Python | homeworks/HW5/task1_tkinter_sympy.py | vzavadskyi/made-robotics | 5bd913bcf26f5a35ee42348e636b7f8fde5d35f8 | [
"Apache-2.0"
] | null | null | null | homeworks/HW5/task1_tkinter_sympy.py | vzavadskyi/made-robotics | 5bd913bcf26f5a35ee42348e636b7f8fde5d35f8 | [
"Apache-2.0"
] | null | null | null | homeworks/HW5/task1_tkinter_sympy.py | vzavadskyi/made-robotics | 5bd913bcf26f5a35ee42348e636b7f8fde5d35f8 | [
"Apache-2.0"
] | null | null | null | from tkinter import *
import math
import heapq
from sympy import Point, Polygon
# from shapely.geometry import Point, Polygon
'''================= Your classes and methods ================='''
ids = []
class PriorityQueue:
def __init__(self):
self.heap = []
self.count = 0
def push(self, item, priority):
entry = (priority, self.count, item)
heapq.heappush(self.heap, entry)
self.count += 1
def pop(self):
(_, _, item) = heapq.heappop(self.heap)
return item
def isEmpty(self):
return len(self.heap) == 0
def update(self, item, priority):
for index, (p, c, i) in enumerate(self.heap):
if i == item:
if p <= priority:
break
del self.heap[index]
self.heap.append((priority, c, item))
heapq.heapify(self.heap)
break
else:
self.push(item, priority)
def rotate(points, angle, center):
    """Rotate (x, y) pairs by ``angle`` degrees around ``center``.

    Returns a new list of (x, y) tuples; the input is not modified.
    """
    theta = math.radians(angle)
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    cx, cy = center
    rotated = []
    for px, py in points:
        # Translate to the origin, rotate, translate back.
        dx = px - cx
        dy = py - cy
        rotated.append((dx * cos_t - dy * sin_t + cx, dx * sin_t + dy * cos_t + cy))
    return rotated
def get_polygon_from_position(position):
    """Robot footprint (100x200 px rectangle) as a sympy Polygon at pose (x, y, yaw)."""
    x, y, yaw = position
    corners = [(x - 50, y - 100), (x + 50, y - 100), (x + 50, y + 100), (x - 50, y + 100)]
    # Rotate the axis-aligned rectangle about its centre; rotate() expects degrees.
    # (Replaced per a suggestion from the project chat.)
    rotated = rotate(corners, yaw * 180 / math.pi, (x, y))
    return Polygon(*map(Point, rotated))
def get_polygon_from_obstacle(obstacle):
    """Sympy Polygon from a flat [x1, y1, x2, y2, x3, y3, x4, y4] coordinate list."""
    corners = [(obstacle[i], obstacle[i + 1]) for i in range(0, 8, 2)]
    return Polygon(*map(Point, corners))
def collides(position, obstacle, block_diag):
    """Check whether the robot footprint at ``position`` hits ``obstacle``.

    Returns False when the centres are farther apart than ``block_diag``
    (cheap distance pre-filter); otherwise returns sympy's list of boundary
    intersection points, which is truthy exactly when the outlines cross.
    NOTE(review): a footprint entirely *inside* an obstacle has no boundary
    intersections and would be reported as non-colliding — confirm whether
    that can occur with the step sizes used by the search.
    """
    x1, y1, _ = position
    # Obstacle centre: average of the four vertices (even slots x, odd slots y).
    x2 = sum([x for i, x in enumerate(obstacle) if i % 2 == 0]) / 4
    y2 = sum([x for i, x in enumerate(obstacle) if i % 2 == 1]) / 4
    if ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5 > block_diag:
        return False
    return get_polygon_from_position(position).intersection(get_polygon_from_obstacle(obstacle))
path_list = []  # oval ids drawn by the previous "go" run; deleted on the next run
class Window():
'''================= Your Main Function ================='''
    def go(self, event):
        """Plan and draw a path from the start block to the target block (UI event handler)."""
        print("Start position:", self.get_start_position())
        print("Target position:", self.get_target_position())
        print("Obstacles:", self.get_obstacles())
        global path_list
        # Erase the ovals left over from the previous run.
        for id in path_list:
            self.canvas.delete(id)
        # Two-stage search: a coarse rotation-only pass first, then a finer pass
        # with the combined distance+rotation heuristic continuing from it.
        path = self.star_search(self.naive_heuristic, [self.get_start_position()], 60)
        path = self.star_search(self.custom_heuristic, path, 40)
        global ids
        for point in path:
            id = self.canvas.create_oval(point[0] - 2, point[1] - 2, point[0] + 2, point[1] + 2, fill='yellow')
            ids.append(id)
        self.root.update()
        # NOTE(review): ``ids`` is never reset, so it accumulates stale (already
        # deleted) item ids across runs; canvas.delete tolerates them, but the
        # list grows without bound. Also note path_list and ids alias the same list.
        path_list = ids
def draw_path(self, path):
ids_path = []
for point in path:
id = self.canvas.create_oval(point[0] - 2, point[1] - 2, point[0] + 2, point[1] + 2, fill='yellow')
ids_path.append(id)
self.root.update()
for id in ids_path:
self.canvas.delete(id)
def star_search(self, heuristic, path, step_size):
p_queue = PriorityQueue()
history = set()
heur = heuristic(path[-1])
p_queue.push((path[-1], path), heur)
block_diag = (100 ** 2 + 200 ** 2) ** 0.5
while not p_queue.isEmpty():
node = p_queue.pop()
if self.target_reached(node[0], heuristic):
print('Route found')
break
for step in self.get_steps(node[0], step_size, block_diag):
if (round(step[0], 1), round(step[1], 1), round(step[2], 1)) in history:
continue
path = node[1][:]
path.append(step)
history.add((round(step[0], 1), round(step[1], 1), round(step[2], 1)))
prior = (len(path) + 1) * step_size
heur = heuristic(step)
p_queue.push((step, path), prior + heur)
self.draw_path(path)
return path
def path_parameters(self, position):
target_x, target_y, target_yaw = self.get_target_position()
x, y, yaw = position
dist_x = target_x - x
dist_y = target_y - y
distance = (dist_x ** 2 + dist_y ** 2) ** 0.5
path_yaw = math.atan(abs(dist_x / dist_y))
if (dist_x >= 0) and (dist_y >= 0):
path_yaw = math.pi - path_yaw
elif (dist_x <= 0) and (dist_y >= 0):
path_yaw = path_yaw - math.pi
elif (dist_x >= 0) and (dist_y <= 0):
path_yaw = path_yaw
elif (dist_x <= 0) and (dist_y <= 0):
path_yaw = -path_yaw
rotation_1 = abs(path_yaw - target_yaw)
if rotation_1 > math.pi:
rotation_1 = abs(rotation_1 - 2 * math.pi)
rotation_2 = abs(target_yaw - yaw)
if rotation_2 > math.pi:
rotation_2 = abs(rotation_2 - 2 * math.pi)
return distance, rotation_1, rotation_2
def naive_heuristic(self, position):
distance, rotation_1, rotation_2 = self.path_parameters(position)
rotation = rotation_1 + rotation_2
return 2000 * rotation
def custom_heuristic(self, position):
distance, rotation_1, rotation_2 = self.path_parameters(position)
rotation = rotation_1 + rotation_2
return distance + 450 * rotation + 100 * rotation
    def target_reached(self, position, heuristic):
        # Goal test reuses the search heuristic: close enough once its cost drops below 50.
        return heuristic(position) < 50
    def get_steps(self, position, step_size, block_diag):
        """Collision-free successor poses one ``step_size`` ahead of ``position``.

        Generates ``var_number`` candidate headings spread symmetrically over a
        fan of ``step_size`` degrees around the current yaw, steps forward along
        each, and keeps only candidates that hit no obstacle.
        """
        x, y, yaw = position
        # Fan width in radians; the step size doubles as the angular spread (degrees).
        view_width = math.pi * step_size / 180
        var_number = 3
        add_yaw = [view_width / var_number * i for i in range(var_number)]
        # Centre the fan on zero (note: the comprehension's ``x`` shadows the
        # pose x only inside the comprehension scope).
        add_yaw = [x - view_width / var_number * ((var_number - 1) / 2) for x in add_yaw]
        results = []
        for angl in add_yaw:
            result_yaw = yaw + angl
            # yaw == 0 points "up" on the canvas, hence sin for x and -cos for y.
            result_x = x + step_size * math.sin(result_yaw)
            result_y = y - step_size * math.cos(result_yaw)
            number_of_collisions = 0
            for obstacle in self.get_obstacles():
                if collides([result_x, result_y, result_yaw], obstacle, block_diag):
                    number_of_collisions += 1
            if number_of_collisions > 0:
                continue
            results.append([result_x, result_y, result_yaw])
        return results
'''================= Interface Methods ================='''
def get_obstacles(self):
obstacles = []
potential_obstacles = self.canvas.find_all()
for i in potential_obstacles:
if (i > 2):
coords = self.canvas.coords(i)
if coords:
obstacles.append(coords)
return obstacles
def get_start_position(self):
x, y = self.get_center(2) # Purple block has id 2
yaw = self.get_yaw(2)
return x, y, yaw
def get_target_position(self):
x, y = self.get_center(1) # Green block has id 1
yaw = self.get_yaw(1)
return x, y, yaw
def get_center(self, id_block):
coords = self.canvas.coords(id_block)
center_x, center_y = ((coords[0] + coords[4]) / 2, (coords[1] + coords[5]) / 2)
return [center_x, center_y]
def get_yaw(self, id_block):
center_x, center_y = self.get_center(id_block)
first_x = 0.0
first_y = -1.0
second_x = 1.0
second_y = 0.0
points = self.canvas.coords(id_block)
end_x = (points[0] + points[2]) / 2
end_y = (points[1] + points[3]) / 2
direction_x = end_x - center_x
direction_y = end_y - center_y
length = math.hypot(direction_x, direction_y)
unit_x = direction_x / length
unit_y = direction_y / length
cos_yaw = unit_x * first_x + unit_y * first_y
sign_yaw = unit_x * second_x + unit_y * second_y
if (sign_yaw >= 0):
return math.acos(cos_yaw)
else:
return -math.acos(cos_yaw)
def get_vertices(self, id_block):
return self.canvas.coords(id_block)
'''=================================================='''
def rotate(self, points, angle, center):
angle = math.radians(angle)
cos_val = math.cos(angle)
sin_val = math.sin(angle)
cx, cy = center
new_points = []
for x_old, y_old in points:
x_old -= cx
y_old -= cy
x_new = x_old * cos_val - y_old * sin_val
y_new = x_old * sin_val + y_old * cos_val
new_points.append(x_new + cx)
new_points.append(y_new + cy)
return new_points
def start_block(self, event):
widget = event.widget
widget.start_x = event.x
widget.start_y = event.y
def in_rect(self, point, rect):
x_start, x_end = min(rect[::2]), max(rect[::2])
y_start, y_end = min(rect[1::2]), max(rect[1::2])
if x_start < point[0] < x_end and y_start < point[1] < y_end:
return True
def motion_block(self, event):
widget = event.widget
for i in range(1, 10):
if widget.coords(i) == []:
break
if self.in_rect([event.x, event.y], widget.coords(i)):
coords = widget.coords(i)
id = i
break
res_cords = []
try:
coords
except:
return
for ii, i in enumerate(coords):
if ii % 2 == 0:
res_cords.append(i + event.x - widget.start_x)
else:
res_cords.append(i + event.y - widget.start_y)
widget.start_x = event.x
widget.start_y = event.y
widget.coords(id, res_cords)
widget.center = ((res_cords[0] + res_cords[4]) / 2, (res_cords[1] + res_cords[5]) / 2)
def draw_block(self, points, color):
x = self.canvas.create_polygon(points, fill=color)
return x
def distance(self, x1, y1, x2, y2):
return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5
def set_id_block(self, event):
widget = event.widget
for i in range(1, 10):
if widget.coords(i) == []:
break
if self.in_rect([event.x, event.y], widget.coords(i)):
coords = widget.coords(i)
id = i
widget.id_block = i
break
widget.center = ((coords[0] + coords[4]) / 2, (coords[1] + coords[5]) / 2)
def rotate_block(self, event):
angle = 0
widget = event.widget
if widget.id_block == None:
for i in range(1, 10):
if widget.coords(i) == []:
break
if self.in_rect([event.x, event.y], widget.coords(i)):
coords = widget.coords(i)
id = i
widget.id_block == i
break
else:
id = widget.id_block
coords = widget.coords(id)
wx, wy = event.x_root, event.y_root
try:
coords
except:
return
block = coords
center = widget.center
x, y = block[2], block[3]
cat1 = self.distance(x, y, block[4], block[5])
cat2 = self.distance(wx, wy, block[4], block[5])
hyp = self.distance(x, y, wx, wy)
if wx - x > 0:
angle = math.acos((cat1 ** 2 + cat2 ** 2 - hyp ** 2) / (2 * cat1 * cat2))
elif wx - x < 0:
angle = -math.acos((cat1 ** 2 + cat2 ** 2 - hyp ** 2) / (2 * cat1 * cat2))
new_block = self.rotate([block[0:2], block[2:4], block[4:6], block[6:8]], angle, center)
self.canvas.coords(id, new_block)
def delete_block(self, event):
widget = event.widget.children["!canvas"]
for i in range(1, 10):
if widget.coords(i) == []:
break
if self.in_rect([event.x, event.y], widget.coords(i)):
widget.coords(i, [0, 0])
break
def create_block(self, event):
block = [[0, 100], [100, 100], [100, 300], [0, 300]]
id = self.draw_block(block, "black")
self.canvas.tag_bind(id, "<Button-1>", self.start_block)
self.canvas.tag_bind(id, "<Button-3>", self.set_id_block)
self.canvas.tag_bind(id, "<B1-Motion>", self.motion_block)
self.canvas.tag_bind(id, "<B3-Motion>", self.rotate_block)
def make_draggable(self, widget):
widget.bind("<Button-1>", self.drag_start)
widget.bind("<B1-Motion>", self.drag_motion)
def drag_start(self, event):
widget = event.widget
widget.start_x = event.x
widget.start_y = event.y
def drag_motion(self, event):
widget = event.widget
x = widget.winfo_x() - widget.start_x + event.x + 200
y = widget.winfo_y() - widget.start_y + event.y + 100
widget.place(rely=0.0, relx=0.0, x=x, y=y)
def create_button_create(self):
button = Button(
text="New",
bg="#555555",
activebackground="blue",
borderwidth=0
)
button.place(rely=0.0, relx=0.0, x=200, y=100, anchor=SE, width=200, height=100)
button.bind("<Button-1>", self.create_block)
def create_green_block(self, center_x):
block = [[center_x - 50, 100],
[center_x + 50, 100],
[center_x + 50, 300],
[center_x - 50, 300]]
id = self.draw_block(block, "green")
self.canvas.tag_bind(id, "<Button-1>", self.start_block)
self.canvas.tag_bind(id, "<Button-3>", self.set_id_block)
self.canvas.tag_bind(id, "<B1-Motion>", self.motion_block)
self.canvas.tag_bind(id, "<B3-Motion>", self.rotate_block)
def create_purple_block(self, center_x, center_y):
block = [[center_x - 50, center_y - 300],
[center_x + 50, center_y - 300],
[center_x + 50, center_y - 100],
[center_x - 50, center_y - 100]]
id = self.draw_block(block, "purple")
self.canvas.tag_bind(id, "<Button-1>", self.start_block)
self.canvas.tag_bind(id, "<Button-3>", self.set_id_block)
self.canvas.tag_bind(id, "<B1-Motion>", self.motion_block)
self.canvas.tag_bind(id, "<B3-Motion>", self.rotate_block)
def create_button_go(self):
button = Button(
text="Go",
bg="#555555",
activebackground="blue",
borderwidth=0
)
button.place(rely=0.0, relx=1.0, x=0, y=200, anchor=SE, width=100, height=200)
button.bind("<Button-1>", self.go)
def run(self):
root = self.root
self.create_button_create()
self.create_button_go()
self.create_green_block(self.width / 2)
self.create_purple_block(self.width / 2, self.height)
root.bind("<Delete>", self.delete_block)
root.mainloop()
def __init__(self):
self.root = Tk()
self.root.title("")
self.width = self.root.winfo_screenwidth()
self.height = self.root.winfo_screenheight()
self.root.geometry('{}x{}'.format(self.width, self.height))
self.canvas = Canvas(self.root, bg="#777777", height=self.height, width=self.width)
self.canvas.pack()
# self.points = [0, 500, 500/2, 0, 500, 500]
if __name__ == "__main__":
run = Window()
run.run() | 33.599156 | 133 | 0.545649 | 14,207 | 0.890609 | 0 | 0 | 0 | 0 | 0 | 0 | 913 | 0.057234 |
ea52ff82a564cab19ee80cfe0f0d2458594e285e | 6,035 | py | Python | net/views/enhance.py | spake/astrometry.net | 12c76f4a44fe90a009eeb962f2ae28b0791829b8 | [
"BSD-3-Clause"
] | 1 | 2020-03-06T05:15:07.000Z | 2020-03-06T05:15:07.000Z | net/views/enhance.py | spake/astrometry.net | 12c76f4a44fe90a009eeb962f2ae28b0791829b8 | [
"BSD-3-Clause"
] | null | null | null | net/views/enhance.py | spake/astrometry.net | 12c76f4a44fe90a009eeb962f2ae28b0791829b8 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import pylab as plt
import numpy as np
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest, QueryDict
from django.shortcuts import render_to_response, get_object_or_404, redirect, render
from django.template import Context, RequestContext, loader
from astrometry.net.models import *
from astrometry.util.resample import *
from astrometry.net.tmpfile import *
def simple_histeq(pixels, getinverse=False, mx=256):
    """Histogram-equalize integer pixel values to quantiles in [0, 1].

    Each pixel value maps to the middle of the quantile band its value
    occupies (imagine jittering the pixels so there are no repeats).

    :param pixels: integer array of dtype uint8 or uint16
    :param getinverse: when True, also return a callable mapping a
        quantile in [0, 1] back to a (fractional) pixel value
    :param mx: number of histogram bins (one per possible pixel value)
    :return: equalized array, or (equalized array, inverse_fn) when
        ``getinverse`` is True
    """
    # Local import: interp1d was referenced but never imported at module
    # level, so the getinverse=True path raised NameError before.
    from scipy.interpolate import interp1d
    assert(pixels.dtype in [np.uint8, np.uint16])
    if not getinverse:
        h = np.bincount(pixels, minlength=mx)
        # pixel value -> quantile map: half of this value's own bin plus
        # all strictly lower bins, normalised by the total count.
        quant = h * 0.5
        cs = np.cumsum(h)
        quant[1:] += cs[:-1]
        quant /= float(cs[-1])
        return quant[pixels]
    # This inverse function has slightly weird properties -- it puts a ramp
    # across each pixel value, so inv(0.) may produce values as small as
    # -0.5, and inv(1.) may produce up to mx - 0.5.
    h = np.bincount(pixels.astype(int) + 1, minlength=mx + 1)
    quant = h[1:] * 0.5
    cs = np.cumsum(h)
    quant[1:] += cs[1:-1]
    quant /= float(cs[-1])
    # interp1d is fragile -- remove duplicate "yy" values that otherwise
    # cause nans.
    yy = cs / float(cs[-1])
    xx = np.arange(mx + 1) - 0.5
    I = np.append([0], 1 + np.flatnonzero(np.diff(yy)))
    yy = yy[I]
    xx = xx[I]
    xx[-1] = mx - 0.5
    inv = interp1d(yy, xx, kind='linear')
    return quant[pixels], inv
def enhanced_ui(req, user_image_id=None):
    """Serve the display-sized enhanced image for a UserImage id."""
    user_image = UserImage.objects.get(id=user_image_id)
    best_job = user_image.get_best_job()
    return enhanced_image(req, job_id=best_job.id, size='display')
def enhanced_image(req, job_id=None, size=None):
    """Render an 'enhanced' view of a job's image as a PNG HttpResponse.

    Finds EnhancedImage tiles (version 'v4') overlapping the job's
    calibration WCS, resamples them onto the image's pixel grid and either
    rank-matches the original JPEG pixels to the enhanced tiles (histogram
    specification) or, when no source JPEG is readable, composites the
    tiles directly.

    :param req: Django request (unused; kept for the URL-conf signature)
    :param job_id: primary key of the Job to render
    :param size: 'display' renders at display-image resolution
    :return: HttpResponse with Content-Type image/png
    """
    job = get_object_or_404(Job, pk=job_id)
    ui = job.user_image
    cal = job.calibration
    tan = cal.raw_tan
    nside, hh = get_healpixes_touching_wcs(tan)
    tt = 'hello %s, job %s, nside %s, hh %s' % (ui, job, nside, hh)
    ver = EnhanceVersion.objects.get(name='v4')
    print('Using', ver)
    EIms = EnhancedImage.objects.filter(version=ver)
    ens = []
    for hp in hh:
        en = EIms.filter(nside=nside, healpix=hp, version=ver)
        if len(en):
            ens.extend(list(en))
    # No tiles at this resolution: retry with progressively coarser
    # healpixes.
    for dnside in range(1, 3):
        if len(ens) == 0:
            # Integer division: plain '/' yields a float nside on Python 3
            # and the healpix filter below would never match.
            bignside = nside // (2 ** dnside)
            nil, hh = get_healpixes_touching_wcs(tan, nside=bignside)
            tt += 'bigger healpixes: %s: %s' % (bignside, hh)
            for hp in hh:
                en = EIms.filter(nside=bignside, healpix=hp)
                if len(en):
                    ens.extend(list(en))
    tt = tt + ', EnhancedImages: ' + ', '.join('%s' % e for e in ens)
    img = ui.image
    W, H = img.width, img.height
    tt = tt + 'image size %ix%i' % (W, H)
    targetwcs = tan.to_tanwcs()
    logmsg('wcs:', str(targetwcs))
    if size == 'display':
        scale = float(ui.image.get_display_image().width) / ui.image.width
        logmsg('scaling:', scale)
        targetwcs = targetwcs.scale(scale)
        logmsg('scaled wcs:', str(targetwcs))
        H, W = targetwcs.get_height(), targetwcs.get_width()
        img = ui.image.get_display_image()
    print(tt)
    ee = np.zeros((H, W, 3), np.float32)
    imgdata = None
    mapped = None
    df = img.disk_file
    ft = df.file_type
    fn = df.get_path()
    if 'JPEG' in ft:
        print('Reading', fn)
        I = plt.imread(fn)
        print('Read', I.shape, I.dtype)
        if len(I.shape) == 2:
            # Grayscale: replicate to three channels.
            I = I[:, :, np.newaxis].repeat(3, axis=2)
        assert(len(I.shape) == 3)
        if I.shape[2] > 3:
            # Drop alpha.  (The old code sliced I.shape here, a tuple --
            # that raised TypeError for 4-channel images.)
            I = I[:, :, :3]
        # vertical FLIP to match WCS
        I = I[::-1, :, :]
        imgdata = I
        mapped = np.zeros_like(imgdata)
    for en in ens:
        logmsg('Resampling %s' % en)
        wcs = en.wcs.to_tanwcs()
        try:
            Yo, Xo, Yi, Xi, nil = resample_with_wcs(targetwcs, wcs, [], 3)
        except OverlapError:
            # Tile does not actually overlap the target grid.
            continue
        enI, enW = en.read_files()
        if imgdata is not None:
            mask = (enW[Yi, Xi] > 0)
        for b in range(3):
            enI[:, :, b] /= enI[:, :, b].max()
            if imgdata is not None:
                idata = imgdata[Yo[mask], Xo[mask], b]
                # Rank-match: give the original pixels the brightness
                # ordering of the enhanced tile (histogram specification);
                # random jitter breaks ties between equal pixel values.
                DI = np.argsort((idata + np.random.uniform(size=idata.shape)) / 255.)
                EI = np.argsort(enI[Yi[mask], Xi[mask], b])
                Erank = np.zeros_like(EI)
                Erank[EI] = np.arange(len(Erank))
                mapped[Yo[mask], Xo[mask], b] = idata[DI[Erank]]
            else:
                # Might have to average the coverage here...
                ee[Yo, Xo, b] += enI[Yi, Xi, b]
    tempfn = get_temp_file(suffix='.png')
    if imgdata is not None:
        im = mapped
    else:
        im = np.clip(ee, 0., 1.)
    dpi = 100
    figsize = [x / float(dpi) for x in im.shape[:2][::-1]]
    plt.figure(figsize=figsize, frameon=False, dpi=dpi)
    plt.clf()
    plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
    plt.imshow(im, interpolation='nearest')
    plt.savefig(tempfn)
    print('Wrote', tempfn)
    # Read the PNG back in *binary* mode and close the handle (the old
    # code served a text-mode file object and leaked the descriptor).
    with open(tempfn, 'rb') as f:
        png_data = f.read()
    res = HttpResponse(png_data)
    res['Content-Type'] = 'image/png'
    return res
| 31.432292 | 93 | 0.565866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,265 | 0.209611 |
ea53a2f6728326b1b36786fedcd4897662372db2 | 1,636 | py | Python | libpycr/editor.py | JcDelay/pycr | f729e003473b421b76bc49c5d55d06d7086d63cc | [
"Apache-2.0"
] | 1 | 2015-03-12T10:34:38.000Z | 2015-03-12T10:34:38.000Z | libpycr/editor.py | JcDelay/pycr | f729e003473b421b76bc49c5d55d06d7086d63cc | [
"Apache-2.0"
] | null | null | null | libpycr/editor.py | JcDelay/pycr | f729e003473b421b76bc49c5d55d06d7086d63cc | [
"Apache-2.0"
] | null | null | null | """This module provides convenient use of EDITOR"""
import os
import subprocess
import tempfile
from libpycr.config import Config
def get_editor():
    """Return the user's editor ($EDITOR, then $VISUAL), or vi as fallback

    :rtype: str
    """
    for variable in ('EDITOR', 'VISUAL'):
        editor = os.environ.get(variable)
        if editor:
            return editor
    return 'vi'
def strip_comments(data, line_comment='#'):
    """Strip comment lines (and blank lines) from the input string.

    A line counts as a comment when it starts with ``line_comment``.

    :param data: multiline text to strip comments from
    :type data: str
    :param line_comment: the line comment delimiter; may be multi-character
        (e.g. ``//``) -- the old single-character comparison
        (``l[0] != line_comment``) never matched such delimiters
    :type line_comment: str
    :rtype: str
    """
    kept = [line for line in data.splitlines()
            if line and not line.startswith(line_comment)]
    return '\n'.join(kept).strip()
def raw_input_editor(default=None):
    """Request user input by firing an editor

    Like the built-in raw_input(), except that it uses a visual text editor
    for ease of editing.  The editor is taken from the ``core.editor``
    config value, falling back to $EDITOR / $VISUAL / vi.

    :param default: the initial content of the editor
    :type default: str | None
    :return: the edited text, stripped of surrounding whitespace
    :rtype: str
    """
    editor = Config.get('core.editor', get_editor())
    # delete=False: the file must survive the close() below so the editor
    # (a separate process) can open it by name; we unlink it manually.
    with tempfile.NamedTemporaryFile(mode='r+', delete=False) as tmpfile:
        if default:
            tmpfile.write(default)
        tmpfile.flush()
        # NOTE: We need to close then re-open the file after edition to ensure
        # that buffers are correctly emptied on all platforms.
        tmpfile.close()
        subprocess.check_call([editor, tmpfile.name])
        with open(tmpfile.name) as comment_file:
            comment = comment_file.read().strip()
        os.unlink(tmpfile.name)
    return comment
| 25.5625 | 79 | 0.654034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 830 | 0.507335 |