hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace44d7f3cbe7498c78b564bc4d3ce76f8bec018 | 848 | py | Python | app/pytorch/book/chp004/e1/dropout.py | yt7589/aqp | c9c1c79facdea7ace73e2421e8a5868d87fb58dd | [
"Apache-2.0"
] | null | null | null | app/pytorch/book/chp004/e1/dropout.py | yt7589/aqp | c9c1c79facdea7ace73e2421e8a5868d87fb58dd | [
"Apache-2.0"
] | null | null | null | app/pytorch/book/chp004/e1/dropout.py | yt7589/aqp | c9c1c79facdea7ace73e2421e8a5868d87fb58dd | [
"Apache-2.0"
] | null | null | null | #
import numpy as np
from layer import Layer
class Dropout(Layer):
    """A layer that randomly sets a fraction p of the output units of the
    previous layer to zero.

    Parameters:
    -----------
    p: float
        The probability that unit x is set to zero.
    """
    def __init__(self, p=0.2):
        self.p = p
        self._mask = None          # boolean keep-mask built during training
        self.input_shape = None
        self.n_units = None
        self.pass_through = True
        self.trainable = True
    def forward_pass(self, X, Y=None, training=True):
        """Apply dropout to X.

        During training each unit is kept with probability 1 - p via a
        random mask; at inference time activations are scaled by (1 - p)
        instead (classic, non-inverted dropout).
        """
        c = (1 - self.p)
        if training:
            # Keep a unit when its uniform draw exceeds p.
            self._mask = np.random.uniform(size=X.shape) > self.p
            c = self._mask
        a = X * c
        # Returns the activation twice -- presumably (output, cache) to
        # match the Layer.forward_pass interface; TODO confirm against Layer.
        return a, a
    def backward_pass(self, accum_grad):
        # Gradients flow only through the units that were kept.
        return accum_grad * self._mask
    def output_shape(self):
        # Dropout never changes the shape of its input.
        return self.input_shape
ace44e251c0fc803b237afd48a69cabbfbcd1db1 | 1,867 | py | Python | functions/enthalpy.py | paulhinta/mech-341 | 1e62fe9a53badf5d186912681fdb9420c3448c70 | [
"MIT"
] | null | null | null | functions/enthalpy.py | paulhinta/mech-341 | 1e62fe9a53badf5d186912681fdb9420c3448c70 | [
"MIT"
] | null | null | null | functions/enthalpy.py | paulhinta/mech-341 | 1e62fe9a53badf5d186912681fdb9420c3448c70 | [
"MIT"
] | null | null | null | import sympy as sp
from sympy import lambdify, nsolve
#Python functions to calculate the enthalpy & adiabatic flame temp of a species based on the a_n & b_n constants
#calculate enthalpy (absolute value) using sympy
def enthalpy(input: dict, T: float = 298.15):
    """Evaluate the absolute molar enthalpy of a species at temperature T.

    ``input`` maps coefficient names ("a1".."a7", "b1") to their values;
    the polynomial (which looks like a NASA-polynomial cp/R form -- TODO
    confirm against the data source) is integrated symbolically and
    evaluated at T (default 298.15 K).  Returns a plain float.
    """
    x = sp.Symbol("x")
    # Define the polynomial in the integration variable x.
    def f(x):
        return input["a1"]*x**-2 + input["a2"]*x**-1 + input["a3"] + input["a4"]*x + input["a5"]*x**2 + input["a6"]*x**3 + input["a7"]*x**4
    # Integrate the polynomial symbolically, then evaluate at T.
    int_f = sp.integrate(f(x), x)
    b1 = input["b1"]
    # 8.314 is the universal gas constant R [J/(mol K)].
    return 8.314*float(int_f.subs(x, T) + b1)
#Symbolically represent the enthalpy as a function of T
#this does the same as the above function, but returns a function of T rather than evaluating the definite integral
def enthalpy_function(input:dict, T=298.15):
    """Return the enthalpy expression with ``T`` substituted symbolically.

    Same integral as ``enthalpy`` above, but the result is not coerced to
    ``float`` -- passing a sympy Symbol as ``T`` yields a symbolic function
    of T (used by the flame-temperature solver).
    """
    x = sp.Symbol("x")
    # Polynomial in the integration variable x (same coefficient scheme
    # as enthalpy()).
    def f(x):
        return input["a1"]*x**-2 + input["a2"]*x**-1 + input["a3"] + input["a4"]*x + input["a5"]*x**2 + input["a6"]*x**3 + input["a7"]*x**4
    int_f = sp.integrate(f(x),x)
    b1 = input["b1"]
    # 8.314 is the universal gas constant R [J/(mol K)].
    return 8.314*(int_f.subs(x, T) + b1)
#this function uses the enthalpy as a function of T (above) to evaluate the AFT for question 1
#each instance of an aft will have water, nitrogen, and an excess gas (h2 or o2)
def aft_t1(water: dict, nitrogen: dict, extra: dict, er: float = 1.0):
    """Numerically solve for the adiabatic flame temperature (question 1).

    Each case has water, nitrogen, and one excess gas (H2 or O2); ``er``
    is the ratio parameter.  Returns the temperature found by sympy's
    numeric solver.
    """
    # Clamp the water coefficient at 1 when er >= 1 (the excess term then
    # carries the (er - 1) surplus).
    er_2 = er
    if er >=1:
        er_2 = 1
    T = sp.Symbol("T")
    # Energy balance expressed symbolically in T via enthalpy_function.
    def f(T):
        return 2*er_2*enthalpy_function(water, T) + 3.76*enthalpy_function(nitrogen, T) + abs((er - 1))*enthalpy_function(extra, T)
    eq = f(T)
    # Numerically solve the function of T: the integrated polynomials make
    # eq nonlinear, which sympy cannot solve symbolically in general.
    T_aft = nsolve(eq, 1)
    return T_aft
ace44fb1ca90f8c4dc3ae5c44dd775528ba3708c | 3,382 | py | Python | speed_extractor.py | SecSuperN0va/automated-speedtest | 9f602c3f6e9adbb9e488949a1de2eae269c8aa3e | [
"MIT"
] | 2 | 2018-07-19T13:56:14.000Z | 2018-07-19T14:10:52.000Z | speed_extractor.py | SecSuperN0va/automated-speedtest | 9f602c3f6e9adbb9e488949a1de2eae269c8aa3e | [
"MIT"
] | 3 | 2018-07-19T10:34:07.000Z | 2019-10-01T19:08:31.000Z | speed_extractor.py | SecSuperN0va/automated-speedtest | 9f602c3f6e9adbb9e488949a1de2eae269c8aa3e | [
"MIT"
] | 3 | 2019-10-01T19:34:55.000Z | 2019-10-19T07:51:47.000Z | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
import time
import datetime
import os
import pandas as pd
from pandas import ExcelWriter
import urllib
# --- Speed-test scraper (Python 2 / Selenium) -------------------------------
# Loads previous results from jio_speed_test.xls, then loops forever:
# run a speedtest.net test in Firefox, scrape the figures, download the
# shareable result image, append a row to the spreadsheet, sleep 2 minutes.
pd.set_option('display.max_columns', 10)
#df = pd.DataFrame(columns=['Result_ID', 'Date', 'Time', 'Ping', 'Download_Speed', 'Upload_Speed', 'Server_Name', 'Server_Place', 'Result_URL'])
df = pd.read_excel('jio_speed_test.xls')
print df
browser = webdriver.Firefox()
while True:
    # Load the speed test page; on timeout just retry the whole cycle.
    try:
        browser.set_page_load_timeout(200)
        browser.get("http://www.speedtest.net/")
    except TimeoutException as ex:
        print("Exception has been thrown. " + str(ex))
        continue
    # Poll until the "GO" button is present, then start the test.
    goClick = None
    while not goClick:
        try:
            goClick = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/div/div/div[3]/div[1]/div[1]/a/span[3]')
        except NoSuchElementException:
            time.sleep(2)
    time.sleep(2)
    goClick.click()
    # Wait (~250 one-second polls) for the result link to appear; if the
    # test never finishes, give up and restart the outer loop.
    j=0
    resultID = None
    while not resultID:
        if j>= 250:
            break
        try:
            resultID = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/div/div/div[3]/div[1]/div[3]/div/div[3]/div/div[1]/div[1]/div/div[2]/div[2]/a')
        except NoSuchElementException:
            time.sleep(1)
            j = j + 1
    if j>= 250:
        continue
    time.sleep(2)
    # Re-fetch the result element after the extra wait -- presumably to
    # avoid a stale element reference; TODO confirm.
    resultID = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/div/div/div[3]/div[1]/div[3]/div/div[3]/div/div[1]/div[1]/div/div[2]/div[2]/a')
    print resultID.text
    # Scrape the individual metrics from the results panel.
    downspeed = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/div/div/div[3]/div[1]/div[3]/div/div[3]/div/div[1]/div[2]/div[2]/div/div[2]/span').text
    print downspeed
    upspeed = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/div/div/div[3]/div[1]/div[3]/div/div[3]/div/div[1]/div[2]/div[3]/div/div[2]/span').text
    print upspeed
    pingg = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/div/div/div[3]/div[1]/div[3]/div/div[3]/div/div[1]/div[2]/div[1]/div/div[2]/span').text
    print pingg
    server_name = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/div/div/div[3]/div[1]/div[3]/div/div[4]/div/div[3]/div/div/div[2]/a').text
    print server_name
    server_place = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/div/div/div[3]/div[1]/div[3]/div/div[4]/div/div[3]/div/div/div[3]/span').text
    print server_place
    # Timestamp used in both the screenshot filename and the spreadsheet row.
    ts = time.time()
    stamp_date = datetime.datetime.fromtimestamp(ts).strftime('%d-%m-%Y')
    stamp_time = datetime.datetime.fromtimestamp(ts).strftime('%H-%M')
    # Download the shareable result image from speedtest.net.
    urllib.urlretrieve("http://www.speedtest.net/result/"+resultID.text+".png", "jio_speedtest_ResultID-"+resultID.text+"_"+stamp_date+"_"+stamp_time+".png")
    #df = pd.DataFrame(columns=['Result_ID', 'Date', 'Time', 'Ping', 'Download_Speed', 'Upload_Speed', 'Server_Name', 'Server_Place'])
    df = df.append({'Result_ID':resultID.text, 'Date':stamp_date, 'Time':stamp_time,
                    'Ping': pingg, 'Download_Speed': downspeed, 'Upload_Speed': upspeed,
                    'Server_Name': server_name, 'Server_Place': server_place,
                    'Result_URL': "http://www.speedtest.net/result/"+resultID.text+".png"}, ignore_index=True)
    print df
    # Rewrite the whole workbook each cycle so results survive a crash.
    writer = ExcelWriter('jio_speed_test.xls')
    df.to_excel(writer, 'Sheet1')
    writer.save()
    time.sleep(120)
    #browser.find_element_by_xpath("/html/body/div[3]/div[2]/div/div/div/div[3]/div[1]/div[1]/a/span[3]").click()
| 35.229167 | 160 | 0.710526 |
ace44fd7fe1688832d6f9a34e635f100e5d90d18 | 1,025 | py | Python | tests/test_json_encoder_class.py | trustcruit/tclambda | 4413944243257d36088805d8e2f97b0d8b56b87d | [
"MIT"
] | null | null | null | tests/test_json_encoder_class.py | trustcruit/tclambda | 4413944243257d36088805d8e2f97b0d8b56b87d | [
"MIT"
] | 613 | 2019-06-05T10:49:01.000Z | 2021-08-03T03:23:18.000Z | tests/test_json_encoder_class.py | trustcruit/tclambda | 4413944243257d36088805d8e2f97b0d8b56b87d | [
"MIT"
] | null | null | null | import json
import os
import unittest
from decimal import Decimal
import tclambda
from .extras import FunctionBuilder, LambdaContext
# Environment toggle for the integration test below: when unset, the
# test is skipped (see the skipIf decorator in TestHandler).
TC_THIS_BUCKET = os.getenv("TC_THIS_BUCKET")
def decimal_function():
    """Fixture payload mixing Decimal, int and float values."""
    payload = {
        "decimal": Decimal("1.234"),
        "integer": 1234,
        "float": 1.234,
    }
    return payload
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that serializes Decimal values as their string form."""

    def default(self, obj):
        # Stringify Decimals; defer everything else to the base encoder
        # (which raises TypeError for unsupported types).
        return str(obj) if isinstance(obj, Decimal) else super().default(obj)
class TestHandler(unittest.TestCase):
    """Integration test: a handler registered with DecimalEncoder must
    round-trip Decimal values as strings through tclambda."""

    def setUp(self):
        # Fresh handler per test, serialising results via DecimalEncoder.
        self.app = tclambda.LambdaHandler(json_encoder_class=DecimalEncoder)
        self.app.register()(decimal_function)

    @unittest.skipIf(TC_THIS_BUCKET is None, "TC_THIS_BUCKET is None")
    def test_call_handler_sqs_ping(self):
        # Drive the handler with a fake SQS event, then poll for the result;
        # skipped unless TC_THIS_BUCKET (real backing resources) is set.
        f = FunctionBuilder("decimal_function")
        self.app(f.sqs, LambdaContext(function_name="tclambda-test"))
        self.assertEqual(
            f.result.result(delay=1, max_attempts=3),
            {"decimal": "1.234", "integer": 1234, "float": 1.234},
        )
| 27.702703 | 76 | 0.682927 |
ace451316b9b057ef48b99513dae8cf9071c5712 | 3,366 | py | Python | src/twitter_app/util/twitter_api_v1_1/standard/twitter_developer_util.py | silverag-corgi/twitter-lib-for-me | ee5e85ecbe87d91b7c7638225a806d4185fb6fd9 | [
"MIT"
] | null | null | null | src/twitter_app/util/twitter_api_v1_1/standard/twitter_developer_util.py | silverag-corgi/twitter-lib-for-me | ee5e85ecbe87d91b7c7638225a806d4185fb6fd9 | [
"MIT"
] | 1 | 2022-03-18T09:24:31.000Z | 2022-03-26T23:34:44.000Z | src/twitter_app/util/twitter_api_v1_1/standard/twitter_developer_util.py | silverag-corgi/twitter-lib-for-me | ee5e85ecbe87d91b7c7638225a806d4185fb6fd9 | [
"MIT"
] | 1 | 2022-03-27T10:12:46.000Z | 2022-03-27T10:12:46.000Z | import json
from datetime import datetime
from logging import Logger
from typing import Any, Optional
import python_lib_for_me as pyl
import tweepy
def show_rate_limit(
        api: tweepy.API,
        resource_family: str,
        endpoint: str
    ) -> None:
    '''
    Show the current Twitter API rate limit.

    Args:
        api (tweepy.API)        : API client
        resource_family (str)   : resource family (e.g. "lists")
        endpoint (str)          : endpoint (e.g. "/lists/members")

    Returns:
        -

    Notes:
        - Auth: user auth (OAuth 1.0a) or app auth (OAuth 2.0)
        - Endpoint: GET application/rate_limit_status
        - Rate limit: 180 requests per 15 minutes for either auth mode;
          exceeding it incurs a 15 minute wait
        - Only accurate for GET methods of the Twitter API Standard v1.1
        - With empty resource_family/endpoint, dumps the full limit table

    References:
        - https://developer.twitter.com/en/docs/twitter-api/v1/developer-utilities/rate-limit-status/overview
        - https://developer.twitter.com/en/docs/twitter-api/v1/developer-utilities/rate-limit-status/api-reference/get-application-rate_limit_status
        - https://developer.twitter.com/en/docs/twitter-api/v1/rate-limits
    '''  # noqa: E501

    lg: Optional[Logger] = None

    # Verify the authentication method (was: `isinstance(...) == False`).
    if not isinstance(api.auth, (tweepy.OAuth1UserHandler, tweepy.OAuth2AppHandler)):
        raise pyl.CustomError(
            f'この認証方式ではTwitterAPIにアクセスできません。(Auth:{type(api.auth)})')

    try:
        lg = pyl.get_logger(__name__)

        # Show the limit for the requested endpoint, or dump the whole
        # table when either selector is empty.
        rate_limits: Any = api.rate_limit_status()
        if not(resource_family == '' or endpoint == ''):
            rate_limit: dict = rate_limits['resources'][resource_family][endpoint]
            remaining: int = rate_limit['remaining']
            limit: int = rate_limit['limit']
            reset_datetime: datetime = datetime.fromtimestamp(rate_limit['reset'])
            pyl.log_inf(lg, f'リクエスト回数(15分間隔):{remaining}/{limit}、' +
                            f'制限リセット時刻:{reset_datetime} ' +
                            f'(resource_family:{resource_family}, endpoint:{endpoint})')
        else:
            pyl.log_inf(lg, f'レート制限:\n{json.dumps(rate_limits, indent=2)}')
    except Exception as e:
        if lg is not None:
            pyl.log_err(lg, f'レート制限表示に失敗しました。' +
                            f'(resource_family:{resource_family}, endpoint:{endpoint})')
        # Bare re-raise preserves the original traceback (was: `raise(e)`).
        raise

    return None
def show_rate_limit_of_lists_members(api: tweepy.API) -> None:
    '''Show the rate limit for GET lists/members.'''
    show_rate_limit(api, 'lists', '/lists/members')
    return None
def show_rate_limit_of_friends_list(api: tweepy.API) -> None:
    '''Show the rate limit for GET friends/list.'''
    show_rate_limit(api, 'friends', '/friends/list')
    return None
def show_rate_limit_of_followers_list(api: tweepy.API) -> None:
    '''Show the rate limit for GET followers/list.'''
    show_rate_limit(api, 'followers', '/followers/list')
    return None
def show_rate_limit_of_search_tweets(api: tweepy.API) -> None:
    '''Show the rate limit for GET search/tweets.'''
    show_rate_limit(api, 'search', '/search/tweets')
    return None
| 32.679612 | 153 | 0.578134 |
ace4515ea2941d57132740dcf73f68fddf987dfc | 1,010 | py | Python | book/src/ch10/service/libs/storage/src/storage/converters.py | zangyuchen2008/Clean-Code-in-Python-Second-Edition | 0be2e41f6cf7322e12ec55d76135ff398df61b4a | [
"MIT"
] | 133 | 2016-07-22T15:16:16.000Z | 2022-03-29T22:39:40.000Z | book/src/ch10/service/libs/storage/src/storage/converters.py | zangyuchen2008/Clean-Code-in-Python-Second-Edition | 0be2e41f6cf7322e12ec55d76135ff398df61b4a | [
"MIT"
] | 137 | 2021-01-05T11:21:04.000Z | 2022-03-31T11:10:11.000Z | book/src/ch10/service/libs/storage/src/storage/converters.py | zangyuchen2008/Clean-Code-in-Python-Second-Edition | 0be2e41f6cf7322e12ec55d76135ff398df61b4a | [
"MIT"
] | 41 | 2020-12-29T04:46:14.000Z | 2022-03-20T22:36:17.000Z | """Convert the row resulting from a query to the Entities object."""
from .status import (
DeliveryOrder,
DispatchedOrder,
OrderDelivered,
OrderInTransit,
)
def build_dispatched(row):
    """Build a DispatchedOrder status from a query row."""
    return DispatchedOrder(row.dispatched_at)
def build_in_transit(row):
    """Build an OrderInTransit status from a query row."""
    return OrderInTransit(row.location)
def build_delivered(row):
    """Build an OrderDelivered status from a query row."""
    return OrderDelivered(row.delivered_at)
# Maps the single-character status code found on a query row to the
# factory that builds the matching status object.
_BUILD_MAPPING = {
    "d": build_dispatched,
    "t": build_in_transit,
    "f": build_delivered,
}
class WrappedRow:
    """Wrap a mapping-like query row so its keys read as attributes."""

    def __init__(self, row):
        self._row = row

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails, i.e. for row keys.
        return self._row[name]
class OrderNotFoundError(Exception):
    """Raised when the requested delivery order does not appear listed."""
def build_from_row(delivery_id, row):
    """Convert a raw query row into a DeliveryOrder.

    Raises:
        OrderNotFoundError: if ``row`` is None (no matching order).
        KeyError: if ``row.status`` is not one of the mapped codes --
            TODO confirm callers expect this to propagate unwrapped.
    """
    if row is None:
        raise OrderNotFoundError(f"{delivery_id} was not found")

    row = WrappedRow(row)
    status_builder = _BUILD_MAPPING[row.status]
    status = status_builder(row)
    return DeliveryOrder(delivery_id, status)
| 20.612245 | 68 | 0.707921 |
ace452ef6581f19e6368c8706615399bbf11cf5b | 4,930 | py | Python | python_developer_tools/cv/PyTorch_Networks/Attention/ECA-Net.py | carlsummer/python_developer_tools | a8c4365b7cc601cda55648cdfd8c0cb1faae132f | [
"Apache-2.0"
] | 32 | 2021-06-21T04:49:48.000Z | 2022-03-29T05:46:59.000Z | python_developer_tools/cv/PyTorch_Networks/Attention/ECA-Net.py | HonestyBrave/python_developer_tools | fc0dcf5c4ef088e2e535206dc82f09bbfd01f280 | [
"Apache-2.0"
] | 1 | 2021-11-12T03:45:55.000Z | 2021-11-12T03:45:55.000Z | python_developer_tools/cv/PyTorch_Networks/Attention/ECA-Net.py | HonestyBrave/python_developer_tools | fc0dcf5c4ef088e2e535206dc82f09bbfd01f280 | [
"Apache-2.0"
] | 10 | 2021-06-03T08:05:05.000Z | 2021-12-13T03:10:42.000Z | # !/usr/bin/env python
# -- coding: utf-8 --
# @Time : 2020/10/10 16:45
# @Author : liumin
# @File : ECA-Net.py
import torch
import torch.nn as nn
import torchvision
from math import log
def Conv1(in_planes, places, stride=2):
    """Stem block: 7x7 conv (given stride) + BN + ReLU + 3x3 max-pool (stride 2)."""
    stem = nn.Sequential(
        nn.Conv2d(in_planes, places, kernel_size=7, stride=stride, padding=3, bias=False),
        nn.BatchNorm2d(places),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
    )
    return stem
class SE_Module(nn.Module):
    """Squeeze-and-Excitation channel attention: global-average-pool the
    feature map, pass through a bottleneck MLP, and rescale channels."""

    def __init__(self, channel, ratio=16):
        super(SE_Module, self).__init__()
        self.squeeze = nn.AdaptiveAvgPool2d(1)
        self.excitation = nn.Sequential(
            nn.Linear(in_features=channel, out_features=channel // ratio),
            nn.ReLU(inplace=True),
            nn.Linear(in_features=channel // ratio, out_features=channel),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        # (B, C, H, W) -> (B, C) channel descriptors.
        pooled = self.squeeze(x).view(batch, channels)
        # Per-channel gates in (0, 1), broadcast back over H and W.
        gates = self.excitation(pooled).view(batch, channels, 1, 1)
        return x * gates.expand_as(x)
class ECA_Module(nn.Module):
    """Efficient Channel Attention: a 1-D convolution over the pooled
    channel descriptors produces per-channel gates (no dimensionality
    reduction, unlike SE)."""

    def __init__(self, channel, gamma=2, b=1):
        super(ECA_Module, self).__init__()
        self.gamma = gamma
        self.b = b
        # Adaptive kernel size: odd value derived from the channel count.
        t = int(abs(log(channel, 2) + self.b) / self.gamma)
        k = t if t % 2 else t + 1
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size=k, padding=k // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        squeezed = self.avg_pool(x)  # (B, C, 1, 1)
        # Treat channels as a 1-D sequence for the local cross-channel conv.
        attn = self.conv(squeezed.squeeze(-1).transpose(-1, -2))
        attn = self.sigmoid(attn.transpose(-1, -2).unsqueeze(-1))
        return x * attn.expand_as(x)
class ECA_ResNetBlock(nn.Module):
    """Bottleneck residual block with Efficient Channel Attention.

    Fix: the original block never used ECA_Module, so the "ECA" network
    was a plain ResNet.  Attention is now applied to the bottleneck
    output before the residual addition (the SE/ECA-ResNet placement).
    """
    def __init__(self,in_places,places, stride=1,downsampling=False, expansion = 4):
        super(ECA_ResNetBlock,self).__init__()
        self.expansion = expansion
        self.downsampling = downsampling

        # 1x1 reduce -> 3x3 (carries the stride) -> 1x1 expand.
        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_channels=in_places,out_channels=places,kernel_size=1,stride=1, bias=False),
            nn.BatchNorm2d(places),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=places, out_channels=places, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(places),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=places, out_channels=places*self.expansion, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(places*self.expansion),
        )
        # Channel attention over the expanded feature map.
        self.eca = ECA_Module(places*self.expansion)

        if self.downsampling:
            # Project the residual to match channels/stride of the main branch.
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels=in_places, out_channels=places*self.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(places*self.expansion)
            )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        residual = x
        out = self.bottleneck(x)
        out = self.eca(out)
        if self.downsampling:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)
        return out
class ECA_ResNet(nn.Module):
    """ResNet-style backbone built from ECA_ResNetBlock bottlenecks.

    ``blocks`` gives the number of bottleneck blocks per stage,
    e.g. [3, 4, 6, 3] for the ResNet-50 layout.
    """
    def __init__(self,blocks, num_classes=1000, expansion = 4):
        super(ECA_ResNet,self).__init__()
        # Bottleneck expansion: each stage outputs places * 4 channels.
        self.expansion = expansion

        # 7x7 stem + max-pool (see Conv1 above).
        self.conv1 = Conv1(in_planes = 3, places= 64)

        self.layer1 = self.make_layer(in_places = 64, places= 64, block=blocks[0], stride=1)
        self.layer2 = self.make_layer(in_places = 256,places=128, block=blocks[1], stride=2)
        self.layer3 = self.make_layer(in_places=512,places=256, block=blocks[2], stride=2)
        self.layer4 = self.make_layer(in_places=1024,places=512, block=blocks[3], stride=2)

        # Fixed 7x7 average pool + 2048-wide head: assumes 224x224 input
        # (see the __main__ smoke test) -- TODO confirm for other sizes.
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(2048,num_classes)

        # Kaiming init for convs, unit-weight/zero-bias init for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def make_layer(self, in_places, places, block, stride):
        # First block of the stage downsamples/projects; the remaining
        # block - 1 blocks keep the shape.
        layers = []
        layers.append(ECA_ResNetBlock(in_places, places,stride, downsampling =True))
        for i in range(1, block):
            layers.append(ECA_ResNetBlock(places*self.expansion, places))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        # Flatten to (batch, 2048) before the classifier head.
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def ECA_ResNet50():
    """ECA-ResNet with the standard ResNet-50 stage layout [3, 4, 6, 3]."""
    return ECA_ResNet([3, 4, 6, 3])
# Smoke test: build the model and run one ImageNet-sized batch through it.
if __name__=='__main__':
    model = ECA_ResNet50()
    print(model)
    input = torch.randn(1, 3, 224, 224)
    out = model(input)
    print(out.shape)
| 33.767123 | 127 | 0.604665 |
ace4535d8729f3340bba60277dac22251c349852 | 6,955 | py | Python | jp_game/test_game_flow.py | Scuba-Chris/game_of_greed | e8facbbdd974baae4d300e940064c2eb676142ed | [
"MIT"
] | null | null | null | jp_game/test_game_flow.py | Scuba-Chris/game_of_greed | e8facbbdd974baae4d300e940064c2eb676142ed | [
"MIT"
] | null | null | null | jp_game/test_game_flow.py | Scuba-Chris/game_of_greed | e8facbbdd974baae4d300e940064c2eb676142ed | [
"MIT"
] | null | null | null | import pytest
from jp_game_of_greed import Game
###############################################
##### Day 1 ######
###############################################
@pytest.mark.skip('superceded by later day test')
def test_flow_yes():
    """Day-1 flow: answering "y" greets the player and defers gameplay."""
    prints = ['Welcome to Game of Greed','Check back tomorrow :D']
    prompts = ['Wanna play? ']
    responses = ['y']

    def mock_print(*args):
        # Consume and check the next expected print line.
        if len(prints):
            current_print = prints.pop(0)
            assert args[0] == current_print

    def mock_input(*args):
        # Check the prompt, then feed back the next scripted response.
        if len(prompts):
            current_prompt = prompts.pop(0)
            assert args[0] == current_prompt
        if len(responses):
            current_response = responses.pop(0)
            return current_response

    game = Game(mock_print, mock_input)
    game.play()
def test_flow_no():
    """Declining the initial prompt exits politely without rolling."""
    flow = {
        'prints' : ['Welcome to Game of Greed','OK. Maybe later'],
        'prompts' : ['Wanna play? '],
        'responses' : ['no'],
    }
    mp = MockPlayer(**flow)
    game = Game(mp.mock_print, mp.mock_input)
    # NOTE(review): later tests patch `game._do_roll`; `do_roll` here looks
    # like a legacy attribute name -- confirm against the Game implementation.
    game.do_roll = mp.mock_roll
    game.play()
    assert mp.mop_up()
###############################################
##### Day 2 ######
###############################################
def test_one_round():
    """One full round: keep a single 1 (100 pts) on each of five rolls,
    then decline, banking 500 points."""
    flow = {
        'prints' : [
            'Welcome to Game of Greed',
            'Rolling 6 dice',
            'You rolled [1, 2, 2, 3, 3, 4]',
            'You can bank 100 points or try for more',
            'You have 5 dice remaining',
            'Rolling 5 dice',
            'You rolled [1, 2, 2, 3, 3]',
            'You can bank 200 points or try for more',
            'You have 4 dice remaining',
            'Rolling 4 dice',
            'You rolled [1, 2, 2, 3]',
            'You can bank 300 points or try for more',
            'You have 3 dice remaining',
            'Rolling 3 dice',
            'You rolled [1, 2, 2]',
            'You can bank 400 points or try for more',
            'You have 2 dice remaining',
            'Rolling 2 dice',
            'You rolled [1, 2]',
            'You can bank 500 points or try for more',
            'You have 1 dice remaining',
            'You banked 500 points in round 1',
            'You have 500 points total',
            'Thanks for playing!'
        ],
        'prompts' : [
            'Wanna play? ',
            'Enter dice to keep: ',
            'Roll again? ',
            'Enter dice to keep: ',
            'Roll again? ',
            'Enter dice to keep: ',
            'Roll again? ',
            'Enter dice to keep: ',
            'Roll again? ',
            'Enter dice to keep: ',
            'Roll again? ',
        ],
        'responses' : [
            'y','1','y','1','y','1','y','1','y','1','n'
        ],
        'rolls' : [
            [1, 2, 2, 3, 3, 4],
            [1, 2, 2, 3, 3],
            [1, 2, 2, 3],
            [1, 2, 2],
            [1, 2],
        ]
    }
    mp = MockPlayer(**flow)
    game = Game(mp.mock_print, mp.mock_input)
    # Patch the protected roll hook so the dice sequence is deterministic.
    game._do_roll = mp.mock_roll
    game.play(1)
    assert mp.mop_up()
def test_flow_scenario_1():
    """Keep a 1, roll again, then keep a triple-3 plus a 1 and stop (500 pts)."""
    flow = {
        'prints' : [
            'Welcome to Game of Greed',
            'Rolling 6 dice',
            'You rolled [1, 2, 3, 4, 1, 2]',
            'You can bank 100 points or try for more',
            'You have 5 dice remaining',
            'Rolling 5 dice',
            'You rolled [3, 3, 3, 4, 1]',
            'You can bank 500 points or try for more',
            'You have 1 dice remaining',
            'You banked 500 points in round 1',
            'You have 500 points total',
            'Thanks for playing!'
        ],
        'prompts' : [
            'Wanna play? ',
            'Enter dice to keep: ',
            'Roll again? '
        ],
        'responses' : ['y','1','y','3331','n'],
        'rolls' : [[1,2,3,4,1,2],[3,3,3,4,1]],
    }
    mp = MockPlayer(**flow)
    game = Game(mp.mock_print, mp.mock_input)
    game._do_roll = mp.mock_roll
    game.play(1)
    assert mp.mop_up()
def test_flow_scenario_2():
    """Four 1s plus a 5 on the first roll bank 2050 points immediately."""
    flow = {
        'prints' : [
            'Welcome to Game of Greed',
            'Rolling 6 dice',
            'You rolled [1, 1, 1, 1, 5, 2]',
            'You can bank 2050 points or try for more',
            'You have 1 dice remaining',
            'You banked 2050 points in round 1',
            'You have 2050 points total',
            'Thanks for playing!',
        ],
        'prompts' : [
            'Wanna play? ',
            'Enter dice to keep: ',
            'Roll again? '
        ],
        'responses' : ['y','11115','n'],
        'rolls' : [[1,1,1,1,5,2],],
    }
    mp = MockPlayer(**flow)
    game = Game(mp.mock_print, mp.mock_input)
    game._do_roll = mp.mock_roll
    game.play(1)
    assert mp.mop_up()
def test_flow_zilch():
    """A roll with no scoring dice prints the zilch message."""
    flow = {
        'prints' : [
            'Rolling 6 dice',
            'You rolled [2, 2, 3, 4, 6, 6]',
            'Oh noes! Zilch',
        ],
        'rolls' : [[2,2,3,4,6,6]],
    }
    mp = MockPlayer(**flow)
    game = Game(mp.mock_print, mp.mock_input)
    game._do_roll = mp.mock_roll
    # Easier to test with hitting _do_round directly,
    # no prob, but notice that protected method use is risky
    game._do_round()
    assert mp.mop_up()
###############################################
##### Day 3 - Coming Soon ######
###############################################
def test_validate_selected_dice():
    """TODO: add a test to confirm that the user's selected "keeper"
    dice are a valid subset of the dice actually rolled."""
    # Placeholder for Day 3.
    pass
def test_zilch_ends_round():
    """TODO: add a test to confirm that a zilch roll ends the turn
    and no points are awarded."""
    # Placeholder for Day 3.
    pass
###############################################
##### Helper Class for Testing ######
###############################################
class MockPlayer:
    """Scripted player test double.

    Each list is consumed front-to-back as the game under test prints,
    prompts, and rolls; ``mop_up`` asserts the whole script was used.
    """

    def __init__(self, prints=None, prompts=None, responses=None, rolls=None):
        # Fix: the original used mutable default arguments ([]), so every
        # instance created without a value shared the same list objects.
        self.prints = [] if prints is None else prints
        self.prompts = [] if prompts is None else prompts
        self.responses = [] if responses is None else responses
        self.rolls = [] if rolls is None else rolls

    def mock_print(self, *args):
        """Assert the game printed the next expected line."""
        if len(self.prints):
            current_print = self.prints.pop(0)
            assert args[0] == current_print

    def mock_input(self, *args):
        """Assert the next prompt, then return the next scripted response."""
        if len(self.prompts):
            current_prompt = self.prompts.pop(0)
            assert args[0] == current_prompt
        if len(self.responses):
            current_response = self.responses.pop(0)
            return current_response

    def mock_roll(self, num_dice):
        """Return the next scripted roll (num_dice is not checked)."""
        if len(self.rolls):
            current_roll = self.rolls.pop(0)
            return current_roll

    def mop_up(self):
        """Verify the whole script was consumed; returns True when clean."""
        assert len(self.prints) == 0
        assert len(self.prompts) == 0
        assert len(self.responses) == 0
        assert len(self.rolls) == 0
        return True
| 25.569853 | 70 | 0.464558 |
ace453b2dc3cf6951a0331ff919459063f03686e | 1,340 | py | Python | reproductions/offline/discrete_cql.py | ningyixue/AIPI530_Final_Project | b95353ffd003692a37a59042dfcd744a18b7e802 | [
"MIT"
] | 1 | 2022-03-07T17:58:57.000Z | 2022-03-07T17:58:57.000Z | reproductions/offline/discrete_cql.py | ningyixue/AIPI530_Final_Project | b95353ffd003692a37a59042dfcd744a18b7e802 | [
"MIT"
] | null | null | null | reproductions/offline/discrete_cql.py | ningyixue/AIPI530_Final_Project | b95353ffd003692a37a59042dfcd744a18b7e802 | [
"MIT"
] | null | null | null | import argparse
import d3rlpy
def main():
    """Train discrete CQL on offline Atari transitions (d3rlpy reproduction)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--game', type=str, default='breakout')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--gpu', type=int)
    args = parser.parse_args()

    # Seed d3rlpy and the evaluation environment for reproducibility.
    d3rlpy.seed(args.seed)

    # fraction=0.01 keeps 1% of the replay transitions.
    dataset, env = d3rlpy.datasets.get_atari_transitions(
        args.game,
        fraction=0.01,
        index=1 if args.game == "asterix" else 0,
    )

    env.seed(args.seed)

    cql = d3rlpy.algos.DiscreteCQL(
        learning_rate=5e-5,
        optim_factory=d3rlpy.models.optimizers.AdamFactory(eps=1e-2 / 32),
        batch_size=32,
        alpha=4.0,
        # Quantile-regression Q-function with 200 quantiles.
        q_func_factory=d3rlpy.models.q_functions.QRQFunctionFactory(
            n_quantiles=200),
        scaler="pixel",
        n_frames=4,
        target_update_interval=2000,
        reward_scaler=d3rlpy.preprocessing.ClipRewardScaler(-1.0, 1.0),
        use_gpu=args.gpu)

    # Score by rolling out in the live environment with small epsilon.
    env_scorer = d3rlpy.metrics.evaluate_on_environment(env, epsilon=0.001)

    cql.fit(dataset,
            eval_episodes=[None],
            n_steps=50000000 // 4,
            n_steps_per_epoch=125000,
            scorers={
                'environment': env_scorer,
            },
            experiment_name=f"DiscreteCQL_{args.game}_{args.seed}")
# Standard script guard: only start training when run directly.
if __name__ == '__main__':
    main()
| 27.346939 | 75 | 0.622388 |
ace453b92cb387fd5941f78f2ef8cb5b2f6c5b96 | 317 | py | Python | tests/unit/models/reddit/mixins/test__init__.py | nickatnight/praw | 6ba5c92e5d5210338c0a2a2755a5e5e226a002fa | [
"BSD-2-Clause"
] | 2,360 | 2015-01-03T18:27:44.000Z | 2022-03-26T23:24:49.000Z | tests/unit/models/reddit/mixins/test__init__.py | nickatnight/praw | 6ba5c92e5d5210338c0a2a2755a5e5e226a002fa | [
"BSD-2-Clause"
] | 1,187 | 2015-01-04T18:42:10.000Z | 2022-03-28T13:46:33.000Z | tests/unit/models/reddit/mixins/test__init__.py | nickatnight/praw | 6ba5c92e5d5210338c0a2a2755a5e5e226a002fa | [
"BSD-2-Clause"
] | 591 | 2015-01-04T17:33:34.000Z | 2022-03-27T20:28:26.000Z | import pytest
from praw.models.reddit.mixins import ThingModerationMixin
from .... import UnitTest
class TestThingModerationMixin(UnitTest):
def test_must_be_extended(self):
with pytest.raises(NotImplementedError):
ThingModerationMixin().send_removal_message("public", "title", "message")
| 26.416667 | 85 | 0.757098 |
ace456782b366e11dea8a945a6a611ea3bdea3fa | 3,179 | py | Python | pkg/suggestion/v1alpha1/NAS_Reinforcement_Learning/SuggestionParam.py | terrytangyuan/katib | 5a7a144a1b33e05466174edd721803349622aabc | [
"Apache-2.0"
] | 6 | 2019-04-26T05:24:45.000Z | 2020-03-16T15:54:23.000Z | pkg/suggestion/v1alpha1/NAS_Reinforcement_Learning/SuggestionParam.py | terrytangyuan/katib | 5a7a144a1b33e05466174edd721803349622aabc | [
"Apache-2.0"
] | 30 | 2019-04-27T01:49:00.000Z | 2021-02-01T08:52:32.000Z | pkg/suggestion/v1alpha1/NAS_Reinforcement_Learning/SuggestionParam.py | terrytangyuan/katib | 5a7a144a1b33e05466174edd721803349622aabc | [
"Apache-2.0"
] | 4 | 2019-05-07T04:53:07.000Z | 2020-10-21T09:10:26.000Z | def parseSuggestionParam(params_raw):
param_standard = {
"lstm_num_cells": ['value', int, [1, 'inf']],
"lstm_num_layers": ['value', int, [1, 'inf']],
"lstm_keep_prob": ['value', float, [0.0, 1.0]],
"optimizer": ['categorical', str, ["adam", "momentum", "sgd"]],
"init_learning_rate": ['value', float, [1e-6, 1.0]],
"lr_decay_start": ['value', int, [0, 'inf']],
"lr_decay_every": ['value', int, [1, 'inf']],
"lr_decay_rate": ['value', float, [0.0, 1.0]],
"skip-target": ['value', float, [0.0, 1.0]],
"skip-weight": ['value', float, [0.0, 'inf']],
"l2_reg": ['value', float, [0.0, 'inf']],
"entropy_weight": ['value', float, [0.0, 'inf']],
"baseline_decay": ['value', float, [0.0, 1.0]],
}
suggestion_params = {
"lstm_num_cells": 64,
"lstm_num_layers": 1,
"lstm_keep_prob": 1.0,
"optimizer": "adam",
"init_learning_rate": 1e-3,
"lr_decay_start": 0,
"lr_decay_every": 1000,
"lr_decay_rate": 0.9,
"skip-target": 0.4,
"skip-weight": 0.8,
"l2_reg": 0,
"entropy_weight": 1e-4,
"baseline_decay": 0.9999
}
def checktype(param_name, param_value, check_mode, supposed_type, supposed_range=None):
correct = True
try:
converted_value = supposed_type(param_value)
except:
correct = False
print("Parameter {} is of wrong type. Set back to default value {}"
.format(param_name, suggestion_params[param_name]))
if correct and check_mode == 'value':
if not ((supposed_range[0] == '-inf' or converted_value >= supposed_range[0]) and
(supposed_range[1] == 'inf' or converted_value <= supposed_range[1])):
correct = False
print("Parameter {} out of range. Set back to default value {}"
.format(param_name, suggestion_params[param_name]))
elif correct and check_mode == 'categorical':
if converted_value not in supposed_range:
correct = False
print("Parameter {} out of range. Set back to default value {}"
.format(param_name, suggestion_params[param_name]))
if correct:
suggestion_params[param_name] = converted_value
for param in params_raw:
# SuggestionCount is automatically added by controller and not used currently
if param.name == "SuggestionCount":
continue
if param.name in suggestion_params.keys():
checktype(param.name,
param.value,
param_standard[param.name][0], # mode
param_standard[param.name][1], # type
param_standard[param.name][2]) # range
else:
print("Unknown Parameter name: {}".format(param.name))
return suggestion_params
| 42.959459 | 93 | 0.517773 |
ace456badf15f19ed7d7d18303c974a531bb9986 | 1,919 | py | Python | latex/environment.py | omnidan/python-latex | 908cdaf7ada05ce0231f8f0da30ac55d3f6d4e8f | [
"BSD-3-Clause"
] | 7 | 2015-10-26T16:06:59.000Z | 2021-05-30T04:45:46.000Z | latex/environment.py | omnidan/python-latex | 908cdaf7ada05ce0231f8f0da30ac55d3f6d4e8f | [
"BSD-3-Clause"
] | null | null | null | latex/environment.py | omnidan/python-latex | 908cdaf7ada05ce0231f8f0da30ac55d3f6d4e8f | [
"BSD-3-Clause"
] | 4 | 2015-10-28T15:32:28.000Z | 2017-02-18T19:31:24.000Z | __author__ = 'Daniel Bugl'
__copyright__ = "Copyright 2013, Daniel Bugl"
__credits__ = ["Daniel Bugl"]
__license__ = "BSD"
__version__ = "0.1.0"
__maintainer__ = "Daniel Bugl"
__email__ = "daniel.bugl@touchlay.com"
__status__ = "Prototype"
from . import lines
class LatexEnvironment(lines.LatexLine):
    """A LaTeX environment (``\\begin{name}`` ... ``\\end{name}``) holding LatexLine objects.

    If ``name`` is None the instance represents the global (unnamed)
    environment and no ``\\begin``/``\\end`` markers are emitted.
    """

    def getString(self, no_prefix=True):
        """Converts the LatexEnvironment object and all objects part of it into a latex string and returns it"""
        buf = ""
        if self.name:
            buf += "\\begin{" + self.name + "}" + "\n"
        for l in self.__lines:
            buf += l.getString(no_prefix)
            buf += "\n"
        if self.name:
            buf += "\\end{" + self.name + "}"
        if no_prefix:
            return str(buf)
        else:
            return self.prefix + str(buf) + self.suffix

    def getLines(self):
        """Returns a list of all lines"""
        return self.__lines

    def setLine(self, index, line):
        """Set a line with a specific index; returns False for non-LatexLine input"""
        if not isinstance(line, lines.LatexLine):
            return False
        else:
            self.__lines[index] = line
            return True

    def setLines(self, lines):
        """Sets the lines to a specific list; returns False for non-list input"""
        # isinstance instead of ``type(x) == list`` so list subclasses are accepted too
        if isinstance(lines, list):
            self.__lines = lines
            return True
        else:
            return False

    def addLine(self, line):
        """Adds a LatexLine to the LatexEnvironment object; returns False for non-LatexLine input"""
        if not isinstance(line, lines.LatexLine):
            return False
        else:
            self.__lines.append(line)
            return True

    def __init__(self, name=None, lines=None, prefix="", suffix=""):
        self.name = name  # if name is None, it's the global environment
        # BUGFIX: the previous default ``lines=[]`` was a mutable default
        # argument shared by every instance created without an explicit list,
        # so lines added to one environment leaked into all the others.
        self.__lines = [] if lines is None else lines
        # these are needed when not pretty printing
        self.prefix = str(prefix)
        self.suffix = str(suffix)
ace457c49c1b9d9a0b27dedfdefcb09f27bd458f | 409 | py | Python | demo/demo/wsgi.py | pivotal-energy-solutions/django-bootstrap-templatetags | fb2375032de7636f19b728666d04261494f4063b | [
"Apache-2.0"
] | null | null | null | demo/demo/wsgi.py | pivotal-energy-solutions/django-bootstrap-templatetags | fb2375032de7636f19b728666d04261494f4063b | [
"Apache-2.0"
] | null | null | null | demo/demo/wsgi.py | pivotal-energy-solutions/django-bootstrap-templatetags | fb2375032de7636f19b728666d04261494f4063b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
WSGI config for demo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "demo.settings")
application = get_wsgi_application()
| 22.722222 | 78 | 0.760391 |
ace459c0abc0f4a80bb9a1c35335c50b120f2aae | 1,439 | py | Python | grandprix/migrations/0002_auto_20171110_0640.py | AfricaChess/lichesshub | 833138d96cbf00c60719ae8f25ee882f2d4ba1ff | [
"MIT"
] | 2 | 2017-10-28T11:21:51.000Z | 2018-01-10T19:39:03.000Z | grandprix/migrations/0002_auto_20171110_0640.py | AfricaChess/lichesshub | 833138d96cbf00c60719ae8f25ee882f2d4ba1ff | [
"MIT"
] | 5 | 2017-11-29T12:33:13.000Z | 2021-06-10T19:00:27.000Z | grandprix/migrations/0002_auto_20171110_0640.py | AfricaChess/lichesshub | 833138d96cbf00c60719ae8f25ee882f2d4ba1ff | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-10 06:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: introduces the Season and
    # TournamentType lookup tables and links each Tournament to them
    # through nullable foreign keys.

    dependencies = [
        ('grandprix', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Season',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('start_date', models.DateField()),
                ('end_date', models.DateField()),
            ],
        ),
        migrations.CreateModel(
            name='TournamentType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        # null=True keeps existing Tournament rows valid when the columns are added.
        migrations.AddField(
            model_name='tournament',
            name='kind',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='grandprix.TournamentType'),
        ),
        migrations.AddField(
            model_name='tournament',
            name='season',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='grandprix.Season'),
        ),
    ]
| 33.465116 | 123 | 0.579569 |
ace459e8e163006f5a7bdab0c0a14c78828b518c | 1,119 | py | Python | zendesk/komand_zendesk/actions/show_memberships/schema.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2020-03-18T09:14:55.000Z | 2020-03-18T09:14:55.000Z | zendesk/komand_zendesk/actions/show_memberships/schema.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | 1 | 2021-02-23T23:57:37.000Z | 2021-02-23T23:57:37.000Z | zendesk/komand_zendesk/actions/show_memberships/schema.py | killstrelok/insightconnect-plugins | 911358925f4233ab273dbd8172e8b7b9188ebc01 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
    # Human-readable description of the plugin action.
    DESCRIPTION = "Show all organization memberships"


class Input:
    # Key used in the action's input schema.
    USER_ID = "user_id"


class Output:
    # Key used in the action's output schema.
    MEMBERSHIPS = "memberships"
class ShowMembershipsInput(komand.Input):
    """Input schema for the Show Memberships action (expects a Zendesk user ID)."""

    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "user_id": {
      "type": "string",
      "title": "User ID",
      "description": "ID of user to show E.g. 20444826487",
      "order": 1
    }
  },
  "required": [
    "user_id"
  ]
}
    """)

    def __init__(self):
        # BUGFIX: ``super(self.__class__, self)`` recurses infinitely if this
        # class is ever subclassed; reference the class explicitly instead.
        super(ShowMembershipsInput, self).__init__(self.schema)
class ShowMembershipsOutput(komand.Output):
    """Output schema for the Show Memberships action (list of membership records)."""

    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "memberships": {
      "type": "array",
      "title": "Memberships",
      "description": "Members data",
      "items": {
        "type": "object"
      },
      "order": 1
    }
  },
  "required": [
    "memberships"
  ]
}
    """)

    def __init__(self):
        # BUGFIX: ``super(self.__class__, self)`` recurses infinitely if this
        # class is ever subclassed; reference the class explicitly instead.
        super(ShowMembershipsOutput, self).__init__(self.schema)
| 17.215385 | 59 | 0.563003 |
ace459fa6b9b8e573af3f09e9b337f18ce523002 | 10,642 | py | Python | competing_methods/all_model.py | Califrais/lights_experiments | 567c8e1578c0b23d9a55a5770a34900c5bce25b1 | [
"MIT"
] | null | null | null | competing_methods/all_model.py | Califrais/lights_experiments | 567c8e1578c0b23d9a55a5770a34900c5bce25b1 | [
"MIT"
] | null | null | null | competing_methods/all_model.py | Califrais/lights_experiments | 567c8e1578c0b23d9a55a5770a34900c5bce25b1 | [
"MIT"
] | null | null | null | import os
os.environ['R_HOME'] = "/Library/Frameworks/R.framework/Versions/4.0/Resources"
from rpy2 import robjects
from rpy2.robjects import pandas2ri, numpy2ri
import numpy as np
from lights.inference import prox_QNMCEM
from prettytable import PrettyTable
from lifelines.utils import concordance_index as c_index_score
from time import time
from lights.simulation import SimuJointLongitudinalSurvival
import pandas as pd
from scipy.stats import beta
def load_data(simu):
    """Load the benchmark dataset in both "long" and per-subject formats.

    Parameters
    ----------
    simu : bool
        If True, generate data with ``SimuJointLongitudinalSurvival``;
        otherwise load the PBC-Seq dataset through an R script (rpy2).

    Returns
    -------
    tuple
        ``(data, data_lights, time_dep_feat, time_indep_feat)`` where ``data``
        has one row per longitudinal measurement and ``data_lights`` one row
        per subject, with each longitudinal series stored as a pandas Series
        inside the corresponding cell.
    """
    if simu:
        n_long_features = 5
        n_time_indep_features = 10
        n_samples = 200
        simu = SimuJointLongitudinalSurvival(seed=123,
                                             n_long_features=n_long_features,
                                             n_samples=n_samples,
                                             n_time_indep_features=n_time_indep_features,
                                             sparsity=0.5)
        X, Y, T, delta, _ = simu.simulate()
        # NOTE(review): ``id`` shadows the builtin; kept as-is here.
        id = np.arange(n_samples)
        time_dep_feat = ['long_feature_%s' % (l + 1)
                         for l in range(n_long_features)]
        time_indep_feat = ['X_%s' % (l + 1)
                           for l in range(n_time_indep_features)]
        data_lights = pd.DataFrame(data=np.column_stack((id, T, delta, X, Y)),
                                   columns=["id", "T_survival", "delta"] +
                                           time_indep_feat + time_dep_feat)
        df1 = pd.DataFrame(data=np.column_stack((id, T, delta, X)),
                           columns=["id", "T_survival", "delta"] +
                                   time_indep_feat)
        # generate t_max: censor each subject at a Beta(2, 5)-distributed
        # fraction of its survival time
        a = 2
        b = 5
        np.random.seed(0)
        r = beta.rvs(a, b, size=n_samples)
        t_max = T * (1 - r)
        for i in range(n_samples):
            Y_i_ = []
            for l in range(n_long_features):
                Y_il = Y.loc[i][l]
                times_il = Y_il.index.values
                t_long_chosen = (times_il <= t_max[i])
                # if censoring removed every measurement, extend t_max to keep
                # at least the first observation of this feature
                if not np.any(t_long_chosen):
                    t_max[i] = times_il[0]
                    t_long_chosen = (times_il <= t_max[i])
                times_il = times_il[t_long_chosen]
                y_il = Y_il.values.flatten()[t_long_chosen].tolist()
                n_il = len(times_il)
                tmp = data_lights.loc[i, time_dep_feat[l]]
                # NOTE(review): chained assignment (df[col][i] = ...) relies on
                # the first indexing returning a view; pandas may warn — verify.
                if tmp[tmp.index.values <= t_max[i]].empty:
                    data_lights[time_dep_feat[l]][i] = tmp[tmp.index.values == tmp.index.values[0]]
                    t_max[i] = tmp.index.values[0]
                else:
                    data_lights[time_dep_feat[l]][i] = tmp[tmp.index.values <= t_max[i]]
                Y_i_.append(y_il)
            # long-format rows for subject i: (id, measurement time, t_max, features)
            Y_i = np.column_stack(
                (np.array([id[i]] * n_il), times_il, np.array([t_max[i]] * n_il), np.array(Y_i_).T))
            if i == 0:
                Y_ = Y_i
            else:
                Y_ = np.row_stack((Y_, Y_i))
        data_lights["T_max"] = t_max
        df2 = pd.DataFrame(data=Y_, columns=["id", "T_long", "T_max"] + time_dep_feat)
        data = pd.merge(df2, df1, on="id")
    else:
        # load PBC Seq
        robjects.r.source(os.getcwd() + "/lights/competing_methods/load_PBC_Seq.R")
        time_indep_feat = ['drug', 'age', 'sex']
        time_dep_feat = ['serBilir', 'albumin', 'SGOT', 'platelets',
                         'prothrombin', 'alkaline', 'serChol']
        data_R = robjects.r["load"]()
        # TODO: encoder and normalize
        data = pd.DataFrame(data_R).T
        data.columns = data_R.colnames
        # drop rows with (near-)negative values, then log-transform the markers
        data = data[(data > -1e-4).all(axis=1)]
        for feat in time_dep_feat:
            data[feat] = np.log(data[feat].values)
        id_list = np.unique(data["id"])
        n_samples = len(id_list)
        n_long_features = len(time_dep_feat)
        data_lights = data.drop_duplicates(subset=["id"])
        # generate t_max: same Beta(2, 5) censoring scheme as the simulated branch
        a = 2
        b = 5
        np.random.seed(0)
        r = beta.rvs(a, b, size=n_samples)
        T = data_lights["T_survival"].values
        t_max = T * (1 - r)
        Y = []
        t_max_R = []
        for i in range(n_samples):
            tmp = data[(data["id"] == id_list[i]) & (data["T_long"] < t_max[i])]
            # keep at least one measurement per subject
            if tmp.empty:
                t_max[i] = data[(data["id"] == id_list[i])]["T_long"].values[0]
                n_i = 1
            else:
                n_i = tmp.shape[0]
            # drop this subject's measurements taken after t_max
            data = data[(data["id"] != id_list[i]) |
                        ((data["id"] == id_list[i]) & (data["T_long"] <= t_max[i]))]
            y_i = []
            for l in range(n_long_features):
                Y_il = data[["T_long", time_dep_feat[l]]][
                    (data["id"] == id_list[i]) & (data['T_long'] <= t_max[i])]
                # TODO: Add value of 1/365 (the first day of survey instead of 0)
                y_i += [pd.Series(Y_il[time_dep_feat[l]].values,
                                  index=Y_il["T_long"].values + 1 / 365)]
            Y.append(y_i)
            t_max_R += [t_max[i]] * n_i
        data_lights[time_dep_feat] = Y
        data_lights["T_max"] = t_max
        data["T_max"] = np.array(t_max_R).flatten()
    return (data, data_lights, time_dep_feat, time_indep_feat)
def extract_lights_feat(data, time_indep_feat, time_dep_feat):
    """Split a per-subject DataFrame into the arrays expected by lights.

    Parameters
    ----------
    data : pandas.DataFrame
        One row per subject; must contain ``T_survival`` and ``delta``
        columns in addition to the listed feature columns.
    time_indep_feat : list of str
        Column names of the time-independent covariates.
    time_dep_feat : list of str
        Column names of the longitudinal (time-dependent) features.

    Returns
    -------
    tuple
        ``(X, Y, T, delta)``: float64 design matrix, the longitudinal
        columns (still a DataFrame), float64 survival times, and integer
        censoring indicators.
    """
    # np.float_ was removed in NumPy 2.0; use explicit dtype conversion
    # instead of the deprecated scalar-type-as-converter idiom.
    X = np.asarray(data[time_indep_feat].values, dtype=np.float64)
    Y = data[time_dep_feat]
    T = np.asarray(data[["T_survival"]].values.flatten(), dtype=np.float64)
    delta = np.asarray(data[["delta"]].values.flatten(), dtype=np.int_)
    return (X, Y, T, delta)
def extract_R_feat(data):
    """Convert a pandas survival dataset into R-side objects via rpy2.

    Returns ``(data_R, T_R, delta_R)``: the full longitudinal table plus one
    survival time and one censoring indicator per unique subject ``id``.
    """
    per_subject = data.drop_duplicates(subset=["id"])
    surv_times = per_subject[["T_survival"]].values.flatten()
    censoring = per_subject[["delta"]].values.flatten()
    converter = (robjects.default_converter + pandas2ri.converter +
                 numpy2ri.converter)
    with robjects.conversion.localconverter(converter):
        data_R = robjects.conversion.py2rpy(data)
        T_R = robjects.conversion.py2rpy(surv_times)
        delta_R = robjects.conversion.py2rpy(censoring)
    return (data_R, T_R, delta_R)
def all_model(n_runs=1, simu=True):
    """Benchmark lights against competing joint models (Cox, MJLCMM, JMBayes).

    For each run the subjects are randomly split into train/test sets, every
    model is fitted on the training part, and its C-index on the held-out
    part is reported together with the fitting time.

    Parameters
    ----------
    n_runs : int
        Number of random train/test splits to evaluate.
    simu : bool
        If True use simulated data, otherwise the PBC-Seq dataset (via R).
    """
    seed = 0
    test_size = .2
    data, data_lights, time_dep_feat, time_indep_feat = load_data(simu)
    t = PrettyTable(['Algos', 'C_index', 'time'])
    for i in range(n_runs):
        seed += 1
        id_list = data_lights["id"]
        nb_test_sample = int(test_size * len(id_list))
        np.random.seed(seed)
        id_test = np.random.choice(id_list, size=nb_test_sample, replace=False)

        # BUGFIX: the training sets previously also selected ``isin(id_test)``,
        # so every model was trained and evaluated on the same subjects.
        data_lights_train = data_lights[~data_lights.id.isin(id_test)]
        data_lights_test = data_lights[data_lights.id.isin(id_test)]
        X_lights_train, Y_lights_train, T_train, delta_train = \
            extract_lights_feat(data_lights_train, time_indep_feat, time_dep_feat)
        X_lights_test, Y_lights_test, T_test, delta_test = \
            extract_lights_feat(data_lights_test, time_indep_feat, time_dep_feat)
        data_train = data[~data.id.isin(id_test)]
        data_test = data[data.id.isin(id_test)]
        data_R_train, T_R_train, delta_R_train = extract_R_feat(data_train)
        data_R_test, T_R_test, delta_R_test = extract_R_feat(data_test)

        # The penalized Cox model.
        robjects.r.source(os.getcwd() + "/competing_methods/CoxNet.R")
        X_R_train = robjects.r["Cox_get_long_feat"](data_R_train, time_dep_feat,
                                                    time_indep_feat)
        X_R_test = robjects.r["Cox_get_long_feat"](data_R_test, time_dep_feat,
                                                   time_indep_feat)
        best_lambda = robjects.r["Cox_cross_val"](X_R_train, T_R_train, delta_R_train)
        start = time()
        trained_CoxPH = robjects.r["Cox_fit"](X_R_train, T_R_train,
                                              delta_R_train, best_lambda)
        Cox_pred = robjects.r["Cox_score"](trained_CoxPH, X_R_test)
        Cox_marker = np.array(Cox_pred[:])
        Cox_c_index = c_index_score(T_test, Cox_marker, delta_test)
        # the C-index is symmetric under marker sign flip; report the best side
        Cox_c_index = max(Cox_c_index, 1 - Cox_c_index)
        t.add_row(["Cox", "%g" % Cox_c_index, "%.3f" % (time() - start)])

        # Multivariate joint latent class model.
        start = time()
        robjects.r.source(os.getcwd() + "/competing_methods/MJLCMM.R")
        trained_long_model, trained_mjlcmm = robjects.r["MJLCMM_fit"](data_R_train,
                                                robjects.StrVector(time_dep_feat),
                                                robjects.StrVector(time_indep_feat))
        MJLCMM_pred = robjects.r["MJLCMM_score"](trained_long_model,
                                                 trained_mjlcmm,
                                                 time_indep_feat, data_R_test)
        MJLCMM_marker = np.array(MJLCMM_pred.rx2('pprob')[2])
        MJLCMM_c_index = c_index_score(T_test, MJLCMM_marker, delta_test)
        MJLCMM_c_index = max(MJLCMM_c_index, 1 - MJLCMM_c_index)
        t.add_row(["MJLCMM", "%g" % MJLCMM_c_index, "%.3f" % (time() - start)])

        # Multivariate shared random effect model.
        start = time()
        robjects.r.source(os.getcwd() + "/competing_methods/JMBayes.R")
        trained_JMBayes = robjects.r["fit"](data_R_train,
                                            robjects.StrVector(time_dep_feat),
                                            robjects.StrVector(time_indep_feat))
        JMBayes_marker = np.array(robjects.r["score"](trained_JMBayes, data_R_test))
        JMBayes_c_index = c_index_score(T_test, JMBayes_marker, delta_test)
        JMBayes_c_index = max(JMBayes_c_index, 1 - JMBayes_c_index)
        t.add_row(["JMBayes", "%g" % JMBayes_c_index, "%.3f" % (time() - start)])

        # lights
        start = time()
        fixed_effect_time_order = 1
        learner = prox_QNMCEM(fixed_effect_time_order=fixed_effect_time_order,
                              max_iter=5, initialize=True, print_every=1,
                              compute_obj=True, simu=False,
                              asso_functions=["lp", "re"],
                              l_pen_SGL=0.02, eta_sp_gp_l1=.9, l_pen_EN=0.02)
        learner.fit(X_lights_train, Y_lights_train, T_train, delta_train)
        # score each test subject at its own censoring time
        prediction_times = data_lights_test[["T_max"]].values.flatten()
        lights_marker = learner.predict_marker(X_lights_test, Y_lights_test, prediction_times)
        lights_c_index = c_index_score(T_test, lights_marker, delta_test)
        lights_c_index = max(lights_c_index, 1 - lights_c_index)
        t.add_row(["lights", "%g" % lights_c_index, "%.3f" % (time() - start)])
    print(t)
| 46.269565 | 100 | 0.561549 |
ace45bc208c808af8233fbd45be8295736bb1931 | 342 | py | Python | HackerRank/Python/Medium/M0002.py | Mohammed-Shoaib/HackerRank-Problems | ccfb9fc2f0d8dff454439d75ce519cf83bad7c3b | [
"MIT"
] | 54 | 2019-05-13T12:13:09.000Z | 2022-02-27T02:59:00.000Z | HackerRank/Python/Medium/M0002.py | Mohammed-Shoaib/HackerRank-Problems | ccfb9fc2f0d8dff454439d75ce519cf83bad7c3b | [
"MIT"
] | 2 | 2020-10-02T07:16:43.000Z | 2020-10-19T04:36:19.000Z | HackerRank/Python/Medium/M0002.py | Mohammed-Shoaib/HackerRank-Problems | ccfb9fc2f0d8dff454439d75ce519cf83bad7c3b | [
"MIT"
] | 20 | 2020-05-26T09:48:13.000Z | 2022-03-18T15:18:27.000Z | # Problem Statement: https://www.hackerrank.com/challenges/the-minion-game/problem
def minion_game(S):
kevin = stuart = 0
for i in range(len(S)):
if S[i] in 'AEIOU':
kevin += len(S) - i
else:
stuart += len(S) - i
if kevin > stuart:
print('Kevin', kevin)
elif kevin < stuart:
print('Stuart', stuart)
else:
print('Draw') | 20.117647 | 82 | 0.637427 |
ace45c6104545bc095d5d3d7d16c3d41edc3f0f7 | 67,841 | py | Python | Tests/test_SearchIO_model.py | xzy3/biopython | 4bd06c16bbcef7a0f5bc43ba9bb936b9557996e9 | [
"BSD-3-Clause"
] | 2 | 2020-06-25T12:52:03.000Z | 2020-07-11T09:47:34.000Z | Tests/test_SearchIO_model.py | EmmanuelOwusu/biopython | 4e9a15172ba26bae104eaa7f05819cd6d41d0da8 | [
"BSD-3-Clause"
] | 1 | 2020-06-03T16:16:26.000Z | 2020-06-03T16:16:26.000Z | Tests/test_SearchIO_model.py | EmmanuelOwusu/biopython | 4e9a15172ba26bae104eaa7f05819cd6d41d0da8 | [
"BSD-3-Clause"
] | 3 | 2020-05-17T19:43:05.000Z | 2020-06-04T20:44:38.000Z | # Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for SearchIO objects.
Tests the methods and behaviors of QueryResult, Hit, and HSP objects. All tests
are format-independent and are meant to check the fundamental behavior common
to all formats.
"""
import pickle
import unittest
from io import BytesIO
from copy import deepcopy
from search_tests_common import compare_search_obj
from Bio.Align import MultipleSeqAlignment
from Bio.Alphabet import single_letter_alphabet, generic_dna
from Bio.SearchIO._model import QueryResult, Hit, HSP, HSPFragment
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
# mock HSPFragments
# Naming pattern (as used below): fragHQN = hit H, query Q, fragment N.
frag111 = HSPFragment("hit1", "query1", hit="ATGCGCAT", query="ATGCGCAT")
frag112 = HSPFragment("hit1", "query1", hit="ATG", query="GAT")
frag113 = HSPFragment("hit1", "query1", hit="ATTCG", query="AT-CG")
frag113b = HSPFragment("hit1", "query1", hit="ATTCG", query="AT-CG")
frag114 = HSPFragment("hit1", "query1", hit="AT", query="AT")
frag114b = HSPFragment("hit1", "query1", hit="ATCG", query="ATGG")
frag211 = HSPFragment("hit2", "query1", hit="GGGCCC", query="GGGCC-")
frag311 = HSPFragment("hit3", "query1", hit="GATG", query="GTTG")
frag312 = HSPFragment("hit3", "query1", hit="ATATAT", query="ATATAT")
frag411 = HSPFragment("hit4", "query1", hit="CC-ATG", query="CCCATG")
frag121 = HSPFragment("hit1", "query2", hit="GCGAG", query="GCGAC")

# mock HSPs
# hsp113 and hsp114 are multi-fragment HSPs; the rest wrap a single fragment.
hsp111 = HSP([frag111])
hsp112 = HSP([frag112])
hsp113 = HSP([frag113, frag113b])
hsp114 = HSP([frag114, frag114b])
hsp211 = HSP([frag211])
hsp311 = HSP([frag311])
hsp312 = HSP([frag312])
hsp411 = HSP([frag411])
hsp121 = HSP([frag121])

# mock Hits
hit11 = Hit([hsp111, hsp112, hsp113, hsp114])
hit21 = Hit([hsp211])
hit31 = Hit([hsp311, hsp312])
hit41 = Hit([hsp411])
hit12 = Hit([hsp121])
class QueryResultCases(unittest.TestCase):
def setUp(self):
    # fresh three-hit query shared by every test in this case
    self.qresult = QueryResult([hit11, hit21, hit31], "query1")
    # set mock attributes; slices/filters are expected to carry these over
    self.qresult.seq_len = 1102
    self.qresult.target = "refseq_rna"
def test_pickle(self):
    """Test pickling and unpickling of QueryResult."""
    # round-trip through pickle and compare against the original
    stream = BytesIO()
    pickle.dump(self.qresult, stream)
    restored = pickle.loads(stream.getvalue())
    self.assertTrue(compare_search_obj(self.qresult, restored))
def test_order(self):
    """Hits keep insertion order and reindex after a deletion."""
    # initial ordering follows construction order
    self.assertEqual(hit11, self.qresult[0])
    self.assertEqual(hit31, self.qresult[2])
    # deleting the middle hit shifts the last one down by one position
    del self.qresult["hit2"]
    self.assertEqual(hit11, self.qresult[0])
    self.assertEqual(hit31, self.qresult[1])
def test_init_none(self):
"""Test QueryResult.__init__, no arguments."""
qresult = QueryResult()
self.assertEqual(None, qresult.id)
self.assertEqual(None, qresult.description)
def test_init_id_only(self):
"""Test QueryResult.__init__, with ID only."""
qresult = QueryResult(id="query1")
self.assertEqual("query1", qresult.id)
self.assertEqual(None, qresult.description)
def test_init_hits_only(self):
"""Test QueryResult.__init__, with hits only."""
qresult = QueryResult([hit11, hit21, hit31])
self.assertEqual("query1", qresult.id)
self.assertEqual("<unknown description>", qresult.description)
def test_repr(self):
    """Test QueryResult.__repr__."""
    expected = "QueryResult(id='query1', 3 hits)"
    self.assertEqual(expected, repr(self.qresult))
def test_iter(self):
    """Test QueryResult.__iter__."""
    # iterating over the QueryResult must yield exactly the three hits
    expected = (hit11, hit21, hit31)
    seen = 0
    for hit in self.qresult:
        self.assertIn(hit, expected)
        seen += 1
    self.assertEqual(3, seen)
def test_hits(self):
    """Test QueryResult.hits."""
    # the hits property yields the contained Hit objects, in order
    self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
def test_hit_keys(self):
"""Test QueryResult.hit_keys."""
# hit_keys should return hit keys (which default to hit ids)
hit_keys = list(self.qresult.hit_keys)
self.assertEqual(["hit1", "hit2", "hit3"], hit_keys)
def test_items(self):
"""Test QueryResult.items."""
# items should return tuples of hit key, hit object pair
items = list(self.qresult.items)
self.assertEqual([("hit1", hit11), ("hit2", hit21), ("hit3", hit31)], items)
def test_hsps(self):
"""Test QueryResult.hsps."""
# hsps should return all hsps contained in qresult
hsps = self.qresult.hsps
self.assertEqual([hsp111, hsp112, hsp113, hsp114, hsp211, hsp311, hsp312], hsps)
def test_fragments(self):
"""Test QueryResult.fragments."""
# fragments should return all fragments contained in qresult
frags = self.qresult.fragments
self.assertEqual(
[
frag111,
frag112,
frag113,
frag113b,
frag114,
frag114b,
frag211,
frag311,
frag312,
],
frags,
)
def test_contains(self):
    """Test QueryResult.__contains__."""
    # membership tests accept both hit IDs and Hit objects
    for member in ("hit1", hit21):
        self.assertIn(member, self.qresult)
    for nonmember in ("hit5", hit41):
        self.assertNotIn(nonmember, self.qresult)
def test_contains_alt(self):
"""Test QueryResult.__contains__, with alternative IDs."""
# contains should work with alternative hit IDs
hit11._id_alt = ["alt1"]
query = QueryResult([hit11])
self.assertIn("alt1", query)
hit11._id_alt = []
def test_len(self):
    """Test QueryResult.__len__."""
    # len() reports the number of contained hits
    self.assertEqual(3, len(self.qresult))
def test_bool(self):
    """Test QueryResult.__bool__."""
    # a QueryResult is truthy exactly when it contains hits
    self.assertTrue(self.qresult)
    self.assertFalse(QueryResult())
def test_setitem_ok(self):
"""Test QueryResult.__setitem__."""
# hit objects assignment should work with arbitrary string keys
self.qresult["hit4"] = hit41
self.assertEqual([hit11, hit21, hit31, hit41], list(self.qresult.hits))
# and if the key already exist, the object should be overwritten
self.qresult["hit4"] = hit11
self.assertEqual([hit11, hit21, hit31, hit11], list(self.qresult.hits))
def test_setitem_ok_alt(self):
"""Test QueryResult.__setitem__, checking alt hit IDs."""
# hit objects assignment should make alt IDs visible
hit11._id_alt = ["alt1", "alt11"]
query = QueryResult()
query["hit1"] = hit11
self.assertEqual(hit11, query["hit1"])
self.assertEqual(hit11, query["alt1"])
self.assertEqual(hit11, query["alt11"])
self.assertNotEqual(hit11.id, "alt1")
self.assertNotEqual(hit11.id, "alt11")
hit11._id_alt = []
def test_setitem_ok_alt_existing(self):
"""Test QueryResult.__setitem__, existing key."""
# hit objects assignment on existing hits should also update alt IDs
hit11._id_alt = ["alt1"]
hit21._id_alt = ["alt2"]
query = QueryResult()
query["hit"] = hit11
self.assertEqual(hit11, query["hit"])
self.assertEqual(hit11, query["alt1"])
query["hit"] = hit21
self.assertEqual(hit21, query["hit"])
self.assertEqual(hit21, query["alt2"])
self.assertRaises(KeyError, query.__getitem__, "alt1")
hit11._id_alt = []
hit21._id_alt = []
def test_setitem_ok_alt_ok_promote(self):
"""Test QueryResult.__setitem__, previously alt ID."""
# hit objects assignment with ID previously existing as alternative
# should make the ID primary
hit11._id_alt = ["alt1"]
hit41._id_alt = ["alt4"]
hit31._id_alt = ["alt3"]
query = QueryResult([hit11, hit41])
self.assertEqual(hit11, query["alt1"])
self.assertEqual(hit41, query["alt4"])
self.assertNotIn("alt1", query._items)
self.assertIn("alt1", query._QueryResult__alt_hit_ids)
query["alt1"] = hit31
self.assertEqual(hit31, query["alt1"])
self.assertEqual(hit41, query["alt4"])
self.assertIn("alt1", query._items)
self.assertNotIn("alt1", query._QueryResult__alt_hit_ids)
hit11._id_alt = []
hit41._id_alt = []
hit31._id_alt = []
def test_setitem_wrong_key_type(self):
"""Test QueryResult.__setitem__, wrong key type."""
# item assignment should fail if the key is not string
self.assertRaises(TypeError, self.qresult.__setitem__, 0, hit41)
self.assertRaises(
TypeError, self.qresult.__setitem__, slice(0, 2), [hit41, hit31]
)
def test_setitem_wrong_type(self):
"""Test QueryResult.__setitem__, wrong type."""
# item assignment should fail if the object assigned is not a hit object
self.assertRaises(TypeError, self.qresult.__setitem__, "hit4", hsp111)
self.assertRaises(TypeError, self.qresult.__setitem__, "hit5", "hit5")
def test_setitem_wrong_query_id(self):
"""Test QueryResult.__setitem__, wrong query ID."""
# item assignment should fail if the hit object does not have the same
# query id
self.assertRaises(ValueError, self.qresult.__setitem__, "hit4", hit12)
def test_setitem_from_empty(self):
"""Test QueryResult.__setitem__, from empty container."""
qresult = QueryResult()
# initial desc and id is None
self.assertEqual(None, qresult.id)
self.assertEqual(None, qresult.description)
# but changes to the first item's after append
qresult.append(hit11)
self.assertEqual("query1", qresult.id)
self.assertEqual("<unknown description>", qresult.description)
# and remains the same after popping the last item
qresult.pop()
self.assertEqual("query1", qresult.id)
self.assertEqual("<unknown description>", qresult.description)
def test_getitem_default_ok(self):
"""Test QueryResult.__getitem__."""
# hits should be retrievable by their keys (default to id)
self.assertEqual(hit21, self.qresult["hit2"])
self.assertEqual(hit11, self.qresult["hit1"])
def test_getitem_int_ok(self):
"""Test QueryResult.__getitem__, with integer."""
# hits should be retrievable by their index
self.assertEqual(hit21, self.qresult[1])
self.assertEqual(hit31, self.qresult[-1])
def test_getitem_slice_ok(self):
"""Test QueryResult.__getitem__, with slice."""
# if the index is a slice object, a new qresult object with the same
# instance attributes should be returned
self.assertEqual(1102, self.qresult.seq_len)
self.assertEqual("refseq_rna", self.qresult.target)
new_qresult = self.qresult[1:]
self.assertEqual([hit21, hit31], list(new_qresult.hits))
self.assertEqual(1102, new_qresult.seq_len)
self.assertEqual("refseq_rna", new_qresult.target)
def test_getitm_slice_alt_ok(self):
"""Test QueryResult.__getitem__, with slice and alt IDs."""
# slicing should be reflected in the alt IDs as well
hit31._id_alt = ["alt3"]
hit11._id_alt = ["alt1"]
query = QueryResult([hit31, hit11])
self.assertEqual(hit11, query["hit1"])
self.assertEqual(hit11, query["alt1"])
self.assertEqual(hit31, query["hit3"])
self.assertEqual(hit31, query["alt3"])
query = query[:1]
self.assertEqual(hit31, query["hit3"])
self.assertEqual(hit31, query["alt3"])
self.assertRaises(KeyError, query.__getitem__, "hit1")
self.assertRaises(KeyError, query.__getitem__, "alt1")
hit31._id_alt = []
hit11._id_alt = []
def test_getitem_alt_ok(self):
"""Test QueryResult.__getitem__, single item with alternative ID."""
hit11._id_alt = ["alt1"]
query = QueryResult([hit11])
self.assertEqual(hit11, query["hit1"])
self.assertEqual(hit11, query["alt1"])
self.assertNotEqual(hit11.id, "alt1")
hit11._id_alt = []
def test_delitem_string_ok(self):
"""Test QueryResult.__getitem__, with string."""
# delitem should work with string index
del self.qresult["hit1"]
self.assertEqual(2, len(self.qresult))
self.assertTrue([hit21, hit31], list(self.qresult.hits))
def test_delitem_int_ok(self):
"""Test QueryResult.__delitem__."""
# delitem should work with int index
del self.qresult[-1]
self.assertEqual(2, len(self.qresult))
self.assertEqual([hit11, hit21], list(self.qresult.hits))
del self.qresult[0]
self.assertEqual(1, len(self.qresult))
self.assertTrue([hit21], list(self.qresult.hits))
def test_delitem_slice_ok(self):
"""Test QueryResult.__delitem__, with slice."""
# delitem should work with slice objects
del self.qresult[:-1]
self.assertEqual(1, len(self.qresult))
self.assertTrue([hit31], self.qresult.hits)
def test_delitem_alt_ok(self):
"""Test QueryResult.__delitem__, with alt ID."""
# delitem should work with alt IDs
hit31._id_alt = ["alt3"]
qresult = QueryResult([hit31, hit41])
self.assertEqual(2, len(qresult))
del qresult["alt3"]
self.assertEqual(1, len(qresult))
self.assertEqual(hit41, qresult["hit4"])
self.assertRaises(KeyError, qresult.__getitem__, "alt3")
hit31._id_alt = []
def test_description_set(self):
    """Test QueryResult.description setter."""
    # setting the description should change the query seqrecord description
    # of the contained hsps, if they have an alignment
    # test for default value
    # deepcopy so the module-level mock objects are not mutated for other tests
    qresult = deepcopy(self.qresult)
    new_desc = "unicorn hox homolog"
    # test initial condition: the new description is not yet present anywhere
    for hit in qresult:
        self.assertNotEqual(new_desc, hit.query_description)
        for hsp in hit:
            self.assertNotEqual(new_desc, hsp.query_description)
            for fragment in hsp:
                self.assertNotEqual(new_desc, fragment.query_description)
                self.assertNotEqual(new_desc, fragment.query.description)
    qresult.description = new_desc
    # test after setting: the description must cascade down to every hit,
    # HSP, fragment, and fragment query SeqRecord
    for hit in qresult:
        self.assertEqual(new_desc, hit.query_description)
        for hsp in hit:
            self.assertEqual(new_desc, hsp.query_description)
            for fragment in hsp:
                self.assertEqual(new_desc, fragment.query_description)
                self.assertEqual(new_desc, fragment.query.description)
def test_description_set_no_seqrecord(self):
"""Test QueryResult.description setter, without HSP SeqRecords."""
frag1 = HSPFragment("hit1", "query")
frag2 = HSPFragment("hit1", "query")
frag3 = HSPFragment("hit2", "query")
hit1 = Hit([HSP([x]) for x in [frag1, frag2]])
hit2 = Hit([HSP([frag3])])
qresult = QueryResult([hit1, hit2])
# test initial condition
for hit in qresult:
for hsp in hit.hsps:
self.assertTrue(getattr(hsp, "query") is None)
qresult.description = "unicorn hox homolog"
# test after setting
for hit in qresult:
for hsp in hit.hsps:
self.assertTrue(getattr(hsp, "query") is None)
def test_id_set(self):
"""Test QueryResult.id setter."""
# setting an ID should change the query IDs of all contained Hit and HSPs
qresult = deepcopy(self.qresult)
self.assertEqual("query1", qresult.id)
for hit in qresult:
self.assertEqual("query1", hit.query_id)
for hsp in hit:
self.assertEqual("query1", hsp.query_id)
for fragment in hsp:
self.assertEqual("query1", fragment.query_id)
self.assertEqual("query1", fragment.query.id)
qresult.id = "new_id"
self.assertEqual("new_id", qresult.id)
for hit in qresult:
self.assertEqual("new_id", hit.query_id)
for hsp in hit:
self.assertEqual("new_id", hsp.query_id)
for fragment in hsp:
self.assertEqual("new_id", fragment.query_id)
self.assertEqual("new_id", fragment.query.id)
def test_absorb_hit_does_not_exist(self):
"""Test QueryResult.absorb, hit does not exist."""
# absorb should work like append when the hit does not exist
self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
self.qresult.absorb(hit41)
self.assertEqual([hit11, hit21, hit31, hit41], list(self.qresult.hits))
self.assertEqual(["hit1", "hit2", "hit3", "hit4"], list(self.qresult.hit_keys))
def test_absorb_hit_exists(self):
"""Test QueryResult.absorb, hit with the same ID exists."""
# absorb should combine the hit's hsps if an existing one is present
self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
self.assertEqual(2, len(self.qresult["hit3"]))
hit = Hit([HSP([HSPFragment("hit3", "query1")])])
self.qresult.absorb(hit)
self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
self.assertEqual(["hit1", "hit2", "hit3"], list(self.qresult.hit_keys))
self.assertEqual(3, len(self.qresult["hit3"]))
# remove the mock hsp
del self.qresult["hit3"][-1]
def test_append_ok(self):
"""Test QueryResult.append."""
# append should work with Hit objects
self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
self.qresult.append(hit41)
self.assertEqual([hit11, hit21, hit31, hit41], list(self.qresult.hits))
self.assertEqual(["hit1", "hit2", "hit3", "hit4"], list(self.qresult.hit_keys))
def test_append_custom_hit_key_function_ok(self):
"""Test QueryResult.append, with custom hit key function."""
self.qresult._hit_key_function = lambda hit: hit.id + "_custom" # noqa: E731
# append should assign hit keys according to _hit_key_function
self.assertEqual(["hit1", "hit2", "hit3"], list(self.qresult.hit_keys))
self.qresult.append(hit41)
self.assertEqual(
["hit1", "hit2", "hit3", "hit4_custom"], list(self.qresult.hit_keys)
)
def test_append_id_exists(self):
"""Test QueryResult.append, when ID exists."""
# append should raise an error if hit_key already exists
self.assertRaises(ValueError, self.qresult.append, hit11)
def test_append_alt_id_exists(self):
"""Test QueryResult.append, when alt ID exists."""
# append should raise an error if hit_key already exists as alt ID
hit11._id_alt = ["alt"]
hit21._id_alt = ["alt"]
qresult = QueryResult([hit11])
self.assertRaises(ValueError, qresult.append, hit21)
hit11._id_alt = []
hit21._id_alt = []
def test_append_alt_id_exists_alt(self):
"""Test QueryResult.append, when alt ID exists as primary."""
# append should raise an error if alt ID already exists as primary ID
hit21._id_alt = ["hit1"]
qresult = QueryResult([hit11])
self.assertRaises(ValueError, qresult.append, hit21)
hit21._id_alt = []
def test_hit_filter(self):
"""Test QueryResult.hit_filter."""
# hit_filter should return a new QueryResult object (shallow copy),
self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
# filter func: min hit length == 2
# this would filter out hit21, since it only has 1 HSP
filter_func = lambda hit: len(hit) >= 2 # noqa: E731
filtered = self.qresult.hit_filter(filter_func)
self.assertEqual([hit11, hit31], list(filtered.hits))
# make sure all remaining hits return True for the filter function
self.assertTrue(all(filter_func(hit) for hit in filtered))
self.assertEqual(1102, filtered.seq_len)
self.assertEqual("refseq_rna", filtered.target)
def test_hit_filter_no_func(self):
"""Test QueryResult.hit_filter, without arguments."""
# when given no arguments, hit_filter should create a new object with
# the same contents
filtered = self.qresult.hit_filter()
self.assertTrue(compare_search_obj(filtered, self.qresult))
self.assertNotEqual(id(filtered), id(self.qresult))
self.assertEqual(1102, filtered.seq_len)
self.assertEqual("refseq_rna", filtered.target)
def test_hit_filter_no_filtered(self):
"""Test QueryResult.hit_filter, all hits filtered out."""
# when the filter filters out all hits, hit_filter should return an
# empty QueryResult object
filter_func = lambda hit: len(hit) > 50 # noqa: E731
filtered = self.qresult.hit_filter(filter_func)
self.assertEqual(0, len(filtered))
self.assertIsInstance(filtered, QueryResult)
self.assertEqual(1102, filtered.seq_len)
self.assertEqual("refseq_rna", filtered.target)
    def test_hit_map(self):
        """Test QueryResult.hit_map."""
        # hit_map should apply the given function to all contained Hits
        # deepcopy the qresult since the map function below mutates the
        # Hit objects in place (and they are shared module-level fixtures)
        qresult = deepcopy(self.qresult)
        # map func: capitalize hit IDs
        def map_func(hit):
            hit.id = hit.id.upper()
            return hit
        # test before mapping
        self.assertEqual("hit1", qresult[0].id)
        self.assertEqual("hit2", qresult[1].id)
        self.assertEqual("hit3", qresult[2].id)
        mapped = qresult.hit_map(map_func)
        self.assertEqual("HIT1", mapped[0].id)
        self.assertEqual("HIT2", mapped[1].id)
        self.assertEqual("HIT3", mapped[2].id)
        # and make sure the attributes are transferred to the new object
        self.assertEqual(1102, mapped.seq_len)
        self.assertEqual("refseq_rna", mapped.target)
def test_hit_map_no_func(self):
"""Test QueryResult.hit_map, without arguments."""
# when given no arguments, hit_map should create a new object with
# the same contents
mapped = self.qresult.hit_map()
self.assertTrue(compare_search_obj(mapped, self.qresult))
self.assertNotEqual(id(mapped), id(self.qresult))
self.assertEqual(1102, mapped.seq_len)
self.assertEqual("refseq_rna", mapped.target)
    def test_hsp_filter(self):
        """Test QueryResult.hsp_filter."""
        # hsp_filter should return a new QueryResult object (shallow copy)
        # and any empty hits should be discarded
        self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
        # filter func: no '-' in hsp query sequence
        # this would filter out hsp113 and hsp211, effectively removing hit21
        # (hit21 only contains hsp211, so it becomes empty and is dropped)
        filter_func = lambda hsp: "-" not in str(hsp.fragments[0].query)  # noqa: E731
        filtered = self.qresult.hsp_filter(filter_func)
        self.assertIn("hit1", filtered)
        self.assertNotIn("hit2", filtered)
        self.assertIn("hit3", filtered)
        # test hsps in hit11 (hsp113 was filtered out)
        self.assertTrue(
            all(hsp in filtered["hit1"] for hsp in [hsp111, hsp112, hsp114])
        )
        # test hsps in hit31 (both survive the filter)
        self.assertTrue(all(hsp in filtered["hit3"] for hsp in [hsp311, hsp312]))
def test_hsp_filter_no_func(self):
"""Test QueryResult.hsp_filter, no arguments."""
# when given no arguments, hsp_filter should create a new object with
# the same contents
filtered = self.qresult.hsp_filter()
self.assertTrue(compare_search_obj(filtered, self.qresult))
self.assertNotEqual(id(filtered), id(self.qresult))
self.assertEqual(1102, filtered.seq_len)
self.assertEqual("refseq_rna", filtered.target)
def test_hsp_filter_no_filtered(self):
"""Test QueryResult.hsp_filter, all hits filtered out."""
# when the filter filters out all hits, hsp_filter should return an
# empty QueryResult object
filter_func = lambda hsp: len(hsp) > 50 # noqa: E731
filtered = self.qresult.hsp_filter(filter_func)
self.assertEqual(0, len(filtered))
self.assertIsInstance(filtered, QueryResult)
self.assertEqual(1102, filtered.seq_len)
self.assertEqual("refseq_rna", filtered.target)
    def test_hsp_map(self):
        """Test QueryResult.hsp_map."""
        # hsp_map should apply the given function to all contained HSPs
        # deepcopy the qresult since we'll change the objects within
        # (the fixtures are shared module-level objects)
        qresult = deepcopy(self.qresult)
        # apply mock attributes to hsp, for testing mapped hsp attributes
        for hit in qresult:
            for hsp in hit:
                setattr(hsp, "mock", 13)
        # map func: remove first letter of all HSP.aln
        def map_func(hsp):
            mapped_frags = [x[1:] for x in hsp]
            return HSP(mapped_frags)
        mapped = qresult.hsp_map(map_func)
        # make sure old hsp attributes are not transferred to mapped hsps,
        # since map_func builds brand-new HSP objects
        for hit in mapped:
            for hsp in hit.hsps:
                self.assertFalse(hasattr(hsp, "mock"))
        # check hsps in hit1: each sequence lost its first column
        self.assertEqual("TGCGCAT", str(mapped["hit1"][0][0].hit.seq))
        self.assertEqual("TGCGCAT", str(mapped["hit1"][0][0].query.seq))
        self.assertEqual("TG", str(mapped["hit1"][1][0].hit.seq))
        self.assertEqual("AT", str(mapped["hit1"][1][0].query.seq))
        self.assertEqual("TTCG", str(mapped["hit1"][2][0].hit.seq))
        self.assertEqual("T-CG", str(mapped["hit1"][2][0].query.seq))
        self.assertEqual("TTCG", str(mapped["hit1"][2][1].hit.seq))
        self.assertEqual("T-CG", str(mapped["hit1"][2][1].query.seq))
        self.assertEqual("T", str(mapped["hit1"][3][0].hit.seq))
        self.assertEqual("T", str(mapped["hit1"][3][0].query.seq))
        self.assertEqual("TCG", str(mapped["hit1"][3][1].hit.seq))
        self.assertEqual("TGG", str(mapped["hit1"][3][1].query.seq))
        # check hsps in hit2
        self.assertEqual("GGCCC", str(mapped["hit2"][0][0].hit.seq))
        self.assertEqual("GGCC-", str(mapped["hit2"][0][0].query.seq))
        # check hsps in hit3
        self.assertEqual("ATG", str(mapped["hit3"][0][0].hit.seq))
        self.assertEqual("TTG", str(mapped["hit3"][0][0].query.seq))
        self.assertEqual("TATAT", str(mapped["hit3"][1][0].hit.seq))
        self.assertEqual("TATAT", str(mapped["hit3"][1][0].query.seq))
        # and make sure the attributes are transferred
        self.assertEqual(1102, mapped.seq_len)
        self.assertEqual("refseq_rna", mapped.target)
def test_hsp_map_no_func(self):
"""Test QueryResult.hsp_map, without arguments."""
# when given no arguments, hit_map should create a new object with
# the same contents
mapped = self.qresult.hsp_map()
self.assertTrue(compare_search_obj(mapped, self.qresult))
self.assertNotEqual(id(mapped), id(self.qresult))
self.assertEqual(1102, mapped.seq_len)
self.assertEqual("refseq_rna", mapped.target)
def test_pop_ok(self):
"""Test QueryResult.pop."""
self.assertEqual(3, len(self.qresult))
hit = self.qresult.pop()
self.assertEqual(hit, hit31)
self.assertEqual([hit11, hit21], list(self.qresult.hits))
def test_pop_int_index_ok(self):
"""Test QueryResult.pop, with integer index."""
# pop should work if given an int index
self.assertEqual(3, len(self.qresult))
hit = self.qresult.pop(1)
self.assertEqual(hit, hit21)
self.assertEqual([hit11, hit31], list(self.qresult.hits))
def test_pop_string_index_ok(self):
"""Test QueryResult.pop, with string index."""
# pop should work if given a string index
self.assertEqual(3, len(self.qresult))
hit = self.qresult.pop("hit2")
self.assertEqual(hit, hit21)
self.assertEqual([hit11, hit31], list(self.qresult.hits))
def test_pop_string_alt_ok(self):
"""Test QueryResult.pop, with alternative ID."""
# pop should work with alternative index
hit11._id_alt = ["alt1"]
hit21._id_alt = ["alt2"]
qresult = QueryResult([hit11, hit21])
hit = qresult.pop("alt1")
self.assertEqual(hit, hit11)
self.assertEqual([hit21], list(qresult))
self.assertNotIn("hit1", qresult)
hit11._id_alt = []
hit21._id_alt = []
def test_index(self):
"""Test QueryResult.index."""
# index should accept hit objects or hit key strings
self.assertEqual(2, self.qresult.index("hit3"))
self.assertEqual(2, self.qresult.index(hit31))
def test_index_alt(self):
"""Test QueryResult.index, with alt ID."""
# index should work with alt IDs
hit11._id_alt = ["alt1"]
qresult = QueryResult([hit21, hit11])
self.assertEqual(1, qresult.index("alt1"))
hit11._id_alt = []
def test_index_not_present(self):
"""Test QueryResult.index, when index is not present."""
self.assertRaises(ValueError, self.qresult.index, "hit4")
self.assertRaises(ValueError, self.qresult.index, hit41)
def test_sort_ok(self):
"""Test QueryResult.sort."""
# sort without any arguments should keep the Hits in the same order
self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
self.qresult.sort()
self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
def test_sort_not_in_place_ok(self):
"""Test QueryResult.sort, not in place."""
# sort without any arguments should keep the Hits in the same order
self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
sorted_qresult = self.qresult.sort(in_place=False)
self.assertEqual([hit11, hit21, hit31], list(sorted_qresult.hits))
self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
def test_sort_reverse_ok(self):
"""Test QueryResult.sort, reverse."""
# sorting with reverse=True should return a QueryResult with Hits reversed
self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
self.qresult.sort(reverse=True)
self.assertEqual([hit31, hit21, hit11], list(self.qresult.hits))
def test_sort_reverse_not_in_place_ok(self):
"""Test QueryResult.sort, reverse, not in place."""
# sorting with reverse=True should return a QueryResult with Hits reversed
self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
sorted_qresult = self.qresult.sort(reverse=True, in_place=False)
self.assertEqual([hit31, hit21, hit11], list(sorted_qresult.hits))
self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
def test_sort_key_ok(self):
"""Test QueryResult.sort, with custom key."""
# if custom key is given, sort using it
key = lambda hit: len(hit) # noqa: E731
self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
self.qresult.sort(key=key)
self.assertEqual([hit21, hit31, hit11], list(self.qresult.hits))
def test_sort_key_not_in_place_ok(self):
"""Test QueryResult.sort, with custom key, not in place."""
# if custom key is given, sort using it
key = lambda hit: len(hit) # noqa: E731
self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
sorted_qresult = self.qresult.sort(key=key, in_place=False)
self.assertEqual([hit21, hit31, hit11], list(sorted_qresult.hits))
self.assertEqual([hit11, hit21, hit31], list(self.qresult.hits))
class HitCases(unittest.TestCase):
    """Tests for the Hit model class."""

    def setUp(self):
        # hsp111..hsp113 are shared module-level fixtures for hit "hit1"
        self.hit = Hit([hsp111, hsp112, hsp113])
        self.hit.evalue = 5e-10
        self.hit.name = "test"

    def test_pickle(self):
        """Test pickling and unpickling of Hit."""
        buf = BytesIO()
        pickle.dump(self.hit, buf)
        unp = pickle.loads(buf.getvalue())
        self.assertTrue(compare_search_obj(self.hit, unp))

    def test_init_none(self):
        """Test Hit.__init__, no arguments."""
        hit = Hit()
        self.assertEqual(None, hit.id)
        self.assertEqual(None, hit.description)
        self.assertEqual(None, hit.query_id)
        self.assertEqual(None, hit.query_description)

    def test_init_id_only(self):
        """Test Hit.__init__, with ID only."""
        hit = Hit(id="hit1")
        self.assertEqual("hit1", hit.id)
        self.assertEqual(None, hit.description)
        self.assertEqual(None, hit.query_id)
        self.assertEqual(None, hit.query_description)

    def test_init_hsps_only(self):
        """Test Hit.__init__, with hsps only."""
        hit = Hit([hsp111, hsp112, hsp113])
        self.assertEqual("hit1", hit.id)
        self.assertEqual("<unknown description>", hit.description)
        self.assertEqual("query1", hit.query_id)  # set from the HSPs
        self.assertEqual("<unknown description>", hit.query_description)

    def test_repr(self):
        """Test Hit.__repr__."""
        # test for cases with 1 or other alignment numbers
        self.assertEqual("Hit(id='hit1', query_id='query1', 3 hsps)", repr(self.hit))

    def test_hsps(self):
        """Test Hit.hsps."""
        # hsps should return the list of hsps contained
        self.assertEqual([hsp111, hsp112, hsp113], self.hit.hsps)

    def test_fragments(self):
        """Test Hit.fragments."""
        # fragments should return the list of fragments in each hsps
        # as a flat list
        self.assertEqual([frag111, frag112, frag113, frag113b], self.hit.fragments)

    def test_iter(self):
        """Test Hit.__iter__."""
        # iteration should return hsps contained
        for counter, hsp in enumerate(self.hit):
            self.assertIn(hsp, [hsp111, hsp112, hsp113])
        self.assertEqual(2, counter)

    def test_len(self):
        """Test Hit.__len__."""
        # len() on Hit objects should return how many hsps it has
        self.assertEqual(3, len(self.hit))

    def test_bool(self):
        """Test Hit.__bool__."""
        # bool() on Hit objects should return True only if hsps is filled
        # which is always true
        self.assertTrue(self.hit)

    def test_setitem_single(self):
        """Test Hit.__setitem__, single item."""
        # test regular setitem overwrite
        self.hit[1] = hsp114
        self.assertEqual(self.hit.hsps, [hsp111, hsp114, hsp113])

    def test_item_multiple(self):
        """Test Hit.__setitem__, multiple items."""
        # test iterable setitem
        self.hit[:] = [hsp113, hsp112, hsp111]
        self.assertEqual(self.hit.hsps, [hsp113, hsp112, hsp111])

    def test_getitem_single(self):
        """Test Hit.__getitem__, single item."""
        # getitem using integer index should return a hsp object
        hsp1 = self.hit[0]
        self.assertEqual(hsp111, hsp1)
        hsp3 = self.hit[-1]
        self.assertEqual(hsp113, hsp3)

    def test_getitem_multiple(self):
        """Test Hit.__getitem__, multiple items."""
        # getitem using slices should return another hit object
        # with the hsps sliced accordingly, but other attributes preserved
        new_hit = self.hit[:2]
        self.assertEqual(2, len(new_hit))
        self.assertEqual([hsp111, hsp112], new_hit.hsps)
        self.assertEqual(self.hit.id, new_hit.id)
        self.assertEqual(self.hit.query_id, new_hit.query_id)
        self.assertEqual(5e-10, new_hit.evalue)
        self.assertEqual("test", new_hit.name)

    def test_delitem(self):
        """Test Hit.__delitem__."""
        # test delitem
        del self.hit[0]
        self.assertEqual(2, len(self.hit))
        self.assertEqual([hsp112, hsp113], self.hit.hsps)

    def test_validate_hsp_ok(self):
        """Test Hit._validate_hsp."""
        # validation should pass if item is an hsp object with matching
        # query and hit ids
        # if validation passes, None is returned
        self.assertEqual(None, self.hit._validate_hsp(hsp114))

    def test_validate_hsp_wrong_type(self):
        """Test Hit._validate_hsp, wrong type."""
        # validation should fail if item is not an hsp object
        self.assertRaises(TypeError, self.hit._validate_hsp, 1)
        self.assertRaises(TypeError, self.hit._validate_hsp, Seq(""))

    def test_validate_hsp_wrong_query_id(self):
        """Test Hit._validate_hsp, wrong query ID."""
        # validation should fail if query id does not match
        self.assertRaises(ValueError, self.hit._validate_hsp, hsp211)

    def test_validate_hsp_wrong_hit_id(self):
        """Test Hit._validate_hsp, wrong hit ID."""
        # validation should fail if hit id does not match
        self.assertRaises(ValueError, self.hit._validate_hsp, hsp121)

    def test_desc_set(self):
        """Test Hit.description setter."""
        # setting the description should change the hit seqrecord description
        # of the contained hsps, if they have an alignment
        # test for default value
        hit = deepcopy(self.hit)
        new_desc = "unicorn hox homolog"
        # test initial condition
        for hsp in hit:
            self.assertNotEqual(new_desc, hsp.hit_description)
            for fragment in hsp:
                self.assertNotEqual(new_desc, fragment.hit_description)
                self.assertNotEqual(new_desc, fragment.hit.description)
        hit.description = new_desc
        # test after setting
        for hsp in hit:
            self.assertEqual(new_desc, hsp.hit_description)
            for fragment in hsp:
                self.assertEqual(new_desc, fragment.hit_description)
                self.assertEqual(new_desc, fragment.hit.description)

    def test_desc_set_no_seqrecord(self):
        """Test Hit.description setter, without HSP SeqRecords."""
        frag1 = HSPFragment("hit1", "query")
        frag2 = HSPFragment("hit1", "query")
        hit = Hit([HSP([x]) for x in [frag1, frag2]])
        new_desc = "unicorn hox homolog"
        # test initial condition
        self.assertEqual(hit.description, "<unknown description>")
        for hsp in hit:
            self.assertEqual(hsp.hit_description, "<unknown description>")
            for fragment in hsp:
                # NOTE(review): the original re-checked ``hsp`` here instead
                # of ``fragment``, making the inner loop a no-op
                self.assertEqual(fragment.hit_description, "<unknown description>")
        hit.description = new_desc
        # test after setting
        self.assertEqual(hit.description, new_desc)
        for hsp in hit:
            # NOTE(review): the original used assertTrue(x, y), where y is
            # only a failure message -- the check always passed; use a real
            # equality assertion instead
            self.assertEqual(hsp.hit_description, new_desc)
            for fragment in hsp:
                self.assertEqual(fragment.hit_description, new_desc)

    def test_id_set(self):
        """Test Hit.id setter."""
        # setting an ID should change the query IDs of all contained HSPs
        hit = deepcopy(self.hit)
        self.assertEqual("hit1", hit.id)
        for hsp in hit.hsps:
            self.assertEqual("hit1", hsp.hit_id)
            for fragment in hsp:
                self.assertEqual(fragment.hit_id, "hit1")
                self.assertEqual(fragment.hit.id, "hit1")
        hit.id = "new_id"
        self.assertEqual("new_id", hit.id)
        for hsp in hit.hsps:
            self.assertEqual("new_id", hsp.hit_id)
            for fragment in hsp:
                self.assertEqual(fragment.hit_id, "new_id")
                self.assertEqual(fragment.hit.id, "new_id")

    def test_append(self):
        """Test Hit.append."""
        # append should add hits to the last position
        self.hit.append(hsp114)
        self.assertEqual(4, len(self.hit))
        self.assertEqual(hsp114, self.hit[-1])

    def test_filter(self):
        """Test Hit.filter."""
        # filter should return a new QueryResult object (shallow copy),
        self.assertEqual([hsp111, hsp112, hsp113], self.hit.hsps)
        # filter func: min hsp length == 4
        filter_func = lambda hsp: len(hsp[0]) >= 4  # noqa: E731
        filtered = self.hit.filter(filter_func)
        self.assertEqual([hsp111, hsp113], filtered.hsps)
        # make sure all remaining hits return True for the filter function
        self.assertTrue(all(filter_func(hit) for hit in filtered))
        self.assertEqual(5e-10, filtered.evalue)
        self.assertEqual("test", filtered.name)

    def test_filter_no_func(self):
        """Test Hit.filter, without arguments."""
        # when given no arguments, filter should create a new object with
        # the same contents
        filtered = self.hit.filter()
        self.assertTrue(compare_search_obj(filtered, self.hit))
        self.assertNotEqual(id(filtered), id(self.hit))
        self.assertEqual(5e-10, filtered.evalue)
        self.assertEqual("test", filtered.name)

    def test_filter_no_filtered(self):
        """Test Hit.hit_filter, all hits filtered out."""
        # when the filter filters out all hits, it should return None
        filter_func = lambda hsp: len(hsp[0]) > 50  # noqa: E731
        filtered = self.hit.filter(filter_func)
        self.assertTrue(filtered is None)

    def test_index(self):
        """Test Hit.index."""
        # index should accept hsp objects
        self.assertEqual(1, self.hit.index(hsp112))

    def test_index_not_present(self):
        """Test Hit.index, when index is not present."""
        self.assertRaises(ValueError, self.hit.index, hsp114)

    def test_map(self):
        """Test Hit.map."""
        # map should apply the given function to all contained HSPs
        # deepcopy hit since we'll change the objects within
        hit = deepcopy(self.hit)
        # apply mock attributes to hsp, for testing mapped hsp attributes
        for hsp in hit:
            setattr(hsp, "mock", 13)
        # map func: remove first letter of all HSP.alignment
        def map_func(hsp):
            mapped_frags = [x[1:] for x in hsp]
            return HSP(mapped_frags)
        mapped = hit.map(map_func)
        # make sure old hsp attributes are not transferred to mapped hsps
        for hsp in mapped:
            self.assertFalse(hasattr(hsp, "mock"))
        # check hsps in hit1
        self.assertEqual("TGCGCAT", str(mapped[0][0].hit.seq))
        self.assertEqual("TGCGCAT", str(mapped[0][0].query.seq))
        self.assertEqual("TG", str(mapped[1][0].hit.seq))
        self.assertEqual("AT", str(mapped[1][0].query.seq))
        self.assertEqual("TTCG", str(mapped[2][0].hit.seq))
        self.assertEqual("T-CG", str(mapped[2][0].query.seq))
        self.assertEqual("TTCG", str(mapped[2][1].hit.seq))
        self.assertEqual("T-CG", str(mapped[2][1].query.seq))
        # and make sure the attributes are transferred
        self.assertEqual(5e-10, mapped.evalue)
        self.assertEqual("test", mapped.name)

    def test_hsp_map_no_func(self):
        """Test Hit.map, without arguments."""
        # when given no arguments, map should create a new object with
        # the same contents
        mapped = self.hit.map()
        self.assertTrue(compare_search_obj(mapped, self.hit))
        self.assertNotEqual(id(mapped), id(self.hit))
        self.assertEqual(5e-10, mapped.evalue)
        self.assertEqual("test", mapped.name)

    def test_pop(self):
        """Test Hit.pop."""
        # pop should return the last item by default
        self.assertEqual(hsp113, self.hit.pop())
        self.assertEqual(hsp111, self.hit.pop(0))

    def test_sort(self):
        """Test Hit.sort."""
        self.assertEqual([hsp111, hsp112, hsp113], self.hit.hsps)
        # sort by hsp length
        key = lambda batch_hsp: len(batch_hsp[0])  # noqa: E731
        self.hit.sort(key=key)
        self.assertEqual([hsp112, hsp113, hsp111], self.hit.hsps)

    def test_sort_not_in_place(self):
        """Test Hit.sort, not in place."""
        self.assertEqual([hsp111, hsp112, hsp113], self.hit.hsps)
        # sort by hsp length
        key = lambda hsp: len(hsp[0])  # noqa: E731
        sorted_hit = self.hit.sort(key=key, in_place=False)
        self.assertEqual([hsp112, hsp113, hsp111], sorted_hit.hsps)
        self.assertEqual([hsp111, hsp112, hsp113], self.hit.hsps)
        self.assertEqual(5e-10, sorted_hit.evalue)
        self.assertEqual("test", sorted_hit.name)
class HSPSingleFragmentCases(unittest.TestCase):
    """Tests for HSP objects that contain a single HSPFragment."""

    def setUp(self):
        # one fragment: hit "ATCAGT" (15-20) vs gapped query "AT-ACT" (0-6)
        self.frag = HSPFragment("hit_id", "query_id", "ATCAGT", "AT-ACT")
        self.frag.query_start = 0
        self.frag.query_end = 6
        self.frag.hit_start = 15
        self.frag.hit_end = 20
        self.hsp = HSP([self.frag])

    def test_init_no_fragment(self):
        """Test HSP.__init__ without fragments."""
        # an HSP must contain at least one fragment
        self.assertRaises(ValueError, HSP, [])

    def test_len(self):
        """Test HSP.__len__."""
        # len() counts the contained fragments
        self.assertEqual(1, len(self.hsp))

    def test_fragment(self):
        """Test HSP.fragment property."""
        self.assertTrue(self.frag is self.hsp.fragment)

    def test_is_fragmented(self):
        """Test HSP.is_fragmented property."""
        self.assertFalse(self.hsp.is_fragmented)

    def test_seq(self):
        """Test HSP sequence properties."""
        self.assertEqual("ATCAGT", str(self.hsp.hit.seq))
        self.assertEqual("AT-ACT", str(self.hsp.query.seq))

    def test_alignment(self):
        """Test HSP.alignment property."""
        aln = self.hsp.aln
        self.assertIsInstance(aln, MultipleSeqAlignment)
        self.assertEqual(2, len(aln))
        # the original used assertTrue with two arguments, which treats the
        # second argument as the failure message and therefore always passed;
        # compare the actual alignment row sequences, order-agnostically
        self.assertEqual({"ATCAGT", "AT-ACT"}, {str(row.seq) for row in aln})

    def test_aln_span(self):
        """Test HSP.aln_span property."""
        self.assertEqual(6, self.hsp.aln_span)

    def test_span(self):
        """Test HSP span properties."""
        # span is end - start for each sequence
        self.assertEqual(5, self.hsp.hit_span)
        self.assertEqual(6, self.hsp.query_span)

    def test_range(self):
        """Test HSP range properties."""
        self.assertEqual((15, 20), self.hsp.hit_range)
        self.assertEqual((0, 6), self.hsp.query_range)

    def test_setters_readonly(self):
        """Test HSP read-only properties."""
        read_onlies = ("range", "span", "strand", "frame", "start", "end")
        for seq_type in ("query", "hit"):
            self.assertRaises(AttributeError, setattr, self.hsp, seq_type, "A")
            for attr in read_onlies:
                self.assertRaises(
                    AttributeError, setattr, self.hsp, "%s_%s" % (seq_type, attr), 5
                )
        self.assertRaises(AttributeError, setattr, self.hsp, "aln", None)
class HSPMultipleFragmentCases(unittest.TestCase):
    """Tests for HSP objects that contain multiple HSPFragments."""

    def setUp(self):
        # two disjoint fragments of the same hit/query pair
        self.frag1 = HSPFragment("hit_id", "query_id", "ATCAGT", "AT-ACT")
        self.frag1.query_start = 0
        self.frag1.query_end = 6
        self.frag1.hit_start = 15
        self.frag1.hit_end = 20
        self.frag2 = HSPFragment("hit_id", "query_id", "GGG", "CCC")
        self.frag2.query_start = 10
        self.frag2.query_end = 13
        self.frag2.hit_start = 158
        self.frag2.hit_end = 161
        self.hsp = HSP([self.frag1, self.frag2])
    def test_pickle(self):
        """Test pickling and unpickling of HSP."""
        buf = BytesIO()
        pickle.dump(self.hsp, buf)
        unp = pickle.loads(buf.getvalue())
        self.assertTrue(compare_search_obj(self.hsp, unp))
    def test_len(self):
        """Test HSP.__len__."""
        # len() counts the contained fragments
        self.assertEqual(2, len(self.hsp))
    def test_getitem(self):
        """Test HSP.__getitem__."""
        # integer indexing returns the fragment objects themselves
        self.assertTrue(self.frag1 is self.hsp[0])
        self.assertTrue(self.frag2 is self.hsp[1])
    def test_setitem_single(self):
        """Test HSP.__setitem___, single item."""
        frag3 = HSPFragment("hit_id", "query_id", "AAA", "AAT")
        self.hsp[1] = frag3
        self.assertEqual(2, len(self.hsp))
        self.assertTrue(self.frag1 is self.hsp[0])
        self.assertTrue(frag3 is self.hsp[1])
    def test_setitem_multiple(self):
        """Test HSP.__setitem__, multiple items."""
        frag3 = HSPFragment("hit_id", "query_id", "AAA", "AAT")
        frag4 = HSPFragment("hit_id", "query_id", "GGG", "GAG")
        self.hsp[:2] = [frag3, frag4]
        self.assertEqual(2, len(self.hsp))
        self.assertTrue(frag3 is self.hsp[0])
        self.assertTrue(frag4 is self.hsp[1])
    def test_delitem(self):
        """Test HSP.__delitem__."""
        del self.hsp[0]
        self.assertEqual(1, len(self.hsp))
        self.assertTrue(self.frag2 is self.hsp[0])
    def test_contains(self):
        """Test HSP.__contains__."""
        # membership checks against the contained fragments
        frag3 = HSPFragment("hit_id", "query_id", "AAA", "AAT")
        self.assertIn(self.frag1, self.hsp)
        self.assertNotIn(frag3, self.hsp)
    def test_fragments(self):
        """Test HSP.fragments property."""
        self.assertEqual([self.frag1, self.frag2], self.hsp.fragments)
    def test_is_fragmented(self):
        """Test HSP.is_fragmented property."""
        # True because the HSP holds more than one fragment
        self.assertTrue(self.hsp.is_fragmented)
    def test_seqs(self):
        """Test HSP sequence properties."""
        # *_all properties return the per-fragment sequences in order
        self.assertEqual(["ATCAGT", "GGG"], [str(x.seq) for x in self.hsp.hit_all])
        self.assertEqual(["AT-ACT", "CCC"], [str(x.seq) for x in self.hsp.query_all])
    def test_id_desc_set(self):
        """Test HSP query and hit id and description setters."""
        # setting an ID/description on the HSP must cascade to every fragment
        for seq_type in ("query", "hit"):
            for attr in ("id", "description"):
                attr_name = "%s_%s" % (seq_type, attr)
                value = getattr(self.hsp, attr_name)
                if attr == "id":
                    # because we happen to have the same value for
                    # IDs and the actual attribute name
                    self.assertEqual(value, attr_name)
                    for fragment in self.hsp:
                        self.assertEqual(getattr(fragment, attr_name), attr_name)
                else:
                    self.assertEqual(value, "<unknown description>")
                    for fragment in self.hsp:
                        self.assertEqual(
                            getattr(fragment, attr_name), "<unknown description>"
                        )
                new_value = "new_" + value
                setattr(self.hsp, attr_name, new_value)
                self.assertEqual(getattr(self.hsp, attr_name), new_value)
                self.assertNotEqual(getattr(self.hsp, attr_name), value)
                for fragment in self.hsp:
                    self.assertEqual(getattr(fragment, attr_name), new_value)
                    self.assertNotEqual(getattr(fragment, attr_name), value)
    def test_alphabet(self):
        """Test HSP.alphabet getter."""
        self.assertTrue(self.hsp.alphabet is single_letter_alphabet)
    def test_alphabet_set(self):
        """Test HSP.alphabet setter."""
        # setting the alphabet must cascade to every fragment
        # test initial values
        self.assertTrue(self.hsp.alphabet is single_letter_alphabet)
        for frag in self.hsp.fragments:
            self.assertTrue(frag.alphabet is single_letter_alphabet)
        self.hsp.alphabet = generic_dna
        # test values after setting
        self.assertTrue(self.hsp.alphabet is generic_dna)
        for frag in self.hsp.fragments:
            self.assertTrue(frag.alphabet is generic_dna)
    def test_range(self):
        """Test HSP range properties."""
        # range on HSP with multiple fragment should give the
        # min start and max end coordinates
        self.assertEqual((15, 161), self.hsp.hit_range)
        self.assertEqual((0, 13), self.hsp.query_range)
    def test_ranges(self):
        """Test HSP ranges properties."""
        # *_range_all gives the per-fragment coordinate pairs
        self.assertEqual([(15, 20), (158, 161)], self.hsp.hit_range_all)
        self.assertEqual([(0, 6), (10, 13)], self.hsp.query_range_all)
    def test_span(self):
        """Test HSP span properties."""
        # span is always end - start
        self.assertEqual(146, self.hsp.hit_span)
        self.assertEqual(13, self.hsp.query_span)
    def test_setters_readonly(self):
        """Test HSP read-only properties."""
        read_onlies = ("range_all", "strand_all", "frame_all")
        for seq_type in ("query", "hit"):
            for attr in read_onlies:
                self.assertRaises(
                    AttributeError, setattr, self.hsp, "%s_%s" % (seq_type, attr), 5
                )
        self.assertRaises(AttributeError, setattr, self.hsp, "aln_all", None)
        self.assertRaises(AttributeError, setattr, self.hsp, "hit_all", None)
        self.assertRaises(AttributeError, setattr, self.hsp, "query_all", None)
class HSPFragmentWithoutSeqCases(unittest.TestCase):
    """Tests for HSPFragment objects that carry no hit/query sequences."""

    def setUp(self):
        self.fragment = HSPFragment("hit_id", "query_id")

    def test_init(self):
        """Test HSPFragment.__init__ attributes."""
        fragment = HSPFragment("hit_id", "query_id")
        for seq_type in ("query", "hit"):
            self.assertTrue(getattr(fragment, seq_type) is None)
            for attr in ("strand", "frame", "start", "end"):
                attr_name = "%s_%s" % (seq_type, attr)
                self.assertTrue(getattr(fragment, attr_name) is None)
        self.assertTrue(fragment.aln is None)
        self.assertTrue(fragment.alphabet is single_letter_alphabet)
        self.assertEqual(fragment.aln_annotation, {})

    def test_seqmodel(self):
        """Test HSPFragment sequence attributes, no alignments."""
        # all query, hit, and alignment objects should be None
        self.assertTrue(self.fragment.query is None)
        self.assertTrue(self.fragment.hit is None)
        self.assertTrue(self.fragment.aln is None)

    def test_len(self):
        """Test HSPFragment.__len__, no alignments."""
        # without sequences aln_span is None, so len() raises TypeError;
        # NOTE(review): the original called len(self) on the TestCase
        # instance, which raised TypeError for an unrelated reason
        self.assertRaises(TypeError, len, self.fragment)
        # len is a shorthand for .aln_span, and it can be set manually
        self.fragment.aln_span = 5
        self.assertEqual(5, len(self.fragment))

    def test_repr(self):
        """Test HSPFragment.__repr__, no alignments."""
        # test for minimum repr
        self.assertEqual(
            "HSPFragment(hit_id='hit_id', query_id='query_id')", repr(self.fragment)
        )
        self.fragment.aln_span = 5
        self.assertEqual(
            "HSPFragment(hit_id='hit_id', query_id='query_id', 5 columns)",
            repr(self.fragment),
        )

    def test_getitem(self):
        """Test HSPFragment.__getitem__, no alignments."""
        # getitem not supported without alignment
        self.assertRaises(TypeError, self.fragment.__getitem__, 0)
        self.assertRaises(TypeError, self.fragment.__getitem__, slice(0, 2))

    def test_getitem_only_query(self):
        """Test HSPFragment.__getitem__, only query."""
        # getitem should work if only query is present
        self.fragment.query = "AATCG"
        self.assertEqual("ATCG", str(self.fragment[1:].query.seq))

    def test_getitem_only_hit(self):
        """Test HSPFragment.__getitem__, only hit."""
        # getitem should work if only hit is present
        self.fragment.hit = "CATGC"
        self.assertEqual("ATGC", str(self.fragment[1:].hit.seq))

    def test_iter(self):
        """Test HSPFragment.__iter__, no alignments."""
        # iterating a fragment without sequences is not supported; consuming
        # the iterator hits __getitem__, which raises TypeError (see
        # test_getitem above)
        # NOTE(review): the original called iter(self) on the TestCase
        # instance, which never exercised the fragment at all
        self.assertRaises(TypeError, next, iter(self.fragment))
class HSPFragmentCases(unittest.TestCase):
def setUp(self):
self.fragment = HSPFragment(
"hit_id", "query_id", "ATGCTAGCTACA", "ATG--AGCTAGG"
)
def test_pickle(self):
"""Test pickling and unpickling of HSPFragment."""
buf = BytesIO()
pickle.dump(self.fragment, buf)
unp = pickle.loads(buf.getvalue())
self.assertTrue(compare_search_obj(self.fragment, unp))
def test_init_with_seqrecord(self):
"""Test HSPFragment.__init__, with SeqRecord."""
# init should work with seqrecords
hit_seq = SeqRecord(Seq("ATGCTAGCTACA"))
query_seq = SeqRecord(Seq("ATG--AGCTAGG"))
hsp = HSPFragment("hit_id", "query_id", hit_seq, query_seq)
self.assertIsInstance(hsp.query, SeqRecord)
self.assertIsInstance(hsp.hit, SeqRecord)
self.assertIsInstance(hsp.aln, MultipleSeqAlignment)
def test_init_wrong_seqtypes(self):
"""Test HSPFragment.__init__, wrong sequence argument types."""
# init should only work with string or seqrecords
wrong_query = Seq("ATGC")
wrong_hit = Seq("ATGC")
self.assertRaises(
TypeError, HSPFragment, "hit_id", "query_id", wrong_hit, wrong_query
)
    def test_seqmodel(self):
        """Test HSPFragment sequence attribute types and default values."""
        # string sequence arguments are wrapped into SeqRecords with
        # default name/description/alphabet values
        # check hit
        self.assertIsInstance(self.fragment.hit, SeqRecord)
        self.assertEqual("<unknown description>", self.fragment.hit.description)
        self.assertEqual("aligned hit sequence", self.fragment.hit.name)
        self.assertEqual(single_letter_alphabet, self.fragment.hit.seq.alphabet)
        # check query
        self.assertIsInstance(self.fragment.query, SeqRecord)
        self.assertEqual("<unknown description>", self.fragment.query.description)
        self.assertEqual("aligned query sequence", self.fragment.query.name)
        self.assertEqual(single_letter_alphabet, self.fragment.query.seq.alphabet)
        # check alignment
        self.assertIsInstance(self.fragment.aln, MultipleSeqAlignment)
        self.assertEqual(single_letter_alphabet, self.fragment.aln._alphabet)
def test_alphabet_no_seq(self):
"""Test HSPFragment alphabet property, query and hit sequences not present."""
self.assertTrue(self.fragment.alphabet is single_letter_alphabet)
self.fragment.alphabet = generic_dna
self.assertTrue(self.fragment.alphabet is generic_dna)
def test_alphabet_with_seq(self):
"""Test HSPFragment alphabet property, query or hit sequences present."""
self.assertTrue(self.fragment.alphabet is single_letter_alphabet)
self.fragment._hit = SeqRecord(Seq("AAA"))
self.fragment._query = SeqRecord(Seq("AAA"))
self.fragment.alphabet = generic_dna
self.assertTrue(self.fragment.alphabet is generic_dna)
self.assertTrue(self.fragment.hit.seq.alphabet is generic_dna)
self.assertTrue(self.fragment.query.seq.alphabet is generic_dna)
def test_seq_unequal_hit_query_len(self):
"""Test HSPFragment sequence setter with unequal hit and query lengths."""
for seq_type in ("hit", "query"):
opp_type = "query" if seq_type == "hit" else "hit"
# reset values first
fragment = HSPFragment("hit_id", "query_id")
# and test it against the opposite
setattr(fragment, seq_type, "ATGCACAACAGGA")
self.assertRaises(ValueError, setattr, fragment, opp_type, "ATGCGA")
def test_len(self):
"""Test HSPFragment.__len__."""
# len should equal alignment column length
self.assertEqual(12, len(self.fragment))
def test_repr(self):
"""Test HSPFragment.__repr__."""
# test for minimum repr
self.assertEqual(
"HSPFragment(hit_id='hit_id', query_id='query_id', 12 columns)",
repr(self.fragment),
)
def test_getitem(self):
"""Test HSPFragment.__getitem__."""
# getitem is supported when alignment is present
sliced_fragment = self.fragment[:5]
self.assertIsInstance(sliced_fragment, HSPFragment)
self.assertEqual(5, len(sliced_fragment))
self.assertEqual("ATGCT", str(sliced_fragment.hit.seq))
self.assertEqual("ATG--", str(sliced_fragment.query.seq))
def test_getitem_attrs(self):
"""Test HSPFragment.__getitem__, with attributes."""
# attributes from the original instance should not be present in the new
# objects, except for query, hit, and alignment - related attributes
setattr(self.fragment, "attr_original", 1000)
setattr(self.fragment, "hit_description", "yeah")
setattr(self.fragment, "hit_strand", 1)
setattr(self.fragment, "query_frame", None)
# test values prior to slicing
self.assertEqual(1000, getattr(self.fragment, "attr_original"))
self.assertEqual("yeah", getattr(self.fragment, "hit_description"))
self.assertEqual(1, getattr(self.fragment, "hit_strand"))
self.assertEqual(None, getattr(self.fragment, "query_frame"))
new_hsp = self.fragment[:5]
# test values after slicing
self.assertFalse(hasattr(new_hsp, "attr_original"))
self.assertEqual(1000, getattr(self.fragment, "attr_original"))
self.assertEqual("yeah", getattr(self.fragment, "hit_description"))
self.assertEqual(1, getattr(self.fragment, "hit_strand"))
self.assertEqual(None, getattr(self.fragment, "query_frame"))
def test_getitem_alignment_annot(self):
"""Test HSPFragment.__getitem__, with alignment annotation."""
# the alignment is annotated, it should be sliced accordingly
# and transferred to the new object
setattr(self.fragment, "aln_annotation", {"test": "182718738172"})
new_hsp = self.fragment[:5]
self.assertEqual("18271", new_hsp.aln_annotation["test"])
def test_default_attrs(self):
"""Test HSPFragment attributes' default values."""
fragment = HSPFragment()
self.assertEqual("<unknown id>", fragment.hit_id)
self.assertEqual("<unknown id>", fragment.query_id)
self.assertEqual("<unknown description>", fragment.hit_description)
self.assertEqual("<unknown description>", fragment.query_description)
self.assertEqual(None, fragment.hit)
self.assertEqual(None, fragment.query)
self.assertEqual(None, fragment.aln)
self.assertEqual([], fragment.hit_features)
self.assertEqual([], fragment.query_features)
self.assertEqual(None, fragment.hit_strand)
self.assertEqual(None, fragment.query_strand)
self.assertEqual(None, fragment.hit_frame)
self.assertEqual(None, fragment.query_frame)
def test_id_desc_set(self):
"""Test HSPFragment query and hit id and description setters."""
for seq_type in ("query", "hit"):
for attr in ("id", "description"):
attr_name = "%s_%s" % (seq_type, attr)
value = getattr(self.fragment, attr_name)
if attr == "id":
# because we happen to have the same value for
# IDs and the actual attribute name
self.assertEqual(value, attr_name)
else:
self.assertEqual(value, "<unknown description>")
new_value = "new_" + value
setattr(self.fragment, attr_name, new_value)
self.assertEqual(getattr(self.fragment, attr_name), new_value)
self.assertNotEqual(getattr(self.fragment, attr_name), value)
def test_frame_set_ok(self):
"""Test HSPFragment query and hit frame setters."""
attr = "frame"
for seq_type in ("query", "hit"):
attr_name = "%s_%s" % (seq_type, attr)
for value in (-3, -2, -1, 0, 1, 2, 3, None):
setattr(self.fragment, attr_name, value)
self.assertEqual(value, getattr(self.fragment, attr_name))
def test_frame_set_error(self):
"""Test HSPFragment query and hit frame setters, invalid values."""
attr = "frame"
for seq_type in ("query", "hit"):
func_name = "_%s_%s_set" % (seq_type, attr)
func = getattr(self.fragment, func_name)
for value in ("3", "+3", "-2", "plus"):
self.assertRaises(ValueError, func, value)
def test_strand_set_ok(self):
"""Test HSPFragment query and hit strand setters."""
attr = "strand"
for seq_type in ("query", "hit"):
attr_name = "%s_%s" % (seq_type, attr)
for value in (-1, 0, 1, None):
setattr(self.fragment, attr_name, value)
self.assertEqual(value, getattr(self.fragment, attr_name))
def test_strand_set_error(self):
"""Test HSPFragment query and hit strand setters, invalid values."""
attr = "strand"
for seq_type in ("query", "hit"):
func_name = "_%s_%s_set" % (seq_type, attr)
func = getattr(self.fragment, func_name)
for value in (3, "plus", "minus", "-", "+"):
self.assertRaises(ValueError, func, value)
def test_strand_set_from_plus_frame(self):
"""Test HSPFragment query and hit strand getters, from plus frame."""
for seq_type in ("query", "hit"):
attr_name = "%s_strand" % seq_type
self.assertTrue(getattr(self.fragment, attr_name) is None)
setattr(self.fragment, "%s_frame" % seq_type, 3)
self.assertEqual(1, getattr(self.fragment, attr_name))
def test_strand_set_from_minus_frame(self):
"""Test HSPFragment query and hit strand getters, from minus frame."""
for seq_type in ("query", "hit"):
attr_name = "%s_strand" % seq_type
self.assertTrue(getattr(self.fragment, attr_name) is None)
setattr(self.fragment, "%s_frame" % seq_type, -2)
self.assertEqual(-1, getattr(self.fragment, attr_name))
def test_strand_set_from_zero_frame(self):
"""Test HSPFragment query and hit strand getters, from zero frame."""
for seq_type in ("query", "hit"):
attr_name = "%s_strand" % seq_type
self.assertTrue(getattr(self.fragment, attr_name) is None)
setattr(self.fragment, "%s_frame" % seq_type, 0)
self.assertEqual(0, getattr(self.fragment, attr_name))
def test_coords_setters_getters(self):
"""Test HSPFragment query and hit coordinate-related setters and getters."""
for seq_type in ("query", "hit"):
attr_start = "%s_%s" % (seq_type, "start")
attr_end = "%s_%s" % (seq_type, "end")
setattr(self.fragment, attr_start, 9)
setattr(self.fragment, attr_end, 99)
# check for span value
span = getattr(self.fragment, "%s_span" % seq_type)
self.assertEqual(90, span)
# and range as well
range = getattr(self.fragment, "%s_range" % seq_type)
self.assertEqual((9, 99), range)
def test_coords_setters_readonly(self):
"""Test HSPFragment query and hit coordinate-related read-only getters."""
read_onlies = ("range", "span")
for seq_type in ("query", "hit"):
for attr in read_onlies:
self.assertRaises(
AttributeError,
setattr,
self.fragment,
"%s_%s" % (seq_type, attr),
5,
)
if __name__ == "__main__":
    # Run this module's tests with per-test verbose output.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| 42.828914 | 88 | 0.63043 |
ace45d1f281fbc416364889982eb42faf88b8f5a | 15,287 | py | Python | families/wikiquote_family.py | Botomatik/JackBot | 58651d8b5a5bcead2a2eb79849019cb4f972b7cd | [
"MIT"
] | null | null | null | families/wikiquote_family.py | Botomatik/JackBot | 58651d8b5a5bcead2a2eb79849019cb4f972b7cd | [
"MIT"
] | null | null | null | families/wikiquote_family.py | Botomatik/JackBot | 58651d8b5a5bcead2a2eb79849019cb4f972b7cd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import family
__version__ = '$Id: 0f5339b4c0d5fba41fc4b539b0741f0b57c1c83c $'
# The Wikimedia family that is known as Wikiquote
class Family(family.WikimediaFamily):
    """Configuration for the Wikimedia family known as Wikiquote.

    Pure configuration: per-language hostnames, localized namespace names,
    interwiki ordering preferences, bot policy, and closed/renamed wikis.
    """
    def __init__(self):
        """Populate all Wikiquote-specific family settings."""
        super(Family, self).__init__()
        self.name = 'wikiquote'
        # Language codes ordered by wiki size (largest first).
        self.languages_by_size = [
            'pl', 'en', 'it', 'ru', 'fr', 'de', 'pt', 'es', 'cs', 'sk', 'bg',
            'bs', 'tr', 'sl', 'he', 'uk', 'lt', 'eo', 'el', 'fa', 'id', 'zh',
            'hu', 'fi', 'sv', 'nl', 'li', 'ca', 'no', 'nn', 'hr', 'sa', 'ja',
            'az', 'hy', 'ar', 'et', 'ko', 'ml', 'cy', 'ka', 'gl', 'sr', 'ro',
            'ku', 'th', 'te', 'da', 'eu', 'is', 'af', 'vi', 'sq', 'ta', 'hi',
            'la', 'be', 'br', 'mr', 'uz', 'ur', 'zh-min-nan', 'gu', 'kn', 'su',
            'wo', 'ky', 'am',
        ]
        # Map every language code to its '<code>.wikiquote.org' hostname.
        self.langs = dict([(lang, '%s.wikiquote.org' % lang)
                           for lang in self.languages_by_size])
        # Override defaults
        self.namespaces[2]['ca'] = [u'Usuari']
        self.namespaces[3]['ca'] = [u'Usuari Discussió']
        self.namespaces[2]['cs'] = [u'Uživatel', u'Uživatelka']
        self.namespaces[3]['cs'] = [u'Diskuse s uživatelem', u'Uživatel diskuse', u'Uživatelka diskuse', u'Diskuse s uživatelkou']
        self.namespaces[9]['da'] = [u'MediaWiki diskussion', u'MediaWiki-diskussion']
        self.namespaces[13]['da'] = [u'Hjælp diskussion', u'Hjælp-diskussion']
        self.namespaces[3]['de'] = [u'Benutzer Diskussion', u'BD', u'Benutzerin Diskussion']
        self.namespaces[2]['fa'] = [u'کاربر']
        self.namespaces[3]['fa'] = [u'بحث کاربر']
        self.namespaces[2]['fr'] = [u'Utilisateur']
        self.namespaces[3]['fr'] = [u'Discussion utilisateur', u'Discussion Utilisateur']
        self.namespaces[8]['hi'] = [u'मीडियाविकि']
        self.namespaces[9]['hi'] = [u'मीडियाविकि वार्ता']
        self.namespaces[2]['pl'] = [u'Użytkownik', u'Użytkowniczka']
        self.namespaces[3]['pl'] = [u'Dyskusja użytkownika', u'Dyskusja użytkowniczki']
        self.namespaces[2]['pt'] = [u'Utilizador', u'Usuário', u'Utilizadora']
        self.namespaces[3]['pt'] = [u'Utilizador Discussão', u'Usuário Discussão', u'Utilizadora Discussão']
        self.namespaces[9]['ro'] = [u'Discuție MediaWiki', u'Discuţie MediaWiki']
        self.namespaces[10]['zh'] = [u'Template', u'模板', u'样板', u'樣板']
        self.namespaces[14]['zh'] = [u'Category', u'分类', u'分類']
        # Most namespaces are inherited from family.Family.
        # Translation used on all wikis for the different namespaces.
        # (Please sort languages alphabetically)
        # You only need to enter translations that differ from _default.
        # Namespace 4: the project ("Wikiquote") namespace.
        self.namespaces[4] = {
            '_default': self.namespaces[4]['_default'],
            'af': u'Wikiquote',
            'am': u'Wikiquote',
            'ang': u'Wikiquote',
            'ar': [u'ويكي الاقتباس', u'Wikiquote'],
            'az': [u'Vikisitat', u'Wikiquote'],
            'be': u'Wikiquote',
            'bg': [u'Уикицитат', u'Wikiquote'],
            'br': [u'Wikiarroud', u'Wikiquote'],
            'bs': [u'Wikicitati', u'Wikiquote'],
            'ca': [u'Viquidites', u'Wikiquote'],
            'co': u'Wikiquote',
            'cs': [u'Wikicitáty', u'WC', u'WQ', u'Wikiquote'],
            'cy': u'Wikiquote',
            'da': u'Wikiquote',
            'de': [u'Wikiquote', u'WQ'],
            'el': [u'Βικιφθέγματα', u'Wikiquote'],
            'en': u'Wikiquote',
            'eo': [u'Vikicitaro', u'Wikiquote'],
            'es': u'Wikiquote',
            'et': [u'Vikitsitaadid', u'Wikiquote'],
            'eu': u'Wikiquote',
            'fa': [u'ویکی‌گفتاورد', u'Wikiquote'],
            'fi': [u'Wikisitaatit', u'Wikiquote'],
            'fr': u'Wikiquote',
            'ga': u'Vicísliocht',
            'gl': u'Wikiquote',
            'gu': u'Wikiquote',
            'he': [u'ויקיציטוט', u'Wikiquote'],
            'hi': u'Wikiquote',
            'hr': [u'Wikicitat', u'Wikiquote'],
            'hu': [u'Wikidézet', u'Wikiquote'],
            'hy': [u'Վիքիքաղվածք', u'Wikiquote'],
            'id': u'Wikiquote',
            'is': [u'Wikivitnun', u'Wikiquote'],
            'it': u'Wikiquote',
            'ja': u'Wikiquote',
            'ka': [u'ვიკიციტატა', u'Wikiquote'],
            'kk': u'Уикидәйек',
            'kn': u'Wikiquote',
            'ko': [u'위키인용집', u'인', u'Wikiquote'],
            'ku': [u'Wîkîgotin', u'Wikiquote'],
            'ky': u'Wikiquote',
            'la': [u'Vicicitatio', u'Wikiquote'],
            'lb': u'Wikiquote',
            'li': u'Wikiquote',
            'lt': u'Wikiquote',
            'ml': [u'വിക്കിചൊല്ലുകൾ', u'വിക്കി ചൊല്ലുകൾ', u'Wikiquote'],
            'mr': u'Wikiquote',
            'nl': u'Wikiquote',
            'nn': u'Wikiquote',
            'no': u'Wikiquote',
            'pl': [u'Wikicytaty', u'Wikiquote'],
            'pt': u'Wikiquote',
            'ro': [u'Wikicitat', u'Wikiquote'],
            'ru': [u'Викицитатник', u'ВЦ'],
            'sa': [u'विकिसूक्तिः', u'Wikiquote'],
            'sk': [u'Wikicitáty', u'Wikiquote'],
            'sl': [u'Wikinavedek', u'Wikiquote'],
            'sq': u'Wikiquote',
            'sr': u'Wikiquote',
            'su': u'Wikiquote',
            'sv': u'Wikiquote',
            'ta': [u'விக்கிமேற்கோள்', u'Wikiquote', u'விக்கிபீடியா'],
            'te': u'Wikiquote',
            'th': [u'วิกิคำคม', u'Wikiquote'],
            'tr': [u'Vikisöz', u'Wikiquote'],
            'uk': [u'Вікіцитати', u'ВЦ', u'Wikiquote'],
            'ur': [u'وکی اقتباسات', u'Wikiquote'],
            'uz': [u'Vikiiqtibos', u'Wikiquote'],
            'vi': u'Wikiquote',
            'wo': u'Wikiquote',
            'zh': u'Wikiquote',
            'zh-min-nan': u'Wikiquote',
        }
        # Namespace 5: the project talk ("Wikiquote talk") namespace.
        self.namespaces[5] = {
            '_default': self.namespaces[5]['_default'],
            'af': u'Wikiquotebespreking',
            'als': u'Wikiquote Diskussion',
            'am': u'Wikiquote ውይይት',
            'ang': u'Wikiquote talk',
            'ar': u'نقاش ويكي الاقتباس',
            'ast': u'Wikiquote alderique',
            'az': [u'Vikisitat müzakirəsi', u'Wikiquote talk'],
            'be': [u'Размовы пра Wikiquote', u'Wikiquote размовы'],
            'bg': u'Уикицитат беседа',
            'bm': u'Discussion Wikiquote',
            'br': u'Kaozeadenn Wikiarroud',
            'bs': u'Razgovor s Wikicitatima',
            'ca': u'Viquidites Discussió',
            'co': u'Wikiquote talk',
            'cs': [u'Diskuse k Wikicitátům', u'Wikiquote diskuse', u'Wikiquote talk', u'Wikicitáty diskuse'],
            'cy': u'Sgwrs Wikiquote',
            'da': [u'Wikiquote diskussion', u'Wikiquote-diskussion'],
            'de': u'Wikiquote Diskussion',
            'el': [u'Συζήτηση Βικιφθέγματα', u'Βικιφθέγματα συζήτηση'],
            'en': u'Wikiquote talk',
            'eo': [u'Vikicitaro-Diskuto', u'Vikicitaro diskuto'],
            'es': u'Wikiquote discusión',
            'et': [u'Vikitsitaatide arutelu', u'Vikitsitaadid arutelu'],
            'eu': u'Wikiquote eztabaida',
            'fa': u'بحث ویکی‌گفتاورد',
            'fi': u'Keskustelu Wikisitaateista',
            'fr': u'Discussion Wikiquote',
            'ga': u'Plé Vicísliocht',
            'gl': u'Conversa Wikiquote',
            'gu': u'Wikiquote ચર્ચા',
            'he': u'שיחת ויקיציטוט',
            'hi': u'Wikiquote वार्ता',
            'hr': u'Razgovor Wikicitat',
            'hu': [u'Wikidézet-vita', u'Wikidézet vita'],
            'hy': u'Վիքիքաղվածքի քննարկում',
            'id': u'Pembicaraan Wikiquote',
            'is': u'Wikivitnunspjall',
            'it': u'Discussioni Wikiquote',
            'ja': [u'Wikiquote・トーク', u'Wikiquote‐ノート'],
            'ka': [u'ვიკიციტატა განხილვა', u'Wikiquote განხილვა'],
            'kk': u'Уикидәйек талқылауы',
            'kn': u'Wikiquote ಚರ್ಚೆ',
            'ko': u'위키인용집토론',
            'ku': [u'Gotûbêja Wîkîgotinê', u'Wîkîgotin nîqaş'],
            'ky': u'Wikiquote баарлашуу',
            'la': u'Disputatio Vicicitationis',
            'lb': u'Wikiquote Diskussioun',
            'li': u'Euverlèk Wikiquote',
            'lt': u'Wikiquote aptarimas',
            'ml': [u'വിക്കിചൊല്ലുകൾ സംവാദം', u'വിക്കി ചൊല്ലുകൾ സംവാദം'],
            'mr': u'Wikiquote चर्चा',
            'nds': u'Wikiquote Diskuschoon',
            'nl': u'Overleg Wikiquote',
            'nn': u'Wikiquote-diskusjon',
            'no': u'Wikiquote-diskusjon',
            'pl': u'Dyskusja Wikicytatów',
            'pt': u'Wikiquote Discussão',
            'qu': u'Wikiquote rimanakuy',
            'ro': [u'Discuție Wikicitat', u'Discuţie Wikicitat'],
            'ru': u'Обсуждение Викицитатника',
            'sa': [u'विकिसूक्तिःसम्भाषणम्', u'विकिसूक्तिःसंभाषणं'],
            'sk': [u'Diskusia k Wikicitátom', u'Komentár k Wikipédii'],
            'sl': u'Pogovor o Wikinavedku',
            'sq': u'Wikiquote diskutim',
            'sr': [u'Разговор о Wikiquote', u'Razgovor o Wikiquote'],
            'su': u'Obrolan Wikiquote',
            'sv': u'Wikiquotediskussion',
            'ta': [u'விக்கிமேற்கோள் பேச்சு', u'விக்கிபீடியா பேச்சு'],
            'te': u'Wikiquote చర్చ',
            'th': u'คุยเรื่องวิกิคำคม',
            'tr': u'Vikisöz tartışma',
            'tt': u'Wikiquote bäxäse',
            'uk': u'Обговорення Вікіцитат',
            'ur': u'تبادلۂ خیال وکی اقتباسات',
            'uz': u'Vikiiqtibos munozarasi',
            'vi': u'Thảo luận Wikiquote',
            'vo': u'Bespik dö Wikiquote',
            'wo': [u'Wikiquote waxtaan', u'Discussion Wikiquote'],
            'zh': u'Wikiquote talk',
            'zh-min-nan': u'Wikiquote talk',
        }
        # Custom (per-wiki) namespaces 100+: portals, projects, transwiki...
        self.namespaces[100] = {
            'de': u'Portal',
            'fr': u'Portail',
            'he': u'פורטל',
            'li': u'Portaol',
            'sk': u'Deň',
            'zh': u'Transwiki',
        }
        self.namespaces[101] = {
            'de': u'Portal Diskussion',
            'fr': u'Discussion Portail',
            'he': u'שיחת פורטל',
            'li': u'Euverlèk portaol',
            'sk': u'Diskusia ku dňu',
            'zh': u'Transwiki talk',
        }
        self.namespaces[102] = {
            'fr': u'Projet',
        }
        self.namespaces[103] = {
            'fr': u'Discussion Projet',
        }
        self.namespaces[104] = {
            'fr': u'Référence',
        }
        self.namespaces[105] = {
            'fr': u'Discussion Référence',
        }
        self.namespaces[108] = {
            'fr': u'Transwiki',
        }
        self.namespaces[109] = {
            'fr': u'Discussion Transwiki',
        }
        # attop is a list of languages that prefer to have the interwiki
        # links at the top of the page.
        self.interwiki_attop = []
        # on_one_line is a list of languages that want the interwiki links
        # one-after-another on a single line
        self.interwiki_on_one_line = []
        # Similar for category
        self.category_attop = []
        # List of languages that want the category on_one_line.
        self.category_on_one_line = []
        # Global bot allowed languages on http://meta.wikimedia.org/wiki/Bot_policy/Implementation#Current_implementation
        self.cross_allowed = [
            'af', 'am', 'ar', 'az', 'be', 'bg', 'br', 'bs', 'ca', 'cs', 'da',
            'el', 'eo', 'es', 'et', 'eu', 'fa', 'fi', 'fr', 'gl', 'he', 'hi',
            'hu', 'hy', 'id', 'is', 'it', 'ja', 'ka', 'kn', 'ku', 'ky', 'la',
            'li', 'lt', 'ml', 'mr', 'nl', 'nn', 'no', 'pt', 'ro', 'ru', 'sk',
            'sl', 'sq', 'sr', 'su', 'sv', 'ta', 'te', 'tr', 'uk', 'uz', 'vi',
            'wo', 'zh',
        ]
        # Which languages have a special order for putting interlanguage links,
        # and what order is it? If a language is not in interwiki_putfirst,
        # alphabetical order on language code is used. For languages that are in
        # interwiki_putfirst, interwiki_putfirst is checked first, and
        # languages are put in the order given there. All other languages are
        # put after those, in code-alphabetical order.
        self.interwiki_putfirst = {
            'en': self.alphabetic,
            'fi': self.alphabetic,
            'fr': self.alphabetic,
            'he': ['en'],
            'hu': ['en'],
            'pl': self.alphabetic,
            'simple': self.alphabetic,
            'pt': self.alphabetic,
        }
        # Closed or renamed wikis: maps old code -> replacement code,
        # or None when the wiki was closed with no successor.
        self.obsolete = {
            'als': None, # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Alemannic_Wikiquote
            'ang': None, # https://bugzilla.wikimedia.org/show_bug.cgi?id=29150
            'ast': None, # https://bugzilla.wikimedia.org/show_bug.cgi?id=28964
            'bm': None, # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Bambara_Wikiquote
            'co': None,
            'cr': None, # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Nehiyaw_Wikiquote
            'dk': 'da',
            'ga': None, # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Gaeilge_Wikiquote
            'jp': 'ja',
            'kk': None, # https://bugzilla.wikimedia.org/show_bug.cgi?id=20325
            'kr': None, # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Kanuri_Wikiquote
            'ks': None, # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Kashmiri_Wikiquote
            'kw': None, # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Kernewek_Wikiquote
            'lb': None,
            'minnan':'zh-min-nan',
            'na': None, # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Nauruan_Wikiquote
            'nb': 'no',
            'nds': None, # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Low_Saxon_Wikiquote
            'qu': None, # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Quechua_Wikiquote
            'simple': 'en', #http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Simple_English_(3)_Wikiquote
            'tk': None, # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Turkmen_Wikiquote
            'tokipona': None,
            'tt': None, # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Tatar_Wikiquote
            'ug': None, # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Oyghurque_Wikiquote
            'vo': None, # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Volapuk_Wikiquote
            'za':None, # http://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Zhuang_Wikiquote
            'zh-tw': 'zh',
            'zh-cn': 'zh'
        }
    def code2encodings(self, code):
        """
        Return a list of historical encodings for a specific language wikipedia
        """
        # Historic compatibility
        if code == 'pl':
            return 'utf-8', 'iso8859-2'
        if code == 'ru':
            return 'utf-8', 'iso8859-5'
        # trailing comma: wrap the single encoding in a 1-tuple so the
        # return type is always a tuple of encodings
        return self.code2encoding(code),
| 45.362018 | 130 | 0.522797 |
ace45db6000d7c91cb982d4b51fde06a270bf072 | 13,411 | py | Python | jtnn/jtnn_vae.py | nisargjoshi10/icml18-jtnn | c8140b97b502f507dd7ebb9607af09e719c619eb | [
"MIT"
] | null | null | null | jtnn/jtnn_vae.py | nisargjoshi10/icml18-jtnn | c8140b97b502f507dd7ebb9607af09e719c619eb | [
"MIT"
] | null | null | null | jtnn/jtnn_vae.py | nisargjoshi10/icml18-jtnn | c8140b97b502f507dd7ebb9607af09e719c619eb | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import torch
import torch.nn as nn
from .mol_tree import Vocab, MolTree
from .nnutils import create_var
from .jtnn_enc import JTNNEncoder
from .jtnn_dec import JTNNDecoder
from .mpn import MPN, mol2graph
from .jtmpn import JTMPN
from .chemutils import enum_assemble, set_atommap, copy_edit_mol, attach_mols, atom_equal, decode_stereo
import rdkit
import rdkit.Chem as Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
import copy, math
from six.moves import range
from six.moves import zip
def set_batch_nodeID(mol_batch, vocab):
    """Assign a batch-wide running index (``idx``) and a vocabulary id
    (``wid``, looked up from the node's SMILES) to every tree node in
    ``mol_batch``, in tree order then node order."""
    flat_nodes = (nd for tree in mol_batch for nd in tree.nodes)
    for running_id, nd in enumerate(flat_nodes):
        nd.idx = running_id
        nd.wid = vocab.get_index(nd.smiles)
class JTNNVAE(nn.Module):
    """Junction-Tree Variational Autoencoder (Jin et al., ICML 2018).

    The latent space is split in two halves: a tree half (junction-tree
    structure, via T_mean/T_var) and a graph half (molecular graph, via
    G_mean/G_var), each of size latent_size / 2 and concatenated.
    """
    def __init__(self, vocab, hidden_size, latent_size, depth, stereo=True):
        """Build encoders/decoders.

        NOTE(review): this is Python-2 code (`/` on ints). Under Python 3,
        `latent_size / 2` yields a float and nn.Linear would reject it;
        porting would require `latent_size // 2` throughout.
        """
        super(JTNNVAE, self).__init__()
        self.vocab = vocab
        self.hidden_size = hidden_size
        self.latent_size = latent_size
        self.depth = depth
        # shared word embedding between tree encoder and tree decoder
        self.embedding = nn.Embedding(vocab.size(), hidden_size)
        self.jtnn = JTNNEncoder(vocab, hidden_size, self.embedding)
        self.jtmpn = JTMPN(hidden_size, depth)
        self.mpn = MPN(hidden_size, depth)
        self.decoder = JTNNDecoder(vocab, hidden_size, latent_size / 2, self.embedding)
        self.T_mean = nn.Linear(hidden_size, latent_size / 2)
        self.T_var = nn.Linear(hidden_size, latent_size / 2)
        self.G_mean = nn.Linear(hidden_size, latent_size / 2)
        self.G_var = nn.Linear(hidden_size, latent_size / 2)
        self.assm_loss = nn.CrossEntropyLoss(size_average=False)
        self.use_stereo = stereo
        if stereo:
            self.stereo_loss = nn.CrossEntropyLoss(size_average=False)
    def encode(self, mol_batch):
        """Encode a batch of MolTrees; returns (tree messages, tree vectors, graph vectors)."""
        set_batch_nodeID(mol_batch, self.vocab)
        root_batch = [mol_tree.nodes[0] for mol_tree in mol_batch]
        tree_mess,tree_vec = self.jtnn(root_batch)
        smiles_batch = [mol_tree.smiles for mol_tree in mol_batch]
        mol_vec = self.mpn(mol2graph(smiles_batch))
        return tree_mess, tree_vec, mol_vec
    def encode_latent_mean(self, smiles_list):
        """Encode SMILES strings to the concatenated [tree_mean, mol_mean] latent mean."""
        mol_batch = [MolTree(s) for s in smiles_list]
        for mol_tree in mol_batch:
            mol_tree.recover()
        _, tree_vec, mol_vec = self.encode(mol_batch)
        tree_mean = self.T_mean(tree_vec)
        mol_mean = self.G_mean(mol_vec)
        return torch.cat([tree_mean,mol_mean], dim=1)
    def forward(self, mol_batch, beta=0):
        """Full VAE loss: word + topology + assembly + 2*stereo + beta*KL.

        Returns (loss, KL value, word acc, topo acc, assembly acc, stereo acc).
        """
        batch_size = len(mol_batch)
        tree_mess, tree_vec, mol_vec = self.encode(mol_batch)
        tree_mean = self.T_mean(tree_vec)
        tree_log_var = -torch.abs(self.T_var(tree_vec)) #Following Mueller et al.
        mol_mean = self.G_mean(mol_vec)
        mol_log_var = -torch.abs(self.G_var(mol_vec)) #Following Mueller et al.
        z_mean = torch.cat([tree_mean,mol_mean], dim=1)
        z_log_var = torch.cat([tree_log_var,mol_log_var], dim=1)
        # analytic KL divergence to a standard normal, averaged over the batch
        kl_loss = -0.5 * torch.sum(1.0 + z_log_var - z_mean * z_mean - torch.exp(z_log_var)) / batch_size
        # reparameterization trick, applied separately to each latent half
        epsilon = create_var(torch.randn(batch_size, self.latent_size / 2), False)
        tree_vec = tree_mean + torch.exp(tree_log_var / 2) * epsilon
        epsilon = create_var(torch.randn(batch_size, self.latent_size / 2), False)
        mol_vec = mol_mean + torch.exp(mol_log_var / 2) * epsilon
        word_loss, topo_loss, word_acc, topo_acc = self.decoder(mol_batch, tree_vec)
        assm_loss, assm_acc = self.assm(mol_batch, mol_vec, tree_mess)
        if self.use_stereo:
            stereo_loss, stereo_acc = self.stereo(mol_batch, mol_vec)
        else:
            stereo_loss, stereo_acc = 0, 0
        # NOTE(review): all_vec is computed but never used — dead code.
        all_vec = torch.cat([tree_vec, mol_vec], dim=1)
        loss = word_loss + topo_loss + assm_loss + 2 * stereo_loss + beta * kl_loss
        return loss, kl_loss.item(), word_acc, topo_acc, assm_acc, stereo_acc
    def assm(self, mol_batch, mol_vec, tree_mess):
        """Subgraph-assembly loss: score attachment candidates against mol_vec.

        Returns (cross-entropy loss averaged over the batch, accuracy).
        """
        cands = []
        batch_idx = []
        for i,mol_tree in enumerate(mol_batch):
            for node in mol_tree.nodes:
                #Leaf node's attachment is determined by neighboring node's attachment
                if node.is_leaf or len(node.cands) == 1: continue
                cands.extend( [(cand, mol_tree.nodes, node) for cand in node.cand_mols] )
                batch_idx.extend([i] * len(node.cands))
        cand_vec = self.jtmpn(cands, tree_mess)
        cand_vec = self.G_mean(cand_vec)
        batch_idx = create_var(torch.LongTensor(batch_idx))
        mol_vec = mol_vec.index_select(0, batch_idx)
        # dot product between each candidate vector and its molecule vector
        mol_vec = mol_vec.view(-1, 1, self.latent_size / 2)
        cand_vec = cand_vec.view(-1, self.latent_size / 2, 1)
        scores = torch.bmm(mol_vec, cand_vec).squeeze()
        cnt,tot,acc = 0,0,0
        all_loss = []
        for i,mol_tree in enumerate(mol_batch):
            comp_nodes = [node for node in mol_tree.nodes if len(node.cands) > 1 and not node.is_leaf]
            cnt += len(comp_nodes)
            for node in comp_nodes:
                label = node.cands.index(node.label)
                ncand = len(node.cands)
                cur_score = scores.narrow(0, tot, ncand)
                tot += ncand
                # ties count as correct (>= rather than >)
                if cur_score[label].item() >= cur_score.max().item():
                    acc += 1
                label = create_var(torch.LongTensor([label]))
                all_loss.append( self.assm_loss(cur_score.view(1,-1), label) )
        #all_loss = torch.stack(all_loss).sum() / len(mol_batch)
        all_loss = sum(all_loss) / len(mol_batch)
        return all_loss, acc * 1.0 / cnt
    def stereo(self, mol_batch, mol_vec):
        """Stereoisomer-selection loss: cosine-score candidates against mol_vec.

        Returns (cross-entropy loss averaged over labeled trees, accuracy).
        """
        stereo_cands,batch_idx = [],[]
        labels = []
        for i,mol_tree in enumerate(mol_batch):
            cands = mol_tree.stereo_cands
            if len(cands) == 1: continue
            if mol_tree.smiles3D not in cands:
                cands.append(mol_tree.smiles3D)
            stereo_cands.extend(cands)
            batch_idx.extend([i] * len(cands))
            labels.append( (cands.index(mol_tree.smiles3D), len(cands)) )
        # no tree had more than one stereo candidate: nothing to train on
        if len(labels) == 0:
            return create_var(torch.zeros(1)), 1.0
        batch_idx = create_var(torch.LongTensor(batch_idx))
        stereo_cands = self.mpn(mol2graph(stereo_cands))
        stereo_cands = self.G_mean(stereo_cands)
        stereo_labels = mol_vec.index_select(0, batch_idx)
        scores = torch.nn.CosineSimilarity()(stereo_cands, stereo_labels)
        st,acc = 0,0
        all_loss = []
        for label,le in labels:
            cur_scores = scores.narrow(0, st, le)
            # NOTE(review): `.data[0]` is legacy (pre-0.4) PyTorch indexing;
            # modern torch would use .item().
            if cur_scores.data[label] >= cur_scores.max().data[0]:
                acc += 1
            label = create_var(torch.LongTensor([label]))
            all_loss.append( self.stereo_loss(cur_scores.view(1,-1), label) )
            st += le
        #all_loss = torch.cat(all_loss).sum() / len(labels)
        all_loss = sum(all_loss) / len(labels)
        return all_loss, acc * 1.0 / len(labels)
    def reconstruct(self, smiles, prob_decode=False):
        """Encode one SMILES, sample from the posterior, and decode back to SMILES."""
        mol_tree = MolTree(smiles)
        mol_tree.recover()
        _,tree_vec,mol_vec = self.encode([mol_tree])
        tree_mean = self.T_mean(tree_vec)
        tree_log_var = -torch.abs(self.T_var(tree_vec)) #Following Mueller et al.
        mol_mean = self.G_mean(mol_vec)
        mol_log_var = -torch.abs(self.G_var(mol_vec)) #Following Mueller et al.
        epsilon = create_var(torch.randn(1, self.latent_size / 2), False)
        tree_vec = tree_mean + torch.exp(tree_log_var / 2) * epsilon
        epsilon = create_var(torch.randn(1, self.latent_size / 2), False)
        mol_vec = mol_mean + torch.exp(mol_log_var / 2) * epsilon
        return self.decode(tree_vec, mol_vec, prob_decode)
    def recon_eval(self, smiles):
        """Reconstruction evaluation: 10 posterior samples x 10 stochastic decodes."""
        mol_tree = MolTree(smiles)
        mol_tree.recover()
        _,tree_vec,mol_vec = self.encode([mol_tree])
        tree_mean = self.T_mean(tree_vec)
        tree_log_var = -torch.abs(self.T_var(tree_vec)) #Following Mueller et al.
        mol_mean = self.G_mean(mol_vec)
        mol_log_var = -torch.abs(self.G_var(mol_vec)) #Following Mueller et al.
        all_smiles = []
        for i in range(10):
            epsilon = create_var(torch.randn(1, self.latent_size / 2), False)
            tree_vec = tree_mean + torch.exp(tree_log_var / 2) * epsilon
            epsilon = create_var(torch.randn(1, self.latent_size / 2), False)
            mol_vec = mol_mean + torch.exp(mol_log_var / 2) * epsilon
            for j in range(10):
                new_smiles = self.decode(tree_vec, mol_vec, prob_decode=True)
                all_smiles.append(new_smiles)
        return all_smiles
    def sample_prior(self, prob_decode=False):
        """Decode a molecule from a standard-normal prior sample."""
        tree_vec = create_var(torch.randn(1, self.latent_size / 2), False)
        mol_vec = create_var(torch.randn(1, self.latent_size / 2), False)
        return self.decode(tree_vec, mol_vec, prob_decode)
    def sample_eval(self):
        """Decode one prior sample 100 times with stochastic decoding."""
        tree_vec = create_var(torch.randn(1, self.latent_size / 2), False)
        mol_vec = create_var(torch.randn(1, self.latent_size / 2), False)
        all_smiles = []
        for i in range(100):
            s = self.decode(tree_vec, mol_vec, prob_decode=True)
            all_smiles.append(s)
        return all_smiles
    def decode(self, tree_vec, mol_vec, prob_decode):
        """Decode latent vectors to a SMILES string (or None on failure)."""
        pred_root,pred_nodes = self.decoder.decode(tree_vec, prob_decode)
        #Mark nid & is_leaf & atommap
        for i,node in enumerate(pred_nodes):
            node.nid = i + 1
            node.is_leaf = (len(node.neighbors) == 1)
            if len(node.neighbors) > 1:
                set_atommap(node.mol, node.nid)
        tree_mess = self.jtnn([pred_root])[0]
        cur_mol = copy_edit_mol(pred_root.mol)
        # global_amap[nid] maps node-local atom indices to atom indices in cur_mol
        global_amap = [{}] + [{} for node in pred_nodes]
        global_amap[1] = {atom.GetIdx():atom.GetIdx() for atom in cur_mol.GetAtoms()}
        cur_mol = self.dfs_assemble(tree_mess, mol_vec, pred_nodes, cur_mol, global_amap, [], pred_root, None, prob_decode)
        if cur_mol is None:
            return None
        cur_mol = cur_mol.GetMol()
        set_atommap(cur_mol)
        # round-trip through SMILES to sanitize the assembled molecule
        cur_mol = Chem.MolFromSmiles(Chem.MolToSmiles(cur_mol))
        if cur_mol is None: return None
        if self.use_stereo == False:
            return Chem.MolToSmiles(cur_mol)
        # pick the stereoisomer whose graph vector best matches mol_vec
        smiles2D = Chem.MolToSmiles(cur_mol)
        stereo_cands = decode_stereo(smiles2D)
        if len(stereo_cands) == 1:
            return stereo_cands[0]
        stereo_vecs = self.mpn(mol2graph(stereo_cands))
        stereo_vecs = self.G_mean(stereo_vecs)
        scores = nn.CosineSimilarity()(stereo_vecs, mol_vec)
        _,max_id = scores.max(dim=0)
        return stereo_cands[max_id.data[0]]
    def dfs_assemble(self, tree_mess, mol_vec, all_nodes, cur_mol, global_amap, fa_amap, cur_node, fa_node, prob_decode):
        """Recursively attach neighbor clusters to cur_mol, depth-first.

        Tries attachment candidates in score order (or sampled when
        prob_decode) with backtracking; returns the assembled RWMol or
        None if no candidate leads to a valid molecule.
        """
        fa_nid = fa_node.nid if fa_node is not None else -1
        prev_nodes = [fa_node] if fa_node is not None else []
        children = [nei for nei in cur_node.neighbors if nei.nid != fa_nid]
        # attach singleton (single-atom) clusters first, then larger ones
        neighbors = [nei for nei in children if nei.mol.GetNumAtoms() > 1]
        neighbors = sorted(neighbors, key=lambda x:x.mol.GetNumAtoms(), reverse=True)
        singletons = [nei for nei in children if nei.mol.GetNumAtoms() == 1]
        neighbors = singletons + neighbors
        cur_amap = [(fa_nid,a2,a1) for nid,a1,a2 in fa_amap if nid == cur_node.nid]
        cands = enum_assemble(cur_node, neighbors, prev_nodes, cur_amap)
        if len(cands) == 0:
            return None
        cand_smiles,cand_mols,cand_amap = list(zip(*cands))
        cands = [(candmol, all_nodes, cur_node) for candmol in cand_mols]
        cand_vecs = self.jtmpn(cands, tree_mess)
        cand_vecs = self.G_mean(cand_vecs)
        mol_vec = mol_vec.squeeze()
        scores = torch.mv(cand_vecs, mol_vec) * 20
        if prob_decode:
            probs = nn.Softmax()(scores.view(1,-1)).squeeze() + 1e-5 #prevent prob = 0
            cand_idx = torch.multinomial(probs, probs.numel())
        else:
            _,cand_idx = torch.sort(scores, descending=True)
        backup_mol = Chem.RWMol(cur_mol)
        for i in range(cand_idx.numel()):
            # restore from backup before trying each candidate (backtracking)
            cur_mol = Chem.RWMol(backup_mol)
            pred_amap = cand_amap[cand_idx[i].item()]
            new_global_amap = copy.deepcopy(global_amap)
            for nei_id,ctr_atom,nei_atom in pred_amap:
                if nei_id == fa_nid:
                    continue
                new_global_amap[nei_id][nei_atom] = new_global_amap[cur_node.nid][ctr_atom]
            cur_mol = attach_mols(cur_mol, children, [], new_global_amap) #father is already attached
            new_mol = cur_mol.GetMol()
            new_mol = Chem.MolFromSmiles(Chem.MolToSmiles(new_mol))
            if new_mol is None: continue
            result = True
            for nei_node in children:
                if nei_node.is_leaf: continue
                cur_mol = self.dfs_assemble(tree_mess, mol_vec, all_nodes, cur_mol, new_global_amap, pred_amap, nei_node, cur_node, prob_decode)
                if cur_mol is None:
                    result = False
                    break
            if result: return cur_mol
        return None
| 41.649068 | 144 | 0.625457 |
ace45decb9739283dfc2d8db46336b8d1d65b091 | 5,983 | py | Python | semo/RNNLG/nn/NNGenerator.py | WowCZ/strac | a55195a04ea4f1a4051fdcf11e83777b47ca83e1 | [
"Apache-2.0"
] | 3 | 2019-04-28T03:53:33.000Z | 2020-01-20T14:38:40.000Z | semo/RNNLG/nn/NNGenerator.py | WowCZ/strac | a55195a04ea4f1a4051fdcf11e83777b47ca83e1 | [
"Apache-2.0"
] | null | null | null | semo/RNNLG/nn/NNGenerator.py | WowCZ/strac | a55195a04ea4f1a4051fdcf11e83777b47ca83e1 | [
"Apache-2.0"
] | 1 | 2019-11-27T09:28:10.000Z | 2019-11-27T09:28:10.000Z | ######################################################################
######################################################################
# Copyright Tsung-Hsien Wen, Cambridge Dialogue Systems Group, 2016 #
######################################################################
######################################################################
import sys
import numpy as np
import theano.tensor as T
import theano.gradient as G
from collections import OrderedDict
sys.path.insert(0, '.')
from basic import *
from hlstm import *
from sclstm import *
from encdec import *
class NNGenerator(object):
    """Facade over the three RNNLG generator architectures (sclstm/encdec/hlstm).

    Builds the chosen generator, compiles the Theano training/testing
    functions (both maximum-likelihood and expected-objective/DT variants),
    and forwards decoding and parameter (de)serialisation calls to it.
    """
    def __init__(self, gentype, vocab, beamwidth, overgen,
            vocab_size, hidden_size, batch_size, feat_sizes,
            obj='ml', train_mode='all', decode='beam',tokmap=None):
        # hyperparameters
        self.gentype= gentype
        self.decode = decode
        self.mode = train_mode
        self.di = vocab_size
        self.dh = hidden_size
        self.db = batch_size
        self.dfs= feat_sizes
        self.obj= obj
        # choose generator architecture; self.params aliases the chosen
        # generator's shared variables so gradients update them in place
        self.params = []
        if self.gentype=='sclstm':
            self.generator = sclstm(self.gentype,vocab,
                    beamwidth,overgen,
                    self.di,self.dh,self.db,self.dfs)
            self.params = self.generator.params
        elif self.gentype=='encdec':
            self.generator = encdec(self.gentype,vocab,
                    beamwidth,overgen,
                    self.di,self.dh,self.db,self.dfs)
            self.params = self.generator.params
        elif self.gentype=='hlstm':
            self.generator = hlstm(self.gentype,vocab,
                    beamwidth,overgen,
                    self.di,self.dh,self.db,self.dfs,
                    tokmap)
            self.params = self.generator.params
    def config_theano(self):
        """Compile the symbolic graph into train/test functions.

        Creates self.train / self.test (ML objective) and
        self.trainObj / self.testObj (expected BLEU/ERR objective).
        """
        # input tensor variables
        # (original code declared w_idxes twice; the duplicate was removed)
        w_idxes = T.imatrix('w_idxes')
        a = T.imatrix('a')
        sv = T.imatrix('sv')
        s = T.imatrix('s')
        v = T.imatrix('v')
        # cutoff for batch and time
        cutoff_f = T.imatrix('cutoff_f')
        cutoff_b = T.iscalar('cutoff_b')
        # regularization and learning rate
        lr = T.scalar('lr')
        reg = T.scalar('reg')
        # unroll generator and produce cost; encdec takes separate slot (s)
        # and value (v) matrices while the LSTM variants take the joint sv
        if self.gentype=='sclstm':
            self.cost, cutoff_logp = \
                self.generator.unroll(a,sv,w_idxes,cutoff_f,cutoff_b)
        elif self.gentype=='encdec':
            self.cost, cutoff_logp = \
                self.generator.unroll(a,s,v,w_idxes,cutoff_f,cutoff_b)
        elif self.gentype=='hlstm':
            self.cost, cutoff_logp = \
                self.generator.unroll(a,sv,w_idxes,cutoff_f,cutoff_b)
        ###################### ML Training #####################
        # gradients and updates (plain SGD with L2-style shrink term)
        gradients = T.grad( clip_gradient(self.cost,1),self.params )
        updates = OrderedDict(( p, p-lr*g+reg*p ) \
                for p, g in zip( self.params , gradients))
        # theano functions
        self.train = theano.function(
                inputs= [a,sv,s,v, w_idxes, cutoff_f, cutoff_b, lr, reg],
                outputs=-self.cost,
                updates=updates,
                on_unused_input='ignore')
        self.test = theano.function(
                inputs= [a,sv,s,v, w_idxes, cutoff_f, cutoff_b],
                outputs=-self.cost,
                on_unused_input='ignore')
        ###################### DT Training #####################
        # expected objective: sentence probabilities sharpened by gamma,
        # normalised over the batch (cutoff_f[4] holds per-sentence lengths
        # — presumably; TODO confirm against unroll() implementations)
        bleus = T.fvector('bleu')
        errs = T.fvector('err')
        gamma = T.iscalar('gamma')
        senp = T.pow(10,gamma*cutoff_logp/cutoff_f[4][:cutoff_b])/\
                T.sum(T.pow(10,gamma*cutoff_logp/cutoff_f[4][:cutoff_b]))
        xBLEU = T.sum(senp*bleus[:cutoff_b])
        xERR = T.sum(senp*errs[:cutoff_b])
        # maximise expected BLEU while penalising expected slot error
        self.obj = -xBLEU + 0.3*xERR
        obj_grad = T.grad( clip_gradient(self.obj,1),self.params )
        obj_updates = OrderedDict(( p, p-lr*g+reg*p ) \
                for p, g in zip( self.params , obj_grad))
        # expected objective functions
        self.trainObj = theano.function(
                inputs= [a,sv,s,v, w_idxes, cutoff_f, cutoff_b,
                    bleus, errs, gamma, lr, reg],
                outputs=[self.obj,xBLEU,xERR,senp],
                updates=obj_updates,
                on_unused_input='ignore',
                allow_input_downcast=True)
        self.testObj = theano.function(
                inputs= [a,sv,s,v, w_idxes, cutoff_f, cutoff_b,
                    bleus,errs,gamma],
                outputs=[self.obj,xBLEU,xERR],
                on_unused_input='ignore',
                allow_input_downcast=True)
    def gen(self,a,sv,s,v):
        """Decode for one input: beam search or sampling per self.decode."""
        if self.decode=='beam':
            if self.gentype=='sclstm':
                return self.generator.beamSearch(a,sv)
            elif self.gentype=='encdec':
                return self.generator.beamSearch(a,s,v)
            elif self.gentype=='hlstm':
                return self.generator.beamSearch(a,sv)
        else:
            if self.gentype=='sclstm':
                return self.generator.sample(a,sv)
            elif self.gentype=='encdec':
                return self.generator.sample(a,s,v)
            elif self.gentype=='hlstm':
                return self.generator.sample(a,sv)
    def setWordVec(self,word2vec):
        """Initialise the generator's word embeddings from pretrained vectors."""
        self.generator.setWordVec(word2vec)
    def setParams(self,params):
        """Load raw numpy values into the shared parameter variables, in order."""
        for i in range(len(self.params)):
            self.params[i].set_value(params[i])
    def getParams(self):
        """Return the current parameter values as a list of numpy arrays."""
        return [p.get_value() for p in self.params]
    def numOfParams(self):
        """Return the total number of scalar parameters in the model."""
        return sum([p.get_value().size for p in self.params])
    def loadConverseParams(self):
        """Delegate loading of conversation-model parameters to the generator."""
        self.generator.loadConverseParams()
| 37.39375 | 74 | 0.519639 |
ace45e2a5e0348e2615c6b17c93621bfe006bbe9 | 726 | py | Python | openapi_core/validation/util.py | pcrespov/openapi-core | 0e30b71a77ee63737094a2527d62b363c642e94c | [
"BSD-3-Clause"
] | null | null | null | openapi_core/validation/util.py | pcrespov/openapi-core | 0e30b71a77ee63737094a2527d62b363c642e94c | [
"BSD-3-Clause"
] | null | null | null | openapi_core/validation/util.py | pcrespov/openapi-core | 0e30b71a77ee63737094a2527d62b363c642e94c | [
"BSD-3-Clause"
] | null | null | null | """OpenAPI core validation util module"""
from six.moves.urllib.parse import urlparse
def is_absolute(url):
    """Return True when *url* is absolute: scheme-relative (``//host/...``)
    or carrying an explicit scheme (``scheme://...``)."""
    if url.startswith('//'):
        return True
    return '://' in url
def path_qs(url):
    """Return the path component of *url*, re-appending the query string
    when one is present (fragment and netloc are dropped)."""
    parts = urlparse(url)
    if not parts.query:
        return parts.path
    return '{0}?{1}'.format(parts.path, parts.query)
def get_operation_pattern(server_url, request_url_pattern):
    """Return an updated request URL pattern with the server URL removed.

    Uses str.endswith() instead of indexing ``server_url[-1]`` so that an
    empty server URL cannot raise IndexError.
    """
    if server_url.endswith("/"):
        # operations have to start with a slash, so do not remove it
        server_url = server_url[:-1]
    if is_absolute(server_url):
        return request_url_pattern.replace(server_url, "", 1)
    return path_qs(request_url_pattern).replace(server_url, "", 1)
| 29.04 | 76 | 0.670799 |
ace45e898564d594bc7eea09063c207fab5ec039 | 1,917 | py | Python | handlers/account/api.py | hust-hackday/aircraft-server | 51581613765975e978c674aca582f8adc2a4f5a7 | [
"MIT"
] | 1 | 2015-06-05T03:20:31.000Z | 2015-06-05T03:20:31.000Z | handlers/account/api.py | aircraft-game/aircraft-server | 51581613765975e978c674aca582f8adc2a4f5a7 | [
"MIT"
] | null | null | null | handlers/account/api.py | aircraft-game/aircraft-server | 51581613765975e978c674aca582f8adc2a4f5a7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import hashlib
from handlers.base import BaseHandler
import logging
logger = logging.getLogger(__name__)
class AccountLoginHandler(BaseHandler):
    """POST /login: verify username/password against the user collection
    and store the username in the session on success."""
    def post(self):
        username = self.get_argument('username', None)
        password = self.get_argument('password', None)
        # Both fields are required.
        if not username or not password:
            return self.raise_error_page(400)
        db = self.application.database
        user = db.user.find_one({'username': username})
        if not user:
            return self.response_json(content={'message': 'invalid username'}, status_code=400)
        # NOTE(review): unsalted SHA-1 is weak for password storage; changing
        # it would invalidate existing hashes, so a migration would be needed.
        # Presumably Python 2: sha1() of a unicode str would raise on Python 3.
        encrypted_password = hashlib.sha1(password).hexdigest()
        if user.get('password') != encrypted_password:
            return self.response_json(content={'message': 'wrong password'}, status_code=400)
        # Successful login: persist the identity in the session.
        self.session['username'] = username
        return self.response_json(content={'username': username})
class AccountRegisterHandlker(BaseHandler):
    """POST /register: create a new user if the username is free, then log in.

    NOTE(review): class name is misspelled ("Handlker") but may be referenced
    by URL routing elsewhere, so it is not renamed here.
    """
    def post(self):
        username = self.get_argument('username', None)
        password = self.get_argument('password', None)
        # NOTE(review): no default here, unlike the other two arguments —
        # a missing 'repeat-password' raises instead of hitting the 400 below.
        repeat_password = self.get_argument('repeat-password')
        if not username or not password or not repeat_password:
            return self.raise_error_page(400)
        if password != repeat_password:
            return self.response_json(content={'message': 'repeat password wrong'}, status_code=400)
        db = self.application.database
        # Reject duplicate usernames.
        user = db.user.find_one({'username': username})
        if user:
            return self.response_json(content={'message': 'repeat username'}, status_code=400)
        # NOTE(review): unsalted SHA-1 password hashing — weak; see login handler.
        encrypted_password = hashlib.sha1(password).hexdigest()
        db.user.insert({
            'username': username,
            'password': encrypted_password,
        })
        # Auto-login after successful registration.
        self.session['username'] = username
        return self.response_json(content={'username': username})
| 35.5 | 100 | 0.659364 |
ace45ed91c0407ca7181743953651bc9fe3e6f50 | 4,262 | py | Python | BlobSegmentation/data/packer.py | ForrestPi/DefectDetection | 7e999335ffbd50519cdfaba7de0d6bfa306a579a | [
"Unlicense"
] | 4 | 2020-06-03T08:10:13.000Z | 2021-06-11T09:46:48.000Z | BlobSegmentation/data/packer.py | ForrestPi/DefectDetection | 7e999335ffbd50519cdfaba7de0d6bfa306a579a | [
"Unlicense"
] | null | null | null | BlobSegmentation/data/packer.py | ForrestPi/DefectDetection | 7e999335ffbd50519cdfaba7de0d6bfa306a579a | [
"Unlicense"
] | 1 | 2020-04-14T08:28:04.000Z | 2020-04-14T08:28:04.000Z | #_*_ coding: utf-8 -*-
import cPickle
import cv2
import os
import numpy
"""
This class is for packing image data used for training and validation by using cPickle(serialization) and cv2.imencode
"""
class PackData(object):
'''
listFile: str , list file containing all im and gt with bbox
saveLocation: str , example './save/xxx.pkl'
compressParams: ('compress_type',quality), compress_type now only supports .jpg (or .jpeg); quality is 0-100 to balance compress rate
'''
def __init__(self, listFile, saveLocation, compressParams=('.jpg', 100)):
self.listFile = listFile
self.compressParams = compressParams
self.saveLocation = saveLocation
def setCompressParams(self, compressParams):
self.compressParams = compressParams
def pack(self):
dataCollection = []
imageNoneList=[]
gtNoneList=[]
imageComFailList=[]
gtComFailList=[]
successCount=0
fin = open(self.listFile, 'r')
count = 0
for line in fin:
count += 1
tempData = {}
line = line.replace('\n','').split(' ')
imPath = line[0]
gtPath = line[1]
top = int(line[2])
bottom = int(line[3])
left = int(line[4])
right = int(line[5])
# load img
img = cv2.imread(imPath)
if img == None:
print 'Image: %s does not exist!!' % (imPath)
imageNoneList.append(imPath)
continue
ret, buf = cv2.imencode(self.compressParams[0], img, [cv2.IMWRITE_JPEG_QUALITY, self.compressParams[1]])
if not ret:
print 'Image: %s compression error!!' % (imPath)
imageComFailList.append(imPath)
continue
tempData['image'] = buf
# load gt
if gtPath == 'null':
tempData['gt'] = None
else:
gt = cv2.imread(gtPath)
if gt == None:
print 'GT: %s does not exist!!' % (gtPath)
gtNoneList.append(gtPath)
continue
if gt.shape != img.shape:
print 'GT and img have different shape!!'+str(gt.shape)+'|'+str(img.shape)
continue
gt = cv2.cvtColor(gt, cv2.COLOR_BGR2GRAY)
ret, buf = cv2.imencode(self.compressParams[0], gt, [cv2.IMWRITE_JPEG_QUALITY, self.compressParams[1]])
if not ret:
print 'GT: %s compression error!!' % (gtPath)
gtComFailList.append(gtPath)
continue
tempData['gt'] = buf
# append bbox
tempData['border'] = numpy.array([top, bottom, left, right])
successCount +=1
dataCollection.append(tempData)
print '( %d ) Sample processed successfully.' % (successCount)
print 'Processing statistics:'
print 'There are %d image failed to read----' % (len(imageNoneList))
for imagePath in imageNoneList:
print imagePath
print 'There are %d gt failed to read----' % (len(gtNoneList))
for gtPath in gtNoneList:
print gtPath
print 'There are %d image failed to compress----' % (len(imageComFailList))
for imagePath in imageComFailList:
print imagePath
print 'There are %d gt failed to compress----' % (len(gtComFailList))
for gtPath in imageComFailList:
print gtPath
print 'start to save pickle file......'
locationDir = os.path.dirname(self.saveLocation)
if not os.path.exists(locationDir):
os.makedirs(locationDir)
cPickle.dump(dataCollection, open(self.saveLocation, 'wb'), cPickle.HIGHEST_PROTOCOL)
print 'pickle file save successfully!'
def runPackData():
    """Pack the 2017.6.22 training list into its pickle archive."""
    list_path = './train_list_2017.6.22.txt'
    output_path = './train_data_2017.6.22.pkl'
    PackData(list_path, output_path).pack()
# Script entry point: pack the default training list into a pickle archive.
if __name__=='__main__':
    runPackData()
| 34.096 | 137 | 0.545284 |
ace45f1969005bbf58160a81d931ad8de9026a43 | 298 | py | Python | Hackerearth/monk_and_welcome_problem.py | manavbansalcoder/Hacktoberfest2021 | ba20770f070bf9c0b02a8fe2bcbeb72cd559e428 | [
"CC0-1.0"
] | null | null | null | Hackerearth/monk_and_welcome_problem.py | manavbansalcoder/Hacktoberfest2021 | ba20770f070bf9c0b02a8fe2bcbeb72cd559e428 | [
"CC0-1.0"
] | null | null | null | Hackerearth/monk_and_welcome_problem.py | manavbansalcoder/Hacktoberfest2021 | ba20770f070bf9c0b02a8fe2bcbeb72cd559e428 | [
"CC0-1.0"
] | null | null | null | #https://www.hackerearth.com/practice/data-structures/arrays/1-d/practice-problems/algorithm/monk-and-welcome-problem/
from sys import stdin
# Read the array length, then two rows of integers from stdin.
n = int(input())
first_row = list(map(int, stdin.readline().split()))
second_row = list(map(int, stdin.readline().split()))
# Print the element-wise sums of the first n pairs, space-separated.
print(" ".join(str(first_row[i] + second_row[i]) for i in range(n)))
| 42.571429 | 119 | 0.708054 |
ace4601a1f6a656627965d8a3c3f629f35972502 | 4,012 | py | Python | Lixur Protocol/run.py | Nanra/Lixur-Protocol | f445cba0f1b647d3060514bb8b1e82c50ff8afbd | [
"Apache-2.0"
] | null | null | null | Lixur Protocol/run.py | Nanra/Lixur-Protocol | f445cba0f1b647d3060514bb8b1e82c50ff8afbd | [
"Apache-2.0"
] | null | null | null | Lixur Protocol/run.py | Nanra/Lixur-Protocol | f445cba0f1b647d3060514bb8b1e82c50ff8afbd | [
"Apache-2.0"
] | null | null | null | import hashlib
from flask import Flask, jsonify, request
from flask import send_file
import json
import socket
import networkx as nx
from flask_ngrok import run_with_ngrok as run
import matplotlib.pyplot as plt
import os
import sys
import datetime, time
# imported classes
from source.node import Node
from source.util import Util
from source.graph import Keys, Graph
from source.wallet import Wallet
from source.cryptography import KeyGen as keygen
app = Flask(__name__)
cryptography = keygen()
@app.route('/node', methods=['GET', 'POST'])
def register_new_node():
    """Register a neighbour node and report this node's status as JSON."""
    # NOTE(review): `ip_address` and `port` are not defined anywhere in this
    # module — this line raises NameError when hit. Presumably they should be
    # read from the request; confirm intended source before fixing.
    node.register_neighbours(ip_address, port)
    response = node.check_node_status()
    return jsonify(response), 201
@app.route('/transactions/new', methods=['GET', 'POST'])
def new_transaction():
    """Create a signed transaction from this node's wallet to the recipient
    given in the form data. Returns 400 when the local keys are missing."""
    private_key = cryptography.get_ex_private_key(cryptography)
    public_key = cryptography.get_ex_public_key(cryptography)
    alphanumeric_address = cryptography.get_ex_alphanumeric_address(cryptography)
    # Bug fix: the original `if private_key and public_key != None:` parsed as
    # `private_key and (public_key != None)`, so only public_key was
    # None-checked. Check both keys explicitly, matching the error message.
    if private_key is None or public_key is None:
        return jsonify("[-] Private key or public key is not found "), 400
    response = node.graph.make_transaction(
        alphanumeric_address,
        request.form.get('recipient_public_key'),
        request.form.get('amount'),
        cryptography.sign_tx(public_key, private_key, "Lixur"))
    return jsonify(response), 201
@app.route('/wallet', methods=['GET', 'POST'])
def address_retrieval():
    """Return the wallet's addresses and balance, creating a genesis
    transaction first if the wallet does not yet exist."""
    wallet = Wallet()
    # An existing wallet gets the 69,420,000 "genesis" self-transaction;
    # otherwise a fresh 21,000,000 self-transaction is made with new keys.
    # NOTE(review): the branch semantics look inverted vs. the log message
    # ("Genesis Wallet created" on the access_wallet()==True path) — confirm.
    if wallet.access_wallet() == True:
        node.graph.make_transaction(
            wallet.retrieve_addresses()[0],
            wallet.retrieve_addresses()[0],
            69420000,
            cryptography.sign_tx(cryptography.get_public_key(cryptography), cryptography.get_private_key(cryptography), "Lixur"))
        node.refresh()
        print("[+] Genesis Wallet created")
    else:
        node.graph.make_transaction(
            cryptography.get_alphanumeric_address(cryptography),
            cryptography.get_alphanumeric_address(cryptography),
            21000000, cryptography.sign_tx(cryptography.get_public_key(cryptography), cryptography.get_private_key(cryptography), "Lixur"))
        node.refresh()
    # Balance is formatted with thousands separators, e.g. "21,000,000 LXR".
    response = {
        "alphanumeric_address": wallet.retrieve_addresses()[0],
        "readable_address": wallet.retrieve_addresses()[1],
        "balance": "{:,}".format(wallet.get_balance(wallet.retrieve_addresses()[0])) + " LXR"
    }
    return jsonify(response), 201
@app.route('/stats', methods=['GET', 'POST'])
def stats():
    """Return ledger-wide statistics (counts, supply, pending/failed) as JSON."""
    graph = Graph()
    ledger = utils.get_graph()
    # Collect every sender/recipient, then deduplicate via a set.
    unique_addresses = []
    for key in ledger:
        unique_addresses.append(ledger[key]['sender'])
        unique_addresses.append(ledger[key]['recipient'])
    unique_addresses = list(set(unique_addresses))
    # NOTE(review): remove() raises ValueError if "None" is absent, and pop()
    # discards an arbitrary element (set ordering is not deterministic) —
    # presumably meant to drop a second sentinel value; confirm intent.
    unique_addresses.remove("None")
    unique_addresses.pop()
    number_of_unique_addresses = len(unique_addresses)
    # Sum all transaction amounts, then format as e.g. "1,234 LXR".
    total_amount_of_lxr = 0
    for key in ledger:
        total_amount_of_lxr += ledger[key]['amount']
    total_amount_of_lxr = "{:,}".format(total_amount_of_lxr) + " LXR"
    # get_failed_transactions() can apparently return None -> len() TypeError.
    try:
        failed_transactions = len(graph.get_failed_transactions())
    except TypeError:
        failed_transactions = None
    response = {
        "Successful Transaction Count": utils.get_graph_tx_count(),
        "Total Unique Addresses" : number_of_unique_addresses,
        "Total Supply of LXR" : total_amount_of_lxr,
        "Pending Transaction Count": len(graph.get_pending_transactions()),
        "Failed Transaction Count": failed_transactions
    }
    return jsonify(response), 201
@app.route('/', methods=['GET', 'POST'])
def show_DAG():
    """Return the whole DAG ledger as JSON, refreshing the node afterwards."""
    graph_as_json = node.getGraphAsJSONdict()
    node.refresh()
    return jsonify(graph_as_json), 201
if __name__ == '__main__':
    # print("Loading Lixur Testnet Beta Version 1.0.0")
    # `utils` and `node` become module globals referenced by the view
    # functions above, so these names must not change.
    utils = Util()
    node = Node(utils.unique_gen())
    app.run()
ace4602c9515f174dc721f3fc8aba9afedc2ce3b | 5,126 | py | Python | modules/webhooks.py | bisol84/Plex-Meta-Manager | 3acb8671391851a3fd652b643281ccfc7b2a919b | [
"MIT"
] | null | null | null | modules/webhooks.py | bisol84/Plex-Meta-Manager | 3acb8671391851a3fd652b643281ccfc7b2a919b | [
"MIT"
] | 16 | 2021-11-23T19:31:32.000Z | 2022-03-18T21:23:47.000Z | modules/webhooks.py | bisol84/Plex-Meta-Manager | 3acb8671391851a3fd652b643281ccfc7b2a919b | [
"MIT"
] | null | null | null | import logging
from json import JSONDecodeError
from modules import util
from modules.util import Failed
logger = logging.getLogger("Plex Meta Manager")
class Webhooks:
    """Dispatches Plex Meta Manager event payloads to configured webhook URLs,
    with special handling for the 'notifiarr' pseudo-URL."""
    def __init__(self, config, system_webhooks, library=None, notifiarr=None):
        self.config = config
        # Missing hook categories default to empty lists so the *_hooks
        # methods can cheaply no-op.
        self.error_webhooks = system_webhooks["error"] if "error" in system_webhooks else []
        self.run_start_webhooks = system_webhooks["run_start"] if "run_start" in system_webhooks else []
        self.run_end_webhooks = system_webhooks["run_end"] if "run_end" in system_webhooks else []
        self.library = library
        self.notifiarr = notifiarr
    def _request(self, webhooks, json):
        """POST *json* to each webhook (deduplicated); raise Failed on an
        error response. Notifiarr requests are retried up to 6 times on 5xx."""
        if self.config.trace_mode:
            util.separator("Webhooks", space=False, border=False)
            logger.debug("")
            logger.debug(f"JSON: {json}")
        # set() removes duplicate targets; note this also drops ordering.
        for webhook in list(set(webhooks)):
            response = None
            if self.config.trace_mode:
                logger.debug(f"Webhook: {webhook}")
            if webhook == "notifiarr":
                # Silently skipped when no notifiarr client is configured.
                if self.notifiarr:
                    url, params = self.notifiarr.get_url("notification/pmm/")
                    for x in range(6):
                        response = self.config.get(url, json=json, params=params)
                        if response.status_code < 500:
                            break
            else:
                response = self.config.post(webhook, json=json)
            if response:
                try:
                    response_json = response.json()
                    if self.config.trace_mode:
                        logger.debug(f"Response: {response_json}")
                    # Notifiarr-style error envelope takes precedence.
                    if "result" in response_json and response_json["result"] == "error" and "details" in response_json and "response" in response_json["details"]:
                        raise Failed(f"Notifiarr Error: {response_json['details']['response']}")
                    if response.status_code >= 400 or ("result" in response_json and response_json["result"] == "error"):
                        raise Failed(f"({response.status_code} [{response.reason}]) {response_json}")
                except JSONDecodeError:
                    # Non-JSON bodies only count as failures on HTTP errors.
                    if response.status_code >= 400:
                        raise Failed(f"({response.status_code} [{response.reason}])")
    def start_time_hooks(self, start_time):
        """Notify run_start webhooks with the formatted start timestamp."""
        if self.run_start_webhooks:
            self._request(self.run_start_webhooks, {"start_time": start_time.strftime("%Y-%m-%d %H:%M:%S")})
    def end_time_hooks(self, start_time, end_time, run_time, stats):
        """Notify run_end webhooks with run timing and the stats counters."""
        if self.run_end_webhooks:
            self._request(self.run_end_webhooks, {
                "start_time": start_time.strftime("%Y-%m-%d %H:%M:%S"),
                "end_time": end_time.strftime("%Y-%m-%d %H:%M:%S"),
                "run_time": run_time,
                "collections_created": stats["created"],
                "collections_modified": stats["modified"],
                "collections_deleted": stats["deleted"],
                "items_added": stats["added"],
                "items_removed": stats["removed"],
                "added_to_radarr": stats["radarr"],
                "added_to_sonarr": stats["sonarr"],
            })
    def error_hooks(self, text, server=None, library=None, collection=None, playlist=None, critical=True):
        """Notify error webhooks; optional context fields are included only
        when truthy."""
        if self.error_webhooks:
            json = {"error": str(text), "critical": critical}
            if server: json["server_name"] = str(server)
            if library: json["library_name"] = str(library)
            if collection: json["collection"] = str(collection)
            if playlist: json["playlist"] = str(playlist)
            self._request(self.error_webhooks, json)
    def collection_hooks(self, webhooks, collection, poster_url=None, background_url=None, created=False, deleted=False, additions=None, removals=None, playlist=False):
        """Notify *webhooks* about a collection/playlist change, embedding
        base64-encoded poster/background images fetched from Plex when no
        explicit URLs were supplied. No-op when no library is attached."""
        if self.library:
            thumb = None
            # Only fetch the thumb if Plex actually has one set on the item.
            if not poster_url and collection.thumb and next((f for f in collection.fields if f.name == "thumb"), None):
                thumb = self.config.get_image_encoded(f"{self.library.url}{collection.thumb}?X-Plex-Token={self.library.token}")
            art = None
            # Playlists have no background art.
            if not playlist and not background_url and collection.art and next((f for f in collection.fields if f.name == "art"), None):
                art = self.config.get_image_encoded(f"{self.library.url}{collection.art}?X-Plex-Token={self.library.token}")
            self._request(webhooks, {
                "server_name": self.library.PlexServer.friendlyName,
                "library_name": self.library.name,
                "playlist" if playlist else "collection": collection.title,
                "created": created,
                "deleted": deleted,
                "poster": thumb,
                "background": art,
                "poster_url": poster_url,
                "background_url": background_url,
                "additions": additions if additions else [],
                "removals": removals if removals else [],
            })
| 52.845361 | 168 | 0.58135 |
ace460b7acad32390ad0b3227a353f762e8e73b4 | 4,088 | py | Python | organisation/api_v2.py | mohdbakhrayba/it-assets | ea03882ffd70e40c82f5684dc4980ff46520843b | [
"Apache-2.0"
] | null | null | null | organisation/api_v2.py | mohdbakhrayba/it-assets | ea03882ffd70e40c82f5684dc4980ff46520843b | [
"Apache-2.0"
] | null | null | null | organisation/api_v2.py | mohdbakhrayba/it-assets | ea03882ffd70e40c82f5684dc4980ff46520843b | [
"Apache-2.0"
] | null | null | null | from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from rest_framework import viewsets, serializers, status, generics, views
from rest_framework.decorators import detail_route, list_route, renderer_classes, authentication_classes, permission_classes
from rest_framework_recursive.fields import RecursiveField
from organisation.models import Location, OrgUnit, DepartmentUser, CostCentre
class UserLocationSerializer(serializers.ModelSerializer):
    """Minimal Location representation nested inside user payloads."""
    class Meta:
        model = Location
        fields = ('id', 'name')
class UserOrgUnitSerializer(serializers.ModelSerializer):
    """Minimal OrgUnit representation nested inside user payloads."""
    class Meta:
        model = OrgUnit
        fields = ('id', 'name', 'acronym')
class DepartmentUserMinSerializer(serializers.ModelSerializer):
    """Bare id/name representation of a DepartmentUser."""
    class Meta:
        model = DepartmentUser
        fields = (
            'id',
            'name'
        )
class DepartmentUserSerializer(serializers.ModelSerializer):
    """Full DepartmentUser representation with nested location/org units."""
    location = UserLocationSerializer()
    org_unit = UserOrgUnitSerializer()
    group_unit = UserOrgUnitSerializer()
    class Meta:
        model = DepartmentUser
        fields = (
            'id', 'name', 'preferred_name', 'email', 'username', 'title',
            'telephone', 'extension', 'mobile_phone',
            'location',
            'photo_ad',
            'org_unit',
            'group_unit',
            'org_unit_chain',
            'parent',
            'children',
        )
class DepartmentUserViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint listing active, non-excluded department users,
    with related objects prefetched and responses cached."""
    queryset = DepartmentUser.objects.filter(
        **DepartmentUser.ACTIVE_FILTER
    ).exclude(
        account_type__in=DepartmentUser.ACCOUNT_TYPE_EXCLUDE
    ).prefetch_related(
        'location', 'children',
        'org_unit', 'org_unit__children',
    ).order_by('name')
    serializer_class = DepartmentUserSerializer
    # Cache every response for 5 minutes (60*5 seconds).
    @method_decorator(cache_page(60*5))
    def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)
class DepartmentTreeSerializer(serializers.ModelSerializer):
    """Recursive serializer: each user carries its filtered children subtree."""
    children = serializers.ListField(source='children_filtered', child=RecursiveField())
    class Meta:
        model = DepartmentUser
        fields = ('id', 'name', 'title', 'children')
class DepartmentTreeViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint returning the user hierarchy from its roots
    (active, non-excluded users with no parent)."""
    queryset = DepartmentUser.objects.filter(**DepartmentUser.ACTIVE_FILTER).exclude(account_type__in=DepartmentUser.ACCOUNT_TYPE_EXCLUDE).filter(parent__isnull=True)
    serializer_class = DepartmentTreeSerializer
class LocationSerializer(serializers.ModelSerializer):
    """Full Location representation for the locations endpoint."""
    class Meta:
        model = Location
        fields = ('id', 'name', 'point', 'manager', 'address', 'pobox', 'phone', 'fax', 'email', 'url', 'bandwidth_url')
class LocationViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint listing active locations."""
    queryset = Location.objects.filter(active=True)
    serializer_class = LocationSerializer
class OrgUnitSerializer(serializers.ModelSerializer):
    """OrgUnit representation; unit_type is rendered as its display label."""
    unit_type = serializers.CharField(source='get_unit_type_display')
    class Meta:
        model = OrgUnit
        fields = ('id', 'name', 'acronym', 'unit_type', 'manager', 'parent', 'children', 'location')
class OrgUnitViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint listing active org units."""
    queryset = OrgUnit.objects.filter(active=True)
    serializer_class = OrgUnitSerializer
class OrgTreeSerializer(serializers.ModelSerializer):
    """Recursive serializer: each org unit carries its active children subtree."""
    children = serializers.ListField(source='children_active', child=RecursiveField())
    class Meta:
        model = OrgUnit
        fields = ('id', 'name', 'acronym', 'children')
class OrgTreeViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint returning the org-unit hierarchy from its roots."""
    queryset = OrgUnit.objects.filter(active=True, parent__isnull=True)
    serializer_class = OrgTreeSerializer
class CostCentreSerializer(serializers.ModelSerializer):
    """CostCentre representation for the cost-centres endpoint."""
    class Meta:
        model = CostCentre
        fields = ('name', 'code', 'chart_acct_name', 'manager', 'business_manager', 'admin', 'tech_contact')
class CostCentreViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint listing active cost centres."""
    queryset = CostCentre.objects.filter(active=True)
    serializer_class = CostCentreSerializer
| 32.967742 | 166 | 0.708659 |
ace46102761d4f4733f359634952dd7e2e2c3cfd | 290 | py | Python | stai/util/make_test_constants.py | STATION-I/staicoin-blockchain | b8686c75dd5fe7883115d9613858c9c8cadfc4a7 | [
"Apache-2.0"
] | 10 | 2021-10-02T18:33:56.000Z | 2021-11-14T17:10:48.000Z | stai/util/make_test_constants.py | STATION-I/staicoin-blockchain | b8686c75dd5fe7883115d9613858c9c8cadfc4a7 | [
"Apache-2.0"
] | 14 | 2021-10-07T22:10:15.000Z | 2021-12-21T09:13:49.000Z | stai/util/make_test_constants.py | STATION-I/staicoin-blockchain | b8686c75dd5fe7883115d9613858c9c8cadfc4a7 | [
"Apache-2.0"
] | 6 | 2021-10-29T19:36:59.000Z | 2021-12-19T19:52:57.000Z | from typing import Dict
from stai.consensus.constants import ConsensusConstants
from stai.consensus.default_constants import DEFAULT_CONSTANTS
def make_test_constants(test_constants_overrides: Dict) -> ConsensusConstants:
    """Build a ConsensusConstants for tests by overriding fields of the
    default constants with the given mapping."""
    overridden = DEFAULT_CONSTANTS.replace(**test_constants_overrides)
    return overridden
| 32.222222 | 78 | 0.858621 |
ace4614c624db1f61759aa58a6e5110666ad6bfc | 6,147 | py | Python | docs/conf.py | e7mac/crema | 37a9b0201006cca3fb1a60fec124ca2ff01f94a8 | [
"BSD-2-Clause"
] | 73 | 2015-10-26T22:18:54.000Z | 2022-03-27T15:58:16.000Z | docs/conf.py | e7mac/crema | 37a9b0201006cca3fb1a60fec124ca2ff01f94a8 | [
"BSD-2-Clause"
] | 43 | 2015-09-10T13:27:39.000Z | 2022-03-28T15:49:39.000Z | docs/conf.py | e7mac/crema | 37a9b0201006cca3fb1a60fec124ca2ff01f94a8 | [
"BSD-2-Clause"
] | 21 | 2015-10-26T22:19:01.000Z | 2022-03-30T09:43:48.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# crema documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 9 12:09:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'numpydoc',
'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'crema'
copyright = '2017, Brian McFee'
author = 'Brian McFee'
# Dependency mockery
# import mock
from unittest.mock import MagicMock
class Mock(MagicMock):
    """Stand-in for heavyweight build dependencies during doc builds:
    any missing attribute access yields a fresh MagicMock."""
    @classmethod
    def __getattr__(cls, attr_name):
        stub = MagicMock()
        return stub
MOCK_MODULES = ['keras', 'tensorflow', 'numpy', 'mir_eval',
'scipy', 'scipy.stats', 'librosa.util',
'jams', 'h5py', 'librosa']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import imp
crema_version = imp.load_source('crema.version', '../crema/version.py')
version = crema_version.version
# The full version, including alpha/beta/rc tags.
release = crema_version.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
#html_sidebars = {
# '**': [
# 'about.html',
# 'navigation.html',
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# 'donate.html',
# ]
#}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cremadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'crema.tex', 'crema Documentation',
'Brian McFee', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'crema', 'crema Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'crema', 'crema Documentation',
author, 'crema', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None,
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'librosa': ('https://librosa.github.io/librosa', None),
'jams': ('https://jams.readthedocs.io/en/latest', None)}
| 30.58209 | 80 | 0.666179 |
ace4621bb39c522a19161bf930051c42f50e6d22 | 282 | py | Python | pyfr/integrators/dual/__init__.py | NauticalMile64/PyFR | dcb16f01f7a68098efc9d043e77befd076b3eab5 | [
"BSD-3-Clause"
] | 1 | 2020-08-21T02:50:20.000Z | 2020-08-21T02:50:20.000Z | pyfr/integrators/dual/__init__.py | NauticalMile64/PyFR | dcb16f01f7a68098efc9d043e77befd076b3eab5 | [
"BSD-3-Clause"
] | null | null | null | pyfr/integrators/dual/__init__.py | NauticalMile64/PyFR | dcb16f01f7a68098efc9d043e77befd076b3eab5 | [
"BSD-3-Clause"
] | 1 | 2020-08-21T02:50:17.000Z | 2020-08-21T02:50:17.000Z | # -*- coding: utf-8 -*-
from pyfr.integrators.dual.controllers import BaseDualController
from pyfr.integrators.dual.multip import DualMultiPIntegrator
from pyfr.integrators.dual.pseudosteppers import BaseDualPseudoStepper
from pyfr.integrators.dual.steppers import BaseDualStepper
| 40.285714 | 70 | 0.847518 |
ace4637526f075298695ad2479b2e3ab519f8f5d | 4,837 | py | Python | src/olympia/github/utils.py | mstriemer/olympia | 2e700c20e0a8ed3f0dd389d1521c3798bf7ed7f7 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/github/utils.py | mstriemer/olympia | 2e700c20e0a8ed3f0dd389d1521c3798bf7ed7f7 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/github/utils.py | mstriemer/olympia | 2e700c20e0a8ed3f0dd389d1521c3798bf7ed7f7 | [
"BSD-3-Clause"
] | null | null | null | import os
import uuid
import zipfile
import commonware.log
import requests
from django import forms
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django_statsd.clients import statsd
from olympia.amo.helpers import user_media_path
from olympia.files.utils import SafeUnzip
log = commonware.log.getLogger('z.github')
class GithubCallback(object):
    """Wrap a 'github' webhook payload and report add-on validation
    progress to the GitHub commit-status API.

    ``data`` is expected to carry ``type``, ``zip_url`` and ``status_url``
    keys (see GithubRequest.clean for how they are produced).
    """
    def __init__(self, data):
        if data['type'] != 'github':
            raise ValueError('Not a github callback.')
        self.data = data
    def get(self):
        """Download the zipball named in the payload; raise on HTTP error."""
        log.info('Getting zip from github: {}'.format(self.data['zip_url']))
        with statsd.timer('github.zip'):
            response = requests.get(self.data['zip_url'])
            response.raise_for_status()
            return response
    def post(self, url, data):
        """POST ``data`` to the GitHub API, stamped with our linter context."""
        state = data.get('state', 'comment')
        log.info('Setting github to: {} at: {}'.format(state, url))
        with statsd.timer('github.{}'.format(state)):
            # Deliberately mutates the caller's dict: every status we post is
            # attributed to the addons-linter context.
            data['context'] = 'mozilla/addons-linter'
            log.info('Body: {}'.format(data))
            response = requests.post(
                url,
                json=data,
                auth=(settings.GITHUB_API_USER, settings.GITHUB_API_TOKEN))
            log.info('Response: {}'.format(response.content))
            response.raise_for_status()
    def pending(self):
        """Mark the commit status as pending."""
        self.post(self.data['status_url'], data={'state': 'pending'})
    def success(self, url):
        """Mark the commit status as successful, linking to the results page."""
        payload = {'state': 'success', 'target_url': url}
        self.post(self.data['status_url'], data=payload)
    def error(self, url):
        """Mark the commit status as an error: the add-on failed validation."""
        # Not localising because we aren't sure what locale to localise to.
        # GitHub limits this field to "A short description of the status",
        # so the message has to stay terse.
        payload = {
            'state': 'error',
            'description': 'This add-on did not validate.',
            'target_url': url,
        }
        self.post(self.data['status_url'], data=payload)
    def failure(self):
        """Mark the commit status as failed: the validator itself broke."""
        # Not localising because we aren't sure what locale to localise to.
        self.post(self.data['status_url'], data={
            'state': 'failure',
            'description': 'The validator failed to run correctly.',
        })
class GithubRequest(forms.Form):
    """Validate a GitHub pull-request webhook payload and derive the
    commit-status and zipball API URLs for the head commit.

    ``cleaned_data`` ends up with ``status_url``, ``zip_url``, ``sha`` and
    ``type`` keys, matching what GithubCallback consumes.
    """
    status_url = forms.URLField(required=False)
    zip_url = forms.URLField(required=False)
    # NOTE(review): this declared field is shadowed on attribute access by
    # the `sha` property below (Django still registers it in base_fields);
    # confirm the field declaration is intentional.
    sha = forms.CharField(required=False)
    @property
    def repo(self):
        # Head repository object of the pull request payload.
        return self.data['pull_request']['head']['repo']
    @property
    def sha(self):
        # SHA of the head commit the statuses will be attached to.
        return self.data['pull_request']['head']['sha']
    def get_status(self):
        """Return the commit-status API URL for the head commit."""
        return self.repo['statuses_url'].replace('{sha}', self.sha)
    def get_zip(self):
        """Return the zipball archive URL for the head commit."""
        return (
            self.repo['archive_url']
            .replace('{archive_format}', 'zipball')
            .replace('{/ref}', '/' + self.sha))
    def validate_url(self, url):
        """Only accept URLs that point at the GitHub API itself."""
        if not url.startswith('https://api.github.com/'):
            raise forms.ValidationError('Invalid URL: {}'.format(url))
        return url
    def clean(self):
        fields = (
            ('status_url', self.get_status),
            ('zip_url', self.get_zip),
        )
        for url, method in fields:
            try:
                self.cleaned_data[url] = self.validate_url(method())
            except Exception:
                # Was a bare `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt and hid the traceback; log the cause and
                # fail validation with the same user-facing message.
                log.exception('Invalid data in processing JSON')
                raise forms.ValidationError('Invalid data')
        self.cleaned_data['sha'] = self.data['pull_request']['head']['sha']
        self.cleaned_data['type'] = 'github'
        return self.cleaned_data
def rezip_file(response, pk):
    """Repackage a downloaded GitHub zipball as a flat .xpi.

    An .xpi must not contain a top-level directory, but GitHub zipballs wrap
    everything in one, so every entry is rewritten with that leading
    directory component stripped before the archive is handed to the
    validator.

    :param response: requests.Response whose ``content`` is the zipball.
    :param pk: primary key used to name the temporary files.
    :return: path to the rewritten .xpi inside the temp media directory.
    :raises ValueError: if the downloaded archive is not a valid zip.
    """
    loc = os.path.join(user_media_path('addons'), 'temp', uuid.uuid4().hex)
    old_filename = '{}_github_webhook.zip'.format(pk)
    old_path = os.path.join(loc, old_filename)
    with storage.open(old_path, 'wb') as old:
        old.write(response.content)
    new_path = os.path.join(loc, '{}_github_webhook.xpi'.format(pk))
    old_zip = SafeUnzip(old_path)
    if not old_zip.is_valid():
        # The original did a bare `raise` here with no active exception,
        # which itself raises RuntimeError; raise something meaningful.
        raise ValueError(
            'Invalid zip file downloaded from GitHub: {}'.format(old_path))
    # NOTE(review): 'w' (text) mode is kept as in the original; confirm the
    # storage backend treats it as binary, since zip data is binary.
    with storage.open(new_path, 'w') as new:
        new_zip = zipfile.ZipFile(new, 'w')
        try:
            for entry in old_zip.filelist:
                # Basically strip off the leading directory.
                member_name = entry.filename.partition('/')[-1]
                if not member_name:
                    continue
                new_zip.writestr(member_name, old_zip.read(entry.filename))
        finally:
            # Close both archives even if a member write fails, so the
            # file handles don't leak.
            new_zip.close()
            old_zip.close()
    return new_path
| 32.246667 | 79 | 0.601406 |
ace463b7d9180f7dcb63576d48da65d321626cb8 | 6,242 | py | Python | common/controller.py | IanYHWu/msc_2021 | 0ae09ed392cce5fdf0e85d1f96b7af82900835f8 | [
"MIT"
] | null | null | null | common/controller.py | IanYHWu/msc_2021 | 0ae09ed392cce5fdf0e85d1f96b7af82900835f8 | [
"MIT"
] | null | null | null | common/controller.py | IanYHWu/msc_2021 | 0ae09ed392cce5fdf0e85d1f96b7af82900835f8 | [
"MIT"
] | null | null | null | import random
import numpy as np
from common.utils import extract_seeds
class DemoScheduler:
    """Demonstration scheduler: decides on a predefined (currently linear)
    schedule when to query the demonstrator for new trajectories and when
    to run learning updates from the demonstration replay buffer.
    """
    def __init__(self, args, params, rollout, schedule='linear'):
        # args/params are project config objects; rollout is the on-policy
        # rollout storage whose info_batch carries per-env 'level_seed'
        # entries -- assumed shape [n_steps][n_envs]; TODO confirm upstream.
        self.num_timesteps = args.num_timesteps
        self.num_demos = params.num_demo_queries
        self.demo_schedule = schedule
        self.demo_learn_ratio = params.demo_learn_ratio
        self.hot_start = params.hot_start
        self.rollout = rollout
        self.n_envs = params.n_envs
        self.n_steps = params.n_steps
        self.multi = params.demo_multi
        self.multi_seed_sampling = params.multi_seed_sampling
        # Level seeds observed so far; consumed by 'random' multi sampling.
        self.seed_store = set()
        # Counters of demonstrator queries / demo-learning steps performed.
        self.query_count = 0
        self.demo_learn_count = 0
        # With a hot start the buffer is pre-filled, so learning may begin
        # immediately; otherwise wait until the first demo query fills it.
        if self.hot_start:
            self.buffer_empty = False
        else:
            self.buffer_empty = True
    def query_demonstrator(self, curr_timestep):
        """Return True if a demonstration should be requested now."""
        if self.demo_schedule == 'linear':
            return self._linear_schedule(curr_timestep)
        else:
            # Only the linear schedule is implemented.
            raise NotImplementedError
    def learn_from_demos(self, curr_timestep, always_learn=False):
        """Return True if a learning step from the replay buffer is due.

        A step is due roughly every (1 / demo_learn_ratio) environment
        timesteps, counted in units of n_envs * n_steps rollouts.
        """
        if always_learn:
            return True
        learn_every = (1 / self.demo_learn_ratio) * self.n_envs * self.n_steps
        # In single-demo mode nothing can be learned until the buffer has
        # been filled by at least one query.
        if not self.multi and self.buffer_empty:
            return False
        else:
            if curr_timestep > ((self.demo_learn_count + 1) * learn_every):
                self.demo_learn_count += 1
                return True
            else:
                return False
    def store_seeds(self):
        """Record the level seeds of the latest rollout step in seed_store."""
        info = self.rollout.info_batch[-1]
        seeds_list = extract_seeds(info)
        self.seed_store.update(seeds_list)
    def _linear_schedule(self, curr_timestep):
        """Evenly spread num_demos queries across num_timesteps; marks the
        buffer non-empty once the first query fires."""
        demo_every = self.num_timesteps // self.num_demos
        if curr_timestep > ((self.query_count + 1) * demo_every):
            self.buffer_empty = False
            self.query_count += 1
            return True
        else:
            return False
    def get_seeds(self, demos_per_step=2):
        """Return the level seeds to request demonstrations for.

        NOTE(review): falls through and implicitly returns None when
        self.multi is True but multi_seed_sampling is neither 'latest' nor
        'random' -- confirm callers handle that.
        """
        if self.multi and self.multi_seed_sampling == 'latest':
            # Seeds currently running in the vectorized envs.
            info = self.rollout.info_batch[-1]
            seeds = extract_seeds(info)
            return seeds
        elif self.multi and self.multi_seed_sampling == 'random':
            # Sample (with replacement) from every seed seen so far.
            seeds = random.choices(tuple(self.seed_store), k=self.n_envs)
            return seeds
        elif not self.multi:
            # Pick demos_per_step random envs and take their current seeds.
            envs = np.random.randint(0, self.n_envs, demos_per_step)
            seeds = []
            for env in envs:
                seed = self.rollout.info_batch[-1][env]['level_seed']
                seeds.append(seed)
            return seeds
    def get_stats(self):
        """Return (query_count, demo_learn_count, 0.0); the trailing 0.0
        keeps the signature aligned with GAEController.get_stats."""
        return self.query_count, self.demo_learn_count, 0.0
class GAEController:
    """Advantage-based demonstration controller: requests demonstrations for
    the level seeds whose episodes show unusually large mean absolute
    advantage (relative to an exponential moving average across episodes).
    """
    def __init__(self, args, params, rollout):
        self.rollout = rollout
        self.args = args
        self.n_envs = params.n_envs
        self.n_steps = params.n_steps
        # Per-env running mean of |advantage| for the episode in progress.
        self.adv_tracker = np.zeros(self.n_envs)
        # Per-env step counter for the episode in progress.
        self.count_tracker = np.zeros(self.n_envs)
        # Exponential moving average of episode-mean |advantage|.
        self.running_avg = 0.0
        # EMA weight for the newest batch of episode means.
        self.weighting_coef = params.weighting_coef
        # Threshold multiplier: query seeds with adv > rho * running_avg.
        self.rho = params.rho
        self.t = 0
        self.demo_seeds = None
        self.demo_learn_ratio = params.demo_learn_ratio
        self.num_timesteps = args.num_timesteps
        self.num_demos = params.num_demo_queries
        self.hot_start = params.hot_start
        self.query_count = 0
        self.demo_learn_count = 0
        # Hot start means the replay buffer is pre-filled.
        if self.hot_start:
            self.buffer_empty = False
        else:
            self.buffer_empty = True
    def _compute_avg_adv(self):
        """Scan the latest rollout and return (adv_list, seed_list): the
        episode-mean |advantage| and level seed of every episode that
        finished during the rollout. Trackers persist across calls so
        episodes spanning rollout boundaries accumulate correctly.
        """
        # Batches are indexed [step][env]; TODO confirm against rollout impl.
        adv_batch = self.rollout.adv_batch
        done_batch = self.rollout.done_batch
        info_batch = self.rollout.info_batch
        adv_list = []
        seed_list = []
        for i in range(self.n_envs):
            for j in range(self.n_steps):
                if not done_batch[j][i]:
                    # Incremental (running) mean update of |advantage|:
                    # mean += (x - mean) / count.
                    self.count_tracker[i] += 1
                    self.adv_tracker[i] += (1 / self.count_tracker[i]) * (abs(adv_batch[j][i]) - self.adv_tracker[i])
                else:
                    # Episode ended: record its mean and seed, reset trackers.
                    seed = info_batch[j][i]['level_seed']
                    adv_list.append(self.adv_tracker[i])
                    seed_list.append(seed)
                    self.adv_tracker[i] = 0
                    self.count_tracker[i] = 0
        return adv_list, seed_list
    def _update_running_avg(self, adv_list):
        """Fold this rollout's episode means into the EMA (initialised on
        the first non-empty batch)."""
        if adv_list:
            mean_adv_t = np.mean(adv_list)
            if self.t == 0:
                self.running_avg = mean_adv_t
                self.t += 1
            else:
                self.running_avg = self.weighting_coef * mean_adv_t + (1 - self.weighting_coef) * self.running_avg
                self.t += 1
    def _generate_demo_seeds(self):
        """Select seeds whose episode-mean |advantage| exceeds
        rho * running_avg and store them in self.demo_seeds."""
        demo_seeds = []
        adv_list, seed_list = self._compute_avg_adv()
        self._update_running_avg(adv_list)
        for adv, seed in zip(adv_list, seed_list):
            if adv > self.rho * self.running_avg:
                demo_seeds.append(seed)
        self.demo_seeds = demo_seeds
    def query_demonstrator(self, curr_timestep):
        """Refresh demo_seeds and return True if any seed qualifies.

        NOTE(review): unlike DemoScheduler, this never sets buffer_empty to
        False, so learn_from_demos only ever fires after a hot start --
        confirm that is intended.
        """
        self._generate_demo_seeds()
        if self.demo_seeds:
            self.query_count += len(self.demo_seeds)
            return True
        else:
            return False
    def learn_from_demos(self, curr_timestep, always_learn=False):
        """Return True if a learning step from the replay buffer is due
        (same pacing rule as DemoScheduler.learn_from_demos)."""
        if always_learn:
            return True
        learn_every = (1 / self.demo_learn_ratio) * self.n_envs * self.n_steps
        if self.buffer_empty:
            return False
        else:
            if curr_timestep > ((self.demo_learn_count + 1) * learn_every):
                self.demo_learn_count += 1
                return True
            else:
                return False
    def get_seeds(self):
        """Return the seeds selected by the last query_demonstrator call."""
        return self.demo_seeds
    def get_stats(self):
        """Return (query_count, demo_learn_count, running_avg)."""
        return self.query_count, self.demo_learn_count, self.running_avg
| 30.44878 | 117 | 0.591958 |
ace4656f69ee4b615c6c46ae2d955e1c7725e112 | 7,274 | py | Python | 02-core_genes/plot-goea-core-genes.py | rionbr/meionav | 3e0cca6bea206023ea3a3b322c5bfdd2081e9842 | [
"MIT"
] | 1 | 2019-08-15T10:50:38.000Z | 2019-08-15T10:50:38.000Z | 02-core_genes/plot-goea-core-genes.py | rionbr/spermnet | 3e0cca6bea206023ea3a3b322c5bfdd2081e9842 | [
"MIT"
] | null | null | null | 02-core_genes/plot-goea-core-genes.py | rionbr/spermnet | 3e0cca6bea206023ea3a3b322c5bfdd2081e9842 | [
"MIT"
] | null | null | null | # coding=utf-8
# Author: Rion B Correia
# Date: Nov 17, 2020
#
# Description: Reads GOAE results for core genes and plots results
#
#
import math
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from utils import ensurePathExists
import matplotlib as mpl
mpl.rcParams['font.family'] = 'Helvetica'
mpl.rcParams['mathtext.fontset'] = 'cm'
import matplotlib.pyplot as plt
#
from wordcloud import WordCloud
from nltk.corpus import stopwords
def plot_goea(df, celltype='spermatocyte', species='HS', facecolor='red', ns='BP'):
    """Plot a horizontal bar chart of the top-10 enriched GO terms
    (1 - log adjusted p-value) and save it as a PDF under images/goea-bars/.

    :param df: GOEA results with columns 'NS', 'depth', 'p_fdr_bh',
        'name' and 'GO'.
    :param celltype: used only in the output file name.
    :param species: key into the module-level dict_species mapping.
    :param facecolor: bar fill colour.
    :param ns: GO namespace ('BP' or 'MF'); key into dict_ns.

    NOTE(review): dict_species and dict_ns are defined inside the
    __main__ block, so calling this after importing the module would
    raise NameError -- confirm intended usage is script-only.
    """
    df = df.copy()
    # Select
    df = df.loc[(df['NS'] == ns), :]
    # Trim
    df = df.loc[(df['depth'] >= 5), :]
    # All zeros are set to the smallest computable float
    df.loc[df['p_fdr_bh'] == 0.0, 'p_fdr_bh'] = np.nextafter(0, 1)
    # Transform p-values so larger == more significant.
    df['1-log(p)'] = 1 - (np.log(df['p_fdr_bh']))
    print('Plotting GOEA Bars: {celltype:s} {species} {ns:s}'.format(celltype=celltype, species=species, ns=ns))
    species_str = dict_species[species]
    ns_str = dict_ns[ns]
    df = df.sort_values('1-log(p)', ascending=False)
    # Top 10 terms, re-sorted ascending so the most significant bar is on top.
    dft10 = df.iloc[:10, :].sort_values('1-log(p)', ascending=True)
    sl = 75 # string slice
    # Truncate long GO term names for display.
    dft10['name'] = dft10['name'].apply(lambda x: x[0:sl] + '..' if len(x) > sl else x)
    if len(dft10) == 0:
        print('No significant GOs.')
        return None
    # Plot
    fig, ax = plt.subplots(figsize=(4.7, 3.0))
    # P-values
    title = 'GO enrichment - {species:s} {ns:s}'.format(species=species_str, ns=ns_str)
    ind = np.arange(0, len(dft10), 1)
    bp = ax.barh(ind, 1 - np.log(dft10['p_fdr_bh']), height=0.8, facecolor=facecolor, zorder=4)
    ax.set_title(title, fontsize='large')
    # minx is unused; only maxx is needed to offset the labels.
    minx, maxx = ax.get_xlim()
    # Write each term's name just inside the left edge of its bar.
    for bar, name in zip(bp.patches, dft10['name'].tolist()):
        bx = bar.get_x()
        by = bar.get_y()
        bh = bar.get_height()
        # bw = bar.get_width()
        tx = bx + (0.01 * maxx)
        ty = (by + (bh / 2))
        ax.text(x=tx, y=ty, s=name, ha='left', va='center', fontsize='x-small', zorder=5)
    # Significance reference lines at p = 0.01 and p = 0.05.
    ax.axvline(x=(1 - math.log(0.01)), color='#666666', ls='dotted')
    ax.axvline(x=(1 - math.log(0.05)), color='#c7c7c7', ls='dashed')
    ax.set_yticks(ind)
    ax.set_yticklabels(dft10['GO'])
    ax.set_xlabel(r'$1 - $log($p$-value)')
    # NOTE(review): upper limit assumes exactly 10 bars; with fewer
    # significant terms the plot will have empty rows -- confirm acceptable.
    ax.set_ylim(-0.7, (10 - 0.3))
    ax.grid(axis='x', zorder=1)
    plt.subplots_adjust(left=0.21, right=0.97, bottom=0.17, top=0.89)
    #plt.tight_layout()
    # Save and close the figure to free matplotlib state.
    wIMGFile = 'images/goea-bars/img-goea-bars-{celltype:s}-{species:s}-core-genes-{ns:s}.pdf'.format(celltype=celltype, species=species, ns=ns)
    print(wIMGFile)
    ensurePathExists(wIMGFile)
    plt.savefig(wIMGFile, dpi=300, bbox_inches=None, pad_inches=0.0)
    plt.close()
def plot_wordcloud(df, celltype='spermatocyte', species='DM', facecolor='red', ns='BP'):
    """Render a word cloud of enriched GO term names for one module and
    save it as a PDF.

    NOTE(review): this function appears to have been copied from a
    per-module plotting script and is currently dead code (its calls in
    __main__ are commented out). It references several names that are not
    defined anywhere in this file and would raise NameError if called:
    `mid`, `mname`, `specie`, `dict_specie`, `dict_replace`,
    `data_text_color`, `text_color`, `network`, `threshold_str`, `layer`.
    It also filters on a 'module-id' column that is never created here.
    The `facecolor` and `ns` parameters are unused.
    """
    celltype_str = celltype.title()
    # Trim
    df = df.loc[(df['depth'] >= 5), :]
    # All zeros are set to the smallest computable float
    df.loc[df['p_fdr_bh'] == 0.0, 'p_fdr_bh'] = np.nextafter(0, 1)
    # Transform p-values so larger == more significant.
    df['1-log(p)'] = 1 - (np.log(df['p_fdr_bh']))
    # NOTE(review): `dict_specie` undefined (the __main__ block defines
    # `dict_species`) -- NameError if reached.
    species_str = dict_specie[species]
    # Common English words excluded from the cloud.
    english_stopwords = stopwords.words('english')
    print('Plotting GOEA Wordcloud: {celltype:s} - {species}'.format(celltype=celltype, species=species))
    # WordCloud
    # NOTE(review): `mid` (module id) is undefined in this file.
    dft = df.loc[(df['module-id'] == mid), :]
    text = ' '.join(dft['name'].tolist())
    if len(text) == 0:
        print('No significant GOs.')
        return None
    text = text.replace('-', ' ')
    # Set up the figure and a constant-black colour function.
    fig, ax = plt.subplots(figsize=(4.0, 3.0))
    def color_func(*args, **kwargs):
        return (0, 0, 0)
    wordcloud = WordCloud(background_color='white', max_font_size=45, width=400, height=300, stopwords=english_stopwords, relative_scaling='auto', colormap='tab10', color_func=color_func, collocation_threshold=20)
    def calc_frequencies(dfA):
        # Tokenize term names (weighted by 1-log(p)), drop stopwords, then
        # score each word by rank(count) * rank(sum of weights).
        r = []
        for i, dfAt in dfA.iterrows():
            name = dfAt['name']
            pvalue = dfAt['1-log(p)']
            name = name.replace('-', ' ').replace(',', '').replace('.', '').replace("'", '')
            for word in name.split(' '):
                if word not in english_stopwords:
                    r.append((i, word, pvalue))
        dfr = pd.DataFrame(r, columns=['id', 'name', 'pvalue']).set_index('id')
        # Normalize a spelling variant so the two forms aggregate together.
        dfr['name'] = dfr['name'].replace('proteasomal', 'proteasome')
        # Aggregate per word: occurrence count and summed weight.
        dfrg = dfr.groupby('name').agg({'pvalue': ['count', 'sum']})
        dfrg.columns = dfrg.columns.droplevel()
        dfrg['frequency'] = dfrg['count'].rank(method='min') * dfrg['sum'].rank(method='min')
        dfrg.sort_values('frequency', ascending=False, inplace=True)
        return dfrg.reset_index().set_index('name')['frequency'].to_dict()
    frequencies = calc_frequencies(dft)
    wordcloud.generate_from_frequencies(frequencies)
    # wordcloud.generate_from_text(text)
    # This second color_func redefines (shadows) the constant-black one above.
    # NOTE(review): `data_text_color` and `text_color` are undefined here.
    def color_func(word, font_size, position, orientation, random_state=None, **kwargs):
        if word in data_text_color[mid]:
            return text_color
        else:
            return 'black'
    # Recolor
    wordcloud.recolor(color_func=color_func)
    # NOTE(review): `specie`, `mname` and `dict_replace` are undefined here.
    title = 'GOEA-{specie:s} {celltype:s} M{mid:d}-{mname:s}'.format(specie=specie, celltype=celltype_str, mid=mid, mname=dict_replace[mname])
    ax.set_title(title)
    # Draw the rendered cloud image.
    wp = ax.imshow(wordcloud, interpolation='bilinear')
    # Hide axis ticks; the image is the whole plot.
    ax.set_xticks([])
    ax.set_yticks([])
    plt.subplots_adjust(left=0.03, right=0.97, bottom=0.17, top=0.89)
    # NOTE(review): `network`, `threshold_str` and `layer` are undefined here.
    wIMGFile = 'images/goea-wordcloud/{celltype:s}/{layer:s}/img-goea-wc-{celltype:s}-{network:s}-{threshold:s}-{layer:s}-mod-{mid:d}.pdf'.format(celltype=celltype, network=network, threshold=threshold_str, layer=layer, mid=mid)
    ensurePathExists(wIMGFile)
    plt.savefig(wIMGFile, dpi=300, bbox_inches=None, pad_inches=0.0)
    plt.close()
if __name__ == '__main__':
    # Plot GO-enrichment bar charts for core genes across all three species.
    celltype = 'spermatocyte' # spermatocyte or enterocyte
    # NOTE(review): this initial value is immediately shadowed by the loop
    # variable below.
    species = 'HS'
    # Globals consumed by plot_goea (defined only when run as a script).
    dict_species = {'HS': 'Human', 'MM': 'Mouse', 'DM': 'Insect'}
    dict_ns = {'BP': 'biological processes', 'MF': 'molecular functions'}
    """
    data_text_color = {
        1: ['ubiquitination', 'ubiquitin'],
        2: ['splicing'],
        3: ['translation', 'translational', 'cotranslational'],
        4: ['rRNA'],
        5: ['vesicle', 'transport'],
        6: ['respiration', 'respiratory', 'electron'],
        7: ['cell', 'cycle'],
        8: ['DNA', 'repair'],
        9: ['mitochondrial', 'translation', 'translational'],
        10: ['cell', 'cycle'],
        11: ['metabolic'],
        12: ['histidine', 'peptidyl', 'dephosphorylation'],
    }
    """
    # The bare triple-quoted string above is a disabled data_text_color
    # mapping (a block comment); plot_wordcloud would need it defined.
    for species in ['HS', 'MM', 'DM']:
        rCSVFile = 'results/goea/goea-{celltype:s}-{species:s}-core-genes.csv.gz'.format(celltype=celltype, species=species)
        df = pd.read_csv(rCSVFile)
        print(df['NS'].value_counts())
        # Green for biological processes, light green for molecular functions.
        plot_goea(df, celltype, species, facecolor='#2ca02c', ns='BP')
        plot_goea(df, celltype, species, facecolor='#98df8a', ns='MF')
        # Word-cloud plots are currently disabled (plot_wordcloud is broken;
        # see the NOTE in its docstring).
        #plot_wordcloud(df, celltype, species, facecolor='#d62728', ns='BP')
        #plot_wordcloud(df, celltype, species, facecolor='#1f77b4', ns='MF')
| 35.31068 | 228 | 0.609981 |
ace465ef0d30c0aa790c5dce5016462180dc60a4 | 4,894 | py | Python | third_party/unidecode/x05a.py | asysc2020/contentbox | 5c155976e0ce7ea308d62293ab89624d97b21d09 | [
"Apache-2.0"
] | 39 | 2015-06-10T23:18:07.000Z | 2021-10-21T04:29:06.000Z | third_party/unidecode/x05a.py | asysc2020/contentbox | 5c155976e0ce7ea308d62293ab89624d97b21d09 | [
"Apache-2.0"
] | 2 | 2016-08-22T12:38:10.000Z | 2017-01-26T18:37:33.000Z | third_party/unidecode/x05a.py | asysc2020/contentbox | 5c155976e0ce7ea308d62293ab89624d97b21d09 | [
"Apache-2.0"
] | 26 | 2015-06-10T22:09:15.000Z | 2021-06-27T15:45:15.000Z | data = (
'Song ', # 0x00
'Wei ', # 0x01
'Hong ', # 0x02
'Wa ', # 0x03
'Lou ', # 0x04
'Ya ', # 0x05
'Rao ', # 0x06
'Jiao ', # 0x07
'Luan ', # 0x08
'Ping ', # 0x09
'Xian ', # 0x0a
'Shao ', # 0x0b
'Li ', # 0x0c
'Cheng ', # 0x0d
'Xiao ', # 0x0e
'Mang ', # 0x0f
'Fu ', # 0x10
'Suo ', # 0x11
'Wu ', # 0x12
'Wei ', # 0x13
'Ke ', # 0x14
'Lai ', # 0x15
'Chuo ', # 0x16
'Ding ', # 0x17
'Niang ', # 0x18
'Xing ', # 0x19
'Nan ', # 0x1a
'Yu ', # 0x1b
'Nuo ', # 0x1c
'Pei ', # 0x1d
'Nei ', # 0x1e
'Juan ', # 0x1f
'Shen ', # 0x20
'Zhi ', # 0x21
'Han ', # 0x22
'Di ', # 0x23
'Zhuang ', # 0x24
'E ', # 0x25
'Pin ', # 0x26
'Tui ', # 0x27
'Han ', # 0x28
'Mian ', # 0x29
'Wu ', # 0x2a
'Yan ', # 0x2b
'Wu ', # 0x2c
'Xi ', # 0x2d
'Yan ', # 0x2e
'Yu ', # 0x2f
'Si ', # 0x30
'Yu ', # 0x31
'Wa ', # 0x32
'[?] ', # 0x33
'Xian ', # 0x34
'Ju ', # 0x35
'Qu ', # 0x36
'Shui ', # 0x37
'Qi ', # 0x38
'Xian ', # 0x39
'Zhui ', # 0x3a
'Dong ', # 0x3b
'Chang ', # 0x3c
'Lu ', # 0x3d
'Ai ', # 0x3e
'E ', # 0x3f
'E ', # 0x40
'Lou ', # 0x41
'Mian ', # 0x42
'Cong ', # 0x43
'Pou ', # 0x44
'Ju ', # 0x45
'Po ', # 0x46
'Cai ', # 0x47
'Ding ', # 0x48
'Wan ', # 0x49
'Biao ', # 0x4a
'Xiao ', # 0x4b
'Shu ', # 0x4c
'Qi ', # 0x4d
'Hui ', # 0x4e
'Fu ', # 0x4f
'E ', # 0x50
'Wo ', # 0x51
'Tan ', # 0x52
'Fei ', # 0x53
'Wei ', # 0x54
'Jie ', # 0x55
'Tian ', # 0x56
'Ni ', # 0x57
'Quan ', # 0x58
'Jing ', # 0x59
'Hun ', # 0x5a
'Jing ', # 0x5b
'Qian ', # 0x5c
'Dian ', # 0x5d
'Xing ', # 0x5e
'Hu ', # 0x5f
'Wa ', # 0x60
'Lai ', # 0x61
'Bi ', # 0x62
'Yin ', # 0x63
'Chou ', # 0x64
'Chuo ', # 0x65
'Fu ', # 0x66
'Jing ', # 0x67
'Lun ', # 0x68
'Yan ', # 0x69
'Lan ', # 0x6a
'Kun ', # 0x6b
'Yin ', # 0x6c
'Ya ', # 0x6d
'Ju ', # 0x6e
'Li ', # 0x6f
'Dian ', # 0x70
'Xian ', # 0x71
'Hwa ', # 0x72
'Hua ', # 0x73
'Ying ', # 0x74
'Chan ', # 0x75
'Shen ', # 0x76
'Ting ', # 0x77
'Dang ', # 0x78
'Yao ', # 0x79
'Wu ', # 0x7a
'Nan ', # 0x7b
'Ruo ', # 0x7c
'Jia ', # 0x7d
'Tou ', # 0x7e
'Xu ', # 0x7f
'Yu ', # 0x80
'Wei ', # 0x81
'Ti ', # 0x82
'Rou ', # 0x83
'Mei ', # 0x84
'Dan ', # 0x85
'Ruan ', # 0x86
'Qin ', # 0x87
'Hui ', # 0x88
'Wu ', # 0x89
'Qian ', # 0x8a
'Chun ', # 0x8b
'Mao ', # 0x8c
'Fu ', # 0x8d
'Jie ', # 0x8e
'Duan ', # 0x8f
'Xi ', # 0x90
'Zhong ', # 0x91
'Mei ', # 0x92
'Huang ', # 0x93
'Mian ', # 0x94
'An ', # 0x95
'Ying ', # 0x96
'Xuan ', # 0x97
'Jie ', # 0x98
'Wei ', # 0x99
'Mei ', # 0x9a
'Yuan ', # 0x9b
'Zhen ', # 0x9c
'Qiu ', # 0x9d
'Ti ', # 0x9e
'Xie ', # 0x9f
'Tuo ', # 0xa0
'Lian ', # 0xa1
'Mao ', # 0xa2
'Ran ', # 0xa3
'Si ', # 0xa4
'Pian ', # 0xa5
'Wei ', # 0xa6
'Wa ', # 0xa7
'Jiu ', # 0xa8
'Hu ', # 0xa9
'Ao ', # 0xaa
'[?] ', # 0xab
'Bou ', # 0xac
'Xu ', # 0xad
'Tou ', # 0xae
'Gui ', # 0xaf
'Zou ', # 0xb0
'Yao ', # 0xb1
'Pi ', # 0xb2
'Xi ', # 0xb3
'Yuan ', # 0xb4
'Ying ', # 0xb5
'Rong ', # 0xb6
'Ru ', # 0xb7
'Chi ', # 0xb8
'Liu ', # 0xb9
'Mei ', # 0xba
'Pan ', # 0xbb
'Ao ', # 0xbc
'Ma ', # 0xbd
'Gou ', # 0xbe
'Kui ', # 0xbf
'Qin ', # 0xc0
'Jia ', # 0xc1
'Sao ', # 0xc2
'Zhen ', # 0xc3
'Yuan ', # 0xc4
'Cha ', # 0xc5
'Yong ', # 0xc6
'Ming ', # 0xc7
'Ying ', # 0xc8
'Ji ', # 0xc9
'Su ', # 0xca
'Niao ', # 0xcb
'Xian ', # 0xcc
'Tao ', # 0xcd
'Pang ', # 0xce
'Lang ', # 0xcf
'Nao ', # 0xd0
'Bao ', # 0xd1
'Ai ', # 0xd2
'Pi ', # 0xd3
'Pin ', # 0xd4
'Yi ', # 0xd5
'Piao ', # 0xd6
'Yu ', # 0xd7
'Lei ', # 0xd8
'Xuan ', # 0xd9
'Man ', # 0xda
'Yi ', # 0xdb
'Zhang ', # 0xdc
'Kang ', # 0xdd
'Yong ', # 0xde
'Ni ', # 0xdf
'Li ', # 0xe0
'Di ', # 0xe1
'Gui ', # 0xe2
'Yan ', # 0xe3
'Jin ', # 0xe4
'Zhuan ', # 0xe5
'Chang ', # 0xe6
'Ce ', # 0xe7
'Han ', # 0xe8
'Nen ', # 0xe9
'Lao ', # 0xea
'Mo ', # 0xeb
'Zhe ', # 0xec
'Hu ', # 0xed
'Hu ', # 0xee
'Ao ', # 0xef
'Nen ', # 0xf0
'Qiang ', # 0xf1
'Ma ', # 0xf2
'Pie ', # 0xf3
'Gu ', # 0xf4
'Wu ', # 0xf5
'Jiao ', # 0xf6
'Tuo ', # 0xf7
'Zhan ', # 0xf8
'Mao ', # 0xf9
'Xian ', # 0xfa
'Xian ', # 0xfb
'Mo ', # 0xfc
'Liao ', # 0xfd
'Lian ', # 0xfe
'Hua ', # 0xff
)
| 18.895753 | 21 | 0.369228 |
ace465f0ccc2064551dac140ac940ae4add24ee0 | 7,042 | py | Python | testing/tests/001-main/003-self/200-json/001-users.py | fekblom/critic | a6b60c9053e13d4c878d50531860d7389568626d | [
"Apache-2.0"
] | null | null | null | testing/tests/001-main/003-self/200-json/001-users.py | fekblom/critic | a6b60c9053e13d4c878d50531860d7389568626d | [
"Apache-2.0"
] | null | null | null | testing/tests/001-main/003-self/200-json/001-users.py | fekblom/critic | a6b60c9053e13d4c878d50531860d7389568626d | [
"Apache-2.0"
] | null | null | null | # @dependency 001-main/001-empty/003-criticctl/002-adduser-deluser.py
# @dependency 001-main/001-empty/003-criticctl/002-adduser-deluser.py
# @dependency 001-main/001-empty/004-mixed/003-oauth.py
# @dependency 001-main/001-empty/004-mixed/004-password.py
# @dependency 001-main/003-self/028-gitemails.py
# Regression tests for the /api/v1/users endpoints: listings, status/name
# filtering, sorting and pagination, single/multiple lookups, sub-resources
# (emails, filters) and error responses. `frontend`, `instance`, `user_json`
# and `critic_json` are not defined in this file; presumably they are
# injected by Critic's testing harness -- verify against the runner.
# -- Full user listing (all statuses) --
frontend.json(
    "users",
    expect={ "users": [user_json("admin", "Testing Administrator"),
                       user_json("alice"),
                       user_json("bob"),
                       user_json("dave"),
                       user_json("erin"),
                       user_json("howard"),
                       user_json("extra", status="retired"),
                       user_json("carol"),
                       user_json("felix"),
                       user_json("gina", no_email=True),
                       user_json("iris")] })
# -- Filtering by status --
frontend.json(
    "users",
    params={ "status": "current" },
    expect={ "users": [user_json("admin", "Testing Administrator"),
                       user_json("alice"),
                       user_json("bob"),
                       user_json("dave"),
                       user_json("erin"),
                       user_json("howard"),
                       user_json("carol"),
                       user_json("felix"),
                       user_json("gina", no_email=True),
                       user_json("iris")] })
frontend.json(
    "users",
    params={ "status": "retired" },
    expect={ "users": [user_json("extra", status="retired")] })
# -- Sorting by full name, with count/offset pagination --
frontend.json(
    "users",
    params={ "sort": "fullname" },
    expect={ "users": [user_json("alice"),
                       user_json("bob"),
                       user_json("carol"),
                       user_json("dave"),
                       user_json("erin"),
                       user_json("extra", status="retired"),
                       user_json("felix"),
                       user_json("gina", no_email=True),
                       user_json("howard"),
                       user_json("iris"),
                       user_json("admin", "Testing Administrator")] })
frontend.json(
    "users",
    params={ "sort": "fullname",
             "count": "4" },
    expect={ "users": [user_json("alice"),
                       user_json("bob"),
                       user_json("carol"),
                       user_json("dave")] })
frontend.json(
    "users",
    params={ "sort": "fullname",
             "offset": "2",
             "count": "4" },
    expect={ "users": [user_json("carol"),
                       user_json("dave"),
                       user_json("erin"),
                       user_json("extra", status="retired")] })
frontend.json(
    "users",
    params={ "sort": "fullname",
             "offset": "6" },
    expect={ "users": [user_json("felix"),
                       user_json("gina", no_email=True),
                       user_json("howard"),
                       user_json("iris"),
                       user_json("admin", "Testing Administrator")] })
# -- Single-user lookup, by id and by name --
frontend.json(
    "users/%d" % instance.userid("alice"),
    expect=user_json("alice"))
frontend.json(
    "users/%d" % instance.userid("alice"),
    params={ "fields": "id" },
    expect={ "id": instance.userid("alice") })
frontend.json(
    "users",
    params={ "name": "alice" },
    expect=user_json("alice"))
# -- Sub-resources: emails and filters --
frontend.json(
    "users/%d/emails" % instance.userid("alice"),
    expect={ "emails": [{ "address": "alice@example.org",
                          "selected": True,
                          "verified": None }] })
filter_json = { "id": int,
                "type": "reviewer",
                "path": "028-gitemails/",
                "repository": 1,
                "delegates": [instance.userid("erin")] }
frontend.json(
    "users/%d/filters" % instance.userid("alice"),
    expect={ "filters": [filter_json] })
frontend.json(
    "users/%d/filters" % instance.userid("alice"),
    params={ "repository": "critic" },
    expect={ "filters": [filter_json] })
frontend.json(
    "users/%d/filters" % instance.userid("alice"),
    params={ "repository": "1" },
    expect={ "filters": [filter_json] })
# The "include" parameter inlines linked resources in the response.
frontend.json(
    "users/%d/filters" % instance.userid("alice"),
    params={ "include": "users,repositories" },
    expect={ "filters": [{ "id": int,
                           "type": "reviewer",
                           "path": "028-gitemails/",
                           "repository": 1,
                           "delegates": [instance.userid("erin")] }],
             "linked": { "repositories": [critic_json],
                         "users": [user_json("erin")] }})
# -- Multiple-user lookup via comma-separated ids --
frontend.json(
    "users/%d,%d,%d" % (instance.userid("alice"),
                        instance.userid("bob"),
                        instance.userid("dave")),
    expect={ "users": [user_json("alice"),
                       user_json("bob"),
                       user_json("dave")] })
frontend.json(
    "users/%d,%d,%d" % (instance.userid("alice"),
                        instance.userid("bob"),
                        instance.userid("dave")),
    params={ "fields[users]": "name" },
    expect={ "users": [{ "name": "alice" },
                       { "name": "bob" },
                       { "name": "dave" }] })
# -- Error responses: unknown ids/names and invalid parameters --
frontend.json(
    "users/4711",
    expect={ "error": { "title": "No such resource",
                        "message": "Resource not found: Invalid user id: 4711" }},
    expected_http_status=404)
frontend.json(
    "users/alice",
    expect={ "error": { "title": "Invalid API request",
                        "message": "Invalid numeric id: 'alice'" }},
    expected_http_status=400)
frontend.json(
    "users",
    params={ "name": "nosuchuser" },
    expect={ "error": { "title": "No such resource",
                        "message": "Resource not found: Invalid user name: 'nosuchuser'" }},
    expected_http_status=404)
frontend.json(
    "users",
    params={ "status": "clown" },
    expect={ "error": { "title": "Invalid API request",
                        "message": "Invalid user status values: 'clown'" }},
    expected_http_status=400)
frontend.json(
    "users",
    params={ "status": "current,clown,president" },
    expect={ "error": { "title": "Invalid API request",
                        "message": "Invalid user status values: 'clown', 'president'" }},
    expected_http_status=400)
frontend.json(
    "users",
    params={ "sort": "age" },
    expect={ "error": { "title": "Invalid API request",
                        "message": "Invalid user sort parameter: 'age'" }},
    expected_http_status=400)
# Sub-resources of users do not accept trailing id arguments.
frontend.json(
    "users/%d/emails/1" % instance.userid("alice"),
    expect={ "error": { "title": "Invalid API request",
                        "message": "Resource does not support arguments: v1/users/emails" }},
    expected_http_status=400)
frontend.json(
    "users/%d/filters/1" % instance.userid("alice"),
    expect={ "error": { "title": "Invalid API request",
                        "message": "Resource does not support arguments: v1/users/filters" }},
    expected_http_status=400)
ace46620f85b36b66141ceac39681d4682caced5 | 1,538 | py | Python | search/routes/classic_api/__init__.py | ID2797370/arxiv-search | 889402e8eef9a2faaa8e900978cd27ff2784ce33 | [
"MIT"
] | 35 | 2018-12-18T02:51:09.000Z | 2022-03-30T04:43:20.000Z | search/routes/classic_api/__init__.py | ID2797370/arxiv-search | 889402e8eef9a2faaa8e900978cd27ff2784ce33 | [
"MIT"
] | 172 | 2018-02-02T14:35:11.000Z | 2018-12-04T15:35:30.000Z | search/routes/classic_api/__init__.py | ID2797370/arxiv-search | 889402e8eef9a2faaa8e900978cd27ff2784ce33 | [
"MIT"
] | 13 | 2019-01-10T22:01:48.000Z | 2021-11-05T12:25:08.000Z | """Provides the classic search API."""
__all__ = ["blueprint", "exceptions"]
from flask import Blueprint, make_response, request, Response
from arxiv.base import logging
# from arxiv.users.auth import scopes
# from arxiv.users.auth.decorators import scoped
from search import serialize
from search.controllers import classic_api
from search.routes.consts import ATOM_XML
from search.routes.classic_api import exceptions
# Module-level logger for this blueprint's request handlers.
logger = logging.getLogger(__name__)
# Flask blueprint for the classic search API, mounted at the app root ("/").
blueprint = Blueprint("classic_api", __name__, url_prefix="/")
@blueprint.route("query", methods=["GET"])
# @scoped(required=scopes.READ_PUBLIC)
def query() -> Response:
    """Handle the classic API's main search endpoint.

    Delegates to the controller, serializes the results as an Atom feed
    and returns them with the Atom XML content type.
    """
    logger.debug("Got query: %s", request.args)
    data, status_code, headers = classic_api.query(request.args)
    body = serialize.as_atom(data.results, query=data.query)  # type: ignore
    headers.update({"Content-type": ATOM_XML})
    return make_response(body, status_code, headers)
@blueprint.route("<arxiv:paper_id>v<string:version>", methods=["GET"])
# @scoped(required=scopes.READ_PUBLIC)
def paper(paper_id: str, version: str) -> Response:
    """Return Atom-serialized metadata for a single paper version."""
    data, status_code, headers = classic_api.paper(f"{paper_id}v{version}")
    body = serialize.as_atom(data.results)  # type: ignore
    headers.update({"Content-type": ATOM_XML})
    return make_response(body, status_code, headers)
| 34.954545 | 75 | 0.739272 |
ace4668ad1c219ca3a65fe8e35cbc072fdf0875d | 3,017 | py | Python | 33_file.py | CourtHans/401-ops-challenges | 1c695bc52ee2f3fa85ea5ce2656bf64f843329fd | [
"MIT"
] | null | null | null | 33_file.py | CourtHans/401-ops-challenges | 1c695bc52ee2f3fa85ea5ce2656bf64f843329fd | [
"MIT"
] | null | null | null | 33_file.py | CourtHans/401-ops-challenges | 1c695bc52ee2f3fa85ea5ce2656bf64f843329fd | [
"MIT"
] | 1 | 2020-11-17T08:35:35.000Z | 2020-11-17T08:35:35.000Z | #!/usr/bin/env python3
# Script: 401 Op Challenge Day 33
# Author: Courtney Hans
# Date of latest revision: 11/18/20
# Purpose: Signature-based Malware Detection file crawler
# Import libraries
import datetime
import hashlib
import math
import os
import subprocess
import time
from sys import platform
# Declare functions
# create a dynamic timestamp
def create_timestamp():
    """Return the current local time as 'MM-DD-YYYY HH:MM:SS:microseconds'."""
    now = datetime.datetime.now()
    return str(now.strftime('%m-%d-%Y %H:%M:%S:%f'))
# function to check hash against virustotal
def malware_check(the_hash):
    """Look up *the_hash* on VirusTotal via the virustotal-search.py helper.

    Requires the API_KEY_VIRUSTOTAL environment variable (a free key from
    virustotal.com). The helper's report is written to stdout.

    :param the_hash: MD5 hex digest of the file to check.
    :raises ValueError: if API_KEY_VIRUSTOTAL is not set.
    """
    apikey = os.getenv('API_KEY_VIRUSTOTAL')
    if not apikey:
        raise ValueError('API_KEY_VIRUSTOTAL environment variable is not set')
    # Run without a shell and with an argument list: the original built a
    # shell string with os.system(), which allowed shell injection through
    # the hash value (and crashed with TypeError when the key was unset).
    subprocess.run(
        ['python3', 'virustotal-search.py', '-k', apikey, '-m', the_hash])
# hashing function
def hash_it(filename):
    """Return the MD5 hex digest of the named file, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(filename, "rb") as handle:
        while True:
            chunk = handle.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
# use os.walk to crawl through directories and perform the hash
def dirContents_hash():
    """Interactively crawl a directory tree, MD5-hash every file and run
    each hash through malware_check, printing a per-file report and a
    final summary. Prompts the user for the starting path via input().
    """
    dir_count = 0
    file_count = 0
    start_path = input("Please enter the absolute path to the directory you want to scan: ")
    # start_path = "/home/osboxes/Desktop/lab32_dir/lab32folder" # linux test path
    # start_path = "C:\Users\cornt\OneDrive\Desktop\lab32folder" # windows test path
    for (path,dirs,files) in os.walk(start_path):
        print('DIRECTORY: {:s}'.format(path))
        print("")
        dir_count += 1
        #Repeat for each file in directory
        for file in files:
            fstat = os.stat(os.path.join(path,file))
            # Convert file size to MB, KB or Bytes
            # (math.ceil rounds the displayed size up to a whole unit).
            if (fstat.st_size > 1024 * 1024):
                fsize = math.ceil(fstat.st_size / (1024 * 1024))
                unit = "MB"
            elif (fstat.st_size > 1024):
                fsize = math.ceil(fstat.st_size / 1024)
                unit = "KB"
            else:
                fsize = fstat.st_size
                unit = "B"
            file_count += 1
            filename = os.path.join(path,file)
            the_hash = hash_it(filename)
            timestamp = create_timestamp()
            print(timestamp)
            # NOTE(review): prints the literal "(unknown)" even though the
            # full path is available in `filename` -- confirm intent.
            print(f"FILENAME: {file}\tSIZE: {str(fsize) + unit}\tPATH: (unknown)")
            print(f"Virus scan against hash:")
            malware_check(the_hash)
            print("")
    # Print total files and directory count
    print('[*]Summary[*] Files hashed: {}, Directories crawled: {}'.format(file_count,dir_count))
    # NOTE(review): these resets are dead code -- the locals are discarded
    # when the function returns.
    dir_count = 0
    file_count = 0
# Main
# NOTE(review): runs unconditionally at import time; consider guarding with
# `if __name__ == "__main__":` so importing this module doesn't start a scan.
dirContents_hash()
# End
ace466d49125408519c1ebba52dae2e9cdba4305 | 6,221 | py | Python | wes_service/cwl_runner.py | DailyDreaming/workflow-service | 8d8612c8ca2df01510b32d6e3a9b4c2b83ad9e35 | [
"Apache-2.0"
] | null | null | null | wes_service/cwl_runner.py | DailyDreaming/workflow-service | 8d8612c8ca2df01510b32d6e3a9b4c2b83ad9e35 | [
"Apache-2.0"
] | null | null | null | wes_service/cwl_runner.py | DailyDreaming/workflow-service | 8d8612c8ca2df01510b32d6e3a9b4c2b83ad9e35 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import json
import os
import subprocess
import uuid
from wes_service.util import WESBackend
class Workflow(object):
    """State and operations for a single workflow run.

    All per-run state lives on disk under ``./workflows/<run_id>/``: the
    original request, the CWL input object, the runner's stdout/stderr,
    its pid, and eventually its cached exit code.
    """
    def __init__(self, run_id):
        super(Workflow, self).__init__()
        self.run_id = run_id
        self.workdir = os.path.join(os.getcwd(), "workflows", self.run_id)
    def run(self, request, opts):
        """
        Constructs a command to run a cwl/json from requests and opts,
        runs it, and deposits the outputs in outdir.

        Runner:
        opts.getopt("runner", default="cwl-runner")

        CWL (url):
        request["workflow_url"] == a url to a cwl file
        or
        request["workflow_attachment"] == input cwl text (written to a file and a url constructed for that file)

        JSON File:
        request["workflow_params"] == input json text (to be written to a file)

        :param dict request: A dictionary containing the cwl/json information.
        :param wes_service.util.WESBackend opts: contains the user's arguments;
        specifically the runner and runner options
        :return: {"run_id": self.run_id, "state": state}
        """
        os.makedirs(self.workdir)
        outdir = os.path.join(self.workdir, "outdir")
        os.mkdir(outdir)
        # Persist the raw request so GetRunLog can echo it back later.
        with open(os.path.join(self.workdir, "request.json"), "w") as f:
            json.dump(request, f)
        with open(os.path.join(
                self.workdir, "cwl.input.json"), "w") as inputtemp:
            json.dump(request["workflow_params"], inputtemp)
        workflow_url = request.get("workflow_url")  # Will always be local path to descriptor cwl, or url.
        # The runner's stdout/stderr are redirected to files; the parent's
        # handles can be closed as soon as Popen has inherited them.
        output = open(os.path.join(self.workdir, "cwl.output.json"), "w")
        stderr = open(os.path.join(self.workdir, "stderr"), "w")
        runner = opts.getopt("runner", default="cwl-runner")
        extra = opts.getoptlist("extra")
        command_args = [runner] + extra + [workflow_url, inputtemp.name]
        proc = subprocess.Popen(command_args,
                                stdout=output,
                                stderr=stderr,
                                close_fds=True,
                                cwd=outdir)
        output.close()
        stderr.close()
        # Record the child's pid so getstate() can poll it later.
        with open(os.path.join(self.workdir, "pid"), "w") as pid:
            pid.write(str(proc.pid))
        return self.getstatus()
    def getstate(self):
        """
        Returns RUNNING,          -1
                COMPLETE,         0
        or
                EXECUTOR_ERROR, 255
        """
        state = "RUNNING"
        exit_code = -1
        exitcode_file = os.path.join(self.workdir, "exit_code")
        pid_file = os.path.join(self.workdir, "pid")
        if os.path.exists(exitcode_file):
            # A previous poll already reaped the child and cached its code.
            with open(exitcode_file) as f:
                exit_code = int(f.read())
        elif os.path.exists(pid_file):
            with open(pid_file, "r") as pid:
                pid = int(pid.read())
            try:
                # Non-blocking reap: _pid stays 0 while the child still runs.
                (_pid, exit_status) = os.waitpid(pid, os.WNOHANG)
                if _pid != 0:
                    # High byte of the status word holds the exit code.
                    exit_code = exit_status >> 8
                    with open(exitcode_file, "w") as f:
                        f.write(str(exit_code))
                    os.unlink(pid_file)
            except OSError:
                # waitpid failed (e.g. the pid is not our child, or it was
                # already reaped elsewhere) -- treat the run as failed.
                os.unlink(pid_file)
                exit_code = 255
        if exit_code == 0:
            state = "COMPLETE"
        elif exit_code != -1:
            state = "EXECUTOR_ERROR"
        return state, exit_code
    def getstatus(self):
        # Minimal status document: run id plus current state.
        state, exit_code = self.getstate()
        return {
            "run_id": self.run_id,
            "state": state
        }
    def getlog(self):
        # Full run log: original request, captured stderr, and (only when
        # the run completed) the JSON output object produced by the runner.
        state, exit_code = self.getstate()
        with open(os.path.join(self.workdir, "request.json"), "r") as f:
            request = json.load(f)
        with open(os.path.join(self.workdir, "stderr"), "r") as f:
            stderr = f.read()
        outputobj = {}
        if state == "COMPLETE":
            output_path = os.path.join(self.workdir, "cwl.output.json")
            with open(output_path, "r") as outputtemp:
                outputobj = json.load(outputtemp)
        return {
            "run_id": self.run_id,
            "request": request,
            "state": state,
            "workflow_log": {
                "cmd": [""],
                "start_time": "",
                "end_time": "",
                "stdout": "",
                "stderr": stderr,
                "exit_code": exit_code
            },
            "task_logs": [],
            "outputs": outputobj
        }
    def cancel(self):
        # Cancellation is not implemented for the local cwl-runner backend.
        pass
class CWLRunnerBackend(WESBackend):
    """WES backend that shells out to a local ``cwl-runner`` executable.

    Each run lives in ``./workflows/<run_id>`` and is delegated to the
    ``Workflow`` helper class.
    """
    def GetServiceInfo(self):
        # Static service description; nothing here depends on run state.
        return {
            "workflow_type_versions": {
                "CWL": {"workflow_type_version": ["v1.0"]}
            },
            "supported_wes_versions": ["0.3.0"],
            "supported_filesystem_protocols": ["file", "http", "https"],
            "engine_versions": "cwl-runner",
            "system_state_counts": {},
            "key_values": {}
        }
    def ListRuns(self, page_size=None, page_token=None, state_search=None):
        # FIXME #15 results don't page
        runs_root = os.path.join(os.getcwd(), "workflows")
        runs = [Workflow(entry) for entry in os.listdir(runs_root)
                if os.path.isdir(os.path.join(runs_root, entry))]
        summaries = [{"run_id": run.run_id, "state": run.getstate()[0]}
                     for run in runs]
        return {
            "workflows": summaries,
            "next_page_token": ""
        }
    def RunWorkflow(self, **args):
        tempdir, body = self.collect_attachments()
        run_id = uuid.uuid4().hex
        Workflow(run_id).run(body, self)
        return {"run_id": run_id}
    def GetRunLog(self, run_id):
        return Workflow(run_id).getlog()
    def CancelRun(self, run_id):
        job = Workflow(run_id)
        job.cancel()
        return {"run_id": run_id}
    def GetRunStatus(self, run_id):
        return Workflow(run_id).getstatus()
def create_backend(app, opts):
    # Factory hook used by the WES service loader; ``app`` is accepted for
    # interface compatibility but unused by this backend.
    return CWLRunnerBackend(opts)
| 31.261307 | 112 | 0.530783 |
ace469076b841469dc772e6b1e742c6fe01d46ad | 2,387 | py | Python | rate/models.py | Lugaga/AWWWARDS | 4d3b35686747450725f7dda3fa796ff66cb04dc7 | [
"MIT"
] | null | null | null | rate/models.py | Lugaga/AWWWARDS | 4d3b35686747450725f7dda3fa796ff66cb04dc7 | [
"MIT"
] | 3 | 2020-02-12T03:03:55.000Z | 2021-06-10T21:49:09.000Z | rator/models.py | BRIGHTON-ASUMANI/awwards | ee98746f0e27795bc0b9de78f6a963d0c101c858 | [
"MIT"
] | 1 | 2019-08-28T15:42:50.000Z | 2019-08-28T15:42:50.000Z | from django.db import models
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
import datetime as dt
from pyuploadcare.dj.models import ImageField
from django.db.models.signals import post_save
from django.utils import timezone
from django.core.urlresolvers import reverse
class Project(models.Model):
    """A posted project entry that users can view, search and rate."""

    user = models.ForeignKey(User, related_name="poster", on_delete=models.CASCADE)
    landing_page = ImageField(manual_crop='')
    title = models.CharField(max_length=30)
    description = models.TextField()
    link = models.URLField(max_length=250)
    post_date = models.DateTimeField(auto_now_add=True)

    def get_absolute_url(self):
        """Return the canonical URL for this project."""
        return reverse('dump', kwargs={'pk':self.pk})

    def __str__(self):
        return self.title

    @classmethod
    def all_projects(cls):
        """Return all projects ordered by post date (oldest first)."""
        project = cls.objects.order_by('post_date')
        return project

    @classmethod
    def get_image(cls, id):
        """Return the single project with the given primary key."""
        project = cls.objects.get(id=id)
        return project

    @classmethod
    def search_by_title(cls, search_term):
        """Case-insensitively match projects whose title contains search_term.

        Bug fix: the lookup was ``title__title__icontains``, which traverses
        a nonexistent ``title`` relation and raises FieldError at query time;
        the correct lookup on a CharField is ``title__icontains``.
        """
        project = cls.objects.filter(title__icontains=search_term)
        return project
class Profile(models.Model):
    """Extended user profile with avatar, contact number and bio."""

    user = models.ForeignKey(User, related_name="profiler", on_delete=models.CASCADE)
    picture = ImageField()
    contact = models.BigIntegerField()
    bio = models.TextField()

    def get_absolute_url(self):
        """Return the canonical URL for this profile."""
        return reverse('dump', kwargs={'pk':self.pk})

    @classmethod
    def get_all(cls):
        """Return every profile."""
        profiles = cls.objects.all()
        return profiles

    def save_profile(self):
        # Bug fix: this was declared @classmethod while calling self.save();
        # when invoked, ``self`` was bound to the class and Model.save()
        # received no instance, so the call always crashed. It must be an
        # instance method.
        return self.save()

    def delete_profile(self):
        # Bug fix: same @classmethod misuse as save_profile.
        return self.delete()

    def __str__(self):
        return self.user.username
# class Review(models.Model):
# RATING_CHOICES = (
# (1, '1'),
# (2, '2'),
# (3, '3'),
# (4, '4'),
# (5, '5'),
#
# )
# project=models.ForeignKey(Project,null=True)
# user = models.ForeignKey(User,null=True)
# design=models.IntegerField(choices=RATING_CHOICES,null=True)
# usability=models.IntegerField(choices=RATING_CHOICES,null=True)
# content=models.IntegerField(choices=RATING_CHOICES,null=True)
#
#
# @classmethod
# def get_all(cls):
# all_objects = Review.objects.all()
# return all_objects
| 25.945652 | 85 | 0.671135 |
ace4695758198b57bc67ff3c27364814e6a74fab | 43,251 | py | Python | Lib/test/test_bytes.py | clach04/bb_import_jython | 4529a0f578186a1c33c476300294ab42658eaf7c | [
"CNRI-Jython"
] | null | null | null | Lib/test/test_bytes.py | clach04/bb_import_jython | 4529a0f578186a1c33c476300294ab42658eaf7c | [
"CNRI-Jython"
] | null | null | null | Lib/test/test_bytes.py | clach04/bb_import_jython | 4529a0f578186a1c33c476300294ab42658eaf7c | [
"CNRI-Jython"
] | null | null | null | """Unit tests for the bytes and bytearray types.
XXX This is a mess. Common tests should be moved to buffer_tests.py,
which itself ought to be unified with string_tests.py (and the latter
should be modernized).
"""
import os
import re
import sys
import copy
import functools
import pickle
import tempfile
import unittest
import test.test_support
import test.string_tests
import test.buffer_tests
if sys.flags.bytes_warning:
def check_bytes_warnings(func):
@functools.wraps(func)
def wrapper(*args, **kw):
with test.test_support.check_warnings(('', BytesWarning)):
return func(*args, **kw)
return wrapper
else:
# no-op
def check_bytes_warnings(func):
return func
class Indexable:
def __init__(self, value=0):
self.value = value
def __index__(self):
return self.value
class BaseBytesTest(unittest.TestCase):
def test_basics(self):
b = self.type2test()
self.assertEqual(type(b), self.type2test)
self.assertEqual(b.__class__, self.type2test)
def test_empty_sequence(self):
b = self.type2test()
self.assertEqual(len(b), 0)
self.assertRaises(IndexError, lambda: b[0])
self.assertRaises(IndexError, lambda: b[1])
self.assertRaises(IndexError, lambda: b[sys.maxint])
self.assertRaises(IndexError, lambda: b[sys.maxint+1])
self.assertRaises(IndexError, lambda: b[10**100])
self.assertRaises(IndexError, lambda: b[-1])
self.assertRaises(IndexError, lambda: b[-2])
self.assertRaises(IndexError, lambda: b[-sys.maxint])
self.assertRaises(IndexError, lambda: b[-sys.maxint-1])
self.assertRaises(IndexError, lambda: b[-sys.maxint-2])
self.assertRaises(IndexError, lambda: b[-10**100])
def test_from_list(self):
ints = list(range(256))
b = self.type2test(i for i in ints)
self.assertEqual(len(b), 256)
self.assertEqual(list(b), ints)
def test_from_index(self):
b = self.type2test([Indexable(), Indexable(1), Indexable(254),
Indexable(255)])
self.assertEqual(list(b), [0, 1, 254, 255])
self.assertRaises(ValueError, self.type2test, [Indexable(-1)])
self.assertRaises(ValueError, self.type2test, [Indexable(256)])
def test_from_ssize(self):
self.assertEqual(self.type2test(0), b'')
self.assertEqual(self.type2test(1), b'\x00')
self.assertEqual(self.type2test(5), b'\x00\x00\x00\x00\x00')
self.assertRaises(ValueError, self.type2test, -1)
self.assertEqual(self.type2test('0', 'ascii'), b'0')
self.assertEqual(self.type2test(b'0'), b'0')
self.assertRaises(OverflowError, self.type2test, sys.maxsize + 1)
def test_constructor_type_errors(self):
self.assertRaises(TypeError, self.type2test, 0.0)
class C:
pass
# allowed in 2.x
#self.assertRaises(TypeError, self.type2test, ["0"])
self.assertRaises(TypeError, self.type2test, [0.0])
self.assertRaises(TypeError, self.type2test, [None])
self.assertRaises(TypeError, self.type2test, [C()])
def test_constructor_value_errors(self):
self.assertRaises(ValueError, self.type2test, [-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxint])
self.assertRaises(ValueError, self.type2test, [-sys.maxint-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxint-2])
self.assertRaises(ValueError, self.type2test, [-10**100])
self.assertRaises(ValueError, self.type2test, [256])
self.assertRaises(ValueError, self.type2test, [257])
self.assertRaises(ValueError, self.type2test, [sys.maxint])
self.assertRaises(ValueError, self.type2test, [sys.maxint+1])
self.assertRaises(ValueError, self.type2test, [10**100])
def test_compare(self):
b1 = self.type2test([1, 2, 3])
b2 = self.type2test([1, 2, 3])
b3 = self.type2test([1, 3])
self.assertEqual(b1, b2)
self.assertTrue(b2 != b3)
self.assertTrue(b1 <= b2)
self.assertTrue(b1 <= b3)
self.assertTrue(b1 < b3)
self.assertTrue(b1 >= b2)
self.assertTrue(b3 >= b2)
self.assertTrue(b3 > b2)
self.assertFalse(b1 != b2)
self.assertFalse(b2 == b3)
self.assertFalse(b1 > b2)
self.assertFalse(b1 > b3)
self.assertFalse(b1 >= b3)
self.assertFalse(b1 < b2)
self.assertFalse(b3 < b2)
self.assertFalse(b3 <= b2)
@check_bytes_warnings
def test_compare_to_str(self):
# Byte comparisons with unicode should always fail!
# Test this for all expected byte orders and Unicode character sizes
self.assertEqual(self.type2test(b"\0a\0b\0c") == u"abc", False)
self.assertEqual(self.type2test(b"\0\0\0a\0\0\0b\0\0\0c") == u"abc", False)
self.assertEqual(self.type2test(b"a\0b\0c\0") == u"abc", False)
self.assertEqual(self.type2test(b"a\0\0\0b\0\0\0c\0\0\0") == u"abc", False)
self.assertEqual(self.type2test() == unicode(), False)
self.assertEqual(self.type2test() != unicode(), True)
def test_reversed(self):
input = list(map(ord, "Hello"))
b = self.type2test(input)
output = list(reversed(b))
input.reverse()
self.assertEqual(output, input)
def test_getslice(self):
def by(s):
return self.type2test(map(ord, s))
b = by("Hello, world")
self.assertEqual(b[:5], by("Hello"))
self.assertEqual(b[1:5], by("ello"))
self.assertEqual(b[5:7], by(", "))
self.assertEqual(b[7:], by("world"))
self.assertEqual(b[7:12], by("world"))
self.assertEqual(b[7:100], by("world"))
self.assertEqual(b[:-7], by("Hello"))
self.assertEqual(b[-11:-7], by("ello"))
self.assertEqual(b[-7:-5], by(", "))
self.assertEqual(b[-5:], by("world"))
self.assertEqual(b[-5:12], by("world"))
self.assertEqual(b[-5:100], by("world"))
self.assertEqual(b[-100:5], by("Hello"))
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
L = list(range(255))
b = self.type2test(L)
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(b[start:stop:step], self.type2test(L[start:stop:step]))
#XXX: Jython doesn't support codepoints outside of the UTF-16 range even at
# parse time. Maybe someday we might push the error off to later, but for
# now I'm just commenting this whole test out.
# See http://bugs.jython.org/issue1836 for more.
# def test_encoding(self):
# sample = u"Hello world\n\u1234\u5678\u9abc\udef0"
# for enc in ("utf8", "utf16"):
# b = self.type2test(sample, enc)
# self.assertEqual(b, self.type2test(sample.encode(enc)))
# self.assertRaises(UnicodeEncodeError, self.type2test, sample, "latin1")
# b = self.type2test(sample, "latin1", "ignore")
# self.assertEqual(b, self.type2test(sample[:-4], "utf-8"))
def test_decode(self):
sample = u"Hello world\n\u1234\u5678\u9abc\def0\def0"
for enc in ("utf8", "utf16"):
b = self.type2test(sample, enc)
self.assertEqual(b.decode(enc), sample)
sample = u"Hello world\n\x80\x81\xfe\xff"
b = self.type2test(sample, "latin1")
self.assertRaises(UnicodeDecodeError, b.decode, "utf8")
self.assertEqual(b.decode("utf8", "ignore"), "Hello world\n")
self.assertEqual(b.decode(errors="ignore", encoding="utf8"),
"Hello world\n")
def test_from_int(self):
b = self.type2test(0)
self.assertEqual(b, self.type2test())
b = self.type2test(10)
self.assertEqual(b, self.type2test([0]*10))
b = self.type2test(10000)
self.assertEqual(b, self.type2test([0]*10000))
def test_concat(self):
b1 = self.type2test(b"abc")
b2 = self.type2test(b"def")
self.assertEqual(b1 + b2, b"abcdef")
self.assertEqual(b1 + bytes(b"def"), b"abcdef")
self.assertEqual(bytes(b"def") + b1, b"defabc")
self.assertRaises(TypeError, lambda: b1 + u"def")
self.assertRaises(TypeError, lambda: u"abc" + b2)
def test_repeat(self):
for b in b"abc", self.type2test(b"abc"):
self.assertEqual(b * 3, b"abcabcabc")
self.assertEqual(b * 0, b"")
self.assertEqual(b * -1, b"")
self.assertRaises(TypeError, lambda: b * 3.14)
self.assertRaises(TypeError, lambda: 3.14 * b)
# XXX Shouldn't bytes and bytearray agree on what to raise?
self.assertRaises((OverflowError, MemoryError),
lambda: b * sys.maxsize)
def test_repeat_1char(self):
self.assertEqual(self.type2test(b'x')*100, self.type2test([ord('x')]*100))
def test_contains(self):
b = self.type2test(b"abc")
self.assertIn(ord('a'), b)
self.assertIn(int(ord('a')), b)
self.assertNotIn(200, b)
self.assertRaises(ValueError, lambda: 300 in b)
self.assertRaises(ValueError, lambda: -1 in b)
self.assertRaises(TypeError, lambda: None in b)
self.assertRaises(TypeError, lambda: float(ord('a')) in b)
self.assertRaises(TypeError, lambda: u"a" in b)
for f in bytes, bytearray:
self.assertIn(f(b""), b)
self.assertIn(f(b"a"), b)
self.assertIn(f(b"b"), b)
self.assertIn(f(b"c"), b)
self.assertIn(f(b"ab"), b)
self.assertIn(f(b"bc"), b)
self.assertIn(f(b"abc"), b)
self.assertNotIn(f(b"ac"), b)
self.assertNotIn(f(b"d"), b)
self.assertNotIn(f(b"dab"), b)
self.assertNotIn(f(b"abd"), b)
def test_fromhex(self):
self.assertRaises(TypeError, self.type2test.fromhex)
self.assertRaises(TypeError, self.type2test.fromhex, 1)
self.assertEqual(self.type2test.fromhex(u''), self.type2test())
b = bytearray([0x1a, 0x2b, 0x30])
self.assertEqual(self.type2test.fromhex(u'1a2B30'), b)
self.assertEqual(self.type2test.fromhex(u' 1A 2B 30 '), b)
self.assertEqual(self.type2test.fromhex(u'0000'), b'\0\0')
self.assertRaises(ValueError, self.type2test.fromhex, u'a')
self.assertRaises(ValueError, self.type2test.fromhex, u'rt')
self.assertRaises(ValueError, self.type2test.fromhex, u'1a b cd')
self.assertRaises(ValueError, self.type2test.fromhex, u'\x00')
self.assertRaises(ValueError, self.type2test.fromhex, u'12 \x00 34')
def test_join(self):
self.assertEqual(self.type2test(b"").join([]), b"")
self.assertEqual(self.type2test(b"").join([b""]), b"")
for lst in [[b"abc"], [b"a", b"bc"], [b"ab", b"c"], [b"a", b"b", b"c"]]:
lst = list(map(self.type2test, lst))
self.assertEqual(self.type2test(b"").join(lst), b"abc")
self.assertEqual(self.type2test(b"").join(tuple(lst)), b"abc")
self.assertEqual(self.type2test(b"").join(iter(lst)), b"abc")
self.assertEqual(self.type2test(b".").join([b"ab", b"cd"]), b"ab.cd")
# XXX more...
def test_count(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.count(b'i'), 4)
self.assertEqual(b.count(b'ss'), 2)
self.assertEqual(b.count(b'w'), 0)
def test_startswith(self):
b = self.type2test(b'hello')
self.assertFalse(self.type2test().startswith(b"anything"))
self.assertTrue(b.startswith(b"hello"))
self.assertTrue(b.startswith(b"hel"))
self.assertTrue(b.startswith(b"h"))
self.assertFalse(b.startswith(b"hellow"))
self.assertFalse(b.startswith(b"ha"))
def test_endswith(self):
b = self.type2test(b'hello')
self.assertFalse(bytearray().endswith(b"anything"))
self.assertTrue(b.endswith(b"hello"))
self.assertTrue(b.endswith(b"llo"))
self.assertTrue(b.endswith(b"o"))
self.assertFalse(b.endswith(b"whello"))
self.assertFalse(b.endswith(b"no"))
def test_find(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.find(b'ss'), 2)
self.assertEqual(b.find(b'ss', 3), 5)
self.assertEqual(b.find(b'ss', 1, 7), 2)
self.assertEqual(b.find(b'ss', 1, 3), -1)
self.assertEqual(b.find(b'w'), -1)
self.assertEqual(b.find(b'mississippian'), -1)
def test_rfind(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rfind(b'ss'), 5)
self.assertEqual(b.rfind(b'ss', 3), 5)
self.assertEqual(b.rfind(b'ss', 0, 6), 2)
self.assertEqual(b.rfind(b'w'), -1)
self.assertEqual(b.rfind(b'mississippian'), -1)
def test_index(self):
b = self.type2test(b'world')
self.assertEqual(b.index(b'w'), 0)
self.assertEqual(b.index(b'orl'), 1)
self.assertRaises(ValueError, b.index, b'worm')
self.assertRaises(ValueError, b.index, b'ldo')
def test_rindex(self):
# XXX could be more rigorous
b = self.type2test(b'world')
self.assertEqual(b.rindex(b'w'), 0)
self.assertEqual(b.rindex(b'orl'), 1)
self.assertRaises(ValueError, b.rindex, b'worm')
self.assertRaises(ValueError, b.rindex, b'ldo')
def test_replace(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.replace(b'i', b'a'), b'massassappa')
self.assertEqual(b.replace(b'ss', b'x'), b'mixixippi')
def test_split(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.split(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.split(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.split(b'w'), [b])
def test_split_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.split(), [b'arf', b'barf'])
self.assertEqual(b.split(None), [b'arf', b'barf'])
self.assertEqual(b.split(None, 2), [b'arf', b'barf'])
for b in (b'a\x1Cb', b'a\x1Db', b'a\x1Eb', b'a\x1Fb'):
b = self.type2test(b)
self.assertEqual(b.split(), [b])
self.assertEqual(self.type2test(b' a bb c ').split(None, 0), [b'a bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 1), [b'a', b'bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 2), [b'a', b'bb', b'c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 3), [b'a', b'bb', b'c'])
def test_split_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').split, u' ')
def test_split_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.split(), [b'\x1c\x1d\x1e\x1f'])
def test_rsplit(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rsplit(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.rsplit(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.rsplit(b'w'), [b])
def test_rsplit_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.rsplit(), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None, 2), [b'arf', b'barf'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 0), [b' a bb c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 1), [b' a bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 2), [b' a', b'bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 3), [b'a', b'bb', b'c'])
def test_rsplit_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').rsplit, u' ')
def test_rsplit_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.rsplit(), [b'\x1c\x1d\x1e\x1f'])
def test_partition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.partition(b'ss'), (b'mi', b'ss', b'issippi'))
self.assertEqual(b.partition(b'w'), (b'mississippi', b'', b''))
def test_rpartition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rpartition(b'ss'), (b'missi', b'ss', b'ippi'))
self.assertEqual(b.rpartition(b'i'), (b'mississipp', b'i', b''))
self.assertEqual(b.rpartition(b'w'), (b'', b'', b'mississippi'))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
b = self.type2test(b)
ps = pickle.dumps(b, proto)
q = pickle.loads(ps)
self.assertEqual(b, q)
def test_strip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.strip(b'i'), b'mississipp')
self.assertEqual(b.strip(b'm'), b'ississippi')
self.assertEqual(b.strip(b'pi'), b'mississ')
self.assertEqual(b.strip(b'im'), b'ssissipp')
self.assertEqual(b.strip(b'pim'), b'ssiss')
self.assertEqual(b.strip(b), b'')
def test_lstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.lstrip(b'i'), b'mississippi')
self.assertEqual(b.lstrip(b'm'), b'ississippi')
self.assertEqual(b.lstrip(b'pi'), b'mississippi')
self.assertEqual(b.lstrip(b'im'), b'ssissippi')
self.assertEqual(b.lstrip(b'pim'), b'ssissippi')
def test_rstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rstrip(b'i'), b'mississipp')
self.assertEqual(b.rstrip(b'm'), b'mississippi')
self.assertEqual(b.rstrip(b'pi'), b'mississ')
self.assertEqual(b.rstrip(b'im'), b'mississipp')
self.assertEqual(b.rstrip(b'pim'), b'mississ')
def test_strip_whitespace(self):
b = self.type2test(b' \t\n\r\f\vabc \t\n\r\f\v')
self.assertEqual(b.strip(), b'abc')
self.assertEqual(b.lstrip(), b'abc \t\n\r\f\v')
self.assertEqual(b.rstrip(), b' \t\n\r\f\vabc')
def test_strip_bytearray(self):
self.assertEqual(self.type2test(b'abc').strip(memoryview(b'ac')), b'b')
self.assertEqual(self.type2test(b'abc').lstrip(memoryview(b'ac')), b'bc')
self.assertEqual(self.type2test(b'abc').rstrip(memoryview(b'ac')), b'ab')
def test_strip_string_error(self):
self.assertRaises(TypeError, self.type2test(b'abc').strip, u'b')
self.assertRaises(TypeError, self.type2test(b'abc').lstrip, u'b')
self.assertRaises(TypeError, self.type2test(b'abc').rstrip, u'b')
def test_ord(self):
b = self.type2test(b'\0A\x7f\x80\xff')
self.assertEqual([ord(b[i:i+1]) for i in range(len(b))],
[0, 65, 127, 128, 255])
def test_none_arguments(self):
# issue 11828
b = self.type2test(b'hello')
l = self.type2test(b'l')
h = self.type2test(b'h')
x = self.type2test(b'x')
o = self.type2test(b'o')
self.assertEqual(2, b.find(l, None))
self.assertEqual(3, b.find(l, -2, None))
self.assertEqual(2, b.find(l, None, -2))
self.assertEqual(0, b.find(h, None, None))
self.assertEqual(3, b.rfind(l, None))
self.assertEqual(3, b.rfind(l, -2, None))
self.assertEqual(2, b.rfind(l, None, -2))
self.assertEqual(0, b.rfind(h, None, None))
self.assertEqual(2, b.index(l, None))
self.assertEqual(3, b.index(l, -2, None))
self.assertEqual(2, b.index(l, None, -2))
self.assertEqual(0, b.index(h, None, None))
self.assertEqual(3, b.rindex(l, None))
self.assertEqual(3, b.rindex(l, -2, None))
self.assertEqual(2, b.rindex(l, None, -2))
self.assertEqual(0, b.rindex(h, None, None))
self.assertEqual(2, b.count(l, None))
self.assertEqual(1, b.count(l, -2, None))
self.assertEqual(1, b.count(l, None, -2))
self.assertEqual(0, b.count(x, None, None))
self.assertEqual(True, b.endswith(o, None))
self.assertEqual(True, b.endswith(o, -2, None))
self.assertEqual(True, b.endswith(l, None, -2))
self.assertEqual(False, b.endswith(x, None, None))
self.assertEqual(True, b.startswith(h, None))
self.assertEqual(True, b.startswith(l, -2, None))
self.assertEqual(True, b.startswith(h, None, -2))
self.assertEqual(False, b.startswith(x, None, None))
def test_find_etc_raise_correct_error_messages(self):
# issue 11828
b = self.type2test(b'hello')
x = self.type2test(b'x')
self.assertRaisesRegexp(TypeError, r'\bfind\b', b.find,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\brfind\b', b.rfind,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bindex\b', b.index,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\brindex\b', b.rindex,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bcount\b', b.count,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bstartswith\b', b.startswith,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bendswith\b', b.endswith,
x, None, None, None)
class ByteArrayTest(BaseBytesTest):
type2test = bytearray
def test_nohash(self):
self.assertRaises(TypeError, hash, bytearray())
def test_bytearray_api(self):
short_sample = b"Hello world\n"
sample = short_sample + b"\0"*(20 - len(short_sample))
tfn = tempfile.mktemp()
try:
# Prepare
with open(tfn, "wb") as f:
f.write(short_sample)
# Test readinto
with open(tfn, "rb") as f:
b = bytearray(20)
n = f.readinto(b)
self.assertEqual(n, len(short_sample))
# Python 2.x
b_sample = (ord(s) for s in sample)
self.assertEqual(list(b), list(b_sample))
# Test writing in binary mode
with open(tfn, "wb") as f:
f.write(b)
with open(tfn, "rb") as f:
self.assertEqual(f.read(), sample)
# Text mode is ambiguous; don't test
finally:
try:
os.remove(tfn)
except os.error:
pass
def test_reverse(self):
b = bytearray(b'hello')
self.assertEqual(b.reverse(), None)
self.assertEqual(b, b'olleh')
b = bytearray(b'hello1') # test even number of items
b.reverse()
self.assertEqual(b, b'1olleh')
b = bytearray()
b.reverse()
self.assertFalse(b)
def test_regexps(self):
def by(s):
return bytearray(map(ord, s))
b = by("Hello, world")
self.assertEqual(re.findall(r"\w+", b), [by("Hello"), by("world")])
def test_setitem(self):
b = bytearray([1, 2, 3])
b[1] = 100
self.assertEqual(b, bytearray([1, 100, 3]))
b[-1] = 200
self.assertEqual(b, bytearray([1, 100, 200]))
b[0] = Indexable(10)
self.assertEqual(b, bytearray([10, 100, 200]))
try:
b[3] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[-10] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[0] = 256
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = Indexable(-1)
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = None
self.fail("Didn't raise TypeError")
except TypeError:
pass
def test_delitem(self):
b = bytearray(range(10))
del b[0]
self.assertEqual(b, bytearray(range(1, 10)))
del b[-1]
self.assertEqual(b, bytearray(range(1, 9)))
del b[4]
self.assertEqual(b, bytearray([1, 2, 3, 4, 6, 7, 8]))
def test_setslice(self):
b = bytearray(range(10))
self.assertEqual(list(b), list(range(10)))
b[0:5] = bytearray([1, 1, 1, 1, 1])
self.assertEqual(b, bytearray([1, 1, 1, 1, 1, 5, 6, 7, 8, 9]))
del b[0:-5]
self.assertEqual(b, bytearray([5, 6, 7, 8, 9]))
b[0:0] = bytearray([0, 1, 2, 3, 4])
self.assertEqual(b, bytearray(range(10)))
b[-7:-3] = bytearray([100, 101])
self.assertEqual(b, bytearray([0, 1, 2, 100, 101, 7, 8, 9]))
b[3:5] = [3, 4, 5, 6]
self.assertEqual(b, bytearray(range(10)))
b[3:0] = [42, 42, 42]
self.assertEqual(b, bytearray([0, 1, 2, 42, 42, 42, 3, 4, 5, 6, 7, 8, 9]))
def test_extended_set_del_slice(self):
indices = (0, None, 1, 3, 19, 300, 1<<333, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip invalid step 0
for step in indices[1:]:
L = list(range(255))
b = bytearray(L)
# Make sure we have a slice of exactly the right length,
# but with different data.
data = L[start:stop:step]
data.reverse()
L[start:stop:step] = data
b[start:stop:step] = data
self.assertEqual(b, bytearray(L))
del L[start:stop:step]
del b[start:stop:step]
self.assertEqual(b, bytearray(L))
def test_setslice_trap(self):
# This test verifies that we correctly handle assigning self
# to a slice of self (the old Lambert Meertens trap).
b = bytearray(range(256))
b[8:] = b
self.assertEqual(b, bytearray(list(range(8)) + list(range(256))))
def test_iconcat(self):
b = bytearray(b"abc")
b1 = b
b += b"def"
self.assertEqual(b, b"abcdef")
self.assertEqual(b, b1)
self.assertTrue(b is b1)
b += b"xyz"
self.assertEqual(b, b"abcdefxyz")
try:
b += u""
except TypeError:
pass
else:
self.fail("bytes += unicode didn't raise TypeError")
def test_irepeat(self):
b = bytearray(b"abc")
b1 = b
b *= 3
self.assertEqual(b, b"abcabcabc")
self.assertEqual(b, b1)
self.assertTrue(b is b1)
def test_irepeat_1char(self):
b = bytearray(b"x")
b1 = b
b *= 100
self.assertEqual(b, b"x"*100)
self.assertEqual(b, b1)
self.assertTrue(b is b1)
def test_alloc(self):
b = bytearray()
alloc = b.__alloc__()
self.assertTrue(alloc >= 0)
seq = [alloc]
for i in range(100):
b += b"x"
alloc = b.__alloc__()
self.assertTrue(alloc >= len(b))
if alloc not in seq:
seq.append(alloc)
def test_extend(self):
orig = b'hello'
a = bytearray(orig)
a.extend(a)
self.assertEqual(a, orig + orig)
self.assertEqual(a[5:], orig)
a = bytearray(b'')
# Test iterators that don't have a __length_hint__
a.extend(map(ord, orig * 25))
a.extend(ord(x) for x in orig * 25)
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
a.extend(iter(map(ord, orig * 50)))
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
a.extend(list(map(ord, orig * 50)))
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
self.assertRaises(ValueError, a.extend, [0, 1, 2, 256])
self.assertRaises(ValueError, a.extend, [0, 1, 2, -1])
self.assertEqual(len(a), 0)
a = bytearray(b'')
a.extend([Indexable(ord('a'))])
self.assertEqual(a, b'a')
def test_remove(self):
b = bytearray(b'hello')
b.remove(ord('l'))
self.assertEqual(b, b'helo')
b.remove(ord('l'))
self.assertEqual(b, b'heo')
self.assertRaises(ValueError, lambda: b.remove(ord('l')))
self.assertRaises(ValueError, lambda: b.remove(400))
self.assertRaises(TypeError, lambda: b.remove(u'e'))
# remove first and last
b.remove(ord('o'))
b.remove(ord('h'))
self.assertEqual(b, b'e')
self.assertRaises(TypeError, lambda: b.remove(u'e'))
b.remove(Indexable(ord('e')))
self.assertEqual(b, b'')
def test_pop(self):
b = bytearray(b'world')
self.assertEqual(b.pop(), ord('d'))
self.assertEqual(b.pop(0), ord('w'))
self.assertEqual(b.pop(-2), ord('r'))
self.assertRaises(IndexError, lambda: b.pop(10))
self.assertRaises(IndexError, lambda: bytearray().pop())
# test for issue #6846
self.assertEqual(bytearray(b'\xff').pop(), 0xff)
def test_nosort(self):
self.assertRaises(AttributeError, lambda: bytearray().sort())
    def test_append(self):
        # append() takes a single byte value (or __index__ object), returns
        # None like other in-place mutators, and rejects non-integers.
        b = bytearray(b'hell')
        b.append(ord('o'))
        self.assertEqual(b, b'hello')
        self.assertEqual(b.append(100), None)
        b = bytearray()
        b.append(ord('A'))
        self.assertEqual(len(b), 1)
        self.assertRaises(TypeError, lambda: b.append(u'o'))
        b = bytearray()
        b.append(Indexable(ord('A')))
        self.assertEqual(b, b'A')
    def test_insert(self):
        # insert() clamps out-of-range indexes (like list.insert) and
        # accepts negative positions and __index__ values.
        b = bytearray(b'msssspp')
        b.insert(1, ord('i'))
        b.insert(4, ord('i'))
        b.insert(-2, ord('i'))
        b.insert(1000, ord('i'))
        self.assertEqual(b, b'mississippi')
        # allowed in 2.x
        #self.assertRaises(TypeError, lambda: b.insert(0, b'1'))
        b = bytearray()
        b.insert(0, Indexable(ord('A')))
        self.assertEqual(b, b'A')
    def test_copied(self):
        # Issue 4348. Make sure that operations that don't mutate the array
        # copy the bytes (they must not return self, even for no-op
        # arguments such as count=0 or an identity translation table).
        b = bytearray(b'abc')
        self.assertFalse(b is b.replace(b'abc', b'cde', 0))
        t = bytearray([i for i in range(256)])
        x = bytearray(b'')
        self.assertFalse(x is x.translate(t))
    def test_partition_bytearray_doesnt_share_nullstring(self):
        # When the separator is absent, partition()/rpartition() fill the
        # remaining slots with empty bytearrays; those must be distinct
        # objects so mutating one cannot affect the other.
        a, b, c = bytearray(b"x").partition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        self.assertTrue(b is not c)
        b += b"!"
        self.assertEqual(c, b"")
        a, b, c = bytearray(b"x").partition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        # Same for rpartition
        b, c, a = bytearray(b"x").rpartition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
        self.assertTrue(b is not c)
        b += b"!"
        self.assertEqual(c, b"")
        c, b, a = bytearray(b"x").rpartition(b"y")
        self.assertEqual(b, b"")
        self.assertEqual(c, b"")
    def test_resize_forbidden(self):
        # #4509: can't resize a bytearray when there are buffer exports, even
        # if it wouldn't reallocate the underlying buffer.
        # Furthermore, no destructive changes to the buffer may be applied
        # before raising the error.
        b = bytearray(range(10))
        # Holding a memoryview pins the buffer and forbids any resize.
        v = memoryview(b)
        def resize(n):
            b[1:-1] = range(n + 1, 2*n - 1)
        # Same-size slice assignment is still permitted.
        resize(10)
        orig = b[:]
        self.assertRaises(BufferError, resize, 11)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, resize, 9)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, resize, 0)
        self.assertEqual(b, orig)
        # Other operations implying resize
        self.assertRaises(BufferError, b.pop, 0)
        self.assertEqual(b, orig)
        self.assertRaises(BufferError, b.remove, b[1])
        self.assertEqual(b, orig)
        def delitem():
            del b[1]
        self.assertRaises(BufferError, delitem)
        self.assertEqual(b, orig)
        # deleting a non-contiguous slice
        def delslice():
            b[1:-1:2] = b""
        self.assertRaises(BufferError, delslice)
        self.assertEqual(b, orig)
    def test_empty_bytearray(self):
        # Issue #7561: operations on empty bytearrays could crash in many
        # situations, due to a fragile implementation of the
        # PyByteArray_AS_STRING() C macro.
        # int() of an empty buffer should raise ValueError, not crash.
        self.assertRaises(ValueError, int, bytearray(b''))
class AssortedBytesTest(unittest.TestCase):
    #
    # Test various combinations of bytes and bytearray
    #
    @check_bytes_warnings
    def test_repr_str(self):
        # str() and repr() must produce identical literal-style output for
        # both bytes and bytearray, with non-printables hex-escaped.
        for f in str, repr:
            self.assertEqual(f(bytearray()), "bytearray(b'')")
            self.assertEqual(f(bytearray([0])), "bytearray(b'\\x00')")
            self.assertEqual(f(bytearray([0, 1, 254, 255])),
                             "bytearray(b'\\x00\\x01\\xfe\\xff')")
            self.assertEqual(f(b"abc"), "b'abc'")
            self.assertEqual(f(b"'"), '''b"'"''') # '''
            self.assertEqual(f(b"'\""), r"""b'\'"'""") # '
    def test_compare_bytes_to_bytearray(self):
        # All six rich comparisons must agree regardless of which operand
        # is the literal and which is the constructed bytes object.
        self.assertEqual(b"abc" == bytes(b"abc"), True)
        self.assertEqual(b"ab" != bytes(b"abc"), True)
        self.assertEqual(b"ab" <= bytes(b"abc"), True)
        self.assertEqual(b"ab" < bytes(b"abc"), True)
        self.assertEqual(b"abc" >= bytes(b"ab"), True)
        self.assertEqual(b"abc" > bytes(b"ab"), True)
        self.assertEqual(b"abc" != bytes(b"abc"), False)
        self.assertEqual(b"ab" == bytes(b"abc"), False)
        self.assertEqual(b"ab" > bytes(b"abc"), False)
        self.assertEqual(b"ab" >= bytes(b"abc"), False)
        self.assertEqual(b"abc" < bytes(b"ab"), False)
        self.assertEqual(b"abc" <= bytes(b"ab"), False)
        self.assertEqual(bytes(b"abc") == b"abc", True)
        self.assertEqual(bytes(b"ab") != b"abc", True)
        self.assertEqual(bytes(b"ab") <= b"abc", True)
        self.assertEqual(bytes(b"ab") < b"abc", True)
        self.assertEqual(bytes(b"abc") >= b"ab", True)
        self.assertEqual(bytes(b"abc") > b"ab", True)
        self.assertEqual(bytes(b"abc") != b"abc", False)
        self.assertEqual(bytes(b"ab") == b"abc", False)
        self.assertEqual(bytes(b"ab") > b"abc", False)
        self.assertEqual(bytes(b"ab") >= b"abc", False)
        self.assertEqual(bytes(b"abc") < b"ab", False)
        self.assertEqual(bytes(b"abc") <= b"ab", False)
    def test_doc(self):
        # Both builtin types must carry docstrings starting with their
        # constructor signature.
        self.assertIsNotNone(bytearray.__doc__)
        self.assertTrue(bytearray.__doc__.startswith("bytearray("), bytearray.__doc__)
        self.assertIsNotNone(bytes.__doc__)
        self.assertTrue(bytes.__doc__.startswith("bytes("), bytes.__doc__)
    def test_from_bytearray(self):
        # bytearray must be constructible from a memoryview over bytes.
        sample = bytes(b"Hello world\n\x80\x81\xfe\xff")
        buf = memoryview(sample)
        b = bytearray(buf)
        self.assertEqual(b, bytearray(sample))
    @check_bytes_warnings
    def test_to_str(self):
        self.assertEqual(str(b''), "b''")
        self.assertEqual(str(b'x'), "b'x'")
        self.assertEqual(str(b'\x80'), "b'\\x80'")
        self.assertEqual(str(bytearray(b'')), "bytearray(b'')")
        self.assertEqual(str(bytearray(b'x')), "bytearray(b'x')")
        self.assertEqual(str(bytearray(b'\x80')), "bytearray(b'\\x80')")
    def test_literal(self):
        # Bytes literals (plain and raw) must round-trip through
        # bytearray(str, 'latin-1'); non-ASCII chars are a SyntaxError
        # inside a bytes literal.
        tests = [
            (b"Wonderful spam", "Wonderful spam"),
            (br"Wonderful spam too", "Wonderful spam too"),
            (b"\xaa\x00\000\200", "\xaa\x00\000\200"),
            (br"\xaa\x00\000\200", r"\xaa\x00\000\200"),
        ]
        for b, s in tests:
            self.assertEqual(b, bytearray(s, 'latin-1'))
        for c in range(128, 256):
            self.assertRaises(SyntaxError, eval,
                              'b"%s"' % chr(c))
    def test_translate(self):
        # translate() must return a new object, never mutate the source,
        # and accept None as an identity table when only deleting.
        b = b'hello'
        ba = bytearray(b)
        rosetta = bytearray(range(0, 256))
        rosetta[ord('o')] = ord('e')
        c = b.translate(rosetta, b'l')
        self.assertEqual(b, b'hello')
        self.assertEqual(c, b'hee')
        c = ba.translate(rosetta, b'l')
        self.assertEqual(ba, b'hello')
        self.assertEqual(c, b'hee')
        c = b.translate(None, b'e')
        self.assertEqual(c, b'hllo')
        c = ba.translate(None, b'e')
        self.assertEqual(c, b'hllo')
        self.assertRaises(TypeError, b.translate, None, None)
        self.assertRaises(TypeError, ba.translate, None, None)
    def test_split_bytearray(self):
        # split() must accept any buffer-protocol object as separator.
        self.assertEqual(b'a b'.split(memoryview(b' ')), [b'a', b'b'])
    def test_rsplit_bytearray(self):
        self.assertEqual(b'a b'.rsplit(memoryview(b' ')), [b'a', b'b'])
# Optimizations:
# __iter__? (optimization)
# __reversed__? (optimization)
# XXX More string methods? (Those that don't use character properties)
# There are tests in string_tests.py that are more
# comprehensive for things like split, partition, etc.
# Unfortunately they are all bundled with tests that
# are not appropriate for bytes
# I've started porting some of those into bytearray_tests.py, we should port
# the rest that make sense (the code can be cleaned up to use modern
# unittest methods at the same time).
class BytearrayPEP3137Test(unittest.TestCase,
        test.buffer_tests.MixinBytesBufferCommonTests):
    # Adapts the shared buffer-test mixin to bytearray by converting each
    # expected/actual value through marshal().
    def marshal(self, x):
        return bytearray(x)
    def test_returns_new_copy(self):
        val = self.marshal(b'1234')
        # On immutable types these MAY return a reference to themselves
        # but on mutable types like bytearray they MUST return a new copy.
        for methname in ('zfill', 'rjust', 'ljust', 'center'):
            method = getattr(val, methname)
            newval = method(3)
            self.assertEqual(val, newval)
            self.assertTrue(val is not newval,
                            methname+' returned self on a mutable object')
        for expr in ('val.split()[0]', 'val.rsplit()[0]',
                     'val.partition(".")[0]', 'val.rpartition(".")[2]',
                     'val.splitlines()[0]', 'val.replace("", "")'):
            newval = eval(expr)
            self.assertEqual(val, newval)
            self.assertTrue(val is not newval,
                            expr+' returned val on a mutable object')
class FixedStringTest(test.string_tests.BaseTest):
    # Reuses the generic string test suite against bytes-like types by
    # encoding str fixtures to UTF-8; tests that rely on str-only character
    # semantics are disabled below with empty overrides.
    def fixtype(self, obj):
        if isinstance(obj, str):
            return obj.encode("utf-8")
        return super(FixedStringTest, self).fixtype(obj)
    # Currently the bytes containment testing uses a single integer
    # value. This may not be the final design, but until then the
    # bytes section with in a bytes containment not valid
    def test_contains(self):
        pass
    def test_expandtabs(self):
        pass
    def test_upper(self):
        pass
    def test_lower(self):
        pass
    def test_hash(self):
        # XXX check this out
        pass
class ByteArrayAsStringTest(FixedStringTest):
    # Run the adapted string test suite with bytearray as the type under test.
    type2test = bytearray
class ByteArraySubclass(bytearray):
    # Minimal subclass used by ByteArraySubclassTest to verify subclassing
    # behavior (comparison, concat, pickling, copying).
    pass
class ByteArraySubclassTest(unittest.TestCase):
    def test_basic(self):
        # Subclass instances must interoperate with plain bytes in
        # comparisons, concatenation and repetition.
        self.assertTrue(issubclass(ByteArraySubclass, bytearray))
        self.assertIsInstance(ByteArraySubclass(), bytearray)
        a, b = b"abcd", b"efgh"
        _a, _b = ByteArraySubclass(a), ByteArraySubclass(b)
        # test comparison operators with subclass instances
        self.assertTrue(_a == _a)
        self.assertTrue(_a != _b)
        self.assertTrue(_a < _b)
        self.assertTrue(_a <= _b)
        self.assertTrue(_b >= _a)
        self.assertTrue(_b > _a)
        self.assertTrue(_a is not a)
        # test concat of subclass instances
        self.assertEqual(a + b, _a + _b)
        self.assertEqual(a + b, a + _b)
        self.assertEqual(a + b, _a + b)
        # test repeat
        self.assertTrue(a*5 == _a*5)
    def test_join(self):
        # Make sure join returns a NEW object for single item sequences
        # involving a subclass.
        # Make sure that it is of the appropriate type.
        s1 = ByteArraySubclass(b"abcd")
        s2 = bytearray().join([s1])
        self.assertTrue(s1 is not s2)
        self.assertTrue(type(s2) is bytearray, type(s2))
        # Test reverse, calling join on subclass
        s3 = s1.join([b"abcd"])
        self.assertTrue(type(s3) is bytearray)
    def test_pickle(self):
        # Pickling must round-trip the subclass type AND instance
        # attributes, for every supported pickle protocol.
        a = ByteArraySubclass(b"abcd")
        a.x = 10
        a.y = ByteArraySubclass(b"efgh")
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            b = pickle.loads(pickle.dumps(a, proto))
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            self.assertEqual(a.x, b.x)
            self.assertEqual(a.y, b.y)
            self.assertEqual(type(a), type(b))
            self.assertEqual(type(a.y), type(b.y))
    def test_copy(self):
        # copy.copy and copy.deepcopy must preserve subclass type and
        # instance attributes, like pickling above.
        a = ByteArraySubclass(b"abcd")
        a.x = 10
        a.y = ByteArraySubclass(b"efgh")
        for copy_method in (copy.copy, copy.deepcopy):
            b = copy_method(a)
            self.assertNotEqual(id(a), id(b))
            self.assertEqual(a, b)
            self.assertEqual(a.x, b.x)
            self.assertEqual(a.y, b.y)
            self.assertEqual(type(a), type(b))
            self.assertEqual(type(a.y), type(b.y))
    def test_init_override(self):
        # A subclass may reorder constructor arguments; positional and
        # keyword forms must both reach bytearray.__init__ correctly.
        class subclass(bytearray):
            def __init__(self, newarg=1, *args, **kwargs):
                bytearray.__init__(self, *args, **kwargs)
        x = subclass(4, source=b"abcd")
        self.assertEqual(x, b"abcd")
        x = subclass(newarg=4, source=b"abcd")
        self.assertEqual(x, b"abcd")
def test_main():
    # Entry point used by the regression-test runner.
    # NOTE(review): test.test_support is the Python 2 module name
    # (renamed test.support in Python 3) — this file appears to target a
    # 2.x-era port; confirm against the intended interpreter.
    #test.test_support.run_unittest(BytesTest)
    #test.test_support.run_unittest(AssortedBytesTest)
    #test.test_support.run_unittest(BytesAsStringTest)
    test.test_support.run_unittest(
        ByteArrayTest,
        ByteArrayAsStringTest,
        ByteArraySubclassTest,
        BytearrayPEP3137Test)
if __name__ == "__main__":
    test_main()
| 38.479537 | 96 | 0.576102 |
ace4699a0e100fbdb6cbf514679928e7704ec650 | 7,552 | py | Python | BaseTools/Source/Python/GenFds/CapsuleData.py | GlovePuppet/edk2 | 8028f03032182f2c72e7699e1d14322bb5586581 | [
"BSD-2-Clause"
] | 2,757 | 2018-04-28T21:41:36.000Z | 2022-03-29T06:33:36.000Z | BaseTools/Source/Python/GenFds/CapsuleData.py | HouQiming/edk2 | ba07eef98ec49068d6453aba2aed73f6e7d7f600 | [
"BSD-2-Clause"
] | 20 | 2019-07-23T15:29:32.000Z | 2022-01-21T12:53:04.000Z | BaseTools/Source/Python/GenFds/CapsuleData.py | HouQiming/edk2 | ba07eef98ec49068d6453aba2aed73f6e7d7f600 | [
"BSD-2-Clause"
] | 449 | 2018-05-09T05:54:05.000Z | 2022-03-30T14:54:18.000Z | ## @file
# generate capsule
#
# Copyright (c) 2007-2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from io import BytesIO
from struct import pack
import os
from Common.Misc import SaveFileOnChange
import uuid
## base class for capsule data
#
#
class CapsuleData:
    ## Base-class constructor; concrete capsule items define their own state.
    #
    #   @param  self        The object pointer
    def __init__(self):
        pass
    ## Hook for producing this capsule item's content.
    #
    #  Subclasses override this to return a generated file name or buffer;
    #  the base implementation produces nothing.
    #
    #   @param  self        The object pointer
    def GenCapsuleSubItem(self):
        return None
## FFS class for capsule data
#
#
class CapsuleFfs (CapsuleData):
    ## Constructor: no FFS object or FV name is bound yet.
    #
    #   @param  self        The object pointer
    #
    def __init__(self) :
        self.Ffs = None
        self.FvName = None
    ## Generate FFS capsule data by delegating to the bound FFS object.
    #
    #   @param  self        The object pointer
    #   @retval string      Generated file name
    #
    def GenCapsuleSubItem(self):
        return self.Ffs.GenFfs()
## FV class for capsule data
#
#
class CapsuleFv (CapsuleData):
    ## Constructor: FV reference is supplied later via attributes.
    #
    #   @param  self        The object pointer
    #
    def __init__(self) :
        self.Ffs = None
        self.FvName = None
        self.CapsuleName = None
    ## Generate FV capsule data.
    #
    #  A name containing '.fv' is treated as an existing file path and only
    #  has workspace macros expanded; otherwise the FV is looked up in the
    #  FDF profile and rendered into a temporary buffer. Returns None when
    #  the name is neither a path nor a known FV.
    #
    #   @param  self        The object pointer
    #   @retval string      Generated file name
    #
    def GenCapsuleSubItem(self):
        if '.fv' in self.FvName:
            return GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FvName)
        FvKey = self.FvName.upper()
        FvDict = GenFdsGlobalVariable.FdfParser.Profile.FvDict
        if FvKey in FvDict:
            FvObj = FvDict[FvKey]
            FdBuffer = BytesIO()
            # Temporarily tag the FV with this capsule's name while rendering.
            FvObj.CapsuleName = self.CapsuleName
            GeneratedFv = FvObj.AddToBuffer(FdBuffer)
            FvObj.CapsuleName = None
            FdBuffer.close()
            return GeneratedFv
## FD class for capsule data
#
#
class CapsuleFd (CapsuleData):
    ## Constructor: FD reference is supplied later via attributes.
    #
    #   @param  self        The object pointer
    #
    def __init__(self) :
        self.Ffs = None
        self.FdName = None
        self.CapsuleName = None
    ## Generate FD capsule data.
    #
    #  A name containing '.fd' is treated as an existing file path and only
    #  has workspace macros expanded; otherwise the FD is looked up in the
    #  FDF profile and generated. Returns None when the name is neither a
    #  path nor a known FD.
    #
    #   @param  self        The object pointer
    #   @retval string      Generated file name
    #
    def GenCapsuleSubItem(self):
        if '.fd' in self.FdName:
            return GenFdsGlobalVariable.ReplaceWorkspaceMacro(self.FdName)
        FdKey = self.FdName.upper()
        FdDict = GenFdsGlobalVariable.FdfParser.Profile.FdDict
        if FdKey in FdDict:
            return FdDict[FdKey].GenFd()
## AnyFile class for capsule data
#
#
class CapsuleAnyFile (CapsuleData):
    ## Constructor: the file path is supplied later via the FileName attribute.
    #
    #   @param  self        The object pointer
    #
    def __init__(self) :
        self.Ffs = None
        self.FileName = None
    ## Generate AnyFile capsule data: the item is simply the bound file path.
    #
    #   @param  self        The object pointer
    #   @retval string      Generated file name
    #
    def GenCapsuleSubItem(self):
        return self.FileName
## Afile class for capsule data
#
#
class CapsuleAfile (CapsuleData):
    ## Constructor: the file path is supplied later via the FileName attribute.
    #
    #  NOTE(review): this class is byte-for-byte identical to CapsuleAnyFile;
    #  presumably kept separate so FDF 'AFILE' and raw-file sections can be
    #  distinguished by type — confirm before merging.
    #
    #   @param  self        The object pointer
    #
    def __init__(self) :
        self.Ffs = None
        self.FileName = None
    ## Generate Afile capsule data: the item is simply the bound file path.
    #
    #   @param  self        The object pointer
    #   @retval string      Generated file name
    #
    def GenCapsuleSubItem(self):
        return self.FileName
class CapsulePayload(CapsuleData):
    '''Generate payload file, the header is defined below:
    #pragma pack(1)
    typedef struct {
        UINT32 Version;
        EFI_GUID UpdateImageTypeId;
        UINT8 UpdateImageIndex;
        UINT8 reserved_bytes[3];
        UINT32 UpdateImageSize;
        UINT32 UpdateVendorCodeSize;
        UINT64 UpdateHardwareInstance; //Introduced in v2
    } EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER;
    '''
    def __init__(self):
        self.UiName = None
        self.Version = None
        self.ImageTypeId = None
        self.ImageIndex = None
        self.HardwareInstance = None
        # NOTE: despite the list initializers, callers assign single file
        # paths (strings) to ImageFile/VendorCodeFile before generation.
        self.ImageFile = []
        self.VendorCodeFile = []
        self.Certificate_Guid = None
        self.MonotonicCount = None
        self.Existed = False
        self.Buffer = None
    ## Build the FMP capsule image header followed by the (optionally
    #  authenticated) image content and optional vendor code bytes.
    #
    #   @param  AuthData    [MonotonicCount, dwLength, wRevision,
    #                        wCertificateType, CertType-GUID] when the
    #                        payload is authenticated, else empty.
    #   @retval bytes       The serialized payload.
    #
    def GenCapsuleSubItem(self, AuthData=[]):
        # Fall back to spec defaults for optional fields.
        if not self.Version:
            self.Version = '0x00000002'
        if not self.ImageIndex:
            self.ImageIndex = '0x1'
        if not self.HardwareInstance:
            self.HardwareInstance = '0x0'
        ImageFileSize = os.path.getsize(self.ImageFile)
        if AuthData:
            # the ImageFileSize need include the full authenticated info size. From first bytes of MonotonicCount to last bytes of certificate.
            # the 32 bit is the MonotonicCount, dwLength, wRevision, wCertificateType and CertType
            ImageFileSize += 32
        VendorFileSize = 0
        if self.VendorCodeFile:
            VendorFileSize = os.path.getsize(self.VendorCodeFile)
        #
        # Fill structure
        #
        Guid = self.ImageTypeId.split('-')
        Buffer = pack('=ILHHBBBBBBBBBBBBIIQ',
                       int(self.Version, 16),
                       int(Guid[0], 16),
                       int(Guid[1], 16),
                       int(Guid[2], 16),
                       int(Guid[3][-4:-2], 16),
                       int(Guid[3][-2:], 16),
                       int(Guid[4][-12:-10], 16),
                       int(Guid[4][-10:-8], 16),
                       int(Guid[4][-8:-6], 16),
                       int(Guid[4][-6:-4], 16),
                       int(Guid[4][-4:-2], 16),
                       int(Guid[4][-2:], 16),
                       int(self.ImageIndex, 16),
                       0,
                       0,
                       0,
                       ImageFileSize,
                       VendorFileSize,
                       int(self.HardwareInstance, 16)
                       )
        if AuthData:
            Buffer += pack('QIHH', AuthData[0], AuthData[1], AuthData[2], AuthData[3])
            Buffer += uuid.UUID(AuthData[4]).bytes_le
        #
        # Append file content to the structure
        #
        # Use context managers so the handles are closed even if read()
        # raises (the previous open()/read()/close() sequence leaked the
        # descriptor on error).
        with open(self.ImageFile, 'rb') as ImageFile:
            Buffer += ImageFile.read()
        if self.VendorCodeFile:
            with open(self.VendorCodeFile, 'rb') as VendorFile:
                Buffer += VendorFile.read()
        self.Existed = True
        return Buffer
| 30.699187 | 144 | 0.553231 |
ace469bffe9b37bcccc73911d6d2343e609c3b72 | 22,944 | py | Python | python3-virtualenv/lib/python3.8/site-packages/sqlalchemy/ext/asyncio/engine.py | bbalkaransingh23888/OrientationHack | 7eae6cce1226112c000ea8a175f6dc5a82ee0ac2 | [
"MIT"
] | null | null | null | python3-virtualenv/lib/python3.8/site-packages/sqlalchemy/ext/asyncio/engine.py | bbalkaransingh23888/OrientationHack | 7eae6cce1226112c000ea8a175f6dc5a82ee0ac2 | [
"MIT"
] | null | null | null | python3-virtualenv/lib/python3.8/site-packages/sqlalchemy/ext/asyncio/engine.py | bbalkaransingh23888/OrientationHack | 7eae6cce1226112c000ea8a175f6dc5a82ee0ac2 | [
"MIT"
] | null | null | null | # ext/asyncio/engine.py
# Copyright (C) 2020-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import exc as async_exc
from .base import ProxyComparable
from .base import StartableContext
from .result import AsyncResult
from ... import exc
from ... import util
from ...engine import create_engine as _create_engine
from ...engine.base import NestedTransaction
from ...future import Connection
from ...future import Engine
from ...util.concurrency import greenlet_spawn
def create_async_engine(*arg, **kw):
    """Create a new async engine instance.

    Accepts the same arguments as :func:`_sa.create_engine`; the dialect
    named in the URL must be asyncio-compatible (e.g.
    :ref:`dialect-postgresql-asyncpg`).  The returned
    :class:`_asyncio.AsyncEngine` wraps a "future"-mode sync engine.

    .. versionadded:: 1.4
    """
    # Server-side cursors cannot be enabled engine-wide for async usage;
    # streaming is requested per-connection via AsyncConnection.stream().
    if kw.get("server_side_cursors", False):
        raise async_exc.AsyncMethodRequired(
            "Can't set server_side_cursors for async engine globally; "
            "use the connection.stream() method for an async "
            "streaming result set"
        )
    kw["future"] = True
    return AsyncEngine(_create_engine(*arg, **kw))
class AsyncConnectable:
    """Common base for async objects that can produce database connections."""
    # _slots_dispatch supports the event system; __weakref__ allows the
    # proxy-tracking machinery to hold weak references to instances.
    __slots__ = "_slots_dispatch", "__weakref__"
@util.create_proxy_methods(
    Connection,
    ":class:`_future.Connection`",
    ":class:`_asyncio.AsyncConnection`",
    classmethods=[],
    methods=[],
    attributes=[
        "closed",
        "invalidated",
        "dialect",
        "default_isolation_level",
    ],
)
class AsyncConnection(ProxyComparable, StartableContext, AsyncConnectable):
    """An asyncio proxy for a :class:`_engine.Connection`.
    :class:`_asyncio.AsyncConnection` is acquired using the
    :meth:`_asyncio.AsyncEngine.connect`
    method of :class:`_asyncio.AsyncEngine`::
        from sqlalchemy.ext.asyncio import create_async_engine
        engine = create_async_engine("postgresql+asyncpg://user:pass@host/dbname")
        async with engine.connect() as conn:
            result = await conn.execute(select(table))
    .. versionadded:: 1.4
    """  # noqa
    # AsyncConnection is a thin proxy; no state should be added here
    # that is not retrievable from the "sync" engine / connection, e.g.
    # current transaction, info, etc. It should be possible to
    # create a new AsyncConnection that matches this one given only the
    # "sync" elements.
    __slots__ = (
        "sync_engine",
        "sync_connection",
    )
    def __init__(self, async_engine, sync_connection=None):
        # sync_connection stays None until start() acquires one lazily.
        self.engine = async_engine
        self.sync_engine = async_engine.sync_engine
        self.sync_connection = self._assign_proxied(sync_connection)
    @classmethod
    def _regenerate_proxy_for_target(cls, target):
        # Rebuild an async proxy for a sync Connection recovered from the
        # proxy-tracking registry.
        return AsyncConnection(
            AsyncEngine._retrieve_proxy_for_target(target.engine), target
        )
    async def start(self, is_ctxmanager=False):
        """Start this :class:`_asyncio.AsyncConnection` object's context
        outside of using a Python ``with:`` block.
        """
        if self.sync_connection:
            raise exc.InvalidRequestError("connection is already started")
        self.sync_connection = self._assign_proxied(
            await (greenlet_spawn(self.sync_engine.connect))
        )
        return self
    @property
    def connection(self):
        """Not implemented for async; call
        :meth:`_asyncio.AsyncConnection.get_raw_connection`.
        """
        raise exc.InvalidRequestError(
            "AsyncConnection.connection accessor is not implemented as the "
            "attribute may need to reconnect on an invalidated connection. "
            "Use the get_raw_connection() method."
        )
    async def get_raw_connection(self):
        """Return the pooled DBAPI-level connection in use by this
        :class:`_asyncio.AsyncConnection`.
        This is typically the SQLAlchemy connection-pool proxied connection
        which then has an attribute .connection that refers to the actual
        DBAPI-level connection.
        """
        conn = self._sync_connection()
        # getattr runs inside the greenlet because the pool may reconnect.
        return await greenlet_spawn(getattr, conn, "connection")
    @property
    def _proxied(self):
        return self.sync_connection
    @property
    def info(self):
        """Return the :attr:`_engine.Connection.info` dictionary of the
        underlying :class:`_engine.Connection`.
        This dictionary is freely writable for user-defined state to be
        associated with the database connection.
        This attribute is only available if the :class:`.AsyncConnection` is
        currently connected.   If the :attr:`.AsyncConnection.closed` attribute
        is ``True``, then accessing this attribute will raise
        :class:`.ResourceClosedError`.
        .. versionadded:: 1.4.0b2
        """
        return self.sync_connection.info
    def _sync_connection(self):
        # Guard used by every method: raise if start() was never awaited.
        if not self.sync_connection:
            self._raise_for_not_started()
        return self.sync_connection
    def begin(self):
        """Begin a transaction prior to autobegin occurring."""
        self._sync_connection()
        return AsyncTransaction(self)
    def begin_nested(self):
        """Begin a nested transaction and return a transaction handle."""
        self._sync_connection()
        return AsyncTransaction(self, nested=True)
    async def invalidate(self, exception=None):
        """Invalidate the underlying DBAPI connection associated with
        this :class:`_engine.Connection`.
        See the method :meth:`_engine.Connection.invalidate` for full
        detail on this method.
        """
        conn = self._sync_connection()
        return await greenlet_spawn(conn.invalidate, exception=exception)
    async def get_isolation_level(self):
        conn = self._sync_connection()
        return await greenlet_spawn(conn.get_isolation_level)
    async def set_isolation_level(self):
        # NOTE(review): this body is identical to get_isolation_level()
        # above — it calls conn.get_isolation_level and accepts no level
        # argument, so it cannot actually set anything.  Looks like a
        # copy/paste defect; confirm intended behavior upstream (isolation
        # level is normally set via execution_options()).
        conn = self._sync_connection()
        return await greenlet_spawn(conn.get_isolation_level)
    def in_transaction(self):
        """Return True if a transaction is in progress.
        .. versionadded:: 1.4.0b2
        """
        conn = self._sync_connection()
        return conn.in_transaction()
    def in_nested_transaction(self):
        """Return True if a transaction is in progress.
        .. versionadded:: 1.4.0b2
        """
        conn = self._sync_connection()
        return conn.in_nested_transaction()
    def get_transaction(self):
        """Return an :class:`.AsyncTransaction` representing the current
        transaction, if any.
        This makes use of the underlying synchronous connection's
        :meth:`_engine.Connection.get_transaction` method to get the current
        :class:`_engine.Transaction`, which is then proxied in a new
        :class:`.AsyncTransaction` object.
        .. versionadded:: 1.4.0b2
        """
        conn = self._sync_connection()
        trans = conn.get_transaction()
        if trans is not None:
            return AsyncTransaction._retrieve_proxy_for_target(trans)
        else:
            return None
    def get_nested_transaction(self):
        """Return an :class:`.AsyncTransaction` representing the current
        nested (savepoint) transaction, if any.
        This makes use of the underlying synchronous connection's
        :meth:`_engine.Connection.get_nested_transaction` method to get the
        current :class:`_engine.Transaction`, which is then proxied in a new
        :class:`.AsyncTransaction` object.
        .. versionadded:: 1.4.0b2
        """
        conn = self._sync_connection()
        trans = conn.get_nested_transaction()
        if trans is not None:
            return AsyncTransaction._retrieve_proxy_for_target(trans)
        else:
            return None
    async def execution_options(self, **opt):
        r"""Set non-SQL options for the connection which take effect
        during execution.
        This returns this :class:`_asyncio.AsyncConnection` object with
        the new options added.
        See :meth:`_future.Connection.execution_options` for full details
        on this method.
        """
        conn = self._sync_connection()
        c2 = await greenlet_spawn(conn.execution_options, **opt)
        # In "future" mode execution_options mutates in place and returns
        # the same connection; assert that invariant.
        assert c2 is conn
        return self
    async def commit(self):
        """Commit the transaction that is currently in progress.
        This method commits the current transaction if one has been started.
        If no transaction was started, the method has no effect, assuming
        the connection is in a non-invalidated state.
        A transaction is begun on a :class:`_future.Connection` automatically
        whenever a statement is first executed, or when the
        :meth:`_future.Connection.begin` method is called.
        """
        conn = self._sync_connection()
        await greenlet_spawn(conn.commit)
    async def rollback(self):
        """Roll back the transaction that is currently in progress.
        This method rolls back the current transaction if one has been started.
        If no transaction was started, the method has no effect.  If a
        transaction was started and the connection is in an invalidated state,
        the transaction is cleared using this method.
        A transaction is begun on a :class:`_future.Connection` automatically
        whenever a statement is first executed, or when the
        :meth:`_future.Connection.begin` method is called.
        """
        conn = self._sync_connection()
        await greenlet_spawn(conn.rollback)
    async def close(self):
        """Close this :class:`_asyncio.AsyncConnection`.
        This has the effect of also rolling back the transaction if one
        is in place.
        """
        conn = self._sync_connection()
        await greenlet_spawn(conn.close)
    async def exec_driver_sql(
        self,
        statement,
        parameters=None,
        execution_options=util.EMPTY_DICT,
    ):
        r"""Executes a driver-level SQL string and return buffered
        :class:`_engine.Result`.
        """
        conn = self._sync_connection()
        result = await greenlet_spawn(
            conn.exec_driver_sql,
            statement,
            parameters,
            execution_options,
            _require_await=True,
        )
        if result.context._is_server_side:
            # NOTE(review): the adjacent literals below concatenate without
            # a space ("cursor.Use") — message typo to fix upstream.
            raise async_exc.AsyncMethodRequired(
                "Can't use the connection.exec_driver_sql() method with a "
                "server-side cursor."
                "Use the connection.stream() method for an async "
                "streaming result set."
            )
        return result
    async def stream(
        self,
        statement,
        parameters=None,
        execution_options=util.EMPTY_DICT,
    ):
        """Execute a statement and return a streaming
        :class:`_asyncio.AsyncResult` object."""
        conn = self._sync_connection()
        # Force stream_results so the dialect produces a server-side cursor.
        result = await greenlet_spawn(
            conn._execute_20,
            statement,
            parameters,
            util.EMPTY_DICT.merge_with(execution_options, {"stream_results": True}),
            _require_await=True,
        )
        if not result.context._is_server_side:
            # TODO: real exception here
            assert False, "server side result expected"
        return AsyncResult(result)
    async def execute(
        self,
        statement,
        parameters=None,
        execution_options=util.EMPTY_DICT,
    ):
        r"""Executes a SQL statement construct and return a buffered
        :class:`_engine.Result`.
        :param object: The statement to be executed.  This is always
         an object that is in both the :class:`_expression.ClauseElement` and
         :class:`_expression.Executable` hierarchies, including:
         * :class:`_expression.Select`
         * :class:`_expression.Insert`, :class:`_expression.Update`,
           :class:`_expression.Delete`
         * :class:`_expression.TextClause` and
           :class:`_expression.TextualSelect`
         * :class:`_schema.DDL` and objects which inherit from
           :class:`_schema.DDLElement`
        :param parameters: parameters which will be bound into the statement.
         This may be either a dictionary of parameter names to values,
         or a mutable sequence (e.g. a list) of dictionaries.  When a
         list of dictionaries is passed, the underlying statement execution
         will make use of the DBAPI ``cursor.executemany()`` method.
         When a single dictionary is passed, the DBAPI ``cursor.execute()``
         method will be used.
        :param execution_options: optional dictionary of execution options,
         which will be associated with the statement execution.  This
         dictionary can provide a subset of the options that are accepted
         by :meth:`_future.Connection.execution_options`.
        :return: a :class:`_engine.Result` object.
        """
        conn = self._sync_connection()
        result = await greenlet_spawn(
            conn._execute_20,
            statement,
            parameters,
            execution_options,
            _require_await=True,
        )
        if result.context._is_server_side:
            # NOTE(review): same missing-space message typo as in
            # exec_driver_sql() above.
            raise async_exc.AsyncMethodRequired(
                "Can't use the connection.execute() method with a "
                "server-side cursor."
                "Use the connection.stream() method for an async "
                "streaming result set."
            )
        return result
    async def scalar(
        self,
        statement,
        parameters=None,
        execution_options=util.EMPTY_DICT,
    ):
        r"""Executes a SQL statement construct and returns a scalar object.
        This method is shorthand for invoking the
        :meth:`_engine.Result.scalar` method after invoking the
        :meth:`_future.Connection.execute` method.  Parameters are equivalent.
        :return: a scalar Python value representing the first column of the
         first row returned.
        """
        result = await self.execute(statement, parameters, execution_options)
        return result.scalar()
    async def run_sync(self, fn, *arg, **kw):
        """Invoke the given sync callable passing self as the first argument.
        This method maintains the asyncio event loop all the way through
        to the database connection by running the given callable in a
        specially instrumented greenlet.
        E.g.::
            with async_engine.begin() as conn:
                await conn.run_sync(metadata.create_all)
        .. note::
            The provided callable is invoked inline within the asyncio event
            loop, and will block on traditional IO calls.  IO within this
            callable should only call into SQLAlchemy's asyncio database
            APIs which will be properly adapted to the greenlet context.
        .. seealso::
            :ref:`session_run_sync`
        """
        conn = self._sync_connection()
        return await greenlet_spawn(fn, conn, *arg, **kw)
    def __await__(self):
        # Allows `conn = await engine.connect()` as well as `async with`.
        return self.start().__await__()
    async def __aexit__(self, type_, value, traceback):
        await self.close()
@util.create_proxy_methods(
    Engine,
    ":class:`_future.Engine`",
    ":class:`_asyncio.AsyncEngine`",
    classmethods=[],
    methods=[
        "clear_compiled_cache",
        "update_execution_options",
        "get_execution_options",
    ],
    attributes=["url", "pool", "dialect", "engine", "name", "driver", "echo"],
)
class AsyncEngine(ProxyComparable, AsyncConnectable):
    """An asyncio proxy for a :class:`_engine.Engine`.
    :class:`_asyncio.AsyncEngine` is acquired using the
    :func:`_asyncio.create_async_engine` function::
        from sqlalchemy.ext.asyncio import create_async_engine
        engine = create_async_engine("postgresql+asyncpg://user:pass@host/dbname")
    .. versionadded:: 1.4
    """  # noqa
    # AsyncEngine is a thin proxy; no state should be added here
    # that is not retrievable from the "sync" engine / connection, e.g.
    # current transaction, info, etc.  It should be possible to
    # create a new AsyncEngine that matches this one given only the
    # "sync" elements.
    __slots__ = ("sync_engine", "_proxied")
    _connection_cls = AsyncConnection
    _option_cls: type
    class _trans_ctx(StartableContext):
        # Async context manager returned by begin(): opens the connection,
        # enters a transaction, and closes both on exit.
        def __init__(self, conn):
            self.conn = conn
        async def start(self, is_ctxmanager=False):
            await self.conn.start(is_ctxmanager=is_ctxmanager)
            self.transaction = self.conn.begin()
            await self.transaction.__aenter__()
            return self.conn
        async def __aexit__(self, type_, value, traceback):
            # The transaction commits or rolls back based on the exception
            # state before the connection is closed.
            await self.transaction.__aexit__(type_, value, traceback)
            await self.conn.close()
    def __init__(self, sync_engine):
        if not sync_engine.dialect.is_async:
            raise exc.InvalidRequestError(
                "The asyncio extension requires an async driver to be used. "
                f"The loaded {sync_engine.dialect.driver!r} is not async."
            )
        self.sync_engine = self._proxied = self._assign_proxied(sync_engine)
    @classmethod
    def _regenerate_proxy_for_target(cls, target):
        return AsyncEngine(target)
    def begin(self):
        """Return a context manager which when entered will deliver an
        :class:`_asyncio.AsyncConnection` with an
        :class:`_asyncio.AsyncTransaction` established.
        E.g.::
            async with async_engine.begin() as conn:
                await conn.execute(
                    text("insert into table (x, y, z) values (1, 2, 3)")
                )
                await conn.execute(text("my_special_procedure(5)"))
        """
        conn = self.connect()
        return self._trans_ctx(conn)
    def connect(self):
        """Return an :class:`_asyncio.AsyncConnection` object.
        The :class:`_asyncio.AsyncConnection` will procure a database
        connection from the underlying connection pool when it is entered
        as an async context manager::
            async with async_engine.connect() as conn:
                result = await conn.execute(select(user_table))
        The :class:`_asyncio.AsyncConnection` may also be started outside of a
        context manager by invoking its :meth:`_asyncio.AsyncConnection.start`
        method.
        """
        return self._connection_cls(self)
    async def raw_connection(self):
        """Return a "raw" DBAPI connection from the connection pool.
        .. seealso::
            :ref:`dbapi_connections`
        """
        return await greenlet_spawn(self.sync_engine.raw_connection)
    def execution_options(self, **opt):
        """Return a new :class:`_asyncio.AsyncEngine` that will provide
        :class:`_asyncio.AsyncConnection` objects with the given execution
        options.
        Proxied from :meth:`_future.Engine.execution_options`.  See that
        method for details.
        """
        # Unlike AsyncConnection.execution_options, this wraps a NEW sync
        # engine rather than mutating in place.
        return AsyncEngine(self.sync_engine.execution_options(**opt))
    async def dispose(self):
        """Dispose of the connection pool used by this
        :class:`_asyncio.AsyncEngine`.
        This will close all connection pool connections that are
        **currently checked in**.  See the documentation for the underlying
        :meth:`_future.Engine.dispose` method for further notes.
        .. seealso::
            :meth:`_future.Engine.dispose`
        """
        return await greenlet_spawn(self.sync_engine.dispose)
class AsyncTransaction(ProxyComparable, StartableContext):
    """An asyncio proxy for a :class:`_engine.Transaction`."""

    __slots__ = ("connection", "sync_transaction", "nested")

    def __init__(self, connection, nested=False):
        self.connection = connection  # AsyncConnection
        self.sync_transaction = None  # sqlalchemy.engine.Transaction
        self.nested = nested

    @classmethod
    def _regenerate_proxy_for_target(cls, target):
        # Rebuild an AsyncTransaction wrapper for an existing sync
        # Transaction, reusing the async proxy of its connection.
        sync_connection = target.connection
        sync_transaction = target
        nested = isinstance(target, NestedTransaction)

        async_connection = AsyncConnection._retrieve_proxy_for_target(sync_connection)
        assert async_connection is not None

        # __new__ bypasses __init__ so we can attach pre-existing state.
        obj = cls.__new__(cls)
        obj.connection = async_connection
        obj.sync_transaction = obj._assign_proxied(sync_transaction)
        obj.nested = nested
        return obj

    def _sync_transaction(self):
        # Guard: raises if start() was never awaited.
        if not self.sync_transaction:
            self._raise_for_not_started()
        return self.sync_transaction

    @property
    def _proxied(self):
        return self.sync_transaction

    @property
    def is_valid(self):
        return self._sync_transaction().is_valid

    @property
    def is_active(self):
        return self._sync_transaction().is_active

    async def close(self):
        """Close this :class:`.Transaction`.

        If this transaction is the base transaction in a begin/commit
        nesting, the transaction will rollback().  Otherwise, the
        method returns.

        This is used to cancel a Transaction without affecting the scope of
        an enclosing transaction.

        """
        await greenlet_spawn(self._sync_transaction().close)

    async def rollback(self):
        """Roll back this :class:`.Transaction`."""
        await greenlet_spawn(self._sync_transaction().rollback)

    async def commit(self):
        """Commit this :class:`.Transaction`."""
        await greenlet_spawn(self._sync_transaction().commit)

    async def start(self, is_ctxmanager=False):
        """Start this :class:`_asyncio.AsyncTransaction` object's context
        outside of using a Python ``with:`` block.

        """
        # begin_nested for SAVEPOINTs, begin for an outer transaction.
        self.sync_transaction = self._assign_proxied(
            await greenlet_spawn(
                self.connection._sync_connection().begin_nested
                if self.nested
                else self.connection._sync_connection().begin
            )
        )
        if is_ctxmanager:
            self.sync_transaction.__enter__()
        return self

    async def __aexit__(self, type_, value, traceback):
        await greenlet_spawn(self._sync_transaction().__exit__, type_, value, traceback)
def _get_sync_engine_or_connection(async_engine):
    """Unwrap an async proxy down to its plain (sync) engine or connection.

    Accepts an :class:`AsyncConnection` or an :class:`AsyncEngine`; any
    other object raises :class:`~sqlalchemy.exc.ArgumentError`.
    """
    if isinstance(async_engine, AsyncConnection):
        return async_engine.sync_connection

    try:
        return async_engine.sync_engine
    except AttributeError as err:
        raise exc.ArgumentError(
            "AsyncEngine expected, got %r" % async_engine
        ) from err
| 32.590909 | 88 | 0.648579 |
ace46a55c0794dc3760e4ae9c16a2140cce3e46e | 458 | py | Python | apps/tips/urls.py | MeirKriheli/debian.org.il | 90bff955c38f7e6e51647463c6a59701302de8e1 | [
"MIT"
] | null | null | null | apps/tips/urls.py | MeirKriheli/debian.org.il | 90bff955c38f7e6e51647463c6a59701302de8e1 | [
"MIT"
] | 1 | 2017-05-09T17:22:07.000Z | 2017-06-04T14:42:01.000Z | apps/tips/urls.py | MeirKriheli/debian.org.il | 90bff955c38f7e6e51647463c6a59701302de8e1 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from .views import TipsIndexView, TipDetailView, TipsByTagView
app_name = 'tips'
urlpatterns = [
    # Tips landing page: lists all tips.
    url(
        regex=r'^$',
        view=TipsIndexView.as_view(),
        name='index'),
    # Detail page for a single tip, addressed by slug.
    url(
        regex=r'^(?P<slug>[0-9A-Za-z-_]+)/$',
        view=TipDetailView.as_view(),
        name='tip'),
    # Tips filtered by a tag slug.
    url(
        regex=r'^tag/(?P<slug>[0-9A-Za-z-_]+)/$',
        view=TipsByTagView.as_view(),
        name='tag'),
]
ace46be3efc968ed3d8ba591834e70ff227ed9b7 | 50,105 | py | Python | armoryengine/MultiSigUtils.py | Manny27nyc/BitcoinArmory | 1d02a6640d6257ab0c37013e5cd4b99681a5cfc3 | [
"MIT"
] | 505 | 2016-02-04T15:54:46.000Z | 2022-03-27T18:43:01.000Z | armoryengine/MultiSigUtils.py | Lcote71/BitcoinArmory | 1d02a6640d6257ab0c37013e5cd4b99681a5cfc3 | [
"MIT"
] | 528 | 2016-02-06T19:50:12.000Z | 2022-01-15T10:21:16.000Z | armoryengine/MultiSigUtils.py | Lcote71/BitcoinArmory | 1d02a6640d6257ab0c37013e5cd4b99681a5cfc3 | [
"MIT"
] | 195 | 2016-02-03T21:54:44.000Z | 2022-03-08T09:26:16.000Z | ################################################################################
# #
# Copyright (C) 2011-2015, Armory Technologies, Inc. #
# Distributed under the GNU Affero General Public License (AGPL v3) #
# See LICENSE or http://www.gnu.org/licenses/agpl.html #
# #
################################################################################
from armoryengine.ArmoryUtils import *
from armoryengine.Transaction import *
from armoryengine.Script import *
MULTISIG_VERSION = 1
################################################################################
#
# Multi-signature transactions are going to require a ton of different
# primitives to be both useful and safe for escrow. All primitives should
# have an ASCII-armored-esque form for transmission through email or text
# file, as well as binary form for when file transfer is guaranteed
#
# Until Armory implements BIP 32, these utilities are more suited to
# low-volume use cases, such as one-time escrow, or long-term savings
# using multi-device authentication. Multi-signature *wallets* which
# behave like regular wallets but spit out P2SH addresses and usable
# in every day situations -- those will have to wait for Armory's new
# wallet format.
#
# Concepts:
# "Lockbox": A "lock box" for putting coins that will be protected
# with multiple signatures. The lockbox contains both
# the script info as well as meta-data, like participants'
# names and emails associated with each public key.
#
#
#
#
#
################################################################################
"""
Use-Case 1 -- Protecting coins with 2-of-3 computers (2 offline, 1 online):
Create or access existing wallet on each of three computers.
Online computer will create the lockbox - needs one public key from its
own wallet, and one from each offline wallet. Can have both WO wallets
on the online computer, and pull keys directly from those.
User creates an lockbox with all three keys, labeling them appropriately
This lockbox will be added to the global list.
User will fund the lockbox from an existing offline wallet with lots of
money. He does not need to use the special funding procedure, which is
only needed if there's multiple people involved with less than full trust.
Creates the transaction as usual, but uses the "lockbox" button for the
recipient instead of normal address. The address line will show the
lockbox ID and short description.
Will save the lockbox and the offline transaction to the USB drive
"""
LOCKBOXIDSIZE = 8
PROMIDSIZE = 4
LBPREFIX, LBSUFFIX = 'Lockbox[Bare:', ']'
LBP2SHPREFIX = 'Lockbox['
#multisig address types
LBTYPE_RAW = "lba_raw"
LBTYPE_P2SH = "lba_p2sh"
LBTYPE_NESTED_P2WSH = "lba_nested_p2wsh"
#############################################################################
def getRecipStr(decoratedTxOut):
    """Return a human-readable recipient string for a decorated TxOut.

    Address-type scripts render as an address string; bare multisig renders
    as 'Multisig M-of-N (<lockboxID>)'; anything else yields ''.
    """
    scrType = decoratedTxOut.scriptType

    if scrType in CPP_TXOUT_HAS_ADDRSTR:
        return script_to_addrStr(decoratedTxOut.binScript)

    if scrType == CPP_TXOUT_MULTISIG:
        msInfo = decoratedTxOut.multiInfo
        boxID = calcLockboxID(decoratedTxOut.binScript)
        return 'Multisig %d-of-%d (%s)' % (msInfo['M'], msInfo['N'], boxID)

    return ''
################################################################################
def calcLockboxID(script=None, scraddr=None):
    """Compute the 8-character base58 lockbox ID.

    Accepts either a bare multisig output script, or a precomputed multisig
    "ScrAddr" (0xfe + M + N + sorted hash160s of the public keys).  Returns
    None (after logging an error) on any invalid input.
    """
    # ScrAddr is "Script/Address" and for multisig it is 0xfe followed by
    # M and N, then the SORTED hash160 values of the public keys.
    # Part of the reason for using "ScrAddrs" is to bundle together
    # different scripts that have the same effective signing authority:
    # different sortings of the same public key list have the same signing
    # authority and therefore should have the same ScrAddr.
    if script is not None:
        scrType = getTxOutScriptType(script)
        if not scrType == CPP_TXOUT_MULTISIG:
            LOGERROR('Not a multisig script!')
            return None
        scraddr = script_to_scrAddr(script)

    # Fix: previously, calling with neither argument raised AttributeError
    # on scraddr.startswith; now follow the function's log-and-return-None
    # error convention instead.
    if scraddr is None:
        LOGERROR('calcLockboxID requires either a script or a scraddr')
        return None

    if not scraddr.startswith(SCRADDR_MULTISIG_BYTE):
        LOGERROR('ScrAddr is not a multisig script!')
        return None

    # ID is derived from the network magic + scraddr so the same lockbox on
    # different networks gets different IDs.
    hashedData = hash160(MAGIC_BYTES + scraddr)
    return binary_to_base58(hashedData)[1:9]
################################################################################
def createLockboxEntryStr(lbID, isBareMultiSig=False):
    """Wrap a lockbox ID in its display form: 'Lockbox[<id>]' for P2SH, or
    'Lockbox[Bare:<id>]' for bare multisig."""
    prefix = LBPREFIX if isBareMultiSig else LBP2SHPREFIX
    return prefix + lbID + LBSUFFIX
################################################################################
def readLockboxEntryStr(addrtext):
    """Parse a 'Lockbox[<id>]' or 'Lockbox[Bare:<id>]' entry string.

    Returns the 8-character lockbox ID, or None if the text is not a
    well-formed lockbox entry.
    """
    result = None
    if isBareLockbox(addrtext) or isP2SHLockbox(addrtext):
        # Fix: removed a dead statement that computed len(prefix) and
        # discarded the result.
        prefixLen = len(LBPREFIX if isBareLockbox(addrtext) else LBP2SHPREFIX)
        idStr = addrtext[prefixLen:addrtext.find(LBSUFFIX)]
        if len(idStr) == LOCKBOXIDSIZE:
            result = idStr
    return result
################################################################################
def isBareLockbox(addrtext):
    """True if *addrtext* is a bare-multisig lockbox entry string."""
    return addrtext[:len(LBPREFIX)] == LBPREFIX
def scrAddr_to_displayStr(scrAddr, wltMap, lbMap):
    """Render a scrAddr for display: a plain address string for P2PKH/P2SH,
    a script display string for multisig, '' (with an error logged) otherwise."""
    prefixByte = scrAddr[0]

    if prefixByte in (SCRADDR_P2PKH_BYTE, SCRADDR_P2SH_BYTE):
        return scrAddr_to_addrStr(scrAddr)

    if prefixByte == SCRADDR_MULTISIG_BYTE:
        return getDisplayStringForScript(scrAddr[1:], wltMap, lbMap)

    LOGERROR('scrAddr %s is invalid.' % binary_to_hex(scrAddr))
    return ''
################################################################################
def isP2SHLockbox(addrtext):
    """True if *addrtext* is a P2SH lockbox entry string.

    The bare prefix starts with the P2SH prefix, so an explicit
    not-bare check is required (historical bugfix).
    """
    if isBareLockbox(addrtext):
        return False
    return addrtext.startswith(LBP2SHPREFIX)
################################################################################
def getWltFromB58ID(inB58ID, inWltMap, inLBMap, inLBWltMap):
    """Resolve a base58 ID to a wallet object.

    Returns (wallet, isCppWallet): a Python wallet (False) when the ID names
    a regular wallet, a C++ lockbox wallet (True) when it names a lockbox,
    or (None, True) after logging an error for an unknown ID.
    """
    if inB58ID in inWltMap.keys():
        return (inWltMap[inB58ID], False)

    if inB58ID in inLBMap.keys():
        return (inLBWltMap[inB58ID], True)

    LOGERROR('Base58 ID %s does not represent a valid wallet or lockbox.' % \
             inB58ID)
    return (None, True)
################################################################################
################################################################################
# Write serialized lockboxes to a file, either overwriting the file or
# appending to whatever is already there.
def writeLockboxesFile(inLockboxes, lbFilePath, append=False):
    """Serialize *inLockboxes* (ASCII form) into *lbFilePath*."""
    # Serialize everything before touching the file, so a serialization
    # error cannot leave behind a truncated or half-written file.
    textOut = '\n\n'.join([lb.serializeAscii() for lb in inLockboxes]) + '\n'

    with open(lbFilePath, 'a' if append else 'w') as f:
        f.write(textOut)
        f.flush()
        os.fsync(f.fileno())
################################################################################
# Read a lockboxes-definition file and return the MultiSigLockbox objects
# parsed from it.  (The previous comment here, describing e-mail sending,
# was copy-pasted from elsewhere and did not describe this function.)
def readLockboxesFile(lbFilePath):
    """Parse every '=====LOCKBOX' block in *lbFilePath*.

    On a parse error, parsing stops, a timestamped .bak copy of the file is
    saved, and the lockboxes read before the error are still returned.
    """
    retLBList = []

    with open(lbFilePath, 'r') as lbFileData:
        allData = lbFileData.read()

    # Find the lockbox starting point.
    startMark = '=====LOCKBOX'
    if startMark in allData:
        try:
            # Walk each start mark, collecting the text up to the next mark,
            # or to end-of-file for the final lockbox.
            pos = allData.find(startMark)
            while pos >= 0:
                nextPos = allData.find(startMark, pos + 1)
                if nextPos < 0:
                    nextPos = len(allData)

                # Pull in the LB text, parse it, and add it to the list.
                lbBlock = allData[pos:nextPos].strip()
                lbox = MultiSigLockbox().unserializeAscii(lbBlock)
                LOGINFO('Read in Lockbox: %s' % lbox.uniqueIDB58)
                retLBList.append(lbox)
                pos = allData.find(startMark, pos + 1)
        except Exception:
            # Fix: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit.  int() replaces Py2-only long()
            # (identical behavior on Py2, valid on Py3).
            LOGEXCEPT('Error reading lockboxes file')
            shutil.copy(lbFilePath, lbFilePath + '.%d.bak' % int(RightNow()))

    return retLBList
#############################################################################
def getLockboxFilePaths():
    '''Return the paths of all lockbox files in the Armory home directory.

    Currently only the shared multisig file is reported; standalone lockbox
    files are intentionally not scanned for.
    '''
    paths = []

    if os.path.isfile(MULTISIG_FILE):
        paths.append(MULTISIG_FILE)

    # For now, no standalone lockboxes will be returned. Maybe later....
    return paths
#############################################################################
def isMofNNonStandardToSpend(m, n):
    """Return True if an M-of-N multisig spend is non-standard.

    Minimum non-standard spends: 4-of-4, 3-of-5, 2-of-6, and any M with
    more than 6 keys.
    """
    if n > 6:
        return True
    if m > 3:
        return n > 3
    if m > 2:
        return n > 4
    if m > 1:
        return n > 5
    return False
################################################################################
class LockboxAddresses(object):
    """
    feed this class a raw multisig script, get all script hash + address
    variations
    """

    #############################################################################
    def __init__(self, script=None, script_compressed=None):
        self.setupAddressesFromScript(script, script_compressed)

    #############################################################################
    def setupAddressesFromScript(self, script, script_compressed):
        # Derive every supported representation of the multisig script:
        # bare scrAddr, P2SH script/scrAddr, and P2SH-nested-P2WSH forms.
        self.script = script
        if script == None:
            return

        self.scrAddr = script_to_scrAddr(script)

        self.p2shScript = script_to_p2sh_script(script)
        self.p2shScrAddr = script_to_scrAddr(self.p2shScript)

        # P2WSH uses the compressed-key variant of the script; the witness
        # program is 0x00 followed by the sha256 of that script.
        self.p2wsh_base_script = script_compressed
        self.scriptHash256 = sha256(self.p2wsh_base_script)
        bp = BinaryPacker()
        bp.put(UINT8, 0)
        bp.put(VAR_STR, self.scriptHash256)
        self.p2wsh_script = bp.getBinaryString()

        self.p2wsh_nested_script = script_to_p2sh_script(self.p2wsh_script)
        self.nested_p2wsh_scrAddr = \
            script_to_scrAddr(self.p2wsh_nested_script)

        # Default address flavor used when callers don't specify one.
        self.defaultScriptType = LBTYPE_P2SH

    #############################################################################
    def getAddr(self, _type=None):
        # Return the scrAddr for the requested flavor (default flavor if None).
        if _type == None:
            _type = self.defaultScriptType

        if _type == LBTYPE_RAW:
            return self.scrAddr
        elif _type == LBTYPE_P2SH:
            return self.p2shScrAddr
        elif _type == LBTYPE_NESTED_P2WSH:
            return self.nested_p2wsh_scrAddr
        else:
            raise Exception("illegal address type")

    #############################################################################
    def hasScrAddr(self, scrAddr):
        # True if scrAddr is any of this lockbox's three representations.
        if scrAddr == self.scrAddr or \
           scrAddr == self.p2shScrAddr or \
           scrAddr == self.nested_p2wsh_scrAddr:
            return True

        return False

    #############################################################################
    def isAddrSegWit(self, scrAddr):
        # Only the nested-P2WSH form is segwit.
        return scrAddr == self.nested_p2wsh_scrAddr

    #############################################################################
    def getScript(self, _type=None):
        # Return the output script for the requested flavor.
        if _type == None:
            _type = self.defaultScriptType

        if _type == LBTYPE_RAW:
            return self.script
        elif _type == LBTYPE_P2SH:
            return self.p2shScript
        elif _type == LBTYPE_NESTED_P2WSH:
            return self.p2wsh_nested_script
        else:
            raise Exception("illegal script type")

    #############################################################################
    def getScrAddrType(self, scrAddr):
        # Reverse lookup: which flavor does this scrAddr belong to?
        # Returns None when it isn't one of ours.
        if scrAddr == self.scrAddr:
            return LBTYPE_RAW
        elif scrAddr == self.p2shScrAddr:
            return LBTYPE_P2SH
        elif scrAddr == self.nested_p2wsh_scrAddr:
            return LBTYPE_NESTED_P2WSH

        return None

    #############################################################################
    def getScrAddrList(self):
        # All three scrAddr forms, recomputed from the stored scripts.
        scrAddrList = []
        scrAddrList.append(script_to_scrAddr(self.script))
        scrAddrList.append(script_to_scrAddr(self.p2shScript))
        scrAddrList.append(script_to_scrAddr(\
            script_to_p2sh_script(self.p2wsh_script)))

        return scrAddrList

    #############################################################################
    def getScriptDict(self):
        # Map hex scrAddrs (with and without the prefix byte) and witness
        # hashes to the script that redeems them.
        scriptDict = {}

        p2sh_hex = binary_to_hex(self.p2shScrAddr)
        p2sh_hex_noprefix = p2sh_hex[2:]
        scriptDict[p2sh_hex] = self.script
        scriptDict[p2sh_hex_noprefix] = self.script

        p2wsh_hex = binary_to_hex(self.nested_p2wsh_scrAddr)
        p2wsh_hex_noprefix = p2wsh_hex[2:]
        scriptDict[p2wsh_hex] = self.p2wsh_script
        scriptDict[p2wsh_hex_noprefix] = self.p2wsh_script

        scriptDict[self.p2wsh_script] = self.p2wsh_base_script
        scriptDict[binary_to_hex(self.scriptHash256)] = self.p2wsh_base_script

        return scriptDict

    #############################################################################
    def getChangeScript(self, utxoList=None):
        # Pick the change script flavor: if every utxo being spent uses a
        # single flavor, reuse it; otherwise fall back to the default.
        if utxoList == None:
            return self.getScript(self.defaultScriptType)

        hasRawScript = False
        hasP2SHScript = False
        hasNestedSWScript = False

        for utxo in utxoList:
            scrAddr = utxo.getRecipientScrAddr()
            scrType = self.getScrAddrType(scrAddr)
            if scrType == None:
                continue

            if scrType == LBTYPE_RAW:
                hasRawScript = True
            elif scrType == LBTYPE_P2SH:
                hasP2SHScript = True
            elif scrType == LBTYPE_NESTED_P2WSH:
                hasNestedSWScript = True

        returnType = None
        if hasRawScript and not hasP2SHScript and not hasNestedSWScript:
            returnType = LBTYPE_RAW
        elif hasP2SHScript and not hasRawScript and not hasNestedSWScript:
            returnType = LBTYPE_P2SH
        elif hasNestedSWScript and not hasP2SHScript and not hasRawScript:
            returnType = LBTYPE_NESTED_P2WSH

        if returnType == None:
            returnType = self.defaultScriptType

        return self.getScript(returnType)

    #############################################################################
    def setScriptType(self, _type):
        # Silently ignores unknown types rather than raising.
        if _type != LBTYPE_RAW and \
           _type != LBTYPE_P2SH and \
           _type != LBTYPE_NESTED_P2WSH:
            return

        self.defaultScriptType = _type

    #############################################################################
    def getScriptType(self):
        return self.defaultScriptType
################################################################################
################################################################################
class MultiSigLockbox(AsciiSerializable):
    """Definition of a multisig 'lockbox': M-of-N script plus metadata
    (name, description, per-key comments) and every derived address form."""

    OBJNAME   = 'Lockbox'
    BLKSTRING = 'LOCKBOX'
    EMAILSUBJ = 'Armory Lockbox Definition - %s'
    EMAILBODY = """
      The chunk of text below is a complete lockbox definition
      needed to track the balance of this multi-sig lockbox, as well
      as create signatures for proposed spending transactions. Open
      the Lockbox Manager, click "Import Lockbox" in the first row,
      then copy the text below into the import box, including the
      first and last lines. You will need to restart Armory and let
      it rescan if this lockbox has already been used."""

    #############################################################################
    def __init__(self, name=None, descr=None, M=None, N=None, dPubKeys=None,
                 createDate=None, version=MULTISIG_VERSION):
        self.version = MULTISIG_VERSION
        self.shortName = toUnicode(name)
        self.longDescr = toUnicode(descr)
        self.createDate = long(RightNow()) if createDate is None else createDate
        self.magicBytes = MAGIC_BYTES
        self.uniqueIDB58 = None
        self.asciiID = None
        self.addrStruct = LockboxAddresses()

        #UI member for rescans
        self.isEnabled = True

        # Only fully initialize when the multisig parameters were supplied.
        if (M is not None) and (N is not None) and (dPubKeys is not None):
            self.setParams(name, descr, M, N, dPubKeys, createDate, version)

    #############################################################################
    def registerLockbox(self, addressList, isNew=False):
        # Register this lockbox's addresses with the blockchain manager.
        return TheBDM.registerLockbox(self.uniqueIDB58, addressList, isNew)

    #############################################################################
    def setParams(self, name, descr, M, N, dPubKeys, createDate=None,
                  version=MULTISIG_VERSION):
        # (Re)compute all derived members from the M-of-N key list.
        self.version = version
        self.magicBytes = MAGIC_BYTES
        self.shortName = name
        self.longDescr = toUnicode(descr)
        self.M = M
        self.N = N
        self.dPubKeys = dPubKeys[:]

        binPubKeys = [p.binPubKey for p in dPubKeys]
        self.a160List = [hash160(p) for p in binPubKeys]

        # Compressed keys are needed for the P2WSH variant of the script.
        self.compressedPubKeys = []
        for pubkey in dPubKeys:
            pubkey_sbd = SecureBinaryData(pubkey.binPubKey)
            self.compressedPubKeys.append(CryptoECDSA().CompressPoint(pubkey_sbd).toBinStr())

        if createDate is not None:
            self.createDate = createDate

        script = pubkeylist_to_multisig_script(binPubKeys, self.M, True)
        script_compressed = \
            pubkeylist_to_multisig_script(self.compressedPubKeys, self.M, True)

        # Computed some derived members
        self.binScript = script
        self.binScriptCompressed = script_compressed
        self.addrStruct.setupAddressesFromScript(script, script_compressed)
        self.uniqueIDB58 = calcLockboxID(script)
        self.opStrList = convertScriptToOpStrings(script)
        self.asciiID = self.uniqueIDB58  # need a common member name in all classes

    #############################################################################
    def serialize(self):
        # Binary (version-1) serialization: header fields then N public keys.
        bp = BinaryPacker()
        bp.put(UINT32, self.version)
        bp.put(BINARY_CHUNK, MAGIC_BYTES)
        bp.put(UINT64, self.createDate)
        bp.put(VAR_STR, toBytes(self.shortName))
        bp.put(VAR_STR, toBytes(self.longDescr))
        bp.put(UINT8, self.M)
        bp.put(UINT8, self.N)
        for i in range(self.N):
            bp.put(VAR_STR, self.dPubKeys[i].serialize())
        return bp.getBinaryString()

    #############################################################################
    # In the final stages of lockbox design, I changed up the serialization
    # format for lockboxes, and decided to see how easy it was to transition
    # using the version numbers.  Here's the old unserialize version, modified
    # to map the old data to the new format.  ArmoryQt will read all the
    # lockboxes in the file, it will call this on each one of them, and then
    # it will write out all the lockboxes whic effectively, immediately upgrades
    # all of them.
    def unserialize_v0(self, rawData, expectID=None, skipMagicCheck=False):
        LOGWARN('Version 0 lockbox detected.  Reading and converting')
        bu = BinaryUnpacker(rawData)
        boxVersion = bu.get(UINT32)
        boxMagic = bu.get(BINARY_CHUNK, 4)
        created = bu.get(UINT64)
        boxScript = bu.get(VAR_STR)
        boxName = toUnicode(bu.get(VAR_STR))
        boxDescr = toUnicode(bu.get(VAR_STR))
        nComment = bu.get(UINT32)

        boxComms = ['']*nComment
        for i in range(nComment):
            boxComms[i] = toUnicode(bu.get(VAR_STR))

        # Check the magic bytes of the lockbox match
        if not boxMagic == MAGIC_BYTES and not skipMagicCheck:
            LOGERROR('Wrong network!')
            LOGERROR('    Lockbox Magic: ' + binary_to_hex(boxMagic))
            LOGERROR('    Armory Magic:  ' + binary_to_hex(MAGIC_BYTES))
            raise NetworkIDError('Network magic bytes mismatch')

        # Lockbox ID is written in the first line, it should match the script
        # If not maybe a version mistmatch, serialization error, or bug
        # (unfortunately, for mixed network testing, the lockbox ID is the
        # hash of the script & the MAGIC_BYTES, which means we need to
        # skip checking the ID if we are skipping MAGIC)
        if expectID and not calcLockboxID(boxScript) == expectID and not skipMagicCheck:
            LOGERROR('ID on lockbox block does not match script')
            raise UnserializeError('ID on lockbox does not match!')

        # Now we switch to the new setParams method
        M,N,a160s,pubs = getMultisigScriptInfo(boxScript)
        dPubKeys = [DecoratedPublicKey(pub, com) for pub,com in zip(pubs,boxComms)]

        # No need to read magic bytes -- already checked & bailed if incorrect
        self.setParams(boxName, boxDescr, M, N, dPubKeys, created)
        return self

    #############################################################################
    def unserialize(self, rawData, expectID=None, skipMagicCheck=False):
        bu = BinaryUnpacker(rawData)
        lboxVersion = bu.get(UINT32)

        # If this is an older version, use conversion method
        if lboxVersion==0:
            return self.unserialize_v0(rawData, expectID)

        lboxMagic = bu.get(BINARY_CHUNK, 4)
        created = bu.get(UINT64)
        lboxName = toUnicode(bu.get(VAR_STR))
        lboxDescr = toUnicode(bu.get(VAR_STR))
        M = bu.get(UINT8)
        N = bu.get(UINT8)

        dPubKeys = []
        for i in range(N):
            dPubKeys.append(DecoratedPublicKey().unserialize(bu.get(VAR_STR),
                                                 skipMagicCheck=skipMagicCheck))

        # Issue a warning if the versions don't match
        if not lboxVersion == MULTISIG_VERSION:
            LOGWARN('Unserialing lockbox of different version')
            LOGWARN('   Lockbox Version: %d' % lboxVersion)
            LOGWARN('   Armory  Version: %d' % MULTISIG_VERSION)

        # Check the magic bytes of the lockbox match
        if not lboxMagic == MAGIC_BYTES and not skipMagicCheck:
            LOGERROR('Wrong network!')
            LOGERROR('    Lockbox Magic: ' + binary_to_hex(lboxMagic))
            LOGERROR('    Armory  Magic: ' + binary_to_hex(MAGIC_BYTES))
            raise NetworkIDError('Network magic bytes mismatch')

        # Rebuild the script from the keys to validate the claimed ID.
        binPubKeys = [p.binPubKey for p in dPubKeys]
        lboxScript = pubkeylist_to_multisig_script(binPubKeys, M)

        # Lockbox ID is written in the first line, it should match the script
        # If not maybe a version mistmatch, serialization error, or bug
        if expectID and not calcLockboxID(lboxScript) == expectID and not skipMagicCheck:
            LOGERROR('ID on lockbox block does not match script')
            LOGERROR('    Expecting:  %s' % str(expectID))
            LOGERROR('    Calculated: %s' % str(calcLockboxID(lboxScript)))
            raise UnserializeError('ID on lockbox does not match!')

        # No need to read magic bytes -- already checked & bailed if incorrect
        self.setParams(lboxName, lboxDescr, M, N, dPubKeys, created)
        return self

    #############################################################################
    def toJSONMap(self):
        # JSON-friendly dict of every field plus derived addresses/scripts.
        outjson = {}
        outjson['version'] = self.version
        outjson['magicbytes'] = binary_to_hex(MAGIC_BYTES)
        outjson['id'] = self.asciiID
        outjson['lboxname'] = self.shortName
        outjson['lboxdescr'] = self.longDescr
        outjson['M'] = self.M
        outjson['N'] = self.N

        outjson['pubkeylist'] = [dpk.toJSONMap() for dpk in self.dPubKeys]

        outjson['a160list'] = [binary_to_hex(hash160(p.binPubKey)) \
                               for p in self.dPubKeys]

        outjson['addrstrs'] = [hash160_to_addrStr(hex_to_binary(a)) \
                               for a in outjson['a160list']]

        outjson['txoutscript'] = binary_to_hex(self.binScript)
        outjson['p2shscript'] = binary_to_hex(self.addrStruct.getScript(LBTYPE_P2SH))
        outjson['p2shaddr'] = scrAddr_to_addrStr(self.addrStruct.getAddr(LBTYPE_P2SH))
        outjson['createdate'] = self.createDate

        return outjson

    #############################################################################
    def fromJSONMap(self, jsonMap, skipMagicCheck=False):
        ver = jsonMap['version']
        magic = hex_to_binary(jsonMap['magicbytes'])
        uniq = jsonMap['id']

        # Issue a warning if the versions don't match
        # NOTE(review): this compares against UNSIGNED_TX_VERSION rather than
        # MULTISIG_VERSION, unlike unserialize() -- confirm intentional.
        if not ver == UNSIGNED_TX_VERSION:
            LOGWARN('Unserializing Lockbox of different version')
            LOGWARN('   USTX    Version: %d' % ver)
            LOGWARN('   Armory  Version: %d' % UNSIGNED_TX_VERSION)

        # Check the magic bytes of the lockbox match
        if not magic == MAGIC_BYTES and not skipMagicCheck:
            LOGERROR('Wrong network!')
            LOGERROR('    USTX    Magic: ' + binary_to_hex(magic))
            LOGERROR('    Armory  Magic: ' + binary_to_hex(MAGIC_BYTES))
            raise NetworkIDError('Network magic bytes mismatch')

        name = jsonMap['lboxname']
        descr = jsonMap['lboxdescr']
        M = jsonMap['M']
        N = jsonMap['N']

        pubs = []
        for i in range(N):
            pubs.append(DecoratedPublicKey().fromJSONMap(jsonMap['pubkeylist'][i], skipMagicCheck))

        created = jsonMap['createdate']

        self.setParams(name, descr, M, N, pubs, created)
        return self

    #############################################################################
    def pprint(self):
        # Debug dump of the lockbox to stdout (Python 2 print statements).
        print 'Multi-signature %d-of-%d lockbox:' % (self.M, self.N)
        print '   Unique ID:  ', self.uniqueIDB58
        print '   Created:    ', unixTimeToFormatStr(self.createDate)
        print '   LBox Name:  ', self.shortName
        print '   P2SHAddr:   ', scrAddr_to_addrStr(self.addrStruct.getAddr(LBTYPE_P2SH))
        print '   Box Desc:   '
        print '     ', self.longDescr[:70]
        print '   Key List:   '
        print '   Script Ops: '
        for opStr in self.opStrList:
            print '       ', opStr
        print''
        print '   Key Info:   '
        for i in range(len(self.dPubKeys)):
            print '            Key %d' % i
            print '           ', binary_to_hex(self.dPubKeys[i].binPubKey)[:40] + '...'
            print '           ', hash160_to_addrStr(self.a160List[i])
            print ''

    #############################################################################
    def pprintOneLine(self):
        print 'LockBox %s:  %s-of-%s, created: %s;  "%s"' % (self.uniqueIDB58,
            self.M, self.N, unixTimeToFormatStr(self.createDate), self.shortName)

    ################################################################################
    def createDecoratedTxOut(self, value=0, asP2SH=False):
        # Build a DecoratedTxOut paying this lockbox, either bare or P2SH.
        if not asP2SH:
            dtxoScript = self.binScript
            p2shScript = None
        else:
            dtxoScript = script_to_p2sh_script(self.binScript)
            p2shScript = self.binScript

        return DecoratedTxOut(dtxoScript, value, p2shScript)

    ################################################################################
    def makeFundingTxFromPromNotes(self, promList):
        # Merge multiple promissory notes into one unsigned funding tx that
        # pays this lockbox (bare multisig output) plus all change outputs.
        ustxiAccum = []

        totalPay = sum([prom.dtxoTarget.value for prom in promList])
        totalFee = sum([prom.feeAmt for prom in promList])

        # DTXO list always has at least the lockbox itself
        dtxoAccum = [self.createDecoratedTxOut(value=totalPay, asP2SH=False)]

        # Errors with the change values should've been caught in prom::setParams
        totalInputs = 0
        totalChange = 0
        for prom in promList:
            for ustxi in prom.ustxInputs:
                ustxiAccum.append(ustxi)
                totalInputs += ustxi.value

            # Add any change outputs
            if prom.dtxoChange and prom.dtxoChange.value > 0:
                dtxoAccum.append(prom.dtxoChange)
                totalChange += prom.dtxoChange.value

        # Sanity check: inputs must exactly cover target + fee + change.
        if not totalPay + totalFee == totalInputs - totalChange:
            raise ValueError('Promissory note values do not add up correctly')

        return UnsignedTransaction().createFromUnsignedTxIO(ustxiAccum, dtxoAccum)

    ################################################################################
    def makeSpendingTx(self, rawFundTxIdxPairs, dtxoList, feeAmt):
        # Build an unsigned tx spending this lockbox's outpoints to dtxoList,
        # appending a change output back to the lockbox if needed.
        ustxiAccum = []

        # Errors with the change values should've been caught in setParams
        totalInputs = 0
        anyP2SH = False
        for rawTx,txoIdx in rawFundTxIdxPairs:
            fundTx = PyTx().unserialize(rawTx)
            txout = fundTx.outputs[txoIdx]
            txoScript = txout.getScript()
            txoValue = txout.getValue()
            if not calcLockboxID(txoScript)==self.uniqueIDB58:
                raise InvalidScriptError('Given OutPoint is not for this lockbox')

            # If the funding tx is P2SH, make sure it matches the lockbox
            # then include the subscript in the USTXI
            p2shSubscript = None
            if getTxOutScriptType(txoScript) == CPP_TXOUT_P2SH:
                # setParams guarantees self.binScript is bare multi-sig script
                # NOTE(review): txP2SHScrAddr is a scrAddr while lbP2SHScrAddr
                # comes from script_to_p2sh_script (a script); confirm these
                # are comparable representations.
                txP2SHScrAddr = script_to_scrAddr(txoScript)
                lbP2SHScrAddr = script_to_p2sh_script(self.binScript)
                if not lbP2SHScrAddr == txP2SHScrAddr:
                    LOGERROR('Given utxo script hash does not match this lockbox')
                    raise InvalidScriptError('P2SH input does not match lockbox')
                p2shSubscript = self.binScript
                anyP2SH = True

            ustxiAccum.append(UnsignedTxInput(rawTx, txoIdx, p2shSubscript))
            totalInputs += txoValue

        # Copy the dtxoList since we're probably adding a change output
        dtxoAccum = dtxoList[:]

        totalOutputs = sum([dtxo.value for dtxo in dtxoAccum])
        changeAmt = totalInputs - (totalOutputs + feeAmt)
        if changeAmt < 0:
            raise ValueError('More outputs than inputs!')
        elif changeAmt > 0:
            # If adding change output, make it P2SH if any inputs were P2SH
            if not anyP2SH:
                txoScript = self.binScript
                p2shScript = None
            else:
                txoScript = script_to_p2sh_script(self.binScript)
                p2shScript = self.binScript
            dtxoAccum.append( DecoratedTxOut(txoScript, changeAmt, p2shScript))

        return UnsignedTransaction().createFromUnsignedTxIO(ustxiAccum, dtxoAccum)

    #############################################################################
    # The remaining methods delegate to the LockboxAddresses struct.
    def getAddr(self, _type=None):
        return self.addrStruct.getAddr(_type)

    #############################################################################
    def hasScrAddr(self, scrAddr):
        return self.addrStruct.hasScrAddr(scrAddr)

    #############################################################################
    def getScript(self, _type=None):
        return self.addrStruct.getScript(_type)

    #############################################################################
    def getScrAddrList(self):
        return self.addrStruct.getScrAddrList()

    #############################################################################
    def getScriptDict(self):
        return self.addrStruct.getScriptDict()

    #############################################################################
    def isAddrSegWit(self, scrAddr):
        return self.addrStruct.isAddrSegWit(scrAddr)

    #############################################################################
    def setScriptType(self, _type):
        self.addrStruct.setScriptType(_type)

    #############################################################################
    def getScriptType(self):
        return self.addrStruct.getScriptType()
################################################################################
################################################################################
class DecoratedPublicKey(AsciiSerializable):
OBJNAME = 'PublicKey'
BLKSTRING = 'PUBLICKEY'
EMAILSUBJ = 'Armory Public Key for Lockbox Creation - %s'
EMAILBODY = """
The chunk of text below is a public key that can be imported
into the lockbox creation window in Armory.
Open the lockbox manager,
click on "Create Lockbox", and then use the "Import" button
next to the address book button. Copy the following text
into the box, including the first and last lines."""
EQ_ATTRS_SIMPLE = ['version', 'binPubKey', 'keyComment', 'wltLocator',
'pubKeyID', 'asciiID', 'authMethod', 'authData']
#############################################################################
def __init__(self, binPubKey=None, keyComment=None, wltLoc=None,
authMethod=None, authData=None):
self.version = MULTISIG_VERSION
self.binPubKey = binPubKey
self.keyComment = ''
self.wltLocator = ''
self.authMethod = ''
self.authData = authData if authData else NullAuthData()
self.pubKeyID = None
self.asciiID = None
if binPubKey is not None:
self.setParams(binPubKey, keyComment, wltLoc, authMethod, authData,
version=self.version)
#############################################################################
def setParams(self, binPubKey, keyComment=None, wltLoc=None, authMethod=None,
authData=None, version=MULTISIG_VERSION):
# Set params will only overwrite with non-None data
self.binPubKey = binPubKey
if keyComment is not None:
self.keyComment = toUnicode(keyComment)
if wltLoc is not None:
self.wltLocator = wltLoc
if authMethod is not None:
self.authMethod = authMethod
if authData is not None:
self.authData = authData
self.version = version
pubkeyAddr = hash160_to_addrStr(hash160(binPubKey))
self.pubKeyID = pubkeyAddr[:12]
self.asciiID = self.pubKeyID # need a common member name in all classes
#############################################################################
def serialize(self):
if not self.binPubKey:
LOGERROR('Cannot serialize uninitialized pubkey')
return None
bp = BinaryPacker()
bp.put(UINT32, self.version)
bp.put(BINARY_CHUNK, MAGIC_BYTES)
bp.put(VAR_STR, self.binPubKey)
bp.put(VAR_STR, toBytes(self.keyComment))
bp.put(VAR_STR, self.wltLocator)
bp.put(VAR_STR, self.authMethod)
bp.put(VAR_STR, self.authData.serialize())
return bp.getBinaryString()
#############################################################################
def unserialize(self, rawData, expectID=None, skipMagicCheck=False):
ustxiList = []
bu = BinaryUnpacker(rawData)
version = bu.get(UINT32)
magicBytes = bu.get(BINARY_CHUNK, 4)
binPubKey = bu.get(VAR_STR)
keyComment = toUnicode(bu.get(VAR_STR))
wltLoc = bu.get(VAR_STR)
authMeth = bu.get(VAR_STR)
authDataStr = bu.get(VAR_STR)
authData = NullAuthData().unserialize(authDataStr)
# Check the magic bytes of the lockbox match
if not magicBytes == MAGIC_BYTES and not skipMagicCheck:
LOGERROR('Wrong network!')
LOGERROR(' PubKey Magic: ' + binary_to_hex(magicBytes))
LOGERROR(' Armory Magic: ' + binary_to_hex(MAGIC_BYTES))
raise NetworkIDError('Network magic bytes mismatch')
if not version==MULTISIG_VERSION:
LOGWARN('Unserializing LB pubkey of different version')
LOGWARN(' PubKey Version: %d' % version)
LOGWARN(' Armory Version: %d' % MULTISIG_VERSION)
self.setParams(binPubKey, keyComment, wltLoc, authMeth, authData, version)
if expectID and not expectID==self.pubKeyID:
LOGERROR('Pubkey block ID does not match expected')
return None
return self
#############################################################################
def toJSONMap(self):
outjson = {}
outjson['version'] = self.version
outjson['magicbytes'] = binary_to_hex(MAGIC_BYTES)
outjson['id'] = self.asciiID
outjson['pubkeyhex'] = binary_to_hex(self.binPubKey)
outjson['keycomment'] = self.keyComment
outjson['wltLocator'] = binary_to_hex(self.wltLocator)
outjson['authmethod'] = self.authMethod # we expect plaintext
outjson['authdata'] = binary_to_hex(self.authData.serialize())
return outjson
#############################################################################
def fromJSONMap(self, jsonMap, skipMagicCheck=False):
ver = jsonMap['version']
magic = jsonMap['magicbytes']
uniq = jsonMap['id']
# Issue a warning if the versions don't match
if not ver == UNSIGNED_TX_VERSION:
LOGWARN('Unserializing DPK of different version')
LOGWARN(' USTX Version: %d' % ver)
LOGWARN(' Armory Version: %d' % UNSIGNED_TX_VERSION)
# Check the magic bytes of the lockbox match
if not magic == MAGIC_BYTES and not skipMagicCheck:
LOGERROR('Wrong network!')
LOGERROR(' USTX Magic: ' + binary_to_hex(magic))
LOGERROR(' Armory Magic: ' + binary_to_hex(MAGIC_BYTES))
raise NetworkIDError('Network magic bytes mismatch')
pub = hex_to_binary(jsonMap['pubkeyhex'])
comm = jsonMap['keycomment']
loc = hex_to_binary(jsonMap['wltLocator'])
meth = jsonMap['authmethod']
data = hex_to_binary(jsonMap['authdata'])
authobj = NullAuthData().unserialize(data)
self.setParams(pub, comm, loc, meth, authobj)
return self
#############################################################################
def pprint(self):
print 'pprint of DecoratedPublicKey is not implemented'
################################################################################
def computePromissoryID(ustxiList=None, dtxoTarget=None, feeAmt=None,
dtxoChange=None, prom=None):
if prom:
ustxiList = prom.ustxInputs
dtxoTarget = prom.dtxoTarget
feeAmt = prom.feeAmt
dtxoChange = prom.dtxoChange
if not ustxiList:
LOGERROR("Empty ustxiList in computePromissoryID")
return None
outptList = sorted([ustxi.outpoint.serialize() for ustxi in ustxiList])
targStr = dtxoTarget.binScript
targStr += int_to_binary(dtxoTarget.value, widthBytes=8)
targStr += dtxoChange.binScript if dtxoChange else ''
return binary_to_base58(hash256(''.join(outptList) + targStr))[:8]
################################################################################
################################################################################
class MultiSigPromissoryNote(AsciiSerializable):
OBJNAME = 'PromNote'
BLKSTRING = 'PROMISSORY'
EMAILSUBJ = 'Armory Promissory Note for Simulfunding - %s'
EMAILBODY = """
The chunk of text below describes how this wallet will
contribute to a Simulfunding transaction. In the lockbox
manager, go to "Merge Promissory Notes" and then click on
"Import Promissory Note." Copy and paste the block of text
into the import box, including the first and last lines.
You should receive a block of text like this from each party
funding this transaction."""
EQ_ATTRS_SIMPLE = ['version', 'dtxoTarget', 'feeAmt', 'dtxoChange',
'promID', 'asciiID', 'promLabel']
EQ_ATTRS_LISTS = ['ustxInputs']
#############################################################################
def __init__(self, dtxoTarget=None, feeAmt=None, ustxInputs=None,
dtxoChange=None, promLabel=None,
version=MULTISIG_VERSION):
self.version = MULTISIG_VERSION
self.dtxoTarget = dtxoTarget
self.feeAmt = feeAmt
self.ustxInputs = ustxInputs
self.dtxoChange = dtxoChange
self.promID = None
self.asciiID = None
self.promLabel = promLabel if promLabel else ''
# We MIGHT use this object to simultaneously promise funds AND
# provide a key to include in the target multisig lockbox (which would
# save a round of exchanging data, if the use-case allows it)
self.lockboxKey = ''
if dtxoTarget is not None:
self.setParams(dtxoTarget, feeAmt, dtxoChange, ustxInputs,
promLabel, version)
#############################################################################
def setParams(self, dtxoTarget=None, feeAmt=None, dtxoChange=None,
ustxInputs=None, promLabel=None, version=MULTISIG_VERSION):
# Set params will only overwrite with non-None data
if dtxoTarget is not None:
self.dtxoTarget = dtxoTarget
if feeAmt is not None:
self.feeAmt = feeAmt
if dtxoChange is not None:
self.dtxoChange = dtxoChange
if ustxInputs is not None:
self.ustxInputs = ustxInputs
if promLabel is not None:
self.promLabel = promLabel
# Compute some other data members
self.version = version
self.magicBytes = MAGIC_BYTES
self.promID = computePromissoryID(prom=self)
self.asciiID = self.promID # need a common member name in all classes
# Make sure that the change output matches expected, also set contribIDs
totalInputs = 0
for ustxi in self.ustxInputs:
totalInputs += ustxi.value
ustxi.contribID = self.promID
changeAmt = totalInputs - (self.dtxoTarget.value + self.feeAmt)
if changeAmt > 0:
if not self.dtxoChange.value==changeAmt:
LOGERROR('dtxoChange.value==%s, computed %s',
coin2strNZS(self.dtxoChange.value), coin2strNZS(changeAmt))
raise ValueError('Change output on prom note is unexpected')
elif changeAmt < 0:
LOGERROR('Insufficient prom inputs for payAmt and feeAmt')
LOGERROR('Total inputs: %s', coin2strNZS(totalInputs))
LOGERROR('(Amt, Fee)=(%s,%s)', coin2strNZS(self.dtxoTarget.value),
coin2strNZS(self.feeAmt))
raise ValueError('Insufficient prom inputs for pay & fee')
#############################################################################
def setLockboxKey(self, binPubKey):
keyPair = [binPubKey[0], len(binPubKey)]
if not keyPair in [['\x02', 33], ['\x03', 33], ['\x04', 65]]:
LOGERROR('Invalid public key supplied')
return False
if keyPair[0] == '\x04':
if not CryptoECDSA().VerifyPublicKeyValid(SecureBinaryData(binPubKey)):
LOGERROR('Invalid public key supplied')
return False
self.lockboxKey = binPubKey[:]
return True
#############################################################################
def serialize(self):
if not self.dtxoTarget:
LOGERROR('Cannot serialize uninitialized promissory note')
return None
if self.dtxoChange is None:
serChange = ''
else:
serChange = self.dtxoChange.serialize()
bp = BinaryPacker()
bp.put(UINT32, self.version)
bp.put(BINARY_CHUNK, MAGIC_BYTES)
bp.put(VAR_STR, self.dtxoTarget.serialize())
bp.put(VAR_STR, serChange)
bp.put(UINT64, self.feeAmt)
bp.put(VAR_INT, len(self.ustxInputs))
for ustxi in self.ustxInputs:
bp.put(VAR_STR, ustxi.serialize())
bp.put(VAR_STR, toBytes(self.promLabel))
bp.put(VAR_STR, self.lockboxKey)
return bp.getBinaryString()
#############################################################################
def unserialize(self, rawData, expectID=None, skipMagicCheck=False):
ustxiList = []
bu = BinaryUnpacker(rawData)
version = bu.get(UINT32)
magicBytes = bu.get(BINARY_CHUNK, 4)
target = bu.get(VAR_STR)
change = bu.get(VAR_STR)
feeAmt = bu.get(UINT64)
numUSTXI = bu.get(VAR_INT)
# Check the magic bytes of the lockbox match
if not magicBytes == MAGIC_BYTES and not skipMagicCheck:
LOGERROR('Wrong network!')
LOGERROR(' PromNote Magic: ' + binary_to_hex(magicBytes))
LOGERROR(' Armory Magic: ' + binary_to_hex(MAGIC_BYTES))
raise NetworkIDError('Network magic bytes mismatch')
for i in range(numUSTXI):
ustxiList.append( UnsignedTxInput().unserialize(bu.get(VAR_STR), skipMagicCheck=skipMagicCheck) )
promLabel = toUnicode(bu.get(VAR_STR))
lockboxKey = bu.get(VAR_STR)
if not version==MULTISIG_VERSION:
LOGWARN('Unserialing promissory note of different version')
LOGWARN(' PromNote Version: %d' % version)
LOGWARN(' Armory Version: %d' % MULTISIG_VERSION)
dtxoTarget = DecoratedTxOut().unserialize(target, skipMagicCheck=skipMagicCheck)
dtxoChange = DecoratedTxOut().unserialize(change, skipMagicCheck=skipMagicCheck) if change else None
self.setParams(dtxoTarget, feeAmt, dtxoChange, ustxiList, promLabel)
if expectID and not expectID==self.promID:
LOGERROR('Promissory note ID does not match expected')
return None
if len(lockboxKey)>0:
self.setLockboxKey(lockboxKey)
return self
#############################################################################
def toJSONMap(self, lite=False):
outjson = {}
outjson['version'] = self.version
outjson['magicbytes'] = binary_to_hex(MAGIC_BYTES)
outjson['id'] = self.asciiID
#bp = BinaryPacker()
#bp.put(UINT32, self.version)
#bp.put(BINARY_CHUNK, MAGIC_BYTES)
#bp.put(VAR_STR, self.dtxoTarget.serialize())
#bp.put(VAR_STR, serChange)
#bp.put(UINT64, self.feeAmt)
#bp.put(VAR_INT, len(self.ustxInputs))
#for ustxi in self.ustxInputs:
#bp.put(VAR_STR, ustxi.serialize())
#bp.put(VAR_STR, toBytes(self.promLabel))
#bp.put(VAR_STR, self.lockboxKey)
if self.dtxoChange is None:
dtxoChangeMap = {}
else:
dtxoChangeMap = self.dtxoChange.toJSONMap()
outjson['txouttarget'] = self.dtxoTarget.toJSONMap()
outjson['txoutchange'] = dtxoChangeMap
outjson['fee'] = self.feeAmt
outjson['numinputs'] = len(self.ustxInputs)
outjson['promlabel'] = self.promLabel
outjson['lbpubkey'] = self.lockboxKey
if not lite:
outjson['inputs'] = []
for ustxi in self.ustxInputs:
outjson['inputs'].append(ustxi.toJSONMap())
return outjson
#############################################################################
def fromJSONMap(self, jsonMap, skipMagicCheck=False):
ver = jsonMap['version']
magic = jsonMap['magicbytes']
uniq = jsonMap['id']
# Issue a warning if the versions don't match
if not ver == UNSIGNED_TX_VERSION:
LOGWARN('Unserializing Lockbox of different version')
LOGWARN(' USTX Version: %d' % ver)
LOGWARN(' Armory Version: %d' % UNSIGNED_TX_VERSION)
# Check the magic bytes of the lockbox match
if not magic == MAGIC_BYTES and not skipMagicCheck:
LOGERROR('Wrong network!')
LOGERROR(' USTX Magic: ' + binary_to_hex(magic))
LOGERROR(' Armory Magic: ' + binary_to_hex(MAGIC_BYTES))
raise NetworkIDError('Network magic bytes mismatch')
targ = DecoratedTxOut().fromJSONMap(jsonMap['txouttarget'])
fee = jsonMap['fee']
if len(jsonMap['txoutchange'])>0:
chng = DecoratedTxOut().fromJSONMap(jsonMap['txoutchange'])
else:
chng = None
nin = jsonMap['numinputs']
inputs = []
for i in range(nin):
inputs.append(UnsignedTxInput().fromJSONMap(jsonMap['inputs'][i], skipMagicCheck))
lbl = jsonMap['promlabel']
self.setParams(targ, fee, chng, inputs, lbl)
return self
#############################################################################
def pprint(self):
print 'Promissory Note:'
print ' Version :', self.version
print ' Unique ID :', self.promID
print ' Num Inputs :', len(self.ustxInputs)
print ' Target Addr :', self.dtxoTarget.getRecipStr()
print ' Pay Amount :', self.dtxoTarget.value
print ' Fee Amount :', self.feeAmt
if self.dtxoChange is not None:
print ' ChangeAddr :', self.dtxoChange.getRecipStr()
print ' LB Key :', self.lockboxKey
print ' LB Key Info :', self.promLabel
# Resolve circular dependencies here.
from armoryengine.UserAddressUtils import getDisplayStringForScript
| 37.958333 | 106 | 0.561042 |
ace46d99d8815e88231f58fb63b85767f027c20f | 5,396 | py | Python | aec_checker.py | alexjago/AEC_Checker | dbcd097e8e16f2e75788a1e7c1d8f1cc0a53d346 | [
"MIT"
] | 1 | 2021-11-20T23:48:49.000Z | 2021-11-20T23:48:49.000Z | aec_checker.py | alexjago/AEC_Checker | dbcd097e8e16f2e75788a1e7c1d8f1cc0a53d346 | [
"MIT"
] | null | null | null | aec_checker.py | alexjago/AEC_Checker | dbcd097e8e16f2e75788a1e7c1d8f1cc0a53d346 | [
"MIT"
] | 1 | 2021-11-26T09:15:52.000Z | 2021-11-26T09:15:52.000Z | #!python3
from selenium import webdriver
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from selenium.common.exceptions import NoSuchElementException
import csv
import argparse
import typing
import collections
from enum import Enum
import sys
class AECResult(Enum):
PASS = "Pass"
PARTIAL = "Partial"
FAIL = "Fail"
FAIL_STREET = "Fail_Street"
FAIL_SUBURB = "Fail_Suburb"
AECStatus = collections.namedtuple(
"AECStatus", ["result", "federal", "state", "local_gov", "local_ward"]
)
def getAECStatus(
driver: webdriver,
givenNames: str,
surname: str,
postcode: str,
suburb: str,
state: str,
streetName: str,
) -> AECStatus:
elem = driver.find_element(By.ID, "ctl00_ContentPlaceHolderBody_textGivenName")
elem.clear()
elem.send_keys(givenNames)
elem = driver.find_element(By.ID, "ctl00_ContentPlaceHolderBody_textSurname")
elem.clear()
elem.send_keys(surname)
elem = driver.find_element(By.ID, "ctl00_ContentPlaceHolderBody_textPostcode")
elem.clear()
elem.send_keys(postcode)
time.sleep(0.1)
suburb_state = f"{str.upper(suburb)} ({state})"
try:
suburb_dropdown = Select(
driver.find_element(By.ID, "ctl00_ContentPlaceHolderBody_DropdownSuburb")
)
suburb_dropdown.select_by_value(suburb_state)
except Exception as e:
print(e, suburb_state, file=sys.stderr)
return AECStatus(AECResult.FAIL_SUBURB, "", "", "", "")
elem = driver.find_element(By.ID, "ctl00_ContentPlaceHolderBody_textStreetName")
elem.clear()
elem.send_keys(streetName)
captcha_entered = False
driver.find_element(
By.ID, "ctl00_ContentPlaceHolderBody_textVerificationCode"
).send_keys("")
while not captcha_entered:
time.sleep(1)
try:
elem = driver.find_element(
By.ID, "ctl00_ContentPlaceHolderBody_textVerificationCode"
)
except NoSuchElementException:
break
if len(elem.get_attribute("value")) == 4:
driver.find_element(
By.ID, "ctl00_ContentPlaceHolderBody_buttonVerify"
).click()
try:
# Look for the first name tag, if it exist the captcha failed
driver.find_element(By.ID, "ctl00_ContentPlaceHolderBody_textGivenName")
except Exception:
# Otherwise we're good. (why is a success state in an exception, brah)
captcha_entered = True
if not captcha_entered:
driver.find_element(
By.ID, "ctl00_ContentPlaceHolderBody_textVerificationCode"
).send_keys("")
try:
driver.find_element(By.ID, "ctl00_ContentPlaceHolderBody_panelSuccess")
federal_division = ""
state_district = ""
local_gov = ""
local_ward = ""
try:
federal_division = driver.find_element(
By.ID, "ctl00_ContentPlaceHolderBody_linkProfile"
).text
state_district = driver.find_element(
By.ID, "ctl00_ContentPlaceHolderBody_labelStateDistrict2"
).text
local_gov = driver.find_element(
By.ID, "ctl00_ContentPlaceHolderBody_labelLGA2"
).text
local_ward = driver.find_element(
By.ID, "ctl00_ContentPlaceHolderBody_labelLGAWard2"
).text
except Exception:
pass
driver.find_element(By.ID, "ctl00_ContentPlaceHolderBody_buttonBack").click()
return AECStatus(
AECResult.PASS, federal_division, state_district, local_gov, local_ward
)
except Exception:
out = AECStatus(AECResult.FAIL, "", "", "", "")
try:
reason = driver.find_element(
By.ID, "ctl00_ContentPlaceHolderBody_labelFailedReason"
)
if "partial" in reason.text:
out = AECStatus(AECResult.PARTIAL, "", "", "", "")
elif "street" in reason.text:
out = AECStatus(AECResult.FAIL_STREET, "", "", "", "")
except Exception:
pass
driver.find_element(
By.ID, "ctl00_ContentPlaceHolderBody_buttonTryAgain"
).click()
return out
def main():
parser = argparse.ArgumentParser(description="automate AEC checking")
parser.add_argument(
"--skip", type=int, default=0, help="skip entries you've already seen"
)
parser.add_argument("--infile", default="input.csv")
parser.add_argument("--outfile", default="output.csv")
args = parser.parse_args()
driver = webdriver.Firefox()
driver.get("https://check.aec.gov.au/")
writer = csv.writer(
open(
args.outfile,
"a",
newline="",
)
)
count = 0
with open(args.infile) as csvfile:
reader = csv.reader(csvfile, delimiter=",")
for row in reader:
count += 1
if count <= 1 + args.skip:
writer.writerow(row)
continue
time.sleep(0.1)
status = getAECStatus(driver, *row[:6])
writer.writerow(row + [i for i in status])
writer.close()
driver.close()
if __name__ == "__main__":
main()
| 29.812155 | 88 | 0.611749 |
ace46e49f0a829b566354953e1bb5518e6253a61 | 7,584 | py | Python | a5/src/run.py | ZubinGou/CS224n-Assignment | 55cf163c7afcb6d9339cf010492681fe71b13887 | [
"MIT"
] | 19 | 2021-03-22T14:18:35.000Z | 2022-03-21T00:49:49.000Z | a5/src/run.py | ZubinGou/CS224n-Assignment | 55cf163c7afcb6d9339cf010492681fe71b13887 | [
"MIT"
] | 1 | 2021-04-30T03:14:11.000Z | 2021-05-05T07:30:22.000Z | a5/src/run.py | ZubinGou/CS224n-Assignment | 55cf163c7afcb6d9339cf010492681fe71b13887 | [
"MIT"
] | 9 | 2021-08-30T03:32:03.000Z | 2022-03-17T13:47:22.000Z | import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
from torch.nn import functional as F
import random
import argparse
random.seed(0)
import dataset
from model import GPTConfig, GPT
from trainer import Trainer, TrainerConfig
import utils
argp = argparse.ArgumentParser()
argp.add_argument('function',
help="Whether to pretrain, finetune or evaluate a model",
choices=["pretrain", "finetune", "evaluate"])
argp.add_argument('variant',
help="Which variant of the model to run ('vanilla' or 'synthesizer')",
choices=["vanilla", "synthesizer"])
argp.add_argument('pretrain_corpus_path', help="Path of the corpus to pretrain on", default=None)
argp.add_argument('--reading_params_path',
help="If specified, path of the model to load before finetuning/evaluation",
default=None)
argp.add_argument('--writing_params_path', help="Path to save the model after pretraining/finetuning", default=None)
argp.add_argument('--finetune_corpus_path', help="Path of the corpus to finetune on", default=None)
argp.add_argument('--eval_corpus_path', help="Path of the corpus to evaluate on", default=None)
argp.add_argument('--outputs_path', default=None)
args = argp.parse_args()
# Save the device
device = torch.cuda.current_device() if torch.cuda.is_available() else 'cpu'
# Keep the block size 128
# Why is the pretraining corpus always required (even if we're not pretraining?)
# It's because we're using it as a hack to always have the same vocabulary
# (that is, the same mapping from character to integer, and we build the
# vocab from the pretraining corpus.)
block_size = 128
text = open(args.pretrain_corpus_path, encoding="utf-8").read()
pretrain_dataset = dataset.CharCorruptionDataset(text, block_size)
# We don't suggest you change these hyperparameters, as they're known to work.
# use them for both the vanilla and the synthesizer models
mconf = GPTConfig(pretrain_dataset.vocab_size, pretrain_dataset.block_size, n_layer=4, n_head=8, n_embd=256)
def main():
"""
Don't change above here; write your code below
"""
if args.variant == 'vanilla':
model = GPT(mconf) # TODO [part c]: Make some model here
elif args.variant == 'synthesizer':
# TODO [part g]: Make some other model here
mconf.synthesizer = True
model = GPT(mconf)
# From here on, your code should be identical independent of which
# variant (vanilla or synthesizer) has been chosen.
if args.function == 'pretrain':
assert args.pretrain_corpus_path is not None
assert args.writing_params_path is not None
# TODO [part f]:
# - Given:
# 1. A corpus specified in args.pretrain_corpus_path
# 2. An output path args.writing_params_path for the model parameters
# - Goals:
# 1. Pretrain the model on this corpus
# 2. Save the resulting model in args.writing_params_path
# - Make sure to use the following hyperparameters for pretraining:
# max_epochs=650
# batch_size=128
# learning_rate=6e-3
# lr_decay=True
# warmup_tokens=512*20
# final_tokens=200*len(pretrain_dataset)*block_size
# num_workers=4
# pretrain_text = open(args.pretrain_corpus_path, 'r', encoding='utf-8').read()
# pretrain_dataset =
tconf = TrainerConfig(max_epochs=650,
batch_size=128,
learning_rate=6e-3,
lr_decay=True,
warmup_token=512 * 20,
final_tokens=200 * len(pretrain_dataset) * block_size,
num_workers=4)
trainer = Trainer(model, pretrain_dataset, None, tconf)
trainer.train()
torch.save(model.state_dict(), args.writing_params_path)
elif args.function == 'finetune':
assert args.writing_params_path is not None
assert args.finetune_corpus_path is not None
# TODO [part c] [part f]:
# - Given:
# 1. A finetuning corpus specified in args.finetune_corpus_path
# 2. A path args.reading_params_path containing pretrained model
# parameters, or None if finetuning without a pretrained model
# 3. An output path args.writing_params_path for the model parameters
# - Goals:
# 1. If args.reading_params_path is specified, load these parameters
# into the model
# 2. Finetune the model on this corpus
# 3. Save the resulting model in args.writing_params_path
# - Make sure to use the following hyperparameters:
# Hyperparameters for finetuning WITHOUT a pretrained model:
# max_epochs=75
# batch_size=256
# learning_rate=6e-4
# lr_decay=True
# warmup_tokens=512*20
# final_tokens=200*len(pretrain_dataset)*block_size
# num_workers=4
# Hyperparameters for finetuning WITH a pretrained model:
# max_epochs=10
# batch_size=256
# learning_rate=6e-4
# lr_decay=True
# warmup_tokens=512*20
# final_tokens=200*len(pretrain_dataset)*block_size
# num_workers=4
if args.reading_params_path is not None:
model.load_state_dict(torch.load(args.reading_params_path))
tconf = TrainerConfig(max_epochs=75,
batch_size=256,
learning_rate=6e-4,
lr_decay=True,
warmup_tokens=512 * 20,
final_tokens=200 * len(pretrain_dataset) * block_size,
num_workers=4)
text = open(args.finetune_corpus_path, 'r').read()
train_dataset = dataset.NameDataset(pretrain_dataset, text)
trainer = Trainer(model, train_dataset, None, tconf)
trainer.train()
# save to args.writing_params_path
torch.save(model.state_dict(), args.writing_params_path)
elif args.function == 'evaluate':
assert args.outputs_path is not None
assert args.reading_params_path is not None
assert args.eval_corpus_path is not None
model.load_state_dict(torch.load(args.reading_params_path))
model = model.to(device)
correct = 0
total = 0
with open(args.outputs_path, 'w') as fout:
predictions = []
for line in tqdm(open(args.eval_corpus_path)):
x = line.split('\t')[0]
x = x + '⁇'
x = torch.tensor([pretrain_dataset.stoi[s] for s in x], dtype=torch.long)[None, ...].to(device)
pred = utils.sample(model, x, 32, sample=False)[0]
completion = ''.join([pretrain_dataset.itos[int(i)] for i in pred])
pred = completion.split('⁇')[1]
predictions.append(pred)
fout.write(pred + '\n')
total, correct = utils.evaluate_places(args.eval_corpus_path, predictions)
if total > 0:
print('Correct: {} out of {}: {}%'.format(correct, total, correct / total * 100))
else:
print('Predictions written to {}; no targets provided'.format(args.outputs_path))
if __name__ == '__main__':
main() | 44.87574 | 116 | 0.61577 |
ace46f24d22bf4438823a20ca6f186a181068a03 | 8,656 | py | Python | src/net.py | kareza-best/F3Net | 94c83a908987f81830dc2f2bd75d4105a39b8d23 | [
"MIT"
] | 176 | 2019-11-27T02:31:04.000Z | 2022-03-31T04:59:00.000Z | src/net.py | sam-ai/F3Net | a5dd8c06c9f797e02c8c606c44d7b09a0d56f074 | [
"MIT"
] | 25 | 2019-12-12T07:18:59.000Z | 2022-03-25T01:29:42.000Z | src/net.py | sam-ai/F3Net | a5dd8c06c9f797e02c8c606c44d7b09a0d56f074 | [
"MIT"
] | 38 | 2019-11-27T05:30:08.000Z | 2022-02-17T07:42:17.000Z | #!/usr/bin/python3
#coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
def weight_init(module):
for n, m in module.named_children():
print('initialize: '+n)
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d)):
nn.init.ones_(m.weight)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Sequential):
weight_init(m)
elif isinstance(m, nn.ReLU):
pass
else:
m.initialize()
class Bottleneck(nn.Module):
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=(3*dilation-1)//2, bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes*4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes*4)
self.downsample = downsample
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)), inplace=True)
out = F.relu(self.bn2(self.conv2(out)), inplace=True)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
x = self.downsample(x)
return F.relu(out+x, inplace=True)
class ResNet(nn.Module):
def __init__(self):
super(ResNet, self).__init__()
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self.make_layer( 64, 3, stride=1, dilation=1)
self.layer2 = self.make_layer(128, 4, stride=2, dilation=1)
self.layer3 = self.make_layer(256, 6, stride=2, dilation=1)
self.layer4 = self.make_layer(512, 3, stride=2, dilation=1)
def make_layer(self, planes, blocks, stride, dilation):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes*4, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes*4))
layers = [Bottleneck(self.inplanes, planes, stride, downsample, dilation=dilation)]
self.inplanes = planes*4
for _ in range(1, blocks):
layers.append(Bottleneck(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def forward(self, x):
out1 = F.relu(self.bn1(self.conv1(x)), inplace=True)
out1 = F.max_pool2d(out1, kernel_size=3, stride=2, padding=1)
out2 = self.layer1(out1)
out3 = self.layer2(out2)
out4 = self.layer3(out3)
out5 = self.layer4(out4)
return out2, out3, out4, out5
def initialize(self):
self.load_state_dict(torch.load('../res/resnet50-19c8e357.pth'), strict=False)
class CFM(nn.Module):
def __init__(self):
super(CFM, self).__init__()
self.conv1h = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.bn1h = nn.BatchNorm2d(64)
self.conv2h = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.bn2h = nn.BatchNorm2d(64)
self.conv3h = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.bn3h = nn.BatchNorm2d(64)
self.conv4h = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.bn4h = nn.BatchNorm2d(64)
self.conv1v = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.bn1v = nn.BatchNorm2d(64)
self.conv2v = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.bn2v = nn.BatchNorm2d(64)
self.conv3v = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.bn3v = nn.BatchNorm2d(64)
self.conv4v = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.bn4v = nn.BatchNorm2d(64)
def forward(self, left, down):
if down.size()[2:] != left.size()[2:]:
down = F.interpolate(down, size=left.size()[2:], mode='bilinear')
out1h = F.relu(self.bn1h(self.conv1h(left )), inplace=True)
out2h = F.relu(self.bn2h(self.conv2h(out1h)), inplace=True)
out1v = F.relu(self.bn1v(self.conv1v(down )), inplace=True)
out2v = F.relu(self.bn2v(self.conv2v(out1v)), inplace=True)
fuse = out2h*out2v
out3h = F.relu(self.bn3h(self.conv3h(fuse )), inplace=True)+out1h
out4h = F.relu(self.bn4h(self.conv4h(out3h)), inplace=True)
out3v = F.relu(self.bn3v(self.conv3v(fuse )), inplace=True)+out1v
out4v = F.relu(self.bn4v(self.conv4v(out3v)), inplace=True)
return out4h, out4v
def initialize(self):
weight_init(self)
class Decoder(nn.Module):
    """Top-down decoder chaining three CFM fusion blocks (scales 5->4->3->2).

    Optionally refines every scale with a feedback prediction ``fback``
    (used by the second decoding pass of F3Net).
    """
    def __init__(self):
        super(Decoder, self).__init__()
        self.cfm45 = CFM()
        self.cfm34 = CFM()
        self.cfm23 = CFM()

    def forward(self, out2h, out3h, out4h, out5v, fback=None):
        # With feedback, the previous prediction map is resized to each
        # scale and added before the corresponding fusion step.
        if fback is not None:
            refine5 = F.interpolate(fback, size=out5v.size()[2:], mode='bilinear')
            refine4 = F.interpolate(fback, size=out4h.size()[2:], mode='bilinear')
            refine3 = F.interpolate(fback, size=out3h.size()[2:], mode='bilinear')
            refine2 = F.interpolate(fback, size=out2h.size()[2:], mode='bilinear')
            out5v = out5v + refine5
            out4h, out4v = self.cfm45(out4h + refine4, out5v)
            out3h, out3v = self.cfm34(out3h + refine3, out4v)
            out2h, pred = self.cfm23(out2h + refine2, out3v)
        else:
            out4h, out4v = self.cfm45(out4h, out5v)
            out3h, out3v = self.cfm34(out3h, out4v)
            out2h, pred = self.cfm23(out2h, out3v)
        # Returns refined per-scale features plus the finest-scale prediction.
        return out2h, out3h, out4h, out5v, pred

    def initialize(self):
        weight_init(self)
class F3Net(nn.Module):
    """F3Net salient-object detector.

    ResNet-50 backbone, 1x1 'squeeze' convs reducing each stage to 64
    channels, and two cascaded decoders — the second refined by the first
    decoder's prediction. Returns two predictions plus per-scale side
    outputs, all upsampled to the input (or given) spatial size.
    """
    def __init__(self, cfg):
        super(F3Net, self).__init__()
        self.cfg = cfg
        self.bkbone = ResNet()
        # Squeeze backbone stage channels (2048/1024/512/256) down to 64.
        self.squeeze5 = nn.Sequential(nn.Conv2d(2048, 64, 1), nn.BatchNorm2d(64), nn.ReLU(inplace=True))
        self.squeeze4 = nn.Sequential(nn.Conv2d(1024, 64, 1), nn.BatchNorm2d(64), nn.ReLU(inplace=True))
        self.squeeze3 = nn.Sequential(nn.Conv2d( 512, 64, 1), nn.BatchNorm2d(64), nn.ReLU(inplace=True))
        self.squeeze2 = nn.Sequential(nn.Conv2d( 256, 64, 1), nn.BatchNorm2d(64), nn.ReLU(inplace=True))
        self.decoder1 = Decoder()
        self.decoder2 = Decoder()
        # 3x3 heads mapping 64-channel features to 1-channel saliency maps.
        self.linearp1 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1)
        self.linearp2 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1)
        self.linearr2 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1)
        self.linearr3 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1)
        self.linearr4 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1)
        self.linearr5 = nn.Conv2d(64, 1, kernel_size=3, stride=1, padding=1)
        self.initialize()

    def forward(self, x, shape=None):
        out2h, out3h, out4h, out5v = self.bkbone(x)
        out2h, out3h, out4h, out5v = self.squeeze2(out2h), self.squeeze3(out3h), self.squeeze4(out4h), self.squeeze5(out5v)
        # First decoding pass, then a second pass refined by pred1.
        out2h, out3h, out4h, out5v, pred1 = self.decoder1(out2h, out3h, out4h, out5v)
        out2h, out3h, out4h, out5v, pred2 = self.decoder2(out2h, out3h, out4h, out5v, pred1)
        shape = x.size()[2:] if shape is None else shape
        pred1 = F.interpolate(self.linearp1(pred1), size=shape, mode='bilinear')
        pred2 = F.interpolate(self.linearp2(pred2), size=shape, mode='bilinear')
        out2h = F.interpolate(self.linearr2(out2h), size=shape, mode='bilinear')
        out3h = F.interpolate(self.linearr3(out3h), size=shape, mode='bilinear')
        out4h = F.interpolate(self.linearr4(out4h), size=shape, mode='bilinear')
        out5h = F.interpolate(self.linearr5(out5v), size=shape, mode='bilinear')
        return pred1, pred2, out2h, out3h, out4h, out5h

    def initialize(self):
        # Resume from a snapshot if configured, otherwise apply default init.
        # NOTE(review): torch.load without map_location= assumes the
        # checkpoint's original device is available — confirm for CPU-only use.
        if self.cfg.snapshot:
            self.load_state_dict(torch.load(self.cfg.snapshot))
        else:
            weight_init(self)
| 44.618557 | 141 | 0.615065 |
ace46f5e09c4a6af69f6e4f10642f8f0cb338b3b | 48,164 | py | Python | src/sage/rings/asymptotic/growth_group_cartesian.py | defeo/sage | d8822036a9843bd4d75845024072515ede56bcb9 | [
"BSL-1.0"
] | null | null | null | src/sage/rings/asymptotic/growth_group_cartesian.py | defeo/sage | d8822036a9843bd4d75845024072515ede56bcb9 | [
"BSL-1.0"
] | null | null | null | src/sage/rings/asymptotic/growth_group_cartesian.py | defeo/sage | d8822036a9843bd4d75845024072515ede56bcb9 | [
"BSL-1.0"
] | null | null | null | r"""
Cartesian Products of Growth Groups
See :doc:`growth_group` for a description.
AUTHORS:
- Benjamin Hackl (2015)
- Daniel Krenn (2015)
- Clemens Heuberger (2016)
ACKNOWLEDGEMENT:
- Benjamin Hackl, Clemens Heuberger and Daniel Krenn are supported by the
Austrian Science Fund (FWF): P 24644-N26.
- Benjamin Hackl is supported by the Google Summer of Code 2015.
.. WARNING::
As this code is experimental, warnings are thrown when a growth
group is created for the first time in a session (see
:class:`sage.misc.superseded.experimental`).
TESTS::
sage: from sage.rings.asymptotic.growth_group import GenericGrowthGroup, GrowthGroup
sage: GenericGrowthGroup(ZZ)
doctest:...: FutureWarning: This class/method/function is marked as
experimental. It, its functionality or its interface might change
without a formal deprecation.
See http://trac.sagemath.org/17601 for details.
Growth Group Generic(ZZ)
TESTS::
sage: from sage.rings.asymptotic.growth_group import GrowthGroup
sage: A = GrowthGroup('QQ^x * x^ZZ'); A
Growth Group QQ^x * x^ZZ
sage: A.construction()
(The cartesian_product functorial construction,
(Growth Group QQ^x, Growth Group x^ZZ))
sage: A.construction()[1][0].construction()
(ExponentialGrowthGroup[x], Rational Field)
sage: A.construction()[1][1].construction()
(MonomialGrowthGroup[x], Integer Ring)
sage: B = GrowthGroup('x^ZZ * y^ZZ'); B
Growth Group x^ZZ * y^ZZ
sage: B.construction()
(The cartesian_product functorial construction,
(Growth Group x^ZZ, Growth Group y^ZZ))
sage: C = GrowthGroup('x^ZZ * log(x)^ZZ * y^ZZ'); C
Growth Group x^ZZ * log(x)^ZZ * y^ZZ
sage: C.construction()
(The cartesian_product functorial construction,
(Growth Group x^ZZ * log(x)^ZZ, Growth Group y^ZZ))
sage: C.construction()[1][0].construction()
(The cartesian_product functorial construction,
(Growth Group x^ZZ, Growth Group log(x)^ZZ))
sage: C.construction()[1][1].construction()
(MonomialGrowthGroup[y], Integer Ring)
::
sage: cm = sage.structure.element.get_coercion_model()
sage: D = GrowthGroup('QQ^x * x^QQ')
sage: cm.common_parent(A, D)
Growth Group QQ^x * x^QQ
sage: E = GrowthGroup('ZZ^x * x^QQ')
sage: cm.record_exceptions() # not tested, see #19411
sage: cm.common_parent(A, E)
Growth Group QQ^x * x^QQ
sage: for t in cm.exception_stack(): # not tested, see #19411
....: print(t)
::
sage: A.an_element()
(1/2)^x*x
sage: tuple(E.an_element())
(1, x^(1/2))
Classes and Methods
===================
"""
#*****************************************************************************
# Copyright (C) 2014--2015 Benjamin Hackl <benjamin.hackl@aau.at>
# 2014--2015 Daniel Krenn <dev@danielkrenn.at>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function
from __future__ import absolute_import
import sage
class CartesianProductFactory(sage.structure.factory.UniqueFactory):
    r"""
    Create various types of Cartesian products of growth groups.

    INPUT:

    - ``growth_groups`` -- a tuple (or other iterable) of growth groups.

    - ``order`` -- (default: ``None``) if specified, this order is used for
      comparing two Cartesian product elements. If ``None``, it is determined
      automatically: factors over the same variable are ordered
      lexicographically, factors over different variables component-wise.

    .. NOTE::

        The sets of variables of the Cartesian factors have to be either
        equal or pairwise disjoint.

    EXAMPLES::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: A = GrowthGroup('x^ZZ'); B = GrowthGroup('log(x)^ZZ')
        sage: cartesian_product([A, B])  # indirect doctest
        Growth Group x^ZZ * log(x)^ZZ
    """
    def create_key_and_extra_args(self, growth_groups, category, **kwds):
        r"""
        Return a key that uniquely determines this object, together with
        the remaining keyword arguments.

        TESTS::

            sage: from sage.rings.asymptotic.growth_group_cartesian import CartesianProductFactory
            sage: from sage.rings.asymptotic.growth_group import GrowthGroup
            sage: CartesianProductFactory('factory').create_key_and_extra_args(
            ....:     [GrowthGroup('x^ZZ')], category=Sets(), order='blub')
            (((Growth Group x^ZZ,), Category of sets), {'order': 'blub'})
        """
        return (tuple(growth_groups), category), kwds

    def create_object(self, version, args, **kwds):
        r"""
        Create a Cartesian product of growth groups from the given arguments.

        Factors sharing a variable are grouped into a
        :class:`UnivariateProduct`; the per-variable groups are then combined
        into a :class:`MultivariateProduct` (if more than one variable occurs).

        TESTS::

            sage: from sage.rings.asymptotic.growth_group import GrowthGroup
            sage: cartesian_product([GrowthGroup('x^ZZ')])  # indirect doctest
            Growth Group x^ZZ
        """
        growth_groups, category = args
        if not growth_groups:
            raise TypeError('Cannot create Cartesian product without factors.')
        order = kwds.pop('order', None)
        if order is not None:
            # An explicit order bypasses the univariate/multivariate split.
            return GenericProduct(growth_groups, category, order=order, **kwds)

        vg = tuple((g.variable_names(), g) for g in growth_groups)

        # check if all groups have a variable
        if not all(v for v, _ in vg):
            raise NotImplementedError('Growth groups %s have no variable.' %
                                      tuple(g for g in growth_groups
                                            if not g.variable_names()))

        # sort by variables (groupby needs its input sorted by the same key)
        from itertools import groupby, product
        vgs = tuple((v, tuple(gs)) for v, gs in
                    groupby(sorted(vg, key=lambda k: k[0]), key=lambda k: k[0]))

        # check whether variables are pairwise disjoint
        for u, w in product(iter(v for v, _ in vgs), repeat=2):
            if u != w and not set(u).isdisjoint(set(w)):
                raise ValueError('The growth groups %s need to have pairwise '
                                 'disjoint or equal variables.' % (growth_groups,))

        # build Cartesian products: one univariate product per variable...
        u_groups = list()
        for _, gs in vgs:
            gs = tuple(g for _, g in gs)
            if len(gs) > 1:
                u_groups.append(UnivariateProduct(gs, category, **kwds))
            else:
                u_groups.append(gs[0])

        # ...then one multivariate product over all variables, if needed.
        if len(u_groups) > 1:
            m_group = MultivariateProduct(tuple(u_groups), category, **kwds)
        else:
            m_group = u_groups[0]
        return m_group
CartesianProductGrowthGroups = CartesianProductFactory('CartesianProductGrowthGroups')
from sage.combinat.posets.cartesian_product import CartesianProductPoset
from .growth_group import GenericGrowthGroup
class GenericProduct(CartesianProductPoset, GenericGrowthGroup):
r"""
A Cartesian product of growth groups.
EXAMPLES::
sage: from sage.rings.asymptotic.growth_group import GrowthGroup
sage: P = GrowthGroup('x^QQ')
sage: L = GrowthGroup('log(x)^ZZ')
sage: C = cartesian_product([P, L], order='lex'); C # indirect doctest
Growth Group x^QQ * log(x)^ZZ
sage: C.an_element()
x^(1/2)*log(x)
::
sage: Px = GrowthGroup('x^QQ')
sage: Lx = GrowthGroup('log(x)^ZZ')
sage: Cx = cartesian_product([Px, Lx], order='lex') # indirect doctest
sage: Py = GrowthGroup('y^QQ')
sage: C = cartesian_product([Cx, Py], order='product'); C # indirect doctest
Growth Group x^QQ * log(x)^ZZ * y^QQ
sage: C.an_element()
x^(1/2)*log(x)*y^(1/2)
.. SEEALSO::
:class:`~sage.sets.cartesian_product.CartesianProduct`,
:class:`~sage.combinat.posets.cartesian_product.CartesianProductPoset`.
"""
__classcall__ = CartesianProductPoset.__classcall__
def __init__(self, sets, category, **kwds):
    r"""
    Initialize this Cartesian product of growth groups.

    Initializes both base classes: the poset Cartesian product (which
    stores the factors and the comparison ``order``) and the generic
    growth group (which records the combined variable).

    TESTS::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: GrowthGroup('x^ZZ * y^ZZ')  # indirect doctest
        Growth Group x^ZZ * y^ZZ
    """
    order = kwds.pop('order')
    CartesianProductPoset.__init__(self, sets, category, order, **kwds)
    # Concatenate the variable names of all factors...
    vars = sum(iter(factor.variable_names()
                    for factor in self.cartesian_factors()),
               tuple())
    from itertools import groupby
    from .growth_group import Variable
    # ...and drop consecutive duplicates (same-variable factors are adjacent).
    Vars = Variable(tuple(v for v, _ in groupby(vars)), repr=self._repr_short_())
    GenericGrowthGroup.__init__(self, sets[0], Vars, self.category(), **kwds)
__hash__ = CartesianProductPoset.__hash__
def some_elements(self):
    r"""
    Return an iterator over some elements of this Cartesian product of
    growth groups (see :class:`TestSuite` for a typical use case).

    OUTPUT:

    An iterator.

    EXAMPLES::

        sage: from itertools import islice
        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: G = GrowthGroup('QQ^y * x^QQ * log(x)^ZZ')
        sage: tuple(islice(G.some_elements(), 3))
        (x^(1/2)*(1/2)^y,
         x^(-1/2)*log(x)*(-1/2)^y,
         x^2*log(x)^(-1)*2^y)
    """
    # ``izip`` only exists on Python 2; on Python 3 the builtin ``zip`` is
    # already lazy. Laziness matters here: the factors' ``some_elements()``
    # may be infinite generators, so an eager zip would never terminate.
    try:
        from itertools import izip
    except ImportError:
        izip = zip
    return iter(
        self(c) for c in
        izip(*tuple(F.some_elements() for F in self.cartesian_factors())))
def _create_element_in_extension_(self, element):
    r"""
    Create an element in an extension of this Cartesian product of
    growth groups, chosen according to the components of ``element``.

    INPUT:

    - ``element`` -- a tuple whose entries are growth elements, one per
      Cartesian factor.

    OUTPUT:

    An element (possibly of a larger parent than this one).

    EXAMPLES::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: G = GrowthGroup('z^ZZ * log(z)^ZZ')
        sage: z = G('z')[0]
        sage: lz = G('log(z)')[1]
        sage: G._create_element_in_extension_((z^3, lz)).parent()
        Growth Group z^ZZ * log(z)^ZZ
        sage: G._create_element_in_extension_((z^(1/2), lz)).parent()
        Growth Group z^QQ * log(z)^ZZ
    """
    my_factors = self.cartesian_factors()
    if len(element) != len(my_factors):
        raise ValueError('Cannot create %s as a Cartesian product like %s.' %
                         (element, self))
    # If every component already lives in the corresponding factor, stay in
    # this parent; otherwise build the extended Cartesian product whose
    # factors are the components' actual parents.
    unchanged = all(component.parent() is factor
                    for component, factor in zip(element, my_factors))
    if unchanged:
        target = self
    else:
        from .misc import underlying_class
        new_factors = tuple(component.parent() for component in element)
        target = underlying_class(self)(new_factors, category=self.category())
    return target(element)
def _element_constructor_(self, data):
    r"""
    Convert the given object to an element of this Cartesian product.

    Accepts (in this order): the integer ``1``, an element of this parent,
    a string of ``*``-separated factors, a symbolic product, anything the
    base class can convert, and finally tuples/lists of per-factor data.

    EXAMPLES::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: G = GrowthGroup('x^ZZ * y^ZZ')
        sage: x, y = var('x y')
        sage: G(x^-3*y^2)
        x^(-3)*y^2
        sage: G(1)
        1
        sage: GrowthGroup('x^ZZ * log(x)^ZZ')(['x', 'log(x)'])
        x*log(x)
    """
    def convert_factors(data, raw_data):
        # Delegate to _convert_factors_, wrapping any failure so the error
        # message shows the original (raw) input.
        try:
            return self._convert_factors_(data)
        except ValueError as e:
            from .misc import combine_exceptions
            raise combine_exceptions(
                ValueError('%s is not in %s.' % (raw_data, self)), e)

    if data == 1:
        return self.one()
    elif data is None:
        raise ValueError('%s cannot be converted.' % (data,))
    elif type(data) == self.element_class and data.parent() == self:
        return data
    elif isinstance(data, str):
        from .misc import split_str_by_op
        return convert_factors(split_str_by_op(data, '*'), data)
    elif hasattr(data, 'parent'):
        P = data.parent()
        if P is self:
            return data
        elif P is sage.symbolic.ring.SR:
            from sage.symbolic.operators import mul_vararg
            if data.operator() == mul_vararg:
                return convert_factors(data.operands(), data)
        # room for other parents (e.g. polynomial ring et al.)

    try:
        return super(GenericProduct, self)._element_constructor_(data)
    except (TypeError, ValueError):
        pass
    if isinstance(data, (tuple, list,
                         sage.sets.cartesian_product.CartesianProduct.Element)):
        return convert_factors(tuple(data), data)
    return convert_factors((data,), data)
_repr_ = GenericGrowthGroup._repr_
def _repr_short_(self):
    r"""
    Return a short (shorter than :meth:`._repr_`) representation string
    for this Cartesian product of growth groups.

    OUTPUT:

    A string.

    EXAMPLES::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: P = GrowthGroup('x^QQ')
        sage: L = GrowthGroup('log(x)^ZZ')
        sage: cartesian_product([P, L], order='lex')._repr_short_()
        'x^QQ * log(x)^ZZ'
    """
    parts = [factor._repr_short_() for factor in self.cartesian_factors()]
    return ' * '.join(parts)
def _convert_factors_(self, factors):
    r"""
    Helper method: convert each entry of ``factors`` to an element of one
    of the Cartesian factors and return the product of the injected results.

    INPUT:

    - ``factors`` -- a tuple or other iterable.

    OUTPUT:

    An element of this Cartesian product.

    EXAMPLES::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: G = GrowthGroup('x^ZZ * log(x)^QQ * y^QQ')
        sage: e1 = G._convert_factors_([x^2])
        sage: (e1, e1.parent())
        (x^2, Growth Group x^ZZ * log(x)^QQ * y^QQ)
    """
    from sage.misc.misc_c import prod

    def get_factor(data):
        # Try the factors in order; the first one accepting ``data`` wins.
        for factor in self.cartesian_factors():
            try:
                return factor, factor(data)
            except (ValueError, TypeError):
                pass
        raise ValueError('%s is not in any of the factors of %s' % (data, self))

    return prod(self.cartesian_injection(*get_factor(f))
                for f in factors)
def cartesian_injection(self, factor, element):
    r"""
    Inject ``element`` into this Cartesian product at the position of the
    given ``factor``, filling all other positions with the factor's one.

    INPUT:

    - ``factor`` -- a growth group (a factor of this Cartesian product).

    - ``element`` -- an element of ``factor``.

    OUTPUT:

    An element of this Cartesian product.

    TESTS::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: G = GrowthGroup('x^ZZ * y^QQ')
        sage: G.cartesian_injection(G.cartesian_factors()[1], 'y^7')
        y^7
    """
    components = []
    for candidate in self.cartesian_factors():
        components.append(element if candidate == factor else candidate.one())
    return self(tuple(components))
def _coerce_map_from_(self, S):
    r"""
    Return whether ``S`` coerces into this growth group.

    INPUT:

    - ``S`` -- a parent.

    OUTPUT:

    ``True`` if a coercion exists; ``None`` (implicitly, the Sage
    convention for "no coercion") otherwise.

    TESTS::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: A = GrowthGroup('QQ^x * x^QQ')
        sage: B = GrowthGroup('QQ^x * x^ZZ')
        sage: A.has_coerce_map_from(B)  # indirect doctest
        True
        sage: B.has_coerce_map_from(A)  # indirect doctest
        False
    """
    if CartesianProductPoset.has_coerce_map_from(self, S):
        return True
    elif isinstance(S, GenericProduct):
        factors = S.cartesian_factors()
    else:
        factors = (S,)
    # S coerces if each of its factors coerces into some factor of self.
    if all(any(g.has_coerce_map_from(f) for g in self.cartesian_factors())
           for f in factors):
        return True
def _pushout_(self, other):
    r"""
    Construct the pushout of this and the other growth group. This is
    called by :func:`sage.categories.pushout.pushout`.

    The factors of both groups are walked in parallel, grouped by
    variable name; factors over the same variable are merged (overlap
    merge, common parent, or merge of their sub-factors), factors over
    distinct variables are kept from whichever side has them.

    TESTS::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: from sage.categories.pushout import pushout
        sage: A = GrowthGroup('QQ^x * x^ZZ')
        sage: B = GrowthGroup('x^ZZ * log(x)^ZZ')
        sage: pushout(A, B)
        Growth Group QQ^x * x^ZZ * log(x)^ZZ
    """
    from .growth_group import GenericGrowthGroup, AbstractGrowthGroupFunctor
    from .misc import merge_overlapping
    from .misc import underlying_class

    Sfactors = self.cartesian_factors()
    # Normalize ``other`` to a tuple of factors (single growth groups and
    # growth-group-functor constructions count as one factor).
    if isinstance(other, GenericProduct):
        Ofactors = other.cartesian_factors()
    elif isinstance(other, GenericGrowthGroup):
        Ofactors = (other,)
    elif (other.construction() is not None and
          isinstance(other.construction()[0], AbstractGrowthGroupFunctor)):
        Ofactors = (other,)
    else:
        return

    def pushout_univariate_factors(self, other, var, Sfactors, Ofactors):
        # Strategy 1: the factor sequences overlap (one extends the other).
        try:
            return merge_overlapping(
                Sfactors, Ofactors,
                lambda f: (underlying_class(f), f._var_.var_repr))
        except ValueError:
            pass

        # Strategy 2: a single common parent for all factors of this variable.
        cm = sage.structure.element.get_coercion_model()
        try:
            Z = cm.common_parent(*Sfactors+Ofactors)
            return (Z,), (Z,)
        except TypeError:
            pass

        def subfactors(F):
            # Flatten nested Cartesian products into atomic factors.
            for f in F:
                if isinstance(f, GenericProduct):
                    for g in subfactors(f.cartesian_factors()):
                        yield g
                else:
                    yield f

        # Strategy 3: overlap merge on the flattened sub-factors.
        try:
            return merge_overlapping(
                tuple(subfactors(Sfactors)), tuple(subfactors(Ofactors)),
                lambda f: (underlying_class(f), f._var_.var_repr))
        except ValueError:
            pass

        from sage.structure.coerce_exceptions import CoercionException
        raise CoercionException(
            'Cannot construct the pushout of %s and %s: The factors '
            'with variables %s are not overlapping, '
            'no common parent was found, and '
            'splitting the factors was unsuccessful.' % (self, other, var))

    # A wrapper around an iterator that stores additional intermediate data.
    # This deviates slightly from the iterator protocol:
    # At the end of the iteration the data is reset to None instead
    # of raising a StopIteration.
    class it:
        def __init__(self, it):
            self.it = it
            self.var = None
            self.factors = None
        def next_custom(self):
            try:
                self.var, factors = next(self.it)
                self.factors = tuple(factors)
            except StopIteration:
                self.var = None
                self.factors = tuple()

    from itertools import groupby
    S = it(groupby(Sfactors, key=lambda k: k.variable_names()))
    O = it(groupby(Ofactors, key=lambda k: k.variable_names()))

    # Merge-walk both per-variable sequences in variable order.
    # NOTE(review): the comparisons ``S.var < O.var`` / ``S.var > O.var``
    # can involve ``None`` once one side is exhausted; that ordering is a
    # Python-2 behavior — confirm before porting to Python 3.
    newS = []
    newO = []
    S.next_custom()
    O.next_custom()
    while S.var is not None or O.var is not None:
        if S.var is not None and S.var < O.var:
            newS.extend(S.factors)
            newO.extend(S.factors)
            S.next_custom()
        elif O.var is not None and S.var > O.var:
            newS.extend(O.factors)
            newO.extend(O.factors)
            O.next_custom()
        else:
            SL, OL = pushout_univariate_factors(self, other, S.var,
                                                S.factors, O.factors)
            newS.extend(SL)
            newO.extend(OL)
            S.next_custom()
            O.next_custom()

    assert(len(newS) == len(newO))

    if (len(Sfactors) == len(newS) and
        len(Ofactors) == len(newO)):
        # We had already all factors in each of self and
        # other, thus splitting it in subproblems (one for
        # each factor) is the strategy to use. If a pushout is
        # possible :func:`sage.categories.pushout.pushout`
        # will manage this by itself.
        return

    from sage.categories.pushout import pushout
    from sage.categories.cartesian_product import cartesian_product
    return pushout(cartesian_product(newS), cartesian_product(newO))
def gens_monomial(self):
    r"""
    Return a tuple containing monomial generators of this growth group.

    OUTPUT:

    A tuple containing elements of this growth group.

    .. NOTE::

        This concatenates the ``gens_monomial()`` of the individual
        factors, each injected into this Cartesian product.

    EXAMPLES::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: G = GrowthGroup('x^ZZ * log(x)^ZZ * y^QQ * log(z)^ZZ')
        sage: G.gens_monomial()
        (x, y)
    """
    generators = tuple()
    for factor in self.cartesian_factors():
        generators += tuple(self.cartesian_injection(factor, gen)
                            for gen in factor.gens_monomial())
    return generators
def variable_names(self):
    r"""
    Return the names of the variables of this growth group.

    OUTPUT:

    A tuple of strings.

    EXAMPLES::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: GrowthGroup('x^ZZ * log(x)^ZZ * y^QQ * log(z)^ZZ').variable_names()
        ('x', 'y', 'z')
    """
    collected = tuple()
    for factor in self.cartesian_factors():
        collected += factor.variable_names()
    # Drop consecutive duplicates: factors over the same variable are
    # adjacent, so this yields each variable exactly once.
    deduped = []
    for name in collected:
        if not deduped or deduped[-1] != name:
            deduped.append(name)
    return tuple(deduped)
    class Element(CartesianProductPoset.Element):
        # Reuse the generic "is smaller than 1" test from growth_group.
        from .growth_group import _is_lt_one_
        is_lt_one = _is_lt_one_
def _repr_(self, latex=False):
    r"""
    Return a representation string for this Cartesian product element.

    INPUT:

    - ``latex`` -- (default: ``False``) a boolean. If set, then
      LaTeX-output is returned.

    OUTPUT:

    A string.

    EXAMPLES::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: P = GrowthGroup('x^QQ')
        sage: L = GrowthGroup('log(x)^ZZ')
        sage: cartesian_product([P, L], order='lex').an_element()._repr_()
        'x^(1/2)*log(x)'
    """
    if latex:
        from sage.misc.latex import latex as latex_repr
        render = latex_repr
        separator = ' '
    else:
        render = repr
        separator = '*'
    # Components equal to 1 are suppressed; an all-one product prints '1'.
    parts = [render(component) for component in self.value
             if not component.is_one()]
    joined = separator.join(parts)
    return joined if joined else '1'
def _latex_(self):
    r"""
    Return a LaTeX representation string for this Cartesian product
    element (delegates to :meth:`_repr_` with ``latex=True``).

    OUTPUT:

    A string.

    TESTS::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: latex(GrowthGroup('QQ^n * n^QQ').an_element())  # indirect doctest
        \left(\frac{1}{2}\right)^{n} n^{\frac{1}{2}}
    """
    return self._repr_(latex=True)
def __pow__(self, exponent):
    r"""
    Raise this growth element to the given ``exponent``, component-wise.

    INPUT:

    - ``exponent`` -- a number.

    OUTPUT:

    A growth element (possibly of a larger parent, e.g. a rational
    exponent on an integer-exponent group).

    EXAMPLES::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: G = GrowthGroup('x^QQ * log(x)^QQ'); x = G('x')
        sage: (x^(21/5) * log(x)^7)^(1/42)  # indirect doctest
        x^(1/10)*log(x)^(1/6)
    """
    powered = tuple(component ** exponent
                    for component in self.cartesian_factors())
    return self.parent()._create_element_in_extension_(powered)
def factors(self):
    r"""
    Return the atomic factors of this growth element. An atomic factor
    cannot be split further and is not the identity (`1`).

    OUTPUT:

    A tuple of growth elements (each living in one Cartesian factor).

    EXAMPLES::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: G = GrowthGroup('x^ZZ * log(x)^ZZ * y^ZZ')
        sage: x, y = G.gens_monomial()
        sage: (x * log(x) * y).factors()
        (x, log(x), y)
        sage: G.one().factors()
        ()
    """
    atomic = tuple()
    # Concatenate each non-trivial component's own atomic factors.
    for component in self.cartesian_factors():
        if not component.is_one():
            atomic += component.factors()
    return atomic
from .growth_group import _log_factor_, _log_
log = _log_
log_factor = _log_factor_
def _log_factor_(self, base=None):
    r"""
    Helper method for calculating the logarithm of the factorization
    of this element.

    INPUT:

    - ``base`` -- the base of the logarithm. If ``None`` (default),
      the natural logarithm is taken.

    OUTPUT:

    A tuple of pairs ``(growth element or raw data, multiplicative
    coefficient)``.

    TESTS::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: G = GrowthGroup('QQ^x * x^ZZ * log(x)^ZZ * y^ZZ * log(y)^ZZ')
        sage: x, y = G.gens_monomial()
        sage: (x * y).log_factor()  # indirect doctest
        ((log(x), 1), (log(y), 1))
    """
    if self.is_one():
        return tuple()
    def try_create_growth(g):
        # Lift a factor's logarithm back into this product if possible;
        # otherwise return it unchanged for the caller to handle.
        try:
            return self.parent()(g)
        except (TypeError, ValueError):
            return g
    try:
        return sum(iter(tuple((try_create_growth(g), c)
                              for g, c in factor._log_factor_(base=base))
                        for factor in self.cartesian_factors()
                        if factor != factor.parent().one()),
                   tuple())
    except (ArithmeticError, TypeError, ValueError) as e:
        from .misc import combine_exceptions
        raise combine_exceptions(
            ArithmeticError('Cannot build log(%s) in %s.' %
                            (self, self.parent())), e)
from .growth_group import _rpow_
rpow = _rpow_
def _rpow_element_(self, base):
    r"""
    Return an element which is the power of ``base`` to this element.

    INPUT:

    - ``base`` -- an element.

    OUTPUT:

    A growth element (its parent may differ from this element's parent).

    .. NOTE::

        A ``ValueError`` is raised if the calculation is not possible
        within this method; the calling method (``rpow``) then takes
        care of the general case.

    TESTS::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: G = GrowthGroup('QQ^x * x^ZZ * log(x)^ZZ')
        sage: lx = log(G('x'))
        sage: lx._rpow_element_('e')
        x
    """
    # Only a single monomial factor (e.g. log(x)) can be exponentiated here.
    factors = self.factors()
    if len(factors) != 1:
        raise ValueError  # calling method has to deal with it...
    from .growth_group import MonomialGrowthGroup
    factor = factors[0]
    if not isinstance(factor.parent(), MonomialGrowthGroup):
        raise ValueError  # calling method has to deal with it...
    return factor._rpow_element_(base)
def exp(self):
    r"""
    Return the exponential of this element.

    OUTPUT:

    A growth element.

    EXAMPLES::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: G = GrowthGroup('x^ZZ * log(x)^ZZ * log(log(x))^ZZ')
        sage: x = G('x')
        sage: exp(log(x))
        x
        sage: exp(log(log(x)))
        log(x)

    An :class:`ArithmeticError` is raised if the result does not live
    in (an extension of) this growth group, e.g. ``exp(x)`` above.
    """
    return self.rpow('e')
def __invert__(self):
    r"""
    Return the multiplicative inverse of this Cartesian product element.

    OUTPUT:

    A growth element.

    .. NOTE::

        The result may live in a larger parent than we started with.

    TESTS::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: G = GrowthGroup('ZZ^x * x^ZZ')
        sage: g = G('2^x * x^3')
        sage: (~g).parent()
        Growth Group QQ^x * x^ZZ
    """
    inverted = tuple(~component for component in self.cartesian_factors())
    return self.parent()._create_element_in_extension_(inverted)
def _substitute_(self, rules):
    r"""
    Substitute the given ``rules`` in this
    Cartesian product growth element.

    INPUT:

    - ``rules`` -- a dictionary.

      The neutral element of the group is replaced by the value
      to key ``'_one_'``.

    OUTPUT:

    An object.

    TESTS::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: G = GrowthGroup('x^QQ * log(x)^QQ')
        sage: G(x^3 * log(x)^5)._substitute_({'x': SR.var('z')})
        z^3*log(z)^5
        sage: _.parent()
        Symbolic Ring
        sage: G(x^3 * log(x)^5)._substitute_({'x': 2.2})  # rel tol 1e-6
        3.24458458945
        sage: _.parent()
        Real Field with 53 bits of precision
        sage: G(1 / x)._substitute_({'x': 0})
        Traceback (most recent call last):
        ...
        ZeroDivisionError: Cannot substitute in x^(-1) in
        Growth Group x^QQ * log(x)^QQ.
        > *previous* ZeroDivisionError: Cannot substitute in x^(-1) in
        Growth Group x^QQ.
        >> *previous* ZeroDivisionError: rational division by zero
        sage: G(1)._substitute_({'_one_': 'one'})
        'one'
    """
    # The neutral element maps directly to the caller-supplied '_one_'
    # value instead of going through the factors.
    if self.is_one():
        return rules['_one_']
    from sage.symbolic.operators import mul_vararg
    try:
        # Substitute into every Cartesian factor, then multiply the
        # results back together.
        return mul_vararg(
            *tuple(x._substitute_(rules)
                   for x in self.cartesian_factors()))
    except (ArithmeticError, TypeError, ValueError) as e:
        # Re-raise with this element attached as context so the error
        # message shows which growth element failed (see doctest above).
        from .misc import substitute_raise_exception
        substitute_raise_exception(self, e)
def _singularity_analysis_(self, var, zeta, precision):
    r"""
    Perform singularity analysis on this growth element.

    INPUT:

    - ``var`` -- a string denoting the variable

    - ``zeta`` -- a number

    - ``precision`` -- an integer

    OUTPUT:

    An asymptotic expansion for `[z^n] f` where `n` is ``var``
    and `f` has this growth element as a singular expansion
    in `T=\frac{1}{1-\frac{z}{\zeta}}\to \infty` where this
    element is a growth element in `T`.

    EXAMPLES::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: G = GrowthGroup('exp(x)^QQ * x^QQ * log(x)^QQ')
        sage: G(x^(1/2))._singularity_analysis_('n', 2, precision=2)
        1/sqrt(pi)*(1/2)^n*n^(-1/2) - 1/8/sqrt(pi)*(1/2)^n*n^(-3/2)
        + O((1/2)^n*n^(-5/2))
        sage: G(log(x))._singularity_analysis_('n', 1, precision=5)
        n^(-1) + O(n^(-3))
        sage: G(x*log(x))._singularity_analysis_('n', 1, precision=5)
        log(n) + euler_gamma + 1/2*n^(-1) + O(n^(-2))

    TESTS::

        sage: G('exp(x)*log(x)')._singularity_analysis_('n', 1, precision=5)
        Traceback (most recent call last):
        ...
        NotImplementedError: singularity analysis of exp(x)*log(x)
        not implemented
        sage: G('exp(x)*x*log(x)')._singularity_analysis_('n', 1, precision=5)
        Traceback (most recent call last):
        ...
        NotImplementedError: singularity analysis of exp(x)*x*log(x)
        not yet implemented since it has more than two factors
        sage: G(1)._singularity_analysis_('n', 2, precision=3)
        Traceback (most recent call last):
        ...
        NotImplementedOZero: The error term in the result is O(0)
        which means 0 for sufficiently large n.
        sage: G('exp(x)')._singularity_analysis_('n', 2, precision=3)
        Traceback (most recent call last):
        ...
        NotImplementedError: singularity analysis of exp(x)
        not implemented
    """
    factors = self.factors()
    if len(factors) == 0:
        # Constant growth: the coefficient expansion is exactly zero,
        # which the asymptotic ring cannot represent; signal O(0).
        # (A previously present import of ``asymptotic_expansions`` in
        # this branch was unused and has been removed.)
        from .misc import NotImplementedOZero
        raise NotImplementedOZero(var=var)
    elif len(factors) == 1:
        # Single factor: delegate directly.
        return factors[0]._singularity_analysis_(
            var=var, zeta=zeta, precision=precision)
    elif len(factors) == 2:
        from .growth_group import MonomialGrowthGroup
        from sage.rings.integer_ring import ZZ
        a, b = factors
        # Only the shape T^alpha * log(T)^beta with a shared variable and
        # integer beta is supported.
        if all(isinstance(f.parent(), MonomialGrowthGroup)
               for f in factors) \
                and a.parent().gens_monomial() \
                and b.parent().gens_logarithmic() \
                and a.parent().variable_name() == \
                b.parent().variable_name():
            if b.exponent not in ZZ:
                raise NotImplementedError(
                    'singularity analysis of {} not implemented '
                    'since exponent {} of {} is not an integer'.format(
                        self, b.exponent, b.parent().gen()))
            from sage.rings.asymptotic.asymptotic_expansion_generators import \
                asymptotic_expansions
            return asymptotic_expansions.SingularityAnalysis(
                var=var, zeta=zeta, alpha=a.exponent,
                beta=ZZ(b.exponent), delta=0,
                precision=precision, normalized=False)
        else:
            raise NotImplementedError(
                'singularity analysis of {} not implemented'.format(self))
    else:
        raise NotImplementedError(
            'singularity analysis of {} not yet implemented '
            'since it has more than two factors'.format(self))
def variable_names(self):
    r"""
    Return the names of the variables of this growth element.

    OUTPUT:

    A tuple of strings.

    EXAMPLES::

        sage: from sage.rings.asymptotic.growth_group import GrowthGroup
        sage: G = GrowthGroup('QQ^m * m^QQ * log(n)^ZZ')
        sage: G('2^m * m^4 * log(n)').variable_names()
        ('m', 'n')
        sage: G('2^m * m^4').variable_names()
        ('m',)
        sage: G('log(n)').variable_names()
        ('n',)
        sage: G('m^3').variable_names()
        ('m',)
        sage: G('m^0').variable_names()
        ()
    """
    from itertools import chain, groupby
    # Chain the factors' name tuples lazily. The previous implementation
    # used ``sum(..., tuple())`` (quadratic in the number of factors) and
    # shadowed the builtin ``vars``.
    names = chain.from_iterable(factor.variable_names()
                                for factor in self.factors())
    # Factors of the same variable are adjacent, so collapsing
    # consecutive duplicates yields each name exactly once.
    return tuple(name for name, _ in groupby(names))
# Factory used by the category framework when building Cartesian
# products of growth groups.
CartesianProduct = CartesianProductGrowthGroups
class UnivariateProduct(GenericProduct):
    r"""
    A Cartesian product of growth groups with the same variables.

    .. NOTE::

        A univariate product of growth groups is ordered
        lexicographically. This is motivated by the assumption
        that univariate growth groups can be ordered in a chain
        with respect to the growth they model (e.g.
        ``x^ZZ * log(x)^ZZ``: polynomial growth dominates
        logarithmic growth).

    .. SEEALSO::

        :class:`MultivariateProduct`,
        :class:`GenericProduct`.
    """

    def __init__(self, sets, category, **kwargs):
        r"""
        See :class:`UnivariateProduct` for details.

        TEST::

            sage: from sage.rings.asymptotic.growth_group import GrowthGroup
            sage: type(GrowthGroup('x^ZZ * log(x)^ZZ'))  # indirect doctest
            <class 'sage.rings.asymptotic.growth_group_cartesian.UnivariateProduct_with_category'>
        """
        # 'lex' selects the lexicographic order described in the class
        # docstring.
        super(UnivariateProduct, self).__init__(
            sets, category, order='lex', **kwargs)

    # Factory used by the category framework for nested Cartesian
    # products.
    CartesianProduct = CartesianProductGrowthGroups
class MultivariateProduct(GenericProduct):
    r"""
    A Cartesian product of growth groups with pairwise disjoint
    (or equal) variable sets.

    .. NOTE::

        A multivariate product of growth groups is ordered by
        means of the product order, i.e. component-wise. This is
        motivated by the assumption that different variables are
        considered to be independent (e.g. ``x^ZZ * y^ZZ``).

    .. SEEALSO::

        :class:`UnivariateProduct`,
        :class:`GenericProduct`.
    """

    def __init__(self, sets, category, **kwargs):
        r"""
        See :class:`MultivariateProduct` for details.

        TEST::

            sage: from sage.rings.asymptotic.growth_group import GrowthGroup
            sage: type(GrowthGroup('x^ZZ * y^ZZ'))  # indirect doctest
            <class 'sage.rings.asymptotic.growth_group_cartesian.MultivariateProduct_with_category'>
        """
        # 'product' selects the component-wise order described in the
        # class docstring.
        super(MultivariateProduct, self).__init__(
            sets, category, order='product', **kwargs)

    # Factory used by the category framework for nested Cartesian
    # products.
    CartesianProduct = CartesianProductGrowthGroups
| 33.99012 | 112 | 0.532036 |
ace46f64b2b549fc74946df1f11af8fcb2f7ce04 | 637 | py | Python | src/iris/service/content/petition/acl.py | iris-dni/iris-backend | 680aadb1d9dd02e031b1902a4f9ef19440959465 | [
"Apache-2.0"
] | 2 | 2017-08-28T14:26:54.000Z | 2017-09-15T20:49:35.000Z | src/iris/service/content/petition/acl.py | iris-dni/iris-backend | 680aadb1d9dd02e031b1902a4f9ef19440959465 | [
"Apache-2.0"
] | 27 | 2016-09-16T09:44:44.000Z | 2017-03-27T12:05:31.000Z | src/iris/service/content/petition/acl.py | iris-dni/iris-backend | 680aadb1d9dd02e031b1902a4f9ef19440959465 | [
"Apache-2.0"
] | null | null | null | from pyramid import security
from iris.service.security import acl
from iris.service.rest.auth import BaseAuthFactory
class PublicPetitionServiceAuthFactory(BaseAuthFactory):
    """Allows the admin role to get access to the full API
    """

    # Only the Admin role is granted petition administration rights.
    __acl__ = [
        (security.Allow, acl.Roles.Admin, acl.Permissions.AdminPetition)
    ]
class SupportersTokenServiceAuthFactory(BaseAuthFactory):
    """Allows access with API key permission only
    """

    # NOTE(review): despite the docstring, the Admin role is granted
    # ListSupporters as well as API-key users.
    __acl__ = [
        (security.Allow, acl.Roles.Admin, acl.Permissions.ListSupporters),
        (security.Allow, acl.Roles.ApiKeyUser, acl.Permissions.ListSupporters)
    ]
| 26.541667 | 78 | 0.728414 |
ace46fba372e0b6ee27959076e1e855e77c4305c | 22,223 | py | Python | ppocr/utils/e2e_utils/extract_textpoint_slow.py | nan-wang/PaddleOCR | 31b06a2fd19f877a09acaf658387bd919c289b8e | [
"Apache-2.0"
] | 20,401 | 2020-05-08T10:56:13.000Z | 2022-03-31T23:34:38.000Z | ppocr/utils/e2e_utils/extract_textpoint_slow.py | justld/PaddleOCR | 09604c38e42591c240771edbbff43a6dd7ebf592 | [
"Apache-2.0"
] | 4,988 | 2020-05-10T08:19:41.000Z | 2022-03-31T17:57:11.000Z | ppocr/utils/e2e_utils/extract_textpoint_slow.py | justld/PaddleOCR | 09604c38e42591c240771edbbff43a6dd7ebf592 | [
"Apache-2.0"
] | 4,479 | 2020-05-08T11:12:13.000Z | 2022-03-31T11:55:28.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains various CTC decoders."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import math
import numpy as np
from itertools import groupby
from skimage.morphology._skeletonize import thin
def get_dict(character_dict_path):
    """Load a character dictionary file into a list of characters.

    Each line contributes its characters, in file order, with trailing
    newline / carriage-return characters stripped.

    Args:
        character_dict_path (str): path to a UTF-8 encoded dictionary
            file, one group of characters per line.

    Returns:
        list[str]: every character of the dictionary, one per element.
    """
    with open(character_dict_path, "rb") as fin:
        lines = fin.readlines()
    # Collect characters into a list directly instead of accumulating a
    # big string with ``+=`` (worst-case quadratic).
    return [ch
            for line in lines
            for ch in line.decode('utf-8').strip("\n").strip("\r\n")]
def point_pair2poly(point_pair_list):
    """
    Transfer vertical point pairs into polygon points in clockwise order.

    Args:
        point_pair_list: sequence of (top_point, bottom_point) pairs,
            each point a length-2 array-like.

    Returns:
        tuple: (poly, pair_info) where ``poly`` is an (2n) x 2 array with
        the top points first and the bottom points reversed after them,
        and ``pair_info`` is (max, min, mean) of the pair lengths.
    """
    # Comprehension instead of the previous manual append loop.
    pair_length_list = np.array(
        [np.linalg.norm(pair[0] - pair[1]) for pair in point_pair_list])
    pair_info = (pair_length_list.max(), pair_length_list.min(),
                 pair_length_list.mean())

    point_num = len(point_pair_list) * 2
    point_list = [0] * point_num
    for idx, point_pair in enumerate(point_pair_list):
        # Top edge fills forward, bottom edge fills backward, producing a
        # clockwise polygon.
        point_list[idx] = point_pair[0]
        point_list[point_num - 1 - idx] = point_pair[1]
    return np.array(point_list).reshape(-1, 2), pair_info
def shrink_quad_along_width(quad, begin_width_ratio=0., end_width_ratio=1.):
    """
    Interpolate a quad along its width.

    quad: 4 x 2 array ordered (top-left, top-right, bottom-right,
    bottom-left). The two width ratios pick the start/end positions
    along the top and bottom edges; the full quad corresponds to
    (0.0, 1.0).
    """
    ratios = np.array(
        [[begin_width_ratio], [end_width_ratio]], dtype=np.float32)
    # Interpolate along the top edge (p0 -> p1) and bottom edge (p3 -> p2).
    top_edge = quad[0] + (quad[1] - quad[0]) * ratios
    bottom_edge = quad[3] + (quad[2] - quad[3]) * ratios
    # Reassemble in the same clockwise corner order as the input.
    return np.array([top_edge[0], top_edge[1], bottom_edge[1], bottom_edge[0]])
def expand_poly_along_width(poly, shrink_ratio_of_width=0.3):
    """
    expand poly along width.

    poly: (2n) x 2 array; presumably the first half runs along one edge
    and the second half back along the other, as built by
    point_pair2poly -- TODO confirm. The left-most and right-most quads
    are stretched outwards in place by ``shrink_ratio_of_width`` times
    their height/width ratio.
    """
    point_num = poly.shape[0]
    # Left-most quad: first two points of the top edge plus the last two
    # points (the matching bottom-edge points).
    left_quad = np.array(
        [poly[0], poly[1], poly[-2], poly[-1]], dtype=np.float32)
    # A negative begin ratio starts the interpolation *before* point 0,
    # which expands the quad leftwards.
    left_ratio = -shrink_ratio_of_width * np.linalg.norm(left_quad[0] - left_quad[3]) / \
        (np.linalg.norm(left_quad[0] - left_quad[1]) + 1e-6)
    left_quad_expand = shrink_quad_along_width(left_quad, left_ratio, 1.0)
    # Right-most quad: the two middle points of each edge.
    right_quad = np.array(
        [
            poly[point_num // 2 - 2], poly[point_num // 2 - 1],
            poly[point_num // 2], poly[point_num // 2 + 1]
        ],
        dtype=np.float32)
    # An end ratio > 1 extends the interpolation beyond point 1,
    # expanding the quad rightwards.
    right_ratio = 1.0 + \
        shrink_ratio_of_width * np.linalg.norm(right_quad[0] - right_quad[3]) / \
        (np.linalg.norm(right_quad[0] - right_quad[1]) + 1e-6)
    right_quad_expand = shrink_quad_along_width(right_quad, 0.0, right_ratio)
    # Write the expanded extreme corners back into the polygon (mutates
    # and returns the input array).
    poly[0] = left_quad_expand[0]
    poly[-1] = left_quad_expand[-1]
    poly[point_num // 2 - 1] = right_quad_expand[1]
    poly[point_num // 2] = right_quad_expand[2]
    return poly
def softmax(logits):
    """
    Row-wise softmax.

    logits: N x d array. Each row is shifted by its maximum before
    exponentiation for numerical stability.
    """
    shifted = logits - np.max(logits, axis=1, keepdims=True)
    exp_values = np.exp(shifted)
    return exp_values / np.sum(exp_values, axis=1, keepdims=True)
def get_keep_pos_idxs(labels, remove_blank=None):
    """
    Collapse runs of duplicate labels, returning the kept labels and the
    index of the middle element of each kept run.

    The value of ``remove_blank`` should be [None, 95]: runs equal to it
    are dropped entirely (pass None to keep everything).
    """
    consumed = 0
    keep_pos_idx_list = []
    keep_char_idx_list = []
    for label, run in groupby(labels):
        run_length = len(list(run))
        if label != remove_blank:
            # Representative position: the middle of the current run.
            keep_pos_idx_list.append(consumed + run_length // 2)
            keep_char_idx_list.append(label)
        consumed += run_length
    return keep_char_idx_list, keep_pos_idx_list
def remove_blank(labels, blank=0):
    """Drop every occurrence of the blank symbol from a label sequence."""
    return [label for label in labels if label != blank]
def insert_blank(labels, blank=0):
    """Interleave blanks around every label: [a, b] -> [blank, a, blank, b, blank]."""
    interleaved = [blank]
    for label in labels:
        interleaved.extend((label, blank))
    return interleaved
def ctc_greedy_decoder(probs_seq, blank=95, keep_blank_in_idxs=True):
    """
    CTC greedy (best path) decoder.

    Picks the argmax label per step, collapses duplicate runs, and strips
    blanks from the decoded string. When ``keep_blank_in_idxs`` is True,
    blank runs still contribute their positions to the returned index
    list.
    """
    best_path = np.argmax(np.array(probs_seq), axis=1)
    blank_to_remove = None if keep_blank_in_idxs else blank
    deduped_labels, keep_idx_list = get_keep_pos_idxs(
        best_path, remove_blank=blank_to_remove)
    dst_str = remove_blank(deduped_labels, blank=blank)
    return dst_str, keep_idx_list
def instance_ctc_greedy_decoder(gather_info,
                                logits_map,
                                keep_blank_in_idxs=True):
    """
    Decode one TCL instance.

    gather_info: [[y, x], [y, x], ...] pixel positions of the instance.
    logits_map: H x W x (n_chars + 1) character logits; the last channel
    is the CTC blank.
    """
    _, _, num_classes = logits_map.shape
    ys, xs = zip(*gather_info)
    # Gather the logit vector of every instance pixel: n x num_classes.
    logits_seq = logits_map[list(ys), list(xs)]
    probs_seq = softmax(logits_seq)
    decoded_str, keep_idx_list = ctc_greedy_decoder(
        probs_seq, blank=num_classes - 1, keep_blank_in_idxs=keep_blank_in_idxs)
    # Map kept sequence indices back to pixel positions.
    keep_gather_list = [gather_info[idx] for idx in keep_idx_list]
    return decoded_str, keep_gather_list
def ctc_decoder_for_image(gather_info_list, logits_map,
                          keep_blank_in_idxs=True):
    """
    Run the instance CTC decoder over every instance of an image.

    Note: this runs sequentially in a single process (the previous
    docstring incorrectly claimed multiple processes).

    Returns:
        list of (decoded_str, keep_gather_list) tuples, one per entry of
        ``gather_info_list``.
    """
    # Comprehension instead of the previous manual append loop.
    return [
        instance_ctc_greedy_decoder(
            gather_info, logits_map, keep_blank_in_idxs=keep_blank_in_idxs)
        for gather_info in gather_info_list
    ]
def sort_with_direction(pos_list, f_direction):
"""
f_direction: h x w x 2
pos_list: [[y, x], [y, x], [y, x] ...]
"""
def sort_part_with_direction(pos_list, point_direction):
pos_list = np.array(pos_list).reshape(-1, 2)
point_direction = np.array(point_direction).reshape(-1, 2)
average_direction = np.mean(point_direction, axis=0, keepdims=True)
pos_proj_leng = np.sum(pos_list * average_direction, axis=1)
sorted_list = pos_list[np.argsort(pos_proj_leng)].tolist()
sorted_direction = point_direction[np.argsort(pos_proj_leng)].tolist()
return sorted_list, sorted_direction
pos_list = np.array(pos_list).reshape(-1, 2)
point_direction = f_direction[pos_list[:, 0], pos_list[:, 1]] # x, y
point_direction = point_direction[:, ::-1] # x, y -> y, x
sorted_point, sorted_direction = sort_part_with_direction(pos_list,
point_direction)
point_num = len(sorted_point)
if point_num >= 16:
middle_num = point_num // 2
first_part_point = sorted_point[:middle_num]
first_point_direction = sorted_direction[:middle_num]
sorted_fist_part_point, sorted_fist_part_direction = sort_part_with_direction(
first_part_point, first_point_direction)
last_part_point = sorted_point[middle_num:]
last_point_direction = sorted_direction[middle_num:]
sorted_last_part_point, sorted_last_part_direction = sort_part_with_direction(
last_part_point, last_point_direction)
sorted_point = sorted_fist_part_point + sorted_last_part_point
sorted_direction = sorted_fist_part_direction + sorted_last_part_direction
return sorted_point, np.array(sorted_direction)
def add_id(pos_list, image_id=0):
    """
    Prefix every (y, x) position with its image id, for inference
    feature gathering.
    """
    return [(image_id, pos[0], pos[1]) for pos in pos_list]
def sort_and_expand_with_direction(pos_list, f_direction):
    """
    Sort TCL points along the text direction and extend both ends.

    f_direction: h x w x 2 direction map stored as (x, y) per pixel.
    pos_list: [[y, x], [y, x], ...]
    """
    h, w, _ = f_direction.shape
    sorted_list, point_direction = sort_with_direction(pos_list, f_direction)

    # expand along
    point_num = len(sorted_list)
    # Average the directions near each end (at least 2 points, up to a
    # third of the instance); the left end walks backwards (negated).
    sub_direction_len = max(point_num // 3, 2)
    left_direction = point_direction[:sub_direction_len, :]
    right_dirction = point_direction[point_num - sub_direction_len:, :]

    left_average_direction = -np.mean(left_direction, axis=0, keepdims=True)
    left_average_len = np.linalg.norm(left_average_direction)
    left_start = np.array(sorted_list[0])
    left_step = left_average_direction / (left_average_len + 1e-6)  # unit step

    right_average_direction = np.mean(right_dirction, axis=0, keepdims=True)
    right_average_len = np.linalg.norm(right_average_direction)
    right_step = right_average_direction / (right_average_len + 1e-6)  # unit step
    right_start = np.array(sorted_list[-1])

    # Points appended per side: 15% of the mean end-direction magnitude,
    # at least 1.
    append_num = max(
        int((left_average_len + right_average_len) / 2.0 * 0.15), 1)
    left_list = []
    right_list = []
    for i in range(append_num):
        # NOTE(review): only the upper bounds are checked here; a
        # negative rounded coordinate would index from the end of the
        # map -- presumably the walk stays inside bounds, verify.
        ly, lx = np.round(left_start + left_step * (i + 1)).flatten().astype(
            'int32').tolist()
        if ly < h and lx < w and (ly, lx) not in left_list:
            left_list.append((ly, lx))
        ry, rx = np.round(right_start + right_step * (i + 1)).flatten().astype(
            'int32').tolist()
        if ry < h and rx < w and (ry, rx) not in right_list:
            right_list.append((ry, rx))

    # Left extension is reversed so the whole list stays ordered along
    # the text direction.
    all_list = left_list[::-1] + sorted_list + right_list
    return all_list
def sort_and_expand_with_direction_v2(pos_list, f_direction, binary_tcl_map):
    """
    Like sort_and_expand_with_direction, but stop extending an end as
    soon as it leaves the TCL foreground given by ``binary_tcl_map``.

    f_direction: h x w x 2 direction map stored as (x, y) per pixel.
    pos_list: [[y, x], [y, x], ...]
    binary_tcl_map: h x w
    """
    h, w, _ = f_direction.shape
    sorted_list, point_direction = sort_with_direction(pos_list, f_direction)

    # expand along
    point_num = len(sorted_list)
    # Average the directions near each end; the left end walks backwards.
    sub_direction_len = max(point_num // 3, 2)
    left_direction = point_direction[:sub_direction_len, :]
    right_dirction = point_direction[point_num - sub_direction_len:, :]

    left_average_direction = -np.mean(left_direction, axis=0, keepdims=True)
    left_average_len = np.linalg.norm(left_average_direction)
    left_start = np.array(sorted_list[0])
    left_step = left_average_direction / (left_average_len + 1e-6)  # unit step

    right_average_direction = np.mean(right_dirction, axis=0, keepdims=True)
    right_average_len = np.linalg.norm(right_average_direction)
    right_step = right_average_direction / (right_average_len + 1e-6)  # unit step
    right_start = np.array(sorted_list[-1])

    append_num = max(
        int((left_average_len + right_average_len) / 2.0 * 0.15), 1)
    # Allow up to twice the nominal extension, but stop at the first
    # pixel outside the thresholded TCL map.
    max_append_num = 2 * append_num
    left_list = []
    right_list = []
    for i in range(max_append_num):
        ly, lx = np.round(left_start + left_step * (i + 1)).flatten().astype(
            'int32').tolist()
        if ly < h and lx < w and (ly, lx) not in left_list:
            if binary_tcl_map[ly, lx] > 0.5:
                left_list.append((ly, lx))
            else:
                break
    for i in range(max_append_num):
        ry, rx = np.round(right_start + right_step * (i + 1)).flatten().astype(
            'int32').tolist()
        if ry < h and rx < w and (ry, rx) not in right_list:
            if binary_tcl_map[ry, rx] > 0.5:
                right_list.append((ry, rx))
            else:
                break

    # Left extension is reversed so the whole list stays ordered along
    # the text direction.
    all_list = left_list[::-1] + sorted_list + right_list
    return all_list
def generate_pivot_list_curved(p_score,
                               p_char_maps,
                               f_direction,
                               score_thresh=0.5,
                               is_expand=True,
                               is_backbone=False,
                               image_id=0):
    """
    return center point and end point of TCL instance; filter with the char maps;

    p_score: TCL score map, indexed [0] to drop the leading axis.
    p_char_maps: C x h x w character logits.
    f_direction: 2 x h x w direction map, transposed to h x w x 2 here.
    """
    p_score = p_score[0]
    f_direction = f_direction.transpose(1, 2, 0)
    p_tcl_map = (p_score > score_thresh) * 1.0
    # Thin the thresholded TCL map to a 1-pixel skeleton, then label each
    # connected skeleton component as one text instance.
    skeleton_map = thin(p_tcl_map)
    instance_count, instance_label_map = cv2.connectedComponents(
        skeleton_map.astype(np.uint8), connectivity=8)

    # get TCL Instance
    all_pos_yxs = []
    center_pos_yxs = []
    end_points_yxs = []
    instance_center_pos_yxs = []
    pred_strs = []
    if instance_count > 0:
        # Label 0 is background, so instance ids start at 1.
        for instance_id in range(1, instance_count):
            pos_list = []
            ys, xs = np.where(instance_label_map == instance_id)
            pos_list = list(zip(ys, xs))
            ### FIX-ME, eliminate outlier
            if len(pos_list) < 3:
                continue

            if is_expand:
                # Sort along the text direction and extend both ends
                # while staying inside the TCL foreground.
                pos_list_sorted = sort_and_expand_with_direction_v2(
                    pos_list, f_direction, p_tcl_map)
            else:
                pos_list_sorted, _ = sort_with_direction(pos_list, f_direction)
            all_pos_yxs.append(pos_list_sorted)

    # use decoder to filter backgroud points.
    p_char_maps = p_char_maps.transpose([1, 2, 0])
    decode_res = ctc_decoder_for_image(
        all_pos_yxs, logits_map=p_char_maps, keep_blank_in_idxs=True)
    for decoded_str, keep_yxs_list in decode_res:
        if is_backbone:
            # Backbone mode: keep per-instance point lists tagged with
            # the image id, plus the decoded strings.
            keep_yxs_list_with_id = add_id(keep_yxs_list, image_id=image_id)
            instance_center_pos_yxs.append(keep_yxs_list_with_id)
            pred_strs.append(decoded_str)
        else:
            # Otherwise flatten: all center points plus each instance's
            # two end points.
            end_points_yxs.extend((keep_yxs_list[0], keep_yxs_list[-1]))
            center_pos_yxs.extend(keep_yxs_list)

    if is_backbone:
        return pred_strs, instance_center_pos_yxs
    else:
        return center_pos_yxs, end_points_yxs
def generate_pivot_list_horizontal(p_score,
                                   p_char_maps,
                                   f_direction,
                                   score_thresh=0.5,
                                   is_backbone=False,
                                   image_id=0):
    """
    return center point and end point of TCL instance; filter with the char maps;

    Variant for horizontal/vertical (non-curved) text: each instance is
    reduced to one center point per column (or per row if taller than
    wide).
    """
    p_score = p_score[0]
    f_direction = f_direction.transpose(1, 2, 0)
    p_tcl_map_bi = (p_score > score_thresh) * 1.0
    instance_count, instance_label_map = cv2.connectedComponents(
        p_tcl_map_bi.astype(np.uint8), connectivity=8)

    # get TCL Instance
    all_pos_yxs = []
    center_pos_yxs = []
    end_points_yxs = []
    instance_center_pos_yxs = []

    if instance_count > 0:
        # Label 0 is background, so instance ids start at 1.
        for instance_id in range(1, instance_count):
            pos_list = []
            ys, xs = np.where(instance_label_map == instance_id)
            pos_list = list(zip(ys, xs))

            ### FIX-ME, eliminate outlier
            if len(pos_list) < 5:
                continue

            # add rule here
            # NOTE(review): is_h_angle is computed but never used below;
            # only is_h_len decides the orientation.
            main_direction = extract_main_direction(pos_list,
                                                    f_direction)  # y x
            reference_directin = np.array([0, 1]).reshape([-1, 2])  # y x
            is_h_angle = abs(np.sum(
                main_direction * reference_directin)) < math.cos(math.pi / 180 *
                                                                 70)

            point_yxs = np.array(pos_list)
            max_y, max_x = np.max(point_yxs, axis=0)
            min_y, min_x = np.min(point_yxs, axis=0)
            # Treat the instance as horizontal when its height is less
            # than 1.5x its width.
            is_h_len = (max_y - min_y) < 1.5 * (max_x - min_x)

            pos_list_final = []
            if is_h_len:
                # Horizontal: one center point per occupied column (mean
                # row index of the instance pixels in that column).
                xs = np.unique(xs)
                for x in xs:
                    ys = instance_label_map[:, x].copy().reshape((-1, ))
                    y = int(np.where(ys == instance_id)[0].mean())
                    pos_list_final.append((y, x))
            else:
                # Vertical: one center point per occupied row.
                ys = np.unique(ys)
                for y in ys:
                    xs = instance_label_map[y, :].copy().reshape((-1, ))
                    x = int(np.where(xs == instance_id)[0].mean())
                    pos_list_final.append((y, x))

            pos_list_sorted, _ = sort_with_direction(pos_list_final,
                                                     f_direction)
            all_pos_yxs.append(pos_list_sorted)

    # use decoder to filter backgroud points.
    p_char_maps = p_char_maps.transpose([1, 2, 0])
    decode_res = ctc_decoder_for_image(
        all_pos_yxs, logits_map=p_char_maps, keep_blank_in_idxs=True)
    for decoded_str, keep_yxs_list in decode_res:
        if is_backbone:
            # Backbone mode: per-instance point lists tagged with the
            # image id.
            keep_yxs_list_with_id = add_id(keep_yxs_list, image_id=image_id)
            instance_center_pos_yxs.append(keep_yxs_list_with_id)
        else:
            # Otherwise flatten: all center points plus the end points.
            end_points_yxs.extend((keep_yxs_list[0], keep_yxs_list[-1]))
            center_pos_yxs.extend(keep_yxs_list)

    if is_backbone:
        return instance_center_pos_yxs
    else:
        return center_pos_yxs, end_points_yxs
def generate_pivot_list_slow(p_score,
                             p_char_maps,
                             f_direction,
                             score_thresh=0.5,
                             is_backbone=False,
                             is_curved=True,
                             image_id=0):
    """
    Dispatch to the curved or horizontal pivot-list extractor.

    The curved path always expands the sorted point lists
    (is_expand=True); all other arguments are forwarded unchanged.
    """
    if is_curved:
        return generate_pivot_list_curved(
            p_score,
            p_char_maps,
            f_direction,
            score_thresh=score_thresh,
            is_expand=True,
            is_backbone=is_backbone,
            image_id=image_id)
    return generate_pivot_list_horizontal(
        p_score,
        p_char_maps,
        f_direction,
        score_thresh=score_thresh,
        is_backbone=is_backbone,
        image_id=image_id)
# for refine module
def extract_main_direction(pos_list, f_direction):
    """
    Return the unit-normalised mean direction over the given points.

    f_direction: h x w x 2 direction map stored as (x, y) per pixel.
    pos_list: [[y, x], ...]

    Returns a 1 x 2 array in (y, x) order.
    """
    positions = np.array(pos_list)
    # Gather per-point directions and flip (x, y) -> (y, x).
    directions = f_direction[positions[:, 0], positions[:, 1]][:, ::-1]
    mean_direction = np.mean(directions, axis=0, keepdims=True)
    return mean_direction / (np.linalg.norm(mean_direction) + 1e-6)
def sort_by_direction_with_image_id_deprecated(pos_list, f_direction):
    """
    Sort [id, y, x] triples along the average text direction
    (deprecated; kept for compatibility).

    f_direction: h x w x 2 direction map stored as (x, y) per pixel.
    pos_list: [[id, y, x], [id, y, x], ...]
    """
    triples = np.array(pos_list).reshape(-1, 3)
    coords = triples[:, 1:]
    # Gather per-point directions and flip (x, y) -> (y, x).
    directions = f_direction[coords[:, 0], coords[:, 1]][:, ::-1]
    mean_direction = np.mean(directions, axis=0, keepdims=True)
    projections = np.sum(coords * mean_direction, axis=1)
    return triples[np.argsort(projections)].tolist()
def sort_by_direction_with_image_id(pos_list, f_direction):
    """
    Sort [image_id, y, x] triples along the average text direction.

    f_direction: h x w x 2 direction map stored as (x, y) per pixel.
    pos_list: [[id, y, x], ...]
    """

    def sort_part_with_direction(pos_list_full, point_direction):
        # Sort one chunk by the projection of (y, x) onto the chunk's
        # mean direction; the id column is carried along unchanged.
        pos_list_full = np.array(pos_list_full).reshape(-1, 3)
        pos_list = pos_list_full[:, 1:]
        point_direction = np.array(point_direction).reshape(-1, 2)
        average_direction = np.mean(point_direction, axis=0, keepdims=True)
        pos_proj_leng = np.sum(pos_list * average_direction, axis=1)
        sorted_list = pos_list_full[np.argsort(pos_proj_leng)].tolist()
        sorted_direction = point_direction[np.argsort(pos_proj_leng)].tolist()
        return sorted_list, sorted_direction

    pos_list = np.array(pos_list).reshape(-1, 3)
    point_direction = f_direction[pos_list[:, 1], pos_list[:, 2]]  # x, y
    point_direction = point_direction[:, ::-1]  # x, y -> y, x
    sorted_point, sorted_direction = sort_part_with_direction(pos_list,
                                                              point_direction)

    point_num = len(sorted_point)
    if point_num >= 16:
        # Long instances may bend; re-sort each half with its own local
        # mean direction to stay robust against curvature.
        middle_num = point_num // 2
        first_part_point = sorted_point[:middle_num]
        first_point_direction = sorted_direction[:middle_num]
        sorted_fist_part_point, sorted_fist_part_direction = sort_part_with_direction(
            first_part_point, first_point_direction)

        last_part_point = sorted_point[middle_num:]
        last_point_direction = sorted_direction[middle_num:]
        sorted_last_part_point, sorted_last_part_direction = sort_part_with_direction(
            last_part_point, last_point_direction)
        sorted_point = sorted_fist_part_point + sorted_last_part_point
        sorted_direction = sorted_fist_part_direction + sorted_last_part_direction

    return sorted_point
def generate_pivot_list_tt_inference(p_score,
                                     p_char_maps,
                                     f_direction,
                                     score_thresh=0.5,
                                     is_backbone=False,
                                     is_curved=True,
                                     image_id=0):
    """
    return center point and end point of TCL instance; filter with the char maps;

    Unlike generate_pivot_list_curved, no CTC filtering is applied here:
    the sorted and expanded point lists are returned directly with image
    ids attached. The parameters p_char_maps, is_backbone and is_curved
    are accepted but not used in this function.
    """
    p_score = p_score[0]
    f_direction = f_direction.transpose(1, 2, 0)
    p_tcl_map = (p_score > score_thresh) * 1.0
    # Thin the thresholded TCL map to a 1-pixel skeleton, then label each
    # connected skeleton component as one text instance.
    skeleton_map = thin(p_tcl_map)
    instance_count, instance_label_map = cv2.connectedComponents(
        skeleton_map.astype(np.uint8), connectivity=8)

    # get TCL Instance
    all_pos_yxs = []
    if instance_count > 0:
        # Label 0 is background, so instance ids start at 1.
        for instance_id in range(1, instance_count):
            pos_list = []
            ys, xs = np.where(instance_label_map == instance_id)
            pos_list = list(zip(ys, xs))
            ### FIX-ME, eliminate outlier
            if len(pos_list) < 3:
                continue
            pos_list_sorted = sort_and_expand_with_direction_v2(
                pos_list, f_direction, p_tcl_map)
            pos_list_sorted_with_id = add_id(pos_list_sorted, image_id=image_id)
            all_pos_yxs.append(pos_list_sorted_with_id)
    return all_pos_yxs
| 37.475548 | 91 | 0.622373 |
ace4705f9636f18222ce1ed32544795eaa5ced51 | 7,637 | py | Python | mailosaur/operations/messages_operations.py | skamieniarz/mailosaur-python | c2276296b3c078e11e80d01942910e9968982cd5 | [
"MIT"
] | null | null | null | mailosaur/operations/messages_operations.py | skamieniarz/mailosaur-python | c2276296b3c078e11e80d01942910e9968982cd5 | [
"MIT"
] | null | null | null | mailosaur/operations/messages_operations.py | skamieniarz/mailosaur-python | c2276296b3c078e11e80d01942910e9968982cd5 | [
"MIT"
] | null | null | null | import time
from datetime import datetime, timedelta
from ..models import MessageListResult
from ..models import Message
from ..models import MailosaurException
class MessagesOperations(object):
"""MessagesOperations operations.
"""
def __init__(self, session, base_url):
    # session: requests-compatible HTTP session used for all API calls.
    self.session = session
    # base_url: root URL of the Mailosaur API; endpoint paths are
    # concatenated as "%sapi/..." below, so it must end with a slash.
    self.base_url = base_url
def get(self, server, criteria, timeout=10000, received_after=(datetime.today() - timedelta(hours=1))):
"""Retrieve a message using search criteria.
Returns as soon as a message matching the specified search criteria is
found. This is the most efficient method of looking up a message.
:param server: The identifier of the server hosting the message.
:type server: str
:param criteria: The search criteria to use in order to find a match.
:type criteria: ~mailosaur.models.SearchCriteria
:param timeout: Specify how long to wait for a matching result (in milliseconds).
:type timeout: int
:param received_after: Limits results to only messages received after this date/time.
:type received_after: datetime
:return: Message
:rtype: ~mailosaur.models.Message
:raises:
:class:`MailosaurException<mailosaur.models.MailosaurException>`
"""
# Defaults timeout to 10s, receivedAfter to 1h
if len(server) > 8:
raise Exception("Use get_by_id to retrieve a message using its identifier")
result = self.search(server, criteria, 0, 1, timeout, received_after)
return self.get_by_id(result.items[0].id)
def get_by_id(self, id):
    """Retrieve a message.

    Retrieves the detail for a single email message. Simply supply the
    unique identifier for the required message.

    :param id: The identifier of the email message to be retrieved.
    :type id: str
    :return: Message
    :rtype: ~mailosaur.models.Message
    :raises:
     :class:`MailosaurException<mailosaur.models.MailosaurException>`
    """
    url = "%sapi/messages/%s" % (self.base_url, id)
    response = self.session.get(url)
    # Anything other than 200 OK is surfaced as an API error.
    if response.status_code != 200:
        raise MailosaurException(response)
    return Message(response.json())
def delete(self, id):
    """Delete a message.

    Permanently deletes a message. This operation cannot be undone. Also
    deletes any attachments related to the message.

    :param id: The identifier of the message to be deleted.
    :type id: str
    :return: None
    :rtype: None
    :raises:
     :class:`MailosaurException<mailosaur.models.MailosaurException>`
    """
    response = self.session.delete(
        "%sapi/messages/%s" % (self.base_url, id))
    # 204 No Content is the only success status for deletion.
    if response.status_code != 204:
        raise MailosaurException(response)
def list(self, server, page=None, items_per_page=None):
    """List all messages.

    Returns a list of your messages in summary form. The summaries are
    returned sorted by received date, with the most recently-received
    messages appearing first.

    :param server: The identifier of the server hosting the messages.
    :type server: str
    :param page: Used in conjunction with `itemsPerPage` to support
     pagination.
    :type page: int
    :param items_per_page: A limit on the number of results to be returned
     per page. Can be set between 1 and 1000 items, the default is 50.
    :type items_per_page: int
    :return: MessageListResult
    :rtype: ~mailosaur.models.MessageListResult
    :raises:
     :class:`MailosaurException<mailosaur.models.MailosaurException>`
    """
    query = {'server': server, 'page': page, 'itemsPerPage': items_per_page}
    response = self.session.get(
        "%sapi/messages" % self.base_url, params=query)
    if response.status_code != 200:
        raise MailosaurException(response)
    return MessageListResult(response.json())
def delete_all(self, server):
    """Delete all messages.

    Permanently deletes all messages held by the specified server. This
    operation cannot be undone. Also deletes any attachments related to
    each message.

    :param server: The identifier of the server to be emptied.
    :type server: str
    :return: None
    :rtype: None
    :raises:
     :class:`MailosaurException<mailosaur.models.MailosaurException>`
    """
    response = self.session.delete(
        "%sapi/messages" % self.base_url, params={'server': server})
    # 204 No Content is the only success status for bulk deletion.
    if response.status_code != 204:
        raise MailosaurException(response)
    def search(self, server, criteria, page=None, items_per_page=None, timeout=None, received_after=None):
        """Search for messages.

        Returns a list of messages matching the specified search criteria, in
        summary form. The messages are returned sorted by received date, with
        the most recently-received messages appearing first.

        When ``timeout`` is a positive number of milliseconds, the call keeps
        polling the API (at a server-suggested cadence) until at least one
        matching message arrives or the timeout is exceeded, in which case a
        plain ``Exception`` is raised.

        :param server: The identifier of the server hosting the messages.
        :type server: str
        :param criteria: The search criteria to match results against.
        :type criteria: ~mailosaur.models.SearchCriteria
        :param page: Used in conjunction with `itemsPerPage` to support
         pagination.
        :type page: int
        :param items_per_page: A limit on the number of results to be returned
         per page. Can be set between 1 and 1000 items, the default is 50.
        :type items_per_page: int
        :param timeout: Specify how long to wait for a matching result (in milliseconds).
        :type timeout: int
        :param received_after: Limits results to only messages received after this date/time.
        :type received_after: datetime
        :return: MessageListResult
        :rtype: ~mailosaur.models.MessageListResult
        :raises:
         :class:`MailosaurException<mailosaur.models.MailosaurException>`
        """
        url = "%sapi/messages/search" % (self.base_url)
        params = {'server': server, 'page': page, 'itemsPerPage': items_per_page, 'receivedAfter': received_after}
        poll_count = 0
        start_time = datetime.today()
        while True:
            response = self.session.post(url, params=params, json=criteria.toJSON())
            if response.status_code not in [200]:
                raise MailosaurException(response)
            data = response.json()
            result = MessageListResult(data)
            # No polling requested (timeout None/0), or we already have hits.
            if timeout is None or timeout == 0 or len(result.items) != 0:
                return result
            # List conversion necessary for Python 3 compatibility
            # https://stackoverflow.com/questions/36982858/object-of-type-map-has-no-len-in-python-3
            # The server suggests the polling cadence via 'x-ms-delay',
            # e.g. "1000,2000,5000" = per-attempt delays in milliseconds.
            delay_pattern = list(map(int, (response.headers.get('x-ms-delay') or '1000').split(',')))
            # Past the end of the pattern, keep reusing its last entry.
            delay = delay_pattern[len(delay_pattern) - 1] if poll_count >= len(delay_pattern) else delay_pattern[poll_count]
            poll_count += 1
            ## Stop if timeout will be exceeded
            if ((1000 * (datetime.today() - start_time).total_seconds()) + delay) > timeout:
                raise Exception("No matching messages were found in time")
            time.sleep(delay / 1000)
ace4721046b367c8cbaf6981c4c1706be3374ffe | 4,027 | py | Python | ps1_argonaut/errors_warnings.py | OverSurge/PS1-BRender-Reverse | 3253707d5382cb3df41d8704214b60582e0ea56b | [
"MIT"
] | 51 | 2020-05-23T21:25:22.000Z | 2021-04-25T07:52:48.000Z | ps1_argonaut/errors_warnings.py | OverSurge/PS1-BRender-Reverse | 3253707d5382cb3df41d8704214b60582e0ea56b | [
"MIT"
] | 1 | 2021-04-03T23:12:27.000Z | 2021-04-03T23:12:27.000Z | ps1_argonaut/errors_warnings.py | OverSurge/PS1-BRender-Reverse | 3253707d5382cb3df41d8704214b60582e0ea56b | [
"MIT"
] | null | null | null | class UnsupportedParsing(NotImplementedError):
def __init__(self, feature_name):
super().__init__(f"Sorry, {feature_name} parsing / exporting isn't supported (yet) on this game.")
class UnsupportedSerialization(NotImplementedError):
    """Raised when serializing a given feature is not implemented for this game."""

    def __init__(self, feature_name):
        message = f"Sorry, {feature_name} serializing isn't supported (yet) on this game."
        super().__init__(message)
class ReverseError(Exception):
    """Base class for all reverse-engineering failures raised by this package.

    Builds a human-readable message that optionally includes the absolute
    file offset (in hex) at which the problem was detected.
    """

    def __init__(self, explanation: str, absolute_file_offset: int = None):
        Exception.__init__(self)
        if absolute_file_offset is not None:
            header = f"A reversing error has been encountered at offset {hex(absolute_file_offset)}:\n"
        else:
            header = "A reversing error has been encountered:\n"
        self.message = header + (
            f"{explanation}\n"
            "If you think this error isn't supposed to happen, you can ask me for help "
            "(contact details in the README).")

    def __str__(self):
        return self.message
class SectionNameError(ReverseError):
    """Raised when a section's codename marker is missing or does not match."""

    def __init__(self, absolute_file_offset: int, expected: str, found: str):
        message = (f"Section codename is either missing or incorrect, "
                   f"expected '{expected}', got '{found}'.")
        super().__init__(message, absolute_file_offset)
class SectionSizeMismatch(ReverseError):
    """Raised when a section's parsed size differs from its declared size."""

    def __init__(self, absolute_file_offset: int, name: str, expected: int, found: int):
        message = f"The {name} section size is different than expected: got {found} instead of {expected}."
        super().__init__(message, absolute_file_offset)
class NegativeIndexError(ReverseError):
    """Raised when a vertex / vertex-normal / face index is unexpectedly negative."""

    # Human-readable causes intended to be passed as the ``cause`` argument.
    CAUSE_VERTEX = "vertex"
    CAUSE_VERTEX_NORMAL = "vertex normal"
    CAUSE_FACE = "face"

    def __init__(self, absolute_file_offset: int, cause: str, value: int, entire):
        message = f"A negative {cause} index has been found: {value}. Whole {cause}: {entire}"
        super().__init__(message, absolute_file_offset)
class VerticesNormalsGroupsMismatch(ReverseError):
    """Raised when a model declares differing counts of vertex and normal groups."""

    def __init__(self, n_vertices_groups: int, n_normals_groups: int, absolute_file_offset: int):
        message = (f"Different amounts of vertices groups ({n_vertices_groups}) "
                   f"and normals groups ({n_normals_groups}) found.")
        super().__init__(message, absolute_file_offset)
class IncompatibleAnimationError(ReverseError):
    """Raised when an animation targets a different vertex-group count than the model."""

    def __init__(self, n_model_vg: int, n_anim_vg: int):
        message = (f"This model has {n_model_vg} vertex groups, but "
                   f"this animation is designed for models with {n_anim_vg} vertex groups, thus they are incompatible.")
        super().__init__(message)
class ZeroRunLengthError(ReverseError):
    """Raised when RLE decompression encounters a zero (i.e. meaningless) run length."""

    def __init__(self, absolute_file_offset: int):
        message = "A zero run length has been found while decompressing."
        super().__init__(message, absolute_file_offset)
class TexturesWarning(ReverseError):
    """Raised when texture counts look implausible, hinting at a format misread."""

    def __init__(self, absolute_file_offset: int, n_textures: int, n_rows: int):
        message = (f"Too much textures ({n_textures}), or incorrect row count ({n_rows}).\n"
                   "It is most probably caused by an inaccuracy in my reverse engineering of the textures format.")
        super().__init__(message, absolute_file_offset)
class Models3DWarning(ReverseError):
    """Raised when a 3D model declares implausibly large vertex/face counts."""

    def __init__(self, absolute_file_offset: int, n_vertices: int, n_faces: int):
        message = (
            f"Too many vertices or faces ({n_vertices} vertices, {n_faces} faces). It is most probably caused by an "
            "inaccuracy in my reverse engineering of the models format.\nIf you think that the amounts are coherent, "
            "you can silence this warning with the --ignore-warnings commandline option.")
        super().__init__(message, absolute_file_offset)
class AnimationsWarning(ReverseError):
    """Raised when an animation has a suspicious frame count (too many, or none)."""

    def __init__(self, absolute_file_offset: int, n_total_frames: int):
        message = (f"Too much frames in animation (or no frame): {n_total_frames} frames.\n"
                   "It is most probably caused by an inaccuracy in my reverse engineering of the textures format.")
        super().__init__(message, absolute_file_offset)
ace472b217b8bb1b5193a1d4a3b8aff879234301 | 437 | py | Python | dynastes/util/precision_util.py | dynastes-team/dynastes | 931b6d9ac83862eb39c2f5144c95b952e9efcd8e | [
"MIT"
] | 7 | 2020-01-18T14:28:04.000Z | 2021-11-10T16:46:34.000Z | dynastes/util/precision_util.py | veqtor/dynastes | 931b6d9ac83862eb39c2f5144c95b952e9efcd8e | [
"MIT"
] | null | null | null | dynastes/util/precision_util.py | veqtor/dynastes | 931b6d9ac83862eb39c2f5144c95b952e9efcd8e | [
"MIT"
] | null | null | null | import tensorflow as tf
def large_compatible_negative(tensor_type):
    """Return a large negative number representable in `tensor_type`.

    The customary masking constant in this module (-1e9) cannot be
    represented in tf.float16, so for that dtype the dtype's own minimum
    value is returned instead.

    Args:
        tensor_type: a dtype to determine the type.
    Returns:
        a large negative number.
    """
    return tf.float16.min if tensor_type == tf.float16 else -1e9
| 27.3125 | 69 | 0.697941 |
ace47704a3e22a7f85c1f5575baaa0d756304184 | 2,212 | py | Python | pool_v0.1_1agent_timestep/model/parts/pruebas.py | bloxmove-com/TE_Simulations_Research_Group | e12c1c974f3b27f222ef37db656951bae8e442f3 | [
"MIT"
] | null | null | null | pool_v0.1_1agent_timestep/model/parts/pruebas.py | bloxmove-com/TE_Simulations_Research_Group | e12c1c974f3b27f222ef37db656951bae8e442f3 | [
"MIT"
] | null | null | null | pool_v0.1_1agent_timestep/model/parts/pruebas.py | bloxmove-com/TE_Simulations_Research_Group | e12c1c974f3b27f222ef37db656951bae8e442f3 | [
"MIT"
] | null | null | null | import numpy as np
import random
import pandas as pd
import matplotlib.pyplot as plt
# # Define a dictionary containing Students data
# data = {'Name': ['Jai', 'Princi', 'Gaurav', 'Anuj'],
# 'Height': [5.1, 6.2, 5.1, 5.2],
# 'Qualification': ['Msc', 'MA', 'Msc', 'Msc']}
# # Define a dictionary with key values of
# # an existing column and their respective
# # value pairs as the # values for our new column.
# address = {'Delhi': 'Jai', 'Bangalore': 'Princi',
# 'Patna': 'Gaurav', 'Chennai': 'Anuj'}
# # Convert the dictionary into DataFrame
# df = pd.DataFrame(data)
# # Provide 'Address' as the column name
# print(df.keys())
# for _ in df.keys():
# print(_)
# Slippage:
# initial_ta = 1000
# initial_tb = 1000
# initial_p_ta = 1
# initial_p_tb = 1
# change = -400
# ta = initial_ta
# tb= initial_tb
# p_ta = initial_p_ta
# p_tb = initial_p_tb
# cte = ta*tb
# add_ta = change
# new_ta = ta + add_ta
# new_tb = cte/new_ta
# new_pta = (p_tb*new_tb)/new_ta
# print('***************')
# print('1. 200 a precio inicial')
# print(new_ta)
# print(new_pta)
# print(new_pta*new_ta)
# print(p_tb*new_tb)
# print('Te llevas X tokens:')
# print(ta-new_ta)
# print(new_tb-tb)
# print('***************')
# print('2. 200 a precio actualizado')
# ta = initial_ta
# tb= initial_tb
# p_ta = initial_p_ta
# p_tb = initial_p_tb
# cte = ta*tb
# new_ta = ta
# lista = []
# token = 0
# while token < abs(change):
# new_ta -= 1
# new_tb = cte/new_ta
# new_pta = (p_tb*new_tb)/new_ta
# lista.append(new_pta)
# token += 1
# print(new_ta)
# print(new_pta)
# print(new_pta*new_ta)
# print(p_tb*new_tb)
# print('Te llevas X tokens:')
# print(ta-new_ta)
# print(new_tb-tb)
# plt.plot(lista)
# plt.show()
# class agent():
# def __init__(self):
# self.count = 0
# self.price = 0
# dimi = agent()
# def test(dimi):
# dimi.count += 2
# return
# test(dimi)
# print(dimi.count)
# Build a small 6x4 demo frame and print its third row (label 2)
# as a one-row DataFrame (double brackets keep it 2-D).
data = {
    'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
    'col2': [2, 1, 9, 8, 7, 4],
    'col3': [0, 1, 9, 4, 2, 3],
    'col4': ['a', 'B', 'c', 'D', 'e', 'F'],
}
df = pd.DataFrame(data)
print(df.iloc[[2]])
ace4777bc25c04344cc546847b9104d52b774439 | 2,023 | py | Python | aliyun-python-sdk-vod/aliyunsdkvod/request/v20170321/DeleteVodTemplateRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-vod/aliyunsdkvod/request/v20170321/DeleteVodTemplateRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-vod/aliyunsdkvod/request/v20170321/DeleteVodTemplateRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvod.endpoint import endpoint_data
class DeleteVodTemplateRequest(RpcRequest):
    """RPC request for the VOD ``DeleteVodTemplate`` API (version 2017-03-21).

    Auto-generated aliyun SDK request class: exposes one getter/setter pair
    per query parameter, each delegating to the base request's query map.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'vod', '2017-03-21', 'DeleteVodTemplate','vod')
        # Wire up endpoint resolution data when the base class supports it.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ResourceOwnerId(self):
        # Query parameter: ID of the resource owner account.
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self,ResourceOwnerId):
        self.add_query_param('ResourceOwnerId',ResourceOwnerId)

    def get_ResourceOwnerAccount(self):
        # Query parameter: name of the resource owner account.
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)

    def get_OwnerId(self):
        # Query parameter: ID of the calling account.
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self,OwnerId):
        self.add_query_param('OwnerId',OwnerId)

    def get_VodTemplateId(self):
        # Query parameter: identifier of the VOD template to delete.
        return self.get_query_params().get('VodTemplateId')

    def set_VodTemplateId(self,VodTemplateId):
        self.add_query_param('VodTemplateId',VodTemplateId)
ace477c444dd03c577ebcd9a6ab44936649a139f | 90 | py | Python | src/genui/utils/extensions/__init__.py | Tontolda/genui | c5b7da7c5a99fc16d34878e2170145ac7c8e31c4 | [
"0BSD"
] | 15 | 2021-05-31T13:39:17.000Z | 2022-03-30T12:04:14.000Z | src/genui/utils/extensions/__init__.py | martin-sicho/genui | ea7f1272030a13e8e253a7a9b6479ac6a78552d3 | [
"MIT"
] | 3 | 2021-04-08T22:02:22.000Z | 2022-03-16T09:10:20.000Z | src/genui/utils/extensions/__init__.py | Tontolda/genui | c5b7da7c5a99fc16d34878e2170145ac7c8e31c4 | [
"0BSD"
] | 5 | 2021-03-04T11:00:54.000Z | 2021-12-18T22:59:22.000Z | """
__init__.py
Created by: Martin Sicho
On: 4/29/20, 5:01 PM
"""
__all__ = ('tasks',)
| 9 | 24 | 0.6 |
ace4784106038ddd89e56af7b940a02610b747ee | 35,601 | py | Python | src/connection-monitor-preview/azext_connection_monitor_preview/vendored_sdks/v2019_11_01/v2019_11_01/operations/_express_route_cross_connections_operations.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/connection-monitor-preview/azext_connection_monitor_preview/vendored_sdks/v2019_11_01/v2019_11_01/operations/_express_route_cross_connections_operations.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/connection-monitor-preview/azext_connection_monitor_preview/vendored_sdks/v2019_11_01/v2019_11_01/operations/_express_route_cross_connections_operations.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class ExpressRouteCrossConnectionsOperations(object):
"""ExpressRouteCrossConnectionsOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2019-11-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2019-11-01"
self.config = config
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Retrieves all the ExpressRouteCrossConnections in a subscription.

        Returns a lazily-evaluated paged iterator: each page is fetched from
        the service only when the iterator reaches it.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of ExpressRouteCrossConnection
        :rtype:
         ~azure.mgmt.network.v2019_11_01.models.ExpressRouteCrossConnectionPaged[~azure.mgmt.network.v2019_11_01.models.ExpressRouteCrossConnection]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def prepare_request(next_link=None):
            # First page: build the URL from the operation metadata. Subsequent
            # pages reuse the service-provided nextLink verbatim, which already
            # embeds the query string (hence the empty query_parameters).
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                # Fresh correlation id per request, for service-side tracing.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        def internal_paging(next_link=None):
            request = prepare_request(next_link)
            response = self._client.send(request, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.ExpressRouteCrossConnectionPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCrossConnections'}
    def list_by_resource_group(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Retrieves all the ExpressRouteCrossConnections in a resource group.

        Returns a lazily-evaluated paged iterator: each page is fetched from
        the service only when the iterator reaches it.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of ExpressRouteCrossConnection
        :rtype:
         ~azure.mgmt.network.v2019_11_01.models.ExpressRouteCrossConnectionPaged[~azure.mgmt.network.v2019_11_01.models.ExpressRouteCrossConnection]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def prepare_request(next_link=None):
            # First page: build the URL from metadata; later pages reuse the
            # service-provided nextLink (query string already embedded).
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                # Fresh correlation id per request, for service-side tracing.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request
        def internal_paging(next_link=None):
            request = prepare_request(next_link)
            response = self._client.send(request, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.ExpressRouteCrossConnectionPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return deserialized
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections'}
    def get(
            self, resource_group_name, cross_connection_name, custom_headers=None, raw=False, **operation_config):
        """Gets details about the specified ExpressRouteCrossConnection.

        :param resource_group_name: The name of the resource group (peering
         location of the circuit).
        :type resource_group_name: str
        :param cross_connection_name: The name of the
         ExpressRouteCrossConnection (service key of the circuit).
        :type cross_connection_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ExpressRouteCrossConnection or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.network.v2019_11_01.models.ExpressRouteCrossConnection or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            # Fresh correlation id per request, for service-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCrossConnection', response)
        if raw:
            # raw=True: hand back the deserialized model plus the HTTP response.
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'}
    def _create_or_update_initial(
            self, resource_group_name, cross_connection_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Issue the initial PUT of the create_or_update long-running operation.

        Internal helper: performs a single HTTP round-trip; polling to
        completion is handled by the public ``create_or_update`` wrapper.
        """
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'ExpressRouteCrossConnection')
        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCrossConnection', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update(
            self, resource_group_name, cross_connection_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Update the specified ExpressRouteCrossConnection.

        Long-running operation: sends the initial PUT and returns a poller
        that tracks the operation to completion.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the
         ExpressRouteCrossConnection.
        :type cross_connection_name: str
        :param parameters: Parameters supplied to the update express route
         crossConnection operation.
        :type parameters:
         ~azure.mgmt.network.v2019_11_01.models.ExpressRouteCrossConnection
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns
         ExpressRouteCrossConnection or
         ClientRawResponse<ExpressRouteCrossConnection> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_11_01.models.ExpressRouteCrossConnection]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_11_01.models.ExpressRouteCrossConnection]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            cross_connection_name=cross_connection_name,
            parameters=parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        def get_long_running_output(response):
            # Invoked by the poller with the final response once the LRO completes.
            deserialized = self._deserialize('ExpressRouteCrossConnection', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # polling: True -> default ARM polling; False -> no polling; else a
        # caller-supplied polling strategy object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'}
    def update_tags(
            self, resource_group_name, cross_connection_name, tags=None, custom_headers=None, raw=False, **operation_config):
        """Updates an express route cross connection tags.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the cross connection.
        :type cross_connection_name: str
        :param tags: Resource tags.
        :type tags: dict[str, str]
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ExpressRouteCrossConnection or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.network.v2019_11_01.models.ExpressRouteCrossConnection or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Wrap the bare tags dict in the model type the PATCH body expects.
        cross_connection_parameters = models.TagsObject(tags=tags)
        # Construct URL
        url = self.update_tags.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(cross_connection_parameters, 'TagsObject')
        # Construct and send request
        request = self._client.patch(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCrossConnection', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'}
    def _list_arp_table_initial(
            self, resource_group_name, cross_connection_name, peering_name, device_path, custom_headers=None, raw=False, **operation_config):
        """Issue the initial POST of the list_arp_table long-running operation.

        Internal helper: performs a single HTTP round-trip; polling to
        completion is handled by the public ``list_arp_table`` wrapper.
        """
        # Construct URL
        url = self.list_arp_table.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        # 202 Accepted indicates the LRO is still running; 200 carries the result.
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def list_arp_table(
self, resource_group_name, cross_connection_name, peering_name, device_path, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets the currently advertised ARP table associated with the express
route cross connection in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the
ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
ExpressRouteCircuitsArpTableListResult or
ClientRawResponse<ExpressRouteCircuitsArpTableListResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_11_01.models.ExpressRouteCircuitsArpTableListResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_11_01.models.ExpressRouteCircuitsArpTableListResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._list_arp_table_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
device_path=device_path,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
list_arp_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/arpTables/{devicePath}'}
def _list_routes_table_summary_initial(
self, resource_group_name, cross_connection_name, peering_name, device_path, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.list_routes_table_summary.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCrossConnectionsRoutesTableSummaryListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_routes_table_summary(
self, resource_group_name, cross_connection_name, peering_name, device_path, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets the route table summary associated with the express route cross
connection in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the
ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
ExpressRouteCrossConnectionsRoutesTableSummaryListResult or
ClientRawResponse<ExpressRouteCrossConnectionsRoutesTableSummaryListResult>
if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_11_01.models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_11_01.models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._list_routes_table_summary_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
device_path=device_path,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ExpressRouteCrossConnectionsRoutesTableSummaryListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
list_routes_table_summary.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'}
def _list_routes_table_initial(
self, resource_group_name, cross_connection_name, peering_name, device_path, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.list_routes_table.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_routes_table(
self, resource_group_name, cross_connection_name, peering_name, device_path, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets the currently advertised routes table associated with the express
route cross connection in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the
ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
ExpressRouteCircuitsRoutesTableListResult or
ClientRawResponse<ExpressRouteCircuitsRoutesTableListResult> if
raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2019_11_01.models.ExpressRouteCircuitsRoutesTableListResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2019_11_01.models.ExpressRouteCircuitsRoutesTableListResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._list_routes_table_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
device_path=device_path,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
list_routes_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTables/{devicePath}'}
| 48.768493 | 252 | 0.685992 |
ace47a2cecd9acc9f31d7bdc733f4df89a1d64bc | 539 | py | Python | indicators/migrations/0064_auto_20190624_1558.py | mercycorps/TolaWorkflow | 59542132fafd611081adb0e8cfaa04abc5886d7a | [
"Apache-2.0"
] | null | null | null | indicators/migrations/0064_auto_20190624_1558.py | mercycorps/TolaWorkflow | 59542132fafd611081adb0e8cfaa04abc5886d7a | [
"Apache-2.0"
] | 268 | 2020-03-31T15:46:59.000Z | 2022-03-31T18:01:08.000Z | indicators/migrations/0064_auto_20190624_1558.py | Falliatcom-sa/falliatcom | 39fb926de072c296ed32d50cccfb8003ca870739 | [
"Apache-2.0"
] | 1 | 2021-01-05T01:58:24.000Z | 2021-01-05T01:58:24.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-06-24 22:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('indicators', '0063_sentence_case_level_field'),
]
operations = [
migrations.AlterField(
model_name='level',
name='assumptions',
field=models.TextField(blank=True, default='', verbose_name='Assumptions'),
preserve_default=False,
),
]
| 24.5 | 87 | 0.632653 |
ace47a56d2cc8315345e2bb847dcba4d96a57321 | 2,251 | py | Python | homeassistant/components/smartthings/switch.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | [
"Apache-2.0"
] | 5 | 2020-09-17T10:48:51.000Z | 2021-11-22T00:08:17.000Z | homeassistant/components/smartthings/switch.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | [
"Apache-2.0"
] | 9 | 2022-01-27T06:32:10.000Z | 2022-03-31T07:07:51.000Z | homeassistant/components/smartthings/switch.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | [
"Apache-2.0"
] | 2 | 2020-12-09T02:21:27.000Z | 2021-08-07T04:58:01.000Z | """Support for switches through the SmartThings cloud API."""
from typing import Optional, Sequence
from pysmartthings import Attribute, Capability
from homeassistant.components.switch import SwitchDevice
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add switches for a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
async_add_entities(
[
SmartThingsSwitch(device)
for device in broker.devices.values()
if broker.any_assigned(device.device_id, "switch")
]
)
def get_capabilities(capabilities: Sequence[str]) -> Optional[Sequence[str]]:
"""Return all capabilities supported if minimum required are present."""
# Must be able to be turned on/off.
if Capability.switch in capabilities:
return [Capability.switch, Capability.energy_meter, Capability.power_meter]
return None
class SmartThingsSwitch(SmartThingsEntity, SwitchDevice):
"""Define a SmartThings switch."""
async def async_turn_off(self, **kwargs) -> None:
"""Turn the switch off."""
await self._device.switch_off(set_status=True)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_schedule_update_ha_state()
async def async_turn_on(self, **kwargs) -> None:
"""Turn the switch on."""
await self._device.switch_on(set_status=True)
# State is set optimistically in the command above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_schedule_update_ha_state()
@property
def current_power_w(self):
"""Return the current power usage in W."""
return self._device.status.attributes[Attribute.power].value
@property
def today_energy_kwh(self):
"""Return the today total energy usage in kWh."""
return self._device.status.attributes[Attribute.energy].value
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._device.status.switch
| 35.730159 | 83 | 0.701466 |
ace47b30611556f82b52b78a08aa4d718daaa341 | 685 | py | Python | Inheritance/05. Shop/project/product_repository.py | milenpenev/SoftUni-OOP | 90f730cb37713f7ca93b1c0ecd0d12aa351247d2 | [
"MIT"
] | null | null | null | Inheritance/05. Shop/project/product_repository.py | milenpenev/SoftUni-OOP | 90f730cb37713f7ca93b1c0ecd0d12aa351247d2 | [
"MIT"
] | null | null | null | Inheritance/05. Shop/project/product_repository.py | milenpenev/SoftUni-OOP | 90f730cb37713f7ca93b1c0ecd0d12aa351247d2 | [
"MIT"
] | null | null | null | from project.product import Product
class ProductRepository:
def __init__(self):
self.products = []
def add(self, product: Product):
self.products.append(product)
def find(self, product_name: str):
for el in self.products:
if el == product_name:
return el
def remove(self, product_name):
for el in self.products:
if el.name == product_name:
self.products.remove(el)
def __repr__(self):
result = []
for el in self.products:
result.append(f'{el.name}: {el.quantity}')
new_line = "\n"
return f'{new_line.join(x for x in result)}'
| 25.37037 | 54 | 0.572263 |
ace47b7702b16600c50abee0af175a19a06c9cf7 | 801 | py | Python | accounts/migrations/0008_auto_20210704_1537.py | Srinjay-hack/Buddy | 155b9ba58a20bf043493213dd8349f61012fc480 | [
"Apache-2.0"
] | null | null | null | accounts/migrations/0008_auto_20210704_1537.py | Srinjay-hack/Buddy | 155b9ba58a20bf043493213dd8349f61012fc480 | [
"Apache-2.0"
] | null | null | null | accounts/migrations/0008_auto_20210704_1537.py | Srinjay-hack/Buddy | 155b9ba58a20bf043493213dd8349f61012fc480 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.4 on 2021-07-04 15:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0007_auto_20210704_1517'),
]
operations = [
migrations.AlterField(
model_name='assistant',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='caller',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL),
),
]
| 30.807692 | 148 | 0.665418 |
ace47bb2850a3a85eb75f4218026e0bad57e4d82 | 2,208 | py | Python | neutron/db/migration/alembic_migrations/versions/newton/expand/5abc0278ca73_add_support_for_vlan_trunking.py | freyes/neutron | 197c222acb0390728106a083d1663f2c06427518 | [
"Apache-2.0"
] | null | null | null | neutron/db/migration/alembic_migrations/versions/newton/expand/5abc0278ca73_add_support_for_vlan_trunking.py | freyes/neutron | 197c222acb0390728106a083d1663f2c06427518 | [
"Apache-2.0"
] | null | null | null | neutron/db/migration/alembic_migrations/versions/newton/expand/5abc0278ca73_add_support_for_vlan_trunking.py | freyes/neutron | 197c222acb0390728106a083d1663f2c06427518 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
revision = '5abc0278ca73'
down_revision = '45f8dd33480b'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('trunks',
sa.Column('tenant_id', sa.String(length=255), nullable=True,
index=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('standard_attr_id', sa.BigInteger(), nullable=False),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['standard_attr_id'],
['standardattributes.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('port_id'),
sa.UniqueConstraint('standard_attr_id')
)
op.create_table('subports',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('trunk_id', sa.String(length=36), nullable=False),
sa.Column('segmentation_type', sa.String(length=32), nullable=False),
sa.Column('segmentation_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['trunk_id'], ['trunks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('port_id', 'trunk_id'),
sa.UniqueConstraint('port_id'),
sa.UniqueConstraint('trunk_id', 'segmentation_type', 'segmentation_id',
name='uniq_subport0trunk_id0segmentation_type0segmentation_id')
)
| 44.16 | 79 | 0.641757 |
ace47bf326be2da76c3577c60303ee62a7a76edb | 272 | py | Python | Leetcode/461.hamming-distance.py | EdwaRen/Competitve-Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | 1 | 2021-05-03T21:48:25.000Z | 2021-05-03T21:48:25.000Z | Leetcode/461.hamming-distance.py | EdwaRen/Competitve_Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | null | null | null | Leetcode/461.hamming-distance.py | EdwaRen/Competitve_Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | null | null | null | class Solution(object):
def hammingDistance(self, x, y):
xor = x ^ y
count = 0
while xor:
xor = xor & (xor - 1)
count +=1
return count
z = Solution()
a = 1
b = 4
print(z.hammingDistance(a, b))
| 13.6 | 36 | 0.459559 |
ace47bfca1e473608029b46378b12fa57467b02d | 1,728 | py | Python | app/user/serializers.py | massimilianoporzio/recipe-app-api | 7ff681d0377abf12802f7916398cd868eaa638ab | [
"MIT"
] | null | null | null | app/user/serializers.py | massimilianoporzio/recipe-app-api | 7ff681d0377abf12802f7916398cd868eaa638ab | [
"MIT"
] | null | null | null | app/user/serializers.py | massimilianoporzio/recipe-app-api | 7ff681d0377abf12802f7916398cd868eaa638ab | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
"""Serializer for the users object"""
class Meta:
model = get_user_model()
fields = ('email', 'password', 'name')
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
"""Create a new user with encrypted password and return it"""
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data):
"""Update a user, setting the password correctly and return it"""
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
class AuthTokenSerializer(serializers.Serializer):
"""Serializer for the user authentication object"""
email = serializers.CharField()
password = serializers.CharField(
style={'input_type': 'password'},
trim_whitespace=False
)
def validate(self, attrs):
"""Validate and authenticate the user"""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
msg = _('Unable to authenticate with provided credentials')
raise serializers.ValidationError(msg, code='authorization')
attrs['user'] = user
return attrs
| 30.857143 | 74 | 0.645833 |
ace47c0cbfa05cec817467a50a3ea40709177ca9 | 986 | py | Python | cevent.py | DirAgAlok/HomeDisplay | a9e761ffa4c1495f9165d5a5637a401ff9a4c82b | [
"CC0-1.0"
] | 1 | 2021-04-14T11:39:05.000Z | 2021-04-14T11:39:05.000Z | cevent.py | DirAgAlok/HomeDisplay | a9e761ffa4c1495f9165d5a5637a401ff9a4c82b | [
"CC0-1.0"
] | null | null | null | cevent.py | DirAgAlok/HomeDisplay | a9e761ffa4c1495f9165d5a5637a401ff9a4c82b | [
"CC0-1.0"
] | null | null | null | import pygame
from pygame.locals import *
import pygame
from pygame.locals import *
class CEvent:
def __init__(self):
pass
def on_input_focus(self):
pass
def on_input_blur(self):
pass
def on_key_down(self, event):
pass
def on_key_up(self, event):
pass
def on_mouse_focus(self):
pass
def on_mouse_blur(self):
pass
def on_lbutton_up(self, event):
pass
def on_lbutton_down(self, event):
pass
def on_rbutton_up(self, event):
pass
def on_rbutton_down(self, event):
pass
def on_minimize(self):
pass
def on_restore(self):
pass
def on_resize(self,event):
pass
def on_expose(self):
pass
def on_exit(self):
pass
def on_event(self, event):
pass
def on_mouse_move(self, event):
pass
if __name__ == "__main__" :
event = CEvent() | 21.434783 | 38 | 0.563895 |
ace47c1449b4c7d9617cb7aa4e1b14d93b3137bc | 1,488 | py | Python | joplin/pages/department_page/tests.py | cityofaustin/joplin | 01424e46993e9b1c8e57391d6b7d9448f31d596b | [
"MIT"
] | 15 | 2018-09-27T07:36:30.000Z | 2021-08-03T16:01:21.000Z | joplin/pages/department_page/tests.py | cityofaustin/joplin | 01424e46993e9b1c8e57391d6b7d9448f31d596b | [
"MIT"
] | 183 | 2017-11-16T23:30:47.000Z | 2020-12-18T21:43:36.000Z | joplin/pages/department_page/tests.py | cityofaustin/joplin | 01424e46993e9b1c8e57391d6b7d9448f31d596b | [
"MIT"
] | 12 | 2017-12-12T22:48:05.000Z | 2021-03-01T18:01:24.000Z | from pages.department_page.factories import DepartmentPageFactory
from pages.department_page.models import DepartmentPage
from importer.page_importer import PageImporter
from groups.factories import DepartmentFactory
import pytest
# If we don't have any associated department group
@pytest.mark.django_db
def test_department_page_no_department_group(home_page):
page = DepartmentPageFactory.create(slug="department_slug", parent=home_page)
urls = page.janis_urls()
janis_publish_url = page.janis_publish_url()
assert urls == []
assert janis_publish_url == '#'
# If we don't have any associated department group
@pytest.mark.django_db
def test_department_page_with_department_group(home_page, expected_publish_url_base):
department = DepartmentFactory.create(add_department_page__dummy=True, add_department_page__parent=home_page)
page = department.department_page
urls = page.janis_urls()
janis_publish_url = page.janis_publish_url()
assert urls == [f'/{page.slug}/']
assert janis_publish_url == f'{expected_publish_url_base}/{page.slug}/'
@pytest.mark.skip("importer test")
# @pytest.mark.django_db
def test_create_department_page_from_api(remote_staging_preview_url, test_api_url, test_api_jwt_token):
url = f'{remote_staging_preview_url}/department/UGFnZVJldmlzaW9uTm9kZToyNg==?CMS_API={test_api_url}'
page = PageImporter(url, test_api_jwt_token).fetch_page_data().create_page()
assert isinstance(page, DepartmentPage)
| 38.153846 | 113 | 0.801075 |
ace47c40d2c6a445f092c839e1a0164ca4250150 | 17 | py | Python | pytorch_fft/__init__.py | taras-sereda/pytorch_fft | 2f314d0b9d618954880c74a8ff28d562d31d2e4e | [
"Apache-2.0"
] | 322 | 2017-05-25T08:42:23.000Z | 2022-03-28T02:32:25.000Z | pytorch_fft/__init__.py | bloodmage/pytorch_fft | 34057d19563c939cc49b116ff8570f95747e552c | [
"Apache-2.0"
] | 37 | 2017-06-16T20:05:53.000Z | 2021-03-11T08:04:09.000Z | pytorch_fft/__init__.py | bloodmage/pytorch_fft | 34057d19563c939cc49b116ff8570f95747e552c | [
"Apache-2.0"
] | 54 | 2017-05-26T02:20:54.000Z | 2022-01-19T12:40:18.000Z | from . import fft | 17 | 17 | 0.764706 |
ace47d1d1125f54f5e7de1cd7c678baba0a12d05 | 1,078 | py | Python | cli/proxyPool.py | ArtrixTech/proxy_pool | 3ee80e9cbba531de532e3ebefef10743c93395e7 | [
"MIT"
] | 24 | 2018-12-11T23:58:04.000Z | 2020-04-05T09:45:01.000Z | cli/proxyPool.py | ArtrixTech/proxy_pool | 3ee80e9cbba531de532e3ebefef10743c93395e7 | [
"MIT"
] | 6 | 2020-05-07T11:37:18.000Z | 2022-03-20T04:33:25.000Z | cli/proxyPool.py | ArtrixTech/proxy_pool | 3ee80e9cbba531de532e3ebefef10743c93395e7 | [
"MIT"
] | 19 | 2018-12-11T16:42:10.000Z | 2020-03-31T14:16:25.000Z | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: proxy_pool
Description :
Author : JHao
date: 2019/8/2
-------------------------------------------------
Change Activity:
2019/8/2:
-------------------------------------------------
"""
__author__ = 'JHao'
import sys
import click
import platform
sys.path.append('../')
from Config.setting import HEADER
from Schedule.ProxyScheduler import runScheduler
from Api.ProxyApi import runFlask,runFlaskWithGunicorn
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option(version='2.0.0')
def cli():
"""ProxyPool cli工具"""
@cli.command(name="schedule")
def schedule():
""" 启动调度程序 """
click.echo(HEADER)
runScheduler()
@cli.command(name="webserver")
def schedule():
""" 启动web服务 """
click.echo(HEADER)
if platform.system() == "Windows":
runFlask()
else:
runFlaskWithGunicorn()
if __name__ == '__main__':
cli()
| 20.339623 | 59 | 0.550093 |
ace47df0345488966158e7bbe41b3abb90b9a392 | 607 | py | Python | config.py | multavici/spikeball | 37e8e2afc81a9f50ecd953a2771b14ac0535b39a | [
"MIT"
] | 2 | 2019-01-26T08:11:15.000Z | 2019-01-28T22:59:39.000Z | config.py | multavici/spikeball | 37e8e2afc81a9f50ecd953a2771b14ac0535b39a | [
"MIT"
] | 1 | 2019-02-19T19:19:39.000Z | 2019-02-19T19:19:39.000Z | config.py | multavici/spikeball | 37e8e2afc81a9f50ecd953a2771b14ac0535b39a | [
"MIT"
] | 1 | 2019-01-26T08:11:22.000Z | 2019-01-26T08:11:22.000Z | import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, ".flaskenv"))
class Config(object):
SECRET_KEY = "you-will-never-guess"
SQLALCHEMY_DATABASE_URI = os.environ["DATABASE_URI"]
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = os.environ["MAIL_SERVER"]
MAIL_PORT = int(os.environ["MAIL_PORT"])
MAIL_USE_TLS = os.environ["MAIL_USE_TLS"]
MAIL_USERNAME = os.environ["MAIL_USERNAME"]
MAIL_PASSWORD = os.environ["MAIL_PASSWORD"]
ADMINS = ["noreply@spikeballgent.be"]
POSTS_PER_PAGE = 25
| 27.590909 | 56 | 0.729819 |
ace47e1e652ba8fba7783c5ec7ed3d3cfcaccf13 | 3,905 | py | Python | dataPrepScripts/RandomSampling.py | strixy16/Clairvoyante | 2bf60f9fc54d51518730d94cb05ffdf3a51f0176 | [
"BSD-3-Clause"
] | 171 | 2017-07-24T00:35:48.000Z | 2022-03-24T08:28:59.000Z | dataPrepScripts/RandomSampling.py | strixy16/Clairvoyante | 2bf60f9fc54d51518730d94cb05ffdf3a51f0176 | [
"BSD-3-Clause"
] | 45 | 2018-10-30T07:37:42.000Z | 2021-12-30T07:53:24.000Z | dataPrepScripts/RandomSampling.py | strixy16/Clairvoyante | 2bf60f9fc54d51518730d94cb05ffdf3a51f0176 | [
"BSD-3-Clause"
] | 27 | 2017-07-23T21:43:50.000Z | 2021-02-27T01:07:29.000Z | import os
import sys
import argparse
import param
import intervaltree
import random
class CandidateStdout(object):
def __init__(self, handle):
self.stdin = handle
def __del__(self):
self.stdin.close()
def MakeCandidates( args ):
fai_fn = "%s.fai" % (args.ref_fn)
if os.path.isfile(fai_fn) == False:
print >> sys.stderr, "Fasta index %s.fai doesn't exist." % (args.ref_fn)
sys.exit(1)
if args.ctgName == None:
print >> sys.stderr, "Please define --ctgName. Exiting ..."
sys.exit(1)
start = 1; end = -1
with open(fai_fn, "r") as f:
for l in f:
s = l.strip().split()
if s[0] == args.ctgName:
end = int(s[1])
if end == -1:
print >> sys.stderr, "Chromosome %s not found in %s" % (args.ctgName, fai_fn)
if args.ctgEnd != None and args.ctgEnd < end:
end = args.ctgEnd
if args.ctgStart != None and args.ctgStart > start:
start = args.ctgStart
tree = {}
if args.bed_fn != None:
import subprocess
import shlex
f = subprocess.Popen(shlex.split("gzip -fdc %s" % (args.bed_fn) ), stdout=subprocess.PIPE, bufsize=8388608)
for row in f.stdout:
row = row.strip().split()
name = row[0]
if name != args.ctgName:
continue
if name not in tree:
tree[name] = intervaltree.IntervalTree()
begin = int(row[1])
last = int(row[2])-1
if last == begin: last += 1
tree[name].addi(begin, last)
f.stdout.close()
f.wait()
if args.ctgName not in tree:
print >> sys.stderr, "ctgName is not in the bed file, are you using the correct bed file (%s)?" % (args.bed_fn)
sys.exit(1)
args.outputProb = (args.candidates * 2.) / (args.genomeSize)
for i in xrange(start, end, 1):
if args.bed_fn != None and len(tree[args.ctgName].search(i)) == 0:
continue
if random.uniform(0, 1) <= args.outputProb:
print >> sys.stdout, "%s\t%d" % (args.ctgName, i)
if args.can_fn != "PIPE":
can_fp.stdin.close()
can_fp.wait()
can_fpo.close()
def main():
    """Parse command-line flags and run the random position sampler."""
    parser = argparse.ArgumentParser(description="Generate variant candidates using alignments")
    parser.add_argument('--ref_fn', type=str, default="ref.fa",
            help="Reference fasta file input, mandatory, default: %(default)s")
    parser.add_argument('--ctgName', type=str, default=None,
            help="The name of the sequence to be processed, mandatory, default: %(default)s")
    parser.add_argument('--can_fn', type=str, default="PIPE",
            help="Randomly sampled genome position output, use PIPE for standard output, optional, default: %(default)s")
    parser.add_argument('--candidates', type=int, default=7000000,
            help="For the whole genome, the number of variant candidates to be generated, optional, default: %(default)s")
    parser.add_argument('--genomeSize', type=int, default=3000000000,
            help="The size of the genome, optional, default: %(default)s")
    parser.add_argument('--bed_fn', type=str, default=None,
            help="Generate positions only in these regions, works in intersection with ctgName, ctgStart and ctgEnd, optional, default: as defined by ctgName, ctgStart and ctgEnd")
    parser.add_argument('--ctgStart', type=int, default=None,
            help="The 1-bsae starting position of the sequence to be processed, optional")
    parser.add_argument('--ctgEnd', type=int, default=None,
            help="The inclusive ending position of the sequence to be processed, optional")
    args = parser.parse_args()
    # With no arguments at all, show usage instead of sampling with defaults.
    if len(sys.argv[1:]) == 0:
        parser.print_help()
        sys.exit(1)
    MakeCandidates(args)
if __name__ == "__main__":
    main()
| 33.663793 | 180 | 0.604866 |
ace47e74bba001cf560882c3b5e49d67342f7716 | 429 | py | Python | app.py | bkaraosmanoglu/Tweet-Mining-App | 53360e86776161d07d2fc1c422329736aab2d8a9 | [
"MIT"
] | null | null | null | app.py | bkaraosmanoglu/Tweet-Mining-App | 53360e86776161d07d2fc1c422329736aab2d8a9 | [
"MIT"
] | null | null | null | app.py | bkaraosmanoglu/Tweet-Mining-App | 53360e86776161d07d2fc1c422329736aab2d8a9 | [
"MIT"
] | null | null | null | #Burak
import streamlit as st
import tweetScrape
import tweetPreProcess
st.markdown(""" <style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style> """, unsafe_allow_html=True)
PAGES = {
"Tweet Query":tweetScrape,
"Tweet Preprocess":tweetPreProcess,
}
st.sidebar.title('MENU')
selection = st.sidebar.selectbox("Go to", list(PAGES.keys()))
page = PAGES[selection]
page.app()
| 19.5 | 62 | 0.680653 |
ace47ec42f49ad58ea22cab1c49e32c293dbc5a7 | 5,820 | py | Python | utils/train_eval_test_utils.py | Xtuden-com/tensor2robot | a3674958a046de711e37445d39afd4e529d8dd09 | [
"Apache-2.0"
] | 1 | 2021-10-18T01:27:04.000Z | 2021-10-18T01:27:04.000Z | utils/train_eval_test_utils.py | Xtuden-com/tensor2robot | a3674958a046de711e37445d39afd4e529d8dd09 | [
"Apache-2.0"
] | null | null | null | utils/train_eval_test_utils.py | Xtuden-com/tensor2robot | a3674958a046de711e37445d39afd4e529d8dd09 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as python3
"""Utility functions for train_eval tests for new models."""
import errno
import os
from typing import Callable, Optional, Text, List
import gin
from tensor2robot.utils import train_eval
import tensorflow.compat.v1 as tf
# Glob patterns (relative to model_dir) that a successful training run is
# expected to produce.
DEFAULT_TRAIN_FILENAME_PATTERNS = [
    'operative_config-0.gin', 'model.ckpt-0.data-*', 'model.ckpt-0.meta',
    'model.ckpt-0.index', 'checkpoint', 'graph.pbtxt'
]
# Glob patterns that a successful evaluation run is expected to produce.
DEFAULT_EVAL_FILENAME_PATTERNS = ['eval/events.*']
def assert_output_files(
    test_case,
    model_dir,
    expected_output_filename_patterns = None):
  """Assert every expected output file exists and is non-empty.

  Args:
    test_case: Test instance whose assert* methods report failures.
    model_dir: Directory in which the model artifacts should live.
    expected_output_filename_patterns: Glob patterns (relative to model_dir)
      that must each match at least one non-empty file. Defaults to the
      union of the train and eval filename patterns.
  """
  patterns = expected_output_filename_patterns
  if patterns is None:
    patterns = DEFAULT_TRAIN_FILENAME_PATTERNS + DEFAULT_EVAL_FILENAME_PATTERNS
  for pattern in patterns:
    filename_pattern = os.path.join(model_dir, pattern)
    tf.logging.info('file_pattern: %s', filename_pattern)
    filenames = tf.io.gfile.glob(filename_pattern)
    tf.logging.info('filenames: %s', filenames)
    filenames_dir = tf.io.gfile.listdir(model_dir)
    tf.logging.info('filenames_dir: %s', filenames_dir)
    test_case.assertNotEmpty(
        filenames, msg='No files found with pattern "%s"' % filename_pattern)
    # A matching file must also contain data, not merely exist.
    for filename in filenames:
      with tf.io.gfile.GFile(filename) as f:
        test_case.assertGreater(f.size(), 0, msg='%s is empty' % filename)
def test_train_eval_gin(test_case,
                        model_dir: Text,
                        full_gin_path: Text,
                        max_train_steps: int,
                        eval_steps: int,
                        gin_overwrites_fn: Optional[Callable[[], None]] = None,
                        assert_train_output_files: bool = True,
                        assert_eval_output_files: bool = True) -> None:
  """Train and eval a runnable gin config.

  Until we have a proper gen_rule to create individual targets for every gin
  file automatically, gin files can be tested using the pattern below.

  Please, use 'test_train_eval_gin' as the test function name such that it
  is easy to convert these tests as soon as the gen_rule is available.

  @parameterized.parameters(
      ('first.gin',),
      ('second.gin',),
      ('third.gin',),
  )
  def test_train_eval_gin(self, gin_file):
    full_gin_path = os.path.join(FLAGS.test_srcdir, BASE_GIN_PATH, gin_file)
    model_dir = os.path.join(FLAGS.test_tmpdir, 'test_train_eval_gin', gin_file)
    train_eval_test_utils.test_train_eval_gin(
        test_case=self,
        model_dir=model_dir,
        full_gin_path=full_gin_path,
        max_train_steps=MAX_TRAIN_STEPS,
        eval_steps=EVAL_STEPS)

  Args:
    test_case: The instance of the test used to assert that the output files are
      generated.
    model_dir: The path where the model should be stored.
    full_gin_path: The path of the gin file which parameterizes train_eval.
    max_train_steps: The maximum number of training steps, should be small since
      this is just for testing.
    eval_steps: The number of eval steps, should be small since this is just for
      testing.
    gin_overwrites_fn: Optional function which binds gin parameters to
      overwrite.
    assert_train_output_files: If True, the expected output files of the
      training run are checked, otherwise this check is skipped. If only
      evaluation is performed this should be set to False.
    assert_eval_output_files: If True, the output expected files of the
      evaluation run are checked, otherwise this check is skipped. If only
      training is performed this should be set to False. Note, if
      assert_train_output_files is set to False the model_dir is not deleted
      in order to load the model from training.
  """
  # We clear all prior parameters set by gin to ensure that we can call this
  # function sequentially for all parameterized tests.
  gin.clear_config(clear_constants=True)
  gin.parse_config_file(full_gin_path, print_includes_and_imports=True)
  gin.bind_parameter('train_eval_model.model_dir', model_dir)
  if gin_overwrites_fn is not None:
    gin_overwrites_fn()
  # Make sure that the model dir is empty. This is important for running
  # tests locally.
  if tf.io.gfile.exists(model_dir) and assert_train_output_files:
    tf.io.gfile.rmtree(model_dir)
  # Run the actual (short) train/eval cycle parameterized by the gin file.
  train_eval.train_eval_model(
      model_dir=model_dir,
      max_train_steps=max_train_steps,
      eval_steps=eval_steps,
      create_exporters_fn=None)
  if assert_train_output_files:
    assert_output_files(
        test_case=test_case,
        model_dir=model_dir,
        expected_output_filename_patterns=DEFAULT_TRAIN_FILENAME_PATTERNS)
  if assert_eval_output_files:
    assert_output_files(
        test_case=test_case,
        model_dir=model_dir,
        expected_output_filename_patterns=DEFAULT_EVAL_FILENAME_PATTERNS)
| 39.324324 | 80 | 0.727491 |
ace47f439c55c65f4e33e939ec9d977a312d57fb | 7,497 | py | Python | yalesmartalarmclient/auth.py | TempestuousWafer/Yale-Smart-Alarm-Client | e1763bf702c2d0bd7b26eb185b7be19c135b4acb | [
"Apache-2.0"
] | null | null | null | yalesmartalarmclient/auth.py | TempestuousWafer/Yale-Smart-Alarm-Client | e1763bf702c2d0bd7b26eb185b7be19c135b4acb | [
"Apache-2.0"
] | null | null | null | yalesmartalarmclient/auth.py | TempestuousWafer/Yale-Smart-Alarm-Client | e1763bf702c2d0bd7b26eb185b7be19c135b4acb | [
"Apache-2.0"
] | null | null | null | """Module for handling authentication against the Yale Smart API."""
import logging
from typing import Any, Dict, Optional, Tuple, cast
import requests
from .exceptions import AuthenticationError, UnknownError
from .const import (
HOST,
ENDPOINT_TOKEN,
ENDPOINT_SERVICES,
YALE_AUTH_TOKEN,
YALE_AUTHENTICATION_REFRESH_TOKEN,
YALE_AUTHENTICATION_ACCESS_TOKEN,
DEFAULT_REQUEST_TIMEOUT,
)
_LOGGER = logging.getLogger(__name__)
class YaleAuth:
    """Handle authentication and creating authorized calls on the yale apis."""

    def __init__(self, username: str, password: str) -> None:
        """Store credentials and authenticate immediately.

        Args:
            username: Yale account user name.
            password: Yale account password.

        Raises:
            AuthenticationError: If the credentials are rejected.
        """
        self._host = HOST
        self.username = username
        self.password = password
        self.refresh_token: Optional[str] = None
        self.access_token: Optional[str] = None
        # Fail fast so later requests can rely on a valid access token.
        self._authorize()

    @property
    def auth_headers(self) -> Dict[str, str]:
        """Return the Authorization header built from the access token."""
        return {"Authorization": "Bearer " + self.access_token}

    def get_authenticated(self, endpoint: str, _retry: bool = True) -> Dict[str, Any]:
        """Execute a GET request on an endpoint.

        Args:
            endpoint: parts of an url.
            _retry: Internal flag; a 401/403 triggers one re-authentication
                followed by a single retry of the request.

        Returns:
            a dictionary with the response.

        Raises:
            ConnectionError: On HTTP or connection failures.
            TimeoutError: If the request times out.
            UnknownError: On any other failure.
        """
        url = self._host + endpoint
        try:
            response = requests.get(
                url, headers=self.auth_headers, timeout=DEFAULT_REQUEST_TIMEOUT
            )
            response.raise_for_status()
        except requests.exceptions.HTTPError as error:
            _LOGGER.debug("Http Error: %s", error)
            if response.status_code in [401, 403] and _retry:
                # Access token likely expired: drop both tokens, re-auth
                # and retry once, RETURNING the retried result. (The
                # previous code discarded the retry and raised anyway.)
                self.refresh_token = None
                self.access_token = None
                self._authorize()
                return self.get_authenticated(endpoint, _retry=False)
            raise ConnectionError(f"Connection error {error}")
        except requests.exceptions.ConnectionError as error:
            _LOGGER.debug("Connection Error: %s", error)
            raise ConnectionError(f"Connection error {error}")
        except requests.exceptions.Timeout as error:
            _LOGGER.debug("Timeout Error: %s", error)
            raise TimeoutError(f"Timeout {error}")
        except requests.exceptions.RequestException as error:
            _LOGGER.debug("Requests Error: %s", error)
            raise UnknownError(f"Requests error {error}")
        except Exception as error:
            _LOGGER.debug("Unknown Error: %s", error)
            raise UnknownError(f"Unknown error {error}")
        return cast(Dict[str, Any], response.json())

    def post_authenticated(
        self,
        endpoint: str,
        params: Optional[Dict[Any, Any]] = None,
        _retry: bool = True,
    ) -> Dict[str, Any]:
        """Execute a POST request on an endpoint.

        Args:
            endpoint: URL endpoint to connect to.
            params: Optional form data sent as the request body.
            _retry: Internal flag; a 401/403 triggers one re-authentication
                followed by a single retry of the request.

        Returns:
            A dictionary with the response.

        Raises:
            ConnectionError: On HTTP or connection failures.
            TimeoutError: If the request times out.
            UnknownError: On any other failure.
        """
        # The panic endpoint is served from the host with its last five
        # characters stripped (same slicing as the original code).
        if "panic" in endpoint:
            url = self._host[:-5] + endpoint
        else:
            url = self._host + endpoint
        try:
            response = requests.post(
                url,
                headers=self.auth_headers,
                data=params,
                timeout=DEFAULT_REQUEST_TIMEOUT,
            )
            response.raise_for_status()
        except requests.exceptions.HTTPError as error:
            _LOGGER.debug("Http Error: %s", error)
            if response.status_code in [401, 403] and _retry:
                # Same expired-token recovery as get_authenticated: the
                # retried result is now returned instead of discarded.
                self.refresh_token = None
                self.access_token = None
                self._authorize()
                return self.post_authenticated(endpoint, params, _retry=False)
            raise ConnectionError(f"Connection error {error}")
        except requests.exceptions.ConnectionError as error:
            _LOGGER.debug("Connection Error: %s", error)
            raise ConnectionError(f"Connection error {error}")
        except requests.exceptions.Timeout as error:
            _LOGGER.debug("Timeout Error: %s", error)
            raise TimeoutError(f"Timeout {error}")
        except requests.exceptions.RequestException as error:
            _LOGGER.debug("Requests Error: %s", error)
            raise UnknownError(f"Requests error {error}")
        except Exception as error:
            _LOGGER.debug("Unknown Error: %s", error)
            raise UnknownError(f"Unknown error {error}")
        # The panic endpoint returns no JSON body; report success directly.
        if "panic" in endpoint:
            return {"panic": "triggered"}
        return cast(Dict[str, Any], response.json())

    def _update_services(self) -> None:
        """Replace self._host with the per-account service URL, if any."""
        data = self.get_authenticated(ENDPOINT_SERVICES)
        url = data.get("yapi")
        if url is not None:
            if len(url) > 0:
                _LOGGER.debug("Yale URL updated: %s", url)
                # Normalize: the code appends endpoints that begin with "/".
                if url.endswith("/"):
                    url = url[:-1]
                self._host = url
            else:
                _LOGGER.debug("Services URL is empty")
        else:
            _LOGGER.debug("Unable to fetch services")

    def _authorize(self) -> Tuple[str, str]:
        """Obtain (or refresh) the access and refresh tokens.

        Uses the refresh-token grant when a refresh token is available,
        otherwise the password grant with the stored credentials.

        Returns:
            (access_token, refresh_token) on success.

        Raises:
            AuthenticationError: If the API rejects the credentials/token.
            ConnectionError, TimeoutError, UnknownError: On transport errors.
        """
        if self.refresh_token:
            payload = {
                "grant_type": "refresh_token",
                "refresh_token": self.refresh_token,
            }
        else:
            payload = {
                "grant_type": "password",
                "username": self.username,
                "password": self.password,
            }
        headers = {
            "Authorization": "Basic " + YALE_AUTH_TOKEN,
        }
        url = self._host + ENDPOINT_TOKEN
        _LOGGER.debug("Attempting authorization")
        try:
            response = requests.post(
                url, headers=headers, data=payload, timeout=DEFAULT_REQUEST_TIMEOUT
            )
            response.raise_for_status()
        except requests.exceptions.HTTPError as error:
            _LOGGER.debug("Http Error: %s", error)
            if response.status_code in [401, 403]:
                raise AuthenticationError(f"Failed to authenticate {error}")
            raise ConnectionError(f"Connection error {error}")
        except requests.exceptions.ConnectionError as error:
            _LOGGER.debug("Connection Error: %s", error)
            raise ConnectionError(f"Connection error {error}")
        except requests.exceptions.Timeout as error:
            _LOGGER.debug("Timeout Error: %s", error)
            raise TimeoutError(f"Timeout {error}")
        except requests.exceptions.RequestException as error:
            _LOGGER.debug("Requests Error: %s", error)
            raise UnknownError(f"Requests error {error}")
        except Exception as error:
            _LOGGER.debug("Unknown Error: %s", error)
            raise UnknownError(f"Unknown error {error}")
        data = response.json()
        _LOGGER.debug("Authorization response: %s", data)
        _LOGGER.info("Authorization to Yale Alarm API successful.")
        self.refresh_token = data.get(YALE_AUTHENTICATION_REFRESH_TOKEN)
        self.access_token = data.get(YALE_AUTHENTICATION_ACCESS_TOKEN)
        if self.refresh_token is None or self.access_token is None:
            raise AuthenticationError(
                "Failed to authenticate with Yale Smart Alarm. Invalid token."
            )
        self._update_services()
        return self.access_token, self.refresh_token
| 38.055838 | 84 | 0.579699 |
ace47f4facd0cb37abc1ccefe6afe1c5b61e380f | 10,301 | py | Python | mesonbuild/mlog.py | ArtsiomCh/meson | 5920344b9283a55939ef322ca39fdd70f5aafd31 | [
"Apache-2.0"
] | null | null | null | mesonbuild/mlog.py | ArtsiomCh/meson | 5920344b9283a55939ef322ca39fdd70f5aafd31 | [
"Apache-2.0"
] | null | null | null | mesonbuild/mlog.py | ArtsiomCh/meson | 5920344b9283a55939ef322ca39fdd70f5aafd31 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013-2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import io
import sys
import time
import platform
from contextlib import contextmanager
import typing
from typing import Any, Generator, List, Optional, Sequence, TextIO, Union
from pathlib import Path
"""This is (mostly) a standalone module used to write logging
information about Meson runs. Some output goes to screen,
some to logging dir and some goes to both."""
def _windows_ansi() -> bool:
    """Return True if the Windows console accepts ANSI escape sequences."""
    # windll only exists on windows, so mypy will get mad
    from ctypes import windll, byref # type: ignore
    from ctypes.wintypes import DWORD
    kernel = windll.kernel32
    # -11 is STD_OUTPUT_HANDLE.
    stdout = kernel.GetStdHandle(-11)
    mode = DWORD()
    if not kernel.GetConsoleMode(stdout, byref(mode)):
        return False
    # ENABLE_VIRTUAL_TERMINAL_PROCESSING == 0x4
    # If the call to enable VT processing fails (returns 0), we fallback to
    # original behavior
    return bool(kernel.SetConsoleMode(stdout, mode.value | 0x4) or os.environ.get('ANSICON'))
# Decide once at import time whether stdout supports ANSI colors; any
# failure (e.g. stdout replaced with an object lacking fileno()) disables
# coloring rather than crashing.
try:
    if platform.system().lower() == 'windows':
        colorize_console = os.isatty(sys.stdout.fileno()) and _windows_ansi() # type: bool
    else:
        colorize_console = os.isatty(sys.stdout.fileno()) and os.environ.get('TERM') != 'dumb'
except Exception:
    colorize_console = False
# Mutable module-level logging state, manipulated via the helpers below
# (initialize, disable/enable, set_quiet/set_verbose, nested, ...).
log_dir = None # type: Optional[str]
log_file = None # type: Optional[TextIO]
log_fname = 'meson-log.txt' # type: str
log_depth = 0 # type: int
log_timestamp_start = None # type: Optional[float]
log_fatal_warnings = False # type: bool
log_disable_stdout = False # type: bool
log_errors_only = False # type: bool
_in_ci = 'CI' in os.environ # type: bool
def disable() -> None:
    """Suppress all console output (log file still receives messages)."""
    global log_disable_stdout
    log_disable_stdout = True
def enable() -> None:
    """Re-enable console output after disable()."""
    global log_disable_stdout
    log_disable_stdout = False
def set_quiet() -> None:
    """Print only messages flagged as errors to the console."""
    global log_errors_only
    log_errors_only = True
def set_verbose() -> None:
    """Print all messages to the console (undo set_quiet())."""
    global log_errors_only
    log_errors_only = False
def initialize(logdir: str, fatal_warnings: bool = False) -> None:
    """Open the meson log file inside logdir and configure warning fatality."""
    global log_dir, log_file, log_fatal_warnings
    log_dir = logdir
    log_file = open(os.path.join(logdir, log_fname), 'w', encoding='utf8')
    log_fatal_warnings = fatal_warnings
def set_timestamp_start(start: float) -> None:
    """Set the monotonic reference time used for the [x.xxx] log prefix."""
    global log_timestamp_start
    log_timestamp_start = start
def shutdown() -> Optional[str]:
    """Close the log file, if open, and return its path (else None)."""
    global log_file
    if log_file is None:
        return None
    # Clear the global before closing so that an exception raised by
    # close() cannot leave a half-dead handle registered.
    handle, log_file = log_file, None
    path = handle.name
    handle.close()
    return path
class AnsiDecorator:
    """A piece of text bundled with the ANSI escape code that styles it."""

    # Escape sequence resetting the terminal to the default style.
    plain_code = "\033[0m"

    def __init__(self, text: str, code: str, quoted: bool = False):
        self.text = text      # raw text to display
        self.code = code      # ANSI code applied when colors are enabled
        self.quoted = quoted  # whether to wrap the result in double quotes

    def get_text(self, with_codes: bool) -> str:
        """Render the text, with escape codes only when requested."""
        if with_codes:
            rendered = "{}{}{}".format(self.code, self.text, AnsiDecorator.plain_code)
        else:
            rendered = self.text
        return '"{}"'.format(rendered) if self.quoted else rendered
# Convenience constructors for styled text. The "bright" variants use SGR
# codes 1;3x (bold + color); the "normal_" variants use plain 3x colors.
def bold(text: str, quoted: bool = False) -> AnsiDecorator:
    return AnsiDecorator(text, "\033[1m", quoted=quoted)
def red(text: str) -> AnsiDecorator:
    return AnsiDecorator(text, "\033[1;31m")
def green(text: str) -> AnsiDecorator:
    return AnsiDecorator(text, "\033[1;32m")
def yellow(text: str) -> AnsiDecorator:
    return AnsiDecorator(text, "\033[1;33m")
def blue(text: str) -> AnsiDecorator:
    return AnsiDecorator(text, "\033[1;34m")
def cyan(text: str) -> AnsiDecorator:
    return AnsiDecorator(text, "\033[1;36m")
def normal_red(text: str) -> AnsiDecorator:
    return AnsiDecorator(text, "\033[31m")
def normal_green(text: str) -> AnsiDecorator:
    return AnsiDecorator(text, "\033[32m")
def normal_yellow(text: str) -> AnsiDecorator:
    return AnsiDecorator(text, "\033[33m")
def normal_blue(text: str) -> AnsiDecorator:
    return AnsiDecorator(text, "\033[34m")
def normal_cyan(text: str) -> AnsiDecorator:
    return AnsiDecorator(text, "\033[36m")
# This really should be AnsiDecorator or anything that implements
# __str__(), but that requires protocols from typing_extensions
def process_markup(args: Sequence[Union[AnsiDecorator, str]], keep: bool) -> List[str]:
    """Flatten log arguments into plain strings.

    AnsiDecorator entries are rendered (with escape codes iff *keep* is
    True), None entries are dropped and anything else is str()-ified. A
    relative timestamp is prepended when set_timestamp_start() was called.
    """
    pieces = []  # type: List[str]
    if log_timestamp_start is not None:
        elapsed = time.monotonic() - log_timestamp_start
        pieces.append('[{:.3f}]'.format(elapsed))
    for item in args:
        if item is None:
            continue
        if isinstance(item, AnsiDecorator):
            pieces.append(item.get_text(keep))
        else:
            pieces.append(item if isinstance(item, str) else str(item))
    return pieces
def force_print(*args: str, **kwargs: Any) -> None:
    """Print to stdout (unless disable()d), prefixing each output line with
    one '|' per active nested() level and degrading to ASCII on encode errors."""
    global log_disable_stdout
    if log_disable_stdout:
        return
    # Render into a buffer first so the nesting prefix can be applied to
    # every line of the formatted output at once.
    iostr = io.StringIO()
    kwargs['file'] = iostr
    print(*args, **kwargs)
    raw = iostr.getvalue()
    if log_depth > 0:
        prepend = '|' * log_depth
        # count('\n') - 1 leaves the text after the final newline unprefixed.
        raw = prepend + raw.replace('\n', '\n' + prepend, raw.count('\n') - 1)
    # _Something_ is going to get printed.
    try:
        print(raw, end='')
    except UnicodeEncodeError:
        # Terminal cannot encode the text: fall back to ASCII replacements.
        cleaned = raw.encode('ascii', 'replace').decode('ascii')
        print(cleaned, end='')
# We really want a heterogeneous dict for this, but that's in typing_extensions
def debug(*args: Union[str, AnsiDecorator], **kwargs: Any) -> None:
    """Write a message to the log file only (never to the console)."""
    arr = process_markup(args, False)
    if log_file is not None:
        print(*arr, file=log_file, **kwargs)
        log_file.flush()
def _debug_log_cmd(cmd: str, args: List[str]) -> None:
    """Write a machine-readable '!meson_ci!' command to the log, CI only."""
    if not _in_ci:
        return
    args = ['"{}"'.format(x) for x in args] # Quote all args, just in case
    debug('!meson_ci!/{} {}'.format(cmd, ' '.join(args)))
def cmd_ci_include(file: str) -> None:
    """Emit a 'ci_include' command for *file* (see _debug_log_cmd)."""
    _debug_log_cmd('ci_include', [file])
def log(*args: Union[str, AnsiDecorator], is_error: bool = False,
        **kwargs: Any) -> None:
    """Log to the log file (uncolored) and, unless suppressed by set_quiet()
    for non-errors, to the console (colored when the terminal supports it)."""
    global log_errors_only
    arr = process_markup(args, False)
    if log_file is not None:
        print(*arr, file=log_file, **kwargs)
        log_file.flush()
    if colorize_console:
        # Re-render with ANSI escape codes for console output.
        arr = process_markup(args, True)
    if not log_errors_only or is_error:
        force_print(*arr, **kwargs)
def _log_error(severity: str, *rargs: Union[str, AnsiDecorator], **kwargs: Any) -> None:
    """Shared implementation behind error()/warning()/deprecation().

    Prepends a colored severity label (and a file:line prefix when kwargs
    contains 'location') before logging; afterwards raises MesonException
    when log_fatal_warnings is set via initialize().
    """
    # Imported lazily to avoid an import cycle with mesonlib/environment.
    from .mesonlib import get_error_location_string
    from .environment import build_filename
    from .mesonlib import MesonException
    # The typing requirements here are non-obvious. Lists are invariant,
    # therefore List[A] and List[Union[A, B]] are not able to be joined
    if severity == 'warning':
        label = [yellow('WARNING:')] # type: List[Union[str, AnsiDecorator]]
    elif severity == 'error':
        label = [red('ERROR:')]
    elif severity == 'deprecation':
        label = [red('DEPRECATION:')]
    else:
        raise MesonException('Invalid severity ' + severity)
    # rargs is a tuple, not a list
    args = label + list(rargs)
    location = kwargs.pop('location', None)
    if location is not None:
        location_file = os.path.join(location.subdir, build_filename)
        location_str = get_error_location_string(location_file, location.lineno)
        # Unions are frankly awful, and we have to cast here to get mypy
        # to understand that the list concatenation is safe
        location_list = typing.cast(List[Union[str, AnsiDecorator]], [location_str])
        args = location_list + args
    log(*args, **kwargs)
    global log_fatal_warnings
    if log_fatal_warnings:
        raise MesonException("Fatal warnings enabled, aborting")
# Public severity-specific wrappers; all delegate to _log_error and are
# always shown on the console (is_error=True bypasses set_quiet()).
def error(*args: Union[str, AnsiDecorator], **kwargs: Any) -> None:
    return _log_error('error', *args, **kwargs, is_error=True)
def warning(*args: Union[str, AnsiDecorator], **kwargs: Any) -> None:
    return _log_error('warning', *args, **kwargs, is_error=True)
def deprecation(*args: Union[str, AnsiDecorator], **kwargs: Any) -> None:
    return _log_error('deprecation', *args, **kwargs, is_error=True)
def get_relative_path(target: Path, current: Path) -> Path:
    """Return a relative path reaching *target* from *current*.

    Walks upwards from *current* one ancestor at a time, collecting one
    '..' per step, until an ancestor containing *target* is found. Falls
    back to returning *target* unchanged if there is no common ancestor.
    """
    ups = ['.']
    for ancestor in [current, *current.parents]:
        try:
            remainder = target.relative_to(ancestor)
        except ValueError:
            ups.append('..')
            continue
        return Path(*ups, remainder)
    # No common ancestor at all (e.g. different drives); give up.
    return target
def exception(e: Exception, prefix: Optional[AnsiDecorator] = None) -> None:
    """Log an exception with a colored prefix (default red 'ERROR:'),
    including a file:line:col location when the exception carries one."""
    if prefix is None:
        prefix = red('ERROR:')
    log()
    args = [] # type: List[Union[AnsiDecorator, str]]
    if hasattr(e, 'file') and hasattr(e, 'lineno') and hasattr(e, 'colno'):
        # Mypy can't figure this out, and it's pretty easy to visually inspect
        # that this is correct, so we'll just ignore it.
        path = get_relative_path(Path(e.file), Path(os.getcwd()))
        args.append('%s:%d:%d:' % (path, e.lineno, e.colno)) # type: ignore
    if prefix:
        args.append(prefix)
    args.append(str(e))
    log(*args)
def format_list(input_list: List[str]) -> str:
    """Format a list for logging purposes as a string.

    Separates all but the last item with commas and joins the last one
    with 'and': ['a', 'b', 'c'] -> 'a, b and c'. An empty list yields ''.
    """
    # Guard clauses replace the if/elif ladder; the ambiguous one-letter
    # name `l` (PEP 8 / E741) is gone.
    if not input_list:
        return ''
    if len(input_list) == 1:
        return input_list[0]
    # Works for two or more items: ', '.join of a single element is itself.
    return ' and '.join([', '.join(input_list[:-1]), input_list[-1]])
@contextmanager
def nested() -> Generator[None, None, None]:
    """Increase the '|' console-nesting depth for the duration of the block."""
    global log_depth
    log_depth += 1
    try:
        yield
    finally:
        # finally guarantees the depth is restored even if the body raises.
        log_depth -= 1
| 33.884868 | 94 | 0.658965 |
ace480890e9ee64d2e1c887b95404c0bfd47ca27 | 9,525 | py | Python | Compliance_minimization/Figure_4/training_and_optimization_bayesian_optimization.py | julianschumann/ae-opt | 611b6c893546267732a2d690df20a4cc238002e6 | [
"CC0-1.0"
] | null | null | null | Compliance_minimization/Figure_4/training_and_optimization_bayesian_optimization.py | julianschumann/ae-opt | 611b6c893546267732a2d690df20a4cc238002e6 | [
"CC0-1.0"
] | null | null | null | Compliance_minimization/Figure_4/training_and_optimization_bayesian_optimization.py | julianschumann/ae-opt | 611b6c893546267732a2d690df20a4cc238002e6 | [
"CC0-1.0"
] | null | null | null | import numpy as np
from mpi4py import MPI
import os
from SIMP import TO_SIMP, make_Conn_matrix
import time
from keras.models import load_model
from bayesian_optimization import kriging, ExImp, corr_matrix
def get_void(nely, nelx):
    """Build a boolean mask of circular void regions on a nely x nelx grid.

    Six circles of radius min(nely, nelx)/15 are centred at fixed fractional
    positions of the design domain. An element (i, j) is void when its
    1-based coordinate (i+1, j+1) lies strictly inside one of the circles
    (R - d > 0), exactly as in the original element-wise loop -- but the
    distances are now computed with a single vectorized NumPy broadcast
    instead of a Python double loop over all elements.

    Args:
        nely: number of elements along y.
        nelx: number of elements along x.

    Returns:
        Boolean array of shape (nely, nelx); True inside a void.
    """
    R = min(nely, nelx) / 15
    # Circle centres at fixed fractions of the domain, scaled to (y, x).
    loc = np.array([[1/3, 1/4], [2/3, 1/4], [1/3, 1/2],
                    [2/3, 1/2], [1/3, 3/4], [2/3, 3/4]]) * np.array([[nely, nelx]])
    # 1-based element coordinates, shape (nely, nelx, 2).
    ii, jj = np.indices((nely, nelx))
    coords = np.stack((ii + 1, jj + 1), axis=-1).astype(float)
    # Distance from every element to every centre; keep the nearest one.
    diff = coords[:, :, None, :] - loc[None, None, :, :]
    dmin = np.sqrt((diff ** 2).sum(axis=-1)).min(axis=-1)
    # Strictly inside the radius counts as void (matches original R - d > 0).
    return R - dmin > 0
def evaluate_design(Z,Decoder,volfrac,Iar,cMat,void,opt_it,typ):
    """Decode latent vectors and evaluate their compliance via SIMP.

    Each latent sample is decoded to a density field, refined with opt_it
    SIMP steps (loose threshold epsilon_1), then a second short SIMP run
    with a tighter threshold (epsilon_2) enforces near-binary densities and
    yields the compliance value.

    Args:
        Z: (n, edim) latent samples.
        Decoder: Keras model mapping latent vectors to density fields.
        volfrac, Iar, cMat, void: SIMP problem data (see TO_SIMP).
        opt_it: number of SIMP iterations for the first optimization pass.
        typ: encoding type; 'sdf' outputs are shifted by 0.5 and clipped.

    Returns:
        (X_out, C): optimized designs of shape (n, nely, nelx) and their
        compliances of shape (n,).
    """
    beta=0.05
    epsilon_1=1
    epsilon_2=0.25
    # NOTE(review): these two are dead assignments -- both are overwritten
    # by X.shape below.
    nelx=90
    nely=45
    penal=3
    E0=1
    nu=0.3
    max_move=0.25
    X=Decoder.predict(Z)
    if typ=='sdf':
        # Signed-distance-field decodings are centred at 0; shift to [0, 1].
        X=np.clip(X+0.5,0,1)
    (n,nely,nelx)=X.shape
    avoid=np.zeros((1,nely,nelx))
    C=np.zeros(n)
    X_out=np.zeros(X.shape)
    for i in range(n):
        X_out[i,:,:], _ = TO_SIMP(X[i,:,:] , volfrac, penal, beta, epsilon_1, max_move, E0, nu, Iar, cMat, False, void, avoid, 0, opt_it)
        ## Enforce a design with sparse densities
        X_out[i,:,:], C[i] = TO_SIMP(X_out[i,:,:], volfrac, penal, beta, epsilon_2, max_move, E0, nu, Iar, cMat, True , void, avoid, 0, 10)
    return X_out,C
## Implent multiprocessing, with size processors, where rank is the one currently executing this file
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
## Load the decoded latent space over which optimization is to be performed
Decoder_model='Sample_data/edim=100_pre=1_AAE=1_surr=1_sdf_case=3_Decoder.h5'
Decoder=load_model(Decoder_model)
## Set type of encoding
typ=Decoder_model[-21:-18] ## Only works for case<10
## Set number of optimization steps in latent space
opt_it=25
## Get problem dimensionality
[nely,nelx]=Decoder.output_shape[1:]
## Set parameters needed for cost function (Compliance minimization)
volfrac=0.4
Iar, cMat= make_Conn_matrix(nelx,nely)
void=get_void(nely, nelx)
## Get the dimensionality of the latent space over which optimization is to be performed
edim=Decoder.input_shape[1]
## Set the parameters for differential evolution (needed for ptimization of aqusition function)
multiplyer=0.6
prob_change=0.9
## Perform the Bayesian optimization
## Set the number of initial samples
train_samples=10*edim
train_samples_perrank=int(train_samples/size)
train_samples=train_samples_perrank*size
## Initialize Gaussian process model parameters
p=np.zeros(edim)
theta=np.ones(edim)
## Set number of itertions
bayes_iteration=edim*3
comm.Barrier()
start_time=time.time()
## Generate the number of training samples and evaluate them
Z0_rank=np.random.rand(train_samples_perrank,edim)
X0_rank,C0_rank=evaluate_design(Z0_rank,Decoder,volfrac,Iar,cMat,void,opt_it,typ)
## Share the samples across processor cores/ranks
if rank==0:
Z0_rec=np.empty((size,train_samples_perrank,edim))
X0_rec=np.empty((size,train_samples_perrank,nely,nelx))
C0_rec=np.empty((size,train_samples_perrank))
else:
Z0_rec=None
X0_rec=None
C0_rec=None
comm.Barrier()
comm.Gather(Z0_rank,Z0_rec,root=0)
comm.Gather(X0_rank,X0_rec,root=0)
comm.Gather(C0_rank,C0_rec,root=0)
if rank==0:
Z0=Z0_rec.reshape((train_samples,edim))
X0=X0_rec.reshape((train_samples,nely,nelx))
C0=C0_rec.reshape((train_samples,1))
else:
Z0=None
X0=None
C0=None
Z0=comm.bcast(Z0,root=0)
X0=comm.bcast(X0,root=0)
C0=comm.bcast(C0,root=0)
## Start the iterative optimization process
for ib in range(bayes_iteration):
    if rank==0:
        print(' Iteration {}'.format(ib))
        start_time_it=time.time()
        start_time_its=time.time()
    ## Set the weight to focus on ...
    if ib<bayes_iteration-100:
        ## ...exploration (all but the final 100 iterations)
        weight_explore=10
    else:
        ## ...exploitation
        weight_explore=0
    ## Set number of optimization steps when generating Gaussian process model parameters
    if ib==0:
        k_iter=50
        ## Get Gaussian process model parameters (cold start)
        theta,p=kriging(Z0, C0,rank,size,[],k_iter)
    else:
        ## Warm-start from previous parameters; refit harder every 100 iterations
        if np.mod(ib,100)==0:
            k_iter=50
        else:
            k_iter=10
        para_old=np.concatenate((theta,p-0.5),0)
        ## Get Gaussian process model parameters
        theta,p=kriging(Z0, C0,rank,size,para_old,k_iter)
    ## Get the Gaussian process model correlation matrix for optimized parameters
    K=corr_matrix(Z0, theta[:,np.newaxis], p[:,np.newaxis])[:,:,0]
    ## Get the inverse of the correlation matrix (adapt it when the matrix is singular)
    ## NOTE(review): the fallback SUBTRACTS identity*1e-4 from K; jitter is
    ## conventionally ADDED to the diagonal -- confirm the sign is intended.
    inverse_failed=True
    while inverse_failed:
        try:
            Kinv=np.linalg.inv(K)
            inverse_failed=False
        except np.linalg.LinAlgError:
            K=K-np.identity(len(C0))*1e-4
    stop_time_its=time.time()
    if rank==0:
        time_needed=stop_time_its-start_time_its
        print(' Time needed for model training: {:10.1f}s'.format(time_needed))
        start_time_its=time.time()
    ## Optimize acquisition function using differential evolution
    EI_num_pop_perrank=int(np.ceil(2.5*edim/size))
    EI_num_pop=size*EI_num_pop_perrank
    ## Initial generation is generated and evaluated
    Zei_rank=np.random.rand(EI_num_pop_perrank,edim)
    EI_rank=ExImp(Zei_rank, theta, p, Z0, C0, Kinv, weight_explore)
    ## Initial generation is shared over all ranks
    if rank==0:
        Zei_rec=np.empty((size,EI_num_pop_perrank,edim))
        EI_rec=np.empty((size,EI_num_pop_perrank))
    else:
        Zei_rec=None
        EI_rec=None
    comm.Barrier()
    comm.Gather(Zei_rank,Zei_rec,root=0)
    comm.Gather(EI_rank,EI_rec,root=0)
    if rank==0:
        Zei=Zei_rec.reshape((EI_num_pop,edim))
        EI=EI_rec.reshape(EI_num_pop)
    else:
        Zei=None
        EI=None
    Zei=comm.bcast(Zei,root=0)
    EI=comm.bcast(EI,root=0)
    loop_ei=0
    loop_ei_max=500
    ## Generations are evolved
    while loop_ei<loop_ei_max:
        Zei_rank=Zei[rank*EI_num_pop_perrank:(rank+1)*EI_num_pop_perrank,:]
        EI_rank=EI[rank*EI_num_pop_perrank:(rank+1)*EI_num_pop_perrank]
        ## Reproduction between different individuals from the population is performed
        ## (three distinct parents a, b, c give child a + F*(b - c))
        test_case=np.floor(np.random.rand(EI_num_pop_perrank,3)*(EI_num_pop-1e-7)).astype('int')
        Za_rank=np.copy(Zei[test_case[:,0],:])
        Zb_rank=np.copy(Zei[test_case[:,1],:])
        Zc_rank=np.copy(Zei[test_case[:,2],:])
        Zcom_rank=Za_rank+multiplyer*(Zb_rank-Zc_rank)
        ## Crossover between child and parent is performed
        prob=np.random.rand(EI_num_pop_perrank,edim)
        Zcom_rank[prob>prob_change]=np.copy(Zei_rank[prob>prob_change])
        ## Boundaries of design are enforced (latent box [0, 1])
        Zcom_rank[Zcom_rank<0]=0
        Zcom_rank[Zcom_rank>1]=1
        ## Selection between child (has to be evaluated first) and parent is performed
        ## (minimization: the smaller acquisition value survives)
        EI_compare=ExImp(Zcom_rank, theta, p, Z0, C0, Kinv, weight_explore)
        EI_rank=np.minimum(EI_rank,EI_compare)
        Zei_rank[EI_compare<=EI_rank,:]=Zcom_rank[EI_compare<=EI_rank,:]
        ## New population is shared between all ranks
        if rank==0:
            Zei_rec=np.empty((size,EI_num_pop_perrank,edim))
            EI_rec=np.empty((size,EI_num_pop_perrank))
        else:
            Zei_rec=None
            EI_rec=None
        comm.Barrier()
        comm.Gather(Zei_rank,Zei_rec,root=0)
        comm.Gather(EI_rank,EI_rec,root=0)
        if rank==0:
            Zei=Zei_rec.reshape((EI_num_pop,edim))
            EI=EI_rec.reshape(EI_num_pop)
        else:
            Zei=None
            EI=None
        Zei=comm.bcast(Zei,root=0)
        EI=comm.bcast(EI,root=0)
        loop_ei=loop_ei+1
    stop_time_its=time.time()
    if rank==0:
        time_needed=stop_time_its-start_time_its
        print(' Time needed for optimizing acqusition function: {:10.1f}s'.format(time_needed))
        start_time_its=time.time()
    ## The training samples are updated with the one having the highest expected improvement
    if rank==0:
        jmin=np.argmin(EI)
        Z_new=Zei[[jmin],:]
        X_new,C_new=evaluate_design(Z_new,Decoder,volfrac,Iar,cMat,void,opt_it,typ)
        C0=np.concatenate((C0,C_new[:,np.newaxis]),0)
        Z0=np.concatenate((Z0,Z_new),0)
        X0=np.concatenate((X0,X_new))
    # The new samples are shared across ranks
    Z0=comm.bcast(Z0,root=0)
    X0=comm.bcast(X0,root=0)
    C0=comm.bcast(C0,root=0)
    stop_time_it=time.time()
    stop_time_its=time.time()
    if rank==0:
        time_needed=stop_time_its-start_time_its
        print(' Time needed for updating data: {:10.1f}s'.format(time_needed))
    if rank==0:
        time_needed=stop_time_it-start_time_it
        print(' Time needed for iteration: {:10.1f}s'.format(time_needed))
    comm.Barrier()
## Best training sample is determined
if rank==0:
    Z_min=Z0[[np.argmin(C0)],:]
    ## Postprocessing is performed on best training sample (300 SIMP steps)
    X_final,F_final=evaluate_design(Z_min,Decoder,volfrac,Iar,cMat,void,300,typ)
    X_final=X_final[0,:,:]
    F_final=F_final[0]
else:
    Z_min=None
    X_final=None
    F_final=None
    data=None
## Post-processed result of the optimization is saved
comm.Barrier()
stop_time=time.time()
if rank==0:
    data=[X_final,F_final,stop_time-start_time]
    np.save('Sample_data/BO_test.npy',np.array(data))
ace482596cf091017080ce8adb1d7c21d3ece69b | 7,482 | py | Python | 2_gen_dsin_input.py | Extreme-lxh/DSIN | b83b0a30b0ea6ba82f3923900c6a671afcf37b38 | [
"Apache-2.0"
] | null | null | null | 2_gen_dsin_input.py | Extreme-lxh/DSIN | b83b0a30b0ea6ba82f3923900c6a671afcf37b38 | [
"Apache-2.0"
] | null | null | null | 2_gen_dsin_input.py | Extreme-lxh/DSIN | b83b0a30b0ea6ba82f3923900c6a671afcf37b38 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import os
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import numpy as np
import pandas as pd
from deepctr.utils import SingleFeat
from sklearn.preprocessing import LabelEncoder, StandardScaler
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tqdm import tqdm
from config import DSIN_SESS_COUNT, DSIN_SESS_MAX_LEN, FRAC
# Sampling fraction of the dataset (re-binding of the config value; the
# self-assignment is a no-op kept so the name is visibly module-local).
FRAC = FRAC
# Number of behavior sessions per user fed into the DSIN model.
SESS_COUNT = DSIN_SESS_COUNT
def gen_sess_feature_dsin(row):
    """Build per-session DSIN input features for one raw sample row.

    Reads the module-global ``user_hist_session`` mapping
    (user -> list of sessions; each session is a list of events and each
    event appears to be indexed as (cate_id, brand, timestamp) -- see the
    e[0]/e[1]/[2] accesses below).

    Args:
        row: one item from ``DataFrame.iterrows()`` -- a (index, Series)
            tuple; only ``row[1]['user']`` and ``row[1]['time_stamp']``
            are used.

    Returns:
        (sess_input_dict, sess_input_length_dict, sess_length) where
        sess_input_dict maps 'sess_<i>' to cate_id/brand event lists,
        sess_input_length_dict maps 'sess_<i>' to the clipped session
        length, and sess_length is the number of valid sessions found.
    """
    sess_count = DSIN_SESS_COUNT
    sess_max_len = DSIN_SESS_MAX_LEN
    sess_input_dict = {}
    sess_input_length_dict = {}
    # Initialize every session slot with empty feature lists.
    for i in range(sess_count):
        sess_input_dict['sess_' + str(i)] = {'cate_id': [], 'brand': []}
        sess_input_length_dict['sess_' + str(i)] = 0
    sess_length = 0
    user, time_stamp = row[1]['user'], row[1]['time_stamp']
    # sample_time = pd.to_datetime(timestamp_datetime(time_stamp ))
    if user not in user_hist_session:
        # Unknown user: pad every session with a single 0 (the padding id).
        for i in range(sess_count):
            sess_input_dict['sess_' + str(i)]['cate_id'] = [0]
            sess_input_dict['sess_' + str(i)]['brand'] = [0]
            sess_input_length_dict['sess_' + str(i)] = 0
        sess_length = 0
    else:
        valid_sess_count = 0
        last_sess_idx = len(user_hist_session[user]) - 1
        # Scan sessions newest-first for the latest one that started
        # before this sample's timestamp; only events strictly before
        # time_stamp are counted (no label leakage).
        for i in reversed(range(len(user_hist_session[user]))):
            cur_sess = user_hist_session[user][i]
            if cur_sess[0][2] < time_stamp:
                in_sess_count = 1
                for j in range(1, len(cur_sess)):
                    if cur_sess[j][2] < time_stamp:
                        in_sess_count += 1
                # Sessions with <=2 usable events are skipped as sess_0.
                # NOTE(review): the break below fires on the first session
                # that started before time_stamp even when in_sess_count<=2,
                # leaving sess_0 empty in that case -- confirm intent.
                if in_sess_count > 2:
                    sess_input_dict['sess_0']['cate_id'] = [e[0] for e in cur_sess[max(0,
                                                                                       in_sess_count - sess_max_len):in_sess_count]]
                    sess_input_dict['sess_0']['brand'] = [e[1] for e in
                                                          cur_sess[max(0, in_sess_count - sess_max_len):in_sess_count]]
                    sess_input_length_dict['sess_0'] = min(
                        sess_max_len, in_sess_count)
                    last_sess_idx = i
                    valid_sess_count += 1
                break
        # Earlier sessions (sess_1..sess_{count-1}) are taken whole,
        # truncated to the last sess_max_len events.
        for i in range(1, sess_count):
            if last_sess_idx - i >= 0:
                cur_sess = user_hist_session[user][last_sess_idx - i]
                sess_input_dict['sess_' + str(i)]['cate_id'] = [e[0]
                                                                for e in cur_sess[-sess_max_len:]]
                sess_input_dict['sess_' + str(i)]['brand'] = [e[1]
                                                              for e in cur_sess[-sess_max_len:]]
                sess_input_length_dict['sess_' +
                                       str(i)] = min(sess_max_len, len(cur_sess))
                valid_sess_count += 1
            else:
                # No older session available: pad with a single 0.
                sess_input_dict['sess_' + str(i)]['cate_id'] = [0]
                sess_input_dict['sess_' + str(i)]['brand'] = [0]
                sess_input_length_dict['sess_' + str(i)] = 0
        sess_length = valid_sess_count
    return sess_input_dict, sess_input_length_dict, sess_length
if __name__ == "__main__":
    # --- Load the per-user session history shards produced upstream. ---
    user_hist_session = {}
    FILE_NUM = len(
        list(filter(lambda x: x.startswith('user_hist_session_' + str(FRAC) + '_dsin_'),
                    os.listdir('../sampled_data/'))))
    print('total', FILE_NUM, 'files')
    for i in range(FILE_NUM):
        user_hist_session_ = pd.read_pickle(
            '../sampled_data/user_hist_session_' + str(FRAC) + '_dsin_' + str(i) + '.pkl')  # 19,34
        user_hist_session.update(user_hist_session_)
        del user_hist_session_
    sample_sub = pd.read_pickle(
        '../sampled_data/raw_sample_' + str(FRAC) + '.pkl')
    # --- Build session features for every raw sample row. ---
    index_list = []
    sess_input_dict = {}
    sess_input_length_dict = {}
    for i in range(SESS_COUNT):
        sess_input_dict['sess_' + str(i)] = {'cate_id': [], 'brand': []}
        sess_input_length_dict['sess_' + str(i)] = []
    sess_length_list = []
    for row in tqdm(sample_sub[['user', 'time_stamp']].iterrows()):
        sess_input_dict_, sess_input_length_dict_, sess_length = gen_sess_feature_dsin(
            row)
        # index_list.append(index)
        for i in range(SESS_COUNT):
            sess_name = 'sess_' + str(i)
            sess_input_dict[sess_name]['cate_id'].append(
                sess_input_dict_[sess_name]['cate_id'])
            sess_input_dict[sess_name]['brand'].append(
                sess_input_dict_[sess_name]['brand'])
            sess_input_length_dict[sess_name].append(
                sess_input_length_dict_[sess_name])
        sess_length_list.append(sess_length)
    print('done')
    # --- Join user profile and ad features onto the raw samples. ---
    user = pd.read_pickle('../sampled_data/user_profile_' + str(FRAC) + '.pkl')
    ad = pd.read_pickle('../sampled_data/ad_feature_enc_' + str(FRAC) + '.pkl')
    user = user.fillna(-1)
    # Source column name carries a trailing space; normalize it.
    user.rename(
        columns={'new_user_class_level ': 'new_user_class_level'}, inplace=True)
    sample_sub = pd.read_pickle(
        '../sampled_data/raw_sample_' + str(FRAC) + '.pkl')
    sample_sub.rename(columns={'user': 'userid'}, inplace=True)
    data = pd.merge(sample_sub, user, how='left', on='userid', )
    data = pd.merge(data, ad, how='left', on='adgroup_id')
    # --- Encode sparse features and scale the dense ones. ---
    sparse_features = ['userid', 'adgroup_id', 'pid', 'cms_segid', 'cms_group_id', 'final_gender_code', 'age_level',
                       'pvalue_level', 'shopping_level', 'occupation', 'new_user_class_level', 'campaign_id',
                       'customer']
    dense_features = ['price']
    for feat in tqdm(sparse_features):
        lbe = LabelEncoder()  # or Hash
        data[feat] = lbe.fit_transform(data[feat])
    mms = StandardScaler()
    data[dense_features] = mms.fit_transform(data[dense_features])
    # Vocabulary size per sparse feature is max encoded id + 1.
    sparse_feature_list = [SingleFeat(feat, data[feat].max(
    ) + 1) for feat in sparse_features + ['cate_id', 'brand']]
    dense_feature_list = [SingleFeat(feat, 1) for feat in dense_features]
    # --- Pad session sequences and assemble the model input list. ---
    sess_feature = ['cate_id', 'brand']
    sess_input = []
    sess_input_length = []
    for i in tqdm(range(SESS_COUNT)):
        sess_name = 'sess_' + str(i)
        for feat in sess_feature:
            sess_input.append(pad_sequences(
                sess_input_dict[sess_name][feat], maxlen=DSIN_SESS_MAX_LEN, padding='post'))
        sess_input_length.append(sess_input_length_dict[sess_name])
    model_input = [data[feat.name].values for feat in sparse_feature_list] + \
                  [data[feat.name].values for feat in dense_feature_list]
    sess_lists = sess_input + [np.array(sess_length_list)]
    model_input += sess_lists
    # --- Persist inputs, labels, and feature descriptors. ---
    if not os.path.exists('../model_input/'):
        os.mkdir('../model_input/')
    pd.to_pickle(model_input, '../model_input/dsin_input_' +
                 str(FRAC) + '_' + str(SESS_COUNT) + '.pkl')
    pd.to_pickle(data['clk'].values, '../model_input/dsin_label_' +
                 str(FRAC) + '_' + str(SESS_COUNT) + '.pkl')
    pd.to_pickle({'sparse': sparse_feature_list, 'dense': dense_feature_list},
                 '../model_input/dsin_fd_' + str(FRAC) + '_' + str(SESS_COUNT) + '.pkl')
    print("gen dsin input done")
| 42.511364 | 133 | 0.574713 |
ace48313242fdc2835e65b4e9bab7c284cd9524f | 202 | py | Python | modular_provider_architecture_definition/tests/cases/modular_provider_architecture/modular_provider_architecture/module_runtime/run.py | Incognito/python-architecture-linter | 534e1508aaa46920b31601f8fffbb0f132844883 | [
"MIT"
] | 5 | 2021-06-30T09:33:09.000Z | 2021-08-18T12:20:32.000Z | modular_provider_architecture_definition/tests/cases/modular_provider_architecture/modular_provider_architecture/module_runtime/run.py | Incognito/python-architecture-linter | 534e1508aaa46920b31601f8fffbb0f132844883 | [
"MIT"
] | 45 | 2021-06-27T10:35:43.000Z | 2022-03-28T04:09:05.000Z | modular_provider_architecture_definition/tests/cases/modular_provider_architecture/modular_provider_architecture/module_runtime/run.py | Incognito/python-architecture-linter | 534e1508aaa46920b31601f8fffbb0f132844883 | [
"MIT"
] | 1 | 2021-07-04T15:48:00.000Z | 2021-07-04T15:48:00.000Z | from modular_provider_architecture.module_runtime.provider import RuntimeProvider
if __name__ == "__main__":
    # Entry point: obtain the runtime from its provider, then hand over control.
    runtime = RuntimeProvider().provide_runtime()
    runtime.run()
| 28.857143 | 81 | 0.777228 |
ace483635d19a610e66ada660ed7abcc73a948d7 | 253 | py | Python | model_deployment/model/grapy/dataloaders/mypath_cihp.py | Pherokung/VIRTUON | 987cf4e37a72b214f02f0f7fbda68c0cc74e6de4 | [
"MIT"
] | 8 | 2020-11-20T17:51:59.000Z | 2020-11-26T05:14:06.000Z | model_deployment/model/grapy/dataloaders/mypath_cihp.py | Pherokung/VIRTUON | 987cf4e37a72b214f02f0f7fbda68c0cc74e6de4 | [
"MIT"
] | 3 | 2021-09-22T18:45:51.000Z | 2022-02-10T09:09:23.000Z | model_deployment/model/grapy/dataloaders/mypath_cihp.py | Pherokung/VIRTUON | 987cf4e37a72b214f02f0f7fbda68c0cc74e6de4 | [
"MIT"
class Path(object):
    """Lookup of dataset root directories by database name."""

    @staticmethod
    def db_root_dir(database):
        """Return the data root for *database*; only 'cihp' is supported."""
        if database != 'cihp':
            print('Database {} not available.'.format(database))
            raise NotImplementedError
        return './model/input/'
| 28.111111 | 64 | 0.577075 |
ace48363eeedfca329c88fe15c1126ff0cf22086 | 3,212 | py | Python | app/app/settings.py | andrewlabada/tdd-pos-app | f43051d6b068c012650c78a6bec96e4079ae5416 | [
"MIT"
] | null | null | null | app/app/settings.py | andrewlabada/tdd-pos-app | f43051d6b068c012650c78a6bec96e4079ae5416 | [
"MIT"
] | null | null | null | app/app/settings.py | andrewlabada/tdd-pos-app | f43051d6b068c012650c78a6bec96e4079ae5416 | [
"MIT"
] | null | null | null | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 4.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-_pl-0unc63kq2&w)oqqskho@gf2ks6ul%zmry)9mtg8zenwjok'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.903226 | 91 | 0.702677 |
ace483bd5fd52c0a1e530c6e1b00948e62cb39fc | 1,299 | py | Python | playground.py | thehaus/AutoScraper | f3d8f674fc9c57ead2d90ebc5559db2bf1041d0f | [
"MIT"
] | null | null | null | playground.py | thehaus/AutoScraper | f3d8f674fc9c57ead2d90ebc5559db2bf1041d0f | [
"MIT"
] | null | null | null | playground.py | thehaus/AutoScraper | f3d8f674fc9c57ead2d90ebc5559db2bf1041d0f | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
import re
import dbservice
import json
def has_class_but_no_id(tag):
return tag.has_attr('class')
url = 'https://www.edmunds.com/audi/q7/2019/'
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
page = requests.get(url,headers=headers)
soup = BeautifulSoup(page.content, 'html.parser')
cleanedsoup = str(soup)
cleanedsoup = cleanedsoup.replace("\\n","")
cleanedsoup = cleanedsoup.replace("\\","")
cleanedsoup = cleanedsoup.replace(" ","")
dbservice.write('testaudifile',cleanedsoup)
cartrims = re.search('"trimInfo": {(.+?)}, "vehicle"',cleanedsoup)
if cartrims:
print('found trims')
jsonfromcartrims = cartrims.group(1)
jsonfromcartrims = '{'+jsonfromcartrims
jsonfromcartrims = jsonfromcartrims+'}'
jsonData = json.loads(jsonfromcartrims)
for key in jsonData:
datatosave = '{"make": "'+'Acura'+'", "model": "'+'TLX'+'", "trim": "'+str(key)+'"'
subkeys = json.loads(str(jsonData[key]).replace('\'','"'))
for subkey in subkeys:
datatosave += ', "'+str(subkey)+'": "'+str(subkeys[subkey]).replace('\'','"')+'"'
datatosave += '}'
dbservice.write('testtextfile2',datatosave) | 40.59375 | 149 | 0.665127 |
ace48462b88c992f06dd54289874ed7c57a7e06a | 38,612 | py | Python | _v5__main__kernel.py | konsan1101/pycv5 | c64dc03499a103ca697010be32071571012e8ae0 | [
"MIT"
] | null | null | null | _v5__main__kernel.py | konsan1101/pycv5 | c64dc03499a103ca697010be32071571012e8ae0 | [
"MIT"
] | null | null | null | _v5__main__kernel.py | konsan1101/pycv5 | c64dc03499a103ca697010be32071571012e8ae0 | [
"MIT"
] | 1 | 2020-05-12T06:22:41.000Z | 2020-05-12T06:22:41.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# COPYRIGHT (C) 2014-2020 Mitsuo KONDOU.
# This software is released under the MIT License.
# https://github.com/konsan1101
# Thank you for keeping the rules.
import sys
import os
import time
import datetime
import codecs
import glob
import queue
import threading
import subprocess
print(os.path.dirname(__file__))
print(os.path.basename(__file__))
print(sys.version_info)
# Interface: control files polled/written by the cooperating subprocesses.
qCtrl_control_kernel = 'temp/control_kernel.txt'
qCtrl_control_speech = 'temp/control_speech.txt'
qCtrl_control_vision = 'temp/control_vision.txt'
qCtrl_control_desktop = 'temp/control_desktop.txt'
# This process listens on the kernel control file.
qCtrl_control_self = qCtrl_control_kernel
qCtrl_control_bgm = 'temp/control_bgm.txt'
qCtrl_control_browser = 'temp/control_browser.txt'
qCtrl_control_player = 'temp/control_player.txt'
qCtrl_control_chatting = 'temp/control_chatting.txt'
qCtrl_control_knowledge = 'temp/control_knowledge.txt'
# Python worker scripts launched as subprocesses by main_kernel.
qPython_main_speech = '_v5__main_speech.py'
qPython_main_vision = '_v5__main_vision.py'
qPython_main_desktop = '_v5__main_desktop.py'
qPython_bgm = '_v5__sub_bgm.py'
qPython_browser = '_v5__sub_browser.py'
qPython_player = '_v5__sub_player.py'
qPython_chatting = '_v5__sub_chatting.py'
qPython_knowledge = '_v5__sub_knowledge.py'
qPython_selfcheck = '_v5_sub_self_check.py'
qPython_smartSpk = '_v5_sub_smart_speaker.py'
qPython_rssSearch = '_v5_sub_rss_search.py'
qPython_weather = '_v5_sub_weather_search.py'
# qLog,qFunc: shared logging / utility helper modules for this project.
import _v5__qLog
qLog = _v5__qLog.qLog_class()
import _v5__qFunc
qFunc = _v5__qFunc.qFunc_class()
# Environment values resolved by qFunc (platform info, working paths,
# busy/ready status-file paths shared with the other subprocesses).
qPLATFORM = qFunc.getValue('qPLATFORM' )
qRUNATTR = qFunc.getValue('qRUNATTR' )
qHOSTNAME = qFunc.getValue('qHOSTNAME' )
qUSERNAME = qFunc.getValue('qUSERNAME' )
qPath_pictures = qFunc.getValue('qPath_pictures' )
qPath_videos = qFunc.getValue('qPath_videos' )
qPath_cache = qFunc.getValue('qPath_cache' )
qPath_sounds = qFunc.getValue('qPath_sounds' )
qPath_icons = qFunc.getValue('qPath_icons' )
qPath_fonts = qFunc.getValue('qPath_fonts' )
qPath_log = qFunc.getValue('qPath_log' )
qPath_work = qFunc.getValue('qPath_work' )
qPath_rec = qFunc.getValue('qPath_rec' )
qPath_s_ctrl = qFunc.getValue('qPath_s_ctrl' )
qPath_s_inp = qFunc.getValue('qPath_s_inp' )
qPath_s_wav = qFunc.getValue('qPath_s_wav' )
qPath_s_jul = qFunc.getValue('qPath_s_jul' )
qPath_s_STT = qFunc.getValue('qPath_s_STT' )
qPath_s_TTS = qFunc.getValue('qPath_s_TTS' )
qPath_s_TRA = qFunc.getValue('qPath_s_TRA' )
qPath_s_play = qFunc.getValue('qPath_s_play' )
qPath_v_ctrl = qFunc.getValue('qPath_v_ctrl' )
qPath_v_inp = qFunc.getValue('qPath_v_inp' )
qPath_v_jpg = qFunc.getValue('qPath_v_jpg' )
qPath_v_detect = qFunc.getValue('qPath_v_detect' )
qPath_v_cv = qFunc.getValue('qPath_v_cv' )
qPath_v_photo = qFunc.getValue('qPath_v_photo' )
qPath_v_msg = qFunc.getValue('qPath_v_msg' )
qPath_d_ctrl = qFunc.getValue('qPath_d_ctrl' )
qPath_d_play = qFunc.getValue('qPath_d_play' )
qPath_d_prtscn = qFunc.getValue('qPath_d_prtscn' )
qPath_d_movie = qFunc.getValue('qPath_d_movie' )
qPath_d_upload = qFunc.getValue('qPath_d_upload' )
qBusy_dev_cpu = qFunc.getValue('qBusy_dev_cpu' )
qBusy_dev_com = qFunc.getValue('qBusy_dev_com' )
qBusy_dev_mic = qFunc.getValue('qBusy_dev_mic' )
qBusy_dev_spk = qFunc.getValue('qBusy_dev_spk' )
qBusy_dev_cam = qFunc.getValue('qBusy_dev_cam' )
qBusy_dev_dsp = qFunc.getValue('qBusy_dev_dsp' )
qBusy_dev_scn = qFunc.getValue('qBusy_dev_scn' )
qBusy_s_ctrl = qFunc.getValue('qBusy_s_ctrl' )
qBusy_s_inp = qFunc.getValue('qBusy_s_inp' )
qBusy_s_wav = qFunc.getValue('qBusy_s_wav' )
qBusy_s_STT = qFunc.getValue('qBusy_s_STT' )
qBusy_s_TTS = qFunc.getValue('qBusy_s_TTS' )
qBusy_s_TRA = qFunc.getValue('qBusy_s_TRA' )
qBusy_s_play = qFunc.getValue('qBusy_s_play' )
qBusy_v_ctrl = qFunc.getValue('qBusy_v_ctrl' )
qBusy_v_inp = qFunc.getValue('qBusy_v_inp' )
qBusy_v_QR = qFunc.getValue('qBusy_v_QR' )
qBusy_v_jpg = qFunc.getValue('qBusy_v_jpg' )
qBusy_v_CV = qFunc.getValue('qBusy_v_CV' )
qBusy_d_ctrl = qFunc.getValue('qBusy_d_ctrl' )
qBusy_d_inp = qFunc.getValue('qBusy_d_inp' )
qBusy_d_QR = qFunc.getValue('qBusy_d_QR' )
qBusy_d_rec = qFunc.getValue('qBusy_d_rec' )
qBusy_d_play = qFunc.getValue('qBusy_d_play' )
qBusy_d_browser = qFunc.getValue('qBusy_d_browser')
qBusy_d_upload = qFunc.getValue('qBusy_d_upload' )
qRdy__s_force = qFunc.getValue('qRdy__s_force' )
qRdy__s_fproc = qFunc.getValue('qRdy__s_fproc' )
qRdy__s_sendkey = qFunc.getValue('qRdy__s_sendkey')
qRdy__v_reader = qFunc.getValue('qRdy__v_reader' )
qRdy__v_sendkey = qFunc.getValue('qRdy__v_sendkey')
qRdy__d_reader = qFunc.getValue('qRdy__d_reader' )
qRdy__d_sendkey = qFunc.getValue('qRdy__d_sendkey')
# Debug defaults (overridden by command-line arguments in the __main__ block).
runMode = 'hud'
qApiInp = 'free'
qApiTrn = 'free'
qApiOut = qApiTrn
# Prefer the native TTS engine on Windows/macOS.
if (qPLATFORM == 'windows'):
    qApiOut = 'winos'
if (qPLATFORM == 'darwin'):
    qApiOut = 'macos'
qLangInp = 'ja'
#qLangTrn = 'en,fr,'
qLangTrn = 'en'
qLangTxt = qLangInp
# First (two-letter) target language is also the speech-output language.
qLangOut = qLangTrn[:2]
class main_kernel:
    """Supervisor for the hands-free assistant.

    Runs a worker thread (main_proc) that launches/stops the speech, vision,
    desktop, BGM, browser, player, chatting and knowledge subprocesses
    according to runMode and control-file commands, and exchanges messages
    with its owner through two queues (proc_s in, proc_r out).
    """

    def __init__(self, name='thread', id='0', runMode='debug',
                 micDev='0', micType='bluetooth', micGuide='on', micLevel='777',
                 qApiInp='free', qApiTrn='free', qApiOut='free',
                 qLangInp='ja', qLangTrn='en,fr,', qLangTxt='ja', qLangOut='en',
                 ):
        """Store configuration; no subprocess is started until begin()."""
        self.runMode = runMode
        self.micDev = micDev
        self.micType = micType
        self.micGuide = micGuide
        self.micLevel = micLevel
        self.qApiInp = qApiInp
        self.qApiTrn = qApiTrn
        self.qApiOut = qApiOut
        self.qLangInp = qLangInp
        self.qLangTrn = qLangTrn
        self.qLangTxt = qLangTxt
        self.qLangOut = qLangOut
        self.breakFlag = threading.Event()
        self.breakFlag.clear()
        self.name = name
        self.id = id
        # proc_id is the 10-char padded name with the id appended, e.g.
        # 'kernel___' + '_0'; used as the log tag and status-file prefix.
        self.proc_id = '{0:10s}'.format(name).replace(' ', '_')
        self.proc_id = self.proc_id[:-2] + '_' + str(id)
        if (runMode == 'debug'):
            self.logDisp = True
        else:
            self.logDisp = False
        qLog.log('info', self.proc_id, 'init', display=self.logDisp, )
        self.proc_s = None
        self.proc_r = None
        self.proc_main = None
        self.proc_beat = None
        self.proc_last = None
        self.proc_step = '0'
        self.proc_seq = 0

    def __del__(self, ):
        """Log object destruction."""
        qLog.log('info', self.proc_id, 'bye!', display=self.logDisp, )

    def begin(self, ):
        """Create run/rdy/bsy status files, queues, and start main_proc
        as a daemon thread."""
        #qLog.log('info', self.proc_id, 'start')
        self.fileRun = qPath_work + self.proc_id + '.run'
        self.fileRdy = qPath_work + self.proc_id + '.rdy'
        self.fileBsy = qPath_work + self.proc_id + '.bsy'
        qFunc.statusSet(self.fileRun, False)
        qFunc.statusSet(self.fileRdy, False)
        qFunc.statusSet(self.fileBsy, False)
        self.proc_s = queue.Queue()
        self.proc_r = queue.Queue()
        self.proc_main = threading.Thread(target=self.main_proc, args=(self.proc_s, self.proc_r, ))
        self.proc_beat = time.time()
        self.proc_last = time.time()
        self.proc_step = '0'
        self.proc_seq = 0
        # NOTE(review): setDaemon() is deprecated since Python 3.10
        # (use .daemon = True) -- unchanged here.
        self.proc_main.setDaemon(True)
        self.proc_main.start()

    def abort(self, waitMax=20, ):
        """Request the worker to stop and wait (up to waitMax seconds per
        phase) for the heartbeat to clear and the run file to disappear."""
        qLog.log('info', self.proc_id, 'stop', display=self.logDisp, )
        self.breakFlag.set()
        chktime = time.time()
        while (not self.proc_beat is None) and ((time.time() - chktime) < waitMax):
            time.sleep(0.25)
        chktime = time.time()
        while (os.path.exists(self.fileRun)) and ((time.time() - chktime) < waitMax):
            time.sleep(0.25)

    def put(self, data, ):
        """Enqueue a [name, value] pair for the worker; always True."""
        self.proc_s.put(data)
        return True

    def checkGet(self, waitMax=5, ):
        """Poll up to waitMax seconds for a worker reply, then get() it."""
        chktime = time.time()
        while (self.proc_r.qsize() == 0) and ((time.time() - chktime) < waitMax):
            time.sleep(0.10)
        data = self.get()
        return data

    def get(self, ):
        """Return the next worker reply, or ['', ''] when none is queued."""
        if (self.proc_r.qsize() == 0):
            return ['', '']
        data = self.proc_r.get()
        self.proc_r.task_done()
        return data

    def main_proc(self, cn_r, cn_s, ):
        """Worker loop: manage subprocess lifecycles and control messages.

        cn_r: queue of [name, value] requests from the owner.
        cn_s: queue of [name, value] replies/guide messages to the owner.
        """
        # Logging / run status
        qLog.log('info', self.proc_id, 'start', display=self.logDisp, )
        qFunc.statusSet(self.fileRun, True)
        self.proc_beat = time.time()
        # Initial setup: consume a stale '_end_' left in our control file.
        self.proc_step = '1'
        txts, txt = qFunc.txtsRead(qCtrl_control_self)
        if (txts != False):
            if (txt == '_end_'):
                qFunc.remove(qCtrl_control_self)
        # Startup switches (keep in sync with controls.py)
        main_speech_run = None
        main_speech_switch = 'on'
        main_vision_run = None
        main_vision_switch = 'off'
        main_desktop_run = None
        main_desktop_switch = 'off'
        bgm_run = None
        bgm_switch = 'off'
        browser_run = None
        browser_switch = 'off'
        player_run = None
        player_switch = 'off'
        chatting_run = None
        chatting_switch = 'off'
        knowledge_run = None
        knowledge_switch = 'off'
        # Per-runMode feature selection.
        if (self.runMode == 'debug'):
            main_vision_switch = 'on'
            main_desktop_switch = 'on'
            bgm_switch = 'on'
            browser_switch = 'on'
            player_switch = 'on'
        elif (self.runMode == 'hud'):
            main_vision_switch = 'on'
            main_desktop_switch = 'on'
            bgm_switch = 'on'
            browser_switch = 'on'
            player_switch = 'on'
        elif (self.runMode == 'live'):
            main_vision_switch = 'on'
            main_desktop_switch = 'on'
            bgm_switch = 'on'
            browser_switch = 'on'
            player_switch = 'on'
        elif (self.runMode == 'translator'):
            pass
        elif (self.runMode == 'speech'):
            pass
        elif (self.runMode == 'number'):
            pass
        elif (self.runMode == 'camera'):
            main_vision_switch = 'on'
            main_desktop_switch = 'on'
        elif (self.runMode == 'assistant'):
            main_vision_switch = 'on'
            main_desktop_switch = 'on'
        elif (self.runMode == 'reception'):
            main_vision_switch = 'on'
        python_exe = 'python'
        if (qPLATFORM == 'darwin'):
            python_exe = 'python3'
        # Main wait loop
        self.proc_step = '5'
        onece = True
        last_alive = time.time()
        while (self.proc_step == '5'):
            self.proc_beat = time.time()
            # Termination check via our control file.
            # NOTE(review): 'control' is initialized but never assigned from
            # txt, so the _reboot_/_*_begin_/_*_end_ handlers below can never
            # fire from the control file -- confirm whether 'control = txt'
            # was intended here.
            control = ''
            txts, txt = qFunc.txtsRead(qCtrl_control_self)
            if (txts != False):
                qLog.log('info', self.proc_id, '' + str(txt))
                if (txt == '_end_'):
                    break
            # Stop-request check (set by abort()).
            if (self.breakFlag.is_set()):
                self.breakFlag.clear()
                self.proc_step = '9'
                break
            # Periodic alive message (every 30 seconds).
            if ((time.time() - last_alive) > 30):
                qLog.log('debug', self.proc_id, 'alive', display=True, )
                last_alive = time.time()
            # Fetch one request from the input queue (non-blocking).
            if (cn_r.qsize() > 0):
                cn_r_get = cn_r.get()
                inp_name = cn_r_get[0]
                inp_value = cn_r_get[1]
                cn_r.task_done()
            else:
                inp_name = ''
                inp_value = ''
            if (cn_r.qsize() > 1) or (cn_s.qsize() > 20):
                qLog.log('warning', self.proc_id, 'queue overflow warning!, ' + str(cn_r.qsize()) + ', ' + str(cn_s.qsize()))
            # Subprocess management: start/stop each worker to match its
            # switch; announcements are queued into 'speechs'.
            speechs = []
            if (main_speech_run is None) and (main_speech_switch == 'on'):
                cn_s.put(['guide', 'main_speech start!'])
                if (qRUNATTR == 'python'):
                    main_speech_run = subprocess.Popen([python_exe, qPython_main_speech,
                                        self.runMode,
                                        self.micDev, self.micType, self.micGuide, self.micLevel,
                                        self.qApiInp, self.qApiTrn, self.qApiOut,
                                        self.qLangInp, self.qLangTrn, self.qLangTxt, self.qLangOut, ], )
                                        #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                else:
                    main_speech_run = subprocess.Popen([qPython_main_speech[:-3],
                                        self.runMode,
                                        self.micDev, self.micType, self.micGuide, self.micLevel,
                                        self.qApiInp, self.qApiTrn, self.qApiOut,
                                        self.qLangInp, self.qLangTrn, self.qLangTxt, self.qLangOut, ], )
                                        #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                time.sleep(2.00)
                if (self.runMode == 'debug'):
                    speechs.append({ 'text':u'ハンズフリーコントロールシステムをデバッグモードで、起動しました。', 'wait':0, })
                elif (self.runMode == 'live'):
                    speechs.append({ 'text':u'ハンズフリー翻訳機能を、起動しました。', 'wait':0, })
                elif (self.runMode == 'hud'):
                    speechs.append({ 'text':u'ヘッドアップディスプレイ機能を、起動しました。', 'wait':0, })
                elif (self.runMode == 'camera'):
                    speechs.append({ 'text':u'ハンズフリーカメラ機能を、起動しました。', 'wait':0, })
                elif (self.runMode == 'assistant'):
                    speechs.append({ 'text':u'AIアシスタント機能を、起動しました。', 'wait':0, })
                elif (self.runMode == 'reception'):
                    speechs.append({ 'text':u'AI受付機能を、起動しました。', 'wait':0, })
            if (not main_speech_run is None) and (main_speech_switch != 'on'):
                time.sleep(10.00)
                #main_speech_run.wait()
                main_speech_run.terminate()
                main_speech_run = None
            if (main_vision_run is None) and (main_vision_switch == 'on'):
                cn_s.put(['guide', 'main_vision start!'])
                if (qRUNATTR == 'python'):
                    main_vision_run = subprocess.Popen([python_exe, qPython_main_vision,
                                        self.runMode, ], )
                                        #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                else:
                    main_vision_run = subprocess.Popen([qPython_main_vision[:-3],
                                        self.runMode, ], )
                                        #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                time.sleep(2.00)
                if (self.runMode == 'debug') \
                or (self.runMode == 'live'):
                    speechs.append({ 'text':u'カメラ機能を、起動しました。', 'wait':0, })
            if (not main_vision_run is None) and (main_vision_switch != 'on'):
                time.sleep(10.00)
                #main_vision_run.wait()
                main_vision_run.terminate()
                main_vision_run = None
                if (self.runMode == 'debug') \
                or (self.runMode == 'live'):
                    speechs.append({ 'text':u'カメラ機能を、終了しました。', 'wait':0, })
            if (main_desktop_run is None) and (main_desktop_switch == 'on'):
                cn_s.put(['guide', 'main_desktop start!'])
                if (qRUNATTR == 'python'):
                    main_desktop_run = subprocess.Popen([python_exe, qPython_main_desktop,
                                        self.runMode, ], )
                                        #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                else:
                    main_desktop_run = subprocess.Popen([qPython_main_desktop[:-3],
                                        self.runMode, ], )
                                        #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                time.sleep(2.00)
                if (self.runMode == 'debug') \
                or (self.runMode == 'live'):
                    speechs.append({ 'text':u'デスクトップ制御機能を、起動しました。', 'wait':0, })
            if (not main_desktop_run is None) and (main_desktop_switch != 'on'):
                time.sleep(10.00)
                #main_desktop_run.wait()
                main_desktop_run.terminate()
                main_desktop_run = None
                if (self.runMode == 'debug') \
                or (self.runMode == 'live'):
                    speechs.append({ 'text':u'デスクトップ制御機能を、終了しました。', 'wait':0, })
            if (bgm_run is None) and (bgm_switch == 'on'):
                cn_s.put(['guide', 'bgm control start!'])
                if (qRUNATTR == 'python'):
                    bgm_run = subprocess.Popen([python_exe, qPython_bgm, self.runMode, ], )
                                        #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                else:
                    bgm_run = subprocess.Popen([qPython_bgm[:-3], self.runMode, ], )
                                        #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                time.sleep(2.00)
                if (self.runMode == 'debug') \
                or (self.runMode == 'live'):
                    speechs.append({ 'text':u'BGM再生機能を、起動しました。', 'wait':0, })
            if (not bgm_run is None) and (bgm_switch != 'on'):
                # Sub-workers are asked to end via their control file first.
                qFunc.txtsWrite(qCtrl_control_bgm, txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
                time.sleep(10.00)
                #bgm_run.wait()
                bgm_run.terminate()
                bgm_run = None
                if (self.runMode == 'debug') \
                or (self.runMode == 'live'):
                    speechs.append({ 'text':u'BGM再生機能を、終了しました。', 'wait':0, })
            if (browser_run is None) and (browser_switch == 'on'):
                cn_s.put(['guide', 'browser control start!'])
                if (qRUNATTR == 'python'):
                    browser_run = subprocess.Popen([python_exe, qPython_browser, self.runMode, ], )
                                        #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                else:
                    browser_run = subprocess.Popen([qPython_browser[:-3], self.runMode, ], )
                                        #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                time.sleep(2.00)
                if (self.runMode == 'debug') \
                or (self.runMode == 'live'):
                    speechs.append({ 'text':u'ブラウザー連携機能を、起動しました。', 'wait':0, })
            if (not browser_run is None) and (browser_switch != 'on'):
                qFunc.txtsWrite(qCtrl_control_browser, txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
                time.sleep(10.00)
                #browser_run.wait()
                browser_run.terminate()
                browser_run = None
                if (self.runMode == 'debug') \
                or (self.runMode == 'live'):
                    speechs.append({ 'text':u'ブラウザー連携機能を、終了しました。', 'wait':0, })
            if (player_run is None) and (player_switch == 'on'):
                cn_s.put(['guide', 'player control start!'])
                if (qRUNATTR == 'python'):
                    player_run = subprocess.Popen([python_exe, qPython_player, self.runMode, ], )
                                        #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                else:
                    player_run = subprocess.Popen([qPython_player[:-3], self.runMode, ], )
                                        #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                time.sleep(2.00)
                if (self.runMode == 'debug') \
                or (self.runMode == 'live'):
                    speechs.append({ 'text':u'動画連携機能を、起動しました。', 'wait':0, })
            if (not player_run is None) and (player_switch != 'on'):
                qFunc.txtsWrite(qCtrl_control_player, txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
                time.sleep(10.00)
                #player_run.wait()
                player_run.terminate()
                player_run = None
                if (self.runMode == 'debug') \
                or (self.runMode == 'live'):
                    speechs.append({ 'text':u'動画連携機能を、終了しました。', 'wait':0, })
            if (chatting_run is None) and (chatting_switch == 'on'):
                cn_s.put(['guide', 'chatting control start!'])
                if (qRUNATTR == 'python'):
                    chatting_run = subprocess.Popen([python_exe, qPython_chatting, self.runMode, ], )
                                        #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                else:
                    chatting_run = subprocess.Popen([qPython_chatting[:-3], self.runMode, ], )
                                        #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                time.sleep(2.00)
                if (self.runMode == 'debug') \
                or (self.runMode == 'live'):
                    speechs.append({ 'text':u'ドコモ雑談連携機能を、起動しました。', 'wait':0, })
            if (not chatting_run is None) and (chatting_switch != 'on'):
                qFunc.txtsWrite(qCtrl_control_chatting, txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
                time.sleep(10.00)
                #chatting_run.wait()
                chatting_run.terminate()
                chatting_run = None
                if (self.runMode == 'debug') \
                or (self.runMode == 'live'):
                    speechs.append({ 'text':u'ドコモ雑談連携機能を、終了しました。', 'wait':0, })
            if (knowledge_run is None) and (knowledge_switch == 'on'):
                cn_s.put(['guide', 'knowledge control start!'])
                if (qRUNATTR == 'python'):
                    knowledge_run = subprocess.Popen([python_exe, qPython_knowledge, self.runMode, ], )
                                        #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                else:
                    knowledge_run = subprocess.Popen([qPython_knowledge[:-3], self.runMode, ], )
                                        #stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                time.sleep(2.00)
                if (self.runMode == 'debug') \
                or (self.runMode == 'live'):
                    speechs.append({ 'text':u'ドコモ知識データベースを、起動しました。', 'wait':0, })
            if (not knowledge_run is None) and (knowledge_switch != 'on'):
                qFunc.txtsWrite(qCtrl_control_knowledge, txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
                time.sleep(10.00)
                #knowledge_run.wait()
                knowledge_run.terminate()
                knowledge_run = None
                if (self.runMode == 'debug') \
                or (self.runMode == 'live'):
                    speechs.append({ 'text':u'ドコモ知識データベースを、終了しました。', 'wait':0, })
            # Speak any queued announcements.
            # NOTE(review): main_id is a module-global set only in __main__;
            # main_proc relies on the module being run as a script -- confirm.
            if (len(speechs) != 0):
                qFunc.speech(id=main_id, speechs=speechs, lang='', )
            # One-time "all ready" announcement after first pass.
            if (onece == True):
                onece = False
                if (self.runMode == 'debug') \
                or (self.runMode == 'live'):
                    time.sleep(40)
                    speechs = []
                    speechs.append({ 'text':u'全ての準備が整いました。スタンバイしています。', 'wait':0, })
                    qFunc.speech(id=main_id, speechs=speechs, lang='', )
            # Publish ready status.
            if (qFunc.statusCheck(self.fileRdy) == False):
                qFunc.statusSet(self.fileRdy, True)
            # Status request from the owner queue.
            if (inp_name.lower() == '_status_'):
                out_name = inp_name
                out_value = '_ready_'
                cn_s.put([out_name, out_value])
            # Reboot: echo the command back to the owner.
            if (control == '_reboot_'):
                out_name = 'control'
                out_value = control
                cn_s.put([out_name, out_value])
            # Control switches (see dead-variable NOTE above).
            if (control == '_speech_begin_'):
                main_speech_switch = 'on'
            if (control == '_speech_end_'):
                main_speech_switch = 'off'
            if (control == '_vision_begin_'):
                main_vision_switch = 'on'
            if (control == '_vision_end_'):
                main_vision_switch = 'off'
            if (control == '_desktop_begin_'):
                main_desktop_switch = 'on'
            if (control == '_desktop_end_'):
                main_desktop_switch = 'off'
            if (control == '_bgm_begin_'):
                bgm_switch = 'on'
            if (control == '_bgm_end_') or (control == '_reboot_'):
                bgm_switch = 'off'
            if (control == '_browser_begin_'):
                browser_switch = 'on'
            if (control == '_browser_end_') or (control == '_reboot_'):
                browser_switch = 'off'
            if (control == '_player_begin_'):
                player_switch = 'on'
            if (control == '_player_end_') or (control == '_reboot_'):
                player_switch = 'off'
            if (control == '_chatting_begin_'):
                chatting_switch = 'on'
            if (control == '_chatting_end_') or (control == '_reboot_'):
                chatting_switch = 'off'
            if (control == '_knowledge_begin_'):
                knowledge_switch = 'on'
            if (control == '_knowledge_end_') or (control == '_reboot_'):
                knowledge_switch = 'off'
            # Idle wait; back off harder while the CPU is flagged busy.
            slow = False
            if (qFunc.statusCheck(qBusy_dev_cpu) == True):
                slow = True
            if (slow == True):
                time.sleep(1.00)
            else:
                if (cn_r.qsize() == 0):
                    time.sleep(0.50)
                else:
                    time.sleep(0.25)
        # Shutdown processing
        if (True):
            # Clear ready status
            qFunc.statusSet(self.fileRdy, False)
            # Clear busy status
            qFunc.statusSet(self.fileBsy, False)
            # Ask every subprocess to end via its control file
            qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
            qFunc.txtsWrite(qCtrl_control_speech ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
            qFunc.txtsWrite(qCtrl_control_vision ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
            qFunc.txtsWrite(qCtrl_control_desktop ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
            qFunc.txtsWrite(qCtrl_control_bgm ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
            qFunc.txtsWrite(qCtrl_control_browser ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
            qFunc.txtsWrite(qCtrl_control_player ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
            qFunc.txtsWrite(qCtrl_control_chatting ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
            qFunc.txtsWrite(qCtrl_control_knowledge ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
            # Stop subprocesses (main workers are waited on; sub workers
            # are only terminated)
            if (not main_speech_run is None):
                main_speech_run.wait()
                main_speech_run.terminate()
                main_speech_run = None
            if (not main_vision_run is None):
                main_vision_run.wait()
                main_vision_run.terminate()
                main_vision_run = None
            if (not main_desktop_run is None):
                main_desktop_run.wait()
                main_desktop_run.terminate()
                main_desktop_run = None
            if (not bgm_run is None):
                bgm_run.wait()
                bgm_run.terminate()
                bgm_run = None
            if (not browser_run is None):
                #browser_run.wait()
                browser_run.terminate()
                browser_run = None
            if (not player_run is None):
                #player_run.wait()
                player_run.terminate()
                player_run = None
            if (not chatting_run is None):
                #chatting_run.wait()
                chatting_run.terminate()
                chatting_run = None
            if (not knowledge_run is None):
                #knowledge_run.wait()
                knowledge_run.terminate()
                knowledge_run = None
            # Drain both queues
            while (cn_r.qsize() > 0):
                cn_r_get = cn_r.get()
                cn_r.task_done()
            while (cn_s.qsize() > 0):
                cn_s_get = cn_s.get()
                cn_s.task_done()
        # Final logging; clearing proc_beat signals abort() that we ended.
        qLog.log('info', self.proc_id, 'end', display=self.logDisp, )
        qFunc.statusSet(self.fileRun, False)
        self.proc_beat = None
# シグナル処理
import signal
def signal_handler(signal_number, stack_frame):
print(os.path.basename(__file__), 'accept signal =', signal_number)
#signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
if __name__ == '__main__':
main_name = 'kernel'
main_id = '{0:10s}'.format(main_name).replace(' ', '_')
# 共通クラス
qFunc.init()
# ログ
nowTime = datetime.datetime.now()
filename = qPath_log + nowTime.strftime('%Y%m%d.%H%M%S') + '.' + os.path.basename(__file__) + '.log'
qLog.init(mode='logger', filename=filename, )
qLog.log('info', main_id, 'init')
qLog.log('info', main_id, 'exsample.py runMode, ..., ')
#runMode debug, hud, live, translator, speech, number, camera, assistant, reception,
# パラメータ
if (True):
#runMode = 'live'
micDev = '0'
micType = 'bluetooth'
micGuide = 'on'
micLevel = '777'
if (len(sys.argv) >= 2):
runMode = str(sys.argv[1]).lower()
if (runMode == 'debug'):
micType = 'bluetooth'
micGuide = 'on'
elif (runMode == 'hud'):
micType = 'bluetooth'
micGuide = 'off'
elif (runMode == 'live'):
micType = 'bluetooth'
micGuide = 'off'
elif (runMode == 'translator'):
micType = 'bluetooth'
micGuide = 'on'
elif (runMode == 'speech'):
micType = 'usb'
micGuide = 'on'
elif (runMode == 'number'):
micType = 'usb'
micGuide = 'on'
elif (runMode == 'camera'):
micType = 'usb'
micGuide = 'off'
elif (runMode == 'assistant'):
micType = 'usb'
micGuide = 'off'
elif (runMode == 'reception'):
micType = 'usb'
micGuide = 'off'
if (len(sys.argv) >= 3):
micDev = str(sys.argv[2]).lower()
if (not micDev.isdigit()):
micGuide = 'off'
if (len(sys.argv) >= 4):
micType = str(sys.argv[3]).lower()
if (len(sys.argv) >= 5):
micGuide = str(sys.argv[4]).lower()
if (len(sys.argv) >= 6):
p = str(sys.argv[5]).lower()
if (p.isdigit() and p != '0'):
micLevel = p
if (len(sys.argv) >= 7):
qApiInp = str(sys.argv[6]).lower()
if (qApiInp == 'google') or (qApiInp == 'watson') \
or (qApiInp == 'azure') or (qApiInp == 'aws') \
or (qApiInp == 'nict'):
qApiTrn = qApiInp
qApiOut = qApiInp
else:
qApiTrn = 'free'
qApiOut = 'free'
if (qApiInp == 'nict'):
#qLangTrn = 'en,fr,es,id,my,th,vi,zh,ko,'
qLangTrn = 'en,fr,es,id,zh,ko,'
qLangOut = qLangTrn[:2]
if (len(sys.argv) >= 8):
qApiTrn = str(sys.argv[7]).lower()
if (len(sys.argv) >= 9):
qApiOut = str(sys.argv[8]).lower()
if (len(sys.argv) >= 10):
qLangInp = str(sys.argv[9]).lower()
qLangTxt = qLangInp
if (len(sys.argv) >= 11):
qLangTrn = str(sys.argv[10]).lower()
qLangOut = qLangTrn[:2]
if (len(sys.argv) >= 12):
qLangTxt = str(sys.argv[11]).lower()
if (len(sys.argv) >= 13):
qLangOut = str(sys.argv[12]).lower()
qLog.log('info', main_id, 'runMode =' + str(runMode ))
qLog.log('info', main_id, 'micDev =' + str(micDev ))
qLog.log('info', main_id, 'micType =' + str(micType ))
qLog.log('info', main_id, 'micGuide =' + str(micGuide ))
qLog.log('info', main_id, 'micLevel =' + str(micLevel ))
qLog.log('info', main_id, 'qApiInp =' + str(qApiInp ))
qLog.log('info', main_id, 'qApiTrn =' + str(qApiTrn ))
qLog.log('info', main_id, 'qApiOut =' + str(qApiOut ))
qLog.log('info', main_id, 'qLangInp =' + str(qLangInp ))
qLog.log('info', main_id, 'qLangTrn =' + str(qLangTrn ))
qLog.log('info', main_id, 'qLangTxt =' + str(qLangTxt ))
qLog.log('info', main_id, 'qLangOut =' + str(qLangOut ))
# 初期設定
if (qPLATFORM == 'darwin'):
try:
subprocess.call(['/usr/bin/osascript', '-e',
'tell app "Finder" to set frontmost of process "python" to true'])
except Exception as e:
pass
if (True):
qFunc.remove(qCtrl_control_kernel )
qFunc.remove(qCtrl_control_speech )
qFunc.remove(qCtrl_control_vision )
qFunc.remove(qCtrl_control_desktop )
qFunc.remove(qCtrl_control_bgm )
qFunc.remove(qCtrl_control_browser )
qFunc.remove(qCtrl_control_player )
qFunc.remove(qCtrl_control_chatting )
qFunc.remove(qCtrl_control_knowledge )
qFunc.statusReset_speech(False)
qFunc.statusReset_vision(False)
qFunc.statusReset_desktop(False)
# 起動
if (True):
qLog.log('info', main_id, 'start')
qFunc.guideDisplay(display=True, panel='1', filename='_kernel_start_', txt='', )
guide_disp = True
guide_time = time.time()
main_core = main_kernel(main_id, '0',
runMode=runMode,
micDev=micDev, micType=micType, micGuide=micGuide, micLevel=micLevel,
qApiInp=qApiInp, qApiTrn=qApiTrn, qApiOut=qApiOut,
qLangInp=qLangInp, qLangTrn=qLangTrn, qLangTxt=qLangTxt, qLangOut=qLangOut, )
main_core.begin()
# 待機ループ
while (True):
# 終了確認
control = ''
txts, txt = qFunc.txtsRead(qCtrl_control_self)
if (txts != False):
qLog.log('info', main_id, '' + str(txt))
if (txt == '_end_'):
break
else:
qFunc.remove(qCtrl_control_self)
control = txt
# リブート
if (control == '_reboot_'):
main_core.abort()
del main_core
qFunc.remove(qCtrl_control_kernel)
main_core = None
main_core = main_kernel(main_id, '0',
runMode=runMode,
micDev=micDev, micType=micType, micGuide=micGuide, micLevel=micLevel,
qApiInp=qApiInp, qApiTrn=qApiTrn, qApiOut=qApiOut,
qLangInp=qLangInp, qLangTrn=qLangTrn, qLangTxt=qLangTxt, qLangOut=qLangOut, )
main_core.begin()
# スレッド応答
while (main_core.proc_r.qsize() != 0) and (control == ''):
res_data = main_core.get()
res_name = res_data[0]
res_value = res_data[1]
if (res_name == 'control'):
control = res_value
break
# ガイド表示
if (res_name == 'guide'):
if (guide_disp == True):
qFunc.guideDisplay(txt=res_value, )
guide_time = time.time()
else:
qFunc.guideDisplay(display=True, panel='1', filename='_kernel_guide_', txt=res_value, )
guide_disp = True
guide_time = time.time()
# ガイド表示終了
if (guide_disp == True):
if ((time.time() - guide_time) > 3):
qFunc.guideDisplay(display=False,)
guide_disp = False
# アイドリング
slow = False
if (qFunc.statusCheck(qBusy_dev_cpu) == True):
slow = True
if (slow == True):
time.sleep(1.00)
else:
time.sleep(0.25)
# 終了
if (True):
qLog.log('info', main_id, 'terminate')
qFunc.guideDisplay(display=True, panel='1', filename='_kernel_stop_', txt='', )
guide_disp = True
guide_time = time.time()
main_core.abort()
del main_core
qFunc.guideDisplay(display=False,)
guide_disp = False
qLog.log('info', main_id, 'bye!')
time.sleep(5.00)
sys.exit(0)
| 38.884189 | 126 | 0.508495 |
ace485537cb68889d4d64e6bb7a3ad09df1eae4e | 1,860 | py | Python | sparseodes/meas_to_rec.py | maimanuel/sparseodes | 2f580ca61c0991d83741ac417f57034890673417 | [
"Apache-2.0"
] | null | null | null | sparseodes/meas_to_rec.py | maimanuel/sparseodes | 2f580ca61c0991d83741ac417f57034890673417 | [
"Apache-2.0"
] | 2 | 2021-09-28T00:36:12.000Z | 2022-02-26T06:27:26.000Z | sparseodes/meas_to_rec.py | maimanuel/sparseodes | 2f580ca61c0991d83741ac417f57034890673417 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_meas_to_rec.ipynb (unless otherwise specified).
__all__ = ['rand_par', 'traj_solve']
# Cell
from time import time
import numpy as np
import copy as cp
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from numpy.random import permutation,rand,randn
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from sklearn import linear_model
from sklearn.feature_extraction.image import PatchExtractor as PE
from functools import partial,reduce
# Cell
def rand_par(par, cvar):
    '''Return a noise-perturbed copy of an ODE parameter dictionary.

    Each value is resampled from a gaussian centred on the original value
    with standard deviation ``cvar`` times that value, i.e. ``cvar`` is the
    coefficient of variation (1 = 100% of the noise-free value). Samples
    that come out negative are clamped to zero.

    par: dict of parameter means
    cvar: coefficient of variation of the sampling distribution
    return: dict with the same keys and perturbed, non-negative values
    '''
    noisy = {}
    for name, mean in par.items():
        sample = mean * (1 + cvar * randn())
        noisy[name] = sample if sample >= 0 else 0
    return noisy
# Cell
def traj_solve(N, dt, model_der, mod_par, cvar):
    '''Integrate N noisy trajectories of the 2-D ODE ``model_der``.

    Each trajectory uses an independent noisy draw of ``mod_par`` (via
    ``rand_par`` with coefficient of variation ``cvar``) and a random
    initial condition whose first component is uniform on [0, 3) and whose
    second component is 0. The time grid covers [0, 100] with roughly
    100/dt points.

    return: (traj, time) where ``traj`` has shape (N, len(time), 2)
    '''
    t_start, t_end = 0, 100
    n_points = round((t_end - t_start) / float(dt))
    t_grid = np.linspace(t_start, t_end, n_points)
    traj = np.full((N, len(t_grid), 2), -3.)
    for k in range(N):
        # independent noisy parameter draw per trajectory
        noisy_par = rand_par(mod_par, cvar)
        y0 = rand(2) * np.array([3, 0])
        traj[k, :, :] = odeint(model_der, y0, t_grid, args=(noisy_par,))
    return traj, t_grid
ace4867b03d901d2a61b596359d267b964f44bc4 | 445 | py | Python | config.py | IdanAtias/Felix | 2a3354a41824a6e082b875d42e4998964f23a3ee | [
"MIT"
] | null | null | null | config.py | IdanAtias/Felix | 2a3354a41824a6e082b875d42e4998964f23a3ee | [
"MIT"
] | 6 | 2020-10-18T05:36:33.000Z | 2020-12-11T11:47:25.000Z | config.py | IdanAtias/Felix | 2a3354a41824a6e082b875d42e4998964f23a3ee | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
""" Bot Configuration """
class DefaultConfig:
""" Bot Configuration """
PORT = 3978
APP_ID = os.environ["MicrosoftAppId"]
APP_PASSWORD = os.environ["MicrosoftAppPassword"]
AAD_CONNECTION_NAME = os.environ.get("AadConnectionName")
GCP_CONNECTION_NAME = os.environ.get("GcpConnectionName")
| 24.722222 | 61 | 0.721348 |
ace4868617e96ecba13c61f3804b16f5773abc30 | 8,795 | py | Python | pretrain/data_loader.py | QZx7/KernelGAT | 20fe548496a0ca25393c9c6b29354deb947a7a14 | [
"MIT"
] | 150 | 2019-10-23T07:42:10.000Z | 2022-03-26T22:22:43.000Z | pretrain/data_loader.py | QZx7/KernelGAT | 20fe548496a0ca25393c9c6b29354deb947a7a14 | [
"MIT"
] | 30 | 2019-10-23T11:52:06.000Z | 2022-02-17T19:31:39.000Z | pretrain/data_loader.py | QZx7/KernelGAT | 20fe548496a0ca25393c9c6b29354deb947a7a14 | [
"MIT"
] | 32 | 2019-10-24T02:52:40.000Z | 2022-03-23T02:42:04.000Z | import os
import torch
import numpy as np
import json
from torch.autograd import Variable
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def tok2int_sent(sentence, tokenizer, max_seq_length):
    """Encode a (sent_a, sent_b) pair into fixed-length BERT inputs.

    Produces ``[CLS] sent_a [SEP] sent_b [SEP]`` token ids, an attention
    mask, and segment ids (0 for the first segment, 1 for the second), all
    zero-padded to ``max_seq_length``. When ``sent_b`` is empty/None only
    the first segment is emitted.
    """
    sent_a, sent_b = sentence
    tokens_a = tokenizer.tokenize(sent_a)
    tokens_b = None
    if sent_b:
        tokens_b = tokenizer.tokenize(sent_b)
        # Reserve 3 slots for [CLS] and the two [SEP] markers.
        _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
    elif len(tokens_a) > max_seq_length - 2:
        # Single segment: reserve 2 slots for [CLS] and [SEP].
        tokens_a = tokens_a[:max_seq_length - 2]

    tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
    segment_ids = [0] * len(tokens)
    if tokens_b:
        tokens += tokens_b + ["[SEP]"]
        segment_ids += [1] * (len(tokens_b) + 1)

    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = [1] * len(input_ids)
    pad = [0] * (max_seq_length - len(input_ids))
    input_ids += pad
    input_mask += pad
    segment_ids += pad

    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    return input_ids, input_mask, segment_ids
def tok2int_list(src_list, tokenizer, max_seq_length, max_seq_size=-1):
    """Tokenize a list of sentence pairs into parallel id/mask/segment lists.

    Args:
        src_list: list of (sent_a, sent_b) string pairs.
        tokenizer: BERT-style tokenizer exposing ``tokenize()`` and
            ``convert_tokens_to_ids()``.
        max_seq_length: fixed length each encoded pair is padded/truncated to.
        max_seq_size: unused; kept for interface compatibility. (It used to
            control padding of the evidence dimension, which was disabled.)

    Returns:
        (input_ids, input_masks, segment_ids): three lists of equal length,
        one entry of length ``max_seq_length`` per pair in ``src_list``.
    """
    inp_padding = []
    msk_padding = []
    seg_padding = []
    for sent in src_list:
        input_ids, input_mask, input_seg = tok2int_sent(sent, tokenizer, max_seq_length)
        inp_padding.append(input_ids)
        msk_padding.append(input_mask)
        seg_padding.append(input_seg)
    return inp_padding, msk_padding, seg_padding
class DataLoader(object):
    '''Batched iterator over (claim, evidence, label) pairs for training.

    Each line of the jsonl file at ``data_path`` holds a claim plus
    candidate evidence entries; evidence labelled 1 or 2 is treated as
    positive (kept with label 1) and up to ``args.evi_num`` negatives per
    positive are sampled (label 0). Iterating yields batches of
    (input_ids, input_mask, segment_ids, labels) LongTensors.
    '''
    def __init__(self, data_path, tokenizer, args, test=False, cuda=True, batch_size=64):
        self.cuda = cuda
        self.batch_size = batch_size
        self.tokenizer = tokenizer
        self.max_len = args.max_len
        self.evi_num = args.evi_num
        self.threshold = args.threshold
        self.data_path = data_path
        self.test = test
        examples = self.read_file(data_path)
        self.examples = examples
        self.total_num = len(examples)
        if self.test:
            self.total_num = len(self.examples)
            self.total_step = np.ceil(self.total_num * 1.0 / batch_size)
        else:
            # NOTE(review): true division, so total_step is fractional; the
            # ``step < total_step`` test in next() effectively yields
            # ceil(total_num / batch_size) batches per epoch.
            self.total_step = self.total_num / batch_size
            self.shuffle()
        self.step = 0
    def read_file(self, data_path):
        '''Parse the jsonl file into [claim, sentence, label] examples.

        Positives (file label 1 or 2, non-empty sentence) get label 1; per
        claim, ``evi_num`` negatives per positive are randomly sampled from
        the label-0 candidates.
        '''
        examples = list()
        with open(data_path) as fin:
            for step, line in enumerate(fin):
                data = json.loads(line)
                claim = data["claim"]
                evidences = data["evidence"]
                pos_evi = list()
                neg_evi = list()
                for evidence in evidences:
                    if (evidence[3] == 1 or evidence[3] == 2) and evidence[2].strip() != "":
                        pos_evi.append(evidence)
                    elif evidence[3] == 0 and evidence[2].strip() != "":
                        neg_evi.append(evidence)
                total_triples = pos_evi
                pos_num = len(pos_evi)
                neg_num = self.evi_num * pos_num
                np.random.shuffle(neg_evi)
                neg_evi = neg_evi[:neg_num]
                total_triples += neg_evi
                for triple in total_triples:
                    if triple[3] == 1 or triple[3] == 2:
                        examples.append([claim, triple[2], 1])
                    elif triple[3] == 0:
                        examples.append([claim, triple[2], 0])
        return examples
    def shuffle(self):
        # In-place shuffle of the example order for the next epoch.
        np.random.shuffle(self.examples)
    def __iter__(self):
        return self
    def __next__(self):
        return self.next()
    def __len__(self):
        # Number of batches served per epoch. (The previous implementation
        # returned the never-assigned attribute ``self._n_batch`` and thus
        # always raised AttributeError.)
        return int(np.ceil(self.total_step))
    def next(self):
        ''' Get the next batch '''
        if self.step < self.total_step:
            examples = self.examples[self.step * self.batch_size : (self.step+1)*self.batch_size]
            inputs = list()
            labels = list()
            for example in examples:
                inputs.append([example[0], example[1]])
                labels.append(example[2])
            inp, msk, seg = tok2int_list(inputs, self.tokenizer, self.max_len)
            inp_tensor = Variable(
                torch.LongTensor(inp))
            msk_tensor = Variable(
                torch.LongTensor(msk))
            seg_tensor = Variable(
                torch.LongTensor(seg))
            lab_tensor = Variable(
                torch.LongTensor(labels))
            if self.cuda:
                inp_tensor = inp_tensor.cuda()
                msk_tensor = msk_tensor.cuda()
                seg_tensor = seg_tensor.cuda()
                lab_tensor = lab_tensor.cuda()
            self.step += 1
            return inp_tensor, msk_tensor, seg_tensor, lab_tensor
        else:
            # Epoch finished: reset, and for training re-read the file so the
            # negative sampling differs between epochs, then reshuffle.
            self.step = 0
            if not self.test:
                examples = self.read_file(self.data_path)
                self.examples = examples
                self.shuffle()
            raise StopIteration()
class DataLoaderTest(object):
    '''Batched iterator over (claim, evidence) pairs for inference.

    Unlike ``DataLoader`` it keeps every candidate sentence, preserves file
    order, and yields the example ids and raw evidence rows alongside the
    tensors so predictions can be mapped back to their source.
    '''
    def __init__(self, data_path, tokenizer, args, cuda=True, batch_size=64):
        self.cuda = cuda
        self.batch_size = batch_size
        self.tokenizer = tokenizer
        self.max_len = args.max_len
        self.evi_num = args.evi_num
        self.threshold = args.threshold
        self.data_path = data_path
        inputs, ids, evi_list = self.read_file(data_path)
        self.inputs = inputs
        self.ids = ids
        self.evi_list = evi_list
        self.total_num = len(inputs)
        self.total_step = np.ceil(self.total_num * 1.0 / batch_size)
        self.step = 0
    def read_file(self, data_path):
        '''Parse the jsonl file into parallel (claim, sentence), id, and evidence lists.'''
        inputs = list()
        ids = list()
        evi_list = list()
        with open(data_path) as fin:
            for step, line in enumerate(fin):
                instance = json.loads(line.strip())
                claim = instance['claim']
                example_id = instance['id']
                for evidence in instance['evidence']:
                    ids.append(example_id)
                    inputs.append([claim, evidence[2]])
                    evi_list.append(evidence)
        return inputs, ids, evi_list
    def shuffle(self):
        # NOTE(review): ``self.examples`` is never assigned on this class, so
        # calling shuffle() raises AttributeError; this looks like dead code
        # copied from DataLoader — confirm before relying on it.
        np.random.shuffle(self.examples)
    def __iter__(self):
        return self
    def __next__(self):
        return self.next()
    def __len__(self):
        # Number of batches. (The previous implementation returned the
        # never-assigned attribute ``self._n_batch`` and always raised
        # AttributeError.)
        return int(self.total_step)
    def next(self):
        ''' Get the next batch '''
        if self.step < self.total_step:
            inputs = self.inputs[self.step * self.batch_size : (self.step+1)*self.batch_size]
            ids = self.ids[self.step * self.batch_size: (self.step + 1) * self.batch_size]
            evi_list = self.evi_list[self.step * self.batch_size: (self.step + 1) * self.batch_size]
            inp, msk, seg = tok2int_list(inputs, self.tokenizer, self.max_len, -1)
            inp_tensor_input = Variable(
                torch.LongTensor(inp))
            msk_tensor_input = Variable(
                torch.LongTensor(msk))
            seg_tensor_input = Variable(
                torch.LongTensor(seg))
            if self.cuda:
                inp_tensor_input = inp_tensor_input.cuda()
                msk_tensor_input = msk_tensor_input.cuda()
                seg_tensor_input = seg_tensor_input.cuda()
            self.step += 1
            return inp_tensor_input, msk_tensor_input, seg_tensor_input, ids, evi_list
        else:
            self.step = 0
            raise StopIteration()
ace487abe76be29618c790fc72514d913f5729d2 | 144 | py | Python | backend/src/baserow/api/__init__.py | ashishdhngr/baserow | b098678d2165eb7c42930ee24dc6753a3cb520c3 | [
"MIT"
] | 839 | 2020-07-20T13:29:34.000Z | 2022-03-31T21:09:16.000Z | backend/src/baserow/api/__init__.py | rasata/baserow | c6e1d7842c53f801e1c96b49f1377da2a06afaa9 | [
"MIT"
] | 28 | 2020-08-07T09:23:58.000Z | 2022-03-01T22:32:40.000Z | backend/src/baserow/api/__init__.py | rasata/baserow | c6e1d7842c53f801e1c96b49f1377da2a06afaa9 | [
"MIT"
] | 79 | 2020-08-04T01:48:01.000Z | 2022-03-27T13:30:54.000Z | from .extensions import ( # noqa: F401
DiscriminatorMappingSerializerExtension,
DiscriminatorCustomFieldsMappingSerializerExtension,
)
| 28.8 | 56 | 0.819444 |
ace4880470bf1db8607316e5ea2b7bbbcd3fd11e | 315 | py | Python | jenkinsapi/mutable_jenkins_thing.py | ifwe/jenkinsapi | 31a7fbb07efcd48e226f7dcf643fd2a2625416c0 | [
"MIT"
] | 1 | 2015-10-16T18:55:51.000Z | 2015-10-16T18:55:51.000Z | jenkinsapi/mutable_jenkins_thing.py | moustuk/jenkinsapi-1 | d18c1e669965c209093763f3295f79c9d3ccdeea | [
"MIT"
] | null | null | null | jenkinsapi/mutable_jenkins_thing.py | moustuk/jenkinsapi-1 | d18c1e669965c209093763f3295f79c9d3ccdeea | [
"MIT"
] | 1 | 2021-09-08T11:45:44.000Z | 2021-09-08T11:45:44.000Z | """
Module for MutableJenkinsThing
"""
class MutableJenkinsThing(object):
    """
    A mixin for certain mutable objects which can be renamed and deleted.

    Host classes must provide a ``baseurl`` attribute; the methods below
    derive the Jenkins action endpoints from it.
    """
    def get_delete_url(self):
        # URL of the Jenkins doDelete action for this item.
        return '%s/doDelete' % self.baseurl
    def get_rename_url(self):
        # URL of the Jenkins doRename action for this item.
        return '%s/doRename' % self.baseurl
| 21 | 73 | 0.663492 |
ace4884bb6ea4fbbce458ad08401ca9eb3f7567c | 944 | py | Python | Python OOP/Class Methods.py | RaghuBhogireddy/python | 2f4a0118715dcf6829dc72b32256d5cb1e6df19f | [
"Apache-2.0"
] | null | null | null | Python OOP/Class Methods.py | RaghuBhogireddy/python | 2f4a0118715dcf6829dc72b32256d5cb1e6df19f | [
"Apache-2.0"
] | null | null | null | Python OOP/Class Methods.py | RaghuBhogireddy/python | 2f4a0118715dcf6829dc72b32256d5cb1e6df19f | [
"Apache-2.0"
] | null | null | null | # instances methods works on sepcific objects
# class methods operate on the class as a whole
# static methods modify the state of neither the class nor any specific instance
class Book:
    """A book with a title and a type validated against ``BOOK_TYPES``."""

    BOOK_TYPES = ("HAND COVER", "PAPER BACK", "HARD COPY")

    @classmethod
    def getbooktypes(cls):
        """Return the tuple of permitted book types."""
        return cls.BOOK_TYPES

    # Shared list backing getbooklist(); created lazily on first access.
    __booklist = None

    @staticmethod
    def getbooklist():
        """Return the single shared book list, creating it on first use."""
        if Book.__booklist is None:
            Book.__booklist = []
        return Book.__booklist

    def __init__(self, title, booktype):
        self.title = title
        if booktype not in Book.BOOK_TYPES:
            raise ValueError(f"{booktype} is not a valid book type")
        self.booktype = booktype

    def setTitle(self, title):
        """Replace this book's title."""
        self.title = title
# Class-level data is reachable without creating an instance.
print("booktypes:", Book.getbooktypes())
b1 = Book("Ramayan", "PAPER BACK")
b2 = Book("mahaBharat", "PAPER BACK")
# getbooklist() hands back one shared list, so these appends accumulate
# across every call site.
books = Book.getbooklist()
books.append(b1)
books.append(b2)
print(books)
| 23.02439 | 78 | 0.663136 |
ace489b3100f88f8f0c8f364f3eb05d922dd6cd9 | 134 | py | Python | src/genie/libs/parser/iosxe/tests/ShowParserEncryptFileStatus/cli/equal/golden_output_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/iosxe/tests/ShowParserEncryptFileStatus/cli/equal/golden_output_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/iosxe/tests/ShowParserEncryptFileStatus/cli/equal/golden_output_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | null | null | null | expected_output = {
'feature' : True,
'file_format' : 'Cipher text',
'encryption_version' : 'ver1'
}
| 22.333333 | 38 | 0.514925 |
ace489e7ec0e8838985855722023b74dd6d21932 | 3,518 | py | Python | solutions/day21.py | nitekat1124/advent-of-code-2018 | a5db933b84f7df4a3b86d78787f89e739f06d881 | [
"WTFPL"
] | null | null | null | solutions/day21.py | nitekat1124/advent-of-code-2018 | a5db933b84f7df4a3b86d78787f89e739f06d881 | [
"WTFPL"
] | null | null | null | solutions/day21.py | nitekat1124/advent-of-code-2018 | a5db933b84f7df4a3b86d78787f89e739f06d881 | [
"WTFPL"
] | null | null | null | from utils.solution_base import SolutionBase
class Solution(SolutionBase):
    """Advent of Code 2018 day 21: emulate the elf-code CPU over the puzzle input."""
    def solve(self, part_num: int):
        """Run the bundled tests for this part, then solve the real input."""
        self.test_runner(part_num)
        func = getattr(self, f"part{part_num}")
        result = func(self.data)
        return result
    def test_runner(self, part_num):
        """Compare partN against each bundled test case, printing pass/fail."""
        test_inputs = self.get_test_input()
        test_results = self.get_test_result(part_num)
        test_counter = 1
        func = getattr(self, f"part{part_num}")
        for i, r in zip(test_inputs, test_results):
            # Empty expected-result entries mean "no test for this part".
            if len(r):
                if (tr := str(func(i))) == r[0]:
                    print(f"test {test_counter} passed")
                else:
                    print(f"your result: {tr}")
                    print(f"test answer: {r[0]}")
                    print(f"test {test_counter} NOT passed")
            test_counter += 1
        print()
    def part1(self, data):
        """Return regs[4] the first time execution reaches instruction index 29."""
        regs = [0] * 6
        # data[0] is the instruction-pointer binding line ('#ip N'), so the
        # program's instructions start at data[1].
        ip = int(data[0].split(" ")[1])
        i = regs[ip]
        while 1:
            # Instruction 29 is this input's halt check; the value it tests
            # is in register 4 at that moment.
            if i == 29:
                return regs[4]
            inst, *io = data[i + 1].split(" ")
            io = [*map(int, io)]
            regs = self.run_inst(regs, inst, io, False)
            if regs[ip] + 1 < len(data) - 1:
                regs[ip] += 1
                i = regs[ip]
            else:
                break
    def part2(self, data):
        """
        Return the last regs[4] value seen at instruction 29 before the
        sequence of values starts repeating.
        (super slow, might optimize later, have no idea how for now)
        """
        regs = [0] * 6
        ip = int(data[0].split(" ")[1])
        i = regs[ip]
        detected = set()
        prev = 0
        while 1:
            if i == 29:
                # First repeat means the value sequence has cycled; the value
                # immediately before the cycle closes is the answer.
                if regs[4] in detected:
                    return prev
                prev = regs[4]
                detected.add(regs[4])
            inst, *io = data[i + 1].split(" ")
            io = [*map(int, io)]
            regs = self.run_inst(regs, inst, io, False)
            if regs[ip] + 1 < len(data) - 1:
                regs[ip] += 1
                i = regs[ip]
            else:
                break
    def run_inst(self, regs, opcode, io, as_str=True):
        """Execute one elf-code instruction over the 6 registers.

        ``regs`` may be passed (and is then returned) as a comma-joined
        string when ``as_str`` is True, otherwise as a list of ints.
        ``io`` holds the instruction's three operands (A, B, C).
        """
        if as_str:
            regs = [*map(int, regs.split(", "))]
        if opcode == "addr":
            regs[io[2]] = regs[io[0]] + regs[io[1]]
        elif opcode == "addi":
            regs[io[2]] = regs[io[0]] + io[1]
        elif opcode == "mulr":
            regs[io[2]] = regs[io[0]] * regs[io[1]]
        elif opcode == "muli":
            regs[io[2]] = regs[io[0]] * io[1]
        elif opcode == "banr":
            regs[io[2]] = regs[io[0]] & regs[io[1]]
        elif opcode == "bani":
            regs[io[2]] = regs[io[0]] & io[1]
        elif opcode == "borr":
            regs[io[2]] = regs[io[0]] | regs[io[1]]
        elif opcode == "bori":
            regs[io[2]] = regs[io[0]] | io[1]
        elif opcode == "setr":
            regs[io[2]] = regs[io[0]]
        elif opcode == "seti":
            regs[io[2]] = io[0]
        elif opcode == "gtir":
            regs[io[2]] = int(io[0] > regs[io[1]])
        elif opcode == "gtri":
            regs[io[2]] = int(regs[io[0]] > io[1])
        elif opcode == "gtrr":
            regs[io[2]] = int(regs[io[0]] > regs[io[1]])
        elif opcode == "eqir":
            regs[io[2]] = int(io[0] == regs[io[1]])
        elif opcode == "eqri":
            regs[io[2]] = int(regs[io[0]] == io[1])
        elif opcode == "eqrr":
            regs[io[2]] = int(regs[io[0]] == regs[io[1]])
        return ", ".join(str(r) for r in regs) if as_str else regs
| 30.859649 | 66 | 0.436896 |
ace489ef89d43f3b2d003d1a3ca311b4e27c7a71 | 3,401 | py | Python | api/tests/opentrons/hardware_control/test_simulator_setup.py | mrakitin/opentrons | d9c7ed23d13cdb62bd1bc397dc2871d4bd5b77e9 | [
"Apache-2.0"
] | null | null | null | api/tests/opentrons/hardware_control/test_simulator_setup.py | mrakitin/opentrons | d9c7ed23d13cdb62bd1bc397dc2871d4bd5b77e9 | [
"Apache-2.0"
] | null | null | null | api/tests/opentrons/hardware_control/test_simulator_setup.py | mrakitin/opentrons | d9c7ed23d13cdb62bd1bc397dc2871d4bd5b77e9 | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from opentrons.config import robot_configs
from opentrons.hardware_control.modules import MagDeck, Thermocycler, TempDeck
from opentrons.hardware_control import simulator_setup
from opentrons.types import Mount
async def test_with_magdeck(loop):
    """A simulated magdeck replays the queued engage() call into its live_data."""
    setup = simulator_setup.SimulatorSetup(
        attached_modules={'magdeck': [
            simulator_setup.ModuleCall('engage', kwargs={'height': 3})]
        })
    simulator = await simulator_setup.create_simulator(setup)

    assert type(simulator.attached_modules[0]) == MagDeck
    assert simulator.attached_modules[0].live_data == {
        'data': {
            'engaged': True,
            'height': 3
        },
        'status': 'engaged'
    }
async def test_with_thermocycler(loop):
    """A simulated thermocycler replays set_temperature (holdTime = 2min + 1s = 121s)."""
    setup = simulator_setup.SimulatorSetup(
        attached_modules={'thermocycler': [
            simulator_setup.ModuleCall('set_temperature',
                                       kwargs={
                                           'temperature': 3,
                                           'hold_time_seconds': 1,
                                           'hold_time_minutes': 2,
                                           'volume': 5
                                       })
        ]})
    simulator = await simulator_setup.create_simulator(setup)

    assert type(simulator.attached_modules[0]) == Thermocycler
    assert simulator.attached_modules[0].live_data == {
        'data': {'currentCycleIndex': None,
                 'currentStepIndex': None,
                 'currentTemp': 3,
                 'holdTime': 121,
                 'lid': 'open',
                 'lidTarget': None,
                 'lidTemp': 23,
                 'rampRate': None,
                 'targetTemp': 3,
                 'totalCycleCount': None,
                 'totalStepCount': None},
        'status': 'heating'
    }
async def test_with_tempdeck(loop):
    """A simulated tempdeck replays set_temperature and reports holding at target."""
    setup = simulator_setup.SimulatorSetup(
        attached_modules={'tempdeck': [
            simulator_setup.ModuleCall('set_temperature',
                                       kwargs={'celsius': 23})
        ]})
    simulator = await simulator_setup.create_simulator(setup)

    assert type(simulator.attached_modules[0]) == TempDeck
    assert simulator.attached_modules[0].live_data == {
        'data': {
            'currentTemp': 23,
            'targetTemp': 23
        },
        'status': 'holding at target'
    }
def test_persistance(tmpdir):
    """Saving a SimulatorSetup to disk and loading it back round-trips equal."""
    sim = simulator_setup.SimulatorSetup(
        attached_instruments={
            Mount.LEFT: {'max_volume': 300},
            Mount.RIGHT: {'id': 'some id'},
        },
        attached_modules={
            'magdeck': [
                simulator_setup.ModuleCall('engage',
                                           kwargs={'height': 3})
            ],
            'tempdeck': [
                simulator_setup.ModuleCall('set_temperature',
                                           kwargs={'celsius': 23}),
                simulator_setup.ModuleCall('set_temperature',
                                           kwargs={'celsius': 24})
            ]
        },
        config=robot_configs.build_config({})
    )

    file = Path(tmpdir) / "sim_setup.json"
    simulator_setup.save_simulator_setup(sim, file)
    test_sim = simulator_setup.load_simulator_setup(file)

    assert test_sim == sim
| 34.353535 | 78 | 0.53102 |
ace48c283677106f3ef3802daf318750640a8a2b | 22,309 | py | Python | polymorphic/query.py | benkonrath/django_polymorphic | badad18a63497f64692e34193f4bf9a89bacfc9c | [
"BSD-3-Clause"
] | null | null | null | polymorphic/query.py | benkonrath/django_polymorphic | badad18a63497f64692e34193f4bf9a89bacfc9c | [
"BSD-3-Clause"
] | null | null | null | polymorphic/query.py | benkonrath/django_polymorphic | badad18a63497f64692e34193f4bf9a89bacfc9c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
""" QuerySet for PolymorphicModel
Please see README.rst or DOCS.rst or http://chrisglass.github.com/django_polymorphic/
"""
from __future__ import absolute_import
import copy
from collections import defaultdict
import django
from django.db.models.query import QuerySet, Q
from django.contrib.contenttypes.models import ContentType
from django.utils import six
from .query_translate import translate_polymorphic_filter_definitions_in_kwargs, translate_polymorphic_filter_definitions_in_args
from .query_translate import translate_polymorphic_field_path, translate_polymorphic_Q_object
# chunk-size: maximum number of objects requested per db-request
# by the polymorphic queryset.iterator() implementation; we use the same chunk size as Django
try:
    from django.db.models.query import CHUNK_SIZE # this is 100 for Django 1.1/1.2
except ImportError:
    # CHUNK_SIZE was removed in Django 1.6
    CHUNK_SIZE = 100
# Module-level alias for the chunk size described in the comment above.
Polymorphic_QuerySet_objects_per_request = CHUNK_SIZE
def transmogrify(cls, obj):
    """
    Upcast ``obj`` to class ``cls`` without asking questions.

    If the instance dict carries no ``__init__`` of its own, the object is
    converted in place by rebinding ``__class__`` (the same object is
    returned). Otherwise a fresh ``cls()`` is constructed and every
    instance attribute is copied onto it.

    Returns the converted object.
    """
    if '__init__' not in obj.__dict__:
        # Just assign __class__ to a different value.
        new = obj
        new.__class__ = cls
    else:
        # Run constructor, reassign values
        new = cls()
        for k, v in obj.__dict__.items():
            new.__dict__[k] = v
    return new
###################################################################################
# PolymorphicQuerySet
def _query_annotations(query):
try:
return query.annotations
except AttributeError:
# Django < 1.8
return query.aggregates
class PolymorphicQuerySet(QuerySet):
"""
QuerySet for PolymorphicModel
Contains the core functionality for PolymorphicModel
Usually not explicitly needed, except if a custom queryset class
is to be used.
"""
    def __init__(self, *args, **kwargs):
        "init our queryset object member variables"
        # When True this queryset behaves like a plain QuerySet and yields
        # base-class instances only (toggled by non_polymorphic()).
        self.polymorphic_disabled = False
        # A parallel structure to django.db.models.query.Query.deferred_loading,
        # which we maintain with the untranslated field names passed to
        # .defer() and .only() in order to be able to retranslate them when
        # retrieving the real instance (so that the deferred fields apply
        # to that queryset as well).
        self.polymorphic_deferred_loading = (set([]), True)
        super(PolymorphicQuerySet, self).__init__(*args, **kwargs)
    def _clone(self, *args, **kwargs):
        "Django's _clone only copies its own variables, so we need to copy ours here"
        new = super(PolymorphicQuerySet, self)._clone(*args, **kwargs)
        new.polymorphic_disabled = self.polymorphic_disabled
        # Copy the name set so the clone's deferred fields can diverge from ours.
        new.polymorphic_deferred_loading = (
            copy.copy(self.polymorphic_deferred_loading[0]),
            self.polymorphic_deferred_loading[1])
        return new
    if django.VERSION >= (1, 7):
        def as_manager(cls):
            # Make sure the Django 1.7 way of creating managers works.
            from .managers import PolymorphicManager
            manager = PolymorphicManager.from_queryset(cls)()
            manager._built_with_as_manager = True
            return manager
        # Mark as queryset-only and bind as a classmethod, mirroring how
        # Django declares its own QuerySet.as_manager.
        as_manager.queryset_only = True
        as_manager = classmethod(as_manager)
    def non_polymorphic(self):
        """switch off polymorphic behaviour for this query.
        When the queryset is evaluated, only objects of the type of the
        base class used for this query are returned."""
        qs = self._clone()
        # Only the flag changes here; it is honoured when results are fetched.
        qs.polymorphic_disabled = True
        return qs
    def instance_of(self, *args):
        """Filter the queryset to only include the classes in args (and their subclasses).
        Implemented by the polymorphic filter-definition translators in query_translate."""
        return self.filter(instance_of=args)
    def not_instance_of(self, *args):
        """Filter the queryset to exclude the classes in args (and their subclasses).
        Implemented by the polymorphic filter-definition translators in query_translate."""
        return self.filter(not_instance_of=args)
    def _filter_or_exclude(self, negate, *args, **kwargs):
        "We override this internal Django function as it is used for all filter member functions."
        # Rewrite polymorphic lookups (ClassX___field, instance_of=...) into
        # plain ORM filters before handing off to Django.
        q_objects = translate_polymorphic_filter_definitions_in_args(self.model, args, using=self.db)  # the Q objects
        additional_args = translate_polymorphic_filter_definitions_in_kwargs(self.model, kwargs, using=self.db)  # filter_field='data'
        return super(PolymorphicQuerySet, self)._filter_or_exclude(negate, *(list(q_objects) + additional_args), **kwargs)
    def order_by(self, *args, **kwargs):
        """translate the field paths in the args, then call vanilla order_by."""
        # ClassX___field names become concrete ORM paths before sorting.
        new_args = [translate_polymorphic_field_path(self.model, a) for a in args]
        return super(PolymorphicQuerySet, self).order_by(*new_args, **kwargs)
    def defer(self, *fields):
        """
        Translate the field paths in the args, then call vanilla defer.

        Also retain a copy of the original fields passed, which we'll need
        when we're retrieving the real instance (since we'll need to translate
        them again, as the model will have changed).
        """
        new_fields = [translate_polymorphic_field_path(self.model, a) for a in fields]
        clone = super(PolymorphicQuerySet, self).defer(*new_fields)
        # Remember the untranslated names for later re-translation.
        clone._polymorphic_add_deferred_loading(fields)
        return clone
def only(self, *fields):
    """Translate the field paths in ``fields``, then call vanilla only().

    A copy of the original (untranslated) names is recorded as well: when a
    real (derived) instance is retrieved later, the names must be translated
    again against the derived model.
    """
    translated = [translate_polymorphic_field_path(self.model, field)
                  for field in fields]
    clone = super(PolymorphicQuerySet, self).only(*translated)
    clone._polymorphic_add_immediate_loading(fields)
    return clone
def _polymorphic_add_deferred_loading(self, field_names):
    """Track untranslated names passed to self.defer().

    Mirrors the logic of django.db.models.query.Query.add_deferred_loading(),
    but operates on the non-translated field names.
    """
    current_names, defer_mode = self.polymorphic_deferred_loading
    if defer_mode:
        # Already in "defer" mode: accumulate into the deferred-name set.
        self.polymorphic_deferred_loading = current_names.union(field_names), True
    else:
        # "Immediate load" mode: drop these names from the immediate set.
        self.polymorphic_deferred_loading = current_names.difference(field_names), False
def _polymorphic_add_immediate_loading(self, field_names):
    """Track untranslated names passed to self.only().

    Mirrors the logic of django.db.models.query.Query.add_immediate_loading(),
    but operates on the non-translated field names.
    """
    current_names, defer_mode = self.polymorphic_deferred_loading
    requested = set(field_names)
    # Normalise the generic 'pk' alias to the model's actual pk field name.
    if 'pk' in requested:
        requested.discard('pk')
        requested.add(self.model._meta.pk.name)
    if defer_mode:
        # Remove any existing deferred names from the current set before
        # setting the new names.
        self.polymorphic_deferred_loading = requested.difference(current_names), False
    else:
        # Replace any existing "immediate load" field names.
        self.polymorphic_deferred_loading = requested, False
def _process_aggregate_args(self, args, kwargs):
    """for aggregate and annotate kwargs: allow ModelX___field syntax for kwargs, forbid it for args.
    Modifies kwargs if needed (these are Aggregate objects, we translate the lookup member variable)"""
    ___lookup_assert_msg = 'PolymorphicModel: annotate()/aggregate(): ___ model lookup supported for keyword arguments only'
    # Two pairs of closures are defined depending on the Django version:
    # patch_lookup() rewrites the lookup inside an aggregate (kwargs path),
    # test___lookup() asserts that no '___' path sneaks into positional args.
    if django.VERSION < (1, 8):
        def patch_lookup(a):
            # Pre-1.8 aggregates expose the field path directly as .lookup.
            a.lookup = translate_polymorphic_field_path(self.model, a.lookup)

        def test___lookup(a):
            assert '___' not in a.lookup, ___lookup_assert_msg
    else:
        def patch_lookup(a):
            # With Django > 1.8, the field on which the aggregate operates is
            # stored inside a complex query expression.
            if isinstance(a, Q):
                translate_polymorphic_Q_object(self.model, a)
            elif hasattr(a, 'get_source_expressions'):
                # Recurse through nested expressions (e.g. Coalesce(F(...))).
                for source_expression in a.get_source_expressions():
                    if source_expression is not None:
                        patch_lookup(source_expression)
            else:
                a.name = translate_polymorphic_field_path(self.model, a.name)

        def test___lookup(a):
            """ *args might be complex expressions too in django 1.8 so
            the testing for a '___' is rather complex on this one """
            if isinstance(a, Q):
                def tree_node_test___lookup(my_model, node):
                    " process all children of this Q node "
                    for i in range(len(node.children)):
                        child = node.children[i]
                        if type(child) == tuple:
                            # this Q object child is a tuple => a kwarg like Q( instance_of=ModelB )
                            assert '___' not in child[0], ___lookup_assert_msg
                        else:
                            # this Q object child is another Q object, recursively process this as well
                            tree_node_test___lookup(my_model, child)
                tree_node_test___lookup(self.model, a)
            elif hasattr(a, 'get_source_expressions'):
                for source_expression in a.get_source_expressions():
                    test___lookup(source_expression)
            else:
                assert '___' not in a.name, ___lookup_assert_msg
    # Positional args may only be checked; kwargs get translated in place.
    for a in args:
        test___lookup(a)
    for a in six.itervalues(kwargs):
        patch_lookup(a)
def annotate(self, *args, **kwargs):
    """Translate polymorphic field paths in the arguments, then delegate to
    the vanilla annotate().

    _get_real_instances() later copies the annotation values onto the
    upcast (real-class) objects after the query executes.
    """
    self._process_aggregate_args(args, kwargs)
    return super(PolymorphicQuerySet, self).annotate(*args, **kwargs)
def aggregate(self, *args, **kwargs):
    """Translate polymorphic field paths in the arguments, then delegate to
    the vanilla aggregate().

    Aggregates return plain values rather than model instances, so
    polymorphic object retrieval is switched off first.
    """
    self._process_aggregate_args(args, kwargs)
    plain_qs = self.non_polymorphic()
    return super(PolymorphicQuerySet, plain_qs).aggregate(*args, **kwargs)
if django.VERSION >= (1, 9):
    # On Django < 1.9, 'qs.values(...)' returned a new special ValuesQuerySet
    # object, which our polymorphic modifications didn't apply to.
    # Starting with Django 1.9, the copy returned by 'qs.values(...)' has the
    # same class as 'qs', so our polymorphic modifications would apply.
    # We want to leave values queries untouched, so we set 'polymorphic_disabled'.
    def _values(self, *args, **kwargs):
        # Dict/tuple results cannot be upcast to real instances, so disable
        # the polymorphic machinery on the returned clone.
        clone = super(PolymorphicQuerySet, self)._values(*args, **kwargs)
        clone.polymorphic_disabled = True
        return clone
# Since django_polymorphic 'V1.0 beta2', extra() always returns polymorphic results.
# The resulting objects are required to have a unique primary key within the result set
# (otherwise an error is thrown).
# The "polymorphic" keyword argument is not supported anymore.
# def extra(self, *args, **kwargs):
def _get_real_instances(self, base_result_objects):
    """
    Polymorphic object loader
    Does the same as:
        return [ o.get_real_instance() for o in base_result_objects ]
    but more efficiently.
    The list base_result_objects contains the objects from the executed
    base class query. The class of all of them is self.model (our base model).
    Some, many or all of these objects were not created and stored as
    class self.model, but as a class derived from self.model. We want to re-fetch
    these objects from the db as their original class so we can return them
    just as they were created/saved.
    We identify these objects by looking at o.polymorphic_ctype, which specifies
    the real class of these objects (the class at the time they were saved).
    First, we sort the result objects in base_result_objects for their
    subclass (from o.polymorphic_ctype), and then we execute one db query per
    subclass of objects. Here, we handle any annotations from annotate().
    Finally we re-sort the resulting objects into the correct order and
    return them as a list.
    """
    ordered_id_list = []    # list of ids of result-objects in correct order
    results = {}            # polymorphic dict of result-objects, keyed with their id (no order)

    # dict contains one entry per unique model type occurring in result,
    # in the format idlist_per_model[modelclass]=[list-of-object-ids]
    idlist_per_model = defaultdict(list)

    # django's automatic ".pk" field does not always work correctly for
    # custom fields in derived objects (unclear yet who to put the blame on).
    # We get different type(o.pk) in this case.
    # We work around this by using the real name of the field directly
    # for accessing the primary key of the the derived objects.
    # We might assume that self.model._meta.pk.name gives us the name of the primary key field,
    # but it doesn't. Therefore we use polymorphic_primary_key_name, which we set up in base.py.
    pk_name = self.model.polymorphic_primary_key_name

    # - sort base_result_object ids into idlist_per_model lists, depending on their real class;
    # - also record the correct result order in "ordered_id_list"
    # - store objects that already have the correct class into "results"
    base_result_objects_by_id = {}
    content_type_manager = ContentType.objects.db_manager(self.db)
    self_model_class_id = content_type_manager.get_for_model(self.model, for_concrete_model=False).pk
    self_concrete_model_class_id = content_type_manager.get_for_model(self.model, for_concrete_model=True).pk

    for base_object in base_result_objects:
        ordered_id_list.append(base_object.pk)
        # check if id of the result object occurres more than once - this can happen e.g. with base_objects.extra(tables=...)
        if not base_object.pk in base_result_objects_by_id:
            base_result_objects_by_id[base_object.pk] = base_object

        if base_object.polymorphic_ctype_id == self_model_class_id:
            # Real class is exactly the same as base class, go straight to results
            results[base_object.pk] = base_object
        else:
            real_concrete_class = base_object.get_real_instance_class()
            real_concrete_class_id = base_object.get_real_concrete_instance_class_id()

            if real_concrete_class_id is None:
                # Dealing with a stale content type
                continue
            elif real_concrete_class_id == self_concrete_model_class_id:
                # Real and base classes share the same concrete ancestor,
                # upcast it and put it in the results
                results[base_object.pk] = transmogrify(real_concrete_class, base_object)
            else:
                real_concrete_class = content_type_manager.get_for_id(real_concrete_class_id).model_class()
                idlist_per_model[real_concrete_class].append(getattr(base_object, pk_name))

    # For each model in "idlist_per_model" request its objects (the real model)
    # from the db and store them in results[].
    # Then we copy the annotate fields from the base objects to the real objects.
    # Then we copy the extra() select fields from the base objects to the real objects.
    # TODO: defer(), only(): support for these would be around here
    for real_concrete_class, idlist in idlist_per_model.items():
        real_objects = real_concrete_class.base_objects.db_manager(self.db).filter(**{
            ('%s__in' % pk_name): idlist,
        })
        real_objects.query.select_related = self.query.select_related  # copy select related configuration to new qs

        # Copy deferred fields configuration to the new queryset
        deferred_loading_fields = []
        existing_fields = self.polymorphic_deferred_loading[0]
        for field in existing_fields:
            try:
                translated_field_name = translate_polymorphic_field_path(
                    real_concrete_class, field)
            except AssertionError:
                if '___' in field:
                    # The originally passed argument to .defer() or .only()
                    # was in the form Model2B___field2, where Model2B is
                    # now a superclass of real_concrete_class. Thus it's
                    # sufficient to just use the field name.
                    translated_field_name = field.rpartition('___')[-1]
                else:
                    raise

            deferred_loading_fields.append(translated_field_name)
        real_objects.query.deferred_loading = (set(deferred_loading_fields), self.query.deferred_loading[1])

        for real_object in real_objects:
            o_pk = getattr(real_object, pk_name)
            real_class = real_object.get_real_instance_class()

            # If the real class is a proxy, upcast it
            if real_class != real_concrete_class:
                real_object = transmogrify(real_class, real_object)

            # Copy annotation values from the base query's object onto the
            # freshly fetched real object.
            if _query_annotations(self.query):
                for anno_field_name in six.iterkeys(_query_annotations(self.query)):
                    attr = getattr(base_result_objects_by_id[o_pk], anno_field_name)
                    setattr(real_object, anno_field_name, attr)

            # Same for extra() select fields.
            if self.query.extra_select:
                for select_field_name in six.iterkeys(self.query.extra_select):
                    attr = getattr(base_result_objects_by_id[o_pk], select_field_name)
                    setattr(real_object, select_field_name, attr)

            results[o_pk] = real_object

    # re-create correct order and return result list
    resultlist = [results[ordered_id] for ordered_id in ordered_id_list if ordered_id in results]

    # set polymorphic_annotate_names in all objects (currently just used for debugging/printing)
    if _query_annotations(self.query):
        annotate_names = list(six.iterkeys(_query_annotations(self.query)))  # get annotate field list
        for real_object in resultlist:
            real_object.polymorphic_annotate_names = annotate_names

    # set polymorphic_extra_select_names in all objects (currently just used for debugging/printing)
    if self.query.extra_select:
        extra_select_names = list(six.iterkeys(self.query.extra_select))  # get extra select field list
        for real_object in resultlist:
            real_object.polymorphic_extra_select_names = extra_select_names

    return resultlist
def iterator(self):
    """
    This function is used by Django for all object retrieval.
    By overriding it, we modify the objects that this queryset returns
    when it is evaluated (or its get method or other object-returning methods are called).
    Here we do the same as:
        base_result_objects=list(super(PolymorphicQuerySet, self).iterator())
        real_results=self._get_real_instances(base_result_objects)
        for o in real_results: yield o
    but it requests the objects in chunks from the database,
    with Polymorphic_QuerySet_objects_per_request per chunk
    """
    base_iter = super(PolymorphicQuerySet, self).iterator()

    # disabled => work just like a normal queryset
    if self.polymorphic_disabled:
        for o in base_iter:
            yield o
        return

    while True:
        base_result_objects = []
        reached_end = False

        # Pull up to one chunk of base objects from the underlying iterator.
        for i in range(Polymorphic_QuerySet_objects_per_request):
            try:
                o = next(base_iter)
                base_result_objects.append(o)
            except StopIteration:
                reached_end = True
                break

        # Upcast the whole chunk in one go, then stream the results out.
        real_results = self._get_real_instances(base_result_objects)

        for o in real_results:
            yield o

        if reached_end:
            return
def __repr__(self, *args, **kwargs):
    """One-object-per-line repr when the model requests multi-line output."""
    if not self.model.polymorphic_query_multiline_output:
        return super(PolymorphicQuerySet, self).__repr__(*args, **kwargs)
    rendered = [repr(obj) for obj in self.all()]
    return '[ ' + ',\n '.join(rendered) + ' ]'
class _p_list_class(list):
    """List subclass whose repr prints one element per line.

    Used so get_real_instances() keeps the pretty multi-line output of the
    ShowField... mixins.
    """
    def __repr__(self, *args, **kwargs):
        rendered = [repr(item) for item in self]
        return '[ ' + ',\n '.join(rendered) + ' ]'
def get_real_instances(self, base_result_objects=None):
    """Same as _get_real_instances(), but wraps the result so that the
    __repr__ output for ShowField... mixins stays correct."""
    if not base_result_objects:
        base_result_objects = self
    upcast_objects = self._get_real_instances(base_result_objects)
    if not self.model.polymorphic_query_multiline_output:
        return upcast_objects
    return PolymorphicQuerySet._p_list_class(upcast_objects)
| 46.867647 | 134 | 0.6453 |
ace48e987639c50f8f091b020d9c62553eaa84d4 | 6,745 | py | Python | zerver/tests/test_middleware.py | fearless0307/zulip | 378d14af7ea73a9a83c7245706cd918bec5a37bf | [
"Apache-2.0"
] | 2 | 2019-04-24T15:22:52.000Z | 2020-01-18T11:01:31.000Z | zerver/tests/test_middleware.py | fearless0307/zulip | 378d14af7ea73a9a83c7245706cd918bec5a37bf | [
"Apache-2.0"
] | 10 | 2019-02-26T11:10:42.000Z | 2019-02-26T14:30:24.000Z | zerver/tests/test_middleware.py | fearless0307/zulip | 378d14af7ea73a9a83c7245706cd918bec5a37bf | [
"Apache-2.0"
] | 1 | 2020-01-07T15:49:54.000Z | 2020-01-07T15:49:54.000Z | import re
import time
from typing import List
from django.test import override_settings
from unittest.mock import Mock, patch
from zerver.lib.test_classes import ZulipTestCase
from zerver.middleware import is_slow_query
from zerver.middleware import write_log_line
class SlowQueryTest(ZulipTestCase):
    # Fake elapsed time (seconds) guaranteed to exceed every path's threshold.
    SLOW_QUERY_TIME = 10

    # Template request-log record fed to write_log_line(); note this is a
    # class-level dict shared by the tests, which mutate 'time_started'.
    log_data = {'extra': '[transport=websocket]',
                'time_started': 0,
                'bugdown_requests_start': 0,
                'bugdown_time_start': 0,
                'remote_cache_time_start': 0,
                'remote_cache_requests_start': 0}

    def test_is_slow_query(self) -> None:
        """Spot-check the per-path thresholds used by is_slow_query()."""
        self.assertFalse(is_slow_query(1.1, '/some/random/url'))
        self.assertTrue(is_slow_query(2, '/some/random/url'))
        self.assertTrue(is_slow_query(5.1, '/activity'))
        self.assertFalse(is_slow_query(2, '/activity'))
        self.assertFalse(is_slow_query(2, '/json/report/error'))
        self.assertFalse(is_slow_query(2, '/api/v1/deployments/report_error'))
        self.assertFalse(is_slow_query(2, '/realm_activity/whatever'))
        self.assertFalse(is_slow_query(2, '/user_activity/whatever'))
        self.assertFalse(is_slow_query(9, '/accounts/webathena_kerberos_login/'))
        self.assertTrue(is_slow_query(11, '/accounts/webathena_kerberos_login/'))

    @override_settings(SLOW_QUERY_LOGS_STREAM="logs")
    @patch('logging.info')
    def test_slow_query_log(self, mock_logging_info: Mock) -> None:
        """A slow request should post a message to the slow-queries stream."""
        self.log_data['time_started'] = time.time() - self.SLOW_QUERY_TIME
        write_log_line(self.log_data, path='/socket/open', method='SOCKET',
                       remote_ip='123.456.789.012', email='unknown', client_name='?')
        last_message = self.get_last_message()
        self.assertEqual(last_message.sender.email, "error-bot@zulip.com")
        self.assertIn("logs", str(last_message.recipient))
        self.assertEqual(last_message.topic_name(), "testserver: slow queries")
        self.assertRegexpMatches(last_message.content,
                                 r"123\.456\.789\.012 SOCKET 200 10\.\ds .*")

    @override_settings(ERROR_BOT=None)
    @patch('logging.info')
    @patch('zerver.lib.actions.internal_send_message')
    def test_slow_query_log_without_error_bot(self, mock_internal_send_message: Mock,
                                              mock_logging_info: Mock) -> None:
        """Without an ERROR_BOT configured, no stream message may be sent."""
        self.log_data['time_started'] = time.time() - self.SLOW_QUERY_TIME
        write_log_line(self.log_data, path='/socket/open', method='SOCKET',
                       remote_ip='123.456.789.012', email='unknown', client_name='?')
        mock_internal_send_message.assert_not_called()
class OpenGraphTest(ZulipTestCase):
    def check_title_and_description(self, path: str, title: str,
                                    in_description: List[str],
                                    not_in_description: List[str]) -> None:
        """Fetch *path* and verify its Open Graph / Twitter card metadata.

        Asserts the og/twitter titles equal *title*, every string in
        *in_description* appears in both description tags, and every string
        in *not_in_description* appears in neither.
        """
        response = self.client_get(path)
        self.assertEqual(response.status_code, 200)
        decoded = response.content.decode('utf-8')
        for title_string in [
                '<meta property="og:title" content="{}">'.format(title),
                '<meta property="twitter:title" content="{}">'.format(title)]:
            self.assertIn(title_string, decoded)
        open_graph_description = re.search(  # type: ignore
            r'<meta property="og:description" content="(?P<description>[^>]*)">',
            decoded).group('description')
        twitter_description = re.search(  # type: ignore
            r'<meta name="twitter:description" content="(?P<description>[^>]*)">',
            decoded).group('description')
        for substring in in_description:
            self.assertIn(substring, open_graph_description)
            self.assertIn(substring, twitter_description)
        for substring in not_in_description:
            self.assertNotIn(substring, open_graph_description)
            self.assertNotIn(substring, twitter_description)

    def test_admonition_and_link(self) -> None:
        """Descriptions should skip admonition macros but keep first-paragraph text."""
        # disable-message-edit-history starts with an {!admin-only.md!}, and has a link
        # in the first paragraph.
        self.check_title_and_description(
            '/help/disable-message-edit-history',
            "Disable message edit history (Zulip Help Center)",
            ["By default, Zulip displays messages",
             "users can view the edit history of a message. To remove the",
             "best to delete the message entirely. "],
            ["Disable message edit history", "feature is only available", "Related articles",
             "Restrict message editing"]
        )

    def test_settings_tab(self) -> None:
        """Pages starting with a settings_tab macro should still describe the body."""
        # deactivate-your-account starts with {settings_tab|your-account}
        self.check_title_and_description(
            '/help/deactivate-your-account',
            "Deactivate your account (Zulip Help Center)",
            ["Any bots that you maintain will be disabled. Deactivating "],
            ["Confirm by clicking", " ", "\n"])

    def test_tabs(self) -> None:
        """Pages starting with {start_tabs} fall back to the generic blurb."""
        # logging-out starts with {start_tabs}
        self.check_title_and_description(
            '/help/logging-out',
            "Logging out (Zulip Help Center)",
            # Ideally we'd do something better here
            ["We're here to help! Email us at zulip-admin@example.com with questions, feedback, or " +
             "feature requests."],
            ["Click on the gear"])

    def test_index_pages(self) -> None:
        """The /help/ and /api/ index pages should carry their own blurbs."""
        self.check_title_and_description(
            '/help/',
            "Zulip Help Center",
            [("Zulip is a group chat app. Its most distinctive characteristic is that "
              "conversation within an organization is divided into “streams” and further ")], [])
        self.check_title_and_description(
            '/api/',
            "Zulip API Documentation",
            [("Zulip's APIs allow you to integrate other services with Zulip. This "
              "guide should help you find the API you need:")], [])

    def test_nonexistent_page(self) -> None:
        """Open Graph rendering must not 500 on a missing article."""
        response = self.client_get('/help/not-a-real-page')
        # Test that our open graph logic doesn't throw a 500
        self.assertEqual(response.status_code, 404)
        self.assert_in_response(
            # Probably we should make this "Zulip Help Center"
            '<meta property="og:title" content="No such article. (Zulip Help Center)">', response)
        self.assert_in_response('<meta property="og:description" content="No such article. '
                                'We\'re here to help! Email us at zulip-admin@example.com with questions, '
                                'feedback, or feature requests.">', response)
ace48edc34812d81b11826082f0a934af0300caf | 2,120 | py | Python | analyzer_project/source_parser/python_source_parser.py | Dakhnovskiy/linguistic_analyzer_projects | 36551d788f76be47b8892bce4073900e2cccec2f | [
"Apache-2.0"
] | null | null | null | analyzer_project/source_parser/python_source_parser.py | Dakhnovskiy/linguistic_analyzer_projects | 36551d788f76be47b8892bce4073900e2cccec2f | [
"Apache-2.0"
] | null | null | null | analyzer_project/source_parser/python_source_parser.py | Dakhnovskiy/linguistic_analyzer_projects | 36551d788f76be47b8892bce4073900e2cccec2f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'Dmitriy.Dakhnovskiy'
import ast
from .abstract_source_parser import AbstractSourceParser
class PythonSourceParser(AbstractSourceParser):
    """Source parser for Python code based on the stdlib ``ast`` module.

    Identifies assigned variables and non-dunder function definitions in a
    parsed syntax tree, and splits their snake_case identifiers into words.
    """

    def __init__(self, source):
        """
        :param source: the source code to analyze
        """
        super().__init__(source)

    @staticmethod
    def _parse_source(source):
        """Parse *source* into an AST; return None if it is not valid Python.

        :param source: the source code
        """
        try:
            return ast.parse(source)
        except SyntaxError:
            return None

    @staticmethod
    def _walk_element(element):
        """Iterate over all nodes of the parsed structure.

        :param element: an AST node
        """
        return ast.walk(element)

    @staticmethod
    def _is_variable(element):
        """Return True when *element* is a name being written, not read.

        :param element: an AST node
        """
        if not isinstance(element, ast.Name):
            return False
        return not isinstance(element.ctx, ast.Load)

    @staticmethod
    def _is_function(element):
        """Return True when *element* is a function definition other than a dunder.

        :param element: an AST node
        """
        if not isinstance(element, ast.FunctionDef):
            return False
        name_function = PythonSourceParser._get_element_name(element)
        return not (name_function.startswith('__') and name_function.endswith('__'))

    @staticmethod
    def _get_words_from_identificator(identificator):
        """Split a snake_case identifier into its component words.

        :param identificator: the identifier string
        """
        return identificator.split('_')

    @staticmethod
    def _get_element_name(element):
        """Return the lower-cased name/id of an AST node, or None if it has neither.

        :param element: an AST node
        """
        if hasattr(element, 'name'):
            return element.name.lower()
        if hasattr(element, 'id'):
            return element.id.lower()
        return None
ace48f80fc5f9012a21f24b2abe99dfbefa61acf | 2,780 | py | Python | server.py | merwane-rakkaoui/TER_S6 | e51355c0b4daa3b7d61d8d2627977aeb412bb9d0 | [
"MIT"
] | 1 | 2021-03-13T19:33:47.000Z | 2021-03-13T19:33:47.000Z | server.py | merwane-rakkaoui/TER_S6 | e51355c0b4daa3b7d61d8d2627977aeb412bb9d0 | [
"MIT"
] | null | null | null | server.py | merwane-rakkaoui/TER_S6 | e51355c0b4daa3b7d61d8d2627977aeb412bb9d0 | [
"MIT"
] | 4 | 2021-02-17T18:10:44.000Z | 2022-01-26T16:53:05.000Z | from flask import Flask, request, jsonify
import os
import speech_recognition as sr
from sys import platform
from playsound import playsound
from utils import *
from wsgiref.simple_server import make_server
# On the Raspberry Pi, silence ALSA's noisy stderr diagnostics by installing
# a no-op error handler into libasound.
if get_os()=="pi":
    from ctypes import *
    # Define our error handler type
    ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p)
    def py_error_handler(filename, line, function, err, fmt):
        # Deliberately swallow every ALSA error message.
        return
    c_error_handler = ERROR_HANDLER_FUNC(py_error_handler)
    asound = cdll.LoadLibrary('libasound.so')
    # Set error handler
    asound.snd_lib_error_set_handler(c_error_handler)

# Directory holding the audio files served/recorded by this server
# (sits next to this script, named "<script path>_data/").
path_volume= abspath(__file__)+"_data/"
# Sound effects played at the start/end of a recording, if present.
start_rec_effect= path_volume+'effects/start_rec_effect.wav'
end_rec_effect= path_volume+'effects/end_rec_effect.wav'

# Create the Flask application.
app= Flask(__name__)
# NOTE(review): hard-coded secret key; acceptable for a LAN toy server,
# not for production use.
app.config.from_mapping(SECRET_KEY='mysecret')
@app.route('/', methods=['POST', 'GET'])
def index():
    """Return a JSON listing of everything stored in the data volume."""
    entries = os.listdir(path_volume)
    return jsonify(volume_content=entries)
@app.route('/test', methods=['POST', 'GET'])
def test():
    """Simple liveness-check endpoint."""
    return "ceci est un test"
@app.route('/playsound', methods=['POST', 'GET'])
def play():
    """Play an audio file from the data volume.

    Query params:
        file -- file path relative to the volume directory.
    """
    requested_file = str(request.args.get('file'))
    try:
        playsound(path_volume + requested_file)
    except Exception as error:
        return jsonify(status="fail", error=str(error))
    # NOTE(review): "succes" is misspelled, but clients may match on it.
    return jsonify(status="succes")
@app.route('/microphone',methods=['POST', 'GET'])
def microphone():
    """Record audio from the default microphone and save it as a WAV file
    in the data volume.

    Query params:
        file        -- target file name inside the volume (overwritten if present)
        play_effect -- truthy string: play start/end sound effects around the recording
    """
    # get data sent with the request
    file_name= str(request.args.get('file'))
    play_effect= str_to_bool(str(request.args.get('play_effect')))
    # Remove any previous recording with the same name.
    if os.path.exists(path_volume+file_name):
        os.remove(path_volume+file_name)
    try:
        # print(sr.Microphone.list_microphone_names())
        r = sr.Recognizer()
        mic = sr.Microphone()
        with mic as source:
            # start recording voice
            if play_effect and os.path.isfile(start_rec_effect):
                playsound(start_rec_effect)
            print("##### start #####")
            # Blocks until the recognizer detects end of speech.
            audio = r.listen(source)
            print("###### end ######")
            if play_effect and os.path.isfile(end_rec_effect):
                playsound(end_rec_effect)
        # save audio file in format wav
        with open(path_volume+file_name, "wb") as f:
            f.write(audio.get_wav_data())
    except Exception as error:
        return jsonify(status="fail",error=str(error))
    return jsonify(status="succes")
def run_server():
    """Serve the Flask app forever on 127.0.0.1:5000 via a plain WSGI server."""
    with make_server('127.0.0.1', 5000, app) as wsgi_server:
        wsgi_server.serve_forever()
if __name__ == '__main__':
    # Start the app through a plain WSGI server (no Flask debug reloader).
    with make_server('127.0.0.1',5000,app) as server:
        # 'platform' here is sys.platform (imported at the top of the file).
        if(platform.startswith('win')):
            print("Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)")
        else:
            print("Running on http://127.0.0.1:5000/ (Press CTRL+Z to quit)")
        server.serve_forever()
ace490e165d45dda770f9079f68a09d7e4cdb7a0 | 168 | py | Python | Exercicio-12.py | NicolasFreitas1/Exercicio_LP_B1 | 34b07c3d9e5c03981922f2852e076bd536c29dc9 | [
"MIT"
] | null | null | null | Exercicio-12.py | NicolasFreitas1/Exercicio_LP_B1 | 34b07c3d9e5c03981922f2852e076bd536c29dc9 | [
"MIT"
] | null | null | null | Exercicio-12.py | NicolasFreitas1/Exercicio_LP_B1 | 34b07c3d9e5c03981922f2852e076bd536c29dc9 | [
"MIT"
] | null | null | null | salario = float(input("Digite seu salario: "))
aumento = salario *15 / 100
salariof = salario + aumento
print("O seu salario final será de: {:.2fc}".format(salariof)) | 28 | 62 | 0.702381 |
ace491687d50e2042b39ed26e310ea44a4879b8d | 2,499 | py | Python | .github/workflows/maturin_build_wheel.py | xyb/rocksdb3 | 92d554607cf7a1be1433eb28bf35797fcc63e78e | [
"Apache-2.0"
] | 14 | 2020-09-21T08:44:32.000Z | 2022-03-06T21:05:21.000Z | .github/workflows/maturin_build_wheel.py | xyb/rocksdb-py | e91b37d82c25e3763b18893595642bae5400edea | [
"Apache-2.0"
] | 5 | 2021-09-27T21:11:58.000Z | 2022-02-06T01:54:45.000Z | .github/workflows/maturin_build_wheel.py | xyb/rocksdb-py | e91b37d82c25e3763b18893595642bae5400edea | [
"Apache-2.0"
] | 4 | 2021-05-11T06:37:26.000Z | 2022-02-03T20:37:24.000Z | #! /usr/bin/env python3
import os
import platform
import subprocess
import sys
from pathlib import Path
# Repository root: this script lives under .github/workflows/.
ROOT = Path(__file__).parent.parent.parent

# Clean up wheels
WHEEL_DIR = ROOT / "target" / "wheels"
if WHEEL_DIR.exists():
    for x in WHEEL_DIR.iterdir():
        x.unlink()

# For macOS and Windows, we run Maturin against the Python interpreter that's
# been installed and configured for this CI run, i.e. the one that's running
# this script. (There are generally several versions installed by default, but
# that's not guaranteed.) For Linux, in order to get "manylinux" compatibility
# right, we need to run Maturin in a special Docker container. We hardcode
# paths to specific interpreter versions, based on where things are installed
# in this container. Our GitHub config has no effect on the the container, so
# we could build all the wheels in one job, but we stick to one-wheel-per-job
# for consistency.
if platform.system() == "Linux":
    # Maps (major, minor) -> (manylinux interpreter dir, docker image tag).
    version_path_components = {
        (3, 5): ("cp35-cp35m", "xieyanbo/manylinux-maturin:llvm-3.9.1-py-3.5"),
        (3, 6): ("cp36-cp36m", "xieyanbo/manylinux-maturin:llvm-3.9.1"),
        (3, 7): ("cp37-cp37m", "xieyanbo/manylinux-maturin:llvm-3.9.1"),
        (3, 8): ("cp38-cp38", "xieyanbo/manylinux-maturin:llvm-3.9.1"),
        (3, 9): ("cp39-cp39", "xieyanbo/manylinux-maturin:llvm-3.9.1"),
        (3, 10): ("cp310-cp310", "xieyanbo/manylinux-maturin:llvm-3.9.1"),
        # This list needs to be kept in sync with tag.yml.
    }
    (version_component, docker_image) = version_path_components[sys.version_info[:2]]
    interpreter_path = "/opt/python/" + version_component + "/bin/python"
    # See https://github.com/PyO3/maturin#manylinux-and-auditwheel
    command = [
        "docker",
        "run",
        "--rm",
        "--volume=" + os.getcwd() + ":/io",
        docker_image,
        "build",
        "--release",
        "--no-sdist",
        "--manylinux=2014",
        "--interpreter=" + interpreter_path,
    ]
    subprocess.run(command, check=True)
else:
    command = [
        "maturin",
        "build",
        "--release",
        "--no-sdist",
        "--interpreter",
        sys.executable,
    ]
    subprocess.run(command, check=True)

# Exactly one wheel must have been produced by the build above.
wheels = [x for x in (ROOT / "target" / "wheels").iterdir()]
if len(wheels) != 1:
    raise RuntimeError("expected one wheel, found " + repr(wheels))
# NOTE(review): the "::set-output" workflow command is deprecated in GitHub
# Actions in favor of writing to $GITHUB_OUTPUT -- confirm the runner version.
print("::set-output name=wheel_path::" + str(wheels[0]))
print("::set-output name=wheel_name::" + wheels[0].name)
ace492eb6b49c410cf0da1c162ec0b2bcb92af7f | 1,150 | py | Python | app/modules/storages/params.py | ssfdust/full-stack-flask-smorest | a0bdbd3a7d314b82bb43b265578aba7bbd175e51 | [
"Apache-2.0"
] | 33 | 2019-09-25T02:19:43.000Z | 2022-03-25T01:58:19.000Z | app/modules/storages/params.py | ssfdust/full-stack-flask-rest-api | a0bdbd3a7d314b82bb43b265578aba7bbd175e51 | [
"Apache-2.0"
] | null | null | null | app/modules/storages/params.py | ssfdust/full-stack-flask-rest-api | a0bdbd3a7d314b82bb43b265578aba7bbd175e51 | [
"Apache-2.0"
] | 6 | 2020-01-12T15:18:07.000Z | 2021-06-01T16:30:26.000Z | # Copyright 2019 RedLotus <ssfdust@gmail.com>
# Author: RedLotus <ssfdust@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from app.extensions import ma
from marshmallow import fields
from app.extensions.marshal.bases import UploadField
class UploadParams(ma.Schema):
    """
    Upload parameters: request schema for a file upload.
    """

    # The uploaded file itself, taken from the multipart "files" section.
    file = UploadField(
        description="文件", allow_none=False, required=True, location="files"
    )
    # Optional display name for the stored file (form field).
    name = fields.String(description="文件名", location="form")
    # Storage backend type; required (form field).
    storetype = fields.String(
        description="存储类型", allow_none=False, required=True, location="form"
    )
    # Arbitrary extra arguments passed through in the JSON body.
    extra_args = UploadField(description="额外参数", location="json")
ace492f3155bc3619a5a70dd26dfbaba2eb7a5ce | 11,337 | py | Python | anosql/core.py | wallawaz/anosql | 6b0b13d8a5e5c838a510535c98bcdf2fbd9a41ab | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | anosql/core.py | wallawaz/anosql | 6b0b13d8a5e5c838a510535c98bcdf2fbd9a41ab | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | anosql/core.py | wallawaz/anosql | 6b0b13d8a5e5c838a510535c98bcdf2fbd9a41ab | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | import os
from .adapters.mysql import MySQLDriverAdapter
from .adapters.psycopg2 import PsycoPG2Adapter
from .adapters.sqlite3 import SQLite3DriverAdapter
from .exceptions import SQLLoadException, SQLParseException
from .patterns import (
query_name_definition_pattern,
empty_pattern,
doc_comment_pattern,
valid_query_name_pattern,
)
# Registry mapping driver names to adapter classes (or factories).
# Extended at runtime through register_driver_adapter().
_ADAPTERS = {
    "psycopg2": PsycoPG2Adapter,
    "sqlite3": SQLite3DriverAdapter,
    "mysql": MySQLDriverAdapter,
}
def register_driver_adapter(driver_name, driver_adapter):
    """Register a custom driver adapter so ``anosql`` can handle extra drivers.

    For details on how to create a new driver adapter see the
    :ref:`driver-adapters` documentation.

    Args:
        driver_name (str): The driver type name to register under.
        driver_adapter (callable): Either a class or a zero-argument factory
            returning an adapter instance. The adapter must provide
            ``process_sql``, ``select``, ``select_cursor``,
            ``insert_update_delete``, ``insert_update_delete_many``,
            ``insert_returning`` and ``execute_script``.

    Returns:
        None

    Examples:
        Register an adapter class directly::

            anosql.register_driver_adapter("mydb", MyDbAdapter)

        Or, when the adapter's constructor takes arguments, register a
        factory function that builds the instance::

            def adapter_factory():
                return MyDbAdapter("foo", 42)

            anosql.register_driver_adapter("mydb", adapter_factory)
    """
    _ADAPTERS[driver_name] = driver_adapter
def get_driver_adapter(driver_name):
    """Instantiate the driver adapter registered under ``driver_name``.

    Args:
        driver_name (str): The database driver name.

    Returns:
        object: A new driver adapter instance.

    Raises:
        ValueError: If no adapter was registered for ``driver_name``.
    """
    try:
        adapter_factory = _ADAPTERS[driver_name]
    except KeyError:
        raise ValueError("Encountered unregistered driver_name: {}".format(driver_name))
    return adapter_factory()
class SQLOperationType(object):
    """Enumeration (kind of) of anosql operation types.

    Each value corresponds to the trailing sigil on a ``-- name:`` query
    definition line (parsed in ``load_methods``).
    """
    INSERT_RETURNING = 0  # "<!" suffix
    INSERT_UPDATE_DELETE = 1  # "!" suffix
    INSERT_UPDATE_DELETE_MANY = 2  # "*!" suffix
    SCRIPT = 3  # "#" suffix
    SELECT = 4  # no suffix (default)
    SELECT_ONE_ROW = 5  # "?" suffix
class Queries:
    """Container object with dynamic methods built from SQL queries.

    The ``-- name`` definition comments in the SQL content determine what the
    dynamic methods of this class will be named.

    @DynamicAttrs
    """

    def __init__(self, queries=None):
        """Queries constructor.

        Args:
            queries (list(tuple)): Optional list of ``(query_name, fn)``
                pairs to register as methods on this instance.
        """
        if queries is None:
            queries = []
        self._available_queries = set()

        for query_name, fn in queries:
            self.add_query(query_name, fn)

    @property
    def available_queries(self):
        """Returns listing of all the available query methods loaded in this class.

        Returns:
            list(str): Sorted list of dot-separated method accessor names.
        """
        return sorted(self._available_queries)

    def __repr__(self):
        # Idiomatic formatting; output is identical to the previous
        # "Queries(" + list.__repr__() + ")" construction.
        return "Queries({!r})".format(self.available_queries)

    def add_query(self, query_name, fn):
        """Adds a new dynamic method to this class.

        Args:
            query_name (str): The method name as found in the SQL content.
            fn (function): The loaded query function.

        Returns:
            None
        """
        setattr(self, query_name, fn)
        self._available_queries.add(query_name)

    def add_child_queries(self, child_name, child_queries):
        """Adds a Queries object as a property.

        Args:
            child_name (str): The property name to group the child queries under.
            child_queries (Queries): Queries instance to add as sub-queries.

        Returns:
            None
        """
        setattr(self, child_name, child_queries)
        for child_query_name in child_queries.available_queries:
            # Children are listed with a dotted accessor path, e.g. "sub.fetch".
            self._available_queries.add("{}.{}".format(child_name, child_query_name))
def _create_fns(query_name, docs, op_type, sql, driver_adapter):
    """Build the callable(s) exposed for one parsed query.

    Returns a list of ``(method_name, callable)`` pairs: always the query
    function itself, plus a ``<name>_cursor`` context-manager variant for
    plain SELECT queries.
    """
    def query_fn(conn, *args, **kwargs):
        # Keyword arguments take precedence over positional ones.
        params = kwargs if kwargs else args

        if op_type == SQLOperationType.SELECT:
            return driver_adapter.select(conn, query_name, sql, params)
        if op_type == SQLOperationType.SELECT_ONE_ROW:
            rows = driver_adapter.select(conn, query_name, sql, params)
            # "?" queries yield the single row, or None when the result set
            # is empty or has more than one row.
            return rows[0] if len(rows) == 1 else None
        if op_type == SQLOperationType.INSERT_RETURNING:
            return driver_adapter.insert_returning(conn, query_name, sql, params)
        if op_type == SQLOperationType.INSERT_UPDATE_DELETE:
            return driver_adapter.insert_update_delete(conn, query_name, sql, params)
        if op_type == SQLOperationType.INSERT_UPDATE_DELETE_MANY:
            return driver_adapter.insert_update_delete_many(conn, query_name, sql, *params)
        if op_type == SQLOperationType.SCRIPT:
            return driver_adapter.execute_script(conn, sql)
        raise ValueError("Unknown op_type: {}".format(op_type))

    query_fn.__name__ = query_name
    query_fn.__doc__ = docs
    query_fn.sql = sql

    cursor_fn_name = "{}_cursor".format(query_name)

    def cursor_fn(conn, *args, **kwargs):
        params = kwargs if kwargs else args
        return driver_adapter.select_cursor(conn, query_name, sql, params)

    cursor_fn.__name__ = cursor_fn_name
    cursor_fn.__doc__ = docs
    cursor_fn.sql = sql

    if op_type == SQLOperationType.SELECT:
        return [(query_name, query_fn), (cursor_fn_name, cursor_fn)]
    return [(query_name, query_fn)]
def load_methods(sql_text, driver_adapter):
    """Parse one ``-- name:`` section of SQL text into method pairs.

    The first line carries the query name; its trailing sigil selects the
    operation type. Remaining ``-- `` comment lines become the docstring and
    everything else becomes the SQL body.
    """
    lines = sql_text.strip().splitlines()
    query_name = lines[0].replace("-", "_")

    # Two-character sigils must be checked before the bare "!" suffix so
    # "<!" and "*!" are not misclassified.
    suffix_to_op = [
        ("<!", SQLOperationType.INSERT_RETURNING),
        ("*!", SQLOperationType.INSERT_UPDATE_DELETE_MANY),
        ("!", SQLOperationType.INSERT_UPDATE_DELETE),
        ("#", SQLOperationType.SCRIPT),
        ("?", SQLOperationType.SELECT_ONE_ROW),
    ]
    op_type = SQLOperationType.SELECT
    for suffix, candidate in suffix_to_op:
        if query_name.endswith(suffix):
            op_type = candidate
            query_name = query_name[:-len(suffix)]
            break

    if not valid_query_name_pattern.match(query_name):
        raise SQLParseException(
            'name must convert to valid python variable, got "{}".'.format(query_name)
        )

    docs = ""
    sql = ""
    for line in lines[1:]:
        doc_match = doc_comment_pattern.match(line)
        if doc_match:
            docs += doc_match.group(1) + "\n"
        else:
            sql += line + "\n"

    sql = driver_adapter.process_sql(query_name, op_type, sql.strip())
    return _create_fns(query_name, docs.strip(), op_type, sql, driver_adapter)
def load_queries_from_sql(sql, driver_adapter):
    """Split a SQL string on ``-- name:`` definitions and load every query.

    Returns a flat list of ``(method_name, callable)`` pairs.
    """
    return [
        method_pair
        for query_text in query_name_definition_pattern.split(sql)
        if not empty_pattern.match(query_text)
        for method_pair in load_methods(query_text, driver_adapter)
    ]
def load_queries_from_file(file_path, driver_adapter):
    """Read a single ``.sql`` file and return its ``(name, fn)`` query pairs."""
    with open(file_path) as sql_file:
        contents = sql_file.read()
    return load_queries_from_sql(contents, driver_adapter)
def load_queries_from_dir_path(dir_path, query_loader):
    """Load queries from a directory tree of ``.sql`` files.

    Sub-directories become nested ``Queries`` objects named after the
    directory; files without a ``.sql`` extension are skipped.
    """
    if not os.path.isdir(dir_path):
        raise ValueError("The path {} must be a directory".format(dir_path))

    def _recurse_load_queries(path):
        queries = Queries()
        for entry in os.listdir(path):
            entry_path = os.path.join(path, entry)
            if os.path.isdir(entry_path):
                queries.add_child_queries(entry, _recurse_load_queries(entry_path))
            elif os.path.isfile(entry_path):
                # Ignore anything that is not a SQL file.
                if entry.endswith(".sql"):
                    for name, fn in load_queries_from_file(entry_path, query_loader):
                        queries.add_query(name, fn)
            else:
                # This should be practically unreachable.
                raise SQLLoadException(
                    "The path must be a directory or file, got {}".format(entry_path)
                )
        return queries

    return _recurse_load_queries(dir_path)
def from_str(sql, driver_name):
    """Load queries from a SQL string.

    Args:
        sql (str): A string containing SQL statements and anosql
            ``-- name:`` definitions.
        driver_name (str): The database driver to use to load and execute queries.

    Returns:
        Queries

    Example:
        Loading queries from a SQL string::

            import sqlite3
            import anosql

            sql_text = \"""
            -- name: get-all-greetings
            -- Get all the greetings in the database
            select * from greetings;

            -- name: get-users-by-username
            -- Get all the users from the database,
            -- and return it as a dict
            select * from users where username =:username;
            \"""

            queries = anosql.from_str(sql_text, driver_name="sqlite3")
            queries.get_all_greetings(conn)
            queries.get_users_by_username(conn, username="willvaughn")
    """
    driver_adapter = get_driver_adapter(driver_name)
    return Queries(load_queries_from_sql(sql, driver_adapter))
def from_path(sql_path, driver_name):
    """Load queries from a sql file, or a directory of sql files.

    Args:
        sql_path (str): Path to a ``.sql`` file or directory containing
            ``.sql`` files.
        driver_name (str): The database driver to use to load and execute
            queries.

    Returns:
        Queries

    Raises:
        SQLLoadException: If ``sql_path`` does not exist or is neither a
            file nor a directory.

    Example:
        Loading queries paths::

            import sqlite3
            import anosql

            queries = anosql.from_path("./greetings.sql", driver_name="sqlite3")
            queries2 = anosql.from_path("./sql_dir", driver_name="sqlite3")
    """
    if not os.path.exists(sql_path):
        raise SQLLoadException('File does not exist: {}.'.format(sql_path), sql_path)

    driver_adapter = get_driver_adapter(driver_name)

    if os.path.isdir(sql_path):
        return load_queries_from_dir_path(sql_path, driver_adapter)
    if os.path.isfile(sql_path):
        return Queries(load_queries_from_file(sql_path, driver_adapter))
    raise SQLLoadException(
        'The sql_path must be a directory or file, got {}'.format(sql_path),
        sql_path
    )
| 31.667598 | 100 | 0.636147 |
ace492fa8af1db199df93f084af2be21c7f75b8a | 4,095 | py | Python | unicodeutils.py | choo/etlcdb-image-extractor | 3a8070bd2bfaf164033218100cf16d17d2671452 | [
"MIT"
] | 10 | 2020-10-13T05:46:34.000Z | 2021-07-04T09:58:58.000Z | unicodeutils.py | choo/etlcdb-image-extractor | 3a8070bd2bfaf164033218100cf16d17d2671452 | [
"MIT"
] | 6 | 2020-02-05T21:19:43.000Z | 2022-03-12T00:10:34.000Z | unicodeutils.py | choo/etlcdb-image-extractor | 3a8070bd2bfaf164033218100cf16d17d2671452 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
#! -*- coding: utf-8 -*-
import fsutils
import unicodedata
class UnicodeUtils(object):
    """Convert legacy Japanese character codes to Unicode hex strings.

    Supports the JIS X 0201, JIS X 0208 and CO-59 code tables (loaded from
    the bundled charset_data files) and offers hiragana/katakana helpers.
    """

    # Lines starting with this prefix in the mapping files are comments.
    comment_prefix = '#'

    jis_x_0201_conf = {
        'mapping_file': './charset_data/JIS0201.TXT',
        'jis_code_col_idx': 0,
        'unicode_col_idx': 1
    }
    jis_x_0208_conf = {
        'mapping_file': './charset_data/JIS0208.TXT',
        'jis_code_col_idx': 1,
        'unicode_col_idx': 2
    }
    co59_mapping_file = './charset_data/co59-utf8.txt'

    # Boundaries of the hiragana and katakana Unicode blocks handled here.
    uni_hira_start = 'ぁ'  # 0x3041
    uni_hira_end = 'ゖ'  # 0x3096
    uni_kata_start = 'ァ'  # 0x30a1
    uni_kata_end = 'ヺ'  # 0x30fa
    # Fixed offset between a hiragana character and its katakana counterpart.
    uni_hira_kata_diff = ord(uni_kata_start) - ord(uni_hira_start)

    def __init__(self):
        conf_0201 = self.jis_x_0201_conf
        self.jis0201_to_uni = self._load_jis_mapping_info(
            conf_0201['mapping_file'],
            conf_0201['jis_code_col_idx'],
            conf_0201['unicode_col_idx']
        )
        conf_0208 = self.jis_x_0208_conf
        self.jis0208_to_uni = self._load_jis_mapping_info(
            conf_0208['mapping_file'],
            conf_0208['jis_code_col_idx'],
            conf_0208['unicode_col_idx']
        )
        self.co59_to_uni = self._load_co59_info(self.co59_mapping_file)

    def convert_to_unicode(self, char_code, char_set='JIS_X_0208'):
        '''
        Returns a unicode hex string such as "0x3042".
        char_set is one of JIS_X_0201, JIS_X_0208, CO-59.
        For the JIS sets char_code is given as a hex string; for CO-59 it
        is given as a tuple with 2 int elements.
        '''
        code = char_code
        if char_set == 'JIS_X_0208':
            # Unknown JIS codes fall through unchanged.
            code = self.jis0208_to_uni.get(char_code, code)
        elif char_set == 'JIS_X_0201':
            code = self.jis0201_to_uni.get(char_code, code)
        elif char_set == 'CO-59':
            code = self.co59_to_uni[char_code]
        return '0x{:04x}'.format(int(code, 16))

    def _load_jis_mapping_info(self, filepath, jis_code_col_idx, unicode_col_idx):
        # Build {jis_code: unicode_code} (both lower-case hex strings) from
        # one of the JISxxxx.TXT mapping tables.
        rows = fsutils.read_csv(
            filepath, has_header = False, comment_prefix = self.comment_prefix)
        return {
            row[jis_code_col_idx].lower(): row[unicode_col_idx].lower()
            for row in rows
        }

    def _load_co59_info(self, filepath):
        '''
        '▲' marks a character that could not be read in the source data,
        so it is replaced with the null char (0x00).
        '''
        mapping = {}
        for line in fsutils.read_lines(filepath):
            fields = line.split(':')
            glyph = fields[0]
            if glyph == '▲':
                glyph = '\x00'
            nums = fields[1].split(',')  # 2 elements
            mapping[(int(nums[0]), int(nums[1]))] = hex(ord(glyph))
        return mapping

    @classmethod
    def hira2kata(cls, s):
        """Convert every hiragana character in *s* to katakana."""
        shift = cls.uni_hira_kata_diff
        return ''.join(
            chr(ord(c) + shift) if cls.is_hiragana(c) else c
            for c in s)

    @classmethod
    def kata2hira(cls, s):
        """Convert katakana in *s* to hiragana where a counterpart exists."""
        shift = cls.uni_hira_kata_diff
        converted = []
        for c in s:
            # Only shift when the result lands inside the hiragana block.
            candidate = chr(ord(c) - shift) if cls.is_katakana(c) else None
            if candidate is not None and cls.is_hiragana(candidate):
                converted.append(candidate)
            else:
                converted.append(c)
        return ''.join(converted)

    @classmethod
    def is_hiragana(cls, char):
        """True iff *char* lies within the handled hiragana range."""
        return cls.uni_hira_start <= char <= cls.uni_hira_end

    @classmethod
    def is_katakana(cls, char):
        '''Judge if given char is katakana or not
        NOTE: halfwidth katakana is not considered as true in this method
        '''
        return cls.uni_kata_start <= char <= cls.uni_kata_end

    @staticmethod
    def normalize(s):
        '''
        Wrapper method of unicodedata.normalize using the NFKC form.
        '''
        return unicodedata.normalize('NFKC', s)
| 31.259542 | 83 | 0.571673 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.