hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace21fe5d373d8d83a4e507efcff004049718744 | 41 | py | Python | pyne/pyne/fortranformat/_exceptions.py | AllSafeCyberSecur1ty/Nuclear-Engineering | 302d6dcc7c0a85a9191098366b076cf9cb5a9f6e | [
"MIT"
] | 1 | 2022-03-26T20:01:13.000Z | 2022-03-26T20:01:13.000Z | pyne/pyne/fortranformat/_exceptions.py | AllSafeCyberSecur1ty/Nuclear-Engineering | 302d6dcc7c0a85a9191098366b076cf9cb5a9f6e | [
"MIT"
] | null | null | null | pyne/pyne/fortranformat/_exceptions.py | AllSafeCyberSecur1ty/Nuclear-Engineering | 302d6dcc7c0a85a9191098366b076cf9cb5a9f6e | [
"MIT"
] | 1 | 2022-03-26T19:59:13.000Z | 2022-03-26T19:59:13.000Z | class InvalidFormat(Exception):
pass
| 13.666667 | 31 | 0.756098 |
ace2202c43a30622061827e80e468be435a497f8 | 734 | py | Python | prefixSum/minimumValueToGetPositiveStepByStepSum.py | Jiganesh/High-On-DSA | 044a3941cec443a87e79d97962551d75a9639a57 | [
"MIT"
] | 76 | 2021-12-12T08:42:20.000Z | 2022-03-31T19:48:46.000Z | prefixSum/minimumValueToGetPositiveStepByStepSum.py | Jiganesh/High-On-DSA | 044a3941cec443a87e79d97962551d75a9639a57 | [
"MIT"
] | 4 | 2022-01-04T09:58:39.000Z | 2022-03-30T17:00:39.000Z | prefixSum/minimumValueToGetPositiveStepByStepSum.py | Jiganesh/High-On-DSA | 044a3941cec443a87e79d97962551d75a9639a57 | [
"MIT"
] | 13 | 2021-12-12T14:44:41.000Z | 2022-03-10T14:08:20.000Z | # https://leetcode.com/problems/minimum-value-to-get-positive-step-by-step-sum/
class Solution(object):
    # https://leetcode.com/problems/minimum-value-to-get-positive-step-by-step-sum/
    def minStartValue(self, nums):
        """Return the smallest positive start value so that every running
        step-by-step sum stays >= 1.

        Side effect (kept from the original): ``nums`` is overwritten in
        place with its prefix sums.

        :type nums: List[int]
        :rtype: int
        """
        running = 0
        best = 1  # answer is at least 1 by definition
        for idx in range(len(nums)):
            running += nums[idx]
            nums[idx] = running  # preserve the in-place prefix-sum mutation
            # need start + running >= 1 at every step
            best = max(best, 1 - running)
        return best
ace22140c69d3c8d23314d83f80c716b6eda16c0 | 59 | py | Python | 000818CoursPyGusto/Coursera000818PyBasicsHSEw01TASK011_elClock_20200508.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 000818CoursPyGusto/Coursera000818PyBasicsHSEw01TASK011_elClock_20200508.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 000818CoursPyGusto/Coursera000818PyBasicsHSEw01TASK011_elClock_20200508.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | n = int(input())
h = (n // 60) % 24
m = n % 60
print(h, m)
| 11.8 | 18 | 0.440678 |
ace221dd869eb56cd4cbd383e6c8a81fc291f2a4 | 10,147 | py | Python | hazelcast/serialization/serializer.py | SaitTalhaNisanci/hazelcast-python-client | af382081da02a360e826163b1026aa0f68b6e9b8 | [
"Apache-2.0"
] | null | null | null | hazelcast/serialization/serializer.py | SaitTalhaNisanci/hazelcast-python-client | af382081da02a360e826163b1026aa0f68b6e9b8 | [
"Apache-2.0"
] | null | null | null | hazelcast/serialization/serializer.py | SaitTalhaNisanci/hazelcast-python-client | af382081da02a360e826163b1026aa0f68b6e9b8 | [
"Apache-2.0"
] | null | null | null | import binascii
from datetime import datetime
from time import time
from hazelcast.serialization.bits import *
from hazelcast.serialization.api import StreamSerializer
from hazelcast.serialization.base import HazelcastSerializationError
from hazelcast.serialization.serialization_const import *
from hazelcast import six
from hazelcast.six.moves import range, cPickle
if not six.PY2:
long = int
class BaseSerializer(StreamSerializer):
def destroy(self):
pass
# DEFAULT SERIALIZERS
class NoneSerializer(BaseSerializer):
def read(self, inp):
return None
# "write(self, out, obj)" is never called so not implemented here
def get_type_id(self):
return CONSTANT_TYPE_NULL
class BooleanSerializer(BaseSerializer):
def read(self, inp):
return inp.read_boolean()
def write(self, out, obj):
out.write_boolean(obj)
def get_type_id(self):
return CONSTANT_TYPE_BOOLEAN
class ByteSerializer(BaseSerializer):
def read(self, inp):
return inp.read_byte()
def write(self, out, obj):
out.write_byte(obj)
def get_type_id(self):
return CONSTANT_TYPE_BYTE
class CharSerializer(BaseSerializer):
def read(self, inp):
return inp.read_char()
# "write(self, out, obj)" is never called so not implemented here
def get_type_id(self):
return CONSTANT_TYPE_CHAR
class ShortSerializer(BaseSerializer):
def read(self, inp):
return inp.read_short()
def write(self, out, obj):
out.write_short(obj)
def get_type_id(self):
return CONSTANT_TYPE_SHORT
class IntegerSerializer(BaseSerializer):
def read(self, inp):
return inp.read_int()
def write(self, out, obj):
if obj.bit_length() < 32:
out.write_int(obj)
else:
raise ValueError("Serialization only supports 32 bit ints")
def get_type_id(self):
return CONSTANT_TYPE_INTEGER
class LongSerializer(BaseSerializer):
def read(self, inp):
return inp.read_long()
def write(self, out, obj):
if obj.bit_length() < 64:
out.write_long(obj)
else:
raise ValueError("Serialization only supports 64 bit longs")
def get_type_id(self):
return CONSTANT_TYPE_LONG
class FloatSerializer(BaseSerializer):
def read(self, inp):
return inp.read_float()
def write(self, out, obj):
out.write_float(obj)
def get_type_id(self):
return CONSTANT_TYPE_FLOAT
class DoubleSerializer(BaseSerializer):
def read(self, inp):
return inp.read_double()
def write(self, out, obj):
out.write_double(obj)
def get_type_id(self):
return CONSTANT_TYPE_DOUBLE
class StringSerializer(BaseSerializer):
def read(self, inp):
return inp.read_utf()
def write(self, out, obj):
out.write_utf(obj)
def get_type_id(self):
return CONSTANT_TYPE_STRING
# ARRAY SERIALIZERS
class BooleanArraySerializer(BaseSerializer):
def read(self, inp):
return inp.read_boolean_array()
# "write(self, out, obj)" is never called so not implemented here
def get_type_id(self):
return CONSTANT_TYPE_BOOLEAN_ARRAY
class ByteArraySerializer(BaseSerializer):
def read(self, inp):
return inp.read_byte_array()
# "write(self, out, obj)" is never called so not implemented here
def get_type_id(self):
return CONSTANT_TYPE_BYTE_ARRAY
class CharArraySerializer(BaseSerializer):
def read(self, inp):
return inp.read_char_array()
# "write(self, out, obj)" is never called so not implemented here
def get_type_id(self):
return CONSTANT_TYPE_CHAR_ARRAY
class ShortArraySerializer(BaseSerializer):
def read(self, inp):
return inp.read_short_array()
# "write(self, out, obj)" is never called so not implemented here
def get_type_id(self):
return CONSTANT_TYPE_SHORT_ARRAY
class IntegerArraySerializer(BaseSerializer):
def read(self, inp):
return inp.read_int_array()
# "write(self, out, obj)" is never called so not implemented here
def get_type_id(self):
return CONSTANT_TYPE_INTEGER_ARRAY
class LongArraySerializer(BaseSerializer):
def read(self, inp):
return inp.read_long_array()
# "write(self, out, obj)" is never called so not implemented here
def get_type_id(self):
return CONSTANT_TYPE_LONG_ARRAY
class FloatArraySerializer(BaseSerializer):
def read(self, inp):
return inp.read_float_array()
# "write(self, out, obj)" is never called so not implemented here
def get_type_id(self):
return CONSTANT_TYPE_FLOAT_ARRAY
class DoubleArraySerializer(BaseSerializer):
def read(self, inp):
return inp.read_double_array()
# "write(self, out, obj)" is never called so not implemented here
def get_type_id(self):
return CONSTANT_TYPE_DOUBLE_ARRAY
class StringArraySerializer(BaseSerializer):
def read(self, inp):
return inp.read_utf_array()
# "write(self, out, obj)" is never called so not implemented here
def get_type_id(self):
return CONSTANT_TYPE_STRING_ARRAY
# EXTENSIONS
class DateTimeSerializer(BaseSerializer):
def read(self, inp):
long_time = inp.read_long()
return datetime.fromtimestamp(long_time / 1000.0)
def write(self, out, obj):
long_time = long(time.mktime(obj.timetuple()))
out.write_long(long_time)
def get_type_id(self):
return JAVA_DEFAULT_TYPE_DATE
class BigIntegerSerializer(BaseSerializer):
def read(self, inp):
length = inp.read_int()
if length == NULL_ARRAY_LENGTH:
return None
result = bytearray(length)
if length > 0:
inp.read_into(result, 0, length)
if result[0] & 0x80:
neg = bytearray()
for c in result:
neg.append(c ^ 0xFF)
return -1 * int(binascii.hexlify(neg), 16) - 1
return int(binascii.hexlify(result), 16)
def write(self, out, obj):
the_big_int = -obj-1 if obj < 0 else obj
end_index = -1 if (type(obj) == long and six.PY2) else None
hex_str = hex(the_big_int)[2:end_index]
if len(hex_str) % 2 == 1:
prefix = '0' # "f" if obj < 0 else "0"
hex_str = prefix + hex_str
num_array = bytearray(binascii.unhexlify(bytearray(hex_str, encoding="utf-8")))
if obj < 0:
neg = bytearray()
for c in num_array:
neg.append(c ^ 0xFF)
num_array = neg
out.write_byte_array(num_array)
def get_type_id(self):
return JAVA_DEFAULT_TYPE_BIG_INTEGER
class BigDecimalSerializer(BaseSerializer):
def read(self, inp):
raise NotImplementedError("Big decimal numbers not supported")
def write(self, out, obj):
raise NotImplementedError("Big decimal numbers not supported")
def get_type_id(self):
return JAVA_DEFAULT_TYPE_BIG_DECIMAL
class JavaClassSerializer(BaseSerializer):
def read(self, inp):
return inp.read_utf()
def write(self, out, obj):
out.write_utf(obj)
def get_type_id(self):
return JAVA_DEFAULT_TYPE_CLASS
class JavaEnumSerializer(BaseSerializer):
def read(self, inp):
"""
:param inp:
:return: a tuple of (Enum-name, Enum-value-name)
"""
return tuple(inp.read_utf(), inp.read_utf())
def write(self, out, obj):
enum_name, enum_val_name = obj
out.write_utf(enum_name)
out.write_utf(enum_val_name)
def get_type_id(self):
return JAVA_DEFAULT_TYPE_ENUM
class ArrayListSerializer(BaseSerializer):
def read(self, inp):
size = inp.read_int()
if size > NULL_ARRAY_LENGTH:
return [inp.read_object() for _ in range(0, size)]
return None
def write(self, out, obj):
size = NULL_ARRAY_LENGTH if obj is None else len(obj)
out.write_int(size)
for i in range(0, size):
out.write_object(obj[i])
def get_type_id(self):
return JAVA_DEFAULT_TYPE_ARRAY_LIST
class LinkedListSerializer(BaseSerializer):
def read(self, inp):
size = inp.read_int()
if size > NULL_ARRAY_LENGTH:
return [inp.read_object() for _ in range(0, size)]
return None
def write(self, out, obj):
raise NotImplementedError("writing Link lists not supported")
def get_type_id(self):
return JAVA_DEFAULT_TYPE_LINKED_LIST
class PythonObjectSerializer(BaseSerializer):
def read(self, inp):
str = inp.read_utf().encode()
return cPickle.loads(str)
def write(self, out, obj):
out.write_utf(cPickle.dumps(obj, 0).decode("utf-8"))
def get_type_id(self):
return PYTHON_TYPE_PICKLE
class IdentifiedDataSerializer(BaseSerializer):
def __init__(self, factories):
self._factories = factories
def write(self, out, obj):
out.write_boolean(True) # Always identified
out.write_int(obj.get_factory_id())
out.write_int(obj.get_class_id())
obj.write_data(out)
def read(self, inp):
is_identified = inp.read_boolean()
if not is_identified:
raise HazelcastSerializationError("Native clients only support IdentifiedDataSerializable!")
factory_id = inp.read_int()
class_id = inp.read_int()
factory = self._factories.get(factory_id, None)
if factory is None:
raise HazelcastSerializationError("No DataSerializerFactory registered for namespace: {}".format(factory_id))
identified = factory.get(class_id, None)
if identified is None:
raise HazelcastSerializationError(
"{} is not be able to create an instance for id: {} on factoryId: {}".format(factory, class_id, factory_id))
instance = identified()
instance.read_data(inp)
return instance
def get_type_id(self):
return CONSTANT_TYPE_DATA_SERIALIZABLE
| 26.219638 | 128 | 0.660392 |
ace2240b27c1df3d0bd6a7d578a83028c8150eec | 3,815 | py | Python | mooc_cookie.py | fichas/Down_Mooc | 9777755cdf44aadb10100ddcd6437f2f16afe98c | [
"MIT"
] | 18 | 2020-02-28T08:42:19.000Z | 2021-08-24T15:53:35.000Z | mooc_cookie.py | fichas/Down_Mooc | 9777755cdf44aadb10100ddcd6437f2f16afe98c | [
"MIT"
] | 1 | 2020-08-07T06:59:19.000Z | 2020-08-07T07:25:53.000Z | mooc_cookie.py | fichas/Down_Mooc | 9777755cdf44aadb10100ddcd6437f2f16afe98c | [
"MIT"
] | 2 | 2020-08-11T13:25:19.000Z | 2021-08-31T03:23:40.000Z | #!/usr/bin/python3
import requests
import json
import lxml
from time import sleep
import urllib.parse
from bs4 import BeautifulSoup
import sys
import os
class ShowProcess():
    """Console progress bar.

    Call :meth:`show_process` once per finished item (or with an explicit
    step index); when the counter reaches ``max_steps`` the bar closes
    itself and prints ``infoDone``.
    """
    # class-level defaults; __init__ shadows them per instance
    i = 0
    max_steps = 0
    max_arrow = 50  # width of the bar in characters
    infoDone = 'done'
    def __init__(self, max_steps, infoDone = 'Done'):
        self.max_steps = max_steps
        self.i = 0
        self.infoDone = infoDone
    def show_process(self, i=None):
        """Advance (or jump to step ``i``) and redraw the bar in place."""
        self.i = i if i is not None else self.i + 1
        arrows = int(self.i * self.max_arrow / self.max_steps)
        dashes = self.max_arrow - arrows
        pct = self.i * 100.0 / self.max_steps
        # trailing '\r' rewinds the cursor so the next draw overwrites this one
        bar = '[{0}{1}]{2:.2f}%\r'.format('>' * arrows, '-' * dashes, pct)
        sys.stdout.write(bar)
        sys.stdout.flush()
        if self.i >= self.max_steps:
            self.close()
    def close(self):
        """Finish the bar: newline, completion message, reset the counter."""
        print('')
        print(self.infoDone)
        self.i = 0
def objectid(strings):
    """Extract attachment objectids (documents/videos) from a chaoxing page.

    Scans every <script> tag; tags containing ``function()`` embed a JSON
    literal inside a ``try{ ... }catch`` block, whose ``attachments`` list
    is parsed for downloadable items.

    :param strings: HTML text of a knowledge/cards page.
    :return: list of objectid strings (reset to empty on a parse failure).
    """
    soup = BeautifulSoup(strings, 'lxml')
    anss=[]
    for i in list(soup.select('script')):
        # print(i)
        ss = str(i)
        if ss.find('function()') != -1:
            # carve the JSON object out of `try{ ... = {json};}catch(...)`
            ii = ss.split('try{')[1]
            ii = ii.lstrip()
            ii = ii.split('}catch')
            ii = ii[0].split('=', 1)[1]
            ii = ii.split(';')[0]
            ii = str(ii)
            try:
                text = json.loads(ii)
                for dic in text['attachments']:
                    # only doc and video attachments carry a downloadable objectid
                    if dic['property']['module']=='insertdoc'or dic['property']['module']=='insertvideo':
                        anss.append(dic['property']['objectid'])
            except:
                # NOTE(review): one malformed script discards ALL results
                # collected so far -- looks deliberate but worth confirming
                anss=[]
    return anss
def get_cookie(strr):
    """Parse a raw ``Cookie`` header string (``"k=v; k2=v2"``) into a dict.

    Only the first '=' of each pair splits key from value, so values may
    themselves contain '='. Raises ValueError on a pair without '='.
    """
    pairs = (item.lstrip().split('=', 1) for item in strr.split(';'))
    return {name: value for name, value in pairs}
def obj(strings):
    """Extract attachment objectids from <iframe data="..."> JSON payloads.

    :param strings: HTML text of a knowledge/cards page.
    :return: list of objectid strings; iframes whose ``data`` attribute is
        not valid JSON (or lacks ``objectid``) are skipped silently.
    """
    soup = BeautifulSoup(strings, 'lxml')
    anss = []
    for i in list(soup.select('iframe')):
        # print(i)
        ss=str(i['data'])
        ii=ss
        try:
            text = json.loads(ii)
            anss.append(text['objectid'])
        except:
            # malformed entry: keep what we have and continue
            anss = anss
    return anss
if __name__ == '__main__':
    # Interactive scraper: asks for a chaoxing MOOC course URL and a raw
    # cookie string, then writes attachment download links to output.txt.
    print('请输入要抓取的课程链接:')
    url=input('> ')
    # pull courseId / clazzid out of the pasted course URL's query string
    query = dict(urllib.parse.parse_qsl(urllib.parse.urlsplit(url).query))
    courseid=str(query['courseId'])
    clazzid=str(query['clazzid'])
    knows=[]  # knowledge-point ids scraped from the chapter list
    ans=[]  # collected attachment objectids
    url='https://mooc1-1.chaoxing.com/mycourse/studentstudycourselist?courseId='+courseid+'&clazzid='+clazzid
    print('请输入cookies:')
    strr = input('> ')
    cookie_dict=get_cookie(strr)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.106 Safari/537.36'
    }
    response = requests.request("GET", url, headers=headers, cookies=cookie_dict)
    soup=BeautifulSoup(response.text.encode('utf8'),'lxml')
    for i in soup.select('a'):
        # each chapter link's href looks like a JS call; the last quoted
        # argument between the parentheses is the knowledgeid -- TODO confirm
        ss=str(i['href'])
        know=ss.split('(')[1].split(')')[0].split(',')[-1]
        know=know[1:-1]  # strip the surrounding quotes
        knows.append(know)
    print('获取到 %d 条任务,正在生成下载链接...'%len(knows))
    process_bar = ShowProcess(len(knows), '下载链接已保存到output.txt')
    for i in knows:
        process_bar.show_process()
        url='http://mooc1-1.chaoxing.com/knowledge/cards?clazzid='+clazzid+'&courseid='+courseid+'&knowledgeid='+str(i)
        sleep(0.5)  # throttle requests to avoid being blocked
        response = requests.request("GET", url, headers=headers, cookies=cookie_dict)
        tmp=obj(response.text.encode('utf8'))
        ans+=tmp
    f = open("output.txt", "w")
    for i in ans:
        f.write('http://cs.ananas.chaoxing.com/download/'+str(i)+'\n')
    f.close()
| 27.846715 | 139 | 0.549934 |
ace22436640b0381d9fbefa81321cf54044c221b | 1,341 | py | Python | branches/spaghetty2/compile_source_basic.py | jeffhammond/spaghetty | e2dbe2dd2621110b899b21dff590906a579e8bf5 | [
"BSD-2-Clause"
] | 1 | 2018-01-05T16:13:08.000Z | 2018-01-05T16:13:08.000Z | branches/spaghetty2/compile_source_basic.py | jeffhammond/spaghetty | e2dbe2dd2621110b899b21dff590906a579e8bf5 | [
"BSD-2-Clause"
] | null | null | null | branches/spaghetty2/compile_source_basic.py | jeffhammond/spaghetty | e2dbe2dd2621110b899b21dff590906a579e8bf5 | [
"BSD-2-Clause"
] | null | null | null | import fileinput
import string
import sys
import os
# Goldstone old
# Build settings for the "Goldstone" machine: Intel Fortran (ifort) with
# Core2/SSE3 tuning. Paths are hard-coded to the author's checkout.
fortran_compiler = 'ifort'
fortran_link_flags = '-O1 -mtune=core2 -msse3 -align '  # NOTE(review): appears unused below -- confirm
fortran_opt_flags = '-O3 -mtune=core2 -msse3 -align -DBIGBLOCK=8 -c '
src_dir = '/home/jeff/code/spaghetty2/src/'
exe_dir = '/home/jeff/code/spaghetty2/exe/'
lib_name = 'tce_sort_new.a'  # static archive collecting all compiled kernels
modlabel = ''  # optional suffix appended to subroutine names
def perm(l):
    """Return all permutations of list *l* as a list of lists.

    perm([]) -> [[]] and perm([x]) -> [[x]] (a list of length <= 1 is its
    own single permutation). The original used ``xrange``, which does not
    exist on Python 3; ``range`` behaves identically here on both 2 and 3.
    """
    sz = len(l)
    if sz <= 1:
        return [l]
    # insert the head element at every position of each tail permutation
    return [p[:i]+[l[0]]+p[i:] for i in range(sz) for p in perm(l[1:])]
# Enumerate every (transpose order, loop order) pair, compile the matching
# generated Fortran kernel and archive the object file into lib_name.
# NOTE: Python 2 source (print statement below).
indices = ['4','3','2','1']
#all_permutations = perm(indices)
#all_permutations = [indices]
transpose_list = [indices]
#loop_list = perm(indices)
#transpose_list = perm(indices)
loop_list = perm(indices)
for transpose_order in transpose_list:
    A = transpose_order[0]
    B = transpose_order[1]
    C = transpose_order[2]
    D = transpose_order[3]
    for loop_order in loop_list:
        a = loop_order[0]
        b = loop_order[1]
        c = loop_order[2]
        d = loop_order[3]
        # kernel file name encodes both orders, e.g. transpose_4321_loop_1234.F
        subroutine_name = 'transpose_'+A+B+C+D+'_loop_'+a+b+c+d+modlabel
        source_name = subroutine_name+'.F'
        print fortran_compiler+' '+fortran_opt_flags+' '+src_dir+source_name
        os.system(fortran_compiler+' '+fortran_opt_flags+' '+src_dir+source_name)
        # append the object file to the archive, then remove it
        os.system('ar -r '+lib_name+' '+subroutine_name+'.o')
        os.system('rm '+subroutine_name+'.o')
        #os.system('mv '+subroutine_name+'.F '+src_dir)
| 26.82 | 75 | 0.692021 |
ace224fc684a16b6c326f0e56b997debdb86c61f | 695 | py | Python | examples/rtr/describe_put_files.py | CrowdStrike/falconpy-tools | 7589540e7367ecfeb3eeea27ca80e9240dd6af9f | [
"Unlicense"
] | 2 | 2021-10-14T16:52:51.000Z | 2021-11-24T13:34:08.000Z | examples/rtr/describe_put_files.py | CrowdStrike/falconpy-tools | 7589540e7367ecfeb3eeea27ca80e9240dd6af9f | [
"Unlicense"
] | 1 | 2021-11-12T17:00:46.000Z | 2021-11-12T17:00:48.000Z | examples/rtr/describe_put_files.py | CrowdStrike/falconpy-tools | 7589540e7367ecfeb3eeea27ca80e9240dd6af9f | [
"Unlicense"
] | 2 | 2021-09-17T18:13:46.000Z | 2021-09-25T03:47:15.000Z | #!/usr/bin/env python3
"""
Caracara Examples Collection.
describe_put_files.py
This example will use the API credentials configuration in your config.yml file to
list all the files that can be used with the PUT command.
"""
import logging
from caracara import Client
from examples.common import caracara_example, pretty_print
@caracara_example
def describe_put_files(**kwargs):
    """Describe all available PUT files and write the output to the log.

    The ``caracara_example`` decorator injects an authenticated ``client``
    and a ``logger`` via kwargs.
    """
    client: Client = kwargs['client']
    logger: logging.Logger = kwargs['logger']
    logger.info("Listing available PUT files")
    put_files = client.rtr.describe_put_files()
    pretty_print(put_files)
describe_put_files()
| 23.965517 | 82 | 0.759712 |
ace225358e666007cd69648302043fefd24da721 | 4,704 | py | Python | scripts/area_embedding.py | PyGeoL/GeoL | 67a5bd2f63091e19041094c14d419055fa5ce6f0 | [
"MIT"
] | 8 | 2018-03-09T16:44:38.000Z | 2021-04-07T11:33:30.000Z | scripts/area_embedding.py | PyGeoL/GeoL | 67a5bd2f63091e19041094c14d419055fa5ce6f0 | [
"MIT"
] | 4 | 2020-03-24T15:34:54.000Z | 2021-06-01T21:54:33.000Z | scripts/area_embedding.py | PyGeoL/GeoL | 67a5bd2f63091e19041094c14d419055fa5ce6f0 | [
"MIT"
] | 1 | 2020-05-13T14:30:55.000Z | 2020-05-13T14:30:55.000Z | import os
import errno
import pandas as pd
import geopandas as gpd
from geopandas import GeoDataFrame
from shapely.geometry import Point
import sys
import getopt
sys.path.append('./GeoL')
from geol.utils import utils
import pathlib
import re
import gensim
import numpy as np
from sklearn import preprocessing
from scipy import stats
import seaborn as sns
sns.set_style("ticks")
sns.set_context("paper")
from sklearn.model_selection import train_test_split
import xgboost
from xgboost.sklearn import XGBRegressor
from sklearn import metrics # Additional scklearn functions
from sklearn.model_selection import GridSearchCV # Perforing grid search
from sklearn.svm import SVR
from sklearn.model_selection import learning_curve
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.svm import LinearSVR
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
import sklearn.metrics as metrics
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 15, 4
import csv
# SET CITY NAME
# ,"milan","madrid","berlin","paris","stockholm"]
# Cities to process; extend CITIES with the commented names above if needed.
CITIES = ["barcelona", "london", "rome"]
SIZES = [50, 100, 200, 250]  # grid sizes; NOTE(review): apparently unused below -- confirm
# Base directory
BASE_DIR = os.path.abspath(".")
# base directory for data files
BASE_DIR_DATA = os.path.join(BASE_DIR, "data")
def csv_2_geodf(dataset_inputfile):
    """
    Loads a tab-separated .csv (expects 'longitude'/'latitude' columns)
    and returns a GeoPandas GeoDataFrame in EPSG:4326.
    """
    # LOAD DATASET
    df = pd.DataFrame(pd.read_csv(
        dataset_inputfile, sep="\t", low_memory=False))
    print('dataset_inputfile', dataset_inputfile)
    print(df.columns)
    print('dataset caricato')
    # Create Shapely Point Objects
    geometry = [Point(xy) for xy in zip(df['longitude'], df['latitude'])]
    # Store Points in a GeoDataFrame
    data = gpd.GeoDataFrame(df, crs={'init': 'epsg:4326'}, geometry=geometry)
    # data = data.to_crs({'init': 'epsg:4326'})
    # reprojecting to the frame's own CRS is effectively a no-op, kept as-is
    data.to_crs(data.crs, inplace=True)
    return data
def sum_vectors(w_list):
    """
    Inputs a list of Numpy vectors.
    Returns their element-wise sum (0 for an empty list).

    Bug fixed: the previous version rebound the accumulator to the loop
    variable (``for i, e in enumerate(w_list): e += w_list[i]``), so it
    returned twice the *last* vector, and because ``+=`` on a NumPy array
    mutates in place it also corrupted the caller's vectors. This version
    accumulates out-of-place and never mutates its input.
    """
    total = 0
    for vec in w_list:
        total = total + vec  # out-of-place add: never mutates vec
    return total
def cell_vector_representation(poi_grid, w2v_model, level, output_file, size):
    """
    Takes as input a spatial grid with POIs for each cell, a Word2Vec model, and a level of detail
    For each cell:
    Looks up each category in a cell for the given level in the W2V model, taking the corresponding vector representation
    Sums all the vectors
    Writes one TSV line per cell to ``output_file``: "cellID<TAB>v1<TAB>v2..."

    Assumes ``w2v_model`` is a path loadable by gensim's Word2Vec and that
    ``model[category]`` key lookup works (pre-4.0 gensim API) -- TODO confirm.
    """
    # load shapefile of mapped POIs
    gdf = csv_2_geodf(poi_grid)
    # load w2v_model
    model = gensim.models.Word2Vec.load(w2v_model)
    # group every cell
    grouped_gdf = gdf.groupby('cellID')
    output = {}
    with open(output_file, 'w') as out:
        for cell, group in grouped_gdf:
            output[cell] = []
            for categories_raw in group['categories']:
                # select level: keep the deepest element of the chosen level
                category = utils.select_category(
                    categories_raw.split(':'), level)[-1]
                # lookup category in w2v; out-of-vocabulary categories are skipped
                try:
                    vector = model[category]
                    output[cell].append(np.array(vector))
                except(KeyError):
                    pass
            # cells with no known category get a zero vector of length `size`
            if len(output[cell]) == 0:
                output[cell] = [np.zeros(int(size))]
            # sum vectors (delegated to sum_vectors)
            sum_w = sum_vectors(output[cell])
            sum_w_str = str("\t".join(map(str, sum_w)))
            text_to_write = str(cell) + '\t' + sum_w_str + '\n'
            out.write(text_to_write)
# Driver: for each city, pair every mapped POI grid with every trained
# word2vec model and emit one embedding file per combination.
for CITY in CITIES:
    print(CITY, CITIES)
    BASE_DIR_CITY = os.path.join(BASE_DIR_DATA, CITY)
    MODELS_DIR = os.path.join(BASE_DIR_CITY, 'output-skip', 'models')
    GRID_DIR = os.path.join(BASE_DIR_CITY, 'mapped')
    EMBEDDINGS_DIR = os.path.join(BASE_DIR_CITY, 'embeddings')
    for MAPPED_GRID in os.listdir(GRID_DIR):
        POI_GRID = os.path.join(GRID_DIR, MAPPED_GRID)
        # grid size is encoded as the last '_'-separated token of the stem
        POI_GRID_SIZE = MAPPED_GRID.split('.')[0].split('_')[-1]
        for MODEL in os.listdir(MODELS_DIR):
            W2V_MODEL = os.path.join(MODELS_DIR, MODEL)
            OUTPUT_NAME = CITY + "_gs" + POI_GRID_SIZE + \
                "_"+MODEL.split('.')[0] + '.txt'
            OUTPUT_PATH = os.path.join(EMBEDDINGS_DIR, OUTPUT_NAME)
            print(OUTPUT_PATH)
            # embedding dimensionality is read from the model file name (_s<N>_)
            m = re.search('_s([0-9]+)_', MODEL)
            if m:
                size = m.group(1)
                print(OUTPUT_PATH)
                # level of detail is hard-coded to 2 here
                cell_vector_representation(
                    POI_GRID, W2V_MODEL, 2, OUTPUT_PATH, size)
ace22618efed47d2bf623782408e5ecc40649a07 | 92,009 | py | Python | xalpha/universal.py | Razorro/xalpha | bcecd53dc9d081deb1b8235437a4f6b74951c23d | [
"MIT"
] | null | null | null | xalpha/universal.py | Razorro/xalpha | bcecd53dc9d081deb1b8235437a4f6b74951c23d | [
"MIT"
] | null | null | null | xalpha/universal.py | Razorro/xalpha | bcecd53dc9d081deb1b8235437a4f6b74951c23d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
modules for universal fetcher that gives historical daily data and realtime data
for almost everything in the market
"""
import os
import sys
import time
import datetime as dt
import numpy as np
import pandas as pd
import logging
import inspect
from bs4 import BeautifulSoup
from functools import wraps, lru_cache
from uuid import uuid4
from sqlalchemy import exc
from dateutil.relativedelta import relativedelta
try:
from jqdatasdk import (
get_index_weights,
query,
get_fundamentals,
valuation,
get_query_count,
finance,
get_index_stocks,
macro,
get_price,
)
    # local import path (environment with jqdatasdk installed)
except ImportError:
try:
from jqdata import finance, macro # 云平台导入
except ImportError:
pass
from xalpha.info import basicinfo, fundinfo, mfundinfo, get_fund_holdings
from xalpha.indicator import indicator
from xalpha.cons import (
rget,
rpost,
rget_json,
rpost_json,
tz_bj,
last_onday,
region_trans,
today_obj,
_float,
)
from xalpha.provider import data_source
from xalpha.exceptions import DataPossiblyWrong, ParserFailure
pd.options.mode.chained_assignment = None # turn off setwith copy warning
thismodule = sys.modules[__name__]
xamodule = sys.modules["xalpha"]
logger = logging.getLogger(__name__)
def tomorrow_ts():
    """Return the POSIX timestamp (float seconds) of this moment plus one day."""
    return (dt.datetime.now() + dt.timedelta(days=1)).timestamp()
def has_weekday(start, end):
    """Return True if the inclusive date span [start, end] contains a weekday.

    :param start: anything ``pd.date_range`` accepts (date string or datetime).
    :param end: idem.
    :return: bool. The original fell through returning implicit ``None`` when
        no weekday was found; an explicit ``False`` is returned instead, which
        is backward compatible for boolean-context callers.
    """
    return any(d.weekday() < 5 for d in pd.date_range(start, end))
def ts2pdts(ts):
    """Convert a millisecond epoch timestamp to a naive datetime truncated to
    midnight, evaluated in Beijing time (``tz_bj`` from xalpha.cons --
    presumably Asia/Shanghai, confirm there).
    """
    dto = dt.datetime.fromtimestamp(ts / 1000, tz=tz_bj).replace(tzinfo=None)
    return dto.replace(
        hour=0, minute=0, second=0, microsecond=0
    )  # xueqiu US-stock timestamps are US midnight; after shifting to Beijing time, dropping h/m/s realigns the trading date
def decouple_code(code):
    """
    Split a possibly suffixed code such as ``SH600000.A`` into the bare code
    and a price-adjustment flag.

    :param code: str, e.g. ``SH600000``, ``SH600000.A``, ``SH600000.b``
    :return: Tuple (code, type_) with type_ in {"before", "after", "normal"};
        unsuffixed input defaults to "before".
    """
    if len(code[1:].split(".")) > 1:  # .SPI in US stock!
        parts = code.split(".")
        flag = parts[-1]
        base = ".".join(parts[:-1])
        initial = flag[:1]
        if initial in ("b", "B"):
            return base, "before"
        if initial in ("a", "A"):
            return base, "after"
        if initial in ("n", "N"):
            return base, "normal"
        # unknown flag: warn and fall back to the default adjustment
        logger.warning(
            "unrecoginzed flag for adjusted factor %s, use default" % flag
        )
        return base, "before"
    return code, "before"
def lru_cache_time(ttl=None, maxsize=None):
    """
    TTL support on lru_cache

    :param ttl: float or int, seconds
    :param maxsize: int, maxsize for lru_cache
    :return: decorator
    """
    def wrapper(func):
        # Lazy function that makes sure the lru_cache() invalidate after X secs
        # Trick: an extra leading argument `_ttl` (round(time.time()/ttl)) is
        # folded into the cache key, so entries from an older time window miss.
        @lru_cache(maxsize)
        def time_aware(_ttl, *args, **kwargs):
            return func(*args, **kwargs)
        # the cached inner function is also published on this module as
        # `<name>_ttl`, so it can be looked up (and cache-cleared) externally;
        # NOTE(review): two decorated functions with the same name would clobber
        # each other's slot here
        setattr(thismodule, func.__name__ + "_ttl", time_aware)
        @wraps(func)
        def newfunc(*args, **kwargs):
            ttl_hash = round(time.time() / ttl)
            f_ttl = getattr(thismodule, func.__name__ + "_ttl")
            return f_ttl(ttl_hash, *args, **kwargs)
        return newfunc
    return wrapper
# TODO: find a proper caching horizon for the xueqiu token
@lru_cache_time(ttl=300)
def get_token():
    """
    Fetch xueqiu's auth token (``xq_a_token``). Available anonymously, and
    appears constant over short spans (it does change over long time scales).
    :return: str
    """
    r = rget("https://xueqiu.com", headers={"user-agent": "Mozilla"})
    return r.cookies["xq_a_token"]
def get_historical_fromxq(code, count, type_="before", full=False):
    """
    Fetch daily kline history for ``code`` from xueqiu.

    :param code: str, xueqiu symbol.
    :param count: int, number of most recent trading days to pull.
    :param type_: str. normal, before, after (price-adjustment mode).
    :param full: bool, if True also request valuation columns (pe/pb/...).
    :return: pd.DataFrame with a normalized ``date`` column.
    """
    url = "https://stock.xueqiu.com/v5/stock/chart/kline.json?symbol={code}&begin={tomorrow}&period=day&type={type_}&count=-{count}"
    if full:
        url += "&indicator=kline,pe,pb,ps,pcf,market_capital,agt,ggt,balance"
    # pe here is TTM data
    r = rget_json(
        url.format(
            code=code, tomorrow=int(tomorrow_ts() * 1000), count=count, type_=type_
        ),
        cookies={"xq_a_token": get_token()},
        headers={"user-agent": "Mozilla/5.0"},
    )
    df = pd.DataFrame(data=r["data"]["item"], columns=r["data"]["column"])
    df["date"] = (df["timestamp"]).apply(ts2pdts)  # reset hours to zero
    return df
@lru_cache()
def get_industry_fromxq(code):
    """
    Fetch the industry stock list for ``code`` from xueqiu.
    part of symbols has empty industry information
    :param code:
    :return: dict, raw JSON response.
    """
    url = (
        "https://xueqiu.com/stock/industry/stockList.json?code=%s&type=1&size=100"
        % code
    )
    # authenticated with the cached anonymous token; result memoized per code
    r = rget_json(url, cookies={"xq_a_token": get_token()})
    return r
def get_historical_fromcninvesting(curr_id, st_date, end_date, app=False):
    """Fetch a daily historical table from cn.investing.com by posting to the
    HistoricalDataAjax endpoint and scraping the returned HTML table.

    :param curr_id: investing.com internal instrument id (see get_investing_id).
    :param st_date: str, start date in %Y/%m/%d.
    :param end_date: str, end date in %Y/%m/%d.
    :param app: bool, use the mobile-app host and headers instead of the web one.
    :return: pd.DataFrame keeping the site's original (Chinese) column headers.
    """
    data = {
        "curr_id": curr_id,
        # "smlID": smlID, # ? but seems to be fixed with curr_id, it turns out it doesn't matter
        "st_date": st_date, # %Y/%m/%d
        "end_date": end_date,
        "interval_sec": "Daily",
        "sort_col": "date",
        "sort_ord": "DESC",
        "action": "historical_data",
    }
    if not app: # fetch from web api
        r = rpost(
            "https://cn.investing.com/instruments/HistoricalDataAjax",
            data=data,
            headers={
                "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4)\
            AppleWebKit/537.36 (KHTML, like Gecko)",
                "Host": "cn.investing.com",
                "X-Requested-With": "XMLHttpRequest",
            },
        )
    else: # fetch from app api
        r = rpost(
            "https://cnappapi.investing.com/instruments/HistoricalDataAjax",
            data=data,
            headers={
                "Accept": "*/*",
                "Accept-Encoding": "gzip",
                "Accept-Language": "zh-cn",
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "User-Agent": "Investing.China/0.0.3 CFNetwork/1121.2.2 Darwin/19.3.0",
                "ccode": "CN",
                #'ccode_time': '1585551041.986028',
                "x-app-ver": "117",
                "x-meta-ver": "14",
                "x-os": "ios",
                "x-uuid": str(uuid4()),
                "Host": "cn.investing.com",
                "X-Requested-With": "XMLHttpRequest",
            },
        )
    s = BeautifulSoup(r.text, "lxml")
    # table headers become the dataframe columns
    dfdict = {}
    cols = []
    for col in s.find_all("th"):
        dfdict[str(col.contents[0])] = []
        cols.append(str(col.contents[0]))
    num_cols = len(cols)
    # cells are read row-major; the last 5 <td> cells are dropped --
    # presumably a page footer, TODO confirm against a live response
    for i, td in enumerate(s.find_all("td")[:-5]):
        if cols[i % num_cols] == "日期":
            # the date column is parsed from the Chinese date format
            dfdict[cols[i % num_cols]].append(
                dt.datetime.strptime(str(td.string), "%Y年%m月%d日")
            )
        else:
            dfdict[cols[i % num_cols]].append(str(td.string))
    return pd.DataFrame(dfdict)
def prettify(df):
    """Normalize a raw cn.investing.com dataframe: translate the Chinese
    column names, sort ascending by date if needed, and coerce the numeric
    columns through ``_float`` (which handles strings with separators).

    Renames ``df`` in place and returns a new frame; relies on the module-level
    ``pd.options.mode.chained_assignment = None`` to silence the
    copy-assignment warning when filling ``df1``.
    """
    _map = {
        "日期": "date",
        "收盘": "close",
        "开盘": "open",
        "高": "high",
        "低": "low",
        "涨跌幅": "percent",
        "交易量": "volume",
    }
    df.rename(_map, axis=1, inplace=True)
    # the site returns rows newest-first; flip to chronological order
    if len(df) > 1 and df.iloc[1]["date"] < df.iloc[0]["date"]:
        df = df[::-1]
    # df = df[["date", "open", "close", "high", "low", "percent"]]
    df1 = df[["date"]]
    for k in ["open", "close", "high", "low", "volume"]:
        if k in df.columns:
            df1[k] = df[k].apply(_float)
    df1["percent"] = df["percent"]
    return df1
def dstr2dobj(dstr):
    """Parse a date string in Y/m/d, Y.m.d, Y-m-d or compact Ymd form
    into a ``datetime`` object."""
    # try each delimited format in the same order as the original chain
    for sep, fmt in (("/", "%Y/%m/%d"), (".", "%Y.%m.%d"), ("-", "%Y-%m-%d")):
        if len(dstr.split(sep)) > 1:
            return dt.datetime.strptime(dstr, fmt)
    # no delimiter found: compact %Y%m%d
    return dt.datetime.strptime(dstr, "%Y%m%d")
@lru_cache(maxsize=1024)
def get_investing_id(suburl, app=False):
    """Resolve a cn.investing.com page path to its internal numeric pair id.

    :param suburl: str, site path such as ``equities/...`` (with or without a
        leading "/").
    :param app: bool, if True scrape via the mobile-app host with app headers.
    :return: str, the numeric instrument id embedded in the quote page.
    """
    if not app:
        url = "https://cn.investing.com"
    else:
        url = "https://cnappapi.investing.com"
    if not suburl.startswith("/"):
        url += "/"
    url += suburl
    if not app:
        headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36"
        }
    else:
        headers = {
            "Accept": "*/*",
            "Accept-Encoding": "gzip",
            "Accept-Language": "zh-cn",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "User-Agent": "Investing.China/0.0.3 CFNetwork/1121.2.2 Darwin/19.3.0",
            "ccode": "CN",
            #'ccode_time': '1585551041.986028',
            "x-app-ver": "117",
            "x-meta-ver": "14",
            "x-os": "ios",
            "x-uuid": str(uuid4()),
            "Host": "cn.investing.com",
            "X-Requested-With": "XMLHttpRequest",
        }
    r = rget(
        url,
        headers=headers,
    )
    s = BeautifulSoup(r.text, "lxml")
    # span#last_last carries the id in a dash-separated class name,
    # presumably "pid-<id>-last" -- TODO confirm against a live page
    pid = s.find("span", id="last_last")["class"][-1].split("-")[1]
    return pid
def _variate_ua():
last = 20 + np.random.randint(20)
ua = []
ua.append(
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko)"
)
ua.append(
"Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1"
)
choice = np.random.randint(2)
return ua[choice][:last]
@lru_cache_time(ttl=120, maxsize=128)
def get_rmb(start=None, end=None, prev=360, currency="USD/CNY"):
    """
    Fetch RMB central parity rates from chinamoney.com.cn.
    NOTE: the upstream official API is rather unstable.

    :param start: str date; defaults to ``end - prev`` days.
    :param end: str date; defaults to today.
    :param prev: int. Look-back window in days when start is omitted.
    :param currency: str like "USD/CNY". Pairs the API quotes the other way
        round are inverted automatically (see ``is_inverse`` below).
    :return: pd.DataFrame with columns ["date", "close"], ascending date.
    """
    bl = ["USD", "EUR", "100JPY", "HKD", "GBP", "AUD", "NZD", "SGD", "CHF", "CAD"]
    al = [
        "MYR",
        "RUB",
        "ZAR",
        "KRW",
        "AED",
        "SAR",
        "HUF",
        "PLN",
        "DKK",
        "SEK",
        "NOK",
        "TRY",
        "MXN",
        "THB",
    ]
    # bl: currencies the API quotes as XXX/CNY; al: quoted as CNY/XXX.
    # If the requested pair is in the opposite direction, flip it and
    # invert the rate at the end.
    is_inverse = False
    if (currency[:3] in al) or (currency[4:] in bl):
        is_inverse = True
        currency = currency[4:] + "/" + currency[:3]
    # NOTE(review): "¤cy" below looks like HTML-entity mangling of
    # "&currency" (&curren; renders as ¤) — verify against the live API.
    url = "http://www.chinamoney.com.cn/ags/ms/cm-u-bk-ccpr/CcprHisNew?startDate={start_str}&endDate={end_str}¤cy={currency}&pageNum=1&pageSize=300"
    if not end:
        end_obj = today_obj()
    else:
        end_obj = dstr2dobj(end)
    if not start:
        start_obj = end_obj - dt.timedelta(prev)
    else:
        start_obj = dstr2dobj(start)
    start_str = start_obj.strftime("%Y-%m-%d")
    end_str = end_obj.strftime("%Y-%m-%d")
    count = (end_obj - start_obj).days + 1
    rl = []
    # The API is quirky: rotating the User-Agent frequently keeps it usable.
    headers = {
        "Referer": "http://www.chinamoney.com.cn/chinese/bkccpr/",
        "Origin": "http://www.chinamoney.com.cn",
        "Host": "www.chinamoney.com.cn",
        "X-Requested-With": "XMLHttpRequest",
    }
    if count <= 360:
        headers.update({"user-agent": _variate_ua()})
        r = rpost_json(
            url.format(start_str=start_str, end_str=end_str, currency=currency),
            headers=headers,
        )
        rl.extend(r["records"])
    else:  # data more than 1 year cannot be fetched once due to API limitation
        # Walk backwards in 360-day windows [sepn, sepo] until start is covered.
        sepo_obj = end_obj
        sepn_obj = sepo_obj - dt.timedelta(360)
        # sep0_obj = end_obj - dt.timedelta(361)
        while sepn_obj > start_obj:  # [sepn sepo]
            headers.update({"user-agent": _variate_ua()})
            r = rpost_json(
                url.format(
                    start_str=sepn_obj.strftime("%Y-%m-%d"),
                    end_str=sepo_obj.strftime("%Y-%m-%d"),
                    currency=currency,
                ),
                headers=headers,
            )
            rl.extend(r["records"])
            sepo_obj = sepn_obj - dt.timedelta(1)
            sepn_obj = sepo_obj - dt.timedelta(360)
        # final partial window [start_obj, sepo_obj]
        headers.update({"user-agent": _variate_ua()})
        r = rpost_json(
            url.format(
                start_str=start_obj.strftime("%Y-%m-%d"),
                end_str=sepo_obj.strftime("%Y-%m-%d"),
                currency=currency,
            ),
            headers=headers,
        )
        rl.extend(r["records"])
    data = {"date": [], "close": []}
    for d in rl:
        data["date"].append(pd.Timestamp(d["date"]))
        data["close"].append(d["values"][0])
    df = pd.DataFrame(data)
    df = df[::-1]  # records arrive newest-first; flip to ascending
    df["close"] = pd.to_numeric(df["close"])
    if is_inverse:
        df["close"] = 1 / df["close"]
    return df
def get_fund(code):
    """
    Fetch daily net-value history for a domestic fund via the ttjj backend.

    :param code: str. Fund code with a one-letter prefix:
        "F"  — unit net value (codes starting F96 are routed to the
               oversea ttjj API);
        "T"  — accumulated net value (totvalue, dividends included);
        "M"  — money-market fund.
    :return: pd.DataFrame with columns ["date", "close"].
    :raises ParserFailure: for an unrecognized prefix.
    """
    # 随意设置非空 path,防止嵌套缓存到 fundinfo
    # (a non-empty ``path`` prevents nested caching inside fundinfo)
    if code[0] == "F":
        if code.startswith("F96"):
            # NOTE(review): the oversea helper needs start/end strings; calling
            # it without them looks like it would fail — confirm with callers.
            return get_historical_from_ttjj_oversea(code)
        else:
            df = fundinfo(code[1:], path="nobackend", priceonly=True).price
    elif code[0] == "T":
        df = fundinfo(code[1:], path="nobackend", priceonly=True).price
        df["netvalue"] = df["totvalue"]  # expose accumulated value as netvalue
    elif code[0] == "M":
        df = mfundinfo(code[1:], path="nobackend").price
    else:
        raise ParserFailure("Unknown fund code %s" % code)
    df["close"] = df["netvalue"]
    return df[["date", "close"]]
def get_historical_from_ttjj_oversea(code, start=None, end=None):
    """
    Daily net values of oversea funds (F96xxxx) from the 1234567.com.cn
    (ttjj) oversea API.

    :param code: str. An "F" prefix is stripped if present.
    :param start: str, %Y%m%d. Required — used to size the result page.
    :param end: str, %Y%m%d. Required.
    :return: pd.DataFrame with ["date", "close"], ascending date order.
    """
    if code.startswith("F"):
        code = code[1:]
    # one page sized to cover the whole [start, end] span
    pagesize = (
        dt.datetime.strptime(end, "%Y%m%d") - dt.datetime.strptime(start, "%Y%m%d")
    ).days + 1
    r = rget_json(
        "http://overseas.1234567.com.cn/overseasapi/OpenApiHander.ashx?api=HKFDApi&m=MethodJZ&hkfcode={hkfcode}&action=2&pageindex=0&pagesize={pagesize}&date1={startdash}&date2={enddash}&callback=".format(
            hkfcode=get_hkfcode(code),
            pagesize=pagesize,
            startdash=start[:4] + "-" + start[4:6] + "-" + start[6:],
            enddash=end[:4] + "-" + end[4:6] + "-" + end[6:],
        )
    )
    datalist = {"date": [], "close": []}
    for dd in r["Data"]:
        datalist["date"].append(pd.to_datetime(dd["PDATE"]))
        datalist["close"].append(dd["NAV"])
    df = pd.DataFrame(datalist)
    # clip to the requested window (string bounds are parsed by pandas)
    df = df[df["date"] <= end]
    df = df[df["date"] >= start]
    df = df.sort_values("date", ascending=True)
    return df
def get_portfolio_fromttjj(code, start=None, end=None):
    """
    Quarterly asset-allocation (stock/bond/cash ratios and total assets)
    of a fund, scraped from fundf10.eastmoney.com.

    :param code: str. An "F" prefix is stripped if present.
    :param start: str, %Y%m%d.
    :param end: str, %Y%m%d.
    :return: pd.DataFrame with date/stock_ratio/bond_ratio/cash_ratio/assets
        in ascending date order, or None when the span is under 90 days.
    """
    startobj = dt.datetime.strptime(start, "%Y%m%d")
    endobj = dt.datetime.strptime(end, "%Y%m%d")
    if (endobj - startobj).days < 90:
        return None  # note start is always 1.1 4.1 7.1 10.1 in incremental updates
    if code.startswith("F"):
        code = code[1:]
    r = rget("http://fundf10.eastmoney.com/zcpz_{code}.html".format(code=code))
    s = BeautifulSoup(r.text, "lxml")
    table = s.find("table", class_="tzxq")
    df = pd.read_html(str(table))[0]
    df["date"] = pd.to_datetime(df["报告期"])
    # "---" marks a missing ratio; treat it as 0% before stripping the % sign
    df["stock_ratio"] = df["股票占净比"].replace("---", "0%").apply(lambda s: _float(s[:-1]))
    df["bond_ratio"] = df["债券占净比"].replace("---", "0%").apply(lambda s: _float(s[:-1]))
    df["cash_ratio"] = df["现金占净比"].replace("---", "0%").apply(lambda s: _float(s[:-1]))
    # df["dr_ratio"] = df["存托凭证占净比"].replace("---", "0%").apply(lambda s: xa.cons._float(s[:-1]))
    df["assets"] = df["净资产(亿元)"]
    df = df[::-1]
    return df[["date", "stock_ratio", "bond_ratio", "cash_ratio", "assets"]]
# This is the most elegant approach to dispatching get_daily; the definition can be this simple.
# You don't actually need to bother with start/end handling — everything is taken care of by ``cachedio``.
@data_source("jq")
def get_fundshare_byjq(code, **kws):
    """
    Daily share counts of an exchange-traded fund from joinquant's
    FUND_SHARE_DAILY table (requires the jq data source to be enabled).

    :param code: str. Exchange-prefixed code; converted to jq format.
    :param kws: must contain "start" and "end" date strings.
    :return: pd.DataFrame with columns ["date", "shares"].
    """
    code = _inverse_convert_code(code)
    df = finance.run_query(
        query(finance.FUND_SHARE_DAILY)
        .filter(finance.FUND_SHARE_DAILY.code == code)
        .filter(finance.FUND_SHARE_DAILY.date >= kws["start"])
        .filter(finance.FUND_SHARE_DAILY.date <= kws["end"])
        .order_by(finance.FUND_SHARE_DAILY.date)
    )
    df["date"] = pd.to_datetime(df["date"])
    df = df[["date", "shares"]]
    return df
@lru_cache(maxsize=1024)
def get_futu_id(code):
    """
    Scrape futunn.com for a security's internal id and market type.

    :param code: str, e.g. "00700-HK" as used in futunn stock-page URLs.
    :return: tuple (security_id, market_type), both str.
    """
    r = rget("https://www.futunn.com/stock/{code}".format(code=code))
    # The page embeds "securityId = '...';" and "marketType = '...';" in
    # inline JS; slice a 30-char window after each marker and strip quoting.
    sind = r.text.find("securityId")
    futuid = r.text[sind : sind + 30].split("=")[1].split(";")[0].strip(" ").strip("'")
    sind = r.text.find("marketType")
    market = r.text[sind : sind + 30].split("=")[1].split(";")[0].strip().strip("''")
    return futuid, market
def get_futu_historical(code, start=None, end=None):
    """
    Daily OHLCV bars from the futunn.com kline API.

    :param code: str, futunn-style code such as "00700-HK".
    :param start: unused placeholder (the API returns its own span).
    :param end: unused placeholder.
    :return: pd.DataFrame with date/open/close/high/low/volume.
    """
    fid, market = get_futu_id(code)
    r = rget(
        "https://www.futunn.com/new-quote/kline?security_id={fid}&type=2&market_type={market}".format(
            fid=fid, market=market
        )
    )
    df = pd.DataFrame(r.json()["data"]["list"])
    # "k" is an epoch timestamp; normalize to midnight local time
    df["date"] = df["k"].map(
        lambda s: dt.datetime.fromtimestamp(s)
        .replace(hour=0, minute=0, second=0, microsecond=0)
        .replace(tzinfo=None)
    )
    # prices come scaled by 1000
    df["open"] = df["o"] / 1000
    df["close"] = df["c"] / 1000
    df["high"] = df["h"] / 1000
    df["low"] = df["l"] / 1000
    df["volume"] = df["v"]
    df = df.drop(["k", "t", "o", "c", "h", "l", "v"], axis=1)
    return df
def get_historical_fromsp(code, start=None, end=None, region="us", **kws):
    """
    S&P official website (spindices.com) data source.

    :param code: str. "SP"-prefixed id; an optional ".N" suffix selects
        which exported column to keep (defaults to column 1).
    :param start: str, %Y%m%d. Chooses the 1/3/10-year export window.
    :param end: unused by the API; kept for interface uniformity.
    :param region: str. Site subdomain, "us" or "chinese".
    :return: pd.DataFrame with ["date", "close"].
    """
    if code.startswith("SP"):
        code = code[2:]
    if len(code.split(".")) > 1:
        col = code.split(".")[1]
        code = code.split(".")[0]
    else:
        col = "1"
    start_obj = dt.datetime.strptime(start, "%Y%m%d")
    fromnow = (today_obj() - start_obj).days
    # The export endpoint only offers fixed windows: 1, 3 or 10 years.
    if fromnow < 300:
        flag = "one"
    elif fromnow < 1000:
        flag = "three"
    else:
        flag = "ten"
    url = "https://{region}.spindices.com/idsexport/file.xls?\
selectedModule=PerformanceGraphView&selectedSubModule=Graph\
&yearFlag={flag}YearFlag&indexId={code}".format(
        region=region, flag=flag, code=code
    )
    r = rget(
        url,
        headers={
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "same-origin",
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
        },
    )
    df = pd.read_excel(r.content, engine="xlrd")
    # print(df.iloc[:10])
    df = df.iloc[6:]  # skip the export's header/banner rows
    df = df.dropna()
    df["close"] = df["Unnamed: " + col]
    df["date"] = pd.to_datetime(df["Unnamed: 0"])
    df = df[["date", "close"]]
    return df
def get_historical_frombb(code, start=None, end=None, **kws):
    """
    bloomberg.com data source — experimental support.

    Bloomberg appears to enforce strict IP bans, its latest data lags, and
    connections from mainland China tend to get reset, so it can hardly
    support T-1 net-value prediction. Strongly prefer investing.com or
    Yahoo when the ticker exists there; this API is a last resort only.

    :param code: str. "BB-"-prefixed Bloomberg symbol, e.g. "BB-FGERBIU:ID".
    :param start: str, %Y%m%d. Chooses the 1-month/1-year/5-year window.
    :param end: unused by the API; kept for interface uniformity.
    :return: pd.DataFrame with ["date", "close"].
    """
    if code.startswith("BB-"):
        code = code[3:]
    # end_obj = dt.datetime.strptime(end, "%Y%m%d")
    start_obj = dt.datetime.strptime(start, "%Y%m%d")
    fromnow = (today_obj() - start_obj).days
    if fromnow < 20:
        years = "1_MONTH"
    elif fromnow < 300:
        years = "1_YEAR"
    else:
        years = "5_YEAR"
    url = "https://www.bloomberg.com/markets2/api/history/{code}/PX_LAST?\
timeframe={years}&period=daily&volumePeriod=daily".format(
        years=years, code=code
    )
    r = rget_json(
        url,
        headers={
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko)",
            "referer": "https://www.bloomberg.com/quote/{code}".format(code=code),
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "accept": "*/*",
        },
    )
    df = pd.DataFrame(r[0]["price"])
    df["close"] = df["value"]
    df["date"] = pd.to_datetime(df["dateTime"])
    df = df[["date", "close"]]
    return df
def get_historical_fromft(code, start, end, _type="indices"):
    """
    Financial Times (markets.ft.com) historical data source.

    :param code: str. Numeric FT id, or a symbol resolved via get_ft_id.
    :param start: str date; "/", "-" or compact forms accepted.
    :param end: str date; same formats as start.
    :param _type: str. FT section used for symbol resolution.
    :return: pd.DataFrame with date/open/close/high/low, ascending date.
    """
    if not code.isdigit():
        code = get_ft_id(code, _type=_type)
    # normalize any accepted date format to YYYY/MM/DD as the API expects
    start = start.replace("/", "").replace("-", "")
    end = end.replace("/", "").replace("-", "")
    start = start[:4] + "/" + start[4:6] + "/" + start[6:]
    end = end[:4] + "/" + end[4:6] + "/" + end[6:]
    url = "https://markets.ft.com/data/equities/ajax/\
get-historical-prices?startDate={start}&endDate={end}&symbol={code}".format(
        code=code, start=start, end=end
    )
    r = rget_json(url, headers={"user-agent": "Mozilla/5.0"})
    b = BeautifulSoup(r["html"], "lxml")
    data = {"date": [], "open": [], "close": [], "high": [], "low": []}
    # The payload is an HTML table fragment: every 6 consecutive <td>s form
    # one row (date, open, high, low, close, volume — volume is skipped).
    for i, td in enumerate(b.findAll("td")):
        if i % 6 == 0:
            # drop the leading weekday from "Friday, March 27, 2020"
            s = td.find("span").string.split(",")[1:]
            s = ",".join(s)
            data["date"].append(dt.datetime.strptime(s, " %B %d, %Y"))
        elif i % 6 == 1:
            data["open"].append(_float(td.string))
        elif i % 6 == 2:
            data["high"].append(_float(td.string))
        elif i % 6 == 3:
            data["low"].append(_float(td.string))
        elif i % 6 == 4:
            data["close"].append(_float(td.string))
    df = pd.DataFrame(data)
    df = df.iloc[::-1]
    return df
def get_historical_fromyh(code, start=None, end=None):
    """
    Yahoo Finance data source. Coverage is rich and not limited to US
    stocks, but some history contains NaNs or weekend rows bleeding into
    trading days, so further cleaning may be required downstream.

    :param code: str. "YH-"-prefixed Yahoo symbol, e.g. "YH-CSGOLD.SW".
    :param start: str, %Y%m%d. Chooses the API range window (1mo..10y).
    :param end: unused by the API; kept for interface uniformity.
    :return: pd.DataFrame with date/close/open/high/low.
    """
    if code.startswith("YH-"):
        code = code[3:]
    start_obj = dt.datetime.strptime(start, "%Y%m%d")
    fromnow = (today_obj() - start_obj).days
    if fromnow < 20:
        range_ = "1mo"
    elif fromnow < 50:
        range_ = "3mo"
    elif fromnow < 150:
        range_ = "6mo"
    elif fromnow < 300:
        range_ = "1y"
    elif fromnow < 600:
        range_ = "2y"
    elif fromnow < 1500:
        range_ = "5y"
    else:
        range_ = "10y"
    url = "https://query1.finance.yahoo.com/v8\
/finance/chart/{code}?region=US&lang=en-US&includePrePost=false\
&interval=1d&range={range_}&corsDomain=finance.yahoo.com&.tsrc=finance".format(
        code=code, range_=range_
    )
    # The API also seems to accept explicit epoch bounds: period1=...&period2=...
    # The history page itself can also be scraped directly, e.g.
    # https://finance.yahoo.com/quote/CSGOLD.SW/history?period1=1427500800&period2=1585353600&interval=1d&filter=history&frequency=1d
    r = rget_json(url)
    data = {}
    datel = []
    for t in r["chart"]["result"][0]["timestamp"]:
        t = dt.datetime.fromtimestamp(t)
        if t.second != 0:
            # NOTE(review): non-zero seconds are treated as a tz-shifted stamp
            # and moved back 8 hours (UTC+8 offset?) — confirm this heuristic.
            t -= dt.timedelta(hours=8)
        datel.append(t.replace(tzinfo=None, hour=0, minute=0, second=0, microsecond=0))
    data["date"] = datel
    for k in ["close", "open", "high", "low"]:
        data[k] = r["chart"]["result"][0]["indicators"]["quote"][0][k]
    df = pd.DataFrame(data)
    return df
def get_historical_fromzzindex(code, start, end=None):
    """
    CSI (China Securities Index, csindex.com.cn) official data source.

    :param code: str. "ZZ"-prefixed index code; the code itself may
        contain letters (e.g. ZZH30533).
    :param start: str, %Y%m%d. Chooses the 1-month/3-month/1-year/5-year window.
    :param end: unused by the API; kept for interface uniformity.
    :return: pd.DataFrame with ["date", "close"].
    """
    if code.startswith("ZZ"):
        code = code[2:]
    start_obj = dt.datetime.strptime(start, "%Y%m%d")
    fromnow = (today_obj() - start_obj).days
    # flag values are URL-encoded Chinese window labels ("1个月", "1年", ...)
    if fromnow < 20:
        flag = "1%E4%B8%AA%E6%9C%88"
    elif fromnow < 60:
        flag = "3%E4%B8%AA%E6%9C%88"  # "3 months"
    elif fromnow < 200:
        flag = "1%E5%B9%B4"  # "1 year"
    else:
        flag = "5%E5%B9%B4"  # "5 years"
    r = rget_json(
        "http://www.csindex.com.cn/zh-CN/indices/index-detail/\
{code}?earnings_performance={flag}&data_type=json".format(
            code=code, flag=flag
        ),
        headers={
            "Host": "www.csindex.com.cn",
            "Referer": "http://www.csindex.com.cn/zh-CN/indices/index-detail/{code}".format(
                code=code
            ),
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36",
            "X-Requested-With": "XMLHttpRequest",
            "Accept": "application/json, text/javascript, */*; q=0.01",
        },
    )
    df = pd.DataFrame(r)
    df["date"] = pd.to_datetime(df["tradedate"])
    df["close"] = df["tclose"].apply(_float)
    return df[["date", "close"]]
def get_historical_fromgzindex(code, start, end):
    """
    CNI (Guozheng, cnindex.com.cn) index data source.

    :param code: str. "GZ"-prefixed index code; the remainder may contain
        several letters (e.g. GZB30018).
    :param start: str, %Y%m%d.
    :param end: str, %Y%m%d.
    :return: pd.DataFrame with date/close/open/low/high/percent/amount/volume,
        ascending date order.
    """
    if code.startswith("GZ"):
        code = code[2:]
    start = start[:4] + "-" + start[4:6] + "-" + start[6:]
    end = end[:4] + "-" + end[4:6] + "-" + end[6:]
    params = {
        "indexCode": code,
        "startDate": start,
        "endDate": end,
        "frequency": "Day",
    }
    r = rget_json(
        "http://hq.cnindex.com.cn/market/market/getIndexDailyDataWithDataFormat",
        params=params,
    )
    df = pd.DataFrame(r["data"]["data"], columns=r["data"]["item"])
    df["date"] = pd.to_datetime(df["timestamp"])
    df = df[["date", "close", "open", "low", "high", "percent", "amount", "volume"]]
    # TODO: are there CNI indexes that lack some of these columns?
    df = df[::-1]
    return df
def get_historical_fromhzindex(code, start, end):
    """
    Huazheng (chindices.com) index data source.

    :param code: str. "HZ"-prefixed index code; the prefix is stripped.
    :param start: unused placeholder (the API returns its full history).
    :param end: unused placeholder.
    :return: pd.DataFrame with date/close/percent in ascending date order.
    """
    if code.startswith("HZ"):
        code = code[2:]
    payload = rget_json(
        "http://www.chindices.com/index/values.val?code={code}".format(code=code)
    )
    df = pd.DataFrame(payload["data"])
    df["date"] = pd.to_datetime(df["date"])
    df = df[["date", "price", "pctChange"]]
    df.rename(columns={"price": "close", "pctChange": "percent"}, inplace=True)
    return df[::-1]
def get_historical_fromesunny(code, start=None, end=None):
    """
    Esunny (esunny.com.cn) commodity index data source.

    :param code: str, e.g. ESCI000201; rewritten to the "000201.ESCI"
        form the download endpoint expects.
    :param start: just placeholder
    :param end: just placeholder
    :return: pd.DataFrame with date/open/high/low/close/settlement/amount.
    """
    # code
    if code.startswith("ESCI"):
        code = code[4:] + ".ESCI"
    r = rget(
        "http://www.esunny.com.cn/chartES/csv/shareday/day_易盛指数_{code}.es".format(
            code=code
        )
    )
    data = []
    for l in r.text.split("\n"):
        # pipe-separated row: date | open | high | low | close | settlement | amount
        row = [s.strip() for s in l.split("|")]
        if len(row) > 1:
            data.append(row[:7])
    df = pd.DataFrame(
        data, columns=["date", "open", "high", "low", "close", "settlement", "amount"]
    )
    df["date"] = pd.to_datetime(df["date"])
    for c in ["open", "high", "low", "close", "settlement", "amount"]:
        df[c] = df[c].apply(_float)
    return df
def get_historical_fromycharts(code, start, end, category, metric):
    """
    ycharts.com chart-API data source (history limited to five years).

    :param code: str. ycharts security id, e.g. "DBP" or "^SPGSCICO".
    :param start: str, %m/%d/%Y.
    :param end: str, %m/%d/%Y.
    :param category: str. "companies" or "indices" (URL segment).
    :param metric: str. e.g. "price", "level", "net_asset_value".
    :return: pd.DataFrame with ["date", "close"].
    """
    params = {
        "securities": "include:true,id:{code},,".format(code=code),
        "calcs": "include:true,id:{metric},,".format(metric=metric),
        "startDate": start,  # %m/%d/%Y
        "endDate": end,  # %m/%d/%Y
        "zoom": "custom",
    }
    r = rget_json(
        "https://ycharts.com/charts/fund_data.json",
        params=params,
        headers={
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4)\
 AppleWebKit/537.36 (KHTML, like Gecko)",
            "Host": "ycharts.com",
            "X-Requested-With": "XMLHttpRequest",
            "Referer": "https://ycharts.com/{category}/{code}/chart/".format(
                category=category, code=code
            ),
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
        },
    )
    df = pd.DataFrame(
        data=r["chart_data"][0][0]["raw_data"], columns=["timestamp", "close"]
    )
    # raw_data timestamps are epoch values; ts2pdts converts to pd.Timestamp
    df["date"] = (df["timestamp"]).apply(ts2pdts)
    return df[["date", "close"]]
@lru_cache()
def get_bond_rates(rating, date=None):
    """
    Fetch the chinabond.com.cn yield curve (expected rate vs. duration in
    years) for bonds of a given credit rating.

    :param rating: str. e.g. "AAA", "AA-"; "N" stands for Chinese treasury
        bonds. An unknown rating string is passed through as a raw curve uid.
    :param date: str, %Y-%m-%d. Defaults to today; automatically walks back
        to the latest trading day when the requested day has no data.
    :return: pd.DataFrame with columns ["year", "rate"].
    """
    rating = rating.strip()
    # Mapping from rating name to chinabond's internal curve uid.
    # The table is incomplete — contributions are very welcome :)
    # fix: the "A" entry was previously duplicated (identical value twice).
    rating_uid = {
        "N": "2c9081e50a2f9606010a3068cae70001",  # treasury bonds
        "AAA": "2c9081e50a2f9606010a309f4af50111",
        "AAA-": "8a8b2ca045e879bf014607ebef677f8e",
        "AA+": "2c908188138b62cd01139a2ee6b51e25",
        "AA": "2c90818812b319130112c279222836c3",
        "AA-": "8a8b2ca045e879bf014607f9982c7fc0",
        "A+": "2c9081e91b55cc84011be40946ca0925",
        "A": "2c9081e91e6a3313011e6d438a58000d",
        "A-": "8a8b2ca04142df6a014148ca880f3046",
        "BBB+": "2c9081e91ea160e5011eab1f116c1a59",
        "BBB": "8a8b2ca0455847ac0145650780ad68fb",
        "BB": "8a8b2ca0455847ac0145650ba23b68ff",
        "B": "8a8b2ca0455847ac0145650c3d726901",
    }

    def _fetch(date):
        # chinabond's endpoint uses doubled "&&" separators — keep as-is.
        r = rpost(
            "https://yield.chinabond.com.cn/cbweb-mn/yc/searchYc?\
xyzSelect=txy&&workTimes={date}&&dxbj=0&&qxll=0,&&yqqxN=N&&yqqxK=K&&\
ycDefIds={uid}&&wrjxCBFlag=0&&locale=zh_CN".format(
                uid=rating_uid.get(rating, rating), date=date
            ),
        )
        return r

    if not date:
        date = dt.datetime.today().strftime("%Y-%m-%d")
    r = _fetch(date)
    # an (almost) empty payload means a non-trading day: step back one day
    while len(r.text.strip()) < 20:
        date = last_onday(date).strftime("%Y-%m-%d")
        r = _fetch(date)
    l = r.json()[0]["seriesData"]
    l = [t for t in l if t[1]]  # drop curve points with missing rates
    df = pd.DataFrame(l, columns=["year", "rate"])
    return df
def get_bond_rates_range(rating, duration=3, freq="W-FRI", start=None, end=None):
    """
    Sample the corporate-bond yield of a given rating at a fixed duration
    over a date range, one point per ``freq`` period.

    Codes like "B-AA+.3" are parsed as rating "AA+" with a 3-year duration.

    :param rating: str. Rating name, optionally "B-"-prefixed and with a
        ".duration" suffix.
    :param duration: number. Target duration in years (overridden by suffix).
    :param freq: str. pandas date_range frequency, default weekly Fridays.
    :return: pd.DataFrame with ["date", "close"].
    """
    if rating.startswith("B-"):
        rating = rating[2:]
    parts = rating.split(".")
    if len(parts) > 1:
        rating, duration = parts[0], float(parts[1])
    rows = []
    for day in pd.date_range(start, end, freq=freq):
        curve = get_bond_rates(rating, day.strftime("%Y-%m-%d"))
        # take the last curve point whose maturity does not exceed duration
        rate = curve[curve["year"] <= duration].iloc[-1]["rate"]
        rows.append([day, rate])
    return pd.DataFrame(rows, columns=["date", "close"])
@data_source("jq")
def get_macro(table, start, end, datecol="stat_year"):
    """
    Fetch a joinquant macro-economics table between start and end
    (requires the jq data source). Table names are documented at
    https://www.joinquant.com/help/api/help?name=macroData.

    :param table: str. Macro table name, e.g. "MAC_AREA_UNEMPLOY".
    :param start: str. Lower bound on ``datecol``.
    :param end: str. Upper bound on ``datecol``.
    :param datecol: str. Date-like column used to filter and order rows.
    :return: pd.DataFrame with an added "date" column (parsed ``datecol``).
    """
    df = macro.run_query(
        query(getattr(macro, table))
        .filter(getattr(getattr(macro, table), datecol) >= start)
        .filter(getattr(getattr(macro, table), datecol) <= end)
        .order_by(getattr(getattr(macro, table), datecol))
    )
    df[datecol] = pd.to_datetime(df[datecol])
    df["date"] = df[datecol]
    return df
def set_handler(method="daily", f=None):
    """
    Install a hook for ``get_daily``, ``get_bar`` or ``get_rt``: the hook
    runs first, and only when it returns None does normal handling proceed.

    :param method: str. One of "daily", "rt", "bar".
    :param f: callable or None; None removes the hook.
    :return: None
    """
    hook_name = "get_" + method + "_handler"
    setattr(thismodule, hook_name, f)
def _get_daily(
    code, start=None, end=None, prev=365, _from=None, wrapper=True, handler=True, **kws
):
    """
    Universal fetcher for daily historical data of literally everything that
    has a value in a market. Sources include but are not limited to ttjj
    (eastmoney), xueqiu, investing.com, SAFE/chinamoney, joinquant, the S&P
    official site, Bloomberg, Yahoo Finance and ycharts.

    :param code: str. The prefix/shape of the code selects the data source:

        1. SH/SZ + digits: A-share stocks, indexes, ETF/LOF funds,
           convertibles and bonds. A ".A"/".B"/".N" suffix selects
           backward-adjusted, forward-adjusted and unadjusted prices
           respectively (forward-adjusted by default); same for HK and US.
        2. HK + digits: Hong Kong stocks and indexes.
        3. Bare letter codes: US stocks, indexes, ETFs.
        4. "USD/CNY" style pairs: RMB central parity. Valid pairs are listed
           in the history table at http://www.chinamoney.com.cn/chinese/bkccpr/;
           note JPY uses 100JPY/CNY.
        5. Anything on cn.investing.com: either the site's internal numeric
           code or the URL path, e.g. "indices/germany-30" for DAX 30
           (https://cn.investing.com/indices/germany-30). The internal pid can
           also be found by inspecting the bold realtime price span.
        6. F + fund code: domestic funds (unit net value); T + code uses the
           accumulated net value (dividends included).
        7. M + fund code: domestic money-market funds (treated as net values).
        8. peb-000807.XSHG or peb-SH000807: weekly index valuation
           (requires the jq data source).
        9. iw-000807.XSHG or iw-SH000807: monthly index constituents with
           realtime weights (requires jq).
        10. fs-SH501018: daily share counts of an exchange fund (requires jq).
        11. SP5475707.2: S&P official index data (last ten years); the id
            comes from the index page's export-button link, the decimal
            suffix selects the kept column. See e.g.
            https://us.spindices.com/indices/equity/sp-global-oil-index.
            An SPC prefix fetches from the Chinese site instead.
        12. BB-FGERBIU:ID: Bloomberg data (last five years), from
            https://www.bloomberg.com/quote/FGERBIU:ID.
        13. sw-801720: Shenwan industry history (requires jq).
        14. teb-SH000300: weekly index total earnings and net assets in
            units of 1e8 CNY (requires jq).
        15. YH-CSGOLD.SW: Yahoo Finance daily data (last ten years); the
            code comes from the URL, e.g. https://finance.yahoo.com/quote/CSGOLD.SW.
        16. FT-22065529 or FT-INX:IOM: Financial Times data (the symbol form
            is recommended). The numeric id can be found by watching the XHR
            to https://markets.ft.com/data/chartapi/series when switching the
            chart time axis ([elements][symbol] in the request payload).
        17. FTC-WTI+Crude+Oil style: FTC, FTE, FTX, FTF, FTB, FTI map to the
            ft.com commodities/equities/currencies/funds/bonds/indices
            sections; FTI is the same as FT.
        18. mcy-MAC_AREA_UNEMPLOY: macro data via jq; mcy/mcq/mcm select
            yearly/quarterly/monthly tables, code is the table name, see
            https://www.joinquant.com/help/api/help?name=macroData.
        19. ZZ000905, ZZH30533: CSI official indexes; the code after ZZ may
            contain letters; history is capped at about five years.
        20. GZB30018, GZ399299: CNI (Guozheng) indexes; the code may contain
            several letters.
        21. ESCI000201: Esunny commodity indexes, see
            http://www.esunny.com.cn/index.php?a=lists&catid=60.
        22. pt-F100032: quarterly stock/bond/cash allocation of a fund.
        23. yc-companies/DBP or yc-companies/DBP/price: ycharts stock/ETF
            data (https://ycharts.com/companies/DBP/price); the last segment
            is the metric, default "price" (also net_asset_value — ETF only,
            total_return_price, total_return_forward_adjusted_price,
            average_volume_30); limited to five years.
        24. yc-indices/^SPGSCICO or yc-indices/^SPGSCICO/level: ycharts
            index data; metric defaults to "level" (also
            total_return_forward_adjusted_price); limited to five years.
        25. HZ999001, HZ999005: Huazheng indexes,
            http://www.chindices.com/indicator.html#.
        26. B-AA+.3: weekly AA+ corporate bond rates at 3-year duration.
        27. fu-00700.HK or fu-BA.US: daily bars from
            https://www.futunn.com/stock/BA-US.

    :param start: str. "20200101", "2020/01/01", "2020-01-01" are all legal.
        The starting date of daily data.
    :param end: str. Format is the same as start. The ending date.
    :param prev: Optional[int], default 365. If start is not specified,
        start = end - prev.
    :param _from: Optional[str]. Normally auto-detected from the code; set
        it only to force a specific source when debugging ("xueqiu", "zjj",
        "investing", "tiantianjijin", ...).
    :param wrapper: bool. Normally leave untouched; when False the result is
        additionally clipped to [start, end].
    :param handler: bool. Default True. When False, the get_daily_handler
        hook is bypassed (used when the hook itself calls back here).
    :return: pd.Dataframe.
        must include cols: date[pd.Timestamp],close[float64]。
    """
    if handler:
        if getattr(thismodule, "get_daily_handler", None):
            args = inspect.getargvalues(inspect.currentframe())
            f = getattr(thismodule, "get_daily_handler")
            fr = f(**args.locals)
            if fr is not None:
                return fr
    if not end:
        end_obj = today_obj()
    else:
        end_obj = dstr2dobj(end)
    if not start:
        start_obj = end_obj - dt.timedelta(days=prev)
    else:
        start_obj = dstr2dobj(start)
    if not _from:
        # Source auto-detection: order of these checks matters.
        if (code.startswith("SH") or code.startswith("SZ")) and code[2:8].isdigit():
            _from = "xueqiu"
        elif code.endswith("/CNY") or code.startswith("CNY/"):
            _from = "zjj"
        elif code.isdigit():
            _from = "cninvesting"
        elif code[0] in ["F", "M", "T"] and code[1:].isdigit():
            _from = "ttjj"
        elif code.startswith("HK") and code[2:7].isdigit():
            _from = "xueqiu"
            code = code[2:]
        elif code.startswith("SP") and code[2:].split(".")[0].isdigit():
            _from = "SP"
        elif code.startswith("SPC") and code[3:].split(".")[0].isdigit():
            _from = "SPC"
        elif code.startswith("ZZ") and code[4:].isdigit():  # note: CSI index codes may contain letters!
            _from = "ZZ"
        elif code.startswith("GZ") and code[-3:].isdigit():  # note: CNI index codes may contain several letters!
            _from = "GZ"
        elif code.startswith("HZ") and code[2:].isdigit():
            _from = "HZ"
        elif code.startswith("ESCI") and code[4:].isdigit():
            _from = "ES"
        elif code.startswith("yc-companies/") or code.startswith("yc-indices/"):
            _from = "ycharts"
            params = code.split("/")
            code = params[1]
            category = params[0].split("-")[1]
            if len(params) == 3:
                metric = params[2]
            else:
                # default metric depends on the ycharts category
                if category == "companies":
                    metric = "price"
                elif category == "indices":
                    metric = "level"
        elif len(code.split("-")) >= 2 and len(code.split("-")[0]) <= 3:
            # short prefixed codes such as peb-000807.XSHG
            _from = code.split("-")[0]
            code = "-".join(code.split("-")[1:])
        elif len(code[1:].split("/")) == 2:
            _from = "cninvesting"
            code = get_investing_id(code)
        else:
            _from = "xueqiu"  # assume a US ticker
    count = (today_obj() - start_obj).days + 1
    start_str = start_obj.strftime("%Y/%m/%d")
    end_str = end_obj.strftime("%Y/%m/%d")
    if _from in ["cninvesting", "investing", "default", "IN"]:
        df = get_historical_fromcninvesting(code, start_str, end_str)
        df = prettify(df)
    elif _from in ["xueqiu", "xq", "snowball", "XQ"]:
        code, type_ = decouple_code(code)
        df = get_historical_fromxq(code, count, type_=type_)
        df = prettify(df)
    elif _from in ["zhongjianjia", "zjj", "chinamoney", "ZJJ"]:
        df = get_rmb(start, end, prev, currency=code)
    elif _from in ["ttjj", "tiantianjijin", "xalpha", "eastmoney"]:
        if code.startswith("F96"):
            df = get_historical_from_ttjj_oversea(code, start=start, end=end)
        else:
            df = get_fund(code)
    elif _from == "peb":
        if (
            code.startswith("SH000")
            or code.startswith("SZ399")
            or code.startswith("399")
            or code.startswith("000")
        ):
            df = _get_peb_range(code=code, start=start_str, end=end_str)
        elif code.startswith("F"):
            df = get_fund_peb_range(code=code, start=start, end=end)
        else:
            df = get_stock_peb_range(code=code, start=start, end=end, wrapper=True)
    elif _from == "iw":
        df = _get_index_weight_range(code=code, start=start_str, end=end_str)
    elif _from == "fs":
        df = get_fundshare_byjq(code, start=start, end=end)
    elif _from == "SP":
        df = get_historical_fromsp(code, start=start, end=end)
    elif _from == "SPC":
        df = get_historical_fromsp(code[3:], start=start, end=end, region="chinese")
    elif _from == "BB":
        df = get_historical_frombb(code, start=start, end=end)
    elif _from == "ZZ":
        df = get_historical_fromzzindex(code, start=start, end=end)
    elif _from == "GZ":
        df = get_historical_fromgzindex(code, start=start, end=end)
    elif _from == "HZ":
        df = get_historical_fromhzindex(code, start=start, end=end)
    elif _from == "ES":
        df = get_historical_fromesunny(code, start=start, end=end)
    elif _from == "B":
        df = get_bond_rates_range(code, start=start, end=end)
    elif _from == "fu":
        code = code.replace(".", "-")
        df = get_futu_historical(code, start=start, end=end)
    elif _from == "ycharts":
        df = get_historical_fromycharts(
            code,
            start=start_obj.strftime("%m/%d/%Y"),
            end=end_obj.strftime("%m/%d/%Y"),
            category=category,
            metric=metric,
        )
    elif _from == "sw":
        df = get_sw_from_jq(code, start=start, end=end)
    elif _from == "teb":
        df = get_teb_range(code, start=start, end=end)
    elif _from in ["pt", "portfolio"]:
        df = get_portfolio_fromttjj(code, start=start, end=end)
    elif _from == "YH":
        df = get_historical_fromyh(code, start=start, end=end)
    elif _from in ["FT", "FTI"]:
        df = get_historical_fromft(code, start=start, end=end)
    elif _from == "FTE":
        df = get_historical_fromft(code, start=start, end=end, _type="equities")
    elif _from == "FTB":
        df = get_historical_fromft(code, start=start, end=end, _type="bonds")
    elif _from == "FTF":
        df = get_historical_fromft(code, start=start, end=end, _type="funds")
    elif _from == "FTX":
        df = get_historical_fromft(code, start=start, end=end, _type="currencies")
    elif _from == "FTC":
        df = get_historical_fromft(code, start=start, end=end, _type="commodities")
    elif _from == "INA":  # investing app
        code = get_investing_id(code, app=True)
        df = get_historical_fromcninvesting(code, start_str, end_str, app=True)
        df = prettify(df)
    elif _from == "mcy":
        df = get_macro(code, start=start[:4], end=end[:4], datecol="stat_year")
    elif _from == "mcq":
        df = get_macro(code, start=start, end=end, datecol="stat_quarter")
    elif _from == "mcm":
        df = get_macro(code, start=start, end=end, datecol="stat_month")
    elif _from == "mcd":
        df = get_macro(code, start=start, end=end, datecol="day")
    else:
        raise ParserFailure("no such data source: %s" % _from)
    if wrapper or len(df) == 0:
        return df
    else:
        # clip the result to the requested [start, end] window
        df = df[df.date <= end_str]
        df = df[df.date >= start_str]
        return df
def get_xueqiu_rt(code, token="a664afb60c7036c7947578ac1a5860c4cfb6b3b5"):
    """
    Realtime quote from the xueqiu (snowball) API.

    :param code: str. Xueqiu symbol; an "HK" prefix before a numeric code is
        stripped, since xueqiu expects the bare number for HK stocks.
    :param token: str. xq_a_token cookie value; the default is a shared
        token baked into the source and may expire.
    :return: dict with keys name/current/percent/current_ext/currency/
        market ("HK", "US" or "CN")/time/time_ext/totshare/floatshare/volume.
    """
    if code.startswith("HK") and code[2:].isdigit():
        code = code[2:]
    url = "https://stock.xueqiu.com/v5/stock/quote.json?symbol={code}&extend=detail"
    r = rget_json(
        url.format(code=code),
        cookies={"xq_a_token": token},
        headers={"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4)"},
    )
    n = r["data"]["quote"]["name"]
    q = r["data"]["quote"]["current"]
    try:
        q = _float(q)
    except TypeError:
        # Fixup for pre-open hours: between roughly 9:00 and 9:15 the xueqiu
        # API appears to return null for "current"; fall back to last close.
        # Effectiveness still to be verified.
        q = _float(r["data"]["quote"]["last_close"])
    q_ext = r["data"]["quote"].get("current_ext", None)
    percent = r["data"]["quote"]["percent"]
    try:
        percent = _float(percent)
    except (TypeError, ValueError):
        # fix: was a bare except; only tolerate missing/unparsable percents
        pass
    currency = r["data"]["quote"]["currency"]
    market = r["data"]["market"]["region"]
    timestr = dt.datetime.fromtimestamp(r["data"]["quote"]["time"] / 1000).strftime(
        "%Y-%m-%d %H:%M:%S"
    )
    if r["data"]["quote"].get("timestamp_ext", None):
        time_ext = dt.datetime.fromtimestamp(
            r["data"]["quote"]["timestamp_ext"] / 1000
        ).strftime("%Y-%m-%d %H:%M:%S")
    else:
        time_ext = None
    share = r["data"]["quote"]["total_shares"]
    fshare = r["data"]["quote"]["float_shares"]
    volume = r["data"]["quote"]["volume"]
    return {
        "name": n,
        "current": q,
        "percent": percent,
        "current_ext": _float(q_ext) if q_ext else None,
        "currency": currency,
        "market": market,  # HK, US, CN
        "time": timestr,
        "time_ext": time_ext,
        "totshare": share,
        "floatshare": fshare,
        "volume": volume,
    }
def get_cninvesting_rt(suburl, app=False):
    """
    Realtime quote scraped from a cn.investing.com instrument page.

    :param suburl: str. Path part of the URL (leading "/" optional), e.g.
        "indices/germany-30" or "commodities/crude-oil".
    :param app: bool. If True, query the mobile host cnappapi.investing.com
        with app-style headers.
    :return: dict with name/current/current_ext/time/time_ext/currency/
        percent/market; commodities additionally get rollover/lastrollover.
    """
    if not app:
        url = "https://cn.investing.com"
    else:
        url = "https://cnappapi.investing.com"
    if not suburl.startswith("/"):
        url += "/"
    url += suburl
    if not app:
        headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36"
        }
    else:
        headers = {
            "Accept": "*/*",
            "Accept-Encoding": "gzip",
            "Accept-Language": "zh-cn",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "User-Agent": "Investing.China/0.0.3 CFNetwork/1121.2.2 Darwin/19.3.0",
            "ccode": "CN",
            #'ccode_time': '1585551041.986028',
            "x-app-ver": "117",
            "x-meta-ver": "14",
            "x-os": "ios",
            "x-uuid": str(uuid4()),
            "Host": "cn.investing.com",
            "X-Requested-With": "XMLHttpRequest",
        }
    r = rget(
        url,
        headers=headers,
    )
    s = BeautifulSoup(r.text, "lxml")
    last_last = s.find("span", id="last_last")
    q = _float(last_last.string)
    name = s.find("h1").string.strip()
    ind = 0
    timestr = s.select('span[class*="ClockBigIcon"]+span')[0].text
    # Currency: scan the info bar for the "货币" (currency) label; the value
    # is the element just before it.
    l = s.find("div", class_="lighterGrayFont").contents
    for i, c in enumerate(l):
        if isinstance(c, str) and c.strip() == "货币":
            ind = i
            break
    if ind == 0:
        currency = None
    else:
        currency = l[ind - 1].string
    percent = _float(
        s.find("span", attrs={"dir": "ltr", "class": "parentheses"}).string[:-1]
    )
    # After-hours quote, when present on the page.
    panhou = s.find("div", class_="afterHoursInfo")
    if panhou:
        q_ext = _float(panhou.find("span").string)
    else:
        q_ext = None
    market = None
    for span in s.findAll("span", class_="elp"):
        if span.find("a") and span.find("a")["href"].startswith("/markets"):
            market = span.string
    market = region_trans.get(market, market)
    time_ext = s.select("div[class~=lastUpdated]")
    if time_ext:
        time_ext = time_ext[0].text.strip()
    else:
        time_ext = None
    d = {
        "name": name,
        "current": q,
        "current_ext": q_ext,
        "time": timestr,
        "time_ext": time_ext,
        "currency": currency,
        "percent": percent,
        "market": market,
    }
    if suburl.startswith("commodities"):  # commodity futures rollover dates
        try:
            d["rollover"] = s.select("span[class*=float_lang_base_2]")[10].string
            d["lastrollover"] = s.select("span[class*=float_lang_base_2]")[13].string
        except (ValueError, IndexError, AttributeError):
            logger.warning("%s cannot extract rollover date" % suburl)
            # in case some commodities with strong page structure
    return d
def get_rt_from_sina(code):
    """
    Realtime quote from the sina hq.sinajs.cn list API.

    :param code: str. SH/SZ/HK + digits for A-share/HK symbols, or a bare
        (optionally dot-prefixed) US ticker.
    :return: dict with name/current/currency/percent/market/time; A-shares
        additionally include buy1..buy5 / sell1..sell5 (price, size) tuples
        and current_ext (always None for CN/HK).
    """
    if (
        code.startswith("SH") or code.startswith("SZ") or code.startswith("HK")
    ) and code[2:].isdigit():
        tinycode = code[:2].lower() + code[2:]
        if code.startswith("HK"):  # HK quotes additionally need the rt_ realtime prefix
            tinycode = "rt_" + tinycode
    else:  # US stocks
        tinycode = "gb_"
        if code.startswith("."):
            code = code[1:]
        tinycode += code.lower()
    r = rget("https://hq.sinajs.cn/list={tinycode}".format(tinycode=tinycode))
    # payload looks like: var hq_str_xxx="field0,field1,..."; split on commas
    l = r.text.split("=")[1].split(",")
    d = {}
    d["name"] = l[0].strip('"')
    if (
        code.startswith("SH") or code.startswith("SZ") or code.startswith("HK")
    ) and code[2:].isdigit():
        # TODO: 20200819: API seems changed a bit, index shift?
        # or things may get zero when the market is closed?
        if code.startswith("HK"):
            d["current"] = float(l[9])  # earlier slots hold the English name placeholder
            d["currency"] = "HKD"
            d["percent"] = round(float(l[8]), 2)
            d["market"] = "HK"
            d["time"] = l[17] + " " + l[18]
            d["current_ext"] = None
        else:  # A-shares
            d["current"] = float(l[3])
            d["currency"] = "CNY"
            d["percent"] = round((float(l[3]) / float(l[2]) - 1) * 100, 2)
            d["market"] = "CN"
            d["time"] = l[-4] + " " + l[-3]
            # order book: fields 10-19 are bid size/price pairs, 20-29 asks
            for i in range(10, 19)[::2]:
                d["buy" + str(int((i - 8) / 2))] = (l[i + 1], l[i])
            for i in range(20, 29)[::2]:
                d["sell" + str(int((i - 18) / 2))] = (l[i + 1], l[i])
            d["current_ext"] = None
    else:
        d["currency"] = "USD"
        d["current"] = float(l[1])
        d["percent"] = float(l[2])
        # field 21 holds the extended-hours price; non-positive means absent
        d["current_ext"] = _float(l[21]) if _float(l[21]) > 0 else None
        d["market"] = "US"
        d["time"] = l[3]
    return d
def make_ft_url(code, _type="indices"):
    """
    Build the markets.ft.com tearsheet summary URL for an asset.

    :param code: str. FT symbol or identifier.
    :param _type: str. one of indices, commodities, currencies, funds,
        equities, bonds.
    :return: str. full summary-page URL.
    """
    # commodities is the only asset class whose query parameter is ``c``;
    # everything else uses ``s``
    query_param = {
        "indices": "s",
        "commodities": "c",
        "currencies": "s",
        "funds": "s",
        "equities": "s",
        "bonds": "s",
    }
    if _type not in query_param:
        raise ParserFailure("no reconginzed type for ft datasource: %s" % _type)
    return "https://markets.ft.com/data/{_type}/tearsheet/summary?{p}={code}".format(
        _type=_type, p=query_param[_type], code=code
    )
@lru_cache(maxsize=1024)
def get_ft_id(code, _type="indices"):
    """
    Resolve the internal FT ``xid`` for a symbol by scraping its tearsheet page.

    :param code: str. FT symbol, see :func:`make_ft_url`.
    :param _type: str. asset class, see :func:`make_ft_url`.
    :return: the ``xid`` value embedded in the watchlist widget config.
    """
    url = make_ft_url(code, _type=_type)
    r = rget(url)
    b = BeautifulSoup(r.text, "lxml")
    # NOTE(review): eval on scraped page content — the attribute looks JSON-like,
    # so json.loads would be safer; confirm the payload format before switching
    return eval(
        b.find("section", class_="mod-tearsheet-add-to-watchlist")["data-mod-config"]
    )["xid"]
def get_rt_from_ft(code, _type="indices"):
    """
    Fetch a (delayed) realtime quote by scraping the markets.ft.com tearsheet page.

    :param code: str. FT symbol, see :func:`make_ft_url`.
    :param _type: str. asset class, see :func:`make_ft_url`.
    :return: Dict[str, Any]. ``market`` and ``current_ext`` are always None here.
    """
    url = make_ft_url(code, _type=_type)
    r = rget(url)
    b = BeautifulSoup(r.text, "lxml")
    d = {}
    d["name"] = b.find("h1").string
    d["current"] = _float(b.find("span", class_="mod-ui-data-list__value").string)
    # percent change is rendered after a "/" with a trailing "%" to strip
    d["percent"] = _float(
        b.select("span[class^='mod-format--']")[0].text.split("/")[-1].strip()[:-1]
    )
    d["current_ext"] = None
    d["market"] = None
    # currency appears inside parentheses of the data-list label
    d["currency"] = b.find("span", class_="mod-ui-data-list__label").string.split("(")[
        1
    ][:-1]
    d["time"] = b.find("div", class_="mod-disclaimer").string
    return d
def get_rt_from_ycharts(code):
    """
    Fetch a realtime quote by scraping ycharts.com.

    :param code: str. ycharts URL path, optionally prefixed with "yc-".
    :return: Dict[str, Any]. ``market`` is always None here.
    """
    if code.startswith("yc-"):
        code = code[3:]
    url = "https://ycharts.com/" + code
    r = rget(url)
    s = BeautifulSoup(r.text, "lxml")
    qdiv = s.select("div.index-rank.col-auto")  # current
    # keep only real child tags (skip whitespace-only text nodes)
    spans = [s for s in qdiv[0].contents if s != "\n" and s.contents]
    d = {}
    d["name"] = s.select("h1,h3[class=securityName]")[0].text.strip()
    d["current"], d["percent"] = (
        _float(spans[0].string),  # current,
        _float(spans[1].contents[-2].string[1:-1]),  # percent
    )
    # index-info block: first non-empty line "<CCY> ...", second line timestamp
    l = [
        c.strip()
        for c in s.select("span[class=index-info]")[0].string.split("\n")
        if c.strip()
    ]
    d["time"] = l[1]
    d["currency"] = l[0].split(" ")[0].strip()
    d["market"] = None
    return d
@lru_cache_time(ttl=300, maxsize=512)
def get_newest_netvalue(code):
    """
    Get the latest officially published net value and its date for a fund,
    guarding against the ttjj bulk API lagging behind. Deprecated — use
    ``get_rt("F501018")`` instead.

    :param code: str. "F" + six digits fund code.
    :return: Tuple[float, str]. (netvalue, date as %Y-%m-%d).
    """
    code = code[1:]  # strip the leading "F"
    r = rget("http://fund.eastmoney.com/{code}.html".format(code=code))
    s = BeautifulSoup(r.text, "lxml")
    return (
        float(
            s.findAll("dd", class_="dataNums")[1]
            .find("span", class_="ui-font-large")
            .string
        ),
        # the <dt> text embeds "(...YYYY-MM-DD)"; [7:] drops a leading label
        str(s.findAll("dt")[1]).split("(")[1].split(")")[0][7:],
    )
@lru_cache(maxsize=512)
def get_hkfcode(code):
    """
    Resolve the internal ``hkfcode`` identifier that overseas.1234567.com.cn
    (eastmoney's overseas fund site) uses for a given fund code.

    :param code: str. six-digit fund code, optionally prefixed with "F".
    :return: str. the hkfcode extracted from inline javascript on the page.
    """
    if code.startswith("F"):
        code = code[1:]
    page = rget("http://overseas.1234567.com.cn/{code}".format(code=code)).text
    # the page embeds ``hkfcode = '...';`` in a script tag; slice from its first
    # occurrence and strip the assignment syntax and surrounding quotes
    # (a stray no-op ``page.find("hkfcode")`` statement was removed here)
    hkfcode = (
        page[page.find("hkfcode") :]
        .split("=")[1]
        .split(";")[0]
        .lstrip()
        .lstrip("'")
        .strip("'")
    )
    return hkfcode
def get_rt_from_ttjj_oversea(code):
    """
    Fetch the latest net value and metadata of an overseas (mutual-recognition,
    code starting with 96) fund from overseas.1234567.com.cn.

    :param code: str. six-digit code starting with "96", optional "F" prefix.
    :return: Dict[str, Any].
    :raises ValueError: when the code is not an oversea fund.
    """
    if code.startswith("F"):
        code = code[1:]
    if not code.startswith("96"):
        raise ValueError("%s is not an oversea fund" % code)
    r = rget("http://overseas.1234567.com.cn/{code}.html".format(code=code))
    r.encoding = "utf-8"
    s = BeautifulSoup(r.text, "lxml")
    # inception date is printed as "(YYYY-MM-DD)" (fullwidth parens) in dataItem02
    start = s.select("dl.dataItem02")[0].text
    start = start.split("(")[1].split(")")[0]
    # strip both fullwidth and halfwidth parenthesized suffixes from the name
    name = s.select("div[class='fundDetail-tit']")[0].text.split("(")[0].strip()
    name = name.split("(")[0].strip()
    value = _float(s.select("span.ui-font-large.ui-num")[0].text)
    date = (
        s.select("dl[class='dataItem01']")[0]
        .find("p")
        .text.split("(")[-1]
        .split(")")[0]
    )
    # info block lines: type / scale / manager, each as "label:value"
    infol = [
        r for r in s.select("div[class='infoOfFund']")[0].text.split("\n") if r.strip()
    ]
    return {
        "name": name,
        "time": date,
        "current": value,
        "market": "CN",
        "currency": None,  # mutual-recognition funds may well be priced in non-CNY currencies
        "current_ext": None,
        "type": infol[0].split(":")[1].strip(),
        "scale": infol[1].split(":")[1].strip(),
        "manager": infol[2].split(":")[1].strip(),
        "startdate": start,
    }
@lru_cache_time(ttl=600, maxsize=512)
def get_rt_from_ttjj(code):
    """
    Fetch the latest net value, metadata and (when available) the intraday
    net-value estimate of a fund by scraping its fund.eastmoney.com page.

    :param code: str. "F"/"T" + six digits fund code.
    :return: Dict[str, Any] including status/type/scale/manager/company/estimate.
    """
    code = code[1:]  # strip the "F"/"T" prefix
    if code.startswith("96"):
        # mutual-recognition (oversea) funds live on a different site
        return get_rt_from_ttjj_oversea(code)
    r = rget("http://fund.eastmoney.com/{code}.html".format(code=code))
    r.encoding = "utf-8"
    s = BeautifulSoup(r.text, "lxml")
    name = s.select("div[style='float: left']")[0].text.split("(")[0]
    if s.findAll("dd", class_="dataNums")[1].find(
        "span", class_="ui-font-large"
    ):  # non money-market fund
        value, date = (
            float(
                s.findAll("dd", class_="dataNums")[1]
                .find("span", class_="ui-font-large")
                .string
            ),
            str(s.findAll("dt")[1]).split("(")[1].split(")")[0][7:],
        )
        estimate = s.select("span[id=gz_gsz]")[0].text  # after loading
        if estimate == "--":
            # the estimate is filled in by javascript; fall back to the js API
            gsz = rget(
                "http://fundgz.1234567.com.cn/js/{code}.js".format(code=code),
                headers={
                    "Host": "fundgz.1234567.com.cn",
                    "Referer": "http://fund.eastmoney.com/",
                },
            )
            try:  # in case eval error
                # NOTE(review): eval on a remote payload and a bare except below —
                # json parsing of the jsonp body would be safer; confirm format
                gsz_dict = eval(gsz.text[8:-2])
                estimate = _float(gsz_dict["gsz"])
                estimate_time = gsz_dict["gztime"]
            except:
                estimate = None
        else:
            try:
                estimate = _float(estimate)
            except ValueError:
                logger.warning("unrecognized estimate netvalue %s" % estimate)
                estimate = None
    else:
        # money-market fund branch: the page shows a text value instead
        # (presumably the 7-day annualized yield — TODO confirm)
        value, date = (
            s.findAll("dd", class_="dataNums")[1].text,
            str(s.findAll("dt")[1]).split("(")[1].split(")")[0],
        )
        estimate = None
    status = s.select("span[class='staticCell']")[0].text.strip()
    tb = s.select("div.infoOfFund > table >tr>td")
    infol = [i.text for i in tb]
    # estimate_time is only bound on the js-API path above
    try:
        estimate_time
    except NameError:
        estimate_time = None
    return {
        "name": name,
        "time": date,
        "current": value,
        "market": "CN",
        "currency": "CNY",
        "current_ext": None,
        "status": status,
        "type": infol[0].split(":")[1].split("\xa0")[0],
        "scale": infol[1].split(":")[1],
        "manager": infol[2].split(":")[1],
        "company": infol[4].split(":")[1],
        "estimate": estimate,
        "estimate_time": estimate_time,
    }
# TODO: will funds with USD-denominated share classes break the CNY assumption above? verify
@lru_cache(2048)
def get_fund_type(code):
    """
    given fund code, return unified fund category which is extracted from get_rt(code)["type"]

    :param code: str. six-digit fund code (longer strings use the last six chars).
    :return: str. one of the unified category names.
    """
    code = code[-6:]
    raw_type = get_rt("F" + code)["type"]
    # index funds come in several raw labels
    if raw_type in ["联接基金", "股票指数"] or raw_type.startswith("ETF"):
        return "指数基金"
    # ordered prefix table for the remaining categories
    prefix_table = (
        ("QDII", "QDII"),
        ("股票", "股票基金"),
        ("混合", "混合基金"),
        ("债券", "债券基金"),
        ("货币", "货币基金"),
    )
    for prefix, category in prefix_table:
        if raw_type.startswith(prefix):
            return category
    return "其他"
def get_rt(
    code, _from=None, double_check=False, double_check_threhold=0.005, handler=True
):
    """
    universal fetcher for realtime price of literally everything.

    :param code: str. same code rules as :func:`get_daily`. Note OTC funds and
        central-parity FX rates have no realtime quote (only one price per day).
        For investing.com sources only URL-style codes are supported.
    :param _from: Optional[str]. can be one of "xueqiu", "investing". Only used for debug to
        enfore data source. For common use, _from can be chosed automatically based on code in the run time.
    :param double_check: Optional[bool], default False. A-share/US/HK only:
        cross-validate the quote against a second source, for scenarios like
        automated trading where a wrong realtime value is costly.
    :param double_check_threhold: float. relative deviation above which
        DataPossiblyWrong is raised when ``double_check`` is on.
    :param handler: bool. Default True. False disables the get_rt_handler hook
        (used to avoid recursion inside hook functions).
    :return: Dict[str, Any].
        always contains "name", "current", "percent"; "current_ext" (after-hours
        price), "currency", "market", "time" may be ``None``.
    """
    # for some targets the main job of get_rt is metadata (market/currency)
    # rather than the current price itself.
    # the sina realtime source currently lags badly, so double check is not very
    # reliable; HK quotes used to be delayed ~15min (solved).
    # both xueqiu and sina realtime may misbehave for a while after 9:00.
    # the FT source is delayed by 10 to 20 minutes.
    if handler:
        if getattr(thismodule, "get_rt_handler", None):
            args = inspect.getargvalues(inspect.currentframe())
            f = getattr(thismodule, "get_rt_handler")
            fr = f(**args.locals)
            if fr:
                return fr
    if not _from:
        # if code.startswith("HK") and code[2:].isdigit():
        #     _from = "xueqiu"
        if code.startswith("yc-"):
            _from = "ycharts"
        elif len(code.split("-")) >= 2 and len(code.split("-")[0]) <= 3:
            _from = code.split("-")[0]
            code = "-".join(code.split("-")[1:])
        elif (code.startswith("F") or code.startswith("T")) and code[1:].isdigit():
            _from = "ttjj"
        elif len(code.split("/")) > 1:
            _from = "investing"
        else:  # default to xueqiu realtime; sina's pure-index quotes are incomplete
            _from = "xueqiu"
    if _from in ["cninvesting", "investing"]:
        try:
            return get_cninvesting_rt(code)
        except Exception as e:
            logger.warning(
                "Fails due to %s, now trying app source of investing.com" % e.args[0]
            )
            return get_cninvesting_rt(code, app=True)
    elif double_check and _from in ["xueqiu", "sina"]:
        r1 = get_xueqiu_rt(code, token=get_token())
        r2 = get_rt_from_sina(code)
        if abs(r1["current"] / r2["current"] - 1) > double_check_threhold:
            raise DataPossiblyWrong("realtime data unmatch for %s" % code)
        return r2
    elif _from in ["xueqiu", "xq", "snowball"]:
        try:
            return get_xueqiu_rt(code, token=get_token())
        except (IndexError, ValueError, AttributeError, TypeError) as e:
            # xueqiu realtime has a built-in fallback to sina
            # (fixed: was root ``logging.warning``; use the module logger for consistency)
            logger.warning(
                "Fails due to %s, now trying backup data source from sina" % e.args[0]
            )
            return get_rt_from_sina(code)
    elif _from in ["sina", "sn", "xinlang"]:
        try:
            return get_rt_from_sina(code)
        except (IndexError, ValueError, AttributeError, TypeError) as e:
            # sina realtime has a built-in fallback to xueqiu
            logger.warning(
                "Fails due to %s, now trying backup data source from xueqiu" % e.args[0]
            )
            return get_xueqiu_rt(code, token=get_token())
    elif _from in ["ttjj"]:
        return get_rt_from_ttjj(code)
    elif _from in ["FT", "ft", "FTI"]:
        return get_rt_from_ft(code)
    elif _from == "FTE":
        return get_rt_from_ft(code, _type="equities")
    elif _from == "FTB":
        return get_rt_from_ft(code, _type="bonds")
    elif _from == "FTF":
        return get_rt_from_ft(code, _type="funds")
    elif _from == "FTX":
        return get_rt_from_ft(code, _type="currencies")
    elif _from == "FTC":
        return get_rt_from_ft(code, _type="commodities")
    elif _from in ["INA"]:  # investing app
        return get_cninvesting_rt(code, app=True)
    elif _from in ["yc", "ycharts"]:
        return get_rt_from_ycharts(code)
    else:
        raise ParserFailure("unrecoginzed _from for %s" % _from)
get_realtime = get_rt  # backward-compatible alias
get_now = get_rt  # backward-compatible alias
_cached_data = {}  # in-memory store used by the deprecated ``cached`` decorator
def reset_cache():
    """
    Drop every in-memory cache of daily data.

    :return: None.
    """
    global _cached_data
    # rebind (not mutate) so stale references to the old dicts stay untouched
    setattr(thismodule, "cached_dict", {})
    _cached_data = {}
def cached(s):
    """
    **Deprecated**, use :func:`cachedio` instead, where ``backend="memory"``.

    Usage as follows:

    .. code-block:: python

       @cached("20170101")
       def get_daily(*args, **kws):
           return xa.get_daily(*args, **kws)

    Automatically cache the result in memory and avoid refetching
    :param s: str. eg. "20160101", the starting date of cached table.
    :return: wrapped function.
    """
    def cached_start(f):
        @wraps(f)
        def wrapper(*args, **kws):
            print("cached function is deprecated, please instead use cachedio")
            if args:
                code = args[0]
            else:
                code = kws.get("code")
            # resolve the caller's requested window before kws is overwritten below
            start = kws.get("start", None)
            end = kws.get("end", None)
            prev = kws.get("prev", None)
            if not prev:
                prev = 365
            if not end:
                end_obj = today_obj()
            else:
                end_obj = dstr2dobj(end)
            if not start:
                start_obj = end_obj - dt.timedelta(prev)
            else:
                start_obj = dstr2dobj(start)
            start_str = start_obj.strftime("%Y%m%d")
            end_str = end_obj.strftime("%Y%m%d")
            # always fetch the full table from s up to today; trim at the end
            kws["start"] = s
            kws["end"] = dt.datetime.now().strftime("%Y%m%d")
            global _cached_data
            _cached_data.setdefault(s, {})
            if code not in _cached_data[s]:
                df = f(*args, **kws)
                # print("cached %s" % code)
                _cached_data[s][code] = df
            else:
                pass
                # print("directly call cache")
            df = _cached_data[s][code]
            df = df[df["date"] <= end_str]
            df = df[df["date"] >= start_str]
            return df
        return wrapper
    return cached_start
def cachedio(**ioconf):
    """
    Decorator factory similar to :func:`cached`: a transparent cache layer for
    functions shaped like (code, start, end, ...) -> pd.DataFrame, avoiding
    refetching data the backend already holds.

    :param **ioconf: optional keyword args. ``backend``: csv or sql or memory;
        ``path``: csv folder or sql engine; ``refresh``: True forces a refetch,
        default False; ``prefix``: common key prefix acting as a cache hash tag.
    :return:
    """
    def cached(f):
        @wraps(f)
        def wrapper(*args, **kws):
            if args:
                code = args[0]
            else:
                code = kws.get("code")
            date = ioconf.get("date", "date")  # date column name; configurable but not fully honored elsewhere
            precached = ioconf.get("precached", None)
            precached = kws.get("precached", precached)
            key = kws.get("key", code)
            key = key.replace("/", " ")
            key_func = ioconf.get("key_func", None)
            key_func = ioconf.get("keyfunc", key_func)
            if key_func is not None:
                key = key_func(key)
            # both spellings of each option are accepted
            defaultend = ioconf.get("defaultend", today_obj)
            defaultend = ioconf.get("default_end", defaultend)
            defaultprev = ioconf.get("defaultprev", 365)
            defaultprev = ioconf.get("default_prev", defaultprev)
            if isinstance(defaultend, str):
                defaultend = defaultend.replace("/", "").replace("-", "")
                defaultend = dt.datetime.strptime(defaultend, "%Y%m%d")
            if callable(defaultend):
                defaultend = defaultend()
            start = kws.get("start", None)
            end = kws.get("end", None)
            prev = kws.get("prev", None)
            prefix = ioconf.get("prefix", "")
            key = prefix + key
            if precached:
                precached = precached.replace("/", "").replace("-", "")
                precached_obj = dt.datetime.strptime(precached, "%Y%m%d")
            if not prev:
                prev = defaultprev
            if not end:
                end_obj = defaultend
            else:
                end_obj = dt.datetime.strptime(
                    end.replace("/", "").replace("-", ""), "%Y%m%d"
                )
            if not start:
                start_obj = end_obj - dt.timedelta(days=prev)
            else:
                start_obj = dt.datetime.strptime(
                    start.replace("/", "").replace("-", ""), "%Y%m%d"
                )
            start_str = start_obj.strftime("%Y%m%d")
            end_str = end_obj.strftime("%Y%m%d")
            backend = ioconf.get("backend")
            backend = kws.get("backend", backend)
            # if backend == "sql":  # reserved for case insensitive database settings
            #     key = key.lower()
            refresh = ioconf.get("refresh", False)
            refresh = kws.get("refresh", refresh)
            fetchonly = ioconf.get("fetchonly", False)
            fetchonly = ioconf.get("fetch_only", fetchonly)
            fetchonly = kws.get("fetchonly", fetchonly)
            fetchonly = kws.get("fetch_only", fetchonly)
            path = ioconf.get("path")
            path = kws.get("path", path)
            kws["start"] = start_str
            kws["end"] = end_str
            if not backend:
                # no backend configured: plain passthrough with date trimming
                df = f(*args, **kws)
                df = df[df["date"] <= kws["end"]]
                df = df[df["date"] >= kws["start"]]
                return df
            else:
                if backend == "csv":
                    key = key + ".csv"
                if not getattr(thismodule, "cached_dict", None):
                    setattr(thismodule, "cached_dict", {})
                if refresh:
                    is_changed = True
                    df0 = f(*args, **kws)
                else:  # non refresh
                    try:
                        if backend == "csv":
                            if key in getattr(thismodule, "cached_dict"):
                                # even disk-level backends keep a memory layer to speed up IO
                                df0 = getattr(thismodule, "cached_dict")[key]
                            else:
                                df0 = pd.read_csv(os.path.join(path, key))
                        elif backend == "sql":
                            if key in getattr(thismodule, "cached_dict"):
                                df0 = getattr(thismodule, "cached_dict")[key]
                            else:
                                df0 = pd.read_sql(key, path)
                        elif backend == "memory":
                            df0 = getattr(thismodule, "cached_dict")[key]
                        else:
                            raise ValueError("no %s option for backend" % backend)
                        df0[date] = pd.to_datetime(df0[date])
                        # extend the cached table backwards (earlier dates)
                        is_changed = False
                        if df0.iloc[0][date] > start_obj and not fetchonly:
                            kws["start"] = start_str
                            kws["end"] = (
                                df0.iloc[0][date] - pd.Timedelta(days=1)
                            ).strftime("%Y%m%d")
                            if has_weekday(kws["start"], kws["end"]):
                                # conservative weekday check instead of an exchange
                                # calendar, so overseas markets are also covered
                                df1 = f(*args, **kws)
                                if df1 is not None and len(df1) > 0:
                                    df1 = df1[df1["date"] <= kws["end"]]
                                if df1 is not None and len(df1) > 0:
                                    is_changed = True
                                    df0 = df1.append(df0, ignore_index=True, sort=False)
                        # extend the cached table forwards (later dates)
                        if df0.iloc[-1][date] < end_obj and not fetchonly:
                            nextday_str = (
                                df0.iloc[-1][date] + dt.timedelta(days=1)
                            ).strftime("%Y%m%d")
                            if len(df0[df0["date"] == df0.iloc[-1]["date"]]) == 1:
                                kws["start"] = (df0.iloc[-1][date]).strftime("%Y%m%d")
                            else:  # multi-row-per-day tables: assume the last day is already accurate, skip refreshing it
                                kws["start"] = nextday_str
                            kws["end"] = end_str
                            if has_weekday(nextday_str, kws["end"]):  # only refetch when the new span contains a weekday
                                df2 = f(*args, **kws)
                                if df2 is not None and len(df2) > 0:
                                    df2 = df2[df2["date"] >= kws["start"]]
                                if df2 is not None and len(df2) > 0:
                                    is_changed = True
                                    if (
                                        len(df0[df0["date"] == df0.iloc[-1]["date"]])
                                        == 1
                                    ):
                                        df0 = df0.iloc[:-1]
                                    df0 = df0.append(df2, ignore_index=True, sort=False)
                        # note the cached last day is overwritten above, because the
                        # newest daily bar may be polluted by realtime data
                    except (FileNotFoundError, exc.ProgrammingError, KeyError) as e:
                        # cache miss: fetch everything (from precached start when set)
                        if fetchonly:
                            logger.error(
                                "no cache in backend for %s but you insist `fetchonly`"
                                % code
                            )
                            raise e
                        if precached:
                            if start_obj > precached_obj:
                                kws["start"] = precached
                            if end_obj < today_obj():
                                kws["end"] = (
                                    today_obj() - dt.timedelta(days=1)
                                ).strftime("%Y%m%d")
                        is_changed = True
                        df0 = f(*args, **kws)
                if df0 is not None and len(df0) > 0 and is_changed:
                    if backend == "csv":
                        df0.to_csv(os.path.join(path, key), index=False)
                    elif backend == "sql":
                        df0.to_sql(key, con=path, if_exists="replace", index=False)
                    # elif backend == "memory":
                    # always refresh the memory layer, even for disk backends
                    d = getattr(thismodule, "cached_dict")
                    d[key] = df0
                if df0 is not None and len(df0) > 0:
                    df0 = df0[df0["date"] <= end_str]
                    df0 = df0[df0["date"] >= start_str]
                return df0
        return wrapper
    return cached
def fetch_backend(key):
    """
    Read a cached DataFrame for ``key`` from the module-level ``ioconf`` backend.

    :param key: str. cache key (without the configured prefix).
    :return: pd.DataFrame, or None when the key is absent from the backend.
    """
    backend = ioconf.get("backend")
    path = ioconf.get("path")
    key = ioconf.get("prefix", "") + key
    if backend == "csv":
        key = key + ".csv"
    try:
        if backend == "csv":
            return pd.read_csv(os.path.join(path, key))
        if backend == "sql":
            return pd.read_sql(key, path)
        # an unknown backend propagates as ValueError (not swallowed below)
        raise ValueError("no %s option for backend" % backend)
    except (FileNotFoundError, exc.ProgrammingError, KeyError):
        return None
def save_backend(key, df, mode="a", header=False):
    """
    Persist ``df`` under ``key`` in the module-level ``ioconf`` backend.

    :param key: str. cache key (without the configured prefix).
    :param df: pd.DataFrame to store.
    :param mode: "a" appends; anything else overwrites.
    :param header: bool. write the csv header row when appending.
    """
    backend = ioconf.get("backend")
    path = ioconf.get("path")
    key = ioconf.get("prefix", "") + key
    if backend == "csv":
        key = key + ".csv"
        if mode == "a":
            df.to_csv(os.path.join(path, key), index=False, header=header, mode=mode)
        else:
            df.to_csv(os.path.join(path, key), index=False, mode=mode)
    elif backend == "sql":
        if_exists = "append" if mode == "a" else "replace"
        df.to_sql(key, con=path, if_exists=if_exists, index=False)
    else:
        raise ValueError("no %s option for backend" % backend)
    logger.debug("%s saved into backend successfully" % key)
def check_cache(*args, omit_lines=0, **kws):
    """
    Assert that the cached ``get_daily`` result equals a fresh uncached fetch.

    :param omit_lines: int. number of trailing rows to ignore in the comparison
        (useful when the newest rows may differ due to realtime pollution).
    """
    fresh = _get_daily(*args, wrapper=False, **kws).reset_index(drop=True)
    cachedf = get_daily(*args, **kws).reset_index(drop=True)
    if omit_lines:
        fresh = fresh[:-omit_lines]
        cachedf = cachedf[:-omit_lines]
    assert fresh.equals(cachedf)
@data_source("jq")
def _get_index_weight_range(code, start, end):
    """
    Fetch monthly index constituent weights from joinquant between start and end.

    :param code: index code, xalpha or joinquant form.
    :param start: str. %Y%m%d (date separators tolerated).
    :param end: str. %Y%m%d (date separators tolerated).
    :return: pd.DataFrame with code/weight/display_name/date columns.
    """
    if len(code.split(".")) != 2:
        code = _inverse_convert_code(code)
    start_obj = dt.datetime.strptime(start.replace("-", "").replace("/", ""), "%Y%m%d")
    end_obj = dt.datetime.strptime(end.replace("-", "").replace("/", ""), "%Y%m%d")
    # snap to the first month boundary at or after start
    start_m = start_obj.replace(day=1)
    if start_m < start_obj:
        start_m = start_m + relativedelta(months=1)
    end_m = end_obj.replace(day=1)
    if end_obj < end_m:
        # NOTE(review): end_m = end_obj.replace(day=1) can never exceed end_obj,
        # so this branch looks unreachable — confirm before removing
        end_m = end_m - relativedelta(months=1)
    d = start_m
    df = pd.DataFrame({"code": [], "weight": [], "display_name": [], "date": []})
    while True:
        if d > end_m:
            df["date"] = pd.to_datetime(df["date"])
            return df
        logger.debug("fetch index weight on %s for %s" % (d, code))
        df0 = get_index_weights(index_id=code, date=d.strftime("%Y-%m-%d"))
        df0["code"] = df0.index
        df = df.append(df0, ignore_index=True, sort=False)
        d = d + relativedelta(months=1)
@data_source("jq")
def _get_peb_range(code, start, end):  # earnings, net assets, total market cap
    """
    Get the weekly (Friday-sampled) pe/pb history of an index over a period.

    :param code: index code in joinquant form (xalpha form is converted).
    :param start:
    :param end:
    :return: pd.DataFrame
    """
    if len(code.split(".")) != 2:
        code = _inverse_convert_code(code)
    data = {"date": [], "pe": [], "pb": []}
    for d in pd.date_range(start=start, end=end, freq="W-FRI"):
        data["date"].append(d)
        logger.debug("compute pe pb on %s" % d)
        r = get_peb(code, date=d.strftime("%Y-%m-%d"))
        data["pe"].append(r["pe"])
        data["pb"].append(r["pb"])
    return pd.DataFrame(data)
def get_stock_peb_range(code, start, end, wrapper=False):
    """
    Fetch the historical pe/pb/ps series of an individual stock via xueqiu.

    :param code: str. stock code; an "HK"+digits code is reduced to the digits.
    :param start: str. %Y%m%d
    :param end: str. %Y%m%d
    :param wrapper: bool. True skips the final date trimming (for cache layers).
    :return: pd.DataFrame with date, pe, pb, ps columns.
    """
    if code.startswith("HK") and code[2:].isdigit():
        code = code[2:]
    # fetch enough history back from today to cover start, then trim below
    count = (today_obj() - dt.datetime.strptime(start, "%Y%m%d")).days
    df = get_historical_fromxq(code, count, full=True)
    df = df[["date", "pe", "pb", "ps"]]
    if not wrapper:
        df = df[df["date"] >= start]
        df = df[df["date"] <= end]
    return df
@lru_cache()
def ttjjcode(code):
    """
    Normalize a stock code from ttjj fund holdings (or other sources) into the
    xalpha convention: SZ/SH/HK prefix for CN/HK stocks, bare symbol for US.

    :param code: str.
    :return: str. normalized code, or "0" when the format is unrecognized.
    """
    code = code.strip()
    if code.endswith(".HK"):
        return "HK" + code[:-3]
    if code.endswith(".US"):
        return code[:-3]
    if code.isdigit():
        if len(code) == 5:
            return "HK" + code
        if len(code) == 6:
            # individual A-share stocks only; index codes overlap these ranges
            # and cannot be disambiguated here
            if code.startswith(("16", "15", "12", "0", "3")):
                return "SZ" + code
            if code.startswith(("5", "6", "11")):
                return "SH" + code
            logger.warning("unrecognized code format %s" % code)
            return "0"
    logger.info("not so sure about code format %s, taken as US stock" % code)
    return code
def get_fund_peb(code, date, threhold=0.3):
    """
    Estimate a fund's pe and pb on a given date from its reported stock holdings.

    :param code: str. fund code, optionally "F"-prefixed.
    :param date: str. %Y%m%d (separators tolerated).
    :param threhold: float, default 0.3. for speed, holdings whose ratio (in
        percent) is below this value are discarded.
    :return: Dict[str, Optional[float]] with keys pe and pb.
    """
    if code.startswith("F"):
        code = code[1:]
    date = date.replace("/", "").replace("-", "")
    d = dt.datetime.strptime(date, "%Y%m%d")
    # pick the newest detailed holdings report assumed published by ``date``
    if d.month > 3 and d.month < 8:
        year = d.year - 1
        season = 4
    elif d.month <= 3:
        year = d.year - 1
        season = 2
    else:
        year = d.year
        season = 2
    # only seasons 2 and 4 are used: they carry the full holdings detail
    df = get_fund_holdings(code, year, season)
    if df is None:
        # fall back to the previous detailed report
        if season == 4:
            season = 2
        else:
            year -= 1
            season = 4
        df = get_fund_holdings(code, year, season)
    if df is None:
        logger.warning("%s seems has no holdings data in this time %s" % (code, year))
        return {"pe": None, "pb": None}
    df = df[df["ratio"] >= threhold]
    df["scode"] = df["code"].apply(ttjjcode)
    df = df[df["scode"] != "0"]  # drop codes ttjjcode could not normalize
    if len(df) == 0:
        return {"pe": None, "pb": None}
    pel, pbl = [], []
    for i, r in df.iterrows():
        try:
            fdf = get_daily("peb-" + r["scode"], end=date, prev=60)
            if len(fdf) == 0:
                # delisted, market closed at that time, or renamed
                logger.warning("%s: 无法获取,可能已退市,当时休市或改名" % r["scode"])
                pel.append(None)
                pbl.append(None)
            else:
                fdf = fdf.iloc[-1]
                pel.append(fdf["pe"])
                pbl.append(fdf["pb"])
        except (KeyError, TypeError, IndexError) as e:
            logger.warning(
                "%s: 获取历史估值出现问题: %s, 可能由于网站故障或股票代码非中美市场" % (r["scode"], e.args[0])
            )
            pel.append(None)
            pbl.append(None)
    df["pe"] = pel
    df["pb"] = pbl
    r = {}
    pedf = df[~pd.isna(df["pe"])]
    pbdf = df[~pd.isna(df["pb"])]
    if len(pbdf) < 0.5 * len(df):  # sometimes only a few holdings carry a pb value
        r["pb"] = None
    else:
        # ratio-weighted harmonic mean; the epsilon guards against zero pb
        pbdf["b"] = pbdf["ratio"] / (pbdf["pb"] + 0.000001)
        r["pb"] = pbdf.ratio.sum() / pbdf.b.sum()
    if len(pedf) == 0:
        r["pe"] = None
    else:
        pedf["e"] = pedf["ratio"] / (pedf["pe"] + 0.000001)
        r["pe"] = pedf.ratio.sum() / pedf.e.sum()
    return r
def get_fund_peb_range(code, start, end):
    """
    Weekly (Friday-sampled) history of fund pe/pb estimates between start and end.

    :param code: str. fund code, optionally "F"-prefixed.
    :param start:
    :param end:
    :return: pd.DataFrame with date, pe, pb columns.
    """
    if code.startswith("F"):
        code = code[1:]
    rows = []
    for friday in pd.date_range(start=start, end=end, freq="W-FRI"):
        peb = get_fund_peb(code, date=friday.strftime("%Y-%m-%d"))
        rows.append({"date": friday, "pe": peb["pe"], "pb": peb["pb"]})
    return pd.DataFrame(rows, columns=["date", "pe", "pb"])
def set_backend(**ioconf):
    """
    Configure the cache backend for xalpha's ``get_daily`` family; defaults to
    in-memory. See :func:`cachedio` for the accepted ``ioconf`` keys.

    :param ioconf:
    :return: None.
    """
    if not ioconf:
        ioconf = {"backend": "memory"}
    get_daily = cachedio(**ioconf)(_get_daily)
    prefix = ioconf.get("prefix", "")
    # index-weight and peb caches get dedicated key prefixes on top of the user one
    ioconf["prefix"] = "iw-" + prefix
    get_index_weight_range = cachedio(**ioconf)(_get_index_weight_range)
    ioconf["prefix"] = "peb-" + prefix
    get_peb_range = cachedio(**ioconf)(_get_peb_range)
    # publish the wrapped functions on both this module and the xa top module
    setattr(thismodule, "get_daily", get_daily)
    setattr(xamodule, "get_daily", get_daily)
    setattr(thismodule, "get_index_weight_range", get_index_weight_range)
    setattr(thismodule, "get_peb_range", get_peb_range)
    ioconf["prefix"] = prefix
    setattr(thismodule, "ioconf", ioconf)
# initialize the default in-memory backend at import time
set_backend()
@data_source("jq")
def get_peb(index, date=None, table=False):
    """
    Get pe and pb of an index on a given date, weighted by the constituent
    weights at that time combined with each company's latest report then.

    :param index: str. index code in joinquant form.
    :param date: str. %Y-%m-%d
    :param table: Optional[bool], default False. True returns the whole
        computation DataFrame for debugging.
    :return: Dict[str, float] with keys pe and pb.
    """
    if len(index.split(".")) == 2:
        index = _convert_code(index)
    # weights are published monthly; sample around the first day of the month
    middle = dt.datetime.strptime(
        date.replace("/", "").replace("-", ""), "%Y%m%d"
    ).replace(day=1)
    iwdf = get_index_weight_range(
        index,
        start=(middle - dt.timedelta(days=5)).strftime("%Y-%m-%d"),
        end=(middle + dt.timedelta(days=5)).strftime("%Y-%m-%d"),
    )
    q = query(valuation).filter(valuation.code.in_(list(iwdf.code)))
    logger.debug("get_fundamentals on %s" % (date))
    df = get_fundamentals(q, date=date)
    df = df.merge(iwdf, on="code")
    # weight-harmonic aggregation: sum(weight/ratio), inverted at the end
    df["e"] = df["weight"] / df["pe_ratio"]
    df["b"] = df["weight"] / df["pb_ratio"]
    df["p"] = df["weight"]
    tote = df.e.sum()
    totb = df.b.sum()
    if table:
        return df
    return {"pe": round(100.0 / tote, 3), "pb": round(100.0 / totb, 3)}
@data_source("jq")
def get_sw_from_jq(code, start=None, end=None, **kws):
    """
    Fetch daily valuation rows of a Shenwan (SW) level-1 industry index from jq.

    :param code: str. eg. 801180, a Shenwan industry index code.
    :param start:
    :param end:
    :param kws:
    :return: pd.DataFrame with ``date`` converted to datetime.
    """
    logger.debug("get sw data of %s" % code)
    df = finance.run_query(
        query(finance.SW1_DAILY_VALUATION)
        .filter(finance.SW1_DAILY_VALUATION.date >= start)
        .filter(finance.SW1_DAILY_VALUATION.date <= end)
        .filter(finance.SW1_DAILY_VALUATION.code == code)
        .order_by(finance.SW1_DAILY_VALUATION.date.asc())
    )
    df["date"] = pd.to_datetime(df["date"])
    return df
@data_source("jq")
def get_teb(code, date):
    """
    Total earnings, book value and market cap of an index's constituents on a date.

    :param code: index code, xalpha or joinquant form.
    :param date: str. %Y-%m-%d
    :return: Dict[str, float].
    """
    if len(code.split(".")) != 2:
        code = _inverse_convert_code(code)
    sl = get_index_stocks(code, date=date)
    logger.debug("get fundamentals from jq for %s" % code)
    df = get_fundamentals(query(valuation).filter(valuation.code.in_(sl)), date=date)
    # back out earnings and book value from market cap and the valuation ratios
    df["e"] = df["market_cap"] / df["pe_ratio"]
    df["b"] = df["market_cap"] / df["pb_ratio"]
    return {"e": df["e"].sum(), "b": df["b"].sum(), "m": df["market_cap"].sum()}  # unit: 1e8 CNY
def get_teb_range(code, start, end, freq="W-FRI"):
    """
    Collect the total earnings/book/market-cap series of an index over a period.

    :param code: index code, xalpha or joinquant form.
    :param start:
    :param end:
    :param freq: pandas date_range frequency, default weekly on Friday.
    :return: pd.DataFrame with date, e, b, m columns.
    """
    if len(code.split(".")) != 2:
        code = _inverse_convert_code(code)
    records = []
    for day in pd.date_range(start, end, freq=freq):
        teb = get_teb(code, day.strftime("%Y-%m-%d"))
        records.append({"date": day, "e": teb["e"], "b": teb["b"], "m": teb["m"]})
    return pd.DataFrame(records, columns=["date", "e", "b", "m"])
def _convert_code(code):
"""
将聚宽形式的代码转化为 xalpha 形式
:param code:
:return:
"""
no, mk = code.split(".")
if mk == "XSHG":
return "SH" + no
elif mk == "XSHE":
return "SZ" + no
def _inverse_convert_code(code):
"""
将 xalpha 形式的代码转化为聚宽形式
:param code:
:return:
"""
if code.startswith("SH"):
return code[2:] + ".XSHG"
elif code.startswith("SZ"):
return code[2:] + ".XSHE"
@lru_cache_time(ttl=60, maxsize=512)
def get_bar(
    code, prev=24, interval=3600, _from=None, handler=True, start=None, end=None
):
    """
    Unified fetcher of bar (candle) data at sub-daily or custom intervals.

    :param code: str. supports xueqiu and investing style codes
    :param prev: points of data from now to back, often limited by API around several hundreds
    :param interval: float, seconds. need to match the corresponding API,
        typical values include 60, 300, 3600, 86400, 86400*7
    :param _from: Optional[str]. force a data source; otherwise auto-detected from the code format.
    :param handler: bool. Default True. False disables the get_bar_handler hook
        (used to avoid recursion inside hook functions).
    :param start: Optional[str]. only honored by the jq source.
    :param end: Optional[str]. only honored by the jq source.
    :return: pd.DataFrame
    """
    if handler:
        if getattr(thismodule, "get_bar_handler", None):
            args = inspect.getargvalues(inspect.currentframe())
            f = getattr(thismodule, "get_bar_handler")
            fr = f(**args.locals)
            if fr is not None:
                return fr
    if not _from:
        # explicit start/end on A-share codes routes to jq; otherwise infer source
        if (
            (start is not None)
            and (end is not None)
            and (code.startswith("SH") or code.startswith("SZ"))
        ):
            _from = "jq"
        elif code.startswith("SH") or code.startswith("SZ"):
            _from = "xueqiu"
        elif code.isdigit():
            _from = "cninvesting"
        elif code.startswith("HK") and code[2:7].isdigit():
            _from = "xueqiu"
            code = code[2:]
        elif len(code.split("-")) >= 2 and len(code.split("-")[0]) <= 3:
            _from = code.split("-")[0]
            code = "-".join(code.split("-")[1:])
        elif len(code.split("/")) > 1:
            _from = "cninvesting"
            code = get_investing_id(code)
        else:
            _from = "xueqiu"  # US stocks
    if _from in ["xq", "xueqiu", "XQ"]:
        return get_bar_fromxq(code, prev, interval)
    elif _from in ["IN", "cninvesting", "investing"]:
        return get_bar_frominvesting(code, prev, interval)
    elif _from in ["INA"]:
        return get_bar_frominvesting(code, prev, interval)
    # the investing app endpoint 404s for bars, so only the web source is used
    elif _from in ["jq"]:
        code, type_ = decouple_code(code)
        # price adjustment: jq adjusts data at every granularity, while the
        # xueqiu source leaves sub-daily data unadjusted
        type_map = {"after": "post", "before": "pre", "normal": None}
        return get_bar_fromjq(
            code, start=start, end=end, interval=interval, fq=type_map[type_]
        )
    elif _from in ["wsj"]:
        return get_bar_fromwsj(code, interval=interval)[-prev:]
    else:
        raise ParserFailure("unrecoginized _from %s" % _from)
@data_source("jq")
def get_bar_fromjq(code, start, end, interval, fq="pre"):
    """
    Fetch bar data from joinquant via ``get_price``.

    :param code: str. xalpha-form code; converted to joinquant form here.
    :param start:
    :param end:
    :param interval: seconds (int or str); mapped to a jq frequency token below.
    :param fq: "pre"/"post"/None price adjustment mode.
    :return: pd.DataFrame
    """
    code = _inverse_convert_code(code)
    # seconds -> jq frequency token; unknown values pass through unchanged
    trans = {
        "60": "1m",
        "120": "2m",
        "300": "5m",
        "900": "15m",
        "1800": "30m",
        "3600": "60m",
        "7200": "120m",
        "86400": "daily",
    }
    interval = trans.get(str(interval), interval)
    logger.debug("calling ``get_price`` from jq with %s" % code)
    return get_price(code, start_date=start, end_date=end, frequency=interval, fq=fq)
def get_bar_frominvesting(code, prev=120, interval=3600):
    """
    get bar data beyond daily bar

    :param code: str. investing id or url
    :param prev: int, data points from now, max might be around 500, if exceed, only None is returnd
    :param interval: default 3600. optional 60, 300, 900, 1800, 18000, 86400, "week", "month"
    :return: pd.DataFrame or None if prev and interval unmatch the API
    """
    # normalize the convenience aliases to what the API expects
    if interval == "day":
        interval = 86400
    elif interval == "hour":
        interval = 3600
    elif interval == "minute":
        interval = 60
    elif interval == 86400 * 7:
        interval = "week"
    elif interval == 86400 * 30:
        interval = "month"
    if len(code.split("/")) == 2:
        code = get_investing_id(code)
    url = "https://cn.investing.com"
    headers = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4)\
AppleWebKit/537.36 (KHTML, like Gecko)",
        "Host": "cn.investing.com",
        "Referer": "https://cn.investing.com/commodities/",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "X-Requested-With": "XMLHttpRequest",
    }
    r = rget(
        url
        + "/common/modules/js_instrument_chart/api/data.php?pair_id={code}&pair_id_for_news={code}\
&chart_type=area&pair_interval={interval}&candle_count={prev}&events=yes&volume_series=yes&period=".format(
            code=code, prev=str(prev), interval=str(interval)
        ),
        headers=headers,
    )
    if not r.text:
        return  # None
    r = r.json()
    df = pd.DataFrame(r["candles"], columns=["date", "close", "0", "1"])
    df = df.drop(["0", "1"], axis=1)
    # timestamps come in ms; convert to naive Beijing time
    df["date"] = df["date"].apply(
        lambda t: dt.datetime.fromtimestamp(t / 1000, tz=tz_bj).replace(tzinfo=None)
    )
    return df
def get_bar_fromxq(code, prev, interval=3600):
    """
    Fetch bar data from the xueqiu kline API.

    :param code: str. xueqiu symbol, optionally with an adjustment suffix
        handled by ``decouple_code``.
    :param prev: int. number of bars back from tomorrow; max is also around 500.
    :param interval: 1m, 5m, 15m, 30m, 60m, 120m, month, quarter, year, week, day
        (or the equivalent number of seconds, mapped below).
    :return: pd.DataFrame or None on an empty response.
    """
    # seconds -> xueqiu period token; unknown values pass through unchanged
    trans = {
        "60": "1m",
        "300": "5m",
        "900": "15m",
        "1800": "30m",
        "3600": "60m",
        "7200": "120m",
        "86400": "day",
        "604800": "week",
        "2592000": "month",
    }
    code, type_ = decouple_code(code)
    interval = trans.get(str(interval), interval)
    url = "https://stock.xueqiu.com/v5/stock/chart/kline.json?symbol={code}&begin={tomorrow}&period={interval}&type={type_}\
&count=-{prev}&indicator=kline,pe,pb,ps,pcf,market_capital,agt,ggt,balance".format(
        code=code,
        tomorrow=int(tomorrow_ts() * 1000),
        prev=prev,
        interval=interval,
        type_=type_,
    )
    r = rget(
        url, headers={"user-agent": "Mozilla/5.0"}, cookies={"xq_a_token": get_token()}
    )
    if not r.text:
        return  # None
    else:
        df = pd.DataFrame(r.json()["data"]["item"], columns=r.json()["data"]["column"])
        # timestamps come in ms; convert to naive Beijing time
        df["date"] = df["timestamp"].apply(
            lambda t: dt.datetime.fromtimestamp(t / 1000, tz=tz_bj).replace(tzinfo=None)
        )
        df = df[
            [
                "date",
                "open",
                "high",
                "low",
                "close",
                "volume",
                "turnoverrate",
                "percent",
            ]
        ]
    return df
def get_bar_fromwsj(code, token=None, interval=3600):
    """
    Fetch a close-price time series from the WSJ/MarketWatch charting API.

    A proxy may be required to reach the endpoint.
    # code = "FUTURE/US/XNYM/CLM20"
    # TODO: the accepted code format has not been explored extensively here
    """
    step_by_seconds = {"3600": "1H"}
    # TODO: there are other freq tags, but I have no time to explore them, contributions are welcome:)
    freq = step_by_seconds.get(str(interval), interval)
    if not token:
        # NOTE(review): hard-coded fallback token; unclear whether it is refreshed upstream
        token = "cecc4267a0194af89ca343805a3e57af"
    series_spec = (
        '{"Step":"PT%s","TimeFrame":"D5","EntitlementToken":"%s",'
        '"IncludeMockTick":true,"FilterNullSlots":false,"FilterClosedPoints":true,"IncludeClosedSlots":false,'
        '"IncludeOfficialClose":true,"InjectOpen":false,"ShowPreMarket":false,"ShowAfterHours":false,'
        '"UseExtendedTimeFrame":false,"WantPriorClose":true,"IncludeCurrentQuotes":false,'
        '"ResetTodaysAfterHoursPercentChange":false,'
        '"Series":[{"Key":"%s","Dialect":"Charting","Kind":"Ticker","SeriesId":"s1","DataTypes":["Last"]}]}'
    ) % (freq, token, code)
    data = rget_json(
        "https://api-secure.wsj.net/api/michelangelo/timeseries/history",
        params={"json": series_spec, "ckey": token[:10]},
        headers={
            "user-agent": "Mozilla/5.0",
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Dylan2010.EntitlementToken": token,
            "Host": "api-secure.wsj.net",
            "Origin": "https://www.marketwatch.com",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "cross-site",
        },
    )
    df = pd.DataFrame(
        {
            "date": data["TimeInfo"]["Ticks"],
            "close": [point[0] for point in data["Series"][0]["DataPoints"]],
        }
    )
    # ticks are epoch milliseconds; shift by +8h for Beijing time
    df["date"] = pd.to_datetime(df["date"] * 1000000) + pd.Timedelta(hours=8)
    # drop placeholder rows for future timestamps (sentinel values far below zero)
    df = df[df["close"] > -100.0]
    return df
class vinfo(basicinfo, indicator):
    """
    vinfo is an info like class wrapper for get_daily, it behaves like info
    """

    def __init__(
        self,
        code,
        name=None,
        start=None,
        end=None,
        rate=0,
        col="close",
        normalization=True,
        **kws
    ):
        """
        :param code: code string understood by get_daily
        :param name: display name; when omitted, resolved via get_rt with the
            raw code as fallback
        :param start: start date, None is one year ago
        :param end: end date, None is yesterday
        :param rate: fee rate
        :param col: column of the daily table used as the price series
        :param normalization: if True, netvalue is rebased to the first row
        :param kws: round_label, dividend_label, value_label overrides
        """
        if not name:
            try:
                name = get_rt(code)["name"]
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt; the name lookup is best
                # effort and falls back to the raw code.
                name = code
        self.name = name
        self.code = code
        self.start = start  # None is one year ago
        self.end = end  # None is yesterday
        df = get_daily(code, start=start, end=end)
        df[col] = pd.to_numeric(df[col])  # in case the col is not float
        df["totvalue"] = df[col]
        if normalization:
            # rebase so the series starts at 1.0
            df["netvalue"] = df[col] / df.iloc[0][col]
        else:
            df["netvalue"] = df[col]
        self.price = df
        self.round_label = kws.get("round_label", 0)
        self.dividend_label = kws.get("dividend_label", 0)
        self.value_label = kws.get("value_label", 1)  # redeem by amount by default
        self.specialdate = []
        self.fenhongdate = []
        self.zhesuandate = []
        self.rate = rate
VInfo = vinfo
| 33.530977 | 277 | 0.544979 |
ace226eb15574bef91ff3310ddd4f25361a4f1a7 | 5,233 | py | Python | scripts/bsvdependencies.py | chanwooc/connectal | 4dcd9c47bd823659cfbf120e512142029e64ec5a | [
"MIT"
] | null | null | null | scripts/bsvdependencies.py | chanwooc/connectal | 4dcd9c47bd823659cfbf120e512142029e64ec5a | [
"MIT"
] | null | null | null | scripts/bsvdependencies.py | chanwooc/connectal | 4dcd9c47bd823659cfbf120e512142029e64ec5a | [
"MIT"
] | 1 | 2021-08-30T11:45:17.000Z | 2021-08-30T11:45:17.000Z | #!/usr/bin/env python
# Copyright (c) 2015 Connectal Project
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import os, sys
import glob
import argparse
import re
import bsvpreprocess
import subprocess
def getBsvPackages(bluespecdir):
    """BLUESPECDIR is expected to be the path to the bluespec distribution.
    Return the names (basenames without extension) of every compiled
    package (*.bo) found in the Prelude and Libraries trees of that
    distribution, up to three directory levels below Libraries.
    """
    patterns = (
        'Prelude/*.bo',
        'Libraries/*.bo',
        'Libraries/*/*.bo',
        'Libraries/*/*/*.bo',
    )
    packages = []
    for pattern in patterns:
        for path in glob.glob(os.path.join(bluespecdir, pattern)):
            packages.append(os.path.splitext(os.path.basename(path))[0])
    return packages
def bsvDependencies(bsvfile, allBsv=False, bluespecdir=None, argbsvpath=[], bsvdefine=[]):
    """Return the list of dependencies
    [(NAME,BSVFILENAME,PACKAGES,INCLUDES,SYNTHESIZEDMODULES)] of
    BSVFILE, adding the list BSVPATH to the directories to explore for
    dependencies.
    The boolean ALLBSV will generate entries for all
    BSV files on path.
    The string BLUESPECDIR will add the Prelude of
    Bsv in packages.
    The BSVDEFINE argument is passed to the
    preprocessor.

    NOTE(review): `argbsvpath=[]` / `bsvdefine=[]` are mutable defaults and
    BSVFILE is appended to in place when ALLBSV is set -- callers observe
    the mutation. Behavior left unchanged here.
    """
    # Split colon-separated path entries into individual directories.
    bsvpath = []
    for p in argbsvpath:
        ps = p.split(':')
        bsvpath.extend(ps)
    # Packages shipped with the Bluespec distribution are never reported
    # as project dependencies.
    bsvpackages = getBsvPackages(bluespecdir)
    project_packages = {}
    if allBsv:
        # Pick up every *.bsv file on the search path; the first file seen
        # for a given package basename wins.
        for d in bsvpath:
            for bsvfilename in glob.glob('%s/*.bsv' % d):
                package_name = os.path.basename(bsvfilename)
                if bsvfilename not in bsvfile and package_name not in project_packages:
                    bsvfile.append(bsvfilename)
                    project_packages[package_name] = bsvfilename
    # Map basenames to full paths so `include file names can be resolved.
    abspaths = {}
    for f in bsvfile:
        abspaths[os.path.basename(f)] = f
    for d in bsvpath:
        for f in glob.glob('%s/*' % d):
            abspaths[os.path.basename(f)] = f
    generated = []
    for bsvfilename in bsvfile:
        vf = open(bsvfilename, 'r')
        basename = os.path.basename(bsvfilename)
        (name, ext) = os.path.splitext(basename)
        source = vf.read()
        ##preprocessed = bsvpreprocess.preprocess(bsvfilename, source, bsvdefine, bsvpath)
        # Run the Bluespec compiler in preprocess-only mode (-E); its output
        # still carries `include markers, rewritten as // comments.
        bsc_search_path = '+:' + ':'.join(bsvpath)
        bsc_define_args = []
        for var in bsvdefine:
            bsc_define_args.append('-D')
            bsc_define_args.append(var)
        cp = subprocess.check_output(['bsc', '-E', '-p', bsc_search_path] + bsc_define_args + [bsvfilename])
        preprocessed = cp.decode('utf8')
        packages = []
        includes = []
        synthesizedModules = []
        # True while the previous line carried a (* synthesize *) attribute.
        synthesize = False
        for line in preprocessed.split('\n'):
            m = re.match('//`include "([^\"]+)"', line)
            m1 = re.match('//`include(.*)', line)
            if m:
                iname = m.group(1)
                if iname in abspaths:
                    iname = abspaths[iname]
                else:
                    # Unknown include: assume it will be generated into obj/.
                    iname = 'obj/%s' % iname
                includes.append(iname)
            elif m1:
                sys.stderr.write('bsvdepend %s: unhandled `include %s\n' % (bsvfilename, m1.group(1)))
            if re.match('^//', line):
                continue
            m = re.match('import\s+([A-Za-z0-9_]+)\w*', line)
            if m:
                pkg = m.group(1)
                # Only record project packages, not Bluespec prelude ones.
                if pkg not in packages and pkg not in bsvpackages:
                    packages.append(pkg)
            if synthesize:
                # The line following (* synthesize *) must declare the
                # module being synthesized.
                m = re.match('\s*module\s+([A-Za-z0-9_]+)', line)
                if m:
                    synthesizedModules.append(m.group(1))
                else:
                    sys.stderr.write('bsvdepend: in %s expecting module: %s\n' % (bsvfilename, line))
            synth = line.find('(* synthesize *)')
            attr = line.find('(* ')
            if synth >= 0:
                synthesize = True
            elif attr >= 0:
                pass # other attribute lines leave the synthesize flag unchanged
            else:
                synthesize = False
            pass
        generated.append((bsvfilename,packages,includes,synthesizedModules))
        vf.close()
    return (generated,bsvpath)
| 39.946565 | 203 | 0.610357 |
ace227aba6a2f69b07196eaf028de05cbcb57970 | 65 | py | Python | venv/lib/python2.7/site-packages/pyasn1_modules/__init__.py | bdh1011/wau | 9c822ae8455e12bf5adf94ee60a8b5ddfaf38965 | [
"MIT"
] | 1 | 2015-08-04T13:30:09.000Z | 2015-08-04T13:30:09.000Z | lib/pyasn1_modules/__init__.py | ikool/metact06-djan | 4f1e075110e3327c2ae03ce1598704823469b8c9 | [
"BSD-3-Clause"
] | null | null | null | lib/pyasn1_modules/__init__.py | ikool/metact06-djan | 4f1e075110e3327c2ae03ce1598704823469b8c9 | [
"BSD-3-Clause"
] | 1 | 2020-11-01T20:40:01.000Z | 2020-11-01T20:40:01.000Z | # http://www.python.org/dev/peps/pep-0396/
__version__ = '0.0.7'
| 21.666667 | 42 | 0.676923 |
ace227f8869b12b907f40611cf6e7d382701315b | 5,762 | py | Python | python/zed-opencv.py | dashaxin/zed-opencv | 523401633fd3df9a69b3ae60206389eae9060a78 | [
"MIT"
] | 1 | 2021-07-27T07:59:53.000Z | 2021-07-27T07:59:53.000Z | python/zed-opencv.py | dashaxin/zed-opencv | 523401633fd3df9a69b3ae60206389eae9060a78 | [
"MIT"
] | null | null | null | python/zed-opencv.py | dashaxin/zed-opencv | 523401633fd3df9a69b3ae60206389eae9060a78 | [
"MIT"
] | null | null | null | import sys
import numpy as np
import pyzed.sl as sl
import cv2
help_string = "[s] Save side by side image [d] Save Depth, [n] Change Depth format, [p] Save Point Cloud, [m] Change Point Cloud format, [q] Quit"
prefix_point_cloud = "Cloud_"
prefix_depth = "Depth_"
path = "./"
count_save = 0
mode_point_cloud = 0
mode_depth = 0
point_cloud_format = sl.POINT_CLOUD_FORMAT.POINT_CLOUD_FORMAT_XYZ_ASCII
depth_format = sl.DEPTH_FORMAT.DEPTH_FORMAT_PNG
def get_depth_format_name(f):
    """Return a short display name for an sl.DEPTH_FORMAT value ('' if unknown)."""
    names = {
        sl.DEPTH_FORMAT.DEPTH_FORMAT_PNG: "PNG",
        sl.DEPTH_FORMAT.DEPTH_FORMAT_PFM: "PFM",
        sl.DEPTH_FORMAT.DEPTH_FORMAT_PGM: "PGM",
    }
    return names.get(f, "")
def get_point_cloud_format_name(f):
    """Return a short display name for an sl.POINT_CLOUD_FORMAT value ('' if unknown)."""
    names = {
        sl.POINT_CLOUD_FORMAT.POINT_CLOUD_FORMAT_XYZ_ASCII: "XYZ",
        sl.POINT_CLOUD_FORMAT.POINT_CLOUD_FORMAT_PCD_ASCII: "PCD",
        sl.POINT_CLOUD_FORMAT.POINT_CLOUD_FORMAT_PLY_ASCII: "PLY",
        sl.POINT_CLOUD_FORMAT.POINT_CLOUD_FORMAT_VTK_ASCII: "VTK",
    }
    return names.get(f, "")
def save_point_cloud(zed, filename):
    """Export the camera's current point cloud to ``filename`` in the
    globally selected ``point_cloud_format``."""
    print("Saving Point Cloud...")
    if sl.save_camera_point_cloud_as(zed, point_cloud_format, filename, True):
        print("Done")
    else:
        print("Failed... Please check that you have permissions to write on disk")
def save_depth(zed, filename):
    """Export the current depth map to ``filename`` in the globally
    selected ``depth_format``, scaling depth to the full 16-bit range."""
    max_value = 65535.
    scale_factor = max_value / zed.get_depth_max_range_value()
    print("Saving Depth Map...")
    if sl.save_camera_depth_as(zed, depth_format, filename, scale_factor):
        print("Done")
    else:
        print("Failed... Please check that you have permissions to write on disk")
def save_sbs_image(zed, filename):
    """Write the left and right camera views side by side into one image file."""
    views = []
    for view in (sl.VIEW.VIEW_LEFT, sl.VIEW.VIEW_RIGHT):
        mat = sl.Mat()
        zed.retrieve_image(mat, view)
        views.append(mat.get_data())
    # horizontal concatenation: left view first, right view second
    cv2.imwrite(filename, np.concatenate(views, axis=1))
def process_key_event(zed, key):
    """
    Dispatch a cv2.waitKey code to the matching save/format action.

    Keys (upper or lower case unless noted): d = save depth,
    n = cycle depth format, p = save point cloud, m = cycle point cloud
    format, h = print help, s (lowercase only, as in the original
    bindings) = save side-by-side image. Any other key is ignored.
    """
    global mode_depth
    global mode_point_cloud
    global count_save
    global depth_format
    global point_cloud_format

    # ord() literals replace the previous magic key-code numbers.
    if key in (ord('d'), ord('D')):
        save_depth(zed, path + prefix_depth + str(count_save))
        count_save += 1
    elif key in (ord('n'), ord('N')):
        mode_depth += 1
        depth_format = sl.DEPTH_FORMAT(mode_depth % 3)
        print("Depth format: ", get_depth_format_name(depth_format))
    elif key in (ord('p'), ord('P')):
        save_point_cloud(zed, path + prefix_point_cloud + str(count_save))
        count_save += 1
    elif key in (ord('m'), ord('M')):
        mode_point_cloud += 1
        point_cloud_format = sl.POINT_CLOUD_FORMAT(mode_point_cloud % 4)
        print("Point Cloud format: ", get_point_cloud_format_name(point_cloud_format))
    elif key in (ord('h'), ord('H')):
        print(help_string)
    elif key == ord('s'):
        save_sbs_image(zed, "ZED_image" + str(count_save) + ".png")
        count_save += 1
    # any other key: no action (removed the pointless `a = 0` dead branch)
def print_help():
    """Print the keyboard shortcuts of the interactive viewer to stdout."""
    shortcuts = (
        " Press 's' to save Side by side images",
        " Press 'p' to save Point Cloud",
        " Press 'd' to save Depth image",
        " Press 'm' to switch Point Cloud format",
        " Press 'n' to switch Depth format",
    )
    for line in shortcuts:
        print(line)
def main() :
    """Open the ZED camera (or replay the SVO file given as first command
    line argument), grab frames in a loop, display image and depth with
    OpenCV, and dispatch the save/format hotkeys until 'q' is pressed."""
    # Create a ZED camera object
    zed = sl.Camera()

    # Set configuration parameters
    init = sl.InitParameters()
    init.camera_resolution = sl.RESOLUTION.RESOLUTION_HD1080
    init.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_PERFORMANCE
    init.coordinate_units = sl.UNIT.UNIT_METER
    if len(sys.argv) >= 2 :
        # Optional first argument: replay from an SVO recording instead
        # of the live camera.
        init.svo_input_filename = sys.argv[1]

    # Open the camera; bail out with the error code on failure.
    err = zed.open(init)
    if err != sl.ERROR_CODE.SUCCESS :
        print(repr(err))
        zed.close()
        exit(1)

    # Display help in console
    print_help()

    # Set runtime parameters after opening the camera
    runtime = sl.RuntimeParameters()
    runtime.sensing_mode = sl.SENSING_MODE.SENSING_MODE_STANDARD

    # Prepare new image size to retrieve half-resolution images
    image_size = zed.get_resolution()
    new_width = image_size.width /2
    new_height = image_size.height /2

    # Declare your sl.Mat matrices
    image_zed = sl.Mat(new_width, new_height, sl.MAT_TYPE.MAT_TYPE_8U_C4)
    depth_image_zed = sl.Mat(new_width, new_height, sl.MAT_TYPE.MAT_TYPE_8U_C4)
    point_cloud = sl.Mat()

    key = ' '
    while key != 113 :  # 113 == ord('q')
        err = zed.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS :
            # Retrieve the left image, depth image in the half-resolution
            zed.retrieve_image(image_zed, sl.VIEW.VIEW_LEFT, sl.MEM.MEM_CPU, int(new_width), int(new_height))
            zed.retrieve_image(depth_image_zed, sl.VIEW.VIEW_DEPTH, sl.MEM.MEM_CPU, int(new_width), int(new_height))
            # Retrieve the RGBA point cloud in half resolution
            zed.retrieve_measure(point_cloud, sl.MEASURE.MEASURE_XYZRGBA, sl.MEM.MEM_CPU, int(new_width), int(new_height))

            # To recover data from sl.Mat to use it with opencv, use the get_data() method
            # It returns a numpy array that can be used as a matrix with opencv
            image_ocv = image_zed.get_data()
            depth_image_ocv = depth_image_zed.get_data()

            cv2.imshow("Image", image_ocv)
            cv2.imshow("Depth", depth_image_ocv)

            key = cv2.waitKey(10)
            process_key_event(zed, key)

    cv2.destroyAllWindows()
    zed.close()
    print("\nFINISH")
if __name__ == "__main__":
main()
| 32.370787 | 146 | 0.669212 |
ace22809137083daf684b7d2ba0058723a49a4b2 | 804 | py | Python | manage.py | shark-S/First-django-app | 939fc80d84e5105b20a9cab7eff04e97da8500b2 | [
"MIT"
] | null | null | null | manage.py | shark-S/First-django-app | 939fc80d84e5105b20a9cab7eff04e97da8500b2 | [
"MIT"
] | null | null | null | manage.py | shark-S/First-django-app | 939fc80d84e5105b20a9cab7eff04e97da8500b2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: point Django at this
    # project's settings module, then dispatch the CLI command line.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gitapp.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
ace2287c205d5a81e9cc62959e37745d6944823f | 2,540 | py | Python | meregistro/apps/registro/models/DependenciaFuncional.py | MERegistro/meregistro | 6cde3cab2bd1a8e3084fa38147de377d229391e3 | [
"BSD-3-Clause"
] | null | null | null | meregistro/apps/registro/models/DependenciaFuncional.py | MERegistro/meregistro | 6cde3cab2bd1a8e3084fa38147de377d229391e3 | [
"BSD-3-Clause"
] | null | null | null | meregistro/apps/registro/models/DependenciaFuncional.py | MERegistro/meregistro | 6cde3cab2bd1a8e3084fa38147de377d229391e3 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from django.core.exceptions import ValidationError
from apps.registro.models.Jurisdiccion import Jurisdiccion
from apps.registro.models.TipoGestion import TipoGestion
from apps.registro.models.TipoEducacion import TipoEducacion
from apps.registro.models.TipoDependenciaFuncional import TipoDependenciaFuncional
from apps.seguridad.models import Ambito
from apps.seguridad.audit import audit
@audit
class DependenciaFuncional(models.Model):
    """A functional dependency, unique per (jurisdiccion, tipo_gestion,
    tipo_dependencia_funcional) triple, mirrored by an Ambito node."""
    nombre = models.CharField(max_length=255)
    jurisdiccion = models.ForeignKey(Jurisdiccion)
    tipo_gestion = models.ForeignKey(TipoGestion)
    tipo_dependencia_funcional = models.ForeignKey(TipoDependenciaFuncional)
    # Mirrored security scope; managed automatically in save()/delete().
    ambito = models.ForeignKey(Ambito, editable=False, null=True)

    class Meta:
        app_label = 'registro'
        db_table = 'registro_dependencia_funcional'
        ordering = ['nombre']
        unique_together = ('jurisdiccion', 'tipo_gestion', 'tipo_dependencia_funcional')

    def __unicode__(self):
        return self.nombre

    def clean(self):
        """Validate that no other row shares the same jurisdiccion /
        tipo_gestion / tipo_dependencia_funcional combination."""
        jurisdiccion = self.jurisdiccion
        tipo_dependencia_funcional = self.tipo_dependencia_funcional
        tipo_gestion = self.tipo_gestion
        if jurisdiccion and tipo_dependencia_funcional and tipo_gestion:
            try:
                df = DependenciaFuncional.objects.get(jurisdiccion=jurisdiccion, tipo_dependencia_funcional=tipo_dependencia_funcional, tipo_gestion=tipo_gestion)
                if df and df != self:
                    raise ValidationError('Ya existe una dependencia funcional similar.')
            except DependenciaFuncional.DoesNotExist:
                pass

    def save(self, *args, **kwargs):
        # BUGFIX: the override previously declared `save(self)` and dropped
        # Django's standard arguments (force_insert, using, update_fields,
        # ...); they are now forwarded. Plain `obj.save()` behaves as before.
        self.updateAmbito()
        models.Model.save(self, *args, **kwargs)

    def updateAmbito(self):
        """Create the mirrored Ambito on first save, or refresh its
        description on subsequent saves."""
        if self.pk is None or self.ambito is None:
            self.ambito = self.jurisdiccion.ambito.createChild(self.nombre, self)
        else:
            self.ambito.descripcion = self.nombre
            self.ambito.save()

    def hasEstablecimientos(self):
        """Return True when at least one Establecimiento references this row."""
        from apps.registro.models.Establecimiento import Establecimiento
        establecimientos = Establecimiento.objects.filter(dependencia_funcional=self)
        return establecimientos.count() > 0

    def delete(self, *args, **kwargs):
        # Refuse deletion while still referenced, then remove the row and
        # its mirrored Ambito. Same *args/**kwargs forwarding fix as save().
        if self.hasEstablecimientos():
            raise Exception("Dependencia funcional en uso")
        ambito = self.ambito
        models.Model.delete(self, *args, **kwargs)
        if ambito is not None:
            ambito.delete()
| 40.31746 | 162 | 0.716142 |
ace228acbf5912c690e30dbac5a6b25cd68e7661 | 545 | py | Python | chembddb/migrations/0023_auto_20180405_0535.py | shirish510/ChemBDDB | 5094b21b14f026ef6f7867d1a2bcf94d34712e4b | [
"BSD-2-Clause"
] | 1 | 2018-12-10T16:17:08.000Z | 2018-12-10T16:17:08.000Z | chembddb/migrations/0023_auto_20180405_0535.py | shirish510/ChemBDDB | 5094b21b14f026ef6f7867d1a2bcf94d34712e4b | [
"BSD-2-Clause"
] | null | null | null | chembddb/migrations/0023_auto_20180405_0535.py | shirish510/ChemBDDB | 5094b21b14f026ef6f7867d1a2bcf94d34712e4b | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-04-05 05:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: makes Data.met a nullable ForeignKey to
    # chembddb.Method (CASCADE on delete).

    dependencies = [
        ('chembddb', '0022_auto_20180405_0533'),
    ]

    operations = [
        migrations.AlterField(
            model_name='data',
            name='met',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='chembddb.Method'),
        ),
    ]
ace228e7e303ee124f93086150c2f6060209f3fe | 897 | py | Python | mymath.py | ViniciusPiresLopes/Pyxel | 5e0f53c4fa5af9289e78087850dfd7bcd8601008 | [
"MIT"
] | null | null | null | mymath.py | ViniciusPiresLopes/Pyxel | 5e0f53c4fa5af9289e78087850dfd7bcd8601008 | [
"MIT"
] | null | null | null | mymath.py | ViniciusPiresLopes/Pyxel | 5e0f53c4fa5af9289e78087850dfd7bcd8601008 | [
"MIT"
] | null | null | null | from vector import Vec2
def clamp(value, min_, max_):
if value < min_:
return min_
if value > max_:
return max_
return value
def get_line_pixels(start_pos: Vec2, end_pos: Vec2) -> []:
start_pos.x = round(start_pos.x)
start_pos.y = round(start_pos.y)
end_pos.x = round(end_pos.x)
end_pos.y = round(end_pos.y)
dist = Vec2(
end_pos.x - start_pos.x,
end_pos.y - start_pos.y
)
if abs(dist.x) > abs(dist.y):
steps = abs(dist.x)
else:
steps = abs(dist.y)
if steps == 0:
return [start_pos.copy()]
increment = Vec2(
dist.x / (steps),
dist.y / (steps)
)
pixels = []
pos = start_pos.copy()
for i in range(steps):
pos.x += increment.x
pos.y += increment.y
pixels.append(Vec2(round(pos.x), round(pos.y)))
return pixels
| 19.933333 | 58 | 0.548495 |
ace228f121b3442dd8f533d25d56c9c428db8dd9 | 1,818 | py | Python | montyhall.py | uzairnavid/MontyHall | 98747aba78511a9c638e3b7b9112a8a849c8b363 | [
"MIT"
] | null | null | null | montyhall.py | uzairnavid/MontyHall | 98747aba78511a9c638e3b7b9112a8a849c8b363 | [
"MIT"
] | 1 | 2018-04-28T10:13:41.000Z | 2018-05-05T23:10:00.000Z | montyhall.py | uzairnavid/MontyHall | 98747aba78511a9c638e3b7b9112a8a849c8b363 | [
"MIT"
] | null | null | null | # Simulates a generalized version of the Monty-Hall Problem
# with n-doors, where the narrator opens (n - 2) after the
# player picks a door. Highlights how the probability of success
# changes based on whether or not the player changes their chosen
# door after the first round.
#
# Uzair N Iftikhar, Dec '17
# [That one time I had 7 hours to kill at Heathrow]
import random
import sys # Can provide a command line argument for the number of doors
def simulate(change, numDoors):
    """Run one round of the generalized Monty-Hall game.

    The car is hidden behind one of ``numDoors`` doors, the player picks a
    door, the narrator opens all but two doors (never the car, never the
    player's pick), and the player optionally switches to the remaining
    closed door when ``change`` is True.

    Returns True if the player's final pick is the car.
    """
    car_door = random.randrange(0, numDoors)
    pick = random.randrange(0, numDoors)
    opened = [door for door in range(0, numDoors)
              if door != car_door and door != pick]
    if pick == car_door:
        # One non-car door must stay closed so the player can switch to it.
        opened = opened[:-1]
    if change:
        original = pick
        # Re-draw until we land on a closed door other than the first pick
        # (uniform over the valid doors, same as the original rejection loop).
        while pick == original or pick in opened:
            pick = random.randrange(0, numDoors)
    return (pick == car_door)
def main():
    """Read the optional door count from the command line, then report the
    empirical win rate with and without switching doors."""
    if len(sys.argv) < 2:  # no argument given
        numDoors = 3  # default to 3 doors
    else:
        numDoors = float(sys.argv[1])  # float() tolerates non-int input
    if numDoors <= 2 or numDoors != numDoors // 1:
        print("Please enter a valid integer value greater than 2")
        return
    numDoors = int(numDoors)
    runs = 50000  # number of simulations per strategy
    for change in (True, False):  # show how the probability changes
        wins = sum(1 for _ in range(runs) if simulate(change, numDoors))
        print("Probability of success is ", wins / runs, "if change is", change)
if (__name__ == '__main__'):
main()
| 37.102041 | 106 | 0.713421 |
ace22918683ba65c4dc72ca4a5e3f8d6205132b3 | 4,822 | py | Python | models/EfficientNetV1/models.py | zkdlfrlwl2/Classification-For-Everyone | a99428080ef470a3270d3f4a6048df197216a050 | [
"MIT"
] | 5 | 2022-02-21T07:05:20.000Z | 2022-03-05T11:51:51.000Z | models/EfficientNetV1/models.py | zkdlfrlwl2/Classification-For-Everyone | a99428080ef470a3270d3f4a6048df197216a050 | [
"MIT"
] | 12 | 2022-02-04T14:31:42.000Z | 2022-03-24T14:25:21.000Z | models/EfficientNetV1/models.py | zkdlfrlwl2/Classification-For-Everyone | a99428080ef470a3270d3f4a6048df197216a050 | [
"MIT"
] | 3 | 2022-01-02T04:12:42.000Z | 2022-02-23T15:15:53.000Z | import torch
import torch.nn as nn
import numpy as np
from .blocks import *
from typing import Final, Dict
__all__ = [
"EfficientNet",
"EfficientNet_b0",
"EfficientNet_b1",
"EfficientNet_b2",
"EfficientNet_b3",
"EfficientNet_b4",
"EfficientNet_b5",
"EfficientNet_b6",
"EfficientNet_b7",
]
MODEL_BLOCKS: Final[Dict] = {
# numLayer, input, output, expand ratio, kernel size, padding, stride, reduction ratio
# fmt: off
'MBConv1': [1, 32, 16, 1.0, 3, 1, 1, 4],
'MBConv6_1': [2, 16, 24, 6.0, 3, 1, 2, 4],
'MBConv6_2': [2, 24, 40, 6.0, 5, 2, 2, 4],
'MBConv6_3': [3, 40, 80, 6.0, 3, 1, 2, 4],
'MBConv6_4': [3, 80, 112, 6.0, 5, 2, 1, 4],
'MBConv6_5': [4, 112, 192, 6.0, 5, 2, 2, 4],
'MBConv6_6': [1, 192, 320, 6.0, 3, 1, 1, 4]
}
MODEL_TYPES: Final[Dict] = {
# width depth image size
# fmt: off
'b0': [1.0, 1.0, 224],
'b1': [1.0, 1.1, 240],
'b2': [1.1, 1.2, 260],
'b3': [1.2, 1.4, 300],
'b4': [1.4, 1.8, 380],
'b5': [1.6, 2.2, 456],
'b6': [1.8, 2.6, 528],
'b7': [2.0, 3.1, 600]
}
class EfficientNet(nn.Module):
    """EfficientNet B0..B7 classifier.

    The stage layout comes from the module-level MODEL_BLOCKS table and the
    per-variant width/depth multipliers from MODEL_TYPES.
    """

    def __init__(
        self,
        model_type: str,
        image_channels: int,
        num_classes: int,
        dropout_rate: float = 0.5,
    ) -> None:
        """
        Args:
            model_type: one of 'b0'..'b7'; selects the width/depth coefficients.
            image_channels: number of channels of the input image.
            num_classes: size of the classifier output.
            dropout_rate: dropout probability used in the classifier head.
        """
        super().__init__()
        layers = []
        Blockkeys = MODEL_BLOCKS.keys()
        model_types = MODEL_TYPES[model_type]
        self.width_coefficient = model_types[0]
        self.depth_coefficient = model_types[1]
        # Stem: 3x3 stride-2 convolution (width-scaled 32 channels).
        # NOTE(review): the trailing comma after this append() creates a
        # discarded one-element tuple; harmless, left as is.
        layers.append(
            ConvBlock(
                in_channels=image_channels,
                out_channels=self.multiply_width(32),
                kernel_size=3,
                stride=2,
                padding=1,
            )
        ),
        # One MBConv stage per MODEL_BLOCKS entry, with depth-scaled layer
        # counts and width-scaled channel dims.
        for key in Blockkeys:
            layers.append(
                MBConvBlock(
                    numLayer=self.multiply_depth(numLayer=MODEL_BLOCKS[key][0]),
                    dim=[
                        self.multiply_width(MODEL_BLOCKS[key][1]),
                        self.multiply_width(MODEL_BLOCKS[key][2]),
                    ],
                    factor=MODEL_BLOCKS[key][3],
                    kernel_size=MODEL_BLOCKS[key][4],
                    padding=MODEL_BLOCKS[key][5],
                    stride=MODEL_BLOCKS[key][6],
                    reduction_ratio=MODEL_BLOCKS[key][7],
                )
            )
        # Head: 1x1 convolution up to width-scaled 1280 channels.
        layers.append(
            ConvBlock(
                in_channels=self.multiply_width(MODEL_BLOCKS["MBConv6_6"][2]),
                out_channels=self.multiply_width(1280),
                kernel_size=1,
            )
        )
        # Global average pooling down to a 1x1 spatial map.
        layers.append(nn.AdaptiveAvgPool2d(1))
        self.feature_extractor = nn.Sequential(*layers)
        self.classifier = Classifier(
            in_features=self.multiply_width(1280),
            out_features=num_classes,
            dropout_rate=dropout_rate,
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Extract features, flatten, and return class logits."""
        x = self.feature_extractor(x)
        x = torch.flatten(x, 1)
        logits = self.classifier(x)
        return logits

    def multiply_width(self, dim: int) -> int:
        """Scale a channel count by the variant's width coefficient (ceil)."""
        return int(np.ceil(self.width_coefficient * dim))

    def multiply_depth(self, numLayer: int) -> int:
        """Scale a layer count by the variant's depth coefficient (ceil)."""
        return int(np.ceil(self.depth_coefficient * numLayer))
def EfficientNet_b0(
    image_channels: int, num_classes: int, dropout_rate: float = 0.5
) -> EfficientNet:
    """Build an EfficientNet-B0 (width 1.0, depth 1.0)."""
    return EfficientNet(
        model_type="b0",
        image_channels=image_channels,
        num_classes=num_classes,
        dropout_rate=dropout_rate,
    )


def EfficientNet_b1(
    image_channels: int, num_classes: int, dropout_rate: float = 0.5
) -> EfficientNet:
    """Build an EfficientNet-B1 (width 1.0, depth 1.1)."""
    return EfficientNet(
        model_type="b1",
        image_channels=image_channels,
        num_classes=num_classes,
        dropout_rate=dropout_rate,
    )


def EfficientNet_b2(
    image_channels: int, num_classes: int, dropout_rate: float = 0.5
) -> EfficientNet:
    """Build an EfficientNet-B2 (width 1.1, depth 1.2)."""
    return EfficientNet(
        model_type="b2",
        image_channels=image_channels,
        num_classes=num_classes,
        dropout_rate=dropout_rate,
    )


def EfficientNet_b3(
    image_channels: int, num_classes: int, dropout_rate: float = 0.5
) -> EfficientNet:
    """Build an EfficientNet-B3 (width 1.2, depth 1.4)."""
    return EfficientNet(
        model_type="b3",
        image_channels=image_channels,
        num_classes=num_classes,
        dropout_rate=dropout_rate,
    )


def EfficientNet_b4(
    image_channels: int, num_classes: int, dropout_rate: float = 0.5
) -> EfficientNet:
    """Build an EfficientNet-B4 (width 1.4, depth 1.8)."""
    return EfficientNet(
        model_type="b4",
        image_channels=image_channels,
        num_classes=num_classes,
        dropout_rate=dropout_rate,
    )


def EfficientNet_b5(
    image_channels: int, num_classes: int, dropout_rate: float = 0.5
) -> EfficientNet:
    """Build an EfficientNet-B5 (width 1.6, depth 2.2)."""
    return EfficientNet(
        model_type="b5",
        image_channels=image_channels,
        num_classes=num_classes,
        dropout_rate=dropout_rate,
    )


def EfficientNet_b6(
    image_channels: int, num_classes: int, dropout_rate: float = 0.5
) -> EfficientNet:
    """Build an EfficientNet-B6 (width 1.8, depth 2.6)."""
    return EfficientNet(
        model_type="b6",
        image_channels=image_channels,
        num_classes=num_classes,
        dropout_rate=dropout_rate,
    )


def EfficientNet_b7(
    image_channels: int, num_classes: int, dropout_rate: float = 0.5
) -> EfficientNet:
    """Build an EfficientNet-B7 (width 2.0, depth 3.1)."""
    return EfficientNet(
        model_type="b7",
        image_channels=image_channels,
        num_classes=num_classes,
        dropout_rate=dropout_rate,
    )
| 28.702381 | 90 | 0.59353 |
ace229976048d877879512279a83889999c83bb5 | 440 | py | Python | pieces/rook.py | Jone1/chessPyQt | 9c5b35ef7d617ba05033daa0e3380d95dfcfc39e | [
"MIT"
] | null | null | null | pieces/rook.py | Jone1/chessPyQt | 9c5b35ef7d617ba05033daa0e3380d95dfcfc39e | [
"MIT"
] | null | null | null | pieces/rook.py | Jone1/chessPyQt | 9c5b35ef7d617ba05033daa0e3380d95dfcfc39e | [
"MIT"
] | null | null | null | from pieces.piece import AbstractPiece
__author__ = 'Jone'
class Rook(AbstractPiece):
# 30 min
src_black = "D:/workspace/chessQt/chessQt/gfx/rb.png"
src_white = "D:/workspace/chessQt/chessQt/gfx/rw.png"
def __init__(self, x, y, color):
super(Rook, self).__init__(x, y, color)
def moveValidator(self, x, y):
if y != self.y and x != self.x:
return False
return self.isEmptyTo(x, y) | 25.882353 | 57 | 0.631818 |
ace22ad0a534cd83b95a1e33fae6cc0f706684ca | 1,244 | py | Python | pandaharvester/harvestertest/getJobs.py | tsulaiav/harvester | ca3f78348019dd616738f2da7d50e81700a8e6b9 | [
"Apache-2.0"
] | 11 | 2017-06-01T10:16:58.000Z | 2019-11-22T08:41:36.000Z | pandaharvester/harvestertest/getJobs.py | tsulaiav/harvester | ca3f78348019dd616738f2da7d50e81700a8e6b9 | [
"Apache-2.0"
] | 34 | 2016-10-25T19:15:24.000Z | 2021-03-05T12:59:04.000Z | pandaharvester/harvestertest/getJobs.py | tsulaiav/harvester | ca3f78348019dd616738f2da7d50e81700a8e6b9 | [
"Apache-2.0"
] | 17 | 2016-10-24T13:29:45.000Z | 2021-03-23T17:35:27.000Z | #
# This file is used to call the dbproxy and get the list of all jobs in the database
#
import os
import sys
import logging
from future.utils import iteritems
from pandaharvester.harvestercore.db_proxy_pool import DBProxyPool as DBProxy
from pandaharvester.harvestercore.queue_config_mapper import QueueConfigMapper
for loggerName, loggerObj in iteritems(logging.Logger.manager.loggerDict):
if loggerName.startswith('panda.log'):
if len(loggerObj.handlers) == 0:
continue
if loggerName.split('.')[-1] in ['db_proxy']:
continue
stdoutHandler = logging.StreamHandler(sys.stdout)
stdoutHandler.setFormatter(loggerObj.handlers[0].formatter)
loggerObj.addHandler(stdoutHandler)
queueName = sys.argv[1]
queueConfigMapper = QueueConfigMapper()
queueConfig = queueConfigMapper.get_queue(queueName)
proxy = DBProxy()
# get all jobs in table
print ('try to get all jobs')
alljobs = proxy.get_jobs()
print ('got {0} jobs'.format(len(alljobs)))
# loop over all found jobs
if len(alljobs) > 0 :
for jobSpec in alljobs:
print (' PandaID = %d status = %s subStatus = %s lockedBy = %s' %
(jobSpec.PandaID,jobSpec.status,jobSpec.subStatus,jobSpec.lockedBy))
| 31.897436 | 84 | 0.721865 |
ace22ae1cee11be1c69eb30d5101412391cb0f83 | 184 | py | Python | chibi_auth0/__init__.py | dem4ply/chibi_auth0 | c3bd8082a6977a4f2be31b1f8b54f7aeaae0d90b | [
"WTFPL"
] | null | null | null | chibi_auth0/__init__.py | dem4ply/chibi_auth0 | c3bd8082a6977a4f2be31b1f8b54f7aeaae0d90b | [
"WTFPL"
] | null | null | null | chibi_auth0/__init__.py | dem4ply/chibi_auth0 | c3bd8082a6977a4f2be31b1f8b54f7aeaae0d90b | [
"WTFPL"
] | null | null | null | # -*- coding: utf-8 -*-
from chibi_auth0.chibi_auth0 import Chibi_auth0
__author__ = """dem4ply"""
__email__ = 'dem4ply@gmail.com'
__version__ = '0.1.0'
__all__ = [ 'Chibi_auth0' ]
| 18.4 | 47 | 0.684783 |
ace22ba85da7bccc54a93eccd1f5b486f07bf53a | 10,232 | py | Python | tests/test_table.py | defgsus/elastipy | c1144ab39fa70571ba0e02ccf41d380a8a1bd730 | [
"Apache-2.0"
] | 1 | 2021-02-17T17:50:28.000Z | 2021-02-17T17:50:28.000Z | tests/test_table.py | defgsus/elastipy | c1144ab39fa70571ba0e02ccf41d380a8a1bd730 | [
"Apache-2.0"
] | 2 | 2021-03-29T02:09:41.000Z | 2022-03-01T20:09:48.000Z | tests/test_table.py | netzkolchose/elastipy | c1144ab39fa70571ba0e02ccf41d380a8a1bd730 | [
"Apache-2.0"
] | null | null | null | import unittest
import datetime
from decimal import Decimal
from elastipy.dump import Table
class TestTable(unittest.TestCase):
def assertTableStr(self, rows, expected_str, colors=False, ascii=True, **kwargs):
table = Table(rows)
expected_str = "\n".join(line.strip() for line in expected_str.splitlines()).strip()
table_str = table.to_str(colors=colors, ascii=ascii, **kwargs)
table_str = "\n".join(line.strip() for line in table_str.splitlines()).strip()
if table_str != expected_str:
expected_str = "\n".join(f"[{line}]" for line in expected_str.splitlines())
table_str = "\n".join(f"[{line}]" for line in table_str.splitlines())
raise AssertionError(
f"Table did not match.\n\n# Expected:\n{expected_str}\n\n# Got:\n{table_str}"
)
    def test_table(self):
        """Basic rendering: once with the header row, once without."""
        self.assertTableStr(
            [
                ["string", "number", "another"],
                ["a", 1, Decimal(10)],
                ["baccus", 2, Decimal(9)],
                ["cedelio", 3, Decimal(8)],
            ],
            """
            string | number | another
            --------+--------+--------
            a | 1 | 10
            baccus | 2 | 9
            cedelio | 3 | 8
            """,
            bars=False,
        )
        self.assertTableStr(
            [
                ["string", "number", "another"],
                ["a", 1, Decimal(10)],
                ["baccus", 2, Decimal(9)],
                ["cedelio", 3, Decimal(8)],
            ],
            """
            a | 1 | 10
            baccus | 2 | 9
            cedelio | 3 | 8
            """,
            bars=False,
            header=False,
        )
    def test_digits(self):
        """The ``digits`` option rounds float cells; non-floats are untouched."""
        table = [
            ["a", "b"],
            [0.12345678, 0.12],
            [0.98765432, "a"],
            [None, 9.87654]
        ]
        # default: full float precision is shown
        self.assertTableStr(
            table,
            """
            a | b
            -----------+--------
            0.12345678 | 0.12
            0.98765432 | a
            - | 9.87654
            """,
            bars=False,
        )
        # digits=3: floats are rounded to three decimal places
        self.assertTableStr(
            table,
            """
            a | b
            ------+------
            0.123 | 0.12
            0.988 | a
            - | 9.877
            """,
            bars=False,
            digits=3
        )
    def test_auto_max_width(self):
        """max_width=None falls back to an automatically determined width."""
        self.assertTableStr(
            [["a", "b"], [1, 2]],
            """
            a | b
            --+--
            1 | 2
            """,
            max_width=None
        )
def test_sort(self):
table = [
["string", "number", "date"],
["a", 3, datetime.date(2000, 1, 2)],
["c", 1, None],
["b", None, datetime.date(2000, 1, 3)],
[None, 2, datetime.date(2000, 1, 1)],
]
for sort_key in ("string", 0):
self.assertTableStr(
table,
"""
string | number | date
-------+--------+-----------
- | 2 | 2000-01-01
a | 3 | 2000-01-02
b | - | 2000-01-03
c | 1 | -
""",
bars=False,
sort=sort_key,
)
for sort_key in ("number", 1):
self.assertTableStr(
table,
"""
string | number | date
-------+--------+-----------
b | - | 2000-01-03
c | 1 | -
- | 2 | 2000-01-01
a | 3 | 2000-01-02
""",
bars=False,
sort=sort_key,
)
for sort_key in ("date", 2):
self.assertTableStr(
table,
"""
string | number | date
-------+--------+-----------
c | 1 | -
- | 2 | 2000-01-01
a | 3 | 2000-01-02
b | - | 2000-01-03
""",
bars=False,
sort=sort_key,
)
for sort_key in ("-date", -2):
self.assertTableStr(
table,
"""
string | number | date
-------+--------+-----------
b | - | 2000-01-03
a | 3 | 2000-01-02
- | 2 | 2000-01-01
c | 1 | -
""",
bars=False,
sort=sort_key,
)
def test_bars(self):
self.assertTableStr(
[
["a", "b", "cccc"],
["x", 1, Decimal(10)],
["yyy", 9, None],
["z", 30, 8.],
],
"""
a | b | cccc
----+------------+------------
x | 1 : | 10 #######
yyy | 9 ##: | -
z | 30 ####### | 8.0 :
""",
bars=True,
max_width=30,
zero=False,
)
def test_bars_maxwidth(self):
# cccc... has a long header so it get's extra space for bars
self.assertTableStr(
[
["a", "b", "ccccccccccccc"],
["x", 1, Decimal(10)],
["yyy", 9, 4],
["z", 30, 8.],
],
"""
a | b | ccccccccccccc
----+------+--------------
x | 1 | 10 #########
yyy | 9 . | 4 :
z | 30 # | 8.0 ######
""",
bars=True,
max_width=26,
zero=False,
)
# 'b' would only have space for the 'space' character not for a bar itself
# so the bar is removed and extra space given to cccc..
self.assertTableStr(
[
["a", "b", "ccccccccccccc"],
["x", 1, Decimal(10)],
["yyy", 9, 4],
["z", 30, 8.],
],
"""
a | b | ccccccccccccc
----+----+---------------
x | 1 | 10 ##########
yyy | 9 | 4 :
z | 30 | 8.0 ######:
""",
bars=True,
max_width=25,
zero=False,
)
def test_bars_no_space(self):
self.assertTableStr(
[
["a", "b"],
[0, 0],
[10, 10],
],
"""
a | b
---+---
0 | 0
10 | 10
""",
bars=True,
max_width=7,
)
def test_no_data(self):
self.assertTableStr(
[
["a", "b"],
],
"""
a | b
--+--
""",
bars=True,
)
def test_bars_zero_param(self):
for zero in (True, False):
self.assertTableStr(
[
["number"],
[0],
[5],
],
"""
number
--------------------
0 :
5 ##################
""",
max_width=20,
zero=True,
)
self.assertTableStr(
[
["number"],
[3],
[5],
],
"""
number
--------------------
3 ###########
5 ##################
""",
max_width=20,
zero=True,
)
self.assertTableStr(
[
["number"],
[3],
[5],
],
"""
number
--------------------
3 :
5 ##################
""",
max_width=20,
zero=False,
)
def test_bars_zero_param_neg(self):
for zero in (True, False):
self.assertTableStr(
[
["number"],
[-5],
[0],
[5],
],
"""
number
--------------------
-5 :
0 ########:
5 #################
""",
max_width=20,
zero=zero,
)
self.assertTableStr(
[
["number"],
[-5],
[0],
[5],
],
"""
number
--------------------
-5
0 :
5 #################
""",
max_width=20,
zero=0,
)
self.assertTableStr(
[
["number"],
[-5],
[0],
[5],
],
"""
number
--------------------
-5 ######
0 ###########:
5 #################
""",
max_width=20,
zero=-10,
)
def test_incomplete_rows(self):
self.assertTableStr(
[
["a", "b"],
[1, 2],
[3]
],
"""
a | b
--+--
1 | 2
3 | -
""",
bars=False,
)
self.assertTableStr(
[
{"a": 1, "b": 2},
{"a": 3},
{"b": 4},
{},
],
"""
a | b
--+--
1 | 2
3 | -
- | 4
- | -
""",
bars=False,
)
# Allow running this test module directly: `python test_table.py`.
if __name__ == "__main__":
    unittest.main()
| 25.838384 | 93 | 0.270622 |
ace22d0c5272363085d5d26418218c11c5731105 | 6,730 | py | Python | scripts/dart/report_coverage.py | opensource-assist/fuschia | 66646c55b3d0b36aae90a4b6706b87f1a6261935 | [
"BSD-3-Clause"
] | 10 | 2020-12-28T17:04:44.000Z | 2022-03-12T03:20:43.000Z | scripts/dart/report_coverage.py | opensource-assist/fuschia | 66646c55b3d0b36aae90a4b6706b87f1a6261935 | [
"BSD-3-Clause"
] | 1 | 2022-01-14T23:38:40.000Z | 2022-01-14T23:38:40.000Z | scripts/dart/report_coverage.py | opensource-assist/fuschia | 66646c55b3d0b36aae90a4b6706b87f1a6261935 | [
"BSD-3-Clause"
] | 4 | 2020-12-28T17:04:45.000Z | 2022-03-12T03:20:44.000Z | #!/usr/bin/env python2.7
# This program generates a combined coverage report for all host-side dart tests.
# See example_commands and arg help strings in ParseArgs() for usage.
#
# Implementation sketch:
# Search the host_tests directory for tests that use dart-tools/fuchsia_tester.
# Run each test with --coverage and --coverage-path.
# Combine the coverage data from each test into one.
# Generate an HTML report.
#
# This is all pretty hacky. Longer term efforts to make this more automatic and
# less hacky tracked by IN-427.
from __future__ import print_function
import argparse
import collections
import distutils.spawn
import glob
import os
from multiprocessing.pool import ThreadPool
import paths
import re
import subprocess
import sys
import tempfile
# Outcome of one leaf dart test run:
#   exit_code          -- process exit status of the test
#   coverage_data_path -- lcov tracefile the test wrote
#   package_dir        -- dart package root, used to absolutize SF: paths
TestResult = collections.namedtuple(
    'TestResult', ('exit_code', 'coverage_data_path', 'package_dir'))

# Sink for child-process output; tests and lcov/genhtml run quietly.
DEV_NULL = open('/dev/null', 'w')

# External binaries expected on PATH.
LCOV = 'lcov'
GENHTML = 'genhtml'
def ParseArgs():
    """Build the command-line parser and return the parsed arguments."""
    epilog_text = """
Examples:
$ report_coverage.py --report-dir /tmp/cov
$ report_coverage.py --test-patterns 'foo_*_test,bar_test' --report-dir ...
$ report_coverage.py --out-dir out/x64 --report-dir ...
"""
    parser = argparse.ArgumentParser(
        description='Generates a coverage report for dart tests',
        epilog=epilog_text,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # Destination directory for the generated HTML report.
    parser.add_argument(
        '--report-dir',
        required=True,
        type=str,
        help='Where to write the report. Will be created if needed')
    # Which host tests to run; globbed against the host_tests directory.
    parser.add_argument(
        '--test-patterns',
        default='*',
        type=str,
        help=('Comma-separated list of glob patterns to match against test file '
              'base names'))
    parser.add_argument('--out-dir', type=str, help='fuchsia build out dir')
    return parser.parse_args()
def OutDir(args):
    """Resolve the fuchsia build output directory.

    Resolution order: the --out-dir flag (made absolute against the fuchsia
    root if needed), then the FUCHSIA_BUILD_DIR environment variable, then
    the `.fx-build-dir` config file under the fuchsia root.  Returns None
    when no source yields a directory.  Exits if --out-dir is not a
    directory.
    """
    if args.out_dir:
        candidate = args.out_dir
        if not os.path.isabs(candidate):
            candidate = os.path.join(paths.FUCHSIA_ROOT, candidate)
        if not os.path.isdir(candidate):
            sys.exit(candidate + ' is not a directory')
        return candidate
    env_build_dir = os.environ.get('FUCHSIA_BUILD_DIR')
    if env_build_dir:
        return env_build_dir
    fuchsia_dir = os.environ.get('FUCHSIA_DIR', paths.FUCHSIA_ROOT)
    config_path = os.path.join(fuchsia_dir, '.fx-build-dir')
    if not os.path.isfile(config_path):
        return None
    config_text = open(config_path).read()
    return os.path.join(fuchsia_dir, config_text.strip())
class TestRunner(object):
    """Runs host-side dart test wrapper scripts with coverage enabled.

    This is all pretty hacky (see file header): wrapper scripts are parsed
    textually to find the "leaf" test executables they invoke.
    """

    def __init__(self, out_dir):
        # Fuchsia build output directory; leaf dart tests live under
        # <out_dir>/dartlang/gen/...
        self.out_dir = out_dir

    def RunTest(self, test_path):
        """Run every leaf dart test referenced by the wrapper at test_path.

        Returns a list of TestResult, one per leaf test that was actually a
        dart test (non-dart leaves are filtered out).
        """
        # This whole function is super hacky. Assumes implementation details
        # which are not meant to be public: test_path refers to a script
        # that executes other tests.
        leaf_test_paths = []
        test_lines = open(test_path, 'r').readlines()
        # We expect a script that starts with shebang.
        if not test_lines or not test_lines[0].startswith('#!'):
            return []
        for test_line in test_lines[1:]:  # Skip the shebang.
            test_line_parts = test_line.strip().split()
            if not test_line_parts:
                continue
            if os.path.join(self.out_dir, 'dartlang', 'gen') in test_line_parts[0]:
                leaf_test_paths.append(test_line_parts[0])
        results = [self._RunLeafTest(p) for p in leaf_test_paths]
        return [result for result in results if result]  # filter None

    def _RunLeafTest(self, test_path):
        """Run one leaf test with coverage; return a TestResult or None.

        Returns None when the leaf is not a dart test or produced no
        coverage data.
        """
        # FIX: initialize before the scan loop. Previously these were only
        # assigned inside matching branches, so a script without a
        # fuchsia_tester line raised UnboundLocalError instead of being
        # skipped.
        is_dart_test = False
        test_directory = None
        test_lines = open(test_path, 'r').readlines()
        for test_line in test_lines:
            test_line_parts = test_line.strip().split()
            if not test_line_parts:
                continue
            if test_line_parts[0].endswith('dart-tools/fuchsia_tester'):
                is_dart_test = True
            elif test_line_parts[0].startswith('--test-directory='):
                test_directory = test_line_parts[0].split('=')[1]
        if not is_dart_test:
            return None
        if not test_directory:
            raise ValueError('Failed to find --test-directory arg in %s' % test_path)
        # Collect coverage into a fresh temp file owned by this run.
        coverage_data_handle, coverage_data_path = tempfile.mkstemp()
        os.close(coverage_data_handle)
        exit_code = subprocess.call((
            test_path, '--coverage', '--coverage-path=%s' % coverage_data_path),
            stdout=DEV_NULL, stderr=DEV_NULL)
        if not os.stat(coverage_data_path).st_size:
            print('%s produced no coverage data' % os.path.basename(test_path),
                  file=sys.stderr)
            return None
        return TestResult(
            exit_code, coverage_data_path, os.path.dirname(test_directory))
def MakeRelativePathsAbsolute(test_result):
    """Change source-file paths from relative-to-the-package to absolute.

    Rewrites the lcov tracefile in place: every `SF:` entry is prefixed
    with the test's package directory.
    """
    absolute_prefix = 'SF:%s/' % test_result.package_dir
    with open(test_result.coverage_data_path, 'r+') as lcov_file:
        contents = lcov_file.read()
        # The replacement is always longer than the original, so rewinding
        # and rewriting fully overwrites the old contents.
        lcov_file.seek(0)
        lcov_file.write(contents.replace('SF:', absolute_prefix))
def CombineCoverageData(test_results):
    """Merge the per-test lcov tracefiles into a single tracefile.

    Invokes the external `lcov` binary once with one --add-tracefile per
    test result and returns the path of the merged file.
    """
    merged_handle, merged_path = tempfile.mkstemp()
    os.close(merged_handle)
    lcov_cmd = [LCOV, '--output-file', merged_path]
    for result in test_results:
        lcov_cmd += ['--add-tracefile', result.coverage_data_path]
    subprocess.check_call(lcov_cmd, stdout=DEV_NULL, stderr=DEV_NULL)
    return merged_path
def main():
    """Run all matching dart host tests and generate an HTML coverage report."""
    args = ParseArgs()
    out_dir = OutDir(args)
    if not out_dir:
        sys.exit('Couldn\'t find the output directory, pass --out-dir '
                 '(absolute or relative to Fuchsia root) or set FUCHSIA_BUILD_DIR.')
    if not (distutils.spawn.find_executable(LCOV) and
            distutils.spawn.find_executable(GENHTML)):
        sys.exit('\'lcov\' and \'genhtml\' must be installed and in the PATH')
    # Collect wrapper scripts matching the requested glob patterns.
    host_tests_dir = os.path.join(out_dir, 'host_tests')
    test_patterns = args.test_patterns.split(',')
    test_paths = []
    for test_pattern in test_patterns:
        test_paths.extend(glob.glob(os.path.join(host_tests_dir, test_pattern)))
    # Run the (I/O-bound) test wrappers in parallel.
    thread_pool = ThreadPool()
    test_runner = TestRunner(out_dir)
    results_lists = thread_pool.map(test_runner.RunTest, test_paths)
    # flatten
    results = [result for sublist in results_lists for result in sublist]
    if not results:
        sys.exit('Found no dart tests that produced coverage data')
    for result in results:
        if result.exit_code:
            # FIX: previously formatted an undefined name `test_path`,
            # raising NameError instead of reporting the failure. TestResult
            # does not carry the test path, so report what we know.
            sys.exit('A dart test failed with exit code %d' % result.exit_code)
    thread_pool.map(MakeRelativePathsAbsolute, results)
    combined_coverage_path = CombineCoverageData(results)
    # Render the merged tracefile as an HTML report.
    subprocess.check_call(
        (GENHTML, combined_coverage_path, '--output-directory', args.report_dir),
        stdout=DEV_NULL, stderr=DEV_NULL)
    print('Open file://%s to view the report' %
          os.path.join(os.path.abspath(args.report_dir), 'index.html'),
          file=sys.stderr)
# Script entry point.
if __name__ == '__main__':
    main()
| 34.690722 | 81 | 0.711144 |
ace22d30fefa274541a4c57c50fadd2d370fdf59 | 15,321 | py | Python | datadotworld/client/_swagger/models/linked_dataset_summary_response.py | DanialBetres/data.world-py | 0e3acf2be9a07c5ab62ecac9289eb662088d54c7 | [
"Apache-2.0"
] | 99 | 2017-01-23T16:24:18.000Z | 2022-03-30T22:51:58.000Z | datadotworld/client/_swagger/models/linked_dataset_summary_response.py | DanialBetres/data.world-py | 0e3acf2be9a07c5ab62ecac9289eb662088d54c7 | [
"Apache-2.0"
] | 77 | 2017-01-26T04:33:06.000Z | 2022-03-11T09:39:50.000Z | datadotworld/client/_swagger/models/linked_dataset_summary_response.py | DanialBetres/data.world-py | 0e3acf2be9a07c5ab62ecac9289eb662088d54c7 | [
"Apache-2.0"
] | 29 | 2017-01-25T16:55:23.000Z | 2022-01-31T01:44:15.000Z | # coding: utf-8
"""
data.world API
# data.world in a nutshell data.world is a productive, secure platform for modern data teamwork. We bring together your data practitioners, subject matter experts, and other stakeholders by removing costly barriers to data discovery, comprehension, integration, and sharing. Everything your team needs to quickly understand and use data stays with it. Social features and integrations encourage collaborators to ask and answer questions, share discoveries, and coordinate closely while still using their preferred tools. Our focus on interoperability helps you enhance your own data with data from any source, including our vast and growing library of free public datasets. Sophisticated permissions, auditing features, and more make it easy to manage who views your data and what they do with it. # Conventions ## Authentication All data.world API calls require an API token. OAuth2 is the preferred and most secure method for authenticating users of your data.world applications. Visit our [oauth documentation](https://apidocs.data.world/toolkit/oauth) for additional information. Alternatively, you can obtain a token for _personal use or testing_ by navigating to your profile settings, under the Advanced tab ([https://data.world/settings/advanced](https://data.world/settings/advanced)). Authentication must be provided in API requests via the `Authorization` header. For example, for a user whose API token is `my_api_token`, the request header should be `Authorization: Bearer my_api_token` (note the `Bearer` prefix). ## Content type By default, `application/json` is the content type used in request and response bodies. Exceptions are noted in respective endpoint documentation. ## HTTPS only Our APIs can only be accessed via HTTPS. # Interested in building data.world apps? Check out our [developer portal](https://apidocs.data.world) for tips on how to get started, tutorials, and to interact with the API endpoints right within your browser.
OpenAPI spec version: 0.21.0
Contact: help@data.world
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
# NOTE(review): swagger-codegen output — regenerate rather than hand-edit;
# comments below are for readers only.
class LinkedDatasetSummaryResponse(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared Swagger type.
    swagger_types = {
        'access_level': 'str',
        'created': 'str',
        'description': 'str',
        'id': 'str',
        'license': 'str',
        'owner': 'str',
        'summary': 'str',
        'tags': 'list[str]',
        'title': 'str',
        'updated': 'str',
        'version': 'str',
        'visibility': 'str'
    }

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'access_level': 'accessLevel',
        'created': 'created',
        'description': 'description',
        'id': 'id',
        'license': 'license',
        'owner': 'owner',
        'summary': 'summary',
        'tags': 'tags',
        'title': 'title',
        'updated': 'updated',
        'version': 'version',
        'visibility': 'visibility'
    }

    def __init__(self, access_level=None, created=None, description=None, id=None, license=None, owner=None, summary=None, tags=None, title=None, updated=None, version=None, visibility=None):
        """
        LinkedDatasetSummaryResponse - a model defined in Swagger

        Note: access_level, created, id, owner, title, updated and
        visibility are required (their setters raise ValueError on None).
        """
        self._access_level = None
        self._created = None
        self._description = None
        self._id = None
        self._license = None
        self._owner = None
        self._summary = None
        self._tags = None
        self._title = None
        self._updated = None
        self._version = None
        self._visibility = None

        self.access_level = access_level
        self.created = created
        if description is not None:
            self.description = description
        self.id = id
        if license is not None:
            self.license = license
        self.owner = owner
        if summary is not None:
            self.summary = summary
        if tags is not None:
            self.tags = tags
        self.title = title
        self.updated = updated
        if version is not None:
            self.version = version
        self.visibility = visibility

    @property
    def access_level(self):
        """
        Gets the access_level of this LinkedDatasetSummaryResponse.
        The level of access the authenticated user is allowed with respect to dataset: * `NONE` Not allowed any access. * `READ` Allowed to know that the dataset exists, view and download data and metadata. * `WRITE` Allowed to update data and metadata, in addition to what READ allows. * `ADMIN` Allowed to delete dataset, in addition to what WRITE allows.

        :return: The access_level of this LinkedDatasetSummaryResponse.
        :rtype: str
        """
        return self._access_level

    @access_level.setter
    def access_level(self, access_level):
        """
        Sets the access_level of this LinkedDatasetSummaryResponse.
        The level of access the authenticated user is allowed with respect to dataset: * `NONE` Not allowed any access. * `READ` Allowed to know that the dataset exists, view and download data and metadata. * `WRITE` Allowed to update data and metadata, in addition to what READ allows. * `ADMIN` Allowed to delete dataset, in addition to what WRITE allows.

        :param access_level: The access_level of this LinkedDatasetSummaryResponse.
        :type: str
        """
        if access_level is None:
            raise ValueError("Invalid value for `access_level`, must not be `None`")

        self._access_level = access_level

    @property
    def created(self):
        """
        Gets the created of this LinkedDatasetSummaryResponse.
        Date and time when the dataset was created.

        :return: The created of this LinkedDatasetSummaryResponse.
        :rtype: str
        """
        return self._created

    @created.setter
    def created(self, created):
        """
        Sets the created of this LinkedDatasetSummaryResponse.
        Date and time when the dataset was created.

        :param created: The created of this LinkedDatasetSummaryResponse.
        :type: str
        """
        if created is None:
            raise ValueError("Invalid value for `created`, must not be `None`")

        self._created = created

    @property
    def description(self):
        """
        Gets the description of this LinkedDatasetSummaryResponse.
        Short dataset description.

        :return: The description of this LinkedDatasetSummaryResponse.
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """
        Sets the description of this LinkedDatasetSummaryResponse.
        Short dataset description.

        :param description: The description of this LinkedDatasetSummaryResponse.
        :type: str
        """
        self._description = description

    @property
    def id(self):
        """
        Gets the id of this LinkedDatasetSummaryResponse.
        Unique identifier of dataset.

        :return: The id of this LinkedDatasetSummaryResponse.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Sets the id of this LinkedDatasetSummaryResponse.
        Unique identifier of dataset.

        :param id: The id of this LinkedDatasetSummaryResponse.
        :type: str
        """
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`")

        self._id = id

    @property
    def license(self):
        """
        Gets the license of this LinkedDatasetSummaryResponse.
        Dataset license. Find additional info for allowed values [here](https://data.world/license-help).

        :return: The license of this LinkedDatasetSummaryResponse.
        :rtype: str
        """
        return self._license

    @license.setter
    def license(self, license):
        """
        Sets the license of this LinkedDatasetSummaryResponse.
        Dataset license. Find additional info for allowed values [here](https://data.world/license-help).

        :param license: The license of this LinkedDatasetSummaryResponse.
        :type: str
        """
        self._license = license

    @property
    def owner(self):
        """
        Gets the owner of this LinkedDatasetSummaryResponse.
        User name and unique identifier of the creator of the dataset.

        :return: The owner of this LinkedDatasetSummaryResponse.
        :rtype: str
        """
        return self._owner

    @owner.setter
    def owner(self, owner):
        """
        Sets the owner of this LinkedDatasetSummaryResponse.
        User name and unique identifier of the creator of the dataset.

        :param owner: The owner of this LinkedDatasetSummaryResponse.
        :type: str
        """
        if owner is None:
            raise ValueError("Invalid value for `owner`, must not be `None`")

        self._owner = owner

    @property
    def summary(self):
        """
        Gets the summary of this LinkedDatasetSummaryResponse.
        Long-form dataset summary (Markdown supported).

        :return: The summary of this LinkedDatasetSummaryResponse.
        :rtype: str
        """
        return self._summary

    @summary.setter
    def summary(self, summary):
        """
        Sets the summary of this LinkedDatasetSummaryResponse.
        Long-form dataset summary (Markdown supported).

        :param summary: The summary of this LinkedDatasetSummaryResponse.
        :type: str
        """
        self._summary = summary

    @property
    def tags(self):
        """
        Gets the tags of this LinkedDatasetSummaryResponse.
        Dataset tags. Letters numbers and spaces only (max 25 characters).

        :return: The tags of this LinkedDatasetSummaryResponse.
        :rtype: list[str]
        """
        return self._tags

    @tags.setter
    def tags(self, tags):
        """
        Sets the tags of this LinkedDatasetSummaryResponse.
        Dataset tags. Letters numbers and spaces only (max 25 characters).

        :param tags: The tags of this LinkedDatasetSummaryResponse.
        :type: list[str]
        """
        self._tags = tags

    @property
    def title(self):
        """
        Gets the title of this LinkedDatasetSummaryResponse.
        Dataset name.

        :return: The title of this LinkedDatasetSummaryResponse.
        :rtype: str
        """
        return self._title

    @title.setter
    def title(self, title):
        """
        Sets the title of this LinkedDatasetSummaryResponse.
        Dataset name.

        :param title: The title of this LinkedDatasetSummaryResponse.
        :type: str
        """
        if title is None:
            raise ValueError("Invalid value for `title`, must not be `None`")

        self._title = title

    @property
    def updated(self):
        """
        Gets the updated of this LinkedDatasetSummaryResponse.
        Date and time when the dataset was last updated.

        :return: The updated of this LinkedDatasetSummaryResponse.
        :rtype: str
        """
        return self._updated

    @updated.setter
    def updated(self, updated):
        """
        Sets the updated of this LinkedDatasetSummaryResponse.
        Date and time when the dataset was last updated.

        :param updated: The updated of this LinkedDatasetSummaryResponse.
        :type: str
        """
        if updated is None:
            raise ValueError("Invalid value for `updated`, must not be `None`")

        self._updated = updated

    @property
    def version(self):
        """
        Gets the version of this LinkedDatasetSummaryResponse.
        Dataset version

        :return: The version of this LinkedDatasetSummaryResponse.
        :rtype: str
        """
        return self._version

    @version.setter
    def version(self, version):
        """
        Sets the version of this LinkedDatasetSummaryResponse.
        Dataset version

        :param version: The version of this LinkedDatasetSummaryResponse.
        :type: str
        """
        self._version = version

    @property
    def visibility(self):
        """
        Gets the visibility of this LinkedDatasetSummaryResponse.
        Dataset visibility. `OPEN` if the dataset can be seen by any member of data.world. `PRIVATE` if the dataset can be seen by its owner and authorized collaborators. `DISCOVERABLE` if the dataset can be seen by any member of data.world, but only files marked `sample` or `preview` are visible

        :return: The visibility of this LinkedDatasetSummaryResponse.
        :rtype: str
        """
        return self._visibility

    @visibility.setter
    def visibility(self, visibility):
        """
        Sets the visibility of this LinkedDatasetSummaryResponse.
        Dataset visibility. `OPEN` if the dataset can be seen by any member of data.world. `PRIVATE` if the dataset can be seen by its owner and authorized collaborators. `DISCOVERABLE` if the dataset can be seen by any member of data.world, but only files marked `sample` or `preview` are visible

        :param visibility: The visibility of this LinkedDatasetSummaryResponse.
        :type: str
        """
        if visibility is None:
            raise ValueError("Invalid value for `visibility`, must not be `None`")

        self._visibility = visibility

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Recursively convert nested models / lists / dicts of models.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, LinkedDatasetSummaryResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 34.741497 | 1,984 | 0.629724 |
ace22dc0348c4b4fbf895b831aa24c236b1263a9 | 2,205 | py | Python | tests/importer/onnx_/preprocess/test_letterbox2.py | xhuohai/nncase | cf7921c273c7446090939c64f57ef783a62bf29c | [
"Apache-2.0"
] | null | null | null | tests/importer/onnx_/preprocess/test_letterbox2.py | xhuohai/nncase | cf7921c273c7446090939c64f57ef783a62bf29c | [
"Apache-2.0"
] | null | null | null | tests/importer/onnx_/preprocess/test_letterbox2.py | xhuohai/nncase | cf7921c273c7446090939c64f57ef783a62bf29c | [
"Apache-2.0"
] | null | null | null | # Copyright 2019-2021 Canaan Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
import pytest
import torch
import numpy as np
from onnx_test_runner import OnnxTestRunner
def _make_module(v_shape):
class BinaryModule(torch.nn.Module):
def __init__(self):
super(BinaryModule, self).__init__()
self.v = torch.from_numpy(np.ones_like(*v_shape).astype(np.uint8))
def forward(self, x):
x = torch.add(x, self.v)
return x
return BinaryModule()
# NCHW input shapes exercised by the letterbox preprocessing test.
lhs_shapes = [
    [1, 3, 28, 32],
    [1, 3, 56, 56],
    [1, 3, 76, 80]
]

# Shape template for the constant operand added by the test module.
rhs_shapes = [
    [1]
]
@pytest.mark.parametrize('lhs_shape', lhs_shapes)
@pytest.mark.parametrize('rhs_shape', rhs_shapes)
def test_letterbox2(lhs_shape, rhs_shape, request):
    """Export the add-constant module to ONNX and run it through the nncase
    test runner with uint8 letterbox preprocessing options enabled."""
    module = _make_module(rhs_shape)
    # Per-case override of the runner's preprocessing configuration.
    # NOTE(review): YAML indentation reconstructed to the usual nncase
    # test-config layout — confirm against upstream.
    overwrite_cfg = """
case:
  preprocess_opt:
    - name: preprocess
      values:
        - true
    - name: swapRB
      values:
        - false
    - name: input_shape
      values:
        - [1,56,56,3]
    - name: mean
      values:
        - [0,0,0]
    - name: std
      values:
        - [1,1,1]
    - name: input_range
      values:
        - [0,255]
    - name: input_type
      values:
        - uint8
    - name: input_layout
      values:
        - NHWC
    - name: output_layout
      values:
        - NHWC
    - name: letterbox_value
      values:
        - 114.
"""
    runner = OnnxTestRunner(request.node.name, overwrite_configs=overwrite_cfg)
    model_file = runner.from_torch(module, lhs_shape)
    runner.run(model_file)
# Allow running this test file directly with pytest.
if __name__ == "__main__":
    pytest.main(['-vv', 'test_letterbox2.py'])
| 23.967391 | 79 | 0.639002 |
ace22e08f66a0df91dc5f19faa09d7f42c844cb1 | 1,060 | py | Python | dsa/patterns/breadth_first_search/right_view_of_binary_tree.py | bksahu/dsa | 4b36abbb3e00ce449c435c44260316f46d6d35ec | [
"MIT"
] | null | null | null | dsa/patterns/breadth_first_search/right_view_of_binary_tree.py | bksahu/dsa | 4b36abbb3e00ce449c435c44260316f46d6d35ec | [
"MIT"
] | 4 | 2019-10-02T14:24:54.000Z | 2020-03-26T07:06:15.000Z | dsa/patterns/breadth_first_search/right_view_of_binary_tree.py | bksahu/dsa | 4b36abbb3e00ce449c435c44260316f46d6d35ec | [
"MIT"
] | 2 | 2019-10-02T15:57:51.000Z | 2020-04-10T07:22:06.000Z | """
Given a binary tree, return an array containing nodes in its right view. The right view of a binary tree is the set of nodes visible when the tree is seen from the right side.
"""
from collections import deque
class TreeNode:
    """A binary-tree node holding a value and two (initially empty) children."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
def tree_right_view(root):
    """Return the nodes visible when the tree is viewed from the right side.

    Performs a level-order (BFS) traversal and records the rightmost node
    of each level, top to bottom.

    :param root: root TreeNode of the tree, or None for an empty tree.
    :return: list of nodes, one per level.
    """
    result = []
    if not root:
        return result
    queue = deque([root])
    while queue:
        # The tail of the queue is the rightmost node of the current level.
        # (Removed a redundant `if queue:` check — the loop condition
        # already guarantees the queue is non-empty here.)
        result.append(queue[-1])
        for _ in range(len(queue)):
            node = queue.popleft()
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
    return result
if __name__ == "__main__":
    # Demo tree:
    #         12
    #        /  \
    #       7    1
    #      /    / \
    #     9   10   5
    #    /
    #   3
    root = TreeNode(12)
    root.left = TreeNode(7)
    root.right = TreeNode(1)
    root.left.left = TreeNode(9)
    root.right.left = TreeNode(10)
    root.right.right = TreeNode(5)
    root.left.left.left = TreeNode(3)

    result = tree_right_view(root)
    # Expected output: 12 1 5 3
    print("Tree right view: ")
    for node in result:
        print(str(node.val) + " ", end='')
| 25.853659 | 175 | 0.625472 |
ace22f0a43fd455fe1c68a99cd1b5625cdf7c410 | 31,712 | py | Python | third_party/python/Lib/socket.py | appotry/cosmopolitan | af4687cc3f2331a23dc336183ab58fe001cda082 | [
"ISC"
] | null | null | null | third_party/python/Lib/socket.py | appotry/cosmopolitan | af4687cc3f2331a23dc336183ab58fe001cda082 | [
"ISC"
] | null | null | null | third_party/python/Lib/socket.py | appotry/cosmopolitan | af4687cc3f2331a23dc336183ab58fe001cda082 | [
"ISC"
] | null | null | null | # Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
fromshare() -- create a socket object from data received from socket.share() [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
IntEnum constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Integer constants:
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
import os, sys, io, selectors
from enum import IntEnum, IntFlag
# errno may be unavailable on some minimal builds; fall back to the
# hard-coded POSIX values for the few codes this module uses.
try:
    import errno
except ImportError:
    errno = None
EBADF = getattr(errno, 'EBADF', 9)
EAGAIN = getattr(errno, 'EAGAIN', 11)
EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
# Public API: the names defined here plus everything _socket exports.
__all__ = ["fromfd", "getfqdn", "create_connection",
           "AddressFamily", "SocketKind"]
__all__.extend(os._get_exports_list(_socket))

# Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for
# nicer string representations.
# Note that _socket only knows about the integer values. The public interface
# in this module understands the enums and translates them back from integers
# where needed (e.g. .family property of a socket object).
IntEnum._convert(
    'AddressFamily',
    __name__,
    lambda C: C.isupper() and C.startswith('AF_'))

IntEnum._convert(
    'SocketKind',
    __name__,
    lambda C: C.isupper() and C.startswith('SOCK_'))

IntFlag._convert(
    'MsgFlag',
    __name__,
    lambda C: C.isupper() and C.startswith('MSG_'))

IntFlag._convert(
    'AddressInfo',
    __name__,
    lambda C: C.isupper() and C.startswith('AI_'))

# Loopback addresses used by the helper functions in this module.
_LOCALHOST = '127.0.0.1'
_LOCALHOST_V6 = '::1'
def _intenum_converter(value, enum_klass):
"""Convert a numeric family value to an IntEnum member.
If it's not a known member, return the numeric value itself.
"""
try:
return enum_klass(value)
except ValueError:
return value
# Keep a reference to the C-level socket class before it is shadowed by the
# Python subclass defined later in this module.
_realsocket = socket

# WSA error codes
# Human-readable messages for Windows Winsock error numbers; exported as
# socket.errorTab on Windows only.
if sys.platform.lower().startswith("win"):
    errorTab = {}
    errorTab[10004] = "The operation was interrupted."
    errorTab[10009] = "A bad file handle was passed."
    errorTab[10013] = "Permission denied."
    errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
    errorTab[10022] = "An invalid operation was attempted."
    errorTab[10035] = "The socket operation would block"
    errorTab[10036] = "A blocking operation is already in progress."
    errorTab[10048] = "The network address is in use."
    errorTab[10054] = "The connection has been reset."
    errorTab[10058] = "The network has been shut down."
    errorTab[10060] = "The operation timed out."
    errorTab[10061] = "Connection refused."
    errorTab[10063] = "The name is too long."
    errorTab[10064] = "The host is down."
    errorTab[10065] = "The host is unreachable."
    __all__.append("errorTab")
class _GiveupOnSendfile(Exception): pass
class socket(_socket.socket):
    """A subclass of _socket.socket adding the makefile() method."""
    # _io_refs counts the live SocketIO wrappers handed out by makefile();
    # close() defers the real close of the fd until the count reaches zero.
    __slots__ = ["__weakref__", "_io_refs", "_closed"]
    def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None):
        # For user code address family and type values are IntEnum members, but
        # for the underlying _socket.socket they're just integers. The
        # constructor of _socket.socket converts the given argument to an
        # integer automatically.
        _socket.socket.__init__(self, family, type, proto, fileno)
        self._io_refs = 0
        self._closed = False
    def __enter__(self):
        return self
    def __exit__(self, *args):
        if not self._closed:
            self.close()
    def __repr__(self):
        """Wrap __repr__() to reveal the real class name and socket
        address(es).
        """
        closed = getattr(self, '_closed', False)
        s = "<%s.%s%s fd=%i, family=%s, type=%s, proto=%i" \
            % (self.__class__.__module__,
               self.__class__.__qualname__,
               " [closed]" if closed else "",
               self.fileno(),
               self.family,
               self.type,
               self.proto)
        if not closed:
            # getsockname()/getpeername() raise on unbound/unconnected
            # sockets; the repr simply omits those fields then.
            try:
                laddr = self.getsockname()
                if laddr:
                    s += ", laddr=%s" % str(laddr)
            except error:
                pass
            try:
                raddr = self.getpeername()
                if raddr:
                    s += ", raddr=%s" % str(raddr)
            except error:
                pass
        s += '>'
        return s
    def __getstate__(self):
        # Sockets wrap kernel resources and cannot meaningfully be pickled.
        raise TypeError("Cannot serialize socket object")
    def dup(self):
        """dup() -> socket object
        Duplicate the socket. Return a new socket object connected to the same
        system resource. The new socket is non-inheritable.
        """
        fd = dup(self.fileno())
        sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
        # The duplicate keeps the original's timeout setting.
        sock.settimeout(self.gettimeout())
        return sock
    def accept(self):
        """accept() -> (socket object, address info)
        Wait for an incoming connection. Return a new socket
        representing the connection, and the address of the client.
        For IP sockets, the address info is a pair (hostaddr, port).
        """
        fd, addr = self._accept()
        # If our type has the SOCK_NONBLOCK flag, we shouldn't pass it onto the
        # new socket. We do not currently allow passing SOCK_NONBLOCK to
        # accept4, so the returned socket is always blocking.
        type = self.type & ~globals().get("SOCK_NONBLOCK", 0)
        sock = socket(self.family, type, self.proto, fileno=fd)
        # Issue #7995: if no default timeout is set and the listening
        # socket had a (non-zero) timeout, force the new socket in blocking
        # mode to override platform-specific socket flags inheritance.
        if getdefaulttimeout() is None and self.gettimeout():
            sock.setblocking(True)
        return sock, addr
    def makefile(self, mode="r", buffering=None, *,
                 encoding=None, errors=None, newline=None):
        """makefile(...) -> an I/O stream connected to the socket
        The arguments are as for io.open() after the filename, except the only
        supported mode values are 'r' (default), 'w' and 'b'.
        """
        # XXX refactor to share code?
        if not set(mode) <= {"r", "w", "b"}:
            raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
        writing = "w" in mode
        # Plain "b" (no r/w) means reading, matching io.open()'s default.
        reading = "r" in mode or not writing
        assert reading or writing
        binary = "b" in mode
        rawmode = ""
        if reading:
            rawmode += "r"
        if writing:
            rawmode += "w"
        raw = SocketIO(self, rawmode)
        # Track the wrapper so close() can defer closing the real fd.
        self._io_refs += 1
        if buffering is None:
            buffering = -1
        if buffering < 0:
            buffering = io.DEFAULT_BUFFER_SIZE
        if buffering == 0:
            if not binary:
                raise ValueError("unbuffered streams must be binary")
            return raw
        if reading and writing:
            buffer = io.BufferedRWPair(raw, raw, buffering)
        elif reading:
            buffer = io.BufferedReader(raw, buffering)
        else:
            assert writing
            buffer = io.BufferedWriter(raw, buffering)
        if binary:
            return buffer
        text = io.TextIOWrapper(buffer, encoding, errors, newline)
        text.mode = mode
        return text
    if hasattr(os, 'sendfile'):
        def _sendfile_use_sendfile(self, file, offset=0, count=None):
            # Zero-copy path: stream *file* to the socket via os.sendfile().
            # Raises _GiveupOnSendfile when the file/socket combination is
            # unsupported so sendfile() can fall back to _sendfile_use_send().
            self._check_sendfile_params(file, offset, count)
            sockno = self.fileno()
            try:
                fileno = file.fileno()
            except (AttributeError, io.UnsupportedOperation) as err:
                raise _GiveupOnSendfile(err)  # not a regular file
            try:
                fsize = os.fstat(fileno).st_size
            except OSError as err:
                raise _GiveupOnSendfile(err)  # not a regular file
            if not fsize:
                return 0  # empty file
            blocksize = fsize if not count else count
            timeout = self.gettimeout()
            if timeout == 0:
                raise ValueError("non-blocking sockets are not supported")
            # poll/select have the advantage of not requiring any
            # extra file descriptor, contrarily to epoll/kqueue
            # (also, they require a single syscall).
            if hasattr(selectors, 'PollSelector'):
                selector = selectors.PollSelector()
            else:
                selector = selectors.SelectSelector()
            selector.register(sockno, selectors.EVENT_WRITE)
            total_sent = 0
            # localize variable access to minimize overhead
            selector_select = selector.select
            os_sendfile = os.sendfile
            try:
                while True:
                    if timeout and not selector_select(timeout):
                        raise _socket.timeout('timed out')
                    if count:
                        blocksize = count - total_sent
                        if blocksize <= 0:
                            break
                    try:
                        sent = os_sendfile(sockno, fileno, offset, blocksize)
                    except BlockingIOError:
                        if not timeout:
                            # Block until the socket is ready to send some
                            # data; avoids hogging CPU resources.
                            selector_select()
                        continue
                    except OSError as err:
                        if total_sent == 0:
                            # We can get here for different reasons, the main
                            # one being 'file' is not a regular mmap(2)-like
                            # file, in which case we'll fall back on using
                            # plain send().
                            raise _GiveupOnSendfile(err)
                        raise err from None
                    else:
                        if sent == 0:
                            break  # EOF
                        offset += sent
                        total_sent += sent
                return total_sent
            finally:
                # Keep file.tell() consistent with the bytes actually sent,
                # even when an error interrupted the transfer.
                if total_sent > 0 and hasattr(file, 'seek'):
                    file.seek(offset)
    else:
        def _sendfile_use_sendfile(self, file, offset=0, count=None):
            raise _GiveupOnSendfile(
                "os.sendfile() not available on this platform")
    def _sendfile_use_send(self, file, offset=0, count=None):
        # Portable fallback: read the file in chunks and push them through
        # send(), retrying partial sends until each chunk is fully written.
        self._check_sendfile_params(file, offset, count)
        if self.gettimeout() == 0:
            raise ValueError("non-blocking sockets are not supported")
        if offset:
            file.seek(offset)
        blocksize = min(count, 8192) if count else 8192
        total_sent = 0
        # localize variable access to minimize overhead
        file_read = file.read
        sock_send = self.send
        try:
            while True:
                if count:
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        break
                data = memoryview(file_read(blocksize))
                if not data:
                    break  # EOF
                while True:
                    try:
                        sent = sock_send(data)
                    except BlockingIOError:
                        continue
                    else:
                        total_sent += sent
                        if sent < len(data):
                            # Partial send: retry with the remainder only.
                            data = data[sent:]
                        else:
                            break
            return total_sent
        finally:
            if total_sent > 0 and hasattr(file, 'seek'):
                file.seek(offset + total_sent)
    def _check_sendfile_params(self, file, offset, count):
        if 'b' not in getattr(file, 'mode', 'b'):
            raise ValueError("file should be opened in binary mode")
        if not self.type & SOCK_STREAM:
            raise ValueError("only SOCK_STREAM type sockets are supported")
        if count is not None:
            if not isinstance(count, int):
                raise TypeError(
                    "count must be a positive integer (got {!r})".format(count))
            if count <= 0:
                raise ValueError(
                    "count must be a positive integer (got {!r})".format(count))
    def sendfile(self, file, offset=0, count=None):
        """sendfile(file[, offset[, count]]) -> sent
        Send a file until EOF is reached by using high-performance
        os.sendfile() and return the total number of bytes which
        were sent.
        *file* must be a regular file object opened in binary mode.
        If os.sendfile() is not available (e.g. Windows) or file is
        not a regular file socket.send() will be used instead.
        *offset* tells from where to start reading the file.
        If specified, *count* is the total number of bytes to transmit
        as opposed to sending the file until EOF is reached.
        File position is updated on return or also in case of error in
        which case file.tell() can be used to figure out the number of
        bytes which were sent.
        The socket must be of SOCK_STREAM type.
        Non-blocking sockets are not supported.
        """
        try:
            return self._sendfile_use_sendfile(file, offset, count)
        except _GiveupOnSendfile:
            return self._sendfile_use_send(file, offset, count)
    def _decref_socketios(self):
        # Called by SocketIO.close(): drop one makefile() reference and
        # perform the deferred close() once the last wrapper is gone.
        if self._io_refs > 0:
            self._io_refs -= 1
        if self._closed:
            self.close()
    def _real_close(self, _ss=_socket.socket):
        # This function should not reference any globals. See issue #808164.
        _ss.close(self)
    def close(self):
        # This function should not reference any globals. See issue #808164.
        self._closed = True
        # Only close the fd now if no SocketIO wrappers are still alive;
        # otherwise _decref_socketios() finishes the job later.
        if self._io_refs <= 0:
            self._real_close()
    def detach(self):
        """detach() -> file descriptor
        Close the socket object without closing the underlying file descriptor.
        The object cannot be used after this call, but the file descriptor
        can be reused for other purposes. The file descriptor is returned.
        """
        self._closed = True
        return super().detach()
    @property
    def family(self):
        """Read-only access to the address family for this socket.
        """
        return _intenum_converter(super().family, AddressFamily)
    @property
    def type(self):
        """Read-only access to the socket type.
        """
        return _intenum_converter(super().type, SocketKind)
    # Windows exposes inheritance through the OS handle, POSIX through the fd.
    if os.name == 'nt':
        def get_inheritable(self):
            return os.get_handle_inheritable(self.fileno())
        def set_inheritable(self, inheritable):
            os.set_handle_inheritable(self.fileno(), inheritable)
    else:
        def get_inheritable(self):
            return os.get_inheritable(self.fileno())
        def set_inheritable(self, inheritable):
            os.set_inheritable(self.fileno(), inheritable)
    get_inheritable.__doc__ = "Get the inheritable flag of the socket"
    set_inheritable.__doc__ = "Set the inheritable flag of the socket"
def fromfd(fd, family, type, proto=0):
    """ fromfd(fd, family, type[, proto]) -> socket object
    Duplicate the given file descriptor and wrap the copy in a new socket
    object; the caller's descriptor is left untouched.  The remaining
    arguments are the same as for socket().
    """
    return socket(family, type, proto, dup(fd))
# fromshare() only exists where the C socket type supports share()
# (Windows); it is appended to __all__ conditionally for that reason.
if hasattr(_socket.socket, "share"):
    def fromshare(info):
        """ fromshare(info) -> socket object
        Create a socket object from the bytes object returned by
        socket.share(pid).
        """
        # family/type/proto are encoded inside *info*, hence the zeros.
        return socket(0, 0, 0, info)
    __all__.append("fromshare")
# Prefer the platform's native socketpair(); otherwise emulate it with a
# loopback TCP listen/connect/accept dance (typically needed on Windows).
if hasattr(_socket, "socketpair"):
    def socketpair(family=None, type=SOCK_STREAM, proto=0):
        """socketpair([family[, type[, proto]]]) -> (socket object, socket object)
        Create a pair of socket objects from the sockets returned by the platform
        socketpair() function.
        The arguments are the same as for socket() except the default family is
        AF_UNIX if defined on the platform; otherwise, the default is AF_INET.
        """
        if family is None:
            try:
                family = AF_UNIX
            except NameError:
                family = AF_INET
        a, b = _socket.socketpair(family, type, proto)
        # Re-wrap the raw sockets in this module's socket subclass.
        a = socket(family, type, proto, a.detach())
        b = socket(family, type, proto, b.detach())
        return a, b
else:
    # Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain.
    def socketpair(family=AF_INET, type=SOCK_STREAM, proto=0):
        if family == AF_INET:
            host = _LOCALHOST
        elif family == AF_INET6:
            host = _LOCALHOST_V6
        else:
            raise ValueError("Only AF_INET and AF_INET6 socket address families "
                             "are supported")
        if type != SOCK_STREAM:
            raise ValueError("Only SOCK_STREAM socket type is supported")
        if proto != 0:
            raise ValueError("Only protocol zero is supported")
        # We create a connected TCP socket. Note the trick with
        # setblocking(False) that prevents us from having to create a thread.
        lsock = socket(family, type, proto)
        try:
            # Bind to an ephemeral port on loopback and listen.
            lsock.bind((host, 0))
            lsock.listen()
            # On IPv6, ignore flow_info and scope_id
            addr, port = lsock.getsockname()[:2]
            csock = socket(family, type, proto)
            try:
                csock.setblocking(False)
                try:
                    csock.connect((addr, port))
                except (BlockingIOError, InterruptedError):
                    # Expected: the non-blocking connect completes when
                    # accept() below picks it up.
                    pass
                csock.setblocking(True)
                ssock, _ = lsock.accept()
            except:
                csock.close()
                raise
        finally:
            lsock.close()
        return (ssock, csock)
    __all__.append("socketpair")
socketpair.__doc__ = """socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is AF_UNIX
if defined on the platform; otherwise, the default is AF_INET.
"""
# errno values that mean "would block" on a non-blocking socket; SocketIO
# translates these into a None return instead of raising.
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
class SocketIO(io.RawIOBase):
    """Raw I/O implementation for stream sockets.
    This class supports the makefile() method on sockets. It provides
    the raw I/O interface on top of a socket object.
    """
    # One might wonder why not let FileIO do the job instead. There are two
    # main reasons why FileIO is not adapted:
    # - it wouldn't work under Windows (where you can't used read() and
    #   write() on a socket handle)
    # - it wouldn't work with socket timeouts (FileIO would ignore the
    #   timeout and consider the socket non-blocking)
    # XXX More docs
    def __init__(self, sock, mode):
        if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
            raise ValueError("invalid mode: %r" % mode)
        io.RawIOBase.__init__(self)
        self._sock = sock
        # Normalize to a binary mode string; SocketIO is always binary.
        if "b" not in mode:
            mode += "b"
        self._mode = mode
        self._reading = "r" in mode
        self._writing = "w" in mode
        self._timeout_occurred = False
    def readinto(self, b):
        """Read up to len(b) bytes into the writable buffer *b* and return
        the number of bytes read. If the socket is non-blocking and no bytes
        are available, None is returned.
        If *b* is non-empty, a 0 return value indicates that the connection
        was shutdown at the other end.
        """
        self._checkClosed()
        self._checkReadable()
        # Once a timeout fires, the stream may have lost data; refuse to
        # keep reading from it.
        if self._timeout_occurred:
            raise OSError("cannot read from timed out object")
        while True:
            try:
                return self._sock.recv_into(b)
            except timeout:
                self._timeout_occurred = True
                raise
            except error as e:
                # "Would block" on a non-blocking socket -> None, per the
                # RawIOBase contract.
                if e.args[0] in _blocking_errnos:
                    return None
                raise
    def write(self, b):
        """Write the given bytes or bytearray object *b* to the socket
        and return the number of bytes written. This can be less than
        len(b) if not all data could be written. If the socket is
        non-blocking and no bytes could be written None is returned.
        """
        self._checkClosed()
        self._checkWritable()
        try:
            return self._sock.send(b)
        except error as e:
            # XXX what about EINTR?
            if e.args[0] in _blocking_errnos:
                return None
            raise
    def readable(self):
        """True if the SocketIO is open for reading.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return self._reading
    def writable(self):
        """True if the SocketIO is open for writing.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return self._writing
    def seekable(self):
        """True if the SocketIO is open for seeking.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return super().seekable()
    def fileno(self):
        """Return the file descriptor of the underlying socket.
        """
        self._checkClosed()
        return self._sock.fileno()
    @property
    def name(self):
        # Mirrors FileIO.name: the fd while open, -1 once closed.
        if not self.closed:
            return self.fileno()
        else:
            return -1
    @property
    def mode(self):
        return self._mode
    def close(self):
        """Close the SocketIO object. This doesn't close the underlying
        socket, except if all references to it have disappeared.
        """
        if self.closed:
            return
        io.RawIOBase.close(self)
        # Tell the owning socket one makefile() wrapper is gone so it can
        # perform its deferred close if needed.
        if hasattr(self._sock, '_decref_socketios'):
            self._sock._decref_socketios()
        self._sock = None
def getfqdn(name=''):
    """Get fully qualified domain name from name.
    An empty argument is interpreted as meaning the local host.  The
    hostname reported by gethostbyaddr() is preferred, then its aliases;
    the first candidate containing a dot wins.  When no dotted name is
    available, the bare hostname from the lookup is returned.
    """
    name = name.strip()
    if not name or name == '0.0.0.0':
        name = gethostname()
    try:
        hostname, aliases, _ipaddrs = gethostbyaddr(name)
    except error:
        # Unresolvable: hand back whatever we started with.
        return name
    for candidate in [hostname] + aliases:
        if '.' in candidate:
            return candidate
    return hostname
# Sentinel meaning "use the interpreter-wide default timeout".
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None):
    """Connect to *address* and return the socket object.
    Convenience function. Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object. Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect. If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used. If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
    """
    host, port = address
    last_error = None
    # Try every resolved address in order until one connects.
    for af, socktype, proto, _canonname, sa in getaddrinfo(host, port, 0,
                                                           SOCK_STREAM):
        sock = None
        try:
            sock = socket(af, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            # Break explicitly a reference cycle
            last_error = None
            return sock
        except error as exc:
            last_error = exc
            if sock is not None:
                sock.close()
    if last_error is not None:
        raise last_error
    raise error("getaddrinfo returns an empty list")
def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
    """Resolve host and port into list of address info entries.
    Each entry is a 5-tuple (family, type, proto, canonname, sockaddr)
    suitable for creating a socket connected to that service.  This wrapper
    behaves exactly like _socket.getaddrinfo() except that the numeric
    family and socket type are translated to AddressFamily/SocketKind enum
    members where a matching member exists.
    host is a domain name, a string representation of an IPv4/v6 address or
    None. port is a string service name such as 'http', a numeric port
    number or None. Passing None for host and port passes NULL to the
    underlying C API; passing zero for family/type/proto selects the full
    range of results.
    """
    return [(_intenum_converter(af, AddressFamily),
             _intenum_converter(kind, SocketKind),
             proto_, canonname, sockaddr)
            for af, kind, proto_, canonname, sockaddr
            in _socket.getaddrinfo(host, port, family, type, proto, flags)]
if __name__ == 'PYOBJ.COM':
    # NOTE(review): this branch can never execute under a normal interpreter
    # ('PYOBJ.COM' is not a value __name__ takes at runtime).  It appears to
    # exist so a build-time analysis step sees these _socket names declared
    # in this module -- presumably the Cosmopolitan/APE toolchain; confirm
    # before removing.  The zeros are placeholders, not real values.
    AF_APPLETALK = 0
    AF_ASH = 0
    AF_ATMPVC = 0
    AF_ATMSVC = 0
    AF_AX25 = 0
    AF_BRIDGE = 0
    AF_CAN = 0
    AF_ECONET = 0
    AF_INET = 0
    AF_INET6 = 0
    AF_IPX = 0
    AF_IRDA = 0
    AF_KEY = 0
    AF_LLC = 0
    AF_NETBEUI = 0
    AF_NETROM = 0
    AF_PACKET = 0
    AF_PPPOX = 0
    AF_RDS = 0
    AF_ROSE = 0
    AF_ROUTE = 0
    AF_SECURITY = 0
    AF_SNA = 0
    AF_UNIX = 0
    AF_UNSPEC = 0
    AF_X25 = 0
    AI_ADDRCONFIG = 0
    AI_ALL = 0
    AI_CANONNAME = 0
    AI_NUMERICHOST = 0
    AI_NUMERICSERV = 0
    AI_PASSIVE = 0
    AI_V4MAPPED = 0
    CAPI = 0
    EAI_ADDRFAMILY = 0
    EAI_AGAIN = 0
    EAI_BADFLAGS = 0
    EAI_FAIL = 0
    EAI_FAMILY = 0
    EAI_MEMORY = 0
    EAI_NODATA = 0
    EAI_NONAME = 0
    EAI_OVERFLOW = 0
    EAI_SERVICE = 0
    EAI_SOCKTYPE = 0
    EAI_SYSTEM = 0
    INADDR_ALLHOSTS_GROUP = 0
    INADDR_ANY = 0
    INADDR_BROADCAST = 0
    INADDR_LOOPBACK = 0
    INADDR_MAX_LOCAL_GROUP = 0
    INADDR_NONE = 0
    INADDR_UNSPEC_GROUP = 0
    IPPORT_RESERVED = 0
    IPPORT_USERRESERVED = 0
    IPPROTO_AH = 0
    IPPROTO_DSTOPTS = 0
    IPPROTO_EGP = 0
    IPPROTO_ESP = 0
    IPPROTO_FRAGMENT = 0
    IPPROTO_GRE = 0
    IPPROTO_HOPOPTS = 0
    IPPROTO_ICMP = 0
    IPPROTO_ICMPV6 = 0
    IPPROTO_IDP = 0
    IPPROTO_IGMP = 0
    IPPROTO_IP = 0
    IPPROTO_IPIP = 0
    IPPROTO_IPV6 = 0
    IPPROTO_MAX = 0
    IPPROTO_NONE = 0
    IPPROTO_PIM = 0
    IPPROTO_PUP = 0
    IPPROTO_RAW = 0
    IPPROTO_ROUTING = 0
    IPPROTO_RSVP = 0
    IPPROTO_SCTP = 0
    IPPROTO_TCP = 0
    IPPROTO_TP = 0
    IPPROTO_UDP = 0
    IP_ADD_MEMBERSHIP = 0
    IP_DEFAULT_MULTICAST_LOOP = 0
    IP_DEFAULT_MULTICAST_TTL = 0
    IP_DROP_MEMBERSHIP = 0
    IP_HDRINCL = 0
    IP_MAX_MEMBERSHIPS = 0
    IP_MULTICAST_IF = 0
    IP_MULTICAST_LOOP = 0
    IP_MULTICAST_TTL = 0
    IP_OPTIONS = 0
    IP_RECVOPTS = 0
    IP_RECVRETOPTS = 0
    IP_RETOPTS = 0
    IP_TOS = 0
    IP_TRANSPARENT = 0
    IP_TTL = 0
    MSG_CMSG_CLOEXEC = 0
    MSG_CONFIRM = 0
    MSG_CTRUNC = 0
    MSG_DONTROUTE = 0
    MSG_DONTWAIT = 0
    MSG_EOF = 0
    MSG_EOR = 0
    MSG_ERRQUEUE = 0
    MSG_FASTOPEN = 0
    MSG_MORE = 0
    MSG_NOSIGNAL = 0
    MSG_NOTIFICATION = 0
    MSG_OOB = 0
    MSG_PEEK = 0
    MSG_TRUNC = 0
    MSG_WAITALL = 0
    NI_DGRAM = 0
    NI_MAXHOST = 0
    NI_MAXSERV = 0
    NI_NAMEREQD = 0
    NI_NOFQDN = 0
    NI_NUMERICHOST = 0
    NI_NUMERICSERV = 0
    PF_CAN = 0
    PF_PACKET = 0
    PF_RDS = 0
    SHUT_RD = 0
    SHUT_RDWR = 0
    SHUT_WR = 0
    SOCK_CLOEXEC = 0
    SOCK_DGRAM = 0
    SOCK_NONBLOCK = 0
    SOCK_RAW = 0
    SOCK_RDM = 0
    SOCK_SEQPACKET = 0
    SOCK_STREAM = 0
    SOL_IP = 0
    SOL_RDS = 0
    SOL_SOCKET = 0
    SOL_TCP = 0
    SOL_UDP = 0
    SOMAXCONN = 0
    SO_ACCEPTCONN = 0
    SO_BINDTODEVICE = 0
    SO_BROADCAST = 0
    SO_DEBUG = 0
    SO_DOMAIN = 0
    SO_DONTROUTE = 0
    SO_ERROR = 0
    SO_KEEPALIVE = 0
    SO_LINGER = 0
    SO_MARK = 0
    SO_OOBINLINE = 0
    SO_PASSCRED = 0
    SO_PASSSEC = 0
    SO_PEERCRED = 0
    SO_PEERSEC = 0
    SO_PRIORITY = 0
    SO_PROTOCOL = 0
    SO_RCVBUF = 0
    SO_RCVLOWAT = 0
    SO_RCVTIMEO = 0
    SO_REUSEADDR = 0
    SO_REUSEPORT = 0
    SO_SNDBUF = 0
    SO_SNDLOWAT = 0
    SO_SNDTIMEO = 0
    SO_TYPE = 0
    SocketType = 0
    TCP_CONGESTION = 0
    TCP_CORK = 0
    TCP_DEFER_ACCEPT = 0
    TCP_FASTOPEN = 0
    TCP_FASTOPEN_CONNECT = 0
    TCP_INFO = 0
    TCP_KEEPCNT = 0
    TCP_KEEPIDLE = 0
    TCP_KEEPINTVL = 0
    TCP_LINGER2 = 0
    TCP_MAXSEG = 0
    TCP_NODELAY = 0
    TCP_QUICKACK = 0
    TCP_SAVED_SYN = 0
    TCP_SAVE_SYN = 0
    TCP_SYNCNT = 0
    TCP_USER_TIMEOUT = 0
    TCP_WINDOW_CLAMP = 0
    dup = 0
    error = 0
    gaierror = 0
    getaddrinfo = 0
    getdefaulttimeout = 0
    gethostbyaddr = 0
    gethostbyname = 0
    gethostbyname_ex = 0
    gethostname = 0
    getnameinfo = 0
    getprotobyname = 0
    getservbyname = 0
    getservbyport = 0
    has_ipv6 = 0
    herror = 0
    htonl = 0
    htons = 0
    inet_aton = 0
    inet_ntoa = 0
    inet_ntop = 0
    inet_pton = 0
    ntohl = 0
    ntohs = 0
    setdefaulttimeout = 0
    sethostname = 0
    socket = 0
    socketpair = 0
    timeout = 0
| 32.862176 | 95 | 0.595043 |
ace22f0ea48915e428b97a97be987c21f5caf53a | 4,583 | py | Python | stefuna/stefuna.py | irothschild/stefuna | 4482537a6fcb3e87f5f59507af2fc2abff8f26a7 | [
"MIT"
] | 23 | 2017-08-15T21:47:45.000Z | 2021-04-06T13:48:21.000Z | stefuna/stefuna.py | irothschild/stefuna | 4482537a6fcb3e87f5f59507af2fc2abff8f26a7 | [
"MIT"
] | 10 | 2018-04-11T23:10:21.000Z | 2020-09-23T14:52:17.000Z | stefuna/stefuna.py | irothschild/stefuna | 4482537a6fcb3e87f5f59507af2fc2abff8f26a7 | [
"MIT"
] | 10 | 2018-03-27T00:59:01.000Z | 2021-12-10T11:55:30.000Z | #!/usr/bin/env python
import sys
# Allow worker/config modules in the invocation directory to be imported by
# dotted name without being installed.
sys.path.append('.')
import argparse  # noqa
from stefuna import Server, configure_logger  # noqa
from pydoc import locate  # noqa
from multiprocessing import cpu_count, set_start_method  # noqa
import logging  # noqa
# Root-logger format includes process name/pid because workers run in a
# multiprocessing pool.
configure_logger('',
                 '%(asctime)s [%(levelname)s][%(processName)s/%(process)d] %(message)s',
                 logging.StreamHandler())
logger = logging.getLogger('stefuna')
# boto connectionpool will log an INFO message "Resetting dropped connection" every 4 minutes or so.
# We turn this off by raising the loglevel of the botocore connectionpool.
logging.getLogger("botocore.vendored.requests.packages.urllib3.connectionpool").setLevel(logging.WARNING)
# Default config
# Every key can be overridden by the --config module/dict and then by the
# individual CLI flags; see main().
config = {
    'name': 'StepFunActivityWorker',
    'activity_arn': None,
    'processes': None,
    'heartbeat': 0,
    'healthcheck': 8080,
    'maxtasksperchild': 100,
    'server': None,
    'server_config': {},
    'worker': 'UNSET_WORKER_CLASS',
    'worker_config': {},
    'loglevel': 'info',
    'start_method': 'spawn'
}
def main():
    """Parse CLI options, merge them over the module-level ``config``
    defaults, then build and run the Step Function activity server.

    Does not return: exits with -1 on configuration errors and 0 when the
    server stops.
    """
    parser = argparse.ArgumentParser(description='Run a Step Function Activity server.')
    parser.add_argument('--config', dest='config', action='store', required=False,
                        help='Module or dict of config to override defaults')
    parser.add_argument('--worker', dest='worker', action='store', required=False,
                        help='Module and class of worker in dot notation. Overrides config setting.')
    parser.add_argument('--activity-arn', dest='activity_arn', action='store', required=False,
                        help='Step Function Activity ARN, Overrides config setting.')
    parser.add_argument('--processes', type=int, dest='processes', action='store', required=False,
                        help='Number of worker processes. Overrides config setting. If 0, cpu_count is used.')
    parser.add_argument('--loglevel', dest='loglevel', action='store', required=False,
                        help='Loglevel (debug, info, warning, error or critical). Overrides config setting.')
    args = parser.parse_args()
    if args.config:
        # --config may name either a dict or a module; pydoc.locate resolves
        # the dotted path to the actual object.
        local_config = locate(args.config)
        if local_config is None:
            sys.stderr.write('Error loading config {0}\n'.format(args.config))
            sys.exit(-1)
        if type(local_config) is not dict:
            # A module: use its public attributes as the override dict.
            local_config = {k: v for k, v in vars(local_config).items() if not k.startswith('_')}
        config.update(local_config)
    if args.loglevel:
        config['loglevel'] = args.loglevel
    loglevel_numeric = None
    loglevel = config.get('loglevel')
    if loglevel is not None:
        # Translate the level name to logging's numeric constant and apply
        # it to both this logger and the root logger.
        loglevel_numeric = getattr(logging, loglevel.upper(), None)
        if not isinstance(loglevel_numeric, int):
            raise ValueError('Invalid log level: %s' % loglevel)
        logger.setLevel(loglevel_numeric)
        logging.getLogger('').setLevel(loglevel_numeric)
    if args.worker:
        config['worker'] = args.worker
    if args.activity_arn:
        config['activity_arn'] = args.activity_arn
    if args.processes is not None:
        # Setting to None will use the cpu_count processes
        config['processes'] = args.processes if args.processes else None
    worker_count = config.get('processes')
    if worker_count is None:
        worker_count = cpu_count()
    logger.info('Running {0} for activity {1} {2} with {3} workers'.format(
        config['worker'], config['name'], config['activity_arn'], worker_count))
    start_method = config['start_method']
    if start_method:
        set_start_method(start_method)
    # Resolve the worker class and (optionally) a custom Server subclass.
    Server.worker_class = locate(config['worker'])
    server_class = Server
    if config['server']:
        server_class = locate(config['server'])
        if server_class is None:
            sys.stderr.write('Error locating server class {0}\n'.format(config['server']))
            sys.exit(-1)
    server = server_class(name=config['name'], activity_arn=config['activity_arn'],
                          processes=config['processes'], heartbeat=config['heartbeat'],
                          maxtasksperchild=config['maxtasksperchild'],
                          server_config=config['server_config'],
                          worker_config=config['worker_config'],
                          healthcheck=config['healthcheck'],
                          loglevel=loglevel_numeric)
    server.run()  # does not return until server stopped
    logger.info('Server exiting.')
    sys.exit(0)
if __name__ == "__main__":
    main()
| 37.565574 | 110 | 0.643683 |
ace22f496d0a2e29767f714017fddc9f5e108a46 | 482 | py | Python | app/urls.py | LaurierCS/Pod3 | ebc1c60e1f5871346460fd3d1226cc7cd03616a8 | [
"MIT"
] | null | null | null | app/urls.py | LaurierCS/Pod3 | ebc1c60e1f5871346460fd3d1226cc7cd03616a8 | [
"MIT"
] | null | null | null | app/urls.py | LaurierCS/Pod3 | ebc1c60e1f5871346460fd3d1226cc7cd03616a8 | [
"MIT"
] | null | null | null | from django.urls import path, include
from . import views
# URL routes for the flashcard app.  The <int:...> converters capture
# numeric path segments and pass them to the view as keyword arguments.
urlpatterns = [
    path('', views.homepage, name='index'),
    path('deck/<int:deckId>/', views.deck, name='deck'),
    path('study/<int:deckId>/', views.study, name='study'),
    # Records study progress (seconds spent) against a card-ledger entry.
    path('update/<int:deckId>/<int:cardLedgerId>/<int:seconds>/', views.updateLedger, name='update' ),
    # Django's built-in auth views (login, logout, password reset, ...).
    path('accounts/', include('django.contrib.auth.urls')),
    path('accounts/create_account', views.create_account, name='create_account'),
] | 43.818182 | 102 | 0.680498 |
ace22f63d13b67c7b8ebe2a036b50f2fcc608ce6 | 1,117 | py | Python | db/util.py | therealplato/vim-awesome | 54e33073f4d470230e4578239ceb8f203f5f6613 | [
"MIT"
] | 1,379 | 2016-04-10T06:35:30.000Z | 2022-03-31T16:07:38.000Z | db/util.py | therealplato/vim-awesome | 54e33073f4d470230e4578239ceb8f203f5f6613 | [
"MIT"
] | 119 | 2016-04-10T04:26:48.000Z | 2022-03-25T15:10:06.000Z | db/util.py | therealplato/vim-awesome | 54e33073f4d470230e4578239ceb8f203f5f6613 | [
"MIT"
] | 83 | 2016-04-10T11:32:03.000Z | 2022-03-16T01:56:35.000Z | import rethinkdb as r
# TODO(alpert): Read port and db from app.config?
def r_conn(box=[None]):
if box[0] is None:
box[0] = r.connect()
box[0].use('vim_awesome')
return box[0]
def get_first(query):
results = list(query.limit(1).run(r_conn()))
return results[0] if results else None
def ensure_db(db_name, *args, **kwargs):
"""Creates a DB if it doesn't exist."""
conn = r.connect()
try:
r.db_create(db_name, *args, **kwargs).run(conn)
except r.RqlRuntimeError:
pass # Ignore DB already created
def ensure_table(table_name, *args, **kwargs):
"""Creates a table if it doesn't exist."""
try:
r.table_create(table_name, *args, **kwargs).run(r_conn())
except r.RqlRuntimeError:
pass # Ignore table already created
def ensure_index(table_name, index_name, *args, **kwargs):
"""Creates an index if it doesn't exist."""
indices = r.table(table_name).index_list().run(r_conn())
if index_name not in indices:
r.table(table_name).index_create(index_name, *args, **kwargs).run(
r_conn())
| 27.243902 | 74 | 0.633841 |
ace23132792fea3d609bf88d6f90e9596c0f7487 | 293 | py | Python | src/dstools/pipeline/util/__init__.py | edublancas/python-ds-tools | 1da2337961db9c50562349c28c9115d3a7cc6c0c | [
"MIT"
] | 1 | 2021-11-02T05:48:00.000Z | 2021-11-02T05:48:00.000Z | src/dstools/pipeline/util/__init__.py | edublancas/python-ds-tools | 1da2337961db9c50562349c28c9115d3a7cc6c0c | [
"MIT"
] | 8 | 2016-05-25T01:50:14.000Z | 2021-03-03T14:52:36.000Z | src/dstools/pipeline/util/__init__.py | edublancas/dstools | 1da2337961db9c50562349c28c9115d3a7cc6c0c | [
"MIT"
] | null | null | null | from dstools.pipeline.util.util import (safe_remove, image_bytes2html,
clean_up_files)
from dstools.pipeline.util.param_grid import Interval, ParamGrid
__all__ = ['safe_remove', 'image_bytes2html', 'clean_up_files', 'Interval',
'ParamGrid']
| 41.857143 | 75 | 0.665529 |
ace23291ccc16a4572035b70924528a2c0d1626c | 2,975 | py | Python | exp/check_weights.py | Bleyddyn/malpi | 9315f19366bd56da12c6dc7a84d830bbec530753 | [
"MIT"
] | 5 | 2017-03-27T22:15:54.000Z | 2022-01-19T23:46:46.000Z | exp/check_weights.py | Bleyddyn/malpi | 9315f19366bd56da12c6dc7a84d830bbec530753 | [
"MIT"
] | 10 | 2017-01-19T19:22:06.000Z | 2022-02-27T21:29:50.000Z | exp/check_weights.py | Bleyddyn/malpi | 9315f19366bd56da12c6dc7a84d830bbec530753 | [
"MIT"
] | null | null | null | import numpy as np
import pickle
from malpi.optimizer import *
from malpi import optim
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from optparse import OptionParser
def stats(arr, msg=""):
mi = np.min(arr)
ma = np.max(arr)
av = np.mean(arr)
std = np.std(arr)
arr_abs = np.abs(arr)
mi_abs = np.min(arr_abs)
ma_abs = np.max(arr_abs)
print "%sMin/Max/Mean/Stdev abs(Min/Max): %g/%g/%g/%g %g/%g" % (msg,mi,ma,av,std,mi_abs,ma_abs)
# Script body: load a pickled model, print per-parameter weight statistics,
# then plot the Q-values over a grid of (location, velocity) inputs.
parser = OptionParser()
(options, args) = parser.parse_args()
if len(args) != 1:
    print "Usage: python check_weights.py <model name>"
    exit()
# Accept the model name with or without the .pickle extension.
if args[0].endswith('.pickle'):
    args[0] = args[0][:-7]
with open(args[0] + '.pickle') as f:
    model = pickle.load( f )
for k,w in model.params.iteritems():
    stats( w, msg=k+" " )
# Sample a 20x20 random grid of (location, velocity) states -- the ranges
# presumably match MountainCar's observation bounds (see the commented-out
# High/Low note below) -- and record the Q-value of each of the 3 actions.
xs = []
ys = []
zs = []
zs2 = []
zs3 = []
for x in np.random.uniform( -1.3, 0.7, 20 ):
    for y in np.random.uniform( -0.8, 0.8, 20 ):
        qvalues,_ = model.forward( np.reshape( np.array([x, y]), (1,2)), mode="test")
        xs.append(x)
        ys.append(y)
        zs.append( qvalues[0][0] )
        zs2.append( qvalues[0][1] )
        zs3.append( qvalues[0][2] )
print "Max 0: %f" % np.max(zs)
print "Max 1: %f" % np.max(zs2)
print "Max 2: %f" % np.max(zs3)
# One 3D scatter subplot per action (left / noop / right).
fig = plt.figure(1,figsize=(16, 18), dpi=80)
ax = fig.add_subplot(311, projection='3d')
ax.scatter(xs,ys,zs=zs)
ax.set_xlabel('Location')
ax.set_ylabel('Velocity')
ax.set_title('Action Left')
ax = fig.add_subplot(312, projection='3d')
ax.scatter(xs,ys,zs=zs2)
ax.set_xlabel('Location')
ax.set_ylabel('Velocity')
ax.set_title('Action Noop')
ax = fig.add_subplot(313, projection='3d')
ax.scatter(xs,ys,zs=zs3)
ax.set_xlabel('Location')
ax.set_ylabel('Velocity')
ax.set_title('Action Right')
plt.show()
# get qvalues for a range of mc inputs and plot them
#High: [ 0.6 0.07]
#Low: [-1.2 -0.07]
#for i in range(10):
# state = np.random.uniform( 0.0, 1.0, (1,4,84,84) )
# q_values, _ = model.forward(state, mode="test")
# print q_values[0]
#with open('optimizer_test.pickle') as f:
# (w,dw,config) = pickle.load( f )
#
#del config['cache']
#
#update_rule = optim.rmsprop
#
#model.params = {'W5': w}
#optim = Optimizer("rmsprop", model, learning_rate=config['learning_rate'], decay_rate=config['decay_rate'], epsilon=config['epsilon'])
#print config
#optim.describe()
#
#diff = model.params['W5'] - w
#stats(diff, 'before ')
#
#next_w, next_config = update_rule(w, dw, config)
#
#grads = {'W5': dw}
#optim.update(grads)
#
#diff = model.params['W5'] - next_w
#stats(diff, 'after ')
#diff = optim.cache['W5'] - next_config['cache']
#stats(optim.cache['W5'], 'cache ')
#stats(diff, 'diffs ')
#
#if False:
# for k,w in model.params.iteritems():
# print k
# mask_zeros = w != 0.0
# mask = np.abs(w) < 1e-20
# mask = np.logical_and(mask_zeros,mask)
# if np.count_nonzero(mask) > 0:
# print "Underflow in %s " % (k,)
| 25.42735 | 135 | 0.623866 |
ace233ce43ee2cba5640e39f887b71be9b53dc04 | 23 | py | Python | __init__.py | ctmil/website_multi_image | acedf573b90c4faf8a8c95c18421af07b233a217 | [
"MIT"
] | null | null | null | __init__.py | ctmil/website_multi_image | acedf573b90c4faf8a8c95c18421af07b233a217 | [
"MIT"
] | null | null | null | __init__.py | ctmil/website_multi_image | acedf573b90c4faf8a8c95c18421af07b233a217 | [
"MIT"
] | 1 | 2020-06-01T11:21:59.000Z | 2020-06-01T11:21:59.000Z | import product_images
| 11.5 | 22 | 0.869565 |
ace233edddf09dc0255d81cfab9852391d063f07 | 2,609 | py | Python | tasks/R2R-pano/models/encoder.py | josephch405/selfmonitoring-agent | 4523edb7b90619c85f94127544aee96c6956d72f | [
"MIT"
] | null | null | null | tasks/R2R-pano/models/encoder.py | josephch405/selfmonitoring-agent | 4523edb7b90619c85f94127544aee96c6956d72f | [
"MIT"
] | null | null | null | tasks/R2R-pano/models/encoder.py | josephch405/selfmonitoring-agent | 4523edb7b90619c85f94127544aee96c6956d72f | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from models.rnn import CustomRNN
class EncoderRNN(nn.Module):
    """Encode navigation instructions into per-timestep context vectors
    (for attention) plus a final hidden/cell state to seed a decoder."""

    def __init__(self, opts, vocab_size, embedding_size, hidden_size, padding_idx,
                 dropout_ratio, bidirectional=False, num_layers=1):
        super(EncoderRNN, self).__init__()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.embedding_size = embedding_size
        # A bidirectional encoder splits the hidden size between directions
        # so the concatenated output matches the requested size.
        if bidirectional:
            hidden_size = hidden_size // 2
        self.num_layers = num_layers
        self.embedding = nn.Embedding(vocab_size, embedding_size, padding_idx)
        self.drop = nn.Dropout(p=dropout_ratio)
        self.bidirectional = bidirectional
        self.rnn_kwargs = dict(
            cell_class=nn.LSTMCell,
            input_size=embedding_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            dropout=0,
        )
        self.rnn = CustomRNN(**self.rnn_kwargs)

    def create_mask(self, batchsize, max_length, length):
        """Return a (batchsize, max_length) float mask with ones over the
        first length[i] positions of row i, on self.device."""
        mask = torch.zeros(batchsize, max_length)
        for row_idx in range(batchsize):
            mask[row_idx, :length[row_idx]] = 1
        return mask.to(self.device)

    def flip(self, x, dim):
        """Return a copy of x with dimension `dim` reversed."""
        return torch.flip(x, [dim])

    def forward(self, inputs, lengths):
        """
        Expects input vocab indices as (batch, seq_len). Also requires a list of lengths for dynamic batching.
        """
        embeds = self.drop(self.embedding(inputs))  # (batch, seq_len, embedding_size)
        embeds_mask = self.create_mask(embeds.size(0), embeds.size(1), lengths)

        if self.bidirectional:
            # Run the same RNN forward and over the time-reversed sequence,
            # then concatenate the two directions on the feature axis.
            fwd_out, (fwd_ht, fwd_ct) = self.rnn(embeds, mask=embeds_mask)
            bwd_out, (bwd_ht, bwd_ct) = self.rnn(
                self.flip(embeds, 1), mask=self.flip(embeds_mask, 1))
            # NOTE(review): the backward output is re-reversed along dim 0
            # here (dim 1 is time with batch_first) — preserved as-is from
            # the original; confirm the intended axis with CustomRNN's
            # output layout.
            output = torch.cat((fwd_out, self.flip(bwd_out, 0)), 2)
            ht = torch.cat((fwd_ht, bwd_ht), 2)
            ct = torch.cat((fwd_ct, bwd_ct), 2)
        else:
            output, (ht, ct) = self.rnn(embeds, mask=embeds_mask)

        return output.transpose(0, 1), ht.squeeze(0), ct.squeeze(0), embeds_mask
ace234d7a633c0ba9552121e19b0606c6a3b6aac | 5,873 | py | Python | src/jobwatcher/jobwatcher.py | agobeaux/aws-parallelcluster-node | 69c7cfdc00bcec923e313b7094ccc93cf73043ba | [
"Apache-2.0"
] | null | null | null | src/jobwatcher/jobwatcher.py | agobeaux/aws-parallelcluster-node | 69c7cfdc00bcec923e313b7094ccc93cf73043ba | [
"Apache-2.0"
] | null | null | null | src/jobwatcher/jobwatcher.py | agobeaux/aws-parallelcluster-node | 69c7cfdc00bcec923e313b7094ccc93cf73043ba | [
"Apache-2.0"
] | null | null | null | # Copyright 2013-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the
# License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
from datetime import datetime
import boto3
from botocore.config import Config
from configparser import ConfigParser
from retrying import retry
from common.time_utils import seconds
from common.utils import (
get_asg_name,
get_asg_settings,
get_compute_instance_type,
get_instance_properties,
load_module,
sleep_remaining_loop_time,
)
# Seconds between scheduler polling iterations.
LOOP_TIME = 60
# Seconds between refreshes of the compute instance type/properties.
UPDATE_INSTANCE_PROPERTIES_INTERVAL = 180
log = logging.getLogger(__name__)
# Immutable bundle of the settings read from /etc/jobwatcher.cfg.
JobwatcherConfig = collections.namedtuple(
    "JobwatcherConfig", ["region", "scheduler", "stack_name", "pcluster_dir", "proxy_config"]
)
def _get_config():
    """
    Get configuration from config file.

    Reads /etc/jobwatcher.cfg, optionally raises the root logger level, and
    builds a botocore proxy Config when a proxy is configured.

    :return: JobwatcherConfig namedtuple with the parsed parameters
    """
    config_file = "/etc/jobwatcher.cfg"
    log.info("Reading %s", config_file)
    config = ConfigParser()
    config.read(config_file)
    if config.has_option("jobwatcher", "loglevel"):
        # Bug fix: logging._levelNames is a Python 2 internal that was
        # removed in Python 3. getLevelName() maps a standard level name
        # (e.g. "INFO") to its numeric value.
        lvl = logging.getLevelName(config.get("jobwatcher", "loglevel"))
        logging.getLogger().setLevel(lvl)
    region = config.get("jobwatcher", "region")
    scheduler = config.get("jobwatcher", "scheduler")
    stack_name = config.get("jobwatcher", "stack_name")
    pcluster_dir = config.get("jobwatcher", "cfncluster_dir")
    _proxy = config.get("jobwatcher", "proxy")
    # "NONE" is the sentinel for "no proxy configured".
    proxy_config = Config()
    if _proxy != "NONE":
        proxy_config = Config(proxies={"https": _proxy})
    log.info(
        "Configured parameters: region=%s scheduler=%s stack_name=%s pcluster_dir=%s proxy=%s",
        region,
        scheduler,
        stack_name,
        pcluster_dir,
        _proxy,
    )
    return JobwatcherConfig(region, scheduler, stack_name, pcluster_dir, proxy_config)
def _poll_scheduler_status(config, asg_name, scheduler_module):
    """
    Verify scheduler status and ask the ASG new nodes, if required.

    Runs forever, one iteration per LOOP_TIME seconds.

    :param config: JobwatcherConfig object
    :param asg_name: ASG name
    :param scheduler_module: scheduler module
    """
    instance_type = None
    instance_properties = None
    update_instance_properties_timer = 0
    while True:
        start_time = datetime.now()
        # Get instance properties
        # Refreshed every UPDATE_INSTANCE_PROPERTIES_INTERVAL seconds, or on
        # the first iteration when nothing is cached yet.
        if not instance_properties or update_instance_properties_timer >= UPDATE_INSTANCE_PROPERTIES_INTERVAL:
            logging.info("Refreshing compute instance properties")
            update_instance_properties_timer = 0
            # Fall back to the previously known type if lookup fails.
            new_instance_type = get_compute_instance_type(
                config.region, config.proxy_config, config.stack_name, fallback=instance_type
            )
            if new_instance_type != instance_type:
                instance_type = new_instance_type
                instance_properties = get_instance_properties(config.region, config.proxy_config, instance_type)
        update_instance_properties_timer += LOOP_TIME
        # get current limits
        _, current_desired, max_size = get_asg_settings(config.region, config.proxy_config, asg_name)
        # Get current number of nodes
        running = scheduler_module.get_busy_nodes()
        # Get number of nodes requested
        pending = scheduler_module.get_required_nodes(instance_properties, max_size)
        log.info("%d nodes requested, %d nodes busy or unavailable", pending, running)
        # A negative value is the scheduler plugin's error signal.
        if pending < 0:
            log.critical("Error detecting number of required nodes. The cluster will not scale up.")
        elif pending == 0:
            log.info("There are no pending jobs or the requirements on pending jobs cannot be satisfied. Noop.")
        else:
            # Check to make sure requested number of instances is within ASG limits
            required = running + pending
            if required <= current_desired:
                log.info("%d nodes required, %d nodes in asg. Noop" % (required, current_desired))
            else:
                if required > max_size:
                    log.info(
                        "The number of required nodes %d is greater than max %d. Requesting max %d."
                        % (required, max_size, max_size)
                    )
                else:
                    log.info(
                        "Setting desired to %d nodes, requesting %d more nodes from asg."
                        % (required, required - current_desired)
                    )
                # Clamp the request to the ASG maximum.
                requested = min(required, max_size)
                # update ASG
                asg_client = boto3.client("autoscaling", region_name=config.region, config=config.proxy_config)
                asg_client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=requested)
        # Sleep only the remainder of the minute so the loop cadence stays fixed.
        sleep_remaining_loop_time(LOOP_TIME, start_time)
# Retry forever: any exception escaping main() restarts it after LOOP_TIME.
@retry(wait_fixed=seconds(LOOP_TIME))
def main():
    """Entry point: read config, resolve the ASG, load the scheduler plugin
    and start the polling loop. Re-raises on failure so @retry restarts it."""
    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s [%(module)s:%(funcName)s] %(message)s")
    log.info("jobwatcher startup")
    try:
        config = _get_config()
        asg_name = get_asg_name(config.stack_name, config.region, config.proxy_config)
        scheduler_module = load_module("jobwatcher.plugins." + config.scheduler)
        _poll_scheduler_status(config, asg_name, scheduler_module)
    except Exception as e:
        log.exception("An unexpected error occurred: %s", e)
        # Re-raise so the @retry decorator restarts the watcher.
        raise
if __name__ == "__main__":
    main()
| 35.593939 | 119 | 0.671037 |
ace23697111dab04bc9993fdf34966e4b39f893e | 26,110 | py | Python | trove/taskmanager/manager.py | a4913994/openstack_trove | 3b550048dd1e5841ad0f3295679e0f0b913a5687 | [
"Apache-2.0"
] | 244 | 2015-01-01T12:04:44.000Z | 2022-03-25T23:38:39.000Z | trove/taskmanager/manager.py | a4913994/openstack_trove | 3b550048dd1e5841ad0f3295679e0f0b913a5687 | [
"Apache-2.0"
] | 6 | 2015-08-18T08:19:10.000Z | 2022-03-05T02:32:36.000Z | trove/taskmanager/manager.py | a4913994/openstack_trove | 3b550048dd1e5841ad0f3295679e0f0b913a5687 | [
"Apache-2.0"
] | 178 | 2015-01-02T15:16:58.000Z | 2022-03-23T03:30:20.000Z | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_service import periodic_task
from oslo_utils import importutils
from trove.backup.models import Backup
import trove.common.cfg as cfg
from trove.common import clients
from trove.common.context import TroveContext
from trove.common import exception
from trove.common.exception import ReplicationSlaveAttachError
from trove.common.exception import TroveError
from trove.common.i18n import _
from trove.common.notification import DBaaSQuotas, EndNotification
from trove.common import server_group as srv_grp
from trove.common.strategies.cluster import strategy
from trove.datastore.models import DatastoreVersion
import trove.extensions.mgmt.instances.models as mgmtmodels
from trove.instance.tasks import InstanceTasks
from trove.taskmanager import models
from trove.taskmanager.models import FreshInstanceTasks, BuiltInstanceTasks
from trove.quota.quota import QUOTAS
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Manager(periodic_task.PeriodicTasks):
    """RPC endpoint for Trove's long-running asynchronous tasks.

    Each public method is invoked by the API tier over RPC; it loads the
    relevant instance/cluster task model and delegates the real work to it,
    usually inside an EndNotification context so completion or failure of
    the operation is reported.
    """

    def __init__(self):
        super(Manager, self).__init__(CONF)
        # Admin context used by periodic tasks (exists/quota notifications).
        self.admin_context = TroveContext(
            user=CONF.service_credentials.username,
            tenant=CONF.service_credentials.project_id,
            user_domain_name=CONF.service_credentials.user_domain_name)
        if CONF.exists_notification_transformer:
            self.exists_transformer = importutils.import_object(
                CONF.exists_notification_transformer,
                context=self.admin_context)

    def resize_volume(self, context, instance_id, new_size):
        """Resize the instance's volume to new_size (GB)."""
        with EndNotification(context):
            instance_tasks = models.BuiltInstanceTasks.load(context,
                                                            instance_id)
            instance_tasks.resize_volume(new_size)

    def resize_flavor(self, context, instance_id, old_flavor, new_flavor):
        """Resize the instance from old_flavor to new_flavor."""
        with EndNotification(context):
            instance_tasks = models.BuiltInstanceTasks.load(context,
                                                            instance_id)
            instance_tasks.resize_flavor(old_flavor, new_flavor)

    def reboot(self, context, instance_id):
        """Reboot the instance's server."""
        with EndNotification(context):
            instance_tasks = models.BuiltInstanceTasks.load(context,
                                                            instance_id)
            instance_tasks.reboot()

    def restart(self, context, instance_id):
        """Restart the datastore service on the instance."""
        with EndNotification(context):
            instance_tasks = models.BuiltInstanceTasks.load(context,
                                                            instance_id)
            instance_tasks.restart()

    def detach_replica(self, context, instance_id):
        """Detach the replica instance_id from its replication source."""
        with EndNotification(context):
            slave = models.BuiltInstanceTasks.load(context, instance_id)
            master_id = slave.slave_of_id
            master = models.BuiltInstanceTasks.load(context, master_id)
            slave.detach_replica(master)

    def _set_task_status(self, instances, status):
        """Persist the given task status on every instance in the list."""
        for instance in instances:
            setattr(instance.db_info, 'task_status', status)
            instance.db_info.save()

    def promote_to_replica_source(self, context, instance_id):
        """Promote the replica instance_id to be the new replication master."""
        # TODO(atomic77) Promote and eject need to be able to handle the case
        # where a datastore like Postgresql needs to treat the slave to be
        # promoted differently from the old master and the slaves which will
        # be simply reassigned to a new master. See:
        # https://bugs.launchpad.net/trove/+bug/1553339

        def _promote_to_replica_source(old_master, master_candidate,
                                       replica_models):
            # First, we transition from the old master to new as quickly as
            # possible to minimize the scope of unrecoverable error

            # NOTE(zhaochao): we cannot reattach the old master to the new
            # one immediately after the new master is up, because for MariaDB
            # the other replicas are still connecting to the old master, and
            # during reattaching the old master as a slave, new GTID may be
            # created and synced to the replicas. After that, when attaching
            # the replicas to the new master, 'START SLAVE' will fail by
            # 'fatal error 1236' if the binlog of the replica diverged from
            # the new master. So the proper order should be:
            # -1. make the old master read only (and detach floating ips)
            # -2. make sure the new master is up-to-date
            # -3. detach the new master from the old one
            # -4. enable the new master (and attach floating ips)
            # -5. attach the other replicas to the new master
            # -6. attach the old master to the new one
            #     (and attach floating ips)
            # -7. demote the old master
            # What we changed here is the order of the 6th step, previously
            # this step took place right after step 4, which causes failures
            # with MariaDB replications.
            old_master.make_read_only(True)
            latest_txn_id = old_master.get_latest_txn_id()
            master_candidate.wait_for_txn(latest_txn_id)
            master_candidate.detach_replica(old_master, for_failover=True)
            master_candidate.enable_as_master()
            master_candidate.make_read_only(False)

            # At this point, should something go wrong, there
            # should be a working master with some number of working slaves,
            # and possibly some number of "orphaned" slaves
            exception_replicas = []
            error_messages = ""
            for replica in replica_models:
                try:
                    if replica.id != master_candidate.id:
                        replica.detach_replica(old_master, for_failover=True)
                        replica.attach_replica(master_candidate, restart=True)
                except exception.TroveError as ex:
                    log_fmt = ("Unable to migrate replica %(slave)s from "
                               "old replica source %(old_master)s to "
                               "new source %(new_master)s on promote.")
                    exc_fmt = _("Unable to migrate replica %(slave)s from "
                                "old replica source %(old_master)s to "
                                "new source %(new_master)s on promote.")
                    msg_content = {
                        "slave": replica.id,
                        "old_master": old_master.id,
                        "new_master": master_candidate.id}
                    LOG.error(log_fmt, msg_content)
                    exception_replicas.append(replica)
                    error_messages += "%s (%s)\n" % (
                        exc_fmt % msg_content, ex)

            # dealing with the old master after all the other replicas
            # has been migrated.
            old_master.attach_replica(master_candidate, restart=False)
            try:
                old_master.demote_replication_master()
            except Exception as ex:
                log_fmt = "Exception demoting old replica source %s."
                exc_fmt = _("Exception demoting old replica source %s.")
                LOG.error(log_fmt, old_master.id)
                exception_replicas.append(old_master)
                error_messages += "%s (%s)\n" % (
                    exc_fmt % old_master.id, ex)

            self._set_task_status([old_master] + replica_models,
                                  InstanceTasks.NONE)
            if exception_replicas:
                self._set_task_status(exception_replicas,
                                      InstanceTasks.PROMOTION_ERROR)
                msg = (_("promote-to-replica-source %(id)s: The following "
                         "replicas may not have been switched: %(replicas)s:"
                         "\n%(err)s") %
                       {"id": master_candidate.id,
                        "replicas": [repl.id for repl in exception_replicas],
                        "err": error_messages})
                raise ReplicationSlaveAttachError(msg)

            LOG.info('Finished to promote %s as master.', instance_id)

        with EndNotification(context):
            LOG.info('Promoting %s as replication master', instance_id)

            master_candidate = BuiltInstanceTasks.load(context, instance_id)
            old_master = BuiltInstanceTasks.load(context,
                                                 master_candidate.slave_of_id)

            replicas = []
            for replica_dbinfo in old_master.slaves:
                if replica_dbinfo.id == instance_id:
                    replica = master_candidate
                else:
                    replica = BuiltInstanceTasks.load(context,
                                                      replica_dbinfo.id)
                replicas.append(replica)

            try:
                _promote_to_replica_source(old_master, master_candidate,
                                           replicas)
            except ReplicationSlaveAttachError:
                raise
            except Exception:
                self._set_task_status([old_master] + replicas,
                                      InstanceTasks.PROMOTION_ERROR)
                raise

    # pulled out to facilitate testing
    def _get_replica_txns(self, replica_models):
        """Return [replica, master UUID, last txn] for each replica."""
        return [[repl] + repl.get_last_txn() for repl in replica_models]

    def _most_current_replica(self, old_master, replica_models):
        """Return the replica with the most recent transaction; raise if the
        replicas are not all replicating from the same master."""
        # last_txns is [instance, master UUID, last txn]
        last_txns = self._get_replica_txns(replica_models)
        master_ids = [txn[1] for txn in last_txns if txn[1]]
        if len(set(master_ids)) > 1:
            raise TroveError(_("Replicas of %s not all replicating"
                               " from same master") % old_master.id)
        return sorted(last_txns, key=lambda x: x[2], reverse=True)[0][0]

    def eject_replica_source(self, context, instance_id):
        """Eject a (presumed dead) master, promoting its freshest replica."""

        def _eject_replica_source(old_master, replica_models):

            master_candidate = self._most_current_replica(old_master,
                                                          replica_models)
            LOG.info('New master selected: %s', master_candidate.id)
            master_candidate.detach_replica(old_master, for_failover=True)
            master_candidate.enable_as_master()
            master_candidate.make_read_only(False)

            exception_replicas = []
            error_messages = ""
            for replica in replica_models:
                try:
                    if replica.id != master_candidate.id:
                        replica.detach_replica(old_master, for_failover=True)
                        replica.attach_replica(master_candidate)
                except exception.TroveError as ex:
                    log_fmt = ("Unable to migrate replica %(slave)s from "
                               "old replica source %(old_master)s to "
                               "new source %(new_master)s on eject.")
                    exc_fmt = _("Unable to migrate replica %(slave)s from "
                                "old replica source %(old_master)s to "
                                "new source %(new_master)s on eject.")
                    msg_content = {
                        "slave": replica.id,
                        "old_master": old_master.id,
                        "new_master": master_candidate.id}
                    LOG.error(log_fmt, msg_content)
                    exception_replicas.append(replica)
                    error_messages += "%s (%s)\n" % (exc_fmt % msg_content, ex)

            self._set_task_status([old_master] + replica_models,
                                  InstanceTasks.NONE)
            if exception_replicas:
                self._set_task_status(exception_replicas,
                                      InstanceTasks.EJECTION_ERROR)
                msg = (_("eject-replica-source %(id)s: The following "
                         "replicas may not have been switched: %(replicas)s:"
                         "\n%(err)s") %
                       {"id": master_candidate.id,
                        "replicas": [repl.id for repl in exception_replicas],
                        "err": error_messages})
                raise ReplicationSlaveAttachError(msg)

            LOG.info('New master enabled: %s', master_candidate.id)

        with EndNotification(context):
            master = BuiltInstanceTasks.load(context, instance_id)
            replicas = [BuiltInstanceTasks.load(context, dbinfo.id)
                        for dbinfo in master.slaves]
            try:
                _eject_replica_source(master, replicas)
            except ReplicationSlaveAttachError:
                raise
            except Exception:
                self._set_task_status([master] + replicas,
                                      InstanceTasks.EJECTION_ERROR)
                raise

    def migrate(self, context, instance_id, host):
        """Migrate the instance's server to the given host."""
        with EndNotification(context):
            instance_tasks = models.BuiltInstanceTasks.load(context,
                                                            instance_id)
            instance_tasks.migrate(host)

    def rebuild(self, context, instance_id, image_id):
        """Rebuild the instance's server with the given image."""
        instance_tasks = models.BuiltInstanceTasks.load(context, instance_id)
        instance_tasks.rebuild(image_id)

    def delete_instance(self, context, instance_id):
        """Delete an instance, whether or not it finished provisioning."""
        with EndNotification(context):
            try:
                instance_tasks = models.BuiltInstanceTasks.load(context,
                                                                instance_id)
                instance_tasks.delete_async()
            except exception.UnprocessableEntity:
                # Not fully built yet; delete through the "fresh" task model.
                instance_tasks = models.FreshInstanceTasks.load(context,
                                                                instance_id)
                instance_tasks.delete_async()

    def delete_backup(self, context, backup_id):
        """Delete a backup and its stored artifacts."""
        with EndNotification(context):
            models.BackupTasks.delete_backup(context, backup_id)

    def create_backup(self, context, backup_info, instance_id):
        """Create a backup of the given instance."""
        with EndNotification(context, backup_id=backup_info['id']):
            instance_tasks = models.BuiltInstanceTasks.load(context,
                                                            instance_id)
            instance_tasks.create_backup(backup_info)

    def _create_replication_slave(self, context, instance_id, name, flavor,
                                  image_id, databases, users,
                                  datastore_manager, packages, volume_size,
                                  availability_zone, root_password, nics,
                                  overrides, slave_of_id, backup_id,
                                  volume_type, modules, access=None,
                                  ds_version=None):
        """Create one or more replicas of slave_of_id from a fresh master
        snapshot, which is deleted again once all replicas are built."""
        if type(instance_id) in [list]:
            ids = instance_id
            root_passwords = root_password
        else:
            ids = [instance_id]
            root_passwords = [root_password]
        replica_number = 0
        replica_backup_id = backup_id
        replicas = []

        master_instance_tasks = BuiltInstanceTasks.load(context, slave_of_id)
        server_group = master_instance_tasks.server_group
        scheduler_hints = srv_grp.ServerGroup.convert_to_hint(server_group)
        LOG.debug("Using scheduler hints %s for creating instance %s",
                  scheduler_hints, instance_id)

        # Create backup for master
        snapshot = None
        try:
            instance_tasks = FreshInstanceTasks.load(context, ids[0])
            snapshot = instance_tasks.get_replication_master_snapshot(
                context, slave_of_id, flavor,
                parent_backup_id=replica_backup_id)
            LOG.info('Snapshot info for creating replica of %s: %s',
                     slave_of_id, snapshot)
        except Exception as err:
            LOG.error('Failed to get master snapshot info for creating '
                      'replica, error: %s', str(err))

            # Clean up the partially created snapshot backup, if any.
            if snapshot and snapshot.get('dataset', {}).get('snapshot_id'):
                backup_id = snapshot['dataset']['snapshot_id']
                Backup.delete(context, backup_id)

            raise

        # Create replicas using the master backup
        replica_backup_id = snapshot['dataset']['snapshot_id']
        try:
            for replica_index in range(0, len(ids)):
                replica_number += 1
                LOG.info(f"Creating replica {replica_number} "
                         f"({ids[replica_index]}) of {len(ids)}.")
                instance_tasks = FreshInstanceTasks.load(
                    context, ids[replica_index])
                instance_tasks.create_instance(
                    flavor, image_id, databases, users, datastore_manager,
                    packages, volume_size, replica_backup_id,
                    availability_zone, root_passwords[replica_index],
                    nics, overrides, None, snapshot, volume_type,
                    modules, scheduler_hints, access=access,
                    ds_version=ds_version)
                replicas.append(instance_tasks)

            for replica in replicas:
                replica.wait_for_instance(CONF.restore_usage_timeout, flavor)
        except Exception as err:
            LOG.error('Failed to create replica from %s, error: %s',
                      slave_of_id, str(err))
            raise
        finally:
            # The master snapshot backup is only needed during provisioning.
            Backup.delete(context, replica_backup_id)

    def _create_instance(self, context, instance_id, name, flavor,
                         image_id, databases, users, datastore_manager,
                         packages, volume_size, backup_id, availability_zone,
                         root_password, nics, overrides, slave_of_id,
                         cluster_config, volume_type, modules, locality,
                         access=None, ds_version=None):
        """Dispatch to replica creation or standalone instance creation."""
        if slave_of_id:
            self._create_replication_slave(context, instance_id, name,
                                           flavor, image_id, databases, users,
                                           datastore_manager, packages,
                                           volume_size,
                                           availability_zone, root_password,
                                           nics, overrides, slave_of_id,
                                           backup_id, volume_type, modules,
                                           access=access,
                                           ds_version=ds_version)
        else:
            if type(instance_id) in [list]:
                raise AttributeError(_(
                    "Cannot create multiple non-replica instances."))

            scheduler_hints = srv_grp.ServerGroup.build_scheduler_hint(
                context, locality, instance_id
            )
            LOG.debug("Using scheduler hints %s for creating instance %s",
                      scheduler_hints, instance_id)

            instance_tasks = FreshInstanceTasks.load(context, instance_id)
            instance_tasks.create_instance(
                flavor, image_id, databases, users,
                datastore_manager, packages,
                volume_size, backup_id,
                availability_zone, root_password,
                nics, overrides, cluster_config,
                None, volume_type, modules,
                scheduler_hints, access=access, ds_version=ds_version
            )

            # Restores get a longer timeout than plain provisioning.
            timeout = (CONF.restore_usage_timeout if backup_id
                       else CONF.usage_timeout)
            instance_tasks.wait_for_instance(timeout, flavor)

    def create_instance(self, context, instance_id, name, flavor,
                        image_id, databases, users, datastore_manager,
                        packages, volume_size, backup_id, availability_zone,
                        root_password, nics, overrides, slave_of_id,
                        cluster_config, volume_type, modules, locality,
                        access=None, ds_version=None):
        """RPC entry point for instance creation (standalone or replica)."""
        with EndNotification(
            context,
            instance_id=(
                instance_id[0]
                if isinstance(instance_id, list)
                else instance_id
            )
        ):
            self._create_instance(context, instance_id, name, flavor,
                                  image_id, databases, users,
                                  datastore_manager, packages, volume_size,
                                  backup_id, availability_zone,
                                  root_password, nics, overrides, slave_of_id,
                                  cluster_config, volume_type, modules,
                                  locality, access=access,
                                  ds_version=ds_version)

    def upgrade(self, context, instance_id, datastore_version_id):
        """Upgrade the instance to the given datastore version."""
        instance_tasks = models.BuiltInstanceTasks.load(context, instance_id)
        datastore_version = DatastoreVersion.load_by_uuid(datastore_version_id)
        with EndNotification(context):
            instance_tasks.upgrade(datastore_version)

    def update_access(self, context, instance_id, access):
        """Update the instance's access (public/allowed CIDRs) settings."""
        instance_tasks = models.BuiltInstanceTasks.load(context, instance_id)
        try:
            instance_tasks.update_access(access)
        except Exception as e:
            LOG.error(f"Failed to update access configuration for "
                      f"{instance_id}: {str(e)}")
            # Bug fix: Manager has no update_db() method; "self.update_db"
            # previously fell into __getattr__ and raised AttributeError,
            # masking the real failure. The error status belongs on the
            # instance task model.
            instance_tasks.update_db(
                task_status=InstanceTasks.UPDATING_ERROR_ACCESS)

    def create_cluster(self, context, cluster_id):
        """Create a cluster using its datastore's cluster strategy."""
        with EndNotification(context, cluster_id=cluster_id):
            cluster_tasks = models.load_cluster_tasks(context, cluster_id)
            cluster_tasks.create_cluster(context, cluster_id)

    def grow_cluster(self, context, cluster_id, new_instance_ids):
        """Add the given new instances to the cluster."""
        with EndNotification(context, cluster_id=cluster_id,
                             instance_ids=new_instance_ids):
            cluster_tasks = models.load_cluster_tasks(context, cluster_id)
            cluster_tasks.grow_cluster(context, cluster_id, new_instance_ids)

    def shrink_cluster(self, context, cluster_id, instance_ids):
        """Remove the given instances from the cluster."""
        with EndNotification(context, cluster_id=cluster_id,
                             instance_ids=instance_ids):
            cluster_tasks = models.load_cluster_tasks(context, cluster_id)
            cluster_tasks.shrink_cluster(context, cluster_id, instance_ids)

    def restart_cluster(self, context, cluster_id):
        """Restart the datastore service on every cluster member."""
        cluster_tasks = models.load_cluster_tasks(context, cluster_id)
        cluster_tasks.restart_cluster(context, cluster_id)

    def upgrade_cluster(self, context, cluster_id, datastore_version_id):
        """Upgrade every cluster member to the given datastore version."""
        datastore_version = DatastoreVersion.load_by_uuid(datastore_version_id)
        cluster_tasks = models.load_cluster_tasks(context, cluster_id)
        cluster_tasks.upgrade_cluster(context, cluster_id, datastore_version)

    def delete_cluster(self, context, cluster_id):
        """Delete the cluster and all of its members."""
        with EndNotification(context):
            cluster_tasks = models.load_cluster_tasks(context, cluster_id)
            cluster_tasks.delete_cluster(context, cluster_id)

    def reapply_module(self, context, module_id, md5, include_clustered,
                       batch_size, batch_delay, force):
        """Re-apply a module to all instances that currently have it."""
        models.ModuleTasks.reapply_module(
            context, module_id, md5, include_clustered,
            batch_size, batch_delay, force)

    if CONF.exists_notification_transformer:
        @periodic_task.periodic_task
        def publish_exists_event(self, context):
            """
            Push this in Instance Tasks to fetch a report/collection
            :param context: currently None as specied in bin script
            """
            mgmtmodels.publish_exist_events(self.exists_transformer,
                                            self.admin_context)

    if CONF.quota_notification_interval:
        @periodic_task.periodic_task(spacing=CONF.quota_notification_interval)
        def publish_quota_notifications(self, context):
            """Periodically emit quota usage notifications per tenant."""
            nova_client = clients.create_nova_client(self.admin_context)
            for tenant in nova_client.tenants.list():
                for quota in QUOTAS.get_all_quotas_by_tenant(tenant.id):
                    usage = QUOTAS.get_quota_usage(quota)
                    DBaaSQuotas(self.admin_context, quota, usage).notify()

    def __getattr__(self, name):
        """
        We should only get here if Python couldn't find a "real" method.

        Resolves names of the form "<manager>_<method>" to actions exposed
        by the corresponding cluster taskmanager strategy.
        """

        def raise_error(msg):
            raise AttributeError(msg)

        manager, sep, method = name.partition('_')
        if not manager:
            raise_error('Cannot derive manager from attribute name "%s"' %
                        name)
        task_strategy = strategy.load_taskmanager_strategy(manager)
        if not task_strategy:
            raise_error('No task manager strategy for manager "%s"' % manager)
        if method not in task_strategy.task_manager_manager_actions:
            raise_error('No method "%s" for task manager strategy for manager'
                        ' "%s"' % (method, manager))
        return task_strategy.task_manager_manager_actions.get(method)
ace23712b41a4e2bee9f496935b1b5a05426f111 | 3,105 | py | Python | tri_star/include/tri_star/color_print.py | ScazLab/Frontiers_Robot_Tool_Use | ebace49e88562c18b3b967ec5360a4cec4f8fe56 | [
"MIT"
] | null | null | null | tri_star/include/tri_star/color_print.py | ScazLab/Frontiers_Robot_Tool_Use | ebace49e88562c18b3b967ec5360a4cec4f8fe56 | [
"MIT"
] | null | null | null | tri_star/include/tri_star/color_print.py | ScazLab/Frontiers_Robot_Tool_Use | ebace49e88562c18b3b967ec5360a4cec4f8fe56 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Software License Agreement (MIT License)
#
# Copyright (c) 2020, tri_star
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Meiying Qin, Jake Brawer
# https://misc.flogisoft.com/bash/tip_colors_and_formatting
# https://gist.github.com/vratiu/9780109
# tested under unbuntu 18 bash
RESET = '\033[0m'

# ANSI base color digits; combined with a fore/background prefix below.
BLACK = '0' # light grey is dark grey
RED = '1'
GREEN = '2'
YELLOW = '3'
BLUE = '4'
MAGENTA = '5'
CYAN = '6'
LIGHT_GRAY = '7' # light grey is white

# Prefixes selecting foreground/background and regular/high-intensity.
FOREGROUND_REGULAR = '3'
FOREGROUND_LIGHT = '9'
BACKGROUND_REGULAR = '4'
BACKGROUND_LIGHT = '10'

# SGR attribute codes.
BOLD = '1'
DIM = '2'
ITALIC = '3'
UNDERLINE = '4'
BLINK = '5'
REVERSE = '7'
HIDDEN = '8'
STRIKETHROUGH = '9'

def colored_text(text, text_color='', text_color_light=False, background_color='', background_color_light=False, bold=False, dim=False, italic=False, underline=False, blink=False, reverse=False, hidden=False, strikethrough=False):
    """Wrap text in ANSI SGR escape codes built from the given attributes.

    Returns text unchanged when no attribute or color is requested.
    """
    codes = []
    if bold:
        codes.append(BOLD)
    if dim:
        codes.append(DIM)
    if italic:
        codes.append(ITALIC)
    if underline:
        codes.append(UNDERLINE)
    if blink:
        codes.append(BLINK)
    if reverse:
        codes.append(REVERSE)
    if hidden:
        codes.append(HIDDEN)
    if strikethrough:
        codes.append(STRIKETHROUGH)
    if text_color:
        fg_prefix = FOREGROUND_LIGHT if text_color_light else FOREGROUND_REGULAR
        codes.append(fg_prefix + text_color)
    if background_color:
        bg_prefix = BACKGROUND_LIGHT if background_color_light else BACKGROUND_REGULAR
        codes.append(bg_prefix + background_color)
    if not codes:
        return text
    return '\033[{}m{}\033[00m'.format(';'.join(codes), text)
ace2374e856bc29869def0e11c9ddc4df369bf4e | 820 | py | Python | python/sync/summer.py | kfsone/tinker | 81ed372117bcad691176aac960302f497adf8d82 | [
"MIT"
] | null | null | null | python/sync/summer.py | kfsone/tinker | 81ed372117bcad691176aac960302f497adf8d82 | [
"MIT"
] | null | null | null | python/sync/summer.py | kfsone/tinker | 81ed372117bcad691176aac960302f497adf8d82 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
#
# Takes a block number followed by a list of files, and returns the md5 checkum
# of the Nth block of each file.
#
# e.g. 0 /etc/motd /etc/motd.issue
# will yield the checksum of block 0 (the first 64k) of motd and motd.issue
#
# Output format is:
#
# <filename> <md5sum for the given block>
from __future__ import print_function
import hashlib
import mmap
import sys
BlockSize = 64 * 1024


def block_md5(filename, block, block_size=BlockSize):
    """Return the hex MD5 digest of the block-th chunk of *filename*.

    The file is memory-mapped read-only; a chunk past the end of the
    file hashes whatever partial data remains (possibly nothing).

    Bug fix: the original passed the data through ``buffer(...)``, which
    exists only in Python 2 and raises NameError under the ``python3``
    shebang this script declares; a plain mmap slice is used instead.
    The chunk length is also clamped at zero so a block index past EOF
    no longer produces a negative length.
    """
    with open(filename, "rb") as fh:
        mm = mmap.mmap(fh.fileno(), 0, access=mmap.ACCESS_READ)
        try:
            start = block_size * block
            length = max(0, min(mm.size() - start, block_size))
            digest = hashlib.md5(mm[start:start + length]).hexdigest()
        finally:
            mm.close()
    return digest


def main(argv=sys.argv):
    """CLI entry point: <block> <file>... -> '<file> <md5>' per line."""
    block, files = int(argv[1]), argv[2:]
    for filename in files:
        print(filename, block_md5(filename, block))


# Guarded so importing this module (e.g. for testing) has no side effects.
if __name__ == "__main__":
    main()
| 24.848485 | 79 | 0.680488 |
ace237ab1ff21c584ac0fd17662f163cf7abad7f | 2,479 | py | Python | lib/url.py | mattecapu/jekyll-zulip-archive | ea73b0fd4953b115ed4dfedbd434c4f643b8192e | [
"MIT"
] | null | null | null | lib/url.py | mattecapu/jekyll-zulip-archive | ea73b0fd4953b115ed4dfedbd434c4f643b8192e | [
"MIT"
] | null | null | null | lib/url.py | mattecapu/jekyll-zulip-archive | ea73b0fd4953b115ed4dfedbd434c4f643b8192e | [
"MIT"
] | null | null | null | '''
Sometimes it feels like 80% of the battle with creating a
static website is getting all the URLs correct.
These are some helpers.
Here are some naming conventions for URL pieces:
zulip_url: https://example.zulip.com
site_url: https://example.zulip-archive.com
html_root: archive
And then URLs use Zulip stream/topics, which are sometimes
"sanitized" to guarantee uniqueness and not have special characters:
stream_id: 599
stream_name: general
topic_name: lunch
sanitized_stream_name : 599-general
sanitized_topic_name: lunch
'''
import urllib.parse
def zulip_post_url(zulip_url, stream_id, stream_name, topic_name, post_id):
    '''
    Permalink to a single Zulip post, e.g.
    https://example.zulipchat.com/#narrow/stream/213222-general/topic/hello/near/179892604
    '''
    narrow = f'{stream_id}-{stream_name}/topic/{topic_name}/near/{post_id}'
    return zulip_url + '#narrow/stream/' + urllib.parse.quote(narrow)
def archive_stream_url(site_url, html_root, sanitized_stream_name):
    '''
    Archive URL of a stream's index page, e.g.
    http://127.0.0.1:4000/archive/stream/213222-general/index.html
    '''
    root = urllib.parse.urljoin(site_url, html_root)
    return '{}/stream/{}/index.html'.format(root, sanitized_stream_name)
def archive_topic_url(site_url, html_root, sanitized_stream_name, sanitized_topic_name):
    '''
    Archive URL of a topic page, e.g.
    http://127.0.0.1:4000/archive/stream/213222-general/topic/newstreams.html
    '''
    root = urllib.parse.urljoin(site_url, html_root)
    return '{}/stream/{}/topic/topic_{}.html'.format(
        root, sanitized_stream_name, sanitized_topic_name)
def archive_message_url(site_url, html_root, sanitized_stream_name, sanitized_topic_name, msg_id):
    '''
    Archive URL of a single message: the topic page plus a '#<msg_id>'
    fragment, e.g.
    http://127.0.0.1:4000/archive/stream/213222-general/topic/newstreams.html#1234567
    '''
    base_url = urllib.parse.urljoin(site_url, html_root)
    topic_page = f'{base_url}/stream/{sanitized_stream_name}/topic/topic_{sanitized_topic_name}.html'
    return f'{topic_page}#{msg_id}'
## String cleaning functions
# remove non-alnum ascii symbols from string
def sanitize(s):
    """Drop non-ASCII characters, keep only alphanumerics and spaces,
    then make the result path friendly (spaces become '-').

    Bug fix: the original tested ``x.isalnum`` — the bound method object,
    which is always truthy — instead of calling ``x.isalnum()``, so the
    filter kept every character and the stated "remove non-alnum" intent
    was never honoured.
    """
    ascii_only = s.encode('ascii', 'ignore').decode('utf-8')
    kept = "".join(c for c in ascii_only if c.isalnum() or c == ' ')
    # '?' is already removed by the filter; the replace is kept for
    # interface parity with the original implementation.
    return kept.replace(' ', '-').replace('?', '%3F')
# create a unique sanitized identifier for a topic
def sanitize_topic(topic_name):
    # Percent-quote the topic, then turn '%' escapes into '.' escapes
    # ('.XX'); '.' itself is escaped first so it cannot collide.
    quoted = urllib.parse.quote(topic_name, safe='~()*!.\'')
    dot_escaped = quoted.replace('.', '%2E')
    return dot_escaped.replace('%', '.')
# create a unique sanitized identifier for a stream
def sanitize_stream(stream_name, stream_id):
    # Unique identifier for a stream: '<id>-<sanitized name>'.
    return '{}-{}'.format(stream_id, sanitize(stream_name))
| 35.927536 | 99 | 0.726906 |
ace239d7733dd5c972f2b45d95886fd4e63ae477 | 26,332 | py | Python | blender/arm/lightmapper/operators/tlm.py | notwarp/armory | bd6078e3035eefcb3c725664698eeb369b4c2d88 | [
"Zlib"
] | null | null | null | blender/arm/lightmapper/operators/tlm.py | notwarp/armory | bd6078e3035eefcb3c725664698eeb369b4c2d88 | [
"Zlib"
] | null | null | null | blender/arm/lightmapper/operators/tlm.py | notwarp/armory | bd6078e3035eefcb3c725664698eeb369b4c2d88 | [
"Zlib"
] | null | null | null | import bpy, os, time, blf, webbrowser, platform
import math, subprocess, multiprocessing
from .. utility import build
from .. utility.cycles import cache
from .. network import server
class TLM_BuildLightmaps(bpy.types.Operator):
    bl_idname = "tlm.build_lightmaps"
    bl_label = "Build Lightmaps"
    bl_description = "Build Lightmaps"
    bl_options = {'REGISTER', 'UNDO'}

    def modal(self, context, event):
        # TODO: progress bar reporting (planned since 0.15).
        print("MODAL")
        return {'PASS_THROUGH'}

    def invoke(self, context, event):
        # Contextual operators need a UI session; in background mode the
        # user must invoke the build module directly.
        if bpy.app.background:
            print("Running in background mode. Contextual operator not available. Use command 'thelightmapper.addon.build.prepare_build()'")
        else:
            build.prepare_build(self, False)
        return {'RUNNING_MODAL'}

    def cancel(self, context):
        pass

    def draw_callback_px(self, context, event):
        pass
class TLM_CleanLightmaps(bpy.types.Operator):
    bl_idname = "tlm.clean_lightmaps"
    bl_label = "Clean Lightmaps"
    bl_description = "Clean Lightmaps"
    bl_options = {'REGISTER', 'UNDO'}
    def execute(self, context):
        """Delete baked lightmap files and undo bake-time scene changes.

        Removes every file in the configured lightmap save directory,
        restores backed-up materials, purges leftover bake materials and
        images, and re-packs lightmap UVs of post-packed atlas objects.
        """
        scene = context.scene
        filepath = bpy.data.filepath
        # Lightmap output folder is resolved relative to the .blend file.
        dirpath = os.path.join(os.path.dirname(bpy.data.filepath), scene.TLM_EngineProperties.tlm_lightmap_savedir)
        # Delete every file previously written into the save directory.
        if os.path.isdir(dirpath):
            for file in os.listdir(dirpath):
                os.remove(os.path.join(dirpath + "/" + file))
        # Restore the pre-bake materials on all lightmapped meshes...
        for obj in bpy.data.objects:
            if obj.type == "MESH":
                if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
                    cache.backup_material_restore(obj)
        # ...then rename the backup copies back to their original names.
        for obj in bpy.data.objects:
            if obj.type == "MESH":
                if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
                    cache.backup_material_rename(obj)
        # Purge materials that ended up with no users.
        for mat in bpy.data.materials:
            if mat.users < 1:
                bpy.data.materials.remove(mat)
        # Purge the hidden "_Original" backup materials created by the bake.
        for mat in bpy.data.materials:
            if mat.name.startswith("."):
                if "_Original" in mat.name:
                    bpy.data.materials.remove(mat)
        # Drop all baked lightmap images from the blend data.
        for image in bpy.data.images:
            if image.name.endswith("_baked"):
                bpy.data.images.remove(image, do_unlink=True)
        # Post-packed objects had their lightmap UVs packed into a shared
        # atlas; re-pack each such object's UVs back to its own 0..1 space.
        for obj in bpy.data.objects:
            if obj.type == "MESH":
                if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
                    if obj.TLM_ObjectProperties.tlm_postpack_object:
                        atlas = obj.TLM_ObjectProperties.tlm_postatlas_pointer
                        atlas_resize = False
                        # Only act if the referenced atlas group still exists.
                        for atlasgroup in scene.TLM_PostAtlasList:
                            if atlasgroup.name == atlas:
                                atlas_resize = True
                        if atlas_resize:
                            # Make this object the sole selected/active one,
                            # as required by the edit-mode UV operators below.
                            bpy.ops.object.select_all(action='DESELECT')
                            obj.select_set(True)
                            bpy.context.view_layer.objects.active = obj
                            #print(x)
                            # Activate the lightmap UV layer before packing.
                            uv_layers = obj.data.uv_layers
                            for i in range(0, len(uv_layers)):
                                if uv_layers[i].name == 'UVMap_Lightmap':
                                    uv_layers.active_index = i
                                    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                                        print("Lightmap shift A")
                                    break
                            # Re-pack all islands of the active UV layer.
                            bpy.ops.object.mode_set(mode='EDIT')
                            bpy.ops.mesh.select_all(action='SELECT')
                            bpy.ops.uv.select_all(action='SELECT')
                            bpy.ops.uv.pack_islands(rotate=False, margin=0.001)
                            bpy.ops.uv.select_all(action='DESELECT')
                            bpy.ops.mesh.select_all(action='DESELECT')
                            bpy.ops.object.mode_set(mode='OBJECT')
                            if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                                #print(obj.name + ": Active UV: " + obj.data.uv_layers[obj.data.uv_layers.active_index].name)
                                print("Resized for obj: " + obj.name)
        return {'FINISHED'}
class TLM_ExploreLightmaps(bpy.types.Operator):
    bl_idname = "tlm.explore_lightmaps"
    bl_label = "Explore Lightmaps"
    bl_description = "Explore Lightmaps"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        """Open the lightmap output directory in the OS file browser.

        The directory is resolved relative to the saved .blend file and
        created if it does not exist yet; requires the file to be saved.
        """
        scene = context.scene

        if not bpy.data.is_saved:
            self.report({'INFO'}, "Please save your file first")
            return {"CANCELLED"}

        dirpath = os.path.join(
            os.path.dirname(bpy.data.filepath),
            scene.TLM_EngineProperties.tlm_lightmap_savedir)

        # The original duplicated the whole open-browser logic across the
        # exists/missing cases in both platform branches; create the
        # directory once, then open it.
        if not os.path.isdir(dirpath):
            os.mkdir(dirpath)

        if platform.system() != "Linux":
            webbrowser.open('file://' + dirpath)
        else:
            # Argument-list form avoids the shell-quoting problems the old
            # os.system('xdg-open "%s"') call had with quotes in paths.
            subprocess.call(["xdg-open", dirpath])

        return {'FINISHED'}
class TLM_EnableSelection(bpy.types.Operator):
    """Enable for selection"""
    bl_idname = "tlm.enable_selection"
    bl_label = "Enable for selection"
    bl_description = "Enable for selection"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        scene = context.scene
        scene_props = scene.TLM_SceneProperties
        for obj in bpy.context.selected_objects:
            props = obj.TLM_ObjectProperties
            props.tlm_mesh_lightmap_use = True
            if not scene_props.tlm_override_object_settings:
                continue
            # Copy the scene-level defaults onto the object.
            props.tlm_mesh_lightmap_resolution = scene_props.tlm_mesh_lightmap_resolution
            props.tlm_mesh_lightmap_unwrap_mode = scene_props.tlm_mesh_lightmap_unwrap_mode
            props.tlm_mesh_unwrap_margin = scene_props.tlm_mesh_unwrap_margin
            props.tlm_postpack_object = scene_props.tlm_postpack_object
            if props.tlm_mesh_lightmap_unwrap_mode == "AtlasGroupA":
                props.tlm_atlas_pointer = scene_props.tlm_atlas_pointer
            props.tlm_postatlas_pointer = scene_props.tlm_postatlas_pointer
        return {'FINISHED'}
class TLM_DisableSelection(bpy.types.Operator):
    """Disable for selection"""
    bl_idname = "tlm.disable_selection"
    bl_label = "Disable for selection"
    bl_description = "Disable for selection"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Clear the lightmap flag on every currently selected object.
        for selected in bpy.context.selected_objects:
            selected.TLM_ObjectProperties.tlm_mesh_lightmap_use = False
        return {'FINISHED'}
class TLM_RemoveLightmapUV(bpy.types.Operator):
    """Remove Lightmap UV for selection"""
    bl_idname = "tlm.remove_uv_selection"
    bl_label = "Remove Lightmap UV"
    bl_description = "Remove Lightmap UV for selection"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        """Delete the 'UVMap_Lightmap' layer from every selected mesh.

        Bug fix: the original removed entries from ``uv_layers`` while
        iterating over it, which can skip entries or invalidate the
        iterator; layer names are unique per mesh, so a direct lookup is
        both safe and sufficient.
        """
        for obj in bpy.context.selected_objects:
            if obj.type != "MESH":
                continue
            uv_layers = obj.data.uv_layers
            layer = uv_layers.get("UVMap_Lightmap")
            if layer is not None:
                uv_layers.remove(layer)
        return {'FINISHED'}
class TLM_SelectLightmapped(bpy.types.Operator):
    """Select all objects for lightmapping"""
    bl_idname = "tlm.select_lightmapped_objects"
    bl_label = "Select lightmap objects"
    # Bug fix: the description was copy-pasted from TLM_RemoveLightmapUV
    # ("Remove Lightmap UV for selection") and did not match this operator.
    bl_description = "Select all objects marked for lightmapping"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Add every lightmap-enabled mesh to the current selection.
        for obj in bpy.data.objects:
            if obj.type == "MESH" and obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
                obj.select_set(True)
        return {'FINISHED'}
class TLM_AtlasListNewItem(bpy.types.Operator):
    # Append a fresh AtlasGroup entry and make it the active one.
    bl_idname = "tlm_atlaslist.new_item"
    bl_label = "Add a new item"
    bl_description = "Create a new AtlasGroup"

    def execute(self, context):
        scene = context.scene
        atlas_list = scene.TLM_AtlasList
        atlas_list.add()
        last = len(atlas_list) - 1
        scene.TLM_AtlasListItem = last
        atlas_list[last].name = "AtlasGroup"
        return {'FINISHED'}
class TLM_PostAtlasListNewItem(bpy.types.Operator):
    # Append a fresh post-pack AtlasGroup entry and make it the active one.
    bl_idname = "tlm_postatlaslist.new_item"
    bl_label = "Add a new item"
    # Bug fix: a duplicate `bl_description = ""` assignment used to follow
    # and overwrite this one, leaving the operator without a tooltip.
    bl_description = "Create a new AtlasGroup"

    def execute(self, context):
        scene = context.scene
        post_list = scene.TLM_PostAtlasList
        post_list.add()
        last = len(post_list) - 1
        scene.TLM_PostAtlasListItem = last
        post_list[last].name = "AtlasGroup"
        return {'FINISHED'}
class TLM_AtlastListDeleteItem(bpy.types.Operator):
    # Delete the selected atlas group. (The historical "Atlast" typo in
    # the class name is preserved: external code may reference it.)
    bl_idname = "tlm_atlaslist.delete_item"
    bl_label = "Deletes an item"
    bl_description = "Delete an AtlasGroup"

    @classmethod
    def poll(self, context):
        """Enable the operator only while the list is non-empty."""
        scene = context.scene
        return len(scene.TLM_AtlasList) > 0

    def execute(self, context):
        scene = context.scene
        atlas_list = scene.TLM_AtlasList  # renamed: no longer shadows builtin `list`
        index = scene.TLM_AtlasListItem
        # Hoisted out of the object loop: the deleted group's name is
        # loop-invariant.
        atlasName = atlas_list[index].name
        # Objects that pointed at the removed group fall back to
        # per-object smart-project unwrapping.
        for obj in bpy.data.objects:
            if obj.TLM_ObjectProperties.tlm_atlas_pointer == atlasName:
                obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode = "SmartProject"
        atlas_list.remove(index)
        if index > 0:
            index = index - 1
        scene.TLM_AtlasListItem = index
        return {'FINISHED'}
class TLM_PostAtlastListDeleteItem(bpy.types.Operator):
    # Delete the selected post-pack atlas group. (The historical "Atlast"
    # typo in the class name is preserved: external code may reference it.)
    bl_idname = "tlm_postatlaslist.delete_item"
    bl_label = "Deletes an item"
    bl_description = "Delete an AtlasGroup"

    @classmethod
    def poll(self, context):
        """Enable the operator only while the list is non-empty."""
        scene = context.scene
        return len(scene.TLM_PostAtlasList) > 0

    def execute(self, context):
        scene = context.scene
        post_list = scene.TLM_PostAtlasList  # renamed: no longer shadows builtin `list`
        index = scene.TLM_PostAtlasListItem
        # Hoisted out of the object loop: the name is loop-invariant.
        atlasName = post_list[index].name
        # Bug fix: this post-pack variant used to compare the *pre-pack*
        # pointer (tlm_atlas_pointer), so objects assigned to the deleted
        # post-pack group were never reset — and same-named pre-pack
        # groups were reset by mistake. TLM_EnableSelection shows that
        # post-pack membership is tracked via tlm_postatlas_pointer.
        for obj in bpy.data.objects:
            if obj.TLM_ObjectProperties.tlm_postatlas_pointer == atlasName:
                obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode = "SmartProject"
        post_list.remove(index)
        if index > 0:
            index = index - 1
        scene.TLM_PostAtlasListItem = index
        return {'FINISHED'}
class TLM_AtlasListMoveItem(bpy.types.Operator):
    # Move an item in the atlas list up or down.
    bl_idname = "tlm_atlaslist.move_item"
    bl_label = "Move an item in the list"
    bl_description = "Move an item in the list"

    direction: bpy.props.EnumProperty(
        items=(
            ('UP', 'Up', ""),
            ('DOWN', 'Down', ""),))

    def move_index(self):
        """Shift the active index one step in self.direction, clamped
        to the list bounds."""
        # Bug fix: this method referenced the undefined name `context`
        # (it only receives `self`), raising NameError on every move;
        # go through bpy.context instead.
        scene = bpy.context.scene
        index = scene.TLM_AtlasListItem
        list_length = len(scene.TLM_AtlasList) - 1
        new_index = index - 1 if self.direction == 'UP' else index + 1
        new_index = max(0, min(new_index, list_length))
        scene.TLM_AtlasList.move(index, new_index)
        scene.TLM_AtlasListItem = new_index

    def execute(self, context):
        # (Unused `neighbor`/`list`/`index` locals from the original
        # implementation were removed.)
        if self.direction in ('UP', 'DOWN'):
            self.move_index()
            return {'FINISHED'}
        return {'CANCELLED'}
class TLM_PostAtlasListMoveItem(bpy.types.Operator):
    # Move an item in the post-pack atlas list up or down.
    bl_idname = "tlm_postatlaslist.move_item"
    bl_label = "Move an item in the list"
    bl_description = "Move an item in the list"

    direction: bpy.props.EnumProperty(
        items=(
            ('UP', 'Up', ""),
            ('DOWN', 'Down', ""),))

    def move_index(self):
        """Shift the active index one step in self.direction, clamped
        to the list bounds."""
        # Bug fix: this method referenced the undefined name `context`
        # (it only receives `self`), raising NameError on every move;
        # go through bpy.context instead.
        scene = bpy.context.scene
        index = scene.TLM_PostAtlasListItem
        list_length = len(scene.TLM_PostAtlasList) - 1
        new_index = index - 1 if self.direction == 'UP' else index + 1
        new_index = max(0, min(new_index, list_length))
        scene.TLM_PostAtlasList.move(index, new_index)
        scene.TLM_PostAtlasListItem = new_index

    def execute(self, context):
        # (Unused `neighbor`/`list`/`index` locals from the original
        # implementation were removed.)
        if self.direction in ('UP', 'DOWN'):
            self.move_index()
            return {'FINISHED'}
        return {'CANCELLED'}
class TLM_StartServer(bpy.types.Operator):
    bl_idname = "tlm.start_server"
    bl_label = "Start Network Server"
    bl_description = "Start Network Server"
    bl_options = {'REGISTER', 'UNDO'}

    def modal(self, context, event):
        # TODO: progress reporting (planned since 0.15).
        print("MODAL")
        return {'PASS_THROUGH'}

    def invoke(self, context, event):
        # Delegate to the network module; the operator stays modal while
        # the server runs.
        server.startServer()
        return {'RUNNING_MODAL'}
class TLM_BuildEnvironmentProbes(bpy.types.Operator):
    bl_idname = "tlm.build_environmentprobe"
    bl_label = "Build Environment Probes"
    bl_description = "Build all environment probes from reflection cubemaps"
    bl_options = {'REGISTER', 'UNDO'}
    def invoke(self, context, event):
        """For every cubemap light probe: render the six cube faces to HDR
        and post-process them with the external cmft tool (lat-long panorama,
        optional SH coefficients, optional prefiltered radiance)."""
        for obj in bpy.data.objects:
            if obj.type == "LIGHT_PROBE":
                if obj.data.type == "CUBEMAP":
                    # Create a temporary camera at the probe's location with
                    # a 90-degree FOV (one cube face per render).
                    cam_name = "EnvPCam_" + obj.name
                    camera = bpy.data.cameras.new(cam_name)
                    camobj_name = "EnvPCamera_" + obj.name
                    cam_obj = bpy.data.objects.new(camobj_name, camera)
                    bpy.context.collection.objects.link(cam_obj)
                    cam_obj.location = obj.location
                    camera.angle = math.radians(90)
                    # Save render settings so they can be restored afterwards.
                    prevResx = bpy.context.scene.render.resolution_x
                    prevResy = bpy.context.scene.render.resolution_y
                    prevCam = bpy.context.scene.camera
                    prevEngine = bpy.context.scene.render.engine
                    # Switch to the configured probe engine/resolution.
                    bpy.context.scene.camera = cam_obj
                    bpy.context.scene.render.engine = bpy.context.scene.TLM_SceneProperties.tlm_environment_probe_engine
                    bpy.context.scene.render.resolution_x = int(bpy.context.scene.TLM_SceneProperties.tlm_environment_probe_resolution)
                    bpy.context.scene.render.resolution_y = int(bpy.context.scene.TLM_SceneProperties.tlm_environment_probe_resolution)
                    # Output goes to a "Probes" folder next to the .blend file.
                    savedir = os.path.dirname(bpy.data.filepath)
                    directory = os.path.join(savedir, "Probes")
                    # Per-face camera Euler rotations (xp/xm = +/-X, etc.).
                    t = 90
                    inverted = bpy.context.scene.TLM_SceneProperties.tlm_invert_direction
                    if inverted:
                        positions = {
                            "xp" : (math.radians(t), 0, math.radians(0)),
                            "zp" : (math.radians(t), 0, math.radians(t)),
                            "xm" : (math.radians(t), 0, math.radians(t*2)),
                            "zm" : (math.radians(t), 0, math.radians(-t)),
                            "yp" : (math.radians(t*2), 0, math.radians(t)),
                            "ym" : (0, 0, math.radians(t))
                        }
                    else:
                        positions = {
                            "xp" : (math.radians(t), 0, math.radians(t*2)),
                            "zp" : (math.radians(t), 0, math.radians(-t)),
                            "xm" : (math.radians(t), 0, math.radians(0)),
                            "zm" : (math.radians(t), 0, math.radians(t)),
                            "yp" : (math.radians(t*2), 0, math.radians(-t)),
                            "ym" : (0, 0, math.radians(-t))
                        }
                    cam = cam_obj
                    # 32-bit HDR output for all face renders.
                    image_settings = bpy.context.scene.render.image_settings
                    image_settings.file_format = "HDR"
                    image_settings.color_depth = '32'
                    # Render the six cube faces.
                    # NOTE(review): bpy.data.scenes['Scene'] assumes the
                    # default scene name; a renamed scene would raise a
                    # KeyError here — confirm and consider context.scene.
                    for val in positions:
                        cam.rotation_euler = positions[val]
                        filename = os.path.join(directory, val) + "_" + camobj_name + ".hdr"
                        bpy.data.scenes['Scene'].render.filepath = filename
                        print("Writing out: " + val)
                        bpy.ops.render.render(write_still=True)
                    # Assemble the six faces into a lat-long HDR via cmft.
                    cmft_path = bpy.path.abspath(os.path.join(os.path.dirname(bpy.data.filepath), bpy.context.scene.TLM_SceneProperties.tlm_cmft_path))
                    # NOTE(review): output_file_irr appears unused below.
                    output_file_irr = camobj_name + ".hdr"
                    posx = directory + "/" + "xp_" + camobj_name + ".hdr"
                    negx = directory + "/" + "xm_" + camobj_name + ".hdr"
                    posy = directory + "/" + "yp_" + camobj_name + ".hdr"
                    negy = directory + "/" + "ym_" + camobj_name + ".hdr"
                    posz = directory + "/" + "zp_" + camobj_name + ".hdr"
                    negz = directory + "/" + "zm_" + camobj_name + ".hdr"
                    output = directory + "/" + camobj_name
                    if platform.system() == 'Windows':
                        envpipe = [cmft_path,
                        '--inputFacePosX', posx,
                        '--inputFaceNegX', negx,
                        '--inputFacePosY', posy,
                        '--inputFaceNegY', negy,
                        '--inputFacePosZ', posz,
                        '--inputFaceNegZ', negz,
                        '--output0', output,
                        '--output0params',
                        'hdr,rgbe,latlong']
                    else:
                        # NOTE(review): on this branch the flags and values
                        # are concatenated without separating spaces,
                        # producing one unusable argument string — looks
                        # like a bug; compare with the Windows branch.
                        envpipe = [cmft_path + '--inputFacePosX' + posx
                        + '--inputFaceNegX' + negx
                        + '--inputFacePosY' + posy
                        + '--inputFaceNegY' + negy
                        + '--inputFacePosZ' + posz
                        + '--inputFaceNegZ' + negz
                        + '--output0' + output
                        + '--output0params' + 'hdr,rgbe,latlong']
                    if bpy.context.scene.TLM_SceneProperties.tlm_verbose:
                        print("Calling CMFT with:" + str(envpipe))
                    if bpy.context.scene.TLM_SceneProperties.tlm_create_spherical:
                        subprocess.call(envpipe, shell=True)
                        input2 = output + ".hdr"
                        output2 = directory + "/" + camobj_name
                        # Optionally project the panorama to SH coefficients.
                        if platform.system() == 'Windows':
                            envpipe2 = [cmft_path,
                            '--input', input2,
                            '--filter', 'shcoeffs',
                            '--outputNum', '1',
                            '--output0', output2]
                        else:
                            # NOTE(review): same missing-space concatenation
                            # as above, plus '-filter' with a single dash —
                            # verify against cmft's CLI.
                            envpipe2 = [cmft_path +
                            '--input' + input2
                            + '-filter' + 'shcoeffs'
                            + '--outputNum' + '1'
                            + '--output0' + output2]
                        if bpy.context.scene.TLM_SceneProperties.tlm_write_sh:
                            subprocess.call(envpipe2, shell=True)
                        # Optionally prefilter a radiance map (blinn BRDF).
                        if bpy.context.scene.TLM_SceneProperties.tlm_write_radiance:
                            use_opencl = 'false'
                            cpu_count = 2
                            # 4096 = 256 face
                            # 2048 = 128 face
                            # 1024 = 64 face
                            target_w = int(512)
                            face_size = target_w / 8
                            if target_w == 2048:
                                mip_count = 9
                            elif target_w == 1024:
                                mip_count = 8
                            else:
                                mip_count = 7
                            output_file_rad = directory + "/" + camobj_name + "_rad.hdr"
                            if platform.system() == 'Windows':
                                envpipe3 = [
                                    cmft_path,
                                    '--input', input2,
                                    '--filter', 'radiance',
                                    '--dstFaceSize', str(face_size),
                                    '--srcFaceSize', str(face_size),
                                    '--excludeBase', 'false',
                                    # '--mipCount', str(mip_count),
                                    '--glossScale', '8',
                                    '--glossBias', '3',
                                    '--lightingModel', 'blinnbrdf',
                                    '--edgeFixup', 'none',
                                    '--numCpuProcessingThreads', str(cpu_count),
                                    '--useOpenCL', use_opencl,
                                    '--clVendor', 'anyGpuVendor',
                                    '--deviceType', 'gpu',
                                    '--deviceIndex', '0',
                                    '--generateMipChain', 'true',
                                    '--inputGammaNumerator', '1.0',
                                    '--inputGammaDenominator', '1.0',
                                    '--outputGammaNumerator', '1.0',
                                    '--outputGammaDenominator', '1.0',
                                    '--outputNum', '1',
                                    '--output0', output_file_rad,
                                    '--output0params', 'hdr,rgbe,latlong'
                                ]
                                subprocess.call(envpipe3)
                            else:
                                # Single shell command string on POSIX.
                                envpipe3 = cmft_path + \
                                    ' --input "' + input2 + '"' + \
                                    ' --filter radiance' + \
                                    ' --dstFaceSize ' + str(face_size) + \
                                    ' --srcFaceSize ' + str(face_size) + \
                                    ' --excludeBase false' + \
                                    ' --glossScale 8' + \
                                    ' --glossBias 3' + \
                                    ' --lightingModel blinnbrdf' + \
                                    ' --edgeFixup none' + \
                                    ' --numCpuProcessingThreads ' + str(cpu_count) + \
                                    ' --useOpenCL ' + use_opencl + \
                                    ' --clVendor anyGpuVendor' + \
                                    ' --deviceType gpu' + \
                                    ' --deviceIndex 0' + \
                                    ' --generateMipChain true' + \
                                    ' --inputGammaNumerator ' + '1.0' + \
                                    ' --inputGammaDenominator 1.0' + \
                                    ' --outputGammaNumerator 1.0' + \
                                    ' --outputGammaDenominator 1.0' + \
                                    ' --outputNum 1' + \
                                    ' --output0 "' + output_file_rad + '"' + \
                                    ' --output0params hdr,rgbe,latlong'
                                subprocess.call([envpipe3], shell=True)
                    # Cleanup: delete the temporary camera and restore the
                    # previous render settings.
                    for obj in bpy.data.objects:
                        obj.select_set(False)
                    cam_obj.select_set(True)
                    bpy.ops.object.delete()
                    bpy.context.scene.render.resolution_x = prevResx
                    bpy.context.scene.render.resolution_y = prevResy
                    bpy.context.scene.camera = prevCam
                    bpy.context.scene.render.engine = prevEngine
        print("Finished building environment probes")
        return {'RUNNING_MODAL'}
class TLM_CleanBuildEnvironmentProbes(bpy.types.Operator):
    bl_idname = "tlm.clean_environmentprobe"
    bl_label = "Clean Environment Probes"
    bl_description = "Clean Environment Probes"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        """Delete every file in the 'Probes' folder next to the .blend file.

        Cleanups: removed the unused `scene` local and replaced the
        misused single-argument ``os.path.join(dirpath + "/" + file)``
        with a proper two-argument join.
        """
        dirpath = os.path.join(os.path.dirname(bpy.data.filepath), "Probes")

        if os.path.isdir(dirpath):
            for file in os.listdir(dirpath):
                os.remove(os.path.join(dirpath, file))

        return {'FINISHED'}
class TLM_MergeAdjacentActors(bpy.types.Operator):
    bl_idname = "tlm.merge_adjacent_actors"
    bl_label = "Merge adjacent actors"
    bl_description = "Merges the adjacent faces/vertices of selected objects"
    bl_options = {'REGISTER', 'UNDO'}
    def execute(self, context):
        # Stub: merging is not implemented yet; the operator currently
        # succeeds without modifying anything.
        scene = context.scene
        return {'FINISHED'}
def TLM_DoubleResolution():
    # Placeholder: doubling lightmap resolution is not implemented yet.
    pass
def TLM_HalfResolution():
    # Placeholder: halving lightmap resolution is not implemented yet.
    pass
def TLM_DivideLMGroups():
    # Placeholder: not implemented yet.
    pass
def TLM_LoadFromFolder():
    # Placeholder: not implemented yet.
    pass
ace23a3562e4a242498533b33987f4f997477a44 | 59 | py | Python | lang/py/cookbook/v2/source/cb2_19_7_exm_1.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/cookbook/v2/source/cb2_19_7_exm_1.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/cookbook/v2/source/cb2_19_7_exm_1.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | the_dict = dict(windows(flat_alternating_keys_and_values))
| 29.5 | 58 | 0.864407 |
ace23a4a121292ee91a1204e2cba375b91901c2a | 2,220 | py | Python | hurt.py | DrMeepster/DarviLStuff | 1f6fffc93200606b75d286afca357db989d8df4e | [
"MIT"
] | 2 | 2021-02-02T02:11:41.000Z | 2021-02-10T13:09:44.000Z | hurt.py | DrMeepster/DarviLStuff | 1f6fffc93200606b75d286afca357db989d8df4e | [
"MIT"
] | 1 | 2020-12-28T15:46:12.000Z | 2020-12-28T19:27:19.000Z | hurt.py | DrMeepster/DarviLStuff | 1f6fffc93200606b75d286afca357db989d8df4e | [
"MIT"
] | null | null | null | #!/bin/env python3
from os import path
from random import randint
import argparse
def parseArgs():
global args
argparser = argparse.ArgumentParser(description="Damages a file by writing random bytes at random positions to it.", epilog="Written by David Losantos")
argparser.add_argument("file", help="File to open")
argparser.add_argument("-p", "--passes", help="Quantity of bytes to modify. Default is 15", type=int, default=15)
argparser.add_argument("-o", "--output", help="File to output the generated bytes to", default=None)
argparser.add_argument("-s", "--string", help="Take the string specified for the filename as input", action="store_true")
argparser.add_argument("-q", "--quiet", help="Do not output any text to stdout", action="store_true")
args = argparser.parse_args()
if not path.isfile(args.file):
if not args.string:
print(f"The file '{args.file}' does not exist.")
quit()
def capValue(value, max=float('inf'), min=float('-inf')):
"""Clamp a value to a minimun and/or maximun value."""
if value > max:
return max
elif value < min:
return min
else:
return value
def main():
byteArr: list = []
positions: list = []
if args.string:
for char in args.file:
byteArr.append(str.encode(char))
else:
with open(args.file, "rb") as f:
# Iterate over every byte and append it to a bytelist
byte = f.read(1)
while byte:
byteArr.append(byte)
byte = f.read(1)
# Generate unique positions for all passes
maxPasses = capValue(args.passes, len(byteArr))
for nxt in range(0, maxPasses):
rnd = randint(0, len(byteArr)) - 1
while rnd in positions:
rnd = randint(0, len(byteArr)) - 1
positions.append(rnd)
# Replace a random byte on the array with a random value on every pass
for index in positions:
rnd = bytes([randint(0, 255)])
byteArr.pop(index)
byteArr.insert(index, rnd)
if not args.quiet:
for byte in byteArr:
print(str(byte.decode("utf-8", "replace")), end="")
print()
if args.output:
with open(args.output, "wb") as out:
for byte in byteArr:
out.write(byte)
if __name__ == "__main__":
try:
parseArgs()
main()
except KeyboardInterrupt:
quit() | 21.142857 | 153 | 0.673874 |
ace23b06741548bb4aca532c028390bd6341a488 | 55,927 | bzl | Python | bazel/repository_locations.bzl | liuchangyan/envoy | 4a03e625960267032577f6792eef41d715b87b40 | [
"Apache-2.0"
] | null | null | null | bazel/repository_locations.bzl | liuchangyan/envoy | 4a03e625960267032577f6792eef41d715b87b40 | [
"Apache-2.0"
] | 15 | 2022-02-10T18:17:07.000Z | 2022-03-25T17:42:12.000Z | bazel/repository_locations.bzl | MarcinFalkowski/envoy | 4a03e625960267032577f6792eef41d715b87b40 | [
"Apache-2.0"
] | null | null | null | # This should match the schema defined in external_deps.bzl.
REPOSITORY_LOCATIONS_SPEC = dict(
bazel_compdb = dict(
project_name = "bazel-compilation-database",
project_desc = "Clang JSON compilation database support for Bazel",
project_url = "https://github.com/grailbio/bazel-compilation-database",
version = "0.5.2",
sha256 = "d32835b26dd35aad8fd0ba0d712265df6565a3ad860d39e4c01ad41059ea7eda",
strip_prefix = "bazel-compilation-database-{version}",
urls = ["https://github.com/grailbio/bazel-compilation-database/archive/{version}.tar.gz"],
release_date = "2021-09-10",
use_category = ["build"],
),
bazel_gazelle = dict(
project_name = "Gazelle",
project_desc = "Bazel BUILD file generator for Go projects",
project_url = "https://github.com/bazelbuild/bazel-gazelle",
version = "0.24.0",
sha256 = "de69a09dc70417580aabf20a28619bb3ef60d038470c7cf8442fafcf627c21cb",
urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/v{version}/bazel-gazelle-v{version}.tar.gz"],
release_date = "2021-10-11",
use_category = ["build"],
),
bazel_toolchains = dict(
project_name = "bazel-toolchains",
project_desc = "Bazel toolchain configs for RBE",
project_url = "https://github.com/bazelbuild/bazel-toolchains",
version = "5.1.1",
sha256 = "e52789d4e89c3e2dc0e3446a9684626a626b6bec3fde787d70bae37c6ebcc47f",
strip_prefix = "bazel-toolchains-{version}",
urls = [
"https://github.com/bazelbuild/bazel-toolchains/archive/v{version}.tar.gz",
],
release_date = "2021-11-30",
use_category = ["build"],
),
build_bazel_rules_apple = dict(
project_name = "Apple Rules for Bazel",
project_desc = "Bazel rules for Apple platforms",
project_url = "https://github.com/bazelbuild/rules_apple",
version = "0.32.0",
sha256 = "77e8bf6fda706f420a55874ae6ee4df0c9d95da6c7838228b26910fc82eea5a2",
urls = ["https://github.com/bazelbuild/rules_apple/releases/download/{version}/rules_apple.{version}.tar.gz"],
release_date = "2021-10-29",
use_category = ["build"],
),
rules_fuzzing = dict(
project_name = "Fuzzing Rules for Bazel",
project_desc = "Bazel rules for fuzz tests",
project_url = "https://github.com/bazelbuild/rules_fuzzing",
version = "0.3.1",
sha256 = "4965ff7341f4759f07c83b146f603d6e8cfc35ef99fee3ef39bf61ffa96b1f8b",
strip_prefix = "rules_fuzzing-{version}",
urls = ["https://github.com/bazelbuild/rules_fuzzing/archive/v{version}.tar.gz"],
release_date = "2022-01-24",
use_category = ["test_only"],
implied_untracked_deps = [
# This is a repository rule generated to define an OSS-Fuzz fuzzing
# engine target from the CFLAGS/CXXFLAGS environment.
"rules_fuzzing_oss_fuzz",
],
),
envoy_build_tools = dict(
project_name = "envoy-build-tools",
project_desc = "Common build tools shared by the Envoy/UDPA ecosystem",
project_url = "https://github.com/envoyproxy/envoy-build-tools",
version = "1162be3669036d2c09359a95d39ff65fc6608f39",
sha256 = "8f5ac011a443649a27a7b82bc447de4f564f9cb5b6812d87c3bc1b1e74d2055f",
strip_prefix = "envoy-build-tools-{version}",
urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/{version}.tar.gz"],
release_date = "2022-01-28",
use_category = ["build"],
),
boringssl = dict(
project_name = "BoringSSL",
project_desc = "Minimal OpenSSL fork",
project_url = "https://github.com/google/boringssl",
# To update BoringSSL, which tracks Chromium releases:
# 1. Open https://omahaproxy.appspot.com/ and note <current_version> of linux/beta release.
# 2. Open https://chromium.googlesource.com/chromium/src/+/refs/tags/<current_version>/DEPS and note <boringssl_revision>.
# 3. Find a commit in BoringSSL's "master-with-bazel" branch that merges <boringssl_revision>.
#
# chromium-99.0.4844.17 (linux/beta)
version = "5416e4f16bffdc24e71e84d6e3977eed73c6f6b3",
sha256 = "864cb8005739db6f2b615ed167ba12a88a7f5c54e39c911e1c271fdfad21a3e7",
strip_prefix = "boringssl-{version}",
urls = ["https://github.com/google/boringssl/archive/{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
release_date = "2022-01-05",
cpe = "cpe:2.3:a:google:boringssl:*",
),
boringssl_fips = dict(
project_name = "BoringSSL (FIPS)",
project_desc = "FIPS compliant BoringSSL",
project_url = "https://boringssl.googlesource.com/boringssl/+/master/crypto/fipsmodule/FIPS.md",
version = "fips-20190808",
sha256 = "3b5fdf23274d4179c2077b5e8fa625d9debd7a390aac1d165b7e47234f648bb8",
urls = ["https://commondatastorage.googleapis.com/chromium-boringssl-fips/boringssl-ae223d6138807a13006342edfeef32e813246b39.tar.xz"],
use_category = ["controlplane", "dataplane_core"],
release_date = "2019-08-08",
cpe = "cpe:2.3:a:google:boringssl:*",
),
com_google_absl = dict(
project_name = "Abseil",
project_desc = "Open source collection of C++ libraries drawn from the most fundamental pieces of Google’s internal codebase",
project_url = "https://abseil.io/",
version = "17c954d90d5661e27db8fc5f086085690a8372d9",
sha256 = "2e4ace2ed32a4ccfd29e856ad72b4fd1eae2ec060d3ba8646857fa170d6e8269",
strip_prefix = "abseil-cpp-{version}",
urls = ["https://github.com/abseil/abseil-cpp/archive/{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2021-06-03",
cpe = "N/A",
),
# This dependency is built only when performance tracing is enabled with the
# option --define=perf_tracing=enabled. It's never built for releases.
com_github_google_perfetto = dict(
project_name = "Perfetto",
project_desc = "Perfetto Tracing SDK",
project_url = "https://perfetto.dev/",
version = "22.1",
sha256 = "013ba743019a1ca04627f7ce8bf424b60ed7f0f57e232eff719ae879be4c90fd",
strip_prefix = "perfetto-{version}/sdk",
urls = ["https://github.com/google/perfetto/archive/v{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2021-12-07",
cpe = "N/A",
),
com_github_c_ares_c_ares = dict(
project_name = "c-ares",
project_desc = "C library for asynchronous DNS requests",
project_url = "https://c-ares.haxx.se/",
version = "1.18.1",
sha256 = "1a7d52a8a84a9fbffb1be9133c0f6e17217d91ea5a6fa61f6b4729cda78ebbcf",
strip_prefix = "c-ares-{version}",
urls = ["https://github.com/c-ares/c-ares/releases/download/cares-{underscore_version}/c-ares-{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2021-10-27",
cpe = "cpe:2.3:a:c-ares_project:c-ares:*",
),
com_github_circonus_labs_libcircllhist = dict(
project_name = "libcircllhist",
project_desc = "An implementation of Circonus log-linear histograms",
project_url = "https://github.com/circonus-labs/libcircllhist",
version = "63a16dd6f2fc7bc841bb17ff92be8318df60e2e1",
sha256 = "8165aa25e529d7d4b9ae849d3bf30371255a99d6db0421516abcff23214cdc2c",
strip_prefix = "libcircllhist-{version}",
urls = ["https://github.com/circonus-labs/libcircllhist/archive/{version}.tar.gz"],
use_category = ["controlplane", "observability_core", "dataplane_core"],
release_date = "2019-02-11",
cpe = "N/A",
),
com_github_cyan4973_xxhash = dict(
project_name = "xxHash",
project_desc = "Extremely fast hash algorithm",
project_url = "https://github.com/Cyan4973/xxHash",
version = "0.8.1",
sha256 = "3bb6b7d6f30c591dd65aaaff1c8b7a5b94d81687998ca9400082c739a690436c",
strip_prefix = "xxHash-{version}",
urls = ["https://github.com/Cyan4973/xxHash/archive/v{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2021-11-29",
cpe = "N/A",
),
com_github_envoyproxy_sqlparser = dict(
project_name = "C++ SQL Parser Library",
project_desc = "Forked from Hyrise SQL Parser",
project_url = "https://github.com/envoyproxy/sql-parser",
version = "3b40ba2d106587bdf053a292f7e3bb17e818a57f",
sha256 = "96c10c8e950a141a32034f19b19cdeb1da48fe859cf96ae5e19f894f36c62c71",
strip_prefix = "sql-parser-{version}",
urls = ["https://github.com/envoyproxy/sql-parser/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.filters.network.mysql_proxy",
"envoy.filters.network.postgres_proxy",
],
release_date = "2020-06-10",
cpe = "N/A",
),
com_github_mirror_tclap = dict(
project_name = "tclap",
project_desc = "Small, flexible library that provides a simple interface for defining and accessing command line arguments",
project_url = "http://tclap.sourceforge.net",
version = "1.2.5",
sha256 = "7e87d13734076fa4f626f6144ce9a02717198b3f054341a6886e2107b048b235",
strip_prefix = "tclap-{version}",
urls = ["https://github.com/mirror/tclap/archive/v{version}.tar.gz"],
release_date = "2021-11-01",
use_category = ["other"],
cpe = "cpe:2.3:a:tclap_project:tclap:*",
),
com_github_fmtlib_fmt = dict(
project_name = "fmt",
project_desc = "{fmt} is an open-source formatting library providing a fast and safe alternative to C stdio and C++ iostreams",
project_url = "https://fmt.dev",
version = "7.0.3",
sha256 = "decfdf9ad274070fa85f26407b816f5a4d82205ae86bac1990be658d0795ea4d",
strip_prefix = "fmt-{version}",
urls = ["https://github.com/fmtlib/fmt/releases/download/{version}/fmt-{version}.zip"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2020-08-07",
cpe = "cpe:2.3:a:fmt:fmt:*",
),
com_github_gabime_spdlog = dict(
project_name = "spdlog",
project_desc = "Very fast, header-only/compiled, C++ logging library",
project_url = "https://github.com/gabime/spdlog",
version = "1.7.0",
sha256 = "f0114a4d3c88be9e696762f37a7c379619443ce9d668546c61b21d41affe5b62",
strip_prefix = "spdlog-{version}",
urls = ["https://github.com/gabime/spdlog/archive/v{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2020-07-09",
cpe = "N/A",
),
com_github_google_libprotobuf_mutator = dict(
project_name = "libprotobuf-mutator",
project_desc = "Library to randomly mutate protobuffers",
project_url = "https://github.com/google/libprotobuf-mutator",
version = "1.0",
sha256 = "792f250fb546bde8590e72d64311ea00a70c175fd77df6bb5e02328fa15fe28e",
strip_prefix = "libprotobuf-mutator-{version}",
urls = ["https://github.com/google/libprotobuf-mutator/archive/v{version}.tar.gz"],
release_date = "2020-11-13",
use_category = ["test_only"],
),
com_github_google_libsxg = dict(
project_name = "libsxg",
project_desc = "Signed HTTP Exchange library",
project_url = "https://github.com/google/libsxg",
version = "beaa3939b76f8644f6833267e9f2462760838f18",
sha256 = "082bf844047a9aeec0d388283d5edc68bd22bcf4d32eb5a566654ae89956ad1f",
strip_prefix = "libsxg-{version}",
urls = ["https://github.com/google/libsxg/archive/{version}.tar.gz"],
use_category = ["other"],
extensions = ["envoy.filters.http.sxg"],
release_date = "2021-07-08",
cpe = "N/A",
),
com_github_google_tcmalloc = dict(
project_name = "tcmalloc",
project_desc = "Fast, multi-threaded malloc implementation",
project_url = "https://github.com/google/tcmalloc",
version = "9f385356c34d4fc11f76a000b609e2b446c20667",
sha256 = "652e48e0b9ef645db04bff8a3d4841c60ce07275f5d98e18e698dc92bd111291",
strip_prefix = "tcmalloc-{version}",
urls = ["https://github.com/google/tcmalloc/archive/{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2020-11-04",
cpe = "N/A",
),
com_github_gperftools_gperftools = dict(
project_name = "gperftools",
project_desc = "tcmalloc and profiling libraries",
project_url = "https://github.com/gperftools/gperftools",
version = "2.9.1",
sha256 = "ea566e528605befb830671e359118c2da718f721c27225cbbc93858c7520fee3",
strip_prefix = "gperftools-{version}",
urls = ["https://github.com/gperftools/gperftools/releases/download/gperftools-{version}/gperftools-{version}.tar.gz"],
release_date = "2021-03-03",
use_category = ["dataplane_core", "controlplane"],
cpe = "cpe:2.3:a:gperftools_project:gperftools:*",
),
com_github_grpc_grpc = dict(
project_name = "gRPC",
project_desc = "gRPC C core library",
project_url = "https://grpc.io",
version = "a3ae8e00a2c5553c806e83fae83e33f0198913f0",
sha256 = "1ccc2056b68b81ada8df61310e03dfa0541c34821fd711654d0590a7321db9c8",
strip_prefix = "grpc-{version}",
urls = ["https://github.com/grpc/grpc/archive/{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2021-06-07",
cpe = "cpe:2.3:a:grpc:grpc:*",
),
com_github_intel_ipp_crypto_crypto_mb = dict(
project_name = "libipp-crypto",
project_desc = "Intel® Integrated Performance Primitives Cryptography",
project_url = "https://github.com/intel/ipp-crypto",
version = "2021.5",
sha256 = "0b277548c59e6bfe489e634d622b54be3708086fc006a441d39922c2d6d43f0d",
strip_prefix = "ipp-crypto-ippcp_{version}",
urls = ["https://github.com/intel/ipp-crypto/archive/ippcp_{version}.tar.gz"],
release_date = "2021-12-21",
use_category = ["dataplane_ext"],
extensions = ["envoy.tls.key_providers.cryptomb"],
cpe = "cpe:2.3:a:intel:cryptography_for_intel_integrated_performance_primitives:*",
),
com_github_luajit_luajit = dict(
project_name = "LuaJIT",
project_desc = "Just-In-Time compiler for Lua",
project_url = "https://luajit.org",
# The last release version, 2.1.0-beta3 has a number of CVEs filed
# against it. These may not impact correct non-malicious Lua code, but for prudence we bump.
version = "1d8b747c161db457e032a023ebbff511f5de5ec2",
sha256 = "20a159c38a98ecdb6368e8d655343b6036622a29a1621da9dc303f7ed9bf37f3",
strip_prefix = "LuaJIT-{version}",
urls = ["https://github.com/LuaJIT/LuaJIT/archive/{version}.tar.gz"],
release_date = "2020-10-12",
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.http.lua"],
cpe = "cpe:2.3:a:luajit:luajit:*",
),
com_github_moonjit_moonjit = dict(
project_name = "Moonjit",
project_desc = "LuaJIT fork with wider platform support",
project_url = "https://github.com/moonjit/moonjit",
version = "2.2.0",
sha256 = "83deb2c880488dfe7dd8ebf09e3b1e7613ef4b8420de53de6f712f01aabca2b6",
strip_prefix = "moonjit-{version}",
urls = ["https://github.com/moonjit/moonjit/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.http.lua"],
release_date = "2020-01-14",
cpe = "cpe:2.3:a:moonjit_project:moonjit:*",
),
com_github_nghttp2_nghttp2 = dict(
project_name = "Nghttp2",
        project_desc = "Implementation of HTTP/2 and its header compression algorithm HPACK in C",
project_url = "https://nghttp2.org",
version = "1.46.0",
sha256 = "4b6d11c85f2638531d1327fe1ed28c1e386144e8841176c04153ed32a4878208",
strip_prefix = "nghttp2-{version}",
urls = ["https://github.com/nghttp2/nghttp2/releases/download/v{version}/nghttp2-{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
release_date = "2021-10-19",
cpe = "cpe:2.3:a:nghttp2:nghttp2:*",
),
io_opentracing_cpp = dict(
project_name = "OpenTracing",
project_desc = "Vendor-neutral APIs and instrumentation for distributed tracing",
project_url = "https://opentracing.io",
version = "1.5.1",
sha256 = "015c4187f7a6426a2b5196f0ccd982aa87f010cf61f507ae3ce5c90523f92301",
strip_prefix = "opentracing-cpp-{version}",
urls = ["https://github.com/opentracing/opentracing-cpp/archive/v{version}.tar.gz"],
use_category = ["observability_ext"],
extensions = [
"envoy.tracers.datadog",
"envoy.tracers.dynamic_ot",
"envoy.tracers.lightstep",
],
release_date = "2019-01-16",
cpe = "N/A",
),
com_lightstep_tracer_cpp = dict(
project_name = "lightstep-tracer-cpp",
project_desc = "LightStep distributed tracing library for C++",
project_url = "https://github.com/lightstep/lightstep-tracer-cpp",
version = "1942b3f142e218ebc143a043f32e3278dafec9aa",
sha256 = "3238921a8f578beb26c2215cd277e8f6752f3d29b020b881d60d96a240a38aed",
strip_prefix = "lightstep-tracer-cpp-{version}",
urls = ["https://github.com/lightstep/lightstep-tracer-cpp/archive/{version}.tar.gz"],
use_category = ["observability_ext"],
extensions = ["envoy.tracers.lightstep"],
release_date = "2020-08-25",
cpe = "N/A",
),
skywalking_data_collect_protocol = dict(
project_name = "skywalking-data-collect-protocol",
project_desc = "Data Collect Protocols of Apache SkyWalking",
project_url = "https://github.com/apache/skywalking-data-collect-protocol",
name = "skywalking_data_collect_protocol",
sha256 = "49bd689b9c1c0ea12064bd35581689cef7835e5ac15d335dc425fbfc2029aa90",
urls = ["https://github.com/apache/skywalking-data-collect-protocol/archive/v{version}.tar.gz"],
strip_prefix = "skywalking-data-collect-protocol-{version}",
version = "8.9.1",
use_category = ["observability_ext"],
extensions = ["envoy.tracers.skywalking"],
release_date = "2021-12-11",
cpe = "cpe:2.3:a:apache:skywalking:*",
),
com_github_skyapm_cpp2sky = dict(
project_name = "cpp2sky",
project_desc = "C++ SDK for Apache SkyWalking",
project_url = "https://github.com/SkyAPM/cpp2sky",
sha256 = "f65b1054bd6eadadff0618f272f6d645a1ec933fa14af922a8e3c39603e45eaf",
version = "0.3.1",
strip_prefix = "cpp2sky-{version}",
urls = ["https://github.com/SkyAPM/cpp2sky/archive/v{version}.tar.gz"],
use_category = ["observability_ext"],
extensions = ["envoy.tracers.skywalking"],
release_date = "2021-06-17",
cpe = "N/A",
),
com_github_datadog_dd_opentracing_cpp = dict(
project_name = "Datadog OpenTracing C++ Client",
project_desc = "Datadog OpenTracing C++ Client",
project_url = "https://github.com/DataDog/dd-opentracing-cpp",
version = "1.2.1",
sha256 = "ae44699e4aa2d21b70ed897a6c0cf3ed7dfb411e1aae4e686e39af75cec7c9bf",
strip_prefix = "dd-opentracing-cpp-{version}",
urls = ["https://github.com/DataDog/dd-opentracing-cpp/archive/v{version}.tar.gz"],
use_category = ["observability_ext"],
extensions = ["envoy.tracers.datadog"],
release_date = "2021-01-27",
cpe = "N/A",
),
com_github_google_benchmark = dict(
project_name = "Benchmark",
project_desc = "Library to benchmark code snippets",
project_url = "https://github.com/google/benchmark",
version = "1.5.1",
sha256 = "23082937d1663a53b90cb5b61df4bcc312f6dee7018da78ba00dd6bd669dfef2",
strip_prefix = "benchmark-{version}",
urls = ["https://github.com/google/benchmark/archive/v{version}.tar.gz"],
use_category = ["test_only"],
release_date = "2020-06-09",
),
com_github_libevent_libevent = dict(
project_name = "libevent",
project_desc = "Event notification library",
project_url = "https://libevent.org",
# This SHA includes the new "prepare" and "check" watchers, used for event loop performance
# stats (see https://github.com/libevent/libevent/pull/793) and the fix for a race condition
# in the watchers (see https://github.com/libevent/libevent/pull/802).
# This also includes the fixes for https://github.com/libevent/libevent/issues/806
# and https://github.com/envoyproxy/envoy-mobile/issues/215.
# This also includes the fixes for Phantom events with EV_ET (see
# https://github.com/libevent/libevent/issues/984).
# This also includes the wepoll backend for Windows (see
# https://github.com/libevent/libevent/pull/1006)
# TODO(adip): Update to v2.2 when it is released.
version = "62c152d9a7cd264b993dad730c4163c6ede2e0a3",
sha256 = "4c80e5fe044ce5f8055b20a2f141ee32ec2614000f3e95d2aa81611a4c8f5213",
strip_prefix = "libevent-{version}",
urls = ["https://github.com/libevent/libevent/archive/{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2020-07-28",
cpe = "cpe:2.3:a:libevent_project:libevent:*",
),
# This should be removed, see https://github.com/envoyproxy/envoy/issues/13261.
net_zlib = dict(
project_name = "zlib",
project_desc = "zlib compression library",
project_url = "https://zlib.net",
version = "79baebe50e4d6b73ae1f8b603f0ef41300110aa3",
# Use the dev branch of zlib to resolve fuzz bugs and out of bound
# errors resulting in crashes in zlib 1.2.11.
# TODO(asraa): Remove when zlib > 1.2.11 is released.
sha256 = "155a8f8c1a753fb05b16a1b0cc0a0a9f61a78e245f9e0da483d13043b3bcbf2e",
strip_prefix = "zlib-{version}",
urls = ["https://github.com/madler/zlib/archive/{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
release_date = "2019-04-14",
cpe = "cpe:2.3:a:gnu:zlib:*",
),
org_brotli = dict(
project_name = "brotli",
project_desc = "brotli compression library",
project_url = "https://brotli.org",
# Use the dev branch of brotli to resolve compilation issues.
# TODO(rojkov): Remove when brotli > 1.0.9 is released.
version = "0cd2e3926e95e7e2930f57ae3f4885508d462a25",
sha256 = "93810780e60304b51f2c9645fe313a6e4640711063ed0b860cfa60999dd256c5",
strip_prefix = "brotli-{version}",
urls = ["https://github.com/google/brotli/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.compression.brotli.compressor",
"envoy.compression.brotli.decompressor",
],
release_date = "2020-09-08",
cpe = "cpe:2.3:a:google:brotli:*",
),
com_github_zlib_ng_zlib_ng = dict(
project_name = "zlib-ng",
project_desc = "zlib fork (higher performance)",
project_url = "https://github.com/zlib-ng/zlib-ng",
version = "2.0.6",
sha256 = "8258b75a72303b661a238047cb348203d88d9dddf85d480ed885f375916fcab6",
strip_prefix = "zlib-ng-{version}",
urls = ["https://github.com/zlib-ng/zlib-ng/archive/{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
release_date = "2021-12-24",
cpe = "N/A",
),
com_github_jbeder_yaml_cpp = dict(
project_name = "yaml-cpp",
project_desc = "YAML parser and emitter in C++ matching the YAML 1.2 spec",
project_url = "https://github.com/jbeder/yaml-cpp",
version = "db6deedcd301754723065e0bbb1b75927c5b49c7",
sha256 = "387d7f25467312ca59068081f9a25bbab02bb6af32fd3e0aec1bd59163558171",
strip_prefix = "yaml-cpp-{version}",
urls = ["https://github.com/jbeder/yaml-cpp/archive/{version}.tar.gz"],
# YAML is also used for runtime as well as controlplane. It shouldn't appear on the
# dataplane but we can't verify this automatically due to code structure today.
use_category = ["controlplane", "dataplane_core"],
release_date = "2021-07-23",
cpe = "cpe:2.3:a:yaml-cpp_project:yaml-cpp:*",
),
com_github_msgpack_msgpack_c = dict(
project_name = "msgpack for C/C++",
project_desc = "MessagePack is an efficient binary serialization format",
project_url = "https://github.com/msgpack/msgpack-c",
version = "3.3.0",
sha256 = "6e114d12a5ddb8cb11f669f83f32246e484a8addd0ce93f274996f1941c1f07b",
strip_prefix = "msgpack-{version}",
urls = ["https://github.com/msgpack/msgpack-c/releases/download/cpp-{version}/msgpack-{version}.tar.gz"],
use_category = ["observability_ext"],
extensions = ["envoy.tracers.datadog"],
release_date = "2020-06-05",
cpe = "N/A",
),
com_github_google_jwt_verify = dict(
project_name = "jwt_verify_lib",
project_desc = "JWT verification library for C++",
project_url = "https://github.com/google/jwt_verify_lib",
version = "e5d6cf7067495b0868787e1fd1e75cef3242a840",
sha256 = "0d294dc8697049a0d7f2aaa81d08713fea581061c5359d6edb229b3e7c6cf58e",
strip_prefix = "jwt_verify_lib-{version}",
urls = ["https://github.com/google/jwt_verify_lib/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.http.jwt_authn"],
release_date = "2021-03-05",
cpe = "N/A",
),
com_github_nodejs_http_parser = dict(
project_name = "HTTP Parser",
project_desc = "Parser for HTTP messages written in C",
project_url = "https://github.com/nodejs/http-parser",
        # This SHA includes the fix for https://github.com/nodejs/http-parser/issues/517, which allows (opt-in) serving
        # requests with both Content-Length and Transfer-Encoding: chunked headers set.
version = "4f15b7d510dc7c6361a26a7c6d2f7c3a17f8d878",
sha256 = "6a12896313ce1ca630cf516a0ee43a79b5f13f5a5d8143f56560ac0b21c98fac",
strip_prefix = "http-parser-{version}",
urls = ["https://github.com/nodejs/http-parser/archive/{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
release_date = "2020-07-10",
cpe = "cpe:2.3:a:nodejs:node.js:*",
),
com_github_alibaba_hessian2_codec = dict(
project_name = "hessian2-codec",
project_desc = "hessian2-codec is a C++ library for hessian2 codec",
project_url = "https://github.com/alibaba/hessian2-codec.git",
version = "dd8e05487a27b367b90ce81f4e6e6f62d693a212",
sha256 = "93260c54406e11b7be078a7ea120f7ab0df475c733e68d010fde400c5c8c8162",
strip_prefix = "hessian2-codec-{version}",
urls = ["https://github.com/alibaba/hessian2-codec/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.network.dubbo_proxy"],
release_date = "2021-04-05",
cpe = "N/A",
),
com_github_tencent_rapidjson = dict(
project_name = "RapidJSON",
project_desc = "Fast JSON parser/generator for C++",
project_url = "https://rapidjson.org",
version = "dfbe1db9da455552f7a9ad5d2aea17dd9d832ac1",
sha256 = "a2faafbc402394df0fa94602df4b5e4befd734aad6bb55dfef46f62fcaf1090b",
strip_prefix = "rapidjson-{version}",
urls = ["https://github.com/Tencent/rapidjson/archive/{version}.tar.gz"],
# We're mostly using com_google_protobuf for JSON, but there are some extensions and hard to
# disentangle uses on the dataplane, e.g. header_formatter, Squash filter.
use_category = ["controlplane", "dataplane_core"],
release_date = "2019-12-03",
cpe = "cpe:2.3:a:tencent:rapidjson:*",
),
com_github_nlohmann_json = dict(
project_name = "nlohmann JSON",
project_desc = "Fast JSON parser/generator for C++",
project_url = "https://nlohmann.github.io/json",
version = "3.10.5",
sha256 = "5daca6ca216495edf89d167f808d1d03c4a4d929cef7da5e10f135ae1540c7e4",
strip_prefix = "json-{version}",
urls = ["https://github.com/nlohmann/json/archive/v{version}.tar.gz"],
# This will be a replacement for rapidJSON used in extensions and may also be a fast
# replacement for protobuf JSON.
use_category = ["controlplane", "dataplane_core"],
release_date = "2022-01-03",
cpe = "cpe:2.3:a:json-for-modern-cpp_project:json-for-modern-cpp:*",
),
# This is an external dependency needed while running the
# envoy docker image. A bazel target has been created since
# there is no binary package available for the utility on Ubuntu
# which is the base image used to build an envoy container.
# This is not needed to build an envoy binary or run tests.
com_github_ncopa_suexec = dict(
project_name = "su-exec",
project_desc = "Utility to switch user and group id, setgroups and exec",
project_url = "https://github.com/ncopa/su-exec",
version = "212b75144bbc06722fbd7661f651390dc47a43d1",
sha256 = "939782774079ec156788ea3e04dd5e340e993544f4296be76a9c595334ca1779",
strip_prefix = "su-exec-{version}",
urls = ["https://github.com/ncopa/su-exec/archive/{version}.tar.gz"],
use_category = ["other"],
release_date = "2019-09-18",
cpe = "N/A",
),
com_google_googletest = dict(
project_name = "Google Test",
project_desc = "Google's C++ test framework",
project_url = "https://github.com/google/googletest",
# Pick up fix for MOCK_METHOD compilation with clang-cl for Windows (resolved after 1.10.0)
# see https://github.com/google/googletest/issues/2490
version = "a4ab0abb93620ce26efad9de9296b73b16e88588",
sha256 = "7897bfaa5ad39a479177cfb5c3ce010184dbaee22a7c3727b212282871918751",
strip_prefix = "googletest-{version}",
urls = ["https://github.com/google/googletest/archive/{version}.tar.gz"],
release_date = "2020-09-10",
use_category = ["test_only"],
cpe = "cpe:2.3:a:google:google_test:*",
),
com_google_protobuf = dict(
project_name = "Protocol Buffers",
project_desc = "Language-neutral, platform-neutral extensible mechanism for serializing structured data",
project_url = "https://developers.google.com/protocol-buffers",
version = "3.19.4",
sha256 = "ba0650be1b169d24908eeddbe6107f011d8df0da5b1a5a4449a913b10e578faf",
strip_prefix = "protobuf-{version}",
urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v{version}/protobuf-all-{version}.tar.gz"],
use_category = ["dataplane_core", "controlplane"],
release_date = "2022-01-28",
cpe = "cpe:2.3:a:google:protobuf:*",
),
grpc_httpjson_transcoding = dict(
project_name = "grpc-httpjson-transcoding",
project_desc = "Library that supports transcoding so that HTTP/JSON can be converted to gRPC",
project_url = "https://github.com/grpc-ecosystem/grpc-httpjson-transcoding",
version = "9e2a4b583fef47d85775fb8bd3baf7115b0e71b1",
sha256 = "4637b335dfd82f721c675159101714a7e1ad42129a8a118a5c62680dbc90bdb2",
strip_prefix = "grpc-httpjson-transcoding-{version}",
urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.http.grpc_json_transcoder"],
release_date = "2022-01-06",
cpe = "N/A",
),
io_bazel_rules_go = dict(
project_name = "Go rules for Bazel",
project_desc = "Bazel rules for the Go language",
project_url = "https://github.com/bazelbuild/rules_go",
version = "0.29.0",
sha256 = "2b1641428dff9018f9e85c0384f03ec6c10660d935b750e3fa1492a281a53b0f",
urls = ["https://github.com/bazelbuild/rules_go/releases/download/v{version}/rules_go-v{version}.zip"],
use_category = ["build", "api"],
release_date = "2021-10-06",
implied_untracked_deps = [
"com_github_golang_protobuf",
"io_bazel_rules_nogo",
"org_golang_google_protobuf",
"org_golang_x_tools",
],
),
rules_cc = dict(
project_name = "C++ rules for Bazel",
project_desc = "Bazel rules for the C++ language",
project_url = "https://github.com/bazelbuild/rules_cc",
version = "0.0.1",
sha256 = "4dccbfd22c0def164c8f47458bd50e0c7148f3d92002cdb459c2a96a68498241",
urls = ["https://github.com/bazelbuild/rules_cc/releases/download/{version}/rules_cc-{version}.tar.gz"],
release_date = "2021-10-07",
use_category = ["build"],
),
rules_foreign_cc = dict(
project_name = "Rules for using foreign build systems in Bazel",
project_desc = "Rules for using foreign build systems in Bazel",
project_url = "https://github.com/bazelbuild/rules_foreign_cc",
version = "0.7.1",
sha256 = "bcd0c5f46a49b85b384906daae41d277b3dc0ff27c7c752cc51e43048a58ec83",
strip_prefix = "rules_foreign_cc-{version}",
urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/{version}.tar.gz"],
release_date = "2022-01-03",
use_category = ["build", "dataplane_core", "controlplane"],
),
rules_python = dict(
project_name = "Python rules for Bazel",
project_desc = "Bazel rules for the Python language",
project_url = "https://github.com/bazelbuild/rules_python",
version = "0.6.0",
sha256 = "a30abdfc7126d497a7698c29c46ea9901c6392d6ed315171a6df5ce433aa4502",
release_date = "2022-01-05",
strip_prefix = "rules_python-{version}",
urls = ["https://github.com/bazelbuild/rules_python/archive/{version}.tar.gz"],
use_category = ["build"],
),
rules_pkg = dict(
project_name = "Packaging rules for Bazel",
project_desc = "Bazel rules for the packaging distributions",
project_url = "https://github.com/bazelbuild/rules_pkg",
version = "0.6.0",
sha256 = "04535dbfbdf3ec839a2c578a0705a34e5a0bbfd4438b29e285b961e6e0b97ce1",
strip_prefix = "rules_pkg-{version}",
urls = ["https://github.com/bazelbuild/rules_pkg/archive/{version}.tar.gz"],
use_category = ["build"],
release_date = "2022-01-24",
),
org_llvm_llvm = dict(
# When changing this, you must re-generate the list of llvm libs
# see `bazel/foreign_cc/BUILD` for further information.
project_name = "LLVM",
project_desc = "LLVM Compiler Infrastructure",
project_url = "https://llvm.org",
version = "12.0.1",
sha256 = "7d9a8405f557cefc5a21bf5672af73903b64749d9bc3a50322239f56f34ffddf",
strip_prefix = "llvm-{version}.src",
urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/llvm-{version}.src.tar.xz"],
release_date = "2021-07-09",
use_category = ["dataplane_ext"],
extensions = [
"envoy.wasm.runtime.wamr",
"envoy.wasm.runtime.wavm",
],
cpe = "cpe:2.3:a:llvm:*:*",
),
com_github_wamr = dict(
project_name = "Webassembly Micro Runtime",
project_desc = "A standalone runtime with a small footprint for WebAssembly",
project_url = "https://github.com/bytecodealliance/wasm-micro-runtime",
version = "WAMR-12-30-2021",
sha256 = "ab6e8643ec553347b0e129a355dc723969f49355d1d5bfa74d724d984c238037",
strip_prefix = "wasm-micro-runtime-{version}",
urls = ["https://github.com/bytecodealliance/wasm-micro-runtime/archive/{version}.tar.gz"],
release_date = "2021-12-30",
use_category = ["dataplane_ext"],
extensions = ["envoy.wasm.runtime.wamr"],
cpe = "N/A",
),
com_github_wavm_wavm = dict(
project_name = "WAVM",
project_desc = "WebAssembly Virtual Machine",
project_url = "https://wavm.github.io",
version = "c8997ebf154f3b42e688e670a7d0fa045b7a32a0",
sha256 = "bf2b2aec8a4c6a5413081c0527cb40dd16cb67e9c74a91f8a82fe1cf27a3c5d5",
strip_prefix = "WAVM-{version}",
urls = ["https://github.com/WAVM/WAVM/archive/{version}.tar.gz"],
release_date = "2021-12-15",
use_category = ["dataplane_ext"],
extensions = ["envoy.wasm.runtime.wavm"],
cpe = "cpe:2.3:a:webassembly_virtual_machine_project:webassembly_virtual_machine:*",
),
com_github_wasmtime = dict(
project_name = "wasmtime",
project_desc = "A standalone runtime for WebAssembly",
project_url = "https://github.com/bytecodealliance/wasmtime",
version = "0.33.0",
sha256 = "c59a2aa110b25921d370944287cd97205c73cf3dc76776c5b3551135c1e42ddc",
strip_prefix = "wasmtime-{version}",
urls = ["https://github.com/bytecodealliance/wasmtime/archive/v{version}.tar.gz"],
release_date = "2022-01-05",
use_category = ["dataplane_ext"],
extensions = ["envoy.wasm.runtime.wasmtime"],
cpe = "cpe:2.3:a:bytecodealliance:wasmtime:*",
),
com_github_wasm_c_api = dict(
project_name = "wasm-c-api",
project_desc = "WebAssembly C and C++ API",
project_url = "https://github.com/WebAssembly/wasm-c-api",
# this is the submodule's specific commit used by wasmtime
# https://github.com/bytecodealliance/wasmtime/tree/v0.25.0/crates/c-api
version = "c9d31284651b975f05ac27cee0bab1377560b87e",
sha256 = "c774044f51431429e878bd1b9e2a4e38932f861f9211df72f75e9427eb6b8d32",
strip_prefix = "wasm-c-api-{version}",
urls = ["https://github.com/WebAssembly/wasm-c-api/archive/{version}.tar.gz"],
release_date = "2021-01-11",
use_category = ["dataplane_ext"],
extensions = ["envoy.wasm.runtime.wasmtime"],
cpe = "N/A",
),
io_opencensus_cpp = dict(
project_name = "OpenCensus C++",
project_desc = "OpenCensus tracing library",
project_url = "https://github.com/census-instrumentation/opencensus-cpp",
version = "ba631066779a534267fdb1321b19850eb2b0c000",
sha256 = "f239a40803f6e2e42b57c9e68771b0990c4ca8b2d76b440073cdf14f4211ad26",
strip_prefix = "opencensus-cpp-{version}",
urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/{version}.tar.gz"],
use_category = ["observability_ext"],
extensions = ["envoy.tracers.opencensus"],
release_date = "2020-10-08",
cpe = "N/A",
),
# This should be removed, see https://github.com/envoyproxy/envoy/issues/11816.
com_github_curl = dict(
project_name = "curl",
project_desc = "Library for transferring data with URLs",
project_url = "https://curl.haxx.se",
version = "7.81.0",
sha256 = "ac8e1087711084548d788ef18b9b732c8de887457b81f616fc681d1044b32f98",
strip_prefix = "curl-{version}",
urls = ["https://github.com/curl/curl/releases/download/curl-{underscore_version}/curl-{version}.tar.gz"],
use_category = ["dataplane_ext", "observability_ext"],
extensions = [
"envoy.filters.http.aws_lambda",
"envoy.filters.http.aws_request_signing",
"envoy.grpc_credentials.aws_iam",
"envoy.tracers.opencensus",
],
release_date = "2022-01-05",
cpe = "cpe:2.3:a:haxx:libcurl:*",
),
com_googlesource_chromium_v8 = dict(
project_name = "V8",
project_desc = "Google’s open source high-performance JavaScript and WebAssembly engine, written in C++",
project_url = "https://v8.dev",
version = "9.9.115.3",
# This archive was created using https://storage.googleapis.com/envoyproxy-wee8/wee8-archive.sh
# and contains complete checkout of V8 with all dependencies necessary to build wee8.
sha256 = "4f4353928d10adbc07503edcb7dbd5a20981de669225dcffc450472cbfb179c2",
urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-{version}.tar.gz"],
strip_prefix = "wee8",
use_category = ["dataplane_ext"],
extensions = ["envoy.wasm.runtime.v8"],
release_date = "2022-01-24",
cpe = "cpe:2.3:a:google:v8:*",
),
com_github_google_quiche = dict(
project_name = "QUICHE",
        project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google's implementation of QUIC and related protocols",
project_url = "https://github.com/google/quiche",
version = "a780cd0c3b26c0307bd63c68c4d72dd04f63d8bb",
sha256 = "d5985917e0e91306f5e37cae50fa7153d9716184cc536c2db30908cf9ef6ffef",
urls = ["https://github.com/google/quiche/archive/{version}.tar.gz"],
strip_prefix = "quiche-{version}",
use_category = ["dataplane_core"],
release_date = "2022-01-31",
cpe = "N/A",
),
com_googlesource_googleurl = dict(
project_name = "Chrome URL parsing library",
project_desc = "Chrome URL parsing library",
project_url = "https://quiche.googlesource.com/googleurl",
# Static snapshot of https://quiche.googlesource.com/googleurl/+archive/561705e0066ff11e6cb97b8092f1547835beeb92.tar.gz.
version = "561705e0066ff11e6cb97b8092f1547835beeb92",
sha256 = "7ce00768fea1fa4c7bf658942f13e41c9ba30e9cff931a6cda2f9fd02289f673",
urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
extensions = [],
release_date = "2021-08-31",
cpe = "N/A",
),
com_google_cel_cpp = dict(
project_name = "Common Expression Language (CEL) C++ library",
project_desc = "Common Expression Language (CEL) C++ library",
project_url = "https://opensource.google/projects/cel",
version = "60c7aeabb4e6fa633b49c14d6c6fc8f0516761b9",
sha256 = "7cb1e8ce293182e1d28321d4d6baecdacbc263cffcd9da1f7ffd25312611a329",
strip_prefix = "cel-cpp-{version}",
urls = ["https://github.com/google/cel-cpp/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.access_loggers.extension_filters.cel",
"envoy.access_loggers.wasm",
"envoy.bootstrap.wasm",
"envoy.rate_limit_descriptors.expr",
"envoy.filters.http.rbac",
"envoy.filters.http.wasm",
"envoy.filters.network.rbac",
"envoy.filters.network.wasm",
"envoy.stat_sinks.wasm",
"envoy.rbac.matchers.upstream_ip_port",
],
release_date = "2021-11-08",
cpe = "N/A",
),
com_github_google_flatbuffers = dict(
project_name = "FlatBuffers",
project_desc = "Cross platform serialization library architected for maximum memory efficiency",
project_url = "https://github.com/google/flatbuffers",
version = "2.0.0",
sha256 = "9ddb9031798f4f8754d00fca2f1a68ecf9d0f83dfac7239af1311e4fd9a565c4",
strip_prefix = "flatbuffers-{version}",
urls = ["https://github.com/google/flatbuffers/archive/v{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.access_loggers.extension_filters.cel",
"envoy.access_loggers.wasm",
"envoy.bootstrap.wasm",
"envoy.rate_limit_descriptors.expr",
"envoy.filters.http.rbac",
"envoy.filters.http.wasm",
"envoy.filters.network.rbac",
"envoy.filters.network.wasm",
"envoy.stat_sinks.wasm",
"envoy.rbac.matchers.upstream_ip_port",
],
release_date = "2021-05-10",
cpe = "cpe:2.3:a:google:flatbuffers:*",
),
com_googlesource_code_re2 = dict(
project_name = "RE2",
project_desc = "RE2, a regular expression library",
project_url = "https://github.com/google/re2",
version = "2021-11-01",
sha256 = "8c45f7fba029ab41f2a7e6545058d9eec94eef97ce70df58e92d85cfc08b4669",
strip_prefix = "re2-{version}",
urls = ["https://github.com/google/re2/archive/{version}.tar.gz"],
use_category = ["controlplane", "dataplane_core"],
release_date = "2021-11-01",
cpe = "N/A",
),
# Included to access FuzzedDataProvider.h. This is compiler agnostic but
# provided as part of the compiler-rt source distribution. We can't use the
# Clang variant as we are not a Clang-LLVM only shop today.
org_llvm_releases_compiler_rt = dict(
project_name = "compiler-rt",
project_desc = "LLVM compiler runtime library",
project_url = "https://compiler-rt.llvm.org",
version = "12.0.1",
sha256 = "b4c8d5f2a802332987c1c0a95b5afb35b1a66a96fe44add4e4ed4792c4cba0a4",
# Only allow peeking at fuzzer related files for now.
strip_prefix = "compiler-rt-{version}.src",
urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/compiler-rt-{version}.src.tar.xz"],
release_date = "2021-07-09",
use_category = ["test_only"],
cpe = "cpe:2.3:a:llvm:compiler-rt:*",
),
upb = dict(
project_name = "upb",
project_desc = "A small protobuf implementation in C (gRPC dependency)",
project_url = "https://github.com/protocolbuffers/upb",
version = "de76b31f9c56b28120580d53a6f8d7941fdb79eb",
sha256 = "487d84ce85065ff89ccde1c1ac2ea1515d2be411306e4adf1be6861dc4a4a86b",
release_date = "2020-12-29",
strip_prefix = "upb-{version}",
urls = ["https://github.com/protocolbuffers/upb/archive/{version}.tar.gz"],
use_category = ["controlplane"],
cpe = "N/A",
),
kafka_source = dict(
project_name = "Kafka (source)",
project_desc = "Open-source distributed event streaming platform",
project_url = "https://kafka.apache.org",
version = "3.0.0",
sha256 = "862526ee07c372d7b2f7e672c096fe84bb1e115ef536e0ad7497e6fb50e08e02",
strip_prefix = "kafka-{version}/clients/src/main/resources/common/message",
urls = ["https://github.com/apache/kafka/archive/{version}.zip"],
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.network.kafka_broker", "envoy.filters.network.kafka_mesh"],
release_date = "2021-09-08",
cpe = "cpe:2.3:a:apache:kafka:*",
),
edenhill_librdkafka = dict(
project_name = "Kafka (C/C++ client)",
project_desc = "C/C++ client for Apache Kafka (open-source distributed event streaming platform)",
project_url = "https://github.com/edenhill/librdkafka",
version = "1.8.2",
sha256 = "6a747d293a7a4613bd2897e28e8791476fbe1ae7361f2530a876e0fd483482a6",
strip_prefix = "librdkafka-{version}",
urls = ["https://github.com/edenhill/librdkafka/archive/v{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = ["envoy.filters.network.kafka_mesh"],
release_date = "2021-10-18",
cpe = "N/A",
),
kafka_server_binary = dict(
project_name = "Kafka (server binary)",
project_desc = "Open-source distributed event streaming platform",
project_url = "https://kafka.apache.org",
version = "3.0.0",
sha256 = "a82728166bbccf406009747a25e1fe52dbcb4d575e4a7a8616429b5818cd02d1",
strip_prefix = "kafka_2.13-{version}",
urls = ["https://archive.apache.org/dist/kafka/{version}/kafka_2.13-{version}.tgz"],
release_date = "2021-09-20",
use_category = ["test_only"],
),
kafka_python_client = dict(
project_name = "Kafka (Python client)",
project_desc = "Open-source distributed event streaming platform",
project_url = "https://kafka.apache.org",
version = "2.0.2",
sha256 = "5dcf87c559e7aee4f18d621a02e247db3e3552ee4589ca611d51eef87b37efed",
strip_prefix = "kafka-python-{version}",
urls = ["https://github.com/dpkp/kafka-python/archive/{version}.tar.gz"],
release_date = "2020-09-30",
use_category = ["test_only"],
),
proxy_wasm_cpp_sdk = dict(
project_name = "WebAssembly for Proxies (C++ SDK)",
project_desc = "WebAssembly for Proxies (C++ SDK)",
project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-sdk",
version = "27dbe2e4d3ac2c2b7ecd919d30973486bd522517",
sha256 = "4f08d11d7a7264355cc9da51e0499f5b929ba557c17da79d67269b51b8a12d7c",
strip_prefix = "proxy-wasm-cpp-sdk-{version}",
urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-sdk/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.access_loggers.wasm",
"envoy.bootstrap.wasm",
"envoy.filters.http.wasm",
"envoy.filters.network.wasm",
"envoy.stat_sinks.wasm",
"envoy.wasm.runtime.null",
"envoy.wasm.runtime.v8",
"envoy.wasm.runtime.wamr",
"envoy.wasm.runtime.wavm",
"envoy.wasm.runtime.wasmtime",
],
release_date = "2022-01-19",
cpe = "N/A",
),
proxy_wasm_cpp_host = dict(
project_name = "WebAssembly for Proxies (C++ host implementation)",
project_desc = "WebAssembly for Proxies (C++ host implementation)",
project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-host",
version = "819dcc02bd2bc6fdec07720379c4d522d6b7da08",
sha256 = "95b72d43d7cacc608bbc1370ba83fbfa893f807620b39470101079a23327a7c3",
strip_prefix = "proxy-wasm-cpp-host-{version}",
urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-host/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.access_loggers.wasm",
"envoy.bootstrap.wasm",
"envoy.filters.http.wasm",
"envoy.filters.network.wasm",
"envoy.stat_sinks.wasm",
"envoy.wasm.runtime.null",
"envoy.wasm.runtime.v8",
"envoy.wasm.runtime.wamr",
"envoy.wasm.runtime.wavm",
"envoy.wasm.runtime.wasmtime",
],
release_date = "2022-02-01",
cpe = "N/A",
),
proxy_wasm_rust_sdk = dict(
project_name = "WebAssembly for Proxies (Rust SDK)",
project_desc = "WebAssembly for Proxies (Rust SDK)",
project_url = "https://github.com/proxy-wasm/proxy-wasm-rust-sdk",
version = "abd0f5437212e5fd3dd6a70eac3959934278e643",
sha256 = "a7c7f2fadc151e00694507598880894dfe2d2ea24f858ec9681d38f2abfbe811",
strip_prefix = "proxy-wasm-rust-sdk-{version}",
urls = ["https://github.com/proxy-wasm/proxy-wasm-rust-sdk/archive/{version}.tar.gz"],
use_category = ["test_only"],
release_date = "2021-07-13",
cpe = "N/A",
),
emscripten_toolchain = dict(
project_name = "Emscripten SDK",
project_desc = "Emscripten SDK (use by Wasm)",
project_url = "https://github.com/emscripten-core/emsdk",
version = "3.1.1",
sha256 = "3a4893f0bb8203469e1197aa235fc49ed6f5dd2d490e9244a6899a8ad860f3e6",
strip_prefix = "emsdk-{version}",
urls = ["https://github.com/emscripten-core/emsdk/archive/{version}.tar.gz"],
use_category = ["build"],
release_date = "2022-01-10",
),
rules_rust = dict(
project_name = "Bazel rust rules",
project_desc = "Bazel rust rules (used by Wasm)",
project_url = "https://github.com/bazelbuild/rules_rust",
version = "82b650d5d0709ae4c0ee8584f4ed92112ba11d67",
sha256 = "d087851b76204935f7f23c172eb0d136c09720b8484d8151019523652ce77004",
strip_prefix = "rules_rust-{version}",
urls = ["https://github.com/bazelbuild/rules_rust/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = ["envoy.wasm.runtime.wasmtime"],
release_date = "2021-10-19",
cpe = "N/A",
),
rules_antlr = dict(
project_name = "ANTLR Rules for Bazel",
project_desc = "Bazel rules for ANTLR",
project_url = "https://github.com/marcohu/rules_antlr",
version = "3cc2f9502a54ceb7b79b37383316b23c4da66f9a",
sha256 = "7249d1569293d9b239e23c65f6b4c81a07da921738bde0dfeb231ed98be40429",
strip_prefix = "rules_antlr-{version}",
urls = ["https://github.com/marcohu/rules_antlr/archive/{version}.tar.gz"],
# ANTLR has a runtime component, so is not purely build.
use_category = ["dataplane_ext"],
extensions = [
"envoy.access_loggers.extension_filters.cel",
"envoy.access_loggers.wasm",
"envoy.bootstrap.wasm",
"envoy.rate_limit_descriptors.expr",
"envoy.filters.http.wasm",
"envoy.filters.network.wasm",
"envoy.stat_sinks.wasm",
],
release_date = "2019-06-21",
cpe = "N/A",
),
antlr4_runtimes = dict(
project_name = "ANTLR v4",
project_desc = "ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files",
project_url = "https://github.com/antlr/antlr4",
version = "4.7.2",
sha256 = "46f5e1af5f4bd28ade55cb632f9a069656b31fc8c2408f9aa045f9b5f5caad64",
strip_prefix = "antlr4-{version}",
urls = ["https://github.com/antlr/antlr4/archive/{version}.tar.gz"],
use_category = ["dataplane_ext"],
extensions = [
"envoy.access_loggers.extension_filters.cel",
"envoy.access_loggers.wasm",
"envoy.bootstrap.wasm",
"envoy.rate_limit_descriptors.expr",
"envoy.filters.http.wasm",
"envoy.filters.network.wasm",
"envoy.stat_sinks.wasm",
],
release_date = "2018-12-18",
cpe = "N/A",
),
com_github_fdio_vpp_vcl = dict(
project_name = "VPP Comms Library",
project_desc = "FD.io Vector Packet Processor (VPP) Comms Library",
project_url = "https://fd.io/",
version = "7c3275e84b64ade4e20d00e4457bd4e437b1894f",
sha256 = "d456d37bbb7f90ec1ef166c1387e788b4c3078d38303f12ab41f1d0ac1a1cfc0",
strip_prefix = "vpp-{version}",
urls = ["https://github.com/FDio/vpp/archive/{version}.tar.gz"],
use_category = ["other"],
extensions = ["envoy.bootstrap.vcl"],
release_date = "2021-12-10",
cpe = "N/A",
),
)
| 50.384685 | 185 | 0.648381 |
ace23b75ab958386a6483863142959fb02f2c418 | 1,339 | py | Python | Python/Vectorization/VectorSolutions.py | jessicaleete/numerical_computing | cc71f51f35ca74d00e617af3d1a0223e19fb9a68 | [
"CC-BY-3.0"
] | 10 | 2016-10-18T19:54:25.000Z | 2021-10-09T20:12:38.000Z | Python/Vectorization/VectorSolutions.py | jessicaleete/numerical_computing | cc71f51f35ca74d00e617af3d1a0223e19fb9a68 | [
"CC-BY-3.0"
] | null | null | null | Python/Vectorization/VectorSolutions.py | jessicaleete/numerical_computing | cc71f51f35ca74d00e617af3d1a0223e19fb9a68 | [
"CC-BY-3.0"
] | 2 | 2017-05-14T16:07:59.000Z | 2020-06-20T09:05:06.000Z | import numpy as np
from numpy.random import rand
import matplotlib.pyplot as plt
# Assorted vectorization problems
def assortment():
    """Solutions to the assorted NumPy vectorization exercises (a)-(j).

    Each lettered snippet is independent and self-contained; most results
    are deliberately discarded -- the vectorized expression itself is the
    answer.  `rand` draws from NumPy's global RNG, so the exact call order
    determines the values produced.
    """
    # a: matrix times its own transpose.
    X = rand(100, 10)
    X.dot(X.T)
    # b: sum of squared entries per row (squared row norms).
    (X*X).sum(axis=1)
    # c: the quadratic form v.A.v for every row v of V, without a loop.
    A = rand(10, 10)
    V = rand(100, 10)
    (V.dot(A)*V).sum(axis=1)
    # d: count how many entries fall below one half.
    A = rand(1000)
    (A<.5).sum()
    # e: zero out entries below one quarter via boolean-mask assignment.
    A[A<.25] = 0.
    # f: apply A to every row of X; transpose twice to keep row layout.
    A = rand(10, 10)
    X = rand(100, 10)
    A.dot(X.T).T
    # g: batch of 2x2 matrices applied to every row of B; swapaxes
    # rearranges dot's output so the batch axes line up.
    A = rand(10, 2, 2)
    B = rand(20, 2)
    A.dot(B.T).swapaxes(1, 2)
    # h: boolean vector times matrix selects-and-sums the rows of A whose
    # first-column entry is below one half, then sums the result.
    A = rand(100, 100)
    (A[:,0] < .5).dot(A).sum()
    # i: elementwise (row-count * P) minus the masked matrix-vector
    # product over the boolean matrix D.
    P = rand(100)
    D = (rand(100, 100) < .5)
    P * D.sum(axis=1) - D.dot(P)
    # j (shuffle problem): interleave the two halves of a 52-element deck;
    # .copy() prevents aliasing while the slices are being overwritten.
    A = np.arange(52)
    A[::2], A[1::2] = A[A.shape[0]//2:].copy(), A[:A.shape[0]//2].copy()
# image vectorization problem
def image_vect():
    """Image vectorization exercise: deviation of each channel from the
    per-pixel channel mean, on a random 100x200 3-channel image."""
    img = rand(100, 200, 3)
    # (a) grayscale conversion: average the three channels per pixel.
    img.mean(axis=2)
    # (b) absolute deviation of every channel from its pixel's mean;
    # keepdims lets the (100, 200, 1) mean broadcast against (100, 200, 3).
    channel_mean = img.mean(axis=2, keepdims=True)
    return np.abs(img - channel_mean)
#edit==1 inverts
#edit==2 grayscales
#edit==3 does a motion blur of n
def imageEditor(X, edit, n=1):
    """Show an edited version of image array X with matplotlib.

    Parameters
    ----------
    X : ndarray
        Image of shape (rows, cols, channels); the invert branch assumes
        8-bit intensities in [0, 255] -- TODO confirm against callers.
    edit : int
        1 inverts the image, 2 averages the channels to grayscale, any
        other value applies a horizontal motion blur of width ``n``.
    n : int
        Blur width; only used by the motion-blur branch.
    """
    if edit == 1:
        # Invert each intensity.
        Xnew = 255 - X
    elif edit == 2:
        # Grayscale: integer mean across the color channels.
        Xnew = X.mean(axis=2, dtype=int)
    else:
        # Motion blur: average each pixel with the n-1 pixels to its right.
        # BUGFIX: the original used `xrange`, which is Python 2 only and
        # raises NameError on Python 3; `range` behaves identically here.
        Xnew = X.copy() / n
        for i in range(1, n):
            Xnew[:, :-i, :] += X[:, i:, :] / n
            # Edge columns have fewer right-hand neighbors; re-add their
            # partial sums so they are not dimmed relative to the interior.
            Xnew[:, -i:, :] += Xnew[:, -i:, :] / n
    plt.imshow(Xnew)
    plt.show()
| 20.921875 | 72 | 0.487677 |
ace23c1e70e84e49f049028f006492430358c2d7 | 2,478 | py | Python | app/controllers/skill.py | lmregus/mywebsite | 6a73c1ed582f2b3380806c0bb23ce077af3c39bf | [
"MIT"
] | null | null | null | app/controllers/skill.py | lmregus/mywebsite | 6a73c1ed582f2b3380806c0bb23ce077af3c39bf | [
"MIT"
] | null | null | null | app/controllers/skill.py | lmregus/mywebsite | 6a73c1ed582f2b3380806c0bb23ce077af3c39bf | [
"MIT"
] | null | null | null | from server import app
from flask import render_template
from flask import request
from flask import redirect
from flask_login import login_required
from models.skill import Skill
# Page titles passed to the templates rendered by the skill views below.
create_page_title = 'Create Skill'
edit_page_title = 'Edit Skill'
@app.route('/admin/skill')
@login_required
def skill():
    """Render the admin skill page listing every stored skill."""
    all_skills = Skill().get_all()
    return render_template('/admin/skill.html',
                           skills=all_skills,
                           page_title=create_page_title)
@app.route('/admin/skill/create', methods = ['POST'])
@login_required
def create_skill():
    """Create a skill from the submitted form and re-render the skill page.

    Reads ``title``, ``proficiency`` and ``category`` from the POSTed form,
    persists a new Skill, and renders the admin list with a status message.
    """
    skill = Skill()
    message = ''
    if request.method == 'POST':  # route only accepts POST, so this always holds
        data = {
            'title': request.form['title'],
            'proficiency': request.form['proficiency'],
            'category': request.form['category'],
        }
        skill = skill.create(data)
        message = skill.title + ' was created'
    else:
        message = 'There was an error trying to create the entry'
    # BUGFIX: fetch the list *after* the insert so the freshly created
    # skill appears on the rendered page (the old code queried first).
    skills = Skill().get_all()
    # Also pass page_title for consistency with skill()/update_skill().
    return render_template('/admin/skill.html', skills=skills,
                           message=message,
                           page_title=create_page_title)
@app.route('/admin/skill/edit/<skill_id>', methods = ['POST', 'GET'])
@login_required
def edit_skill(skill_id):
    """Show the edit form pre-filled with the skill identified by skill_id."""
    selected = Skill().get_by_id(skill_id)
    return render_template('admin/update_skill.html',
                           skill=selected,
                           page_title=edit_page_title)
@app.route('/admin/skill/update', methods = ['POST'])
@login_required
def update_skill():
    """Apply the submitted form changes to an existing skill.

    Reads ``id``, ``title``, ``proficiency`` and ``category`` from the
    POSTed form, updates the matching Skill, and renders the admin list
    with a status message.
    """
    skill = Skill()
    message = ''
    if request.method == 'POST':  # route only accepts POST, so this always holds
        data = {
            'id': request.form['id'],
            'title': request.form['title'],
            'proficiency': request.form['proficiency'],
            'category': request.form['category'],
        }
        skill = skill.update(data)
        message = skill.title + ' was updated'
    else:
        message = 'There was an error trying to update the entry'
    # BUGFIX: query *after* the update so the page reflects the edited
    # values (the old code fetched the list before mutating).
    skills = Skill().get_all()
    return render_template('admin/skill.html', skills=skills,
                           message=message,
                           page_title=create_page_title)
@app.route('/admin/skill/delete/<skill_id>')
@login_required
def delete_skill(skill_id):
    """Delete the skill identified by ``skill_id`` and re-render the list."""
    deleted = Skill().delete(skill_id)
    message = deleted.title + ' deleted'
    # BUGFIX: fetch the remaining skills *after* the delete so the removed
    # entry no longer shows up (the old code listed the pre-delete set).
    skills = Skill().get_all()
    return render_template('admin/skill.html', skills=skills,
                           message=message,
                           page_title=create_page_title)
ace23c42ac6cd6ba7edc32ab86732664871dda3e | 23,446 | py | Python | imageset-viewer.py | its-jd/imageset-viewer | 53998bbcdfe1aad91664ff791c489b3f59a501f9 | [
"MIT"
] | 53 | 2018-05-17T06:12:07.000Z | 2022-03-28T10:41:24.000Z | imageset-viewer.py | its-jd/imageset-viewer | 53998bbcdfe1aad91664ff791c489b3f59a501f9 | [
"MIT"
] | 7 | 2020-06-09T15:33:32.000Z | 2021-11-12T14:20:54.000Z | imageset-viewer.py | its-jd/imageset-viewer | 53998bbcdfe1aad91664ff791c489b3f59a501f9 | [
"MIT"
] | 18 | 2019-05-30T15:22:40.000Z | 2022-01-06T15:34:30.000Z | #!/usr/bin/env python
# coding: utf-8
__author__ = 'Zhuo Zhang'
__copyright__ = 'Copyright 2017-2020, Zhuo Zhang'
__license__ = 'MIT'
__version__ = '0.5'
__email__ = 'imzhuo@foxmail.com'
__status__ = 'Development'
__description__ = 'Tkinter based GUI, visualizing PASCAL VOC object detection annotation'
"""
Changelog:
- 2020-06-16 11:39 v0.5
Support specifying ignore and not ignore class names. Better logger. Fix MacOS font.
- 2020-06-13 00:48 v0.4
API change: add class name mapping dict, mapping xml class name to shown class name.
Based on this, ImageNet2012 and self-defined VOC format style dataset labels can show.
Supported image extension: bmp, jpg, jpeg, png and their upper cases.
- 2020-06-09 23:14 v0.3
User select saving directory(optional) for picking up interested images.
By pressing left control button, selected image is saved.
- 2020-06-02 16:40 v0.2
User choose image and annotation folders separately. Better UI layout.
Colorful boxes and class name text.
- 2020-06-01 14:44 v0.1
Draw object class name. Add license. Polish meta info. Adjust UI.
- 2017.10.22 22:36 v0.0
Created project. Dependencies: Python, Tkinter(GUI), opencv(image processing),
lxml(annotation parsing).
You may need this: pip install --upgrade image pillow lxml numpy
"""
from PIL import Image, ImageTk, ImageFont, ImageDraw # pillow module
import os
import cv2
from lxml import etree
import numpy as np
import random
import colorsys
import shutil
import platform
import matplotlib.font_manager as fm # to create font
import six
import logging
from natsort import natsorted
import time
if six.PY3:
import tkinter as tk
from tkinter.filedialog import askdirectory
else:
import Tkinter as tk
from tkFileDialog import askdirectory
def draw_text(im, text, text_org, color=(0,0,255,0), font=None):
    """Render `text` onto an OpenCV-style ndarray image and return the result.

    Works around cv2.putText's limitations (large glyphs, no anti-aliasing,
    no CJK support) by round-tripping through Pillow:
    ndarray -> PIL image -> draw text -> ndarray.

    @param im: image as a numpy ndarray (e.g. loaded by cv2.imread)
    @param text: string to render; non-ASCII characters are supported
    @param text_org: (x, y) anchor position for the text
    @param color: fill color forwarded to Pillow's draw.text
    @param font: a PIL.ImageFont, e.g.
        ImageFont.truetype('C:/Windows/Fonts/msyh.ttc', 20)

    Example:
        font = ImageFont.truetype('C:/Windows/Fonts/msyh.ttc', 20)
        im = draw_text(im, "object", (256, 256), font=font)
    """
    canvas = Image.fromarray(im)
    ImageDraw.Draw(canvas).text(text_org, text, font=font, fill=color)
    return np.array(canvas)
class BndBox(object):
    """Axis-aligned bounding box with an optional class label.

    Coordinates follow the PASCAL VOC convention: (x1, y1) is the top-left
    corner and (x2, y2) the bottom-right corner, in pixels.
    """

    def __init__(self, x1=0, y1=0, x2=0, y2=0, cls_name=None):
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
        self.cls_name = cls_name  # class name (None when not yet parsed)

    def __repr__(self):
        # Added for debuggability; not relied upon elsewhere in this module.
        return 'BndBox(x1={0}, y1={1}, x2={2}, y2={3}, cls_name={4!r})'.format(
            self.x1, self.y1, self.x2, self.y2, self.cls_name)
class PascalVOC2007XML:
    """Minimal reader for a PASCAL VOC 2007 detection annotation (.xml) file."""

    def __init__(self, xml_pth):
        # TODO: validate xml_pth's content
        self.tree = etree.parse(xml_pth)
        self.boxes = []

    def get_boxes(self):
        """Parse (once, lazily) and return every <object> as a BndBox."""
        if not self.boxes:
            for obj_node in self.tree.xpath('//object'):
                parsed = BndBox()
                for child in obj_node.getchildren():
                    if child.tag == 'name':
                        parsed.cls_name = child.text
                    elif child.tag == 'bndbox':
                        # Child order is assumed to be xmin, ymin, xmax, ymax
                        # -- TODO confirm against the annotation producer.
                        coords = [int(float(c.text)) for c in child.getchildren()]
                        parsed.x1, parsed.y1, parsed.x2, parsed.y2 = coords
                self.boxes.append(parsed)
        return self.boxes
def get_color_table(num_cls=20):
    """Build a list of `num_cls` visually distinct RGB colors.

    Hues are spread evenly around the HSV wheel at full saturation and
    value, converted to 8-bit RGB, then shuffled with a fixed seed so the
    palette is deterministic across runs.

    @param num_cls: number of colors (default 20, the PASCAL VOC class count)
    @return: list of (r, g, b) tuples, each component in [0, 255]
    """
    hsv_tuples = [(i * 1.0 / num_cls, 1., 1.) for i in range(num_cls)]
    colors = []
    for h, s, v in hsv_tuples:
        r, g, b = colorsys.hsv_to_rgb(h, s, v)
        colors.append((int(r * 255), int(g * 255), int(b * 255)))
    # IMPROVED: shuffle with a private Random instance instead of
    # seed(42)/shuffle/seed(None) on the global RNG -- same permutation
    # (same Mersenne Twister seeding), but the module-global random state
    # is left untouched as a side effect.
    random.Random(42).shuffle(colors)
    return colors
class VOCViewer(tk.Tk):
    """Tkinter main window that browses a folder of images and overlays
    their PASCAL-VOC-style XML bounding-box annotations.

    Layout: a main frame (directory pickers + image canvas) and a side
    frame (scrollable list of image file names).  Selecting a list entry
    loads the image, draws its boxes and class names, and shows it;
    pressing left-Ctrl copies the selected original image to a chosen
    saving directory.
    """

    def __init__(self, im_dir=None, anno_dir=None, save_dir=None, max_width=None, max_height=None, box_thick=1,
                 name_mapping=None, ignore_names=None, not_ignore_names=None):
        """
        @param im_dir: the directory which contains images, e.g. "JPEGImages"
        @param anno_dir: the directory which contains the XML annotations
        @param save_dir: directory that left-Ctrl copies selected images into
        @param max_width: max image width when image is displayed
        @param max_height: max image height when image is displayed
        @param box_thick: thickness of bounding box
        @param name_mapping: dict of: class name in XML => class name to be viewed
        @param ignore_names: list of class names that will be ignored on viewer
        @param not_ignore_names: list of all class names to be viewed
        @note `ignore_names` and `not_ignore_names` shouldn't be setting at the same time
        @note loading image: tk doesn't support directly load image. Pillow module is required as intermidiate stuff.
        """
        #super().__init__() # not working for Python2
        tk.Tk.__init__(self)
        self.init_logger()
        self.init_layout(im_dir, anno_dir, save_dir, max_width, max_height, box_thick)
        self.init_dataset(name_mapping, ignore_names, not_ignore_names)

    def init_logger(self):
        """Configure the root logger to write DEBUG output both to a
        timestamped ./view-YYYYmmddHHMM.log file and to the console."""
        logger = logging.getLogger()
        logger.setLevel(logging.WARN)
        formatter = logging.Formatter(
            '%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S')
        time_line = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))
        logfile = os.getcwd() + '/view-' + time_line + '.log'
        # print to file via FileHandler
        fh = logging.FileHandler(logfile)
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        # print to screen via StreamHandler
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(formatter)
        # add two Handler
        logger.addHandler(ch)
        logger.addHandler(fh)
        self.logger = logger

    def should_ignore(self, cls_name):
        """Return True when boxes of class `cls_name` must not be drawn.

        ignore_names acts as a blacklist; not_ignore_names as a whitelist.
        With neither configured, nothing is ignored.
        """
        if self.ignore_names is not None:
            if cls_name in self.ignore_names:
                return True
            else:
                return False
        if self.not_ignore_names is not None:
            if cls_name in self.not_ignore_names:
                return False
            return True
        return False

    def init_dataset(self, name_mapping, ignore_names, not_ignore_names):
        """Store the name-mapping / filtering options and prepare the
        class-index table and color palette used for drawing."""
        if (ignore_names is not None and not_ignore_names is not None):
            self.logger.fatal("ignore_names and not_ignore_names can't be setting at the same time")
        self.name_mapping = dict()
        if name_mapping is not None:
            self.name_mapping = name_mapping
        self.ignore_names = None
        if ignore_names is not None:
            self.ignore_names = ignore_names
        self.not_ignore_names = None
        if not_ignore_names is not None:
            self.not_ignore_names = not_ignore_names
        self.color_table = get_color_table()
        # class name -> index into color_table; grows lazily in get_tkim
        # when an unseen class name is met.
        self.class_to_ind = dict()
        for cls_name in self.name_mapping.keys():
            next_ind = len(self.class_to_ind)
            self.class_to_ind[cls_name] = next_ind
        self.supported_im_ext = ['bmp', 'BMP', 'png', 'PNG',
                                 'jpg', 'JPG', 'jpeg', 'JPEG', 'jpe', 'jif', 'jfif', 'jfi']

    def get_color_by_cls_name(self, cls_name):
        """Return the palette color assigned to class `cls_name`."""
        ind = self.class_to_ind[cls_name]
        return self.color_table[ind]

    def init_layout(self, im_dir, anno_dir, save_dir, max_width, max_height, box_thick):
        """Build the whole widget tree and wire up event bindings.

        Also stores the drawing limits (max_width/max_height/box_thick)
        and pre-fills the file list when `im_dir` is given.
        """
        # custom settings
        self.max_width = max_width
        self.max_height = max_height
        self.box_thick = box_thick
        self.bg = '#34373c'
        self.fg = '#f2f2f2'
        # MacOSX's tk is wired and I don't want tkmacosx
        if platform.system()=='Darwin':
            self.bg, self.fg = self.fg, self.bg
        # set title, window size and background
        self.title('ImageSet Viewer ' + __version__)
        self.width = (int)(0.6 * self.winfo_screenwidth())
        self.height = (int)(0.6 * self.winfo_screenheight())
        self.geometry('%dx%d+200+100' % (self.width, self.height))
        self.configure(bg=self.bg)
        self.minsize(800, 600)
        # Setting top level widget's row & column weight,
        # children widgets won't stretch-and-fill-in until setting this weight
        # ref: https://blog.csdn.net/acaic/article/details/80963688
        self.rowconfigure(0,weight=1)
        self.columnconfigure(0,weight=1)
        # Top Level Layout: main_frame & side_frame
        main_frame_width = (int)(0.8*self.width)
        main_frame = tk.LabelFrame(self, bg=self.bg, width=main_frame_width)
        main_frame.grid(row=0, column=0, padx=10, pady=10, sticky=tk.NSEW)
        side_frame = tk.LabelFrame(self, bg=self.bg)
        side_frame.grid(row=0, column=1, padx=10, pady=10, sticky=tk.NSEW)
        # main_frame: directory_frame & image_frame
        main_frame.rowconfigure(0, weight=20)
        main_frame.rowconfigure(1, weight=80)
        main_frame.columnconfigure(0, weight=1)
        directory_frame = tk.LabelFrame(main_frame, bg=self.bg)
        directory_frame.grid(row=0, column=0, sticky=tk.NSEW)
        image_frame_height = (int)(0.7*self.height)
        image_frame = tk.LabelFrame(main_frame, height=image_frame_height, bg=self.bg)
        image_frame.grid(row=1, column=0, sticky=tk.NSEW)
        # keep widgets size stay, instead of change when switching to another image
        # ref: https://zhidao.baidu.com/question/1643979034294549180.html
        image_frame.grid_propagate(0)
        # image_frame
        image_frame.rowconfigure(0, weight=1)
        image_frame.columnconfigure(0, weight=1)
        self.surface = self.get_surface_image() # Surface image
        # self.surface = self.cv_to_tk(cv2.imread('surface.jpg')) # Use image file
        self.image_label = tk.Label(image_frame, image=self.surface,
                                    bg=self.bg, fg=self.fg,compound='center')
        self.image_label.grid(row=0, column=0, sticky=tk.NSEW)
        #self.image_label.bind('<Configure>', self.changeSize) #TODO
        # side_frame
        side_frame.rowconfigure(0, weight=5)
        side_frame.rowconfigure(1, weight=95)
        image_names_label = tk.Label(side_frame, text="Image Files", bg=self.bg, fg=self.fg)
        image_names_label.grid(row=0, column=0)
        self.scrollbar = tk.Scrollbar(side_frame, orient=tk.VERTICAL)
        self.listbox = tk.Listbox(side_frame, yscrollcommand=self.scrollbar.set)
        self.listbox.grid(row=1, column=0, sticky=tk.NS)
        # directory_frame
        directory_frame.rowconfigure(0, weight=5)
        directory_frame.rowconfigure(1, weight=5)
        directory_frame.rowconfigure(2, weight=5)
        directory_frame.columnconfigure(0, weight=1)
        directory_frame.columnconfigure(1, weight=9)
        # im_dir button
        choose_im_dir_btn = tk.Button(directory_frame, text='Image Directory',
                                      command=self.select_image_directory, bg=self.bg, fg=self.fg)
        choose_im_dir_btn.grid(row=0, column=0, sticky=tk.NSEW)
        self.im_dir = tk.StringVar()
        im_dir_entry = tk.Entry(directory_frame, text=self.im_dir, state='readonly')
        im_dir_entry.grid(row=0, column=1, sticky=tk.NSEW)
        self.im_names = []
        if im_dir is not None:
            self.im_dir.set(im_dir)
            self.im_names = [_ for _ in os.listdir(self.im_dir.get())]
            # natural sort so e.g. img2 comes before img10
            self.im_names = natsorted(self.im_names)
            for im_name in self.im_names:
                self.listbox.insert(tk.END, im_name)
        self.listbox.bind('<<ListboxSelect>>', self.callback)
        # more key binds see https://www.cnblogs.com/muziyunxuan/p/8297536.html
        self.listbox.bind('<Control_L>', self.save_image)
        self.scrollbar.config(command=self.listbox.yview)
        self.scrollbar.grid(row=1, column=1, sticky=tk.NS)
        # anno_dir button
        choose_anno_dir_bn = tk.Button(directory_frame, text='Annotation Directory',
                                       command=self.select_annotation_directory, bg=self.bg, fg=self.fg)
        choose_anno_dir_bn.grid(row=1, column=0, sticky=tk.NSEW)
        self.anno_dir = tk.StringVar()
        anno_dir_entry = tk.Entry(directory_frame, text=self.anno_dir, state='readonly')
        anno_dir_entry.grid(row=1, column=1, sticky=tk.NSEW)
        if anno_dir is not None:
            self.anno_dir.set(anno_dir)
        # copy (save) dir button
        choose_save_dir_btn = tk.Button(directory_frame, text='Copy Save Directory',
                                        command=self.select_save_directory, bg=self.bg, fg=self.fg)
        choose_save_dir_btn.grid(row=2, column=0, sticky=tk.NSEW)
        self.save_dir = tk.StringVar()
        save_dir_entry = tk.Entry(directory_frame, text=self.save_dir, state='readonly')
        save_dir_entry.grid(row=2, column=1, sticky=tk.NSEW)
        if save_dir is not None:
            self.save_dir.set(save_dir)

    def callback(self, event=None):
        """Listbox selection handler: load and display the chosen image
        (with annotations drawn) if its extension is supported."""
        im_id = self.listbox.curselection()
        if im_id:
            im_id = im_id[0]
            self.logger.info('im_id is {:d}'.format(im_id))
            im_name = self.listbox.get(im_id)
            im_ext = im_name.split('.')[-1]
            if im_ext in self.supported_im_ext:
                im_pth = os.path.join(self.im_dir.get(), im_name).replace('\\', '/')
                # keep a reference on self so Tk doesn't garbage-collect the photo
                self.tkim = self.get_tkim(im_pth)
                self.image_label.configure(image=self.tkim)
                #self.logger.debug(im_pth)

    def save_image(self, event):
        """Save (copy) current displayed (original, no box) image to specified saving directory.
        This is binding to left-control key now. Useful for manually picking up images.
        """
        im_id = self.listbox.curselection()
        if im_id:
            im_name = self.listbox.get(im_id)
            im_ext = im_name.split('.')[-1]
            if im_ext in self.supported_im_ext:
                im_pth = os.path.join(self.im_dir.get(), im_name).replace('\\', '/')
                save_pth = os.path.join(self.save_dir.get(), im_name).replace('\\', '/')
                shutil.copyfile(im_pth, save_pth)
                self.logger.info('Save(copy) to {:s}'.format(save_pth))
                #self.logger.debug(im_pth)

    def get_tkim(self, im_pth):
        """
        Load image and annotation, draw on image, and convert to image.
        When necessary, image resizing is utilized.
        """
        im = cv2.imread(im_pth)
        self.logger.info('Image file is: {:s}'.format(im_pth))
        im_ht, im_wt, im_dt = im.shape
        # clamp the displayed size to max_width/max_height, keeping note of
        # the scale factors so box coordinates can be rescaled to match
        if self.max_width is None or self.max_width >= im_wt:
            show_width = im_wt
        else:
            show_width = self.max_width
        if self.max_height is None or self.max_height >= im_ht:
            show_height = im_ht
        else:
            show_height = self.max_height
        scale_width = im_wt * 1.0 / show_width
        scale_height = im_ht * 1.0 / show_height
        if show_width!=im_wt or show_height!=im_ht:
            im = cv2.resize(im, (show_width, show_height))
            self.logger.info('doing resize, show_width={:d}, im_wt={:d}, show_height={:d}, im_ht={:d}'.format(show_width, im_wt, show_height, im_ht))
        # xml_pth = im_pth.replace('JPEGImages', 'Annotations').replace('.jpg', '.xml').replace('.png', '.xml')
        # We don't assume a standard PASCAL VOC dataset directory.
        # User should choose image and annotation folder seperately.
        im_head = '.'.join(im_pth.split('/')[-1].split('.')[:-1])
        xml_pth = self.anno_dir.get() + '/' + im_head + '.xml'
        if os.path.exists(xml_pth):
            self.logger.info('XML annotation file is {:s}'.format(xml_pth))
            boxes = self.parse_xml(xml_pth)
            for box in boxes:
                if self.should_ignore(box.cls_name): continue
                # first time this class name is seen: map it to itself and
                # assign it the next palette index
                if box.cls_name not in self.name_mapping.keys():
                    self.name_mapping[box.cls_name] = box.cls_name
                    next_ind = len(self.class_to_ind)
                    self.class_to_ind[box.cls_name] = next_ind
                xmin = int(box.x1/scale_width)
                ymin = int(box.y1/scale_height)
                xmax = int(box.x2/scale_width)
                ymax = int(box.y2/scale_height)
                color = self.get_color_by_cls_name(box.cls_name)
                cv2.rectangle(im, pt1=(xmin, ymin), pt2=(xmax, ymax),
                              color = color, thickness=self.box_thick)
                font_size = 16
                font = self.get_font(font_size)
                # put the label above the box; if that falls off-screen,
                # tuck it just inside the box instead
                tx = xmin
                ty = ymin-20
                if(ty<0):
                    ty = ymin+10
                    tx = xmin+10
                text_org = (tx, ty)
                show_text = self.name_mapping[box.cls_name]
                self.logger.debug('box.cls_name is:' + box.cls_name)
                self.logger.debug('show_text:' + show_text)
                im = draw_text(im, show_text, text_org, color, font)
        else:
            self.logger.warning("XML annotation file {:s} doesn't exist".format(xml_pth))
        return self.cv_to_tk(im)

    @staticmethod
    def cv_to_tk(im):
        """Convert OpenCV's (numpy) image to Tkinter-compatible photo image"""
        im = im[:, :, ::-1] # bgr => rgb
        return ImageTk.PhotoImage(Image.fromarray(im))

    @staticmethod
    def get_font(font_size):
        """Pick a per-platform truetype font for label rendering.

        Assumes the listed font files exist on each platform -- TODO
        confirm / fall back gracefully when they don't.
        """
        font_pth = None
        if platform.system()=='Windows':
            font_pth = 'C:/Windows/Fonts/msyh.ttc'
        elif (platform.system()=='Linux'):
            font_pth = fm.findfont(fm.FontProperties(family='DejaVu Mono'))
        else:
            font_pth = '/Library/Fonts//Songti.ttc'
        return ImageFont.truetype(font_pth, font_size)

    def get_surface_image(self):
        """Return surface image, which is ImageTK type"""
        im = np.ndarray((256, 256, 3), dtype=np.uint8)
        for y in range(256):
            for x in range(256):
                im[y, x, :] = (60, 55, 52) # #34373c(RGB)'s BGR split
        im = cv2.resize(im, ((int)(self.width*0.6), (int)(self.height*0.6)))
        font_size = 30
        font = self.get_font(font_size)
        text_org = (self.width*0.16, self.height*0.26)
        text = 'ImageSet Viewer'
        im = draw_text(im, text, text_org, color=(255, 255, 255, 255), font=font)
        return self.cv_to_tk(im)

    def parse_xml(self, xml_pth):
        """Parse one annotation file and return its list of BndBox."""
        anno = PascalVOC2007XML(xml_pth)
        return anno.get_boxes()

    def select_image_directory(self):
        """Button handler: pick an image directory and reload the file list."""
        im_dir = askdirectory()
        self.listbox.delete(0, len(self.im_names)-1) # delete all elements
        self.fill_im_names(im_dir)

    def select_annotation_directory(self):
        """Button handler: pick the directory holding the XML annotations."""
        anno_dir = askdirectory()
        self.anno_dir.set(anno_dir) # TODO: validate anno_dir

    def select_save_directory(self):
        """Button handler: pick where left-Ctrl copies selected images."""
        save_dir = askdirectory()
        self.save_dir.set(save_dir) # the directory to save(copy) select images

    def fill_im_names(self, im_dir):
        """Populate the listbox with the (naturally sorted) files of im_dir."""
        if im_dir is not None:
            self.im_dir.set(im_dir)
            # Get natural order of image file names
            self.im_names = [_ for _ in os.listdir(im_dir)]
            self.im_names = natsorted(self.im_names)
            for im_name in self.im_names:
                self.listbox.insert(tk.END, im_name)
def example1():
    """The simplest example: launch with no parameters and pick the
    image / annotation directories interactively in the GUI.
    """
    viewer = VOCViewer()
    viewer.mainloop()
def example2():
    """Specify directories & drawing related settings."""
    settings = dict(
        im_dir = '/Users/chris/data/VOC2007/JPEGImages',    # image directory
        anno_dir = '/Users/chris/data/VOC2007/Annotations', # XML directory
        save_dir = '/Users/chris/data/VOC2007/save',        # picked images saved here
        max_width = 1000,  # max allowed shown image width
        max_height = 800,  # max allowed shown image height
        box_thick = 2,     # bounding box thickness
    )
    viewer = VOCViewer(**settings)
    viewer.mainloop()
def example3():
    """Specify a class-name mapping."""
    # key: class name as written in the XML files
    # value: class name shown on the displayed image
    # (key may equal value when the XML name is already readable)
    cls_name_map = {
        '__background__': '背景',
        'aeroplane': '飞机',
        'bicycle': '自行车',
        'bird': '鸟',
        'boat': '船',
        'bottle': '瓶子',
        'bus': '公交车',
        'car': '汽车',
        'cat': '猫',
        'chair': '椅子',
        'cow': '牛',
        'diningtable': '餐桌',
        'dog': '狗',
        'horse': '马',
        'motorbike': '摩托车',
        'person': '人',
        'pottedplant': '盆栽',
        'sheep': '绵羊',
        'sofa': '沙发',
        'train': '火车',
        'tvmonitor': '显示器'
    }
    viewer = VOCViewer(im_dir = '/Users/chris/data/VOC2007/JPEGImages',   # image directory
                       anno_dir = '/Users/chris/data/VOC2007/Annotations', # XML directory
                       save_dir = '/Users/chris/data/VOC2007/save',        # picked images saved here
                       max_width = 1000,  # max allowed shown image width
                       max_height = 800,  # max allowed shown image height
                       box_thick = 2,     # bounding box thickness
                       name_mapping = cls_name_map
                       )
    viewer.mainloop()
def example4():
    """Specify ignore_names / not_ignore_names.
    Either may be given (or neither), but not both at the same time.
    """
    viewer = VOCViewer(im_dir = '/Users/chris/data/VOC2007/JPEGImages',    # image directory
                       anno_dir = '/Users/chris/data/VOC2007/Annotations', # XML directory
                       save_dir = '/Users/chris/data/VOC2007/save',        # picked images saved here
                       max_width = 1000,  # max allowed shown image width
                       max_height = 800,  # max allowed shown image height
                       box_thick = 2,     # bounding box thickness
                       not_ignore_names = ['person']  # only draw 'person' boxes
                       )
    viewer.mainloop()
def example5():
    """
    Take ImageNet2012 as example. You can imitate this and
    show your own PASCAL-VOC-Style-Labeled imageset.

    Builds a mapping from ImageNet WordNet ids (e.g. "n01440764") to
    human-readable names read from `imagenet_cls_cn.txt`, where each line
    is "<wnid> <literal name, possibly containing spaces>".
    """
    # `with` guarantees the file is closed even if parsing raises
    # (the original manual open()/close() pair leaked on error).
    with open('imagenet_cls_cn.txt', encoding='UTF-8') as fin:
        lines = [line.strip() for line in fin]

    ilsvrc2012_cls_dict = dict()
    for item in lines:
        # Split only on the first space: the literal class name may
        # itself contain spaces.
        digit_cls_name, _, literal_cls_name = item.partition(' ')
        ilsvrc2012_cls_dict[digit_cls_name] = literal_cls_name

    app = VOCViewer(im_dir = 'D:/data/ILSVRC2012/ILSVRC2012_img_train/n01440764',  # image directory
                    anno_dir = 'D:/data/ILSVRC2012/ILSVRC2012_bbox_train_v2/n01440764',  # XML directory
                    save_dir = None,  # not specified saving direcotry
                    max_width = 1000,  # max allowed shown image width is 1000
                    max_height = 800,  # max allowed shown image height is 800
                    box_thick = 2,  # bounding box thickness
                    name_mapping = ilsvrc2012_cls_dict
                    )
    app.mainloop()
if __name__ == '__main__':
    # Run exactly one example at a time; uncomment the one to try.
    example1()
    #example2()
    #example3()
    #example4()
    #example5()
| 38.753719 | 149 | 0.612258 |
ace23ca2d0288d7d271fa02a226e0cdc876c2c9b | 43,716 | py | Python | edb/server/server.py | dmgolembiowski/edgedb | fde7853e329fceba64b6b92ece399f6858d51017 | [
"Apache-2.0"
] | null | null | null | edb/server/server.py | dmgolembiowski/edgedb | fde7853e329fceba64b6b92ece399f6858d51017 | [
"Apache-2.0"
] | null | null | null | edb/server/server.py | dmgolembiowski/edgedb | fde7853e329fceba64b6b92ece399f6858d51017 | [
"Apache-2.0"
] | null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import asyncio
import binascii
import json
import logging
import os
import pickle
import socket
import ssl
import stat
import sys
import uuid
import immutables
from edb import errors
from edb.common import devmode
from edb.common import taskgroup
from edb.common import windowedsum
from edb.schema import reflection as s_refl
from edb.schema import roles as s_role
from edb.schema import schema as s_schema
from edb.edgeql import parser as ql_parser
from edb.server import args as srvargs
from edb.server import cache
from edb.server import config
from edb.server import connpool
from edb.server import compiler_pool
from edb.server import defines
from edb.server import protocol
from edb.server.protocol import binary # type: ignore
from edb.server import pgcon
from edb.server.pgcon import errors as pgcon_errors
from . import dbview
ADMIN_PLACEHOLDER = "<edgedb:admin>"
logger = logging.getLogger('edb.server')
log_metrics = logging.getLogger('edb.server.metrics')
class RoleDescriptor(TypedDict):
    """Shape of one role entry as cached in ``Server._roles``.

    Populated by ``Server._fetch_roles`` from the global schema.
    """
    # Whether the role has superuser privileges.
    superuser: bool
    # Role name (also the key under which the descriptor is stored).
    name: str
    # Password verifier from the schema.  NOTE(review): looks like this can
    # be None for passwordless roles -- confirm get_password() contract.
    password: str
class StartupError(Exception):
    """Raised when the server cannot start up.

    Used e.g. by ``Server.init_tls`` when TLS certificates fail to load.
    """
    pass
class Server:
_sys_pgcon: Optional[pgcon.PGConnection]
_roles: Mapping[str, RoleDescriptor]
_instance_data: Mapping[str, str]
_sys_queries: Mapping[str, str]
_local_intro_query: bytes
_global_intro_query: bytes
_std_schema: s_schema.Schema
_refl_schema: s_schema.Schema
_schema_class_layout: s_refl.SchemaTypeLayout
_sys_pgcon_waiter: asyncio.Lock
_servers: Mapping[str, asyncio.AbstractServer]
_task_group: Optional[taskgroup.TaskGroup]
_binary_conns: Set[binary.EdgeConnection]
def __init__(
self,
*,
cluster,
runstate_dir,
internal_runstate_dir,
max_backend_connections,
compiler_pool_size,
nethosts,
netport,
allow_insecure_binary_clients: bool = False,
allow_insecure_http_clients: bool = False,
auto_shutdown_after: float = -1,
echo_runtime_info: bool = False,
status_sink: Optional[Callable[[str], None]] = None,
startup_script: Optional[srvargs.StartupScript] = None,
):
self._loop = asyncio.get_running_loop()
# Used to tag PG notifications to later disambiguate them.
self._server_id = str(uuid.uuid4())
self._serving = False
self._initing = False
self._accept_new_tasks = False
self._cluster = cluster
self._pg_addr = self._get_pgaddr()
inst_params = cluster.get_runtime_params().instance_params
self._tenant_id = inst_params.tenant_id
# 1 connection is reserved for the system DB
pool_capacity = max_backend_connections - 1
self._pg_pool = connpool.Pool(
connect=self._pg_connect,
disconnect=self._pg_disconnect,
max_capacity=pool_capacity,
)
self._pg_unavailable_msg = None
# DB state will be initialized in init().
self._dbindex = None
self._runstate_dir = runstate_dir
self._internal_runstate_dir = internal_runstate_dir
self._max_backend_connections = max_backend_connections
self._compiler_pool = None
self._compiler_pool_size = compiler_pool_size
self._listen_hosts = nethosts
self._listen_port = netport
self._sys_auth: Tuple[Any, ...] = tuple()
# Shutdown the server after the last management
# connection has disconnected
# and there have been no new connections for n seconds
self._auto_shutdown_after = auto_shutdown_after
self._auto_shutdown_handler = None
self._echo_runtime_info = echo_runtime_info
self._status_sink = status_sink
self._startup_script = startup_script
# Never use `self.__sys_pgcon` directly; get it via
# `await self._acquire_sys_pgcon()`.
self.__sys_pgcon = None
self._roles = immutables.Map()
self._instance_data = immutables.Map()
self._sys_queries = immutables.Map()
self._devmode = devmode.is_in_dev_mode()
self._binary_proto_id_counter = 0
self._binary_conns = set()
self._accepting_connections = False
self._servers = {}
self._http_query_cache = cache.StatementsCache(
maxsize=defines.HTTP_PORT_QUERY_CACHE_SIZE)
self._http_last_minute_requests = windowedsum.WindowedSum()
self._http_request_logger = None
self._task_group = None
self._stop_evt = asyncio.Event()
self._tls_cert_file = None
self._sslctx = None
self._allow_insecure_binary_clients = allow_insecure_binary_clients
self._allow_insecure_http_clients = allow_insecure_http_clients
async def _request_stats_logger(self):
last_seen = -1
while True:
current = int(self._http_last_minute_requests)
if current != last_seen:
log_metrics.info(
"HTTP requests in last minute: %d",
current,
)
last_seen = current
await asyncio.sleep(30)
def get_listen_hosts(self):
return self._listen_hosts
def get_listen_port(self):
return self._listen_port
def get_loop(self):
return self._loop
def in_dev_mode(self):
return self._devmode
def get_pg_dbname(self, dbname: str) -> str:
return self._cluster.get_db_name(dbname)
def on_binary_client_connected(self) -> str:
self._binary_proto_id_counter += 1
if self._auto_shutdown_handler:
self._auto_shutdown_handler.cancel()
self._auto_shutdown_handler = None
return str(self._binary_proto_id_counter)
def on_binary_client_authed(self, conn):
self._binary_conns.add(conn)
self._report_connections(event='opened')
def on_binary_client_disconnected(self, conn):
self._binary_conns.discard(conn)
self._report_connections(event="closed")
if not self._binary_conns and self._auto_shutdown_after >= 0:
def shutdown():
self._accepting_connections = False
self._stop_evt.set()
self._auto_shutdown_handler = self._loop.call_later(
self._auto_shutdown_after, shutdown)
def _report_connections(self, *, event: str) -> None:
log_metrics.info(
"%s a connection; open_count=%d",
event,
len(self._binary_conns),
)
async def _pg_connect(self, dbname):
pg_dbname = self.get_pg_dbname(dbname)
return await pgcon.connect(
self._get_pgaddr(), pg_dbname, self._tenant_id)
async def _pg_disconnect(self, conn):
conn.terminate()
async def init(self):
self._initing = True
try:
self.__sys_pgcon = await self._pg_connect(defines.EDGEDB_SYSTEM_DB)
self._sys_pgcon_waiter = asyncio.Lock()
self._sys_pgcon_ready_evt = asyncio.Event()
self._sys_pgcon_reconnect_evt = asyncio.Event()
await self._load_instance_data()
global_schema = await self.introspect_global_schema()
sys_config = await self.load_sys_config()
self._dbindex = dbview.DatabaseIndex(
self,
std_schema=self._std_schema,
global_schema=global_schema,
sys_config=sys_config,
)
self._fetch_roles()
await self._introspect_dbs()
# Now, once all DBs have been introspected, start listening on
# any notifications about schema/roles/etc changes.
await self.__sys_pgcon.listen_for_sysevent()
self.__sys_pgcon.set_server(self)
self._sys_pgcon_ready_evt.set()
self._populate_sys_auth()
if not self._listen_hosts:
self._listen_hosts = (
config.lookup('listen_addresses', sys_config)
or ('localhost',)
)
if self._listen_port is None:
self._listen_port = (
config.lookup('listen_port', sys_config)
or defines.EDGEDB_PORT
)
self._http_request_logger = asyncio.create_task(
self._request_stats_logger()
)
finally:
self._initing = False
async def _create_compiler_pool(self):
self._compiler_pool = await compiler_pool.create_compiler_pool(
pool_size=self._compiler_pool_size,
dbindex=self._dbindex,
runstate_dir=self._internal_runstate_dir,
backend_runtime_params=self.get_backend_runtime_params(),
std_schema=self._std_schema,
refl_schema=self._refl_schema,
schema_class_layout=self._schema_class_layout,
)
async def _destroy_compiler_pool(self):
if self._compiler_pool is not None:
await self._compiler_pool.stop()
self._compiler_pool = None
def _populate_sys_auth(self):
cfg = self._dbindex.get_sys_config()
auth = config.lookup('auth', cfg) or ()
self._sys_auth = tuple(sorted(auth, key=lambda a: a.priority))
def _get_pgaddr(self):
return self._cluster.get_connection_spec()
def get_compiler_pool(self):
return self._compiler_pool
def get_db(self, *, dbname: str):
assert self._dbindex is not None
return self._dbindex.get_db(dbname)
def maybe_get_db(self, *, dbname: str):
assert self._dbindex is not None
return self._dbindex.maybe_get_db(dbname)
def new_dbview(self, *, dbname, user, query_cache):
return self._dbindex.new_view(
dbname, user=user, query_cache=query_cache)
def remove_dbview(self, dbview):
return self._dbindex.remove_view(dbview)
def get_global_schema(self):
return self._dbindex.get_global_schema()
def get_compilation_system_config(self):
return self._dbindex.get_compilation_system_config()
async def acquire_pgcon(self, dbname):
    """Acquire a healthy backend connection to *dbname* from the pool.

    Unhealthy connections coming out of the pool are discarded and the
    acquisition is retried.  Raises BackendUnavailableError either when
    Postgres is already known to be unavailable or when no healthy
    connection could be obtained after exhausting the retry budget.
    """
    if self._pg_unavailable_msg is not None:
        raise errors.BackendUnavailableError(
            'Postgres is not available: ' + self._pg_unavailable_msg
        )

    # Retry at most pool-capacity + 1 times: in the worst case every
    # pooled connection turns out to be stale and is discarded once.
    for _ in range(self._pg_pool.max_capacity + 1):
        conn = await self._pg_pool.acquire(dbname)
        if conn.is_healthy():
            return conn
        else:
            logger.warning('Acquired an unhealthy pgcon; discard now.')
            self._pg_pool.release(dbname, conn, discard=True)
    else:
        # for/else: reached only when the loop exhausts without returning.
        # This is unlikely to happen, but we defer to the caller to retry
        # when it does happen
        raise errors.BackendUnavailableError(
            'No healthy backend connection available at the moment, '
            'please try again.'
        )
def release_pgcon(self, dbname, conn, *, discard=False):
if not conn.is_healthy():
logger.warning('Released an unhealthy pgcon; discard now.')
discard = True
self._pg_pool.release(dbname, conn, discard=discard)
async def load_sys_config(self):
syscon = await self._acquire_sys_pgcon()
try:
query = self.get_sys_query('sysconfig')
sys_config_json = await syscon.parse_execute_json(
query,
b'__backend_sysconfig',
dbver=0,
use_prep_stmt=True,
args=(),
)
finally:
self._release_sys_pgcon()
return config.from_json(config.get_settings(), sys_config_json)
async def introspect_global_schema(self, conn=None):
if conn is not None:
json_data = await conn.parse_execute_json(
self._global_intro_query, b'__global_intro_db',
dbver=0, use_prep_stmt=True, args=(),
)
else:
syscon = await self._acquire_sys_pgcon()
try:
json_data = await syscon.parse_execute_json(
self._global_intro_query, b'__global_intro_db',
dbver=0, use_prep_stmt=True, args=(),
)
finally:
self._release_sys_pgcon()
return s_refl.parse_into(
base_schema=self._std_schema,
schema=s_schema.FlatSchema(),
data=json_data,
schema_class_layout=self._schema_class_layout,
)
async def _reintrospect_global_schema(self):
if not self._initing and not self._serving:
logger.warning(
"global-schema-changes event received during shutdown; "
"ignoring."
)
return
new_global_schema = await self.introspect_global_schema()
self._dbindex.update_global_schema(new_global_schema)
self._fetch_roles()
async def introspect_user_schema(self, conn):
json_data = await conn.parse_execute_json(
self._local_intro_query, b'__local_intro_db',
dbver=0, use_prep_stmt=True, args=(),
)
base_schema = s_schema.ChainedSchema(
self._std_schema,
s_schema.FlatSchema(),
self.get_global_schema(),
)
return s_refl.parse_into(
base_schema=base_schema,
schema=s_schema.FlatSchema(),
data=json_data,
schema_class_layout=self._schema_class_layout,
)
async def introspect_db(
self, dbname, *, refresh=False, skip_dropped=False
):
try:
conn = await self.acquire_pgcon(dbname)
except pgcon_errors.BackendError as e:
if skip_dropped and e.code_is(
pgcon_errors.ERROR_INVALID_CATALOG_NAME
):
# database does not exist
logger.warning(
"Detected concurrently-dropped database %s; skipping.",
dbname,
)
return
else:
raise
try:
user_schema = await self.introspect_user_schema(conn)
reflection_cache_json = await conn.parse_execute_json(
b'''
SELECT json_agg(o.c)
FROM (
SELECT
json_build_object(
'eql_hash', t.eql_hash,
'argnames', array_to_json(t.argnames)
) AS c
FROM
ROWS FROM(edgedb._get_cached_reflection())
AS t(eql_hash text, argnames text[])
) AS o;
''',
b'__reflection_cache',
dbver=0,
use_prep_stmt=True,
args=(),
)
reflection_cache = immutables.Map({
r['eql_hash']: tuple(r['argnames'])
for r in json.loads(reflection_cache_json)
})
backend_ids_json = await conn.parse_execute_json(
b'''
SELECT
json_object_agg(
"id"::text,
"backend_id"
)::text
FROM
edgedb."_SchemaType"
''',
b'__backend_ids_fetch',
dbver=0,
use_prep_stmt=True,
args=(),
)
backend_ids = json.loads(backend_ids_json)
db_config = await self.introspect_db_config(conn)
self._dbindex.register_db(
dbname,
user_schema=user_schema,
db_config=db_config,
reflection_cache=reflection_cache,
backend_ids=backend_ids,
refresh=refresh,
)
finally:
self.release_pgcon(dbname, conn)
async def introspect_db_config(self, conn):
query = self.get_sys_query('dbconfig')
result = await conn.parse_execute_json(
query,
b'__backend_dbconfig',
dbver=0,
use_prep_stmt=True,
args=(),
)
return config.from_json(config.get_settings(), result)
async def _introspect_dbs(self):
syscon = await self._acquire_sys_pgcon()
try:
dbs_query = self.get_sys_query('listdbs')
json_data = await syscon.parse_execute_json(
dbs_query, b'__listdbs',
dbver=0, use_prep_stmt=True, args=(),
)
dbnames = json.loads(json_data)
finally:
self._release_sys_pgcon()
async with taskgroup.TaskGroup(name='introspect DBs') as g:
for dbname in dbnames:
g.create_task(self.introspect_db(dbname, skip_dropped=True))
def _fetch_roles(self):
global_schema = self._dbindex.get_global_schema()
roles = {}
for role in global_schema.get_objects(type=s_role.Role):
role_name = str(role.get_name(global_schema))
roles[role_name] = {
'name': role_name,
'superuser': role.get_superuser(global_schema),
'password': role.get_password(global_schema),
}
self._roles = immutables.Map(roles)
async def _load_instance_data(self):
syscon = await self._acquire_sys_pgcon()
try:
result = await syscon.simple_query(b'''\
SELECT json FROM edgedbinstdata.instdata
WHERE key = 'instancedata';
''', ignore_data=False)
self._instance_data = immutables.Map(
json.loads(result[0][0].decode('utf-8')))
result = await syscon.simple_query(b'''\
SELECT json FROM edgedbinstdata.instdata
WHERE key = 'sysqueries';
''', ignore_data=False)
queries = json.loads(result[0][0].decode('utf-8'))
self._sys_queries = immutables.Map(
{k: q.encode() for k, q in queries.items()})
result = await syscon.simple_query(b'''\
SELECT text FROM edgedbinstdata.instdata
WHERE key = 'local_intro_query';
''', ignore_data=False)
self._local_intro_query = result[0][0]
result = await syscon.simple_query(b'''\
SELECT text FROM edgedbinstdata.instdata
WHERE key = 'global_intro_query';
''', ignore_data=False)
self._global_intro_query = result[0][0]
result = await syscon.simple_query(b'''\
SELECT bin FROM edgedbinstdata.instdata
WHERE key = 'stdschema';
''', ignore_data=False)
try:
data = binascii.a2b_hex(result[0][0][2:])
self._std_schema = pickle.loads(data)
except Exception as e:
raise RuntimeError(
'could not load std schema pickle') from e
result = await syscon.simple_query(b'''\
SELECT bin FROM edgedbinstdata.instdata
WHERE key = 'reflschema';
''', ignore_data=False)
try:
data = binascii.a2b_hex(result[0][0][2:])
self._refl_schema = pickle.loads(data)
except Exception as e:
raise RuntimeError(
'could not load refl schema pickle') from e
result = await syscon.simple_query(b'''\
SELECT bin FROM edgedbinstdata.instdata
WHERE key = 'classlayout';
''', ignore_data=False)
try:
data = binascii.a2b_hex(result[0][0][2:])
self._schema_class_layout = pickle.loads(data)
except Exception as e:
raise RuntimeError(
'could not load schema class layout pickle') from e
finally:
self._release_sys_pgcon()
def get_roles(self):
return self._roles
async def _restart_servers_new_addr(self, nethosts, netport):
if not netport:
raise RuntimeError('cannot restart without network port specified')
nethosts = _fix_wildcard_host(nethosts)
servers_to_stop = []
servers = {}
if self._listen_port == netport:
hosts_to_start = [
host for host in nethosts if host not in self._servers
]
for host, srv in self._servers.items():
if host == ADMIN_PLACEHOLDER or host in nethosts:
servers[host] = srv
else:
servers_to_stop.append(srv)
admin = False
else:
hosts_to_start = nethosts
servers_to_stop = self._servers.values()
admin = True
new_servers, *_ = await self._start_servers(
hosts_to_start, netport, admin
)
servers.update(new_servers)
self._servers = servers
self._listen_hosts = nethosts
self._listen_port = netport
addrs = []
unix_addr = None
port = None
for srv in servers_to_stop:
for s in srv.sockets:
addr = s.getsockname()
if isinstance(addr, tuple):
addrs.append(addr)
if port is None:
port = addr[1]
elif port != addr[1]:
port = 0
else:
unix_addr = addr
if len(addrs) > 1:
if port:
addr_str = f"{{{', '.join(addr[0] for addr in addrs)}}}:{port}"
else:
addr_str = f"{{{', '.join('%s:%d' % addr for addr in addrs)}}}"
elif addrs:
addr_str = "%s:%d" % addrs[0]
else:
addr_str = None
if addr_str:
logger.info('Stopping to serve on %s', addr_str)
if unix_addr:
logger.info('Stopping to serve admin on %s', unix_addr)
await self._stop_servers(servers_to_stop)
async def _on_before_drop_db(
self,
dbname: str,
current_dbname: str
) -> None:
if current_dbname == dbname:
raise errors.ExecutionError(
f'cannot drop the currently open database {dbname!r}')
await self._ensure_database_not_connected(dbname)
async def _on_before_create_db_from_template(
self,
dbname: str,
current_dbname: str
):
if current_dbname == dbname:
raise errors.ExecutionError(
f'cannot create database using currently open database '
f'{dbname!r} as a template database')
await self._ensure_database_not_connected(dbname)
async def _ensure_database_not_connected(self, dbname: str):
assert self._dbindex is not None
if self._dbindex.count_connections(dbname):
# If there are open EdgeDB connections to the `dbname` DB
# just raise the error Postgres would have raised itself.
raise errors.ExecutionError(
f'database {dbname!r} is being accessed by other users')
else:
# If, however, there are no open EdgeDB connections, prune
# all non-active postgres connection to the `dbname` DB.
await self._pg_pool.prune_inactive_connections(dbname)
def _on_after_drop_db(self, dbname: str):
assert self._dbindex is not None
self._dbindex.unregister_db(dbname)
async def _on_system_config_add(self, setting_name, value):
# CONFIGURE INSTANCE INSERT ConfigObject;
pass
async def _on_system_config_rem(self, setting_name, value):
# CONFIGURE INSTANCE RESET ConfigObject;
pass
async def _on_system_config_set(self, setting_name, value):
# CONFIGURE INSTANCE SET setting_name := value;
if setting_name == 'listen_addresses':
await self._restart_servers_new_addr(value, self._listen_port)
elif setting_name == 'listen_port':
await self._restart_servers_new_addr(self._listen_hosts, value)
async def _on_system_config_reset(self, setting_name):
# CONFIGURE INSTANCE RESET setting_name;
if setting_name == 'listen_addresses':
await self._restart_servers_new_addr(
('localhost',), self._listen_port)
elif setting_name == 'listen_port':
await self._restart_servers_new_addr(
self._listen_hosts, defines.EDGEDB_PORT)
async def _after_system_config_add(self, setting_name, value):
# CONFIGURE INSTANCE INSERT ConfigObject;
if setting_name == 'auth':
self._populate_sys_auth()
async def _after_system_config_rem(self, setting_name, value):
# CONFIGURE INSTANCE RESET ConfigObject;
if setting_name == 'auth':
self._populate_sys_auth()
async def _after_system_config_set(self, setting_name, value):
# CONFIGURE INSTANCE SET setting_name := value;
pass
async def _after_system_config_reset(self, setting_name):
# CONFIGURE INSTANCE RESET setting_name;
pass
async def _acquire_sys_pgcon(self):
if not self._initing and not self._serving:
raise RuntimeError("EdgeDB server is not serving.")
await self._sys_pgcon_waiter.acquire()
if not self._initing and not self._serving:
self._sys_pgcon_waiter.release()
raise RuntimeError("EdgeDB server is not serving.")
if self.__sys_pgcon is None or not self.__sys_pgcon.is_healthy():
conn, self.__sys_pgcon = self.__sys_pgcon, None
if conn is not None:
self._sys_pgcon_ready_evt.clear()
conn.abort()
# We depend on the reconnect on connection_lost() of __sys_pgcon
await self._sys_pgcon_ready_evt.wait()
if self.__sys_pgcon is None:
self._sys_pgcon_waiter.release()
raise RuntimeError("Cannot acquire pgcon to the system DB.")
return self.__sys_pgcon
def _release_sys_pgcon(self):
self._sys_pgcon_waiter.release()
async def _cancel_pgcon_operation(self, pgcon) -> bool:
syscon = await self._acquire_sys_pgcon()
try:
if pgcon.idle:
# pgcon could have received the query results while we
# were acquiring a system connection to cancel it.
return False
if pgcon.is_cancelling():
# Somehow the connection is already being cancelled and
# we don't want to have to cancellations go in parallel.
return False
pgcon.start_pg_cancellation()
try:
# Returns True if the `pid` exists and it was able to send it a
# SIGINT. Will throw an exception if the priveleges aren't
# sufficient.
result = await syscon.simple_query(
f'SELECT pg_cancel_backend({pgcon.backend_pid});'.encode(),
ignore_data=False
)
finally:
pgcon.finish_pg_cancellation()
return result[0][0] == b't'
finally:
self._release_sys_pgcon()
async def _cancel_and_discard_pgcon(self, pgcon, dbname) -> None:
try:
if self._serving:
await self._cancel_pgcon_operation(pgcon)
finally:
self.release_pgcon(dbname, pgcon, discard=True)
async def _signal_sysevent(self, event, **kwargs):
if not self._initing and not self._serving:
# This is very likely if we are doing
# "run_startup_script_and_exit()", but is also possible if the
# server was shut down with this coroutine as a background task
# in flight.
return
pgcon = await self._acquire_sys_pgcon()
try:
await pgcon.signal_sysevent(event, **kwargs)
finally:
self._release_sys_pgcon()
def _on_remote_ddl(self, dbname):
# Triggered by a postgres notification event 'schema-changes'
# on the __edgedb_sysevent__ channel
self._loop.create_task(
self.introspect_db(dbname, refresh=True)
)
def _on_remote_database_config_change(self, dbname):
# Triggered by a postgres notification event 'database-config-changes'
# on the __edgedb_sysevent__ channel
pass
def _on_remote_system_config_change(self):
    # Triggered by a postgres notification event 'system-config-changes'
    # on the __edgedb_sysevent__ channel
    pass
def _on_global_schema_change(self):
self._loop.create_task(self._reintrospect_global_schema())
def _on_sys_pgcon_connection_lost(self, exc):
if not self._serving:
# The server is shutting down, release all events so that
# the waiters if any could continue and exit
self._sys_pgcon_ready_evt.set()
self._sys_pgcon_reconnect_evt.set()
return
logger.error(
"Connection to the system database is " +
("closed." if exc is None else f"broken! Reason: {exc}")
)
self.set_pg_unavailable_msg(
"Connection is lost, please check server log for the reason."
)
self.__sys_pgcon = None
self._sys_pgcon_ready_evt.clear()
self._loop.create_task(self._reconnect_sys_pgcon())
async def _reconnect_sys_pgcon(self):
try:
conn = None
while self._serving:
try:
conn = await self._pg_connect(defines.EDGEDB_SYSTEM_DB)
break
except ConnectionError:
# Keep retrying as far as:
# 1. The EdgeDB server is still serving,
# 2. We still cannot connect to the Postgres cluster, or
pass
except pgcon_errors.BackendError as e:
# 3. The Postgres cluster is still starting up
if not e.code_is(pgcon_errors.ERROR_CANNOT_CONNECT_NOW):
raise
if self._serving:
try:
# Retry after INTERVAL seconds, unless the event is set
# and we can retry immediately after the event.
await asyncio.wait_for(
self._sys_pgcon_reconnect_evt.wait(),
defines.SYSTEM_DB_RECONNECT_INTERVAL,
)
# But the event can only skip one INTERVAL.
self._sys_pgcon_reconnect_evt.clear()
except asyncio.TimeoutError:
pass
if not self._serving:
if conn is not None:
conn.abort()
return
logger.info("Successfully reconnected to the system database.")
self.__sys_pgcon = conn
self.__sys_pgcon.set_server(self)
# This await is meant to be after set_server() because we need the
# pgcon to be able to trigger another reconnect if its connection
# is lost during this await.
await self.__sys_pgcon.listen_for_sysevent()
self.set_pg_unavailable_msg(None)
finally:
self._sys_pgcon_ready_evt.set()
async def run_startup_script_and_exit(self):
"""Run the script specified in *startup_script* and exit immediately"""
if self._startup_script is None:
raise AssertionError('startup script is not defined')
await self._create_compiler_pool()
try:
ql_parser.preload()
await binary.EdgeConnection.run_script(
server=self,
database=self._startup_script.database,
user=self._startup_script.user,
script=self._startup_script.text,
)
finally:
await self._destroy_compiler_pool()
async def _start_server(
self, host: str, port: int
) -> asyncio.AbstractServer:
nethost = None
if host == "localhost":
nethost = await _resolve_localhost()
proto_factory = lambda: protocol.HttpProtocol(
self, self._sslctx,
allow_insecure_binary_clients=self._allow_insecure_binary_clients,
allow_insecure_http_clients=self._allow_insecure_http_clients,
)
return await self._loop.create_server(
proto_factory, host=nethost or host, port=port)
async def _start_admin_server(self, port: int) -> asyncio.AbstractServer:
admin_unix_sock_path = os.path.join(
self._runstate_dir, f'.s.EDGEDB.admin.{port}')
admin_unix_srv = await self._loop.create_unix_server(
lambda: binary.EdgeConnection(self, external_auth=True),
admin_unix_sock_path
)
os.chmod(admin_unix_sock_path, stat.S_IRUSR | stat.S_IWUSR)
logger.info('Serving admin on %s', admin_unix_sock_path)
return admin_unix_srv
async def _start_servers(self, hosts, port, admin=True):
servers = {}
try:
async with taskgroup.TaskGroup() as g:
for host in hosts:
servers[host] = g.create_task(
self._start_server(host, port)
)
except Exception:
await self._stop_servers([
fut.result() for fut in servers.values()
if fut.done() and fut.exception() is None
])
raise
servers = {host: fut.result() for host, fut in servers.items()}
addrs = []
for tcp_srv in servers.values():
for s in tcp_srv.sockets:
addrs.append(s.getsockname())
if len(addrs) > 1:
if port:
addr_str = f"{{{', '.join(addr[0] for addr in addrs)}}}:{port}"
else:
addr_str = f"{{{', '.join('%s:%d' % addr for addr in addrs)}}}"
elif addrs:
addr_str = "%s:%d" % addrs[0]
port = addrs[0][1]
else:
addr_str = None
if addr_str:
logger.info('Serving on %s', addr_str)
if admin and port:
try:
admin_unix_srv = await self._start_admin_server(port)
except Exception:
await self._stop_servers(servers.values())
raise
servers[ADMIN_PLACEHOLDER] = admin_unix_srv
return servers, port, addrs
def init_tls(self, tls_cert_file, tls_key_file):
    """Create and install the server's TLS context.

    Loads the certificate chain and private key, raising StartupError
    with an actionable message on failure (wrong or missing key
    password, missing key file, key/certificate mismatch, or generally
    invalid files).  Must be called at most once.
    """
    assert self._sslctx is None
    tls_password_needed = False

    def _tls_private_key_password():
        # Record that OpenSSL actually asked for a password so we can
        # produce a more specific error message below.
        nonlocal tls_password_needed
        tls_password_needed = True
        return os.environ.get('EDGEDB_SERVER_TLS_PRIVATE_KEY_PASSWORD', '')

    sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    try:
        sslctx.load_cert_chain(
            tls_cert_file,
            tls_key_file,
            password=_tls_private_key_password,
        )
    except ssl.SSLError as e:
        if e.library == "SSL" and e.errno == 9:  # ERR_LIB_PEM
            if tls_password_needed:
                if _tls_private_key_password():
                    raise StartupError(
                        "Cannot load TLS certificates - it's likely that "
                        "the private key password is wrong."
                    ) from e
                else:
                    raise StartupError(
                        "Cannot load TLS certificates - the private key "
                        "file is likely protected by a password. Specify "
                        "the password using environment variable: "
                        "EDGEDB_SERVER_TLS_PRIVATE_KEY_PASSWORD"
                    ) from e
            elif tls_key_file is None:
                raise StartupError(
                    "Cannot load TLS certificates - have you specified "
                    "the private key file using the `--tls-key-file` "
                    "command-line argument?"
                ) from e
            else:
                # Chain the original SSLError for debuggability
                # (consistent with the sibling raises above).
                raise StartupError(
                    "Cannot load TLS certificates - please double check "
                    "if the specified certificate files are valid."
                ) from e
        elif e.library == "X509" and e.errno == 116:
            # X509 Error 116: X509_R_KEY_VALUES_MISMATCH
            raise StartupError(
                "Cannot load TLS certificates - the private key doesn't "
                "match the certificate."
            ) from e
        raise StartupError(f"Cannot load TLS certificates - {e}") from e

    sslctx.set_alpn_protocols(['edgedb-binary', 'http/1.1'])
    self._sslctx = sslctx
    self._tls_cert_file = str(tls_cert_file)
async def _stop_servers(self, servers):
async with taskgroup.TaskGroup() as g:
for srv in servers:
srv.close()
g.create_task(srv.wait_closed())
async def start(self):
self._stop_evt.clear()
assert self._task_group is None
self._task_group = taskgroup.TaskGroup()
await self._task_group.__aenter__()
self._accept_new_tasks = True
await self._create_compiler_pool()
# Make sure that EdgeQL parser is preloaded; edgecon might use
# it to restore config values.
ql_parser.preload()
if self._startup_script:
await binary.EdgeConnection.run_script(
server=self,
database=self._startup_script.database,
user=self._startup_script.user,
script=self._startup_script.text,
)
self._servers, actual_port, listen_addrs = await self._start_servers(
_fix_wildcard_host(self._listen_hosts), self._listen_port
)
if self._listen_port == 0:
self._listen_port = actual_port
self._accepting_connections = True
self._serving = True
if self._echo_runtime_info:
ri = {
"port": self._listen_port,
"runstate_dir": str(self._runstate_dir),
"tls_cert_file": self._tls_cert_file,
}
print(f'\nEDGEDB_SERVER_DATA:{json.dumps(ri)}\n', flush=True)
if self._status_sink is not None:
status = {
"listen_addrs": listen_addrs,
"port": self._listen_port,
"socket_dir": str(self._runstate_dir),
"main_pid": os.getpid(),
"tenant_id": self._tenant_id,
"tls_cert_file": self._tls_cert_file,
}
self._status_sink(f'READY={json.dumps(status)}')
async def stop(self):
try:
self._serving = False
self._accept_new_tasks = False
if self._http_request_logger is not None:
self._http_request_logger.cancel()
await self._stop_servers(self._servers.values())
self._servers = {}
for conn in self._binary_conns:
conn.stop()
self._binary_conns = set()
if self._task_group is not None:
tg = self._task_group
self._task_group = None
await tg.__aexit__(*sys.exc_info())
await self._destroy_compiler_pool()
finally:
if self.__sys_pgcon is not None:
self.__sys_pgcon.terminate()
self.__sys_pgcon = None
self._sys_pgcon_waiter = None
def create_task(self, coro):
    # Schedule *coro* on the server's task group.  NOTE(review): when the
    # server no longer accepts tasks (i.e. during shutdown) the coroutine
    # is silently dropped and None is returned -- callers must tolerate
    # both outcomes.
    if self._accept_new_tasks:
        return self._task_group.create_task(coro)
async def serve_forever(self):
await self._stop_evt.wait()
async def get_auth_method(self, user):
    """Return the configured authentication method for *user*.

    With no `auth` entries configured, fall back to SCRAM.  Otherwise
    the first entry (entries are pre-sorted by priority) matching the
    user name -- or containing the '*' wildcard -- wins; if nothing
    matches, None is returned.
    """
    authlist = self._sys_auth
    if not authlist:
        default_method = 'SCRAM'
        return config.get_settings().get_type_by_name(default_method)()
    for auth in authlist:
        if user in auth.user or '*' in auth.user:
            return auth.method
    def get_sys_query(self, key):
        """Return the preloaded system query registered under *key*."""
        return self._sys_queries[key]
    def get_instance_data(self, key):
        """Return the instance-data entry stored under *key*."""
        return self._instance_data[key]
    def get_backend_runtime_params(self) -> Any:
        """Return the backend cluster's runtime parameters."""
        return self._cluster.get_runtime_params()
def set_pg_unavailable_msg(self, msg):
if msg is None or self._pg_unavailable_msg is None:
self._pg_unavailable_msg = msg
async def _resolve_localhost() -> List[str]:
# On many systems 'localhost' resolves to _both_ IPv4 and IPv6
# addresses, even if the system is not capable of handling
# IPv6 connections. Due to the common nature of this issue
# we explicitly disable the AF_INET6 component of 'localhost'.
loop = asyncio.get_running_loop()
localhost = await loop.getaddrinfo(
'localhost',
0,
family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM,
flags=socket.AI_PASSIVE,
proto=0,
)
infos = [a for a in localhost if a[0] == socket.AF_INET]
if not infos:
# "localhost" did not resolve to an IPv4 address,
# let create_server handle the situation.
return ["localhost"]
# Replace 'localhost' with explicitly resolved AF_INET addresses.
hosts = []
for info in reversed(infos):
addr, *_ = info[4]
hosts.append(addr)
return hosts
def _fix_wildcard_host(hosts: Sequence[str]) -> Sequence[str]:
# Even though it is sometimes not a conflict to bind on the same port of
# both the wildcard host 0.0.0.0 and some specific host at the same time,
# we're still discarding other hosts if 0.0.0.0 is present because it
# should behave the same and we could avoid potential conflicts.
if '0.0.0.0' in hosts:
if len(hosts) > 1:
logger.warning(
"0.0.0.0 found in listen_addresses; "
"discarding the other hosts."
)
hosts = ['0.0.0.0']
return hosts
| 34.861244 | 79 | 0.586467 |
ace23d5ba11d974d8c780839107f0659084f3b76 | 67,251 | py | Python | problemtools/verifyproblem.py | Kakalinn/problemtools | 6cf30eb5f2c6e21531fc2f1ae858d1213a97511e | [
"MIT"
] | 1 | 2021-11-10T11:40:59.000Z | 2021-11-10T11:40:59.000Z | problemtools/verifyproblem.py | Kakalinn/problemtools | 6cf30eb5f2c6e21531fc2f1ae858d1213a97511e | [
"MIT"
] | null | null | null | problemtools/verifyproblem.py | Kakalinn/problemtools | 6cf30eb5f2c6e21531fc2f1ae858d1213a97511e | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import glob
import string
import hashlib
import collections
import os
import signal
import re
import shutil
import logging
import tempfile
import sys
import copy
import random
import argparse
import yaml
from . import problem2pdf
from . import problem2html
from . import config
from . import languages
from . import run
def is_TLE(status, may_signal_with_usr1=False):
    """Return whether a wait status indicates a CPU time-limit kill.

    A run counts as TLE when it was terminated by SIGXCPU, or by SIGUSR1
    when *may_signal_with_usr1* is set (some runners signal with USR1).
    """
    if not os.WIFSIGNALED(status):
        return False
    sig = os.WTERMSIG(status)
    if sig == signal.SIGXCPU:
        return True
    return may_signal_with_usr1 and sig == signal.SIGUSR1
def is_RTE(status):
    """Return truthy when a wait status indicates a runtime error.

    Anything that did not exit normally with status 0 counts: killed by a
    signal (True) or a non-zero exit code (the code itself, truthy).
    """
    if not os.WIFEXITED(status):
        return True
    return os.WEXITSTATUS(status)
class SubmissionResult:
    """Outcome of judging a submission on a test case or test case group.

    Carries the verdict ('AC', 'WA', 'TLE', 'RTE', 'JE', ...), an optional
    score, the test case involved, and runtime bookkeeping (overall slowest
    run and slowest accepted run).
    """

    def __init__(self, verdict, score=None, testcase=None, reason=None, additional_info=None):
        self.verdict = verdict
        self.score = score
        self.testcase = testcase
        self.reason = reason
        self.additional_info = additional_info
        # Slowest observed runtime and the test case that produced it
        # (-1.0 means "no run recorded yet").
        self.runtime = -1.0
        self.runtime_testcase = None
        # Same bookkeeping, restricted to accepted runs.
        self.ac_runtime = -1.0
        self.ac_runtime_testcase = None
        self.validator_first = False
        self.sample_failures = []

    def set_ac_runtime(self):
        """Copy runtime into the AC-only fields when the verdict is 'AC'."""
        if self.verdict != 'AC':
            return
        self.ac_runtime = self.runtime
        self.ac_runtime_testcase = self.runtime_testcase

    def __str__(self):
        label = self.verdict
        if label == 'AC' and self.score is not None:
            label = '%s (%.0f)' % (label, self.score)
        parts = []
        if self.reason is not None:
            parts.append(self.reason)
        if self.testcase is not None and self.verdict != 'AC':
            parts.append('test case: %s' % self.testcase)
        if self.runtime != -1:
            parts.append('CPU: %.2fs @ %s' % (self.runtime, self.runtime_testcase))
        if not parts:
            return label
        return '%s [%s]' % (label, ', '.join(parts))
class VerifyError(Exception):
    """Raised when a verification error occurs and bail_on_error is set."""
class ProblemAspect:
    """Base class for every checkable part of a problem package.

    Subclasses implement check(args) and report findings through error()
    and warning().  Error/warning totals are accumulated in class-level
    counters so the overall run can report a combined result.
    """
    # Truncate long auxiliary output (validator logs etc.) to this many lines.
    max_additional_info = 15
    errors = 0
    warnings = 0
    bail_on_error = False
    # Memoized result of check(); None means "not checked yet".
    _check_res = None
    basename_regex = re.compile('^[a-zA-Z0-9][a-zA-Z0-9_.-]*[a-zA-Z0-9]$')
    @staticmethod
    def __append_additional_info(msg, additional_info):
        # Format msg with optional extra detail: inline for a single line,
        # indented block (possibly truncated) for multiple lines.
        if additional_info is None or ProblemAspect.max_additional_info <= 0:
            return msg
        additional_info = additional_info.rstrip()
        if not additional_info:
            return msg
        lines = additional_info.split('\n')
        if len(lines) == 1:
            return '%s (%s)' % (msg, lines[0])
        if len(lines) > ProblemAspect.max_additional_info:
            lines = lines[:ProblemAspect.max_additional_info] + ['[.....truncated to %d lines.....]' % ProblemAspect.max_additional_info]
        return '%s:\n%s' % (msg, '\n'.join(' '*8 + line for line in lines))
    def error(self, msg, additional_info=None):
        """Log an error, fail this aspect, and optionally abort the run."""
        self._check_res = False
        ProblemAspect.errors += 1
        logging.error('in %s: %s',
                      self, ProblemAspect.__append_additional_info(msg, additional_info))
        if ProblemAspect.bail_on_error:
            raise VerifyError(msg)
    def warning(self, msg, additional_info=None):
        """Log a warning; escalated to an error in strict mode."""
        # NOTE(review): consider_warnings_errors is not defined in this
        # class; it appears to be set externally (e.g. from CLI args) —
        # confirm it is assigned before any warning() call.
        if ProblemAspect.consider_warnings_errors:
            self.error(msg)
            return
        ProblemAspect.warnings += 1
        logging.warning('in %s: %s',
                        self, ProblemAspect.__append_additional_info(msg, additional_info))
    def msg(self, msg):
        # Plain message straight to stdout, bypassing the logging config.
        print(msg)
    def info(self, msg):
        logging.info(': %s', msg)
    def debug(self, msg):
        logging.debug(': %s', msg)
    def check_basename(self, path):
        """Report an error unless the basename of *path* is a legal name."""
        basename = os.path.basename(path)
        if not self.basename_regex.match(basename):
            self.error("Invalid name '%s' (should match '%s')" % (basename, self.basename_regex.pattern))
class TestCase(ProblemAspect):
    """A single test case: an .in/.ans file pair inside a test case group.

    Handles validation of the files, symlink-based result reuse, and
    running/judging a submission on this case with result caching.
    """
    def __init__(self, problem, base, testcasegroup):
        # *base* is the path without extension; the .in/.ans pair derives
        # from it.
        self._base = base
        self.infile = base + '.in'
        self.ansfile = base + '.ans'
        self._problem = problem
        self.testcasegroup = testcasegroup
        # Set later when this case's infile is a symlink to another case.
        self.reuse_result_from = None
        # (cache_key, (res1, res2)) of the last run — one entry only.
        self._result_cache = (None, None)
        problem.testcase_by_infile[self.infile] = self
    def check_newlines(self, filename):
        """Warn about CR characters and a missing trailing newline."""
        with open(filename, 'r') as f:
            data = f.read()
        if data.find('\r') != -1:
            self.warning('The file %s contains non-standard line breaks.'
                         % filename)
        if len(data) > 0 and data[-1] != '\n':
            self.warning("The file %s does not end with '\\n'." % filename)
    def strip_path_prefix(self, path):
        """Return *path* relative to the problem's data/ directory."""
        return os.path.relpath(path, os.path.join(self._problem.probdir, 'data'))
    def is_in_sample_group(self):
        return self.strip_path_prefix(self.infile).startswith('sample')
    def check(self, args):
        """Validate names, contents, answer size and symlink structure."""
        if self._check_res is not None:
            return self._check_res
        self._check_res = True
        self.check_basename(self.infile)
        self.check_basename(self.ansfile)
        self.check_newlines(self.infile)
        self.check_newlines(self.ansfile)
        self._problem.input_format_validators.validate(self)
        # Answer size is compared in megabytes against the output limit.
        anssize = os.path.getsize(self.ansfile) / 1024.0 / 1024.0
        outputlim = self._problem.config.get('limits')['output']
        if anssize > outputlim:
            self.error('Answer file (%.1f Mb) is larger than output limit (%d Mb), you need to increase output limit' % (anssize, outputlim))
        elif 2 * anssize > outputlim:
            self.warning('Answer file (%.1f Mb) is within 50%% of output limit (%d Mb), you might want to increase output limit' % (anssize, outputlim))
        if not self._problem.is_interactive:
            # The judge's own answer must pass the output validator; for
            # sample data a failure is fatal, for secret data a warning.
            val_res = self._problem.output_validators.validate(self, self.ansfile)
            if val_res.verdict != 'AC':
                if self.is_in_sample_group():
                    self.error('judge answer file got %s' % val_res)
                else:
                    self.warning('judge answer file got %s' % val_res)
        self._check_symlinks()
        return self._check_res
    def __str__(self):
        return 'test case %s' % self.strip_path_prefix(self._base)
    def matches_filter(self, filter_re):
        return filter_re.search(self.strip_path_prefix(self._base)) is not None
    def set_symlinks(self):
        """Link this case to its symlink target for result reuse, if any."""
        if not os.path.islink(self.infile):
            return
        target = os.path.realpath(self.infile)
        if target in self._problem.testcase_by_infile:
            self.reuse_result_from = self._problem.testcase_by_infile[target]
    def _check_symlinks(self):
        """Verify that a symlinked test case points at a consistent pair."""
        if not os.path.islink(self.infile):
            return True
        nicepath = os.path.relpath(self.infile, self._problem.probdir)
        in_target = os.path.realpath(self.infile)
        ans_target = os.path.realpath(self.ansfile)
        if not in_target.endswith('.in'):
            self.error("Symbolic link does not point to a .in file for input '%s'" % nicepath)
            return False
        if ans_target != in_target[:-3] + '.ans':
            self.error("Symbolic link '%s' must have a corresponding link for answer file" % nicepath)
            return False
        if self.reuse_result_from is None:
            self.error("Symbolic link points outside data/ directory for file '%s'" % nicepath)
            return False
        if self.testcasegroup.config['output_validator_flags'] != self.reuse_result_from.testcasegroup.config['output_validator_flags']:
            self.error("Symbolic link '%s' points to test case with different output validator flags" % nicepath)
            return False
        return True
    def run_submission(self, sub, args, timelim_low, timelim_high):
        """Run *sub* on this case and return (result, shadow result).

        res1 is judged against timelim_low (the real limit), res2 against
        timelim_high (the safety margin used for timing warnings).
        """
        res1, res2, reused = self._run_submission_real(sub, args, timelim_low, timelim_high)
        res1 = self._init_result_for_testcase(res1)
        res2 = self._init_result_for_testcase(res2)
        msg = "Reused test file result" if reused else "Test file result"
        self.info('%s: %s' % (msg, res1))
        if res1.verdict != 'AC' and self.is_in_sample_group():
            res1.sample_failures.append(res1)
        return (res1, res2)
    def _run_submission_real(self, sub, args, timelim_low, timelim_high):
        # Delegate to the symlink target so identical inputs are run once.
        if self.reuse_result_from is not None:
            return self.reuse_result_from._run_submission_real(sub, args, timelim_low, timelim_high)
        cache_key = (sub, args, timelim_low, timelim_high)
        if self._result_cache[0] == cache_key:
            res1, res2 = self._result_cache[1]
            return (res1, res2, True)
        outfile = os.path.join(self._problem.tmpdir, 'output')
        if sys.stdout.isatty():
            msg = 'Running %s on %s...' % (sub, self)
            sys.stdout.write('%s' % msg)
            sys.stdout.flush()
        if self._problem.is_interactive:
            res2 = self._problem.output_validators.validate_interactive(self, sub, timelim_high, self._problem.submissions)
        else:
            status, runtime = sub.run(self.infile, outfile,
                                      timelim=timelim_high+1,
                                      memlim=self._problem.config.get('limits')['memory'])
            if is_TLE(status) or runtime > timelim_high:
                res2 = SubmissionResult('TLE')
            elif is_RTE(status):
                res2 = SubmissionResult('RTE')
            else:
                res2 = self._problem.output_validators.validate(self, outfile)
            res2.runtime = runtime
        if sys.stdout.isatty():
            # Erase the progress message with backspace-space-backspace.
            sys.stdout.write('%s' % '\b \b' * (len(msg)))
        if res2.runtime <= timelim_low:
            res1 = res2
        elif res2.validator_first and res2.verdict == 'WA':
            # WA can override TLE for interactive problems (see comment in validate_interactive).
            res1 = SubmissionResult('WA')
            res1.validator_first = True
            res2.runtime = timelim_low
        else:
            res1 = SubmissionResult('TLE')
            res1.runtime = res2.runtime
        res1.set_ac_runtime()
        res2.set_ac_runtime()
        self._result_cache = (cache_key, (res1, res2))
        return (res1, res2, False)
    def _init_result_for_testcase(self, res):
        # Shallow-copy so the cached result is not mutated per caller.
        res = copy.copy(res)
        res.testcase = self
        res.runtime_testcase = self
        if res.score is None:
            if res.verdict == 'AC':
                res.score = self.testcasegroup.config['accept_score']
            else:
                res.score = self.testcasegroup.config['reject_score']
        return res
    def get_all_testcases(self):
        return [self]
    def all_datasets(self):
        return [self._base]
class TestCaseGroup(ProblemAspect):
    """A directory of test data: test cases plus nested sub-groups.

    Grading configuration comes from the group's testdata.yaml, with
    missing keys inherited from the parent group (and, transitionally,
    from deprecated keys in problem.yaml).
    """
    _DEFAULT_CONFIG = config.load_config('testdata.yaml')
    # Keys that are only meaningful for scoring (not pass-fail) problems.
    _SCORING_ONLY_KEYS = ['accept_score', 'reject_score', 'range']
    def __init__(self, problem, datadir, parent=None):
        self._parent = parent
        self._problem = problem
        self._datadir = datadir
        # Set once an out-of-bounds score has been reported for this group.
        self._seen_oob_scores = False
        self.debug(' Loading test data group %s' % datadir)
        configfile = os.path.join(self._datadir, 'testdata.yaml')
        if os.path.isfile(configfile):
            try:
                with open(configfile) as f:
                    self.config = yaml.safe_load(f)
            except Exception as e:
                self.error(e)
                self.config = {}
        else:
            self.config = {}
        # For non-root groups, missing properties are inherited from the parent group
        if parent:
            for field, parent_value in parent.config.items():
                if not field in self.config:
                    self.config[field] = parent_value
        # Some deprecated properties are inherited from problem config during a transition period
        problem_grading = problem.config.get('grading')
        for key in ['accept_score', 'reject_score', 'range']:
            if key in problem.config.get('grading'):
                self.config[key] = problem_grading[key]
        problem_on_reject = problem_grading.get('on_reject')
        if problem_on_reject == 'first_error':
            self.config['on_reject'] = 'break'
        if problem_on_reject == 'grade':
            self.config['on_reject'] = 'continue'
        if self._problem.config.get('type') == 'pass-fail':
            for key in TestCaseGroup._SCORING_ONLY_KEYS:
                if key not in self.config:
                    self.config[key] = None
        # Fill any still-missing keys with the packaged defaults.
        for field, default in TestCaseGroup._DEFAULT_CONFIG.items():
            if field not in self.config:
                self.config[field] = default
        # Children: sub-directories become groups, .in/.ans pairs cases.
        self._items = []
        if os.path.isdir(datadir):
            for f in sorted(os.listdir(datadir)):
                f = os.path.join(datadir, f)
                if os.path.isdir(f):
                    self._items.append(TestCaseGroup(problem, f, self))
                else:
                    base, ext = os.path.splitext(f)
                    if ext == '.ans' and os.path.isfile(base + '.in'):
                        self._items.append(TestCase(problem, base, self))
        if not parent:
            self.set_symlinks()
    def __str__(self):
        return 'test case group %s' % os.path.relpath(self._datadir, os.path.join(self._problem.probdir))
    def set_symlinks(self):
        # Recursively resolve symlink-based result reuse in all children.
        for sub in self._items:
            sub.set_symlinks()
    def matches_filter(self, filter_re):
        # Groups always match; filtering happens at the test case level.
        return True
    def get_all_testcases(self):
        """Return every TestCase in this group and its subgroups."""
        res = []
        for child in self._items:
            res += child.get_all_testcases()
        return res
    def get_testcases(self):
        return [child for child in self._items if isinstance(child, TestCase)]
    def get_subgroups(self):
        return [child for child in self._items if isinstance(child, TestCaseGroup)]
    def get_subgroup(self, name):
        return next((child for child in self._items if isinstance(child, TestCaseGroup) and os.path.basename(child._datadir) == name), None)
    def has_custom_groups(self):
        # True when any direct subgroup itself contains subgroups.
        return any(group.get_subgroups() for group in self.get_subgroups())
    def get_score_range(self):
        """Parse the 'range' config as (min, max); unbounded on failure."""
        try:
            score_range = self.config['range']
            min_score, max_score = list(map(float, score_range.split()))
            return (min_score, max_score)
        except:
            return (-float('inf'), float('inf'))
    def check(self, args):
        """Validate this group's config, files and children recursively."""
        if self._check_res is not None:
            return self._check_res
        self._check_res = True
        self.check_basename(self._datadir)
        if self.config['grading'] not in ['default', 'custom']:
            self.error("Invalid grading policy in testdata.yaml")
        if self.config['grading'] == 'custom' and len(self._problem.graders._graders) == 0:
            self._problem.graders.error('%s has custom grading but no custom graders provided' % self)
        if self.config['grading'] == 'default' and Graders._default_grader is None:
            self._problem.graders.error('%s has default grading but I could not find default grader' % self)
        if self.config['grading'] == 'default' and 'ignore_sample' in self.config['grader_flags'].split():
            if self._parent is not None:
                self.error("'grader_flags: ignore_sample' is specified, but that flag is only allowed at top level")
            elif self.config['on_reject'] == 'break':
                self.error("'grader_flags: ignore_sample' is specified, but 'on_reject: break' may cause secret data not to be judged")
        for field in self.config.keys():
            if field not in TestCaseGroup._DEFAULT_CONFIG.keys():
                self.warning("Unknown key '%s' in '%s'" % (field, os.path.join(self._datadir, 'testdata.yaml')))
        if not self._problem.is_scoring:
            for key in TestCaseGroup._SCORING_ONLY_KEYS:
                if self.config.get(key) is not None:
                    self.error("Key '%s' is only applicable for scoring problems, this is a pass-fail problem" % key)
        if self.config['on_reject'] not in ['break', 'continue']:
            self.error("Invalid value '%s' for on_reject policy" % self.config['on_reject'])
        if self._problem.is_scoring:
            # Check grading
            try:
                score_range = self.config['range']
                min_score, max_score = list(map(float, score_range.split()))
                if min_score > max_score:
                    self.error("Invalid score range '%s': minimum score cannot be greater than maximum score" % score_range)
            except VerifyError:
                raise
            except:
                self.error("Invalid format '%s' for range: must be exactly two floats" % score_range)
        if self._parent is None:
            # Root-level checks: only sample/secret groups, and detect
            # duplicated (identical-content) input files.
            seen_secret = False
            seen_sample = False
            for item in self._items:
                if not isinstance(item, TestCaseGroup):
                    self.error("Can't have individual test data files at top level")
                else:
                    name = os.path.basename(item._datadir)
                    if name == 'secret':
                        seen_secret = True
                    elif name == 'sample':
                        seen_sample = True
                    else:
                        self.error("Test data at top level can only have the groups sample and secret")
                        self.debug(self._items)
            if not seen_secret:
                self.error("No secret data provided")
            if not seen_sample:
                self.warning("No sample data provided")
            hashes = collections.defaultdict(list)
            for root, dirs, files in os.walk(self._datadir):
                for filename in files:
                    filepath = os.path.join(root, filename)
                    if filepath.endswith('.in') and not os.path.islink(filepath):
                        md5 = hashlib.md5()
                        with open(filepath, 'rb') as f:
                            for buf in iter(lambda: f.read(1024), b''):
                                md5.update(buf)
                        filehash = md5.digest()
                        hashes[filehash].append(os.path.relpath(filepath, self._problem.probdir))
            for _, files in hashes.items():
                if len(files) > 1:
                    self.warning("Identical input files: '%s'" % str(files))
        infiles = glob.glob(os.path.join(self._datadir, '*.in'))
        ansfiles = glob.glob(os.path.join(self._datadir, '*.ans'))
        for f in infiles:
            if not f[:-3] + '.ans' in ansfiles:
                self.error("No matching answer file for input '%s'" % f)
        for f in ansfiles:
            if not f[:-4] + '.in' in infiles:
                self.error("No matching input file for answer '%s'" % f)
        # Check whether a <= b according to a natural sorting where numeric components
        # are compactified, so that e.g. "a" < "a1" < "a2" < "a10" = "a010" < "a10a".
        def natural_sort_le(a, b):
            a += '\0'
            b += '\0'
            i = j = 0
            def parse_num(s, i):
                ret = 0
                while ord('0') <= ord(s[i]) <= ord('9'):
                    ret = ret * 10 + ord(s[i]) - ord('0')
                    i += 1
                return ret, i
            while i < len(a) and j < len(b):
                # NOTE(review): b is indexed with i here, not j.  i and j can
                # diverge after equal numbers with different digit counts
                # (e.g. "10" vs "010"), so this looks like it should be
                # b[j] — confirm against upstream before relying on it.
                if ord('0') <= ord(a[i]) <= ord('9') and ord('0') <= ord(b[i]) <= ord('9'):
                    anum,i = parse_num(a, i)
                    bnum,j = parse_num(b, j)
                    if anum == bnum:
                        continue
                    return anum < bnum
                if a[i] == b[j]:
                    i += 1
                    j += 1
                    continue
                return a[i] < b[j]
            return True
        last_testgroup_name = ''
        for group in self.get_subgroups():
            name = os.path.relpath(group._datadir, self._problem.probdir)
            if natural_sort_le(name, last_testgroup_name):
                self.warning("Test data group '%s' will be ordered before '%s'; consider zero-padding" % (last_testgroup_name, name))
            last_testgroup_name = name
        for child in self._items:
            if child.matches_filter(args.data_filter):
                child.check(args)
        return self._check_res
    def run_submission(self, sub, args, timelim_low, timelim_high):
        """Run *sub* on all children, honoring the on_reject policy.

        Returns aggregated (result, shadow result) for the group.
        """
        self.info('Running on %s' % self)
        subres1 = []
        subres2 = []
        on_reject = self.config['on_reject']
        for child in self._items:
            if not child.matches_filter(args.data_filter):
                continue
            r1, r2 = child.run_submission(sub, args, timelim_low, timelim_high)
            subres1.append(r1)
            subres2.append(r2)
            # 'break' policy: stop at the first rejected child.
            if on_reject == 'break' and r2.verdict != 'AC':
                break
        return (self.aggregate_results(sub, subres1),
                self.aggregate_results(sub, subres2, shadow_result=True))
    def aggregate_results(self, sub, sub_results, shadow_result=False):
        """Combine child results into one group-level SubmissionResult."""
        res = SubmissionResult(None)
        # Track the slowest run (overall and AC-only) across all children.
        for r in sub_results:
            if r.runtime > res.runtime:
                res.runtime = r.runtime
                res.runtime_testcase = r.runtime_testcase
            if r.ac_runtime > res.ac_runtime:
                res.ac_runtime = r.ac_runtime
                res.ac_runtime_testcase = r.ac_runtime_testcase
            res.sample_failures.extend(r.sample_failures)
        # A judge error anywhere dominates every other verdict.
        judge_error = next((r for r in sub_results if r.verdict == 'JE'), None)
        if judge_error:
            res.verdict = judge_error.verdict
            res.reason = judge_error.reason
            res.additional_info = judge_error.additional_info
            res.testcase = judge_error.testcase
        else:
            res.verdict, score = self._problem.graders.grade(sub_results, self, shadow_result)
            if sub_results:
                res.testcase = sub_results[-1].testcase
                res.additional_info = sub_results[-1].additional_info
            if self._problem.is_scoring:
                res.score = score
                min_score, max_score = self.get_score_range()
                if not (min_score <= score <= max_score) and not self._seen_oob_scores:
                    # Don't warn twice on the same subgroup, since every submission is likely
                    # to have the same error.
                    self._seen_oob_scores = True
                    groupname = os.path.relpath(self._datadir, self._problem.probdir)
                    self.error('submission %s got %s on group %s, which is outside of expected score range [%s, %s]' % (sub, res, groupname, min_score, max_score))
        return res
    def all_datasets(self):
        """Return the base paths of every test case, depth-first."""
        res = []
        for child in self._items:
            res += child.all_datasets()
        return res
class ProblemConfig(ProblemAspect):
    """problem.yaml parsed, normalized and validated.

    Fields default from the packaged problem.yaml template; some values
    (e.g. the name) are also pulled from the problem statement.
    """
    _MANDATORY_CONFIG = ['name']
    _OPTIONAL_CONFIG = config.load_config('problem.yaml')
    _VALID_LICENSES = ['unknown', 'public domain', 'cc0', 'cc by', 'cc by-sa', 'educational', 'permission']
    def __init__(self, problem):
        self.debug(' Loading problem config')
        self._problem = problem
        self.configfile = os.path.join(problem.probdir, 'problem.yaml')
        self._data = {}
        if os.path.isfile(self.configfile):
            try:
                with open(self.configfile) as f:
                    self._data = yaml.safe_load(f)
                # Loading empty yaml yields None, for no apparent reason...
                if self._data is None:
                    self._data = {}
            except Exception as e:
                self.error(e)
        # Add config items from problem statement e.g. name
        self._data.update(problem.statement.get_config())
        # Populate rights_owner unless license is public domain
        if 'rights_owner' not in self._data and self._data.get('license') != 'public domain':
            if 'author' in self._data:
                self._data['rights_owner'] = self._data['author']
            elif 'source' in self._data:
                self._data['rights_owner'] = self._data['source']
        if 'license' in self._data:
            self._data['license'] = self._data['license'].lower()
        # Ugly backwards compatibility hack
        if 'name' in self._data and not isinstance(self._data['name'], dict):
            self._data['name'] = {'': self._data['name']}
        # Keep a pristine copy so check() can distinguish user-provided
        # keys from filled-in defaults.
        self._origdata = copy.deepcopy(self._data)
        for field, default in copy.deepcopy(ProblemConfig._OPTIONAL_CONFIG).items():
            if not field in self._data:
                self._data[field] = default
            elif isinstance(default, dict) and isinstance(self._data[field], dict):
                # Merge dict-valued defaults, user values winning.
                self._data[field] = dict(list(default.items()) + list(self._data[field].items()))
        # Split 'validation' into its type and optional parameters.
        val = self._data['validation'].split()
        self._data['validation-type'] = val[0]
        self._data['validation-params'] = val[1:]
        self._data['grading']['custom_scoring'] = False
        for param in self._data['validation-params']:
            if param == 'score':
                self._data['grading']['custom_scoring'] = True
            elif param == 'interactive':
                pass
    def __str__(self):
        return 'problem configuration'
    def get(self, key=None):
        """Return the value for *key*, or the whole config dict if None."""
        if key:
            return self._data[key]
        return self._data
    def check(self, args):
        """Validate the normalized configuration, reporting all issues."""
        if self._check_res is not None:
            return self._check_res
        self._check_res = True
        if not os.path.isfile(self.configfile):
            self.error("No config file %s found" % self.configfile)
        for field in ProblemConfig._MANDATORY_CONFIG:
            if not field in self._data:
                self.error("Mandatory field '%s' not provided" % field)
        for field, value in self._origdata.items():
            if field not in ProblemConfig._OPTIONAL_CONFIG.keys() and field not in ProblemConfig._MANDATORY_CONFIG:
                self.warning("Unknown field '%s' provided in problem.yaml" % field)
        for field, value in self._data.items():
            if value is None:
                # Replace explicit nulls with the default (or '').
                self.error("Field '%s' provided in problem.yaml but is empty" % field)
                self._data[field] = ProblemConfig._OPTIONAL_CONFIG.get(field, '')
        # Check type
        if not self._data['type'] in ['pass-fail', 'scoring']:
            self.error("Invalid value '%s' for type" % self._data['type'])
        # Check rights_owner
        if self._data['license'] == 'public domain':
            if self._data['rights_owner'].strip() != '':
                self.error('Can not have a rights_owner for a problem in public domain')
        elif self._data['license'] != 'unknown':
            if self._data['rights_owner'].strip() == '':
                self.error('No author, source or rights_owner provided')
        # Check source_url
        if (self._data['source_url'].strip() != '' and
            self._data['source'].strip() == ''):
            self.error('Can not provide source_url without also providing source')
        # Check license
        if not self._data['license'] in ProblemConfig._VALID_LICENSES:
            self.error("Invalid value for license: %s.\n Valid licenses are %s" % (self._data['license'], ProblemConfig._VALID_LICENSES))
        elif self._data['license'] == 'unknown':
            self.warning("License is 'unknown'")
        if self._data['grading']['show_test_data_groups'] not in [True, False]:
            self.error("Invalid value for grading.show_test_data_groups: %s" % self._data['grading']['show_test_data_groups'])
        elif self._data['grading']['show_test_data_groups'] and self._data['type'] == 'pass-fail':
            self.error("Showing test data groups is only supported for scoring problems, this is a pass-fail problem")
        if self._data['type'] != 'pass-fail' and self._problem.testdata.has_custom_groups() and 'show_test_data_groups' not in self._origdata.get('grading', {}):
            self.warning("Problem has custom test case groups, but does not specify a value for grading.show_test_data_groups; defaulting to false")
        if 'on_reject' in self._data['grading']:
            if self._data['type'] == 'pass-fail' and self._data['grading']['on_reject'] == 'grade':
                self.error("Invalid on_reject policy '%s' for problem type '%s'" % (self._data['grading']['on_reject'], self._data['type']))
            if not self._data['grading']['on_reject'] in ['first_error', 'worst_error', 'grade']:
                self.error("Invalid value '%s' for on_reject policy" % self._data['grading']['on_reject'])
        if self._data['grading']['objective'] not in ['min', 'max']:
            self.error("Invalid value '%s' for objective" % self._data['grading']['objective'])
        for deprecated_grading_key in ['accept_score', 'reject_score', 'range', 'on_reject']:
            if deprecated_grading_key in self._data['grading']:
                self.warning("Grading key '%s' is deprecated in problem.yaml, use '%s' in testdata.yaml instead" % (deprecated_grading_key, deprecated_grading_key))
        if not self._data['validation-type'] in ['default', 'custom']:
            self.error("Invalid value '%s' for validation, first word must be 'default' or 'custom'" % self._data['validation'])
        if self._data['validation-type'] == 'default' and len(self._data['validation-params']) > 0:
            self.error("Invalid value '%s' for validation" % (self._data['validation']))
        if self._data['validation-type'] == 'custom':
            for param in self._data['validation-params']:
                if param not in['score', 'interactive']:
                    self.error("Invalid parameter '%s' for custom validation" % param)
        # Check limits
        if not isinstance(self._data['limits'], dict):
            self.error('Limits key in problem.yaml must specify a dict')
            self._data['limits'] = ProblemConfig._OPTIONAL_CONFIG['limits']
        # Some things not yet implemented
        if self._data['libraries'] != '':
            self.error("Libraries not yet supported")
        if self._data['languages'] != '':
            self.error("Languages not yet supported")
        return self._check_res
class ProblemStatement(ProblemAspect):
    """The problem statement(s): problem.tex / problem.<lang>.tex files.

    check() verifies that every statement compiles to PDF and converts to
    HTML; get_config() extracts statement-level config such as the name.
    """
    def __init__(self, problem):
        self.debug(' Loading problem statement')
        self._problem = problem
        # Language codes of available statements; '' = unsuffixed
        # problem.tex.
        self.languages = []
        glob_path = os.path.join(problem.probdir, 'problem_statement', 'problem.')
        if glob.glob(glob_path + 'tex'):
            self.languages.append('')
        for f in glob.glob(glob_path + '[a-z][a-z].tex'):
            self.languages.append(re.search("problem.([a-z][a-z]).tex$", f).group(1))
    def check(self, args):
        """Try PDF and HTML conversion for every statement language."""
        if self._check_res is not None:
            return self._check_res
        self._check_res = True
        if not self.languages:
            self.error('No problem statements found (expected problem.tex or problem.[a-z][a-z].tex in problem_statement directory)')
        if '' in self.languages and 'en' in self.languages:
            self.error("Can't supply both problem.tex and problem.en.tex")
        pdfopt = problem2pdf.ConvertOptions()
        pdfopt.nopdf = True
        pdfopt.quiet = True
        htmlopt = problem2html.ConvertOptions()
        htmlopt.destdir = os.path.join(self._problem.tmpdir, 'html')
        htmlopt.quiet = True
        for lang in self.languages:
            pdfopt.language = lang
            htmlopt.language = lang
            try:
                if not problem2pdf.convert(self._problem.probdir, pdfopt):
                    langparam = ''
                    if lang != '':
                        langparam = '-l ' + lang
                    self.error('Could not compile problem statement for language "%s". Run problem2pdf %s on the problem to diagnose.' % (lang, langparam))
            except Exception as e:
                self.error('Error raised when checking problem statement for language %s:\n%s' % (lang, e))
            try:
                problem2html.convert(self._problem.probdir, htmlopt)
            except Exception as e:
                langparam = ''
                if lang != '':
                    langparam = '-l ' + lang
                self.error('Could not convert problem statement to html for language "%s". Run problem2html %s on the problem to diagnose.' % (lang, langparam))
        return self._check_res
    def __str__(self):
        return 'problem statement'
    def get_config(self):
        """Extract config values embedded in the statements (e.g. name).

        Returns e.g. {'name': {lang: value, ...}} for every language whose
        statement declares \\problemname or a plainproblemname comment.
        """
        ret = {}
        for lang in self.languages:
            filename = ('problem.%s.tex' % lang) if lang != '' else 'problem.tex'
            # NOTE(review): file handle is never closed explicitly; relies
            # on refcounting.
            stmt = open(os.path.join(self._problem.probdir, 'problem_statement', filename)).read()
            patterns = [(r'\\problemname{(.*)}', 'name'),
                        (r'^%%\s*plainproblemname:(.*)$', 'name')
                        ]
            for tup in patterns:
                pattern = tup[0]
                dest = tup[1]
                hit = re.search(pattern, stmt, re.MULTILINE)
                if hit:
                    if not dest in ret:
                        ret[dest] = {}
                    ret[dest][lang] = hit.group(1).strip()
        return ret
class Attachments(ProblemAspect):
    """Represents the attachments of a problem.

    Attributes:
        attachments: absolute paths to every file found in the problem's
            attachments/ directory (empty when that directory is missing).
    """
    def __init__(self, problem):
        base = os.path.join(problem.probdir, 'attachments')
        self.attachments = []
        if os.path.isdir(base):
            for name in os.listdir(base):
                self.attachments.append(os.path.join(base, name))
        self.debug('Adding attachments %s' % str(self.attachments))
    def check(self, args):
        # Aspects are only checked once; reuse the memoized result.
        if self._check_res is not None:
            return self._check_res
        self._check_res = True
        for path in self.attachments:
            if os.path.isdir(path):
                self.error('Directories are not allowed as attachments (%s is a directory)' % path)
        return self._check_res
    def get_attachment_paths(self):
        """Return the absolute paths to all attachment files."""
        return self.attachments
    def __str__(self):
        return 'attachments'
# Pathological "junk" inputs that every input format validator is expected
# to reject; used for sanity-checking the validators themselves.
_JUNK_CASES = [
    ('an empty file', b''),
    ('a binary file with byte values 0 up to 256', bytearray(x for x in range(256))),
    ('a text file with the ASCII characters 32 up to 127', bytearray(x for x in range(32, 127))),
    ('a random text file with printable ASCII characters', bytearray(random.choice(string.printable.encode('utf8')) for _ in range(200))),
]
def _build_junk_modifier(desc, pattern, repl):
p = re.compile(pattern)
return (desc, p.search, lambda text: p.sub(repl, text))
# (description, applicable?, modifier) triples describing superficially
# harmless mutations of valid input files; the input format validators are
# expected to reject the mutated files.
_JUNK_MODIFICATIONS = [
    _build_junk_modifier('spaces added where there already is whitespace', r'\s', lambda m: m.group(0) + ' ' * random.randint(1, 5)),
    _build_junk_modifier('newlines added where there already are newlines', '\n', lambda m: '\n' * random.randint(2, 5)),
    _build_junk_modifier('leading zeros added to integers', r'(^|[^.]\b)([0-9]+)\b', r'\g<1>0000000000\g<2>'),
    _build_junk_modifier('trailing zeros added to real number decimal portion', r'\.[0-9]+\b', r'\g<0>0000000000'),
    ('random junk added to the end of the file', lambda f: True, lambda f: f + ''.join(random.choice(string.printable) for _ in range(200))),
]
class InputFormatValidators(ProblemAspect):
    """Compiles and runs the problem's input format validators, and sanity
    checks that they reject junk input and slightly corrupted test data.

    Validator convention: exit code 42 means the input was accepted; any
    other exit code is a reject.
    """
    def __init__(self, problem):
        self._problem = problem
        input_validators_path = os.path.join(problem.probdir, 'input_format_validators')
        if os.path.isdir(input_validators_path):
            # Deprecated directory name; check() will emit a warning.
            self._uses_old_path = True
        else:
            self._uses_old_path = False
            new_input_validators_path = os.path.join(problem.probdir, 'input_validators')
            if os.path.isdir(new_input_validators_path):
                input_validators_path = new_input_validators_path
        self._validators = run.find_programs(input_validators_path,
                                             language_config=problem.language_config,
                                             allow_validation_script=True,
                                             work_dir=problem.tmpdir)
    def __str__(self):
        return 'input format validators'
    def check(self, args):
        """Compile every validator, then verify that junk inputs and corrupted
        copies of the real test data are rejected.  The boolean result is
        cached in self._check_res (inherited from ProblemAspect)."""
        if self._check_res is not None:
            return self._check_res
        if self._uses_old_path:
            self.warning('input_format_validators is a deprecated name; please use input_validators instead')
        self._check_res = True
        if len(self._validators) == 0:
            self.error('No input format validators found')
        # Iterate over a copy: failing validators are removed from the list.
        for val in self._validators[:]:
            try:
                success, msg = val.compile()
                if not success:
                    self.error('Compile error for %s' % val, msg)
                    self._validators.remove(val)
            except run.ProgramError as e:
                self.error(e)
        # Only sanity check input validators if they all actually compiled
        if self._check_res:
            # Gather every distinct set of validator flags used by any test
            # group that actually contains test cases.
            all_flags = set()
            def collect_flags(group, flags):
                if len(group.get_testcases()) > 0:
                    flags.add(group.config['input_validator_flags'])
                for subgroup in group.get_subgroups():
                    collect_flags(subgroup, flags)
            collect_flags(self._problem.testdata, all_flags)
            fd, file_name = tempfile.mkstemp()
            os.close(fd)
            for (desc, case) in _JUNK_CASES:
                f = open(file_name, "wb")
                f.write(case)
                f.close()
                for flags in all_flags:
                    flags = flags.split()
                    for val in self._validators:
                        status, _ = val.run(file_name, args=flags)
                        # 42 == accept; one rejecting validator is enough.
                        if os.WEXITSTATUS(status) != 42:
                            break
                    else:
                        # for/else: every validator accepted the junk case.
                        self.warning('No validator rejects %s with flags "%s"' % (desc, ' '.join(flags)))
            def modified_input_validates(applicable, modifier):
                # Returns True iff some real test case could be corrupted by
                # `modifier` and every validator still accepted the result.
                for testcase in self._problem.testdata.get_all_testcases():
                    with open(testcase.infile) as infile:
                        infile = infile.read()
                    if not applicable(infile):
                        continue
                    with open(file_name, "wb") as f:
                        f.write(modifier(infile).encode('utf8'))
                    for flags in all_flags:
                        flags = flags.split()
                        for val in self._validators:
                            status, _ = val.run(file_name, args=flags)
                            if os.WEXITSTATUS(status) != 42:
                                # expected behavior; validator rejects modified input
                                return False
                    # we found a file we could modify, and all validators
                    # accepted the modifications
                    return True
                # no files were modifiable
                return False
            for (desc, applicable, modifier) in _JUNK_MODIFICATIONS:
                if modified_input_validates(applicable, modifier):
                    self.warning('No validator rejects %s' % (desc,))
            os.unlink(file_name)
        return self._check_res
    def validate(self, testcase):
        """Run every validator on *testcase*'s input file with the test
        group's flags, reporting an error on the test case for each validator
        that crashes or rejects (exit code != 42)."""
        flags = testcase.testcasegroup.config['input_validator_flags'].split()
        # Ensure validators are compiled (check() caches its result).
        self.check(None)
        for val in self._validators:
            with tempfile.NamedTemporaryFile() as outfile, tempfile.NamedTemporaryFile() as errfile:
                status, _ = val.run(testcase.infile, outfile.name, errfile.name, args=flags)
                if not os.WIFEXITED(status):
                    emsg = 'Input format validator %s crashed on input %s' % (val, testcase.infile)
                elif os.WEXITSTATUS(status) != 42:
                    emsg = 'Input format validator %s did not accept input %s, exit code: %d' % (val, testcase.infile, os.WEXITSTATUS(status))
                else:
                    continue
                # Attach whatever the validator printed as additional info.
                validator_stdout = outfile.read().decode('utf-8', 'replace')
                validator_stderr = errfile.read().decode('utf-8', 'replace')
                validator_output = "\n".join(
                    out for out in [validator_stdout, validator_stderr] if out)
                testcase.error(emsg, validator_output)
class Graders(ProblemAspect):
    """Compiles and runs the problem's grader programs (or the default
    grader) to aggregate per-testcase results into a group verdict/score."""
    _default_grader = run.get_tool('default_grader')
    def __init__(self, problem):
        self._problem = problem
        self._graders = run.find_programs(os.path.join(problem.probdir, 'graders'),
                                          language_config=problem.language_config,
                                          work_dir=problem.tmpdir)
    def __str__(self):
        return 'graders'
    def check(self, args):
        """Compile all graders and verify that pass-fail problems have none.
        The boolean result is cached in self._check_res."""
        if self._check_res is not None:
            return self._check_res
        self._check_res = True
        if self._problem.config.get('type') == 'pass-fail' and len(self._graders) > 0:
            self.error('There are grader programs but the problem is pass-fail')
        for grader in self._graders:
            success, msg = grader.compile()
            if not success:
                self.error('Compile error for %s' % grader, msg)
        return self._check_res
    def grade(self, sub_results, testcasegroup, shadow_result=False):
        """Feed the per-testcase verdicts/scores to each grader and return
        the resulting (verdict, score) tuple; ('JE', None) on judge error.

        :param sub_results: list of per-testcase results (each has .verdict
            and .score)
        :param testcasegroup: group whose config selects grader and flags
        :param shadow_result: suppress the informational log line if True
        """
        if testcasegroup.config['grading'] == 'default':
            graders = [self._default_grader]
        else:
            graders = self._graders
        # One "<verdict> <score>" line per testcase; None scores become 0.
        grader_input = ''.join(['%s %s\n' % (r.verdict, 0 if r.score is None else r.score) for r in sub_results])
        grader_output_re = r'^((AC)|(WA)|(TLE)|(RTE)|(JE))\s+[0-9.]+\s*$'
        verdict = 'AC'
        score = 0
        grader_flags = testcasegroup.config['grader_flags'].split()
        self.debug('Grading %d results:\n%s' % (len(sub_results), grader_input))
        self.debug('Grader flags: %s' % grader_flags)
        for grader in graders:
            if grader is not None and grader.compile()[0]:
                fd, infile = tempfile.mkstemp()
                os.close(fd)
                fd, outfile = tempfile.mkstemp()
                os.close(fd)
                # Use context managers so the file handles are always closed
                # (the previous open(...).write(...) pattern leaked them).
                with open(infile, 'w') as f:
                    f.write(grader_input)
                status, runtime = grader.run(infile, outfile,
                                             args=grader_flags)
                with open(outfile, 'r') as f:
                    grader_output = f.read()
                os.remove(infile)
                os.remove(outfile)
                if not os.WIFEXITED(status):
                    self.error('Judge error: %s crashed' % grader)
                    self.debug('Grader input:\n%s' % grader_input)
                    return ('JE', None)
                ret = os.WEXITSTATUS(status)
                if ret != 0:
                    self.error('Judge error: exit code %d for grader %s, expected 0' % (ret, grader))
                    self.debug('Grader input: %s\n' % grader_input)
                    return ('JE', None)
                if not re.match(grader_output_re, grader_output):
                    self.error('Judge error: invalid format of grader output')
                    self.debug('Output must match: "%s"' % grader_output_re)
                    self.debug('Output was: "%s"' % grader_output)
                    return ('JE', None)
                verdict, score = grader_output.split()
                score = float(score)
        # TODO: check that all graders give same result
        if not shadow_result:
            self.info('Grade on %s is %s (%s)' % (testcasegroup, verdict, score))
        return (verdict, score)
class OutputValidators(ProblemAspect):
    """Compiles and runs the problem's output validators (or the built-in
    default validator) to judge submission output, in both batch and
    interactive mode.

    Validator convention: exit code 42 means accept, 43 means wrong answer;
    anything else is treated as a judge error.
    """
    _default_validator = run.get_tool('default_validator')
    def __init__(self, problem):
        self._problem = problem
        self._validators = run.find_programs(os.path.join(problem.probdir,
                                                          'output_validators'),
                                             language_config=problem.language_config,
                                             work_dir=problem.tmpdir)
    def __str__(self):
        return 'output validators'
    def check(self, args):
        """Check consistency between problem.yaml and the validator programs,
        compile them, and warn when junk output gets accepted.  The boolean
        result is cached in self._check_res."""
        if self._check_res is not None:
            return self._check_res
        self._check_res = True
        if self._problem.config.get('validation') == 'default' and self._validators:
            self.error('There are validator programs but problem.yaml has validation = "default"')
        elif self._problem.config.get('validation') != 'default' and not self._validators:
            self.error('problem.yaml specifies custom validator but no validator programs found')
        if self._problem.config.get('validation') == 'default' and self._default_validator is None:
            self.error('Unable to locate default validator')
        for val in self._validators[:]:
            try:
                success, msg = val.compile()
                if not success:
                    self.error('Compile error for output validator %s' % val, msg)
            except run.ProgramError as e:
                self.error(e)
        # Only sanity check output validators if they all actually compiled
        if self._check_res:
            flags = self._problem.config.get('validator_flags')
            fd, file_name = tempfile.mkstemp()
            os.close(fd)
            for (desc, case) in _JUNK_CASES:
                f = open(file_name, "wb")
                f.write(case)
                f.close()
                rejected = False
                # The junk file is validated as the "output" for every test
                # case; at least one of them should reject it.
                for testcase in self._problem.testdata.get_all_testcases():
                    result = self.validate(testcase, file_name)
                    if result.verdict != 'AC':
                        rejected = True
                    if result.verdict == 'JE':
                        self.error('%s as output, and output validator flags "%s" gave %s' % (desc, ' '.join(flags), result))
                        break
                if not rejected:
                    self.warning('%s gets AC' % (desc))
            os.unlink(file_name)
        return self._check_res
    @staticmethod
    def __get_feedback(feedback_dir):
        """Concatenate the non-empty feedback files a validator wrote into a
        single string, or return None when there was no feedback at all."""
        all_feedback = []
        for feedback_file in os.listdir(feedback_dir):
            feedback_path = os.path.join(feedback_dir, feedback_file)
            if os.path.getsize(feedback_path) == 0:
                continue
            all_feedback.append('=== %s: ===' % feedback_file)
            # FIXME handle feedback files containing non-text
            with open(feedback_path, 'r') as feedback:
                # Cap amount of feedback per file at some high-ish
                # size, so that a buggy validator spewing out lots of
                # data doesn't kill us.
                all_feedback.append(feedback.read(128*1024))
        if all_feedback:
            return '\n'.join(all_feedback)
        return None
    def _parse_validator_results(self, val, status, feedbackdir, testcase):
        """Translate a validator's raw exit status and feedback directory into
        a SubmissionResult (42 -> AC, 43 -> WA, anything else -> JE),
        reading score.txt when custom scoring is active."""
        custom_score = self._problem.config.get('grading')['custom_scoring']
        score = None
        # TODO: would be good to have some way of displaying the feedback for debugging uses
        score_file = os.path.join(feedbackdir, 'score.txt')
        if not custom_score and os.path.isfile(score_file):
            return SubmissionResult('JE', reason='validator produced "score.txt" but problem does not have custom scoring activated')
        if not os.WIFEXITED(status):
            return SubmissionResult('JE',
                                    reason='output validator %s crashed, status %d' % (val, status),
                                    additional_info=OutputValidators.__get_feedback(feedbackdir))
        ret = os.WEXITSTATUS(status)
        if ret not in [42, 43]:
            return SubmissionResult('JE',
                                    reason='output validator %s exited with status %d' % (val, ret),
                                    additional_info=OutputValidators.__get_feedback(feedbackdir))
        if ret == 43:
            return SubmissionResult('WA', additional_info=OutputValidators.__get_feedback(feedbackdir))
        if custom_score:
            if os.path.isfile(score_file):
                try:
                    score_str = open(score_file).read()
                    score = float(score_str)
                except Exception as e:
                    return SubmissionResult('JE', reason='failed to parse validator score: %s' % e)
            else:
                return SubmissionResult('JE', reason='problem has custom scoring but validator did not produce "score.txt"')
        return SubmissionResult('AC', score=score)
    def _actual_validators(self):
        """Return the validators to actually run: the custom ones, or just
        the default validator when problem.yaml says validation = "default"."""
        vals = self._validators
        if self._problem.config.get('validation') == 'default':
            vals = [self._default_validator]
        return vals
    def validate_interactive(self, testcase, submission, timelim, errorhandler):
        """Run *submission* against *testcase* through the 'interactive'
        runner with each output validator attached, returning the combined
        SubmissionResult."""
        # Expected one-line report from the interactive runner:
        # "<val_status> <val_time> <sub_status> <sub_time> <who_exited_first>"
        interactive_output_re = r'\d+ \d+\.\d+ \d+ \d+\.\d+ (validator|submission)'
        res = SubmissionResult('JE')
        interactive = run.get_tool('interactive')
        if interactive is None:
            errorhandler.error('Could not locate interactive runner')
            return res
        # file descriptor, wall time lim
        initargs = ['1', str(2 * timelim)]
        # Placeholder third arg is replaced with the feedback dir per run.
        validator_args = [testcase.infile, testcase.ansfile, '<feedbackdir>']
        submission_args = submission.get_runcmd(memlim=self._problem.config.get('limits')['memory'])
        val_timelim = self._problem.config.get('limits')['validation_time']
        val_memlim = self._problem.config.get('limits')['validation_memory']
        for val in self._actual_validators():
            if val is not None and val.compile()[0]:
                feedbackdir = tempfile.mkdtemp(prefix='feedback', dir=self._problem.tmpdir)
                validator_args[2] = feedbackdir + os.sep
                f = tempfile.NamedTemporaryFile(delete=False)
                interactive_out = f.name
                f.close()
                i_status, _ = interactive.run(outfile=interactive_out,
                                              args=initargs + val.get_runcmd(memlim=val_memlim) + validator_args + [';'] + submission_args)
                if is_RTE(i_status):
                    errorhandler.error('Interactive crashed, status %d' % i_status)
                else:
                    interactive_output = open(interactive_out).read()
                    errorhandler.debug('Interactive output: "%s"' % interactive_output)
                    if not re.match(interactive_output_re, interactive_output):
                        errorhandler.error('Output from interactive does not follow expected format, got output "%s"' % interactive_output)
                    else:
                        val_status, _, sub_status, sub_runtime, first = interactive_output.split()
                        sub_status = int(sub_status)
                        sub_runtime = float(sub_runtime)
                        val_status = int(val_status)
                        val_JE = not os.WIFEXITED(val_status) or os.WEXITSTATUS(val_status) not in [42, 43]
                        val_WA = os.WIFEXITED(val_status) and os.WEXITSTATUS(val_status) == 43
                        if val_JE or (val_WA and first == 'validator'):
                            # If the validator crashed, or exited first with WA,
                            # always follow validator verdict, even if that early
                            # exit caused the submission to behave erratically and
                            # time out.
                            if sub_runtime > timelim:
                                sub_runtime = timelim
                            res = self._parse_validator_results(val, val_status, feedbackdir, testcase)
                        elif is_TLE(sub_status, True):
                            res = SubmissionResult('TLE')
                        elif is_RTE(sub_status):
                            res = SubmissionResult('RTE')
                        else:
                            res = self._parse_validator_results(val, val_status, feedbackdir, testcase)
                        res.runtime = sub_runtime
                        res.validator_first = (first == 'validator')
                os.unlink(interactive_out)
                shutil.rmtree(feedbackdir)
                if res.verdict != 'AC':
                    return res
        # TODO: check that all output validators give same result
        return res
    def validate(self, testcase, submission_output):
        """Validate *submission_output* against *testcase* with every active
        output validator, returning the first non-AC result, or AC."""
        res = SubmissionResult('JE')
        val_timelim = self._problem.config.get('limits')['validation_time']
        val_memlim = self._problem.config.get('limits')['validation_memory']
        flags = self._problem.config.get('validator_flags').split() + testcase.testcasegroup.config['output_validator_flags'].split()
        for val in self._actual_validators():
            if val is not None and val.compile()[0]:
                feedbackdir = tempfile.mkdtemp(prefix='feedback', dir=self._problem.tmpdir)
                status, runtime = val.run(submission_output,
                                          args=[testcase.infile, testcase.ansfile, feedbackdir] + flags,
                                          timelim=val_timelim, memlim=val_memlim)
                res = self._parse_validator_results(val, status, feedbackdir, testcase)
                shutil.rmtree(feedbackdir)
                if res.verdict != 'AC':
                    return res
        # TODO: check that all output validators give same result
        return res
class Submissions(ProblemAspect):
    """Runs the example submissions and checks that each one gets its
    expected verdict; as a side effect, the AC runtimes determine the
    problem's time limit."""
    _SUB_REGEXP = re.compile(r'^[a-zA-Z0-9][a-zA-Z0-9_.-]*[a-zA-Z0-9](\.c\+\+)?$')
    # (verdict, directory, required)
    _VERDICTS = [
        ['AC', 'accepted', True],
        ['PAC', 'partially_accepted', False],
        ['WA', 'wrong_answer', False],
        ['RTE', 'run_time_error', False],
        ['TLE', 'time_limit_exceeded', False],
    ]
    def __init__(self, problem):
        self._submissions = {}
        self._problem = problem
        srcdir = os.path.join(problem.probdir, 'submissions')
        # One program list per verdict acronym, keyed by 'AC', 'WA', etc.
        for verdict in Submissions._VERDICTS:
            acr = verdict[0]
            self._submissions[acr] = run.find_programs(os.path.join(srcdir, verdict[1]),
                                                       language_config=problem.language_config,
                                                       pattern=Submissions._SUB_REGEXP,
                                                       work_dir=problem.tmpdir,
                                                       include_dir=os.path.join(problem.probdir,
                                                                                'include'))
    def __str__(self):
        return 'submissions'
    def check_submission(self, sub, args, expected_verdict, timelim, timelim_low, timelim_high):
        """Run one submission at both the nominal and extended time limits
        and report whether its verdict matches *expected_verdict*.  Returns
        the result obtained at the nominal limit."""
        desc = '%s submission %s' % (expected_verdict, sub)
        partial = False
        if expected_verdict == 'PAC':
            # For partially accepted solutions, use the low timelim instead of the real one,
            # to make sure we have margin in both directions.
            expected_verdict = 'AC'
            partial = True
            timelim = timelim_low
        result1, result2 = self._problem.testdata.run_submission(sub, args, timelim, timelim_high)
        if result1.verdict == 'AC' and expected_verdict == 'AC' and not partial and result1.sample_failures:
            res = result1.sample_failures[0]
            self.warning('%s got %s on sample: %s' % (desc, res.verdict, res))
        if result1.verdict != result2.verdict or result1.score != result2.score:
            r1, r2 = (result1, result2) if result1.verdict == result2.verdict else (result1.verdict, result2.verdict)
            self.warning('%s sensitive to time limit: limit of %s secs -> %s, limit of %s secs -> %s' % (desc, timelim, r1, timelim_high, r2))
        if partial and self.fully_accepted(result1):
            self.warning('%s got %s' % (desc, result1))
        elif result1.verdict == expected_verdict:
            self.msg('   %s OK: %s' % (desc, result1))
            if (expected_verdict == 'AC' and not partial
                    and not self.fully_accepted(result1)
                    and self.full_score_finite()):
                # For some heuristic problems, this is expected. Thus, only warn.
                self.warning('%s did not attain full score (consider moving it to partially_accepted)' % desc)
        elif result2.verdict == expected_verdict and not (partial and self.fully_accepted(result2)):
            self.msg('   %s OK with extra time: %s' % (desc, result2))
        else:
            self.error('%s got %s' % (desc, result1), result2.additional_info)
        return result1
    def full_score_finite(self):
        """Return True if the best attainable score is a finite number."""
        min_score, max_score = self._problem.testdata.get_score_range()
        if self._problem.config.get('grading')['objective'] == 'min':
            return min_score != -float('inf')
        else:
            return max_score != float('inf')
    def fully_accepted(self, result):
        """Return True if *result* is AC and (for scoring problems) attains
        the best possible score."""
        min_score, max_score = self._problem.testdata.get_score_range()
        best_score = min_score if self._problem.config.get('grading')['objective'] == 'min' else max_score
        return result.verdict == 'AC' and (not self._problem.is_scoring or result.score == best_score)
    def check(self, args):
        """Run all example submissions, derive the time limit from the
        slowest AC runtime, and write it into limits['time'].  The boolean
        result is cached in self._check_res."""
        if self._check_res is not None:
            return self._check_res
        self._check_res = True
        limits = self._problem.config.get('limits')
        time_multiplier = limits['time_multiplier']
        safety_margin = limits['time_safety_margin']
        # Generous defaults until the AC runtimes refine them below.
        timelim_margin_lo = 300  # 5 minutes
        timelim_margin = 300
        timelim = 300
        if 'time_for_AC_submissions' in limits:
            timelim = timelim_margin = limits['time_for_AC_submissions']
        if args.fixed_timelim is not None:
            timelim = args.fixed_timelim
            timelim_margin = int(round(timelim * safety_margin))
        for verdict in Submissions._VERDICTS:
            acr = verdict[0]
            if verdict[2] and not self._submissions[acr]:
                self.error('Require at least one "%s" submission' % verdict[1])
            runtimes = []
            for sub in self._submissions[acr]:
                if args.submission_filter.search(os.path.join(verdict[1], sub.name)):
                    self.info('Check %s submission %s' % (acr, sub))
                    if sub.code_size() > 1024*limits['code']:
                        self.error('%s submission %s has size %.1f kiB, exceeds code size limit of %d kiB' %
                                   (acr, sub, sub.code_size() / 1024.0, limits['code']))
                        continue
                    success, msg = sub.compile()
                    if not success:
                        self.error('Compile error for %s submission %s' % (acr, sub),
                                   additional_info=msg)
                        continue
                    res = self.check_submission(sub, args, acr, timelim, timelim_margin_lo, timelim_margin)
                    runtimes.append(res.runtime)
            if acr == 'AC':
                # AC submissions run first (_VERDICTS order), so the limits
                # derived here apply to all the non-AC categories below.
                if len(runtimes) > 0:
                    max_runtime = max(runtimes)
                    exact_timelim = max_runtime * time_multiplier
                    max_runtime = '%.3f' % max_runtime
                    timelim = max(1, int(0.5 + exact_timelim))
                    timelim_margin_lo = max(1, min(int(0.5 + exact_timelim / safety_margin), timelim - 1))
                    timelim_margin = max(timelim + 1,
                                         int(0.5 + exact_timelim * safety_margin))
                else:
                    max_runtime = None
                if args.fixed_timelim is not None and args.fixed_timelim != timelim:
                    self.msg("   Solutions give timelim of %d seconds, but will use provided fixed limit of %d seconds instead" % (timelim, args.fixed_timelim))
                    timelim = args.fixed_timelim
                    # NOTE(review): unlike the computation above this margin is
                    # not rounded to an int — confirm fractional margins are ok.
                    timelim_margin = timelim * safety_margin
                self.msg("   Slowest AC runtime: %s, setting timelim to %d secs, safety margin to %d secs" % (max_runtime, timelim, timelim_margin))
            limits['time'] = timelim
        return self._check_res
# The checkable parts of a problem package, in the order they are verified.
PROBLEM_PARTS = ['config', 'statement', 'validators', 'graders', 'data', 'submissions']
class Problem(ProblemAspect):
    """A problem package directory.  Used as a context manager: __enter__
    creates a temporary working directory and loads all problem aspects,
    __exit__ removes the temporary directory again."""
    def __init__(self, probdir):
        self.probdir = os.path.realpath(probdir)
        self.shortname = os.path.basename(self.probdir)
        self.language_config = languages.load_language_config()
    def __enter__(self):
        self.tmpdir = tempfile.mkdtemp(prefix='verify-%s-'%self.shortname)
        if not os.path.isdir(self.probdir):
            self.error("Problem directory '%s' not found" % self.probdir)
            # shortname = None marks the problem as unloadable; check()
            # bails out early when it sees this.
            self.shortname = None
            return self
        self.statement = ProblemStatement(self)
        self.attachments = Attachments(self)
        self.config = ProblemConfig(self)
        self.is_interactive = 'interactive' in self.config.get('validation-params')
        self.is_scoring = (self.config.get('type') == 'scoring')
        self.input_format_validators = InputFormatValidators(self)
        self.output_validators = OutputValidators(self)
        self.graders = Graders(self)
        self.testcase_by_infile = {}
        self.testdata = TestCaseGroup(self, os.path.join(self.probdir, 'data'))
        self.submissions = Submissions(self)
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        shutil.rmtree(self.tmpdir)
    def __str__(self):
        return self.shortname
    def check(self, args=None):
        """Verify the parts of the problem requested in *args* and return
        [error_count, warning_count]."""
        if self.shortname is None:
            # Problem failed to load in __enter__; one error, no warnings.
            return [1, 0]
        if args is None:
            args = default_args()
        # Counters are class-level on ProblemAspect and shared by all aspects.
        ProblemAspect.errors = 0
        ProblemAspect.warnings = 0
        ProblemAspect.bail_on_error = args.bail_on_error
        ProblemAspect.consider_warnings_errors = args.werror
        try:
            part_mapping = {'config': [self.config],
                            'statement': [self.statement, self.attachments],
                            'validators': [self.input_format_validators, self.output_validators],
                            'graders': [self.graders],
                            'data': [self.testdata],
                            'submissions': [self.submissions]}
            if not re.match('^[a-z0-9]+$', self.shortname):
                self.error("Invalid shortname '%s' (must be [a-z0-9]+)" % self.shortname)
            run.limit.check_limit_capabilities(self)
            for part in args.parts:
                self.msg('Checking %s' % part)
                for item in part_mapping[part]:
                    item.check(args)
        except VerifyError:
            # Raised when bail_on_error is set; counters already updated.
            pass
        return [ProblemAspect.errors, ProblemAspect.warnings]
def re_argument(s):
    """argparse type function: compile *s* as a regex, or raise
    ArgumentTypeError if it is not a valid pattern."""
    try:
        return re.compile(s)
    except re.error:
        raise argparse.ArgumentTypeError('%s is not a valid regex' % s)
def part_argument(s):
    """argparse type function: accept only names listed in PROBLEM_PARTS."""
    if s in PROBLEM_PARTS:
        return s
    raise argparse.ArgumentTypeError("Invalid problem part specified: %s" % s)
def argparser():
    """Build the command line parser for the verifier."""
    arg_parser = argparse.ArgumentParser(description='Validate a problem package in the Kattis problem format.')
    # Filters restricting which submissions / data files are exercised.
    arg_parser.add_argument('-s', '--submission_filter', metavar='SUBMISSIONS',
                            type=re_argument, default=re.compile('.*'),
                            help='run only submissions whose name contains this regex.  The name includes category (accepted, wrong_answer, etc), e.g. "accepted/hello.java" (for a single file submission) or "wrong_answer/hello" (for a directory submission)')
    arg_parser.add_argument('-d', '--data_filter', metavar='DATA',
                            type=re_argument, default=re.compile('.*'),
                            help='use only data files whose name contains this regex.  The name includes path relative to the data directory but not the extension, e.g. "sample/hello" for a sample data file')
    arg_parser.add_argument('-t', '--fixed_timelim',
                            type=int,
                            help='use this fixed time limit (useful in combination with -d and/or -s when all AC submissions might not be run on all data)')
    arg_parser.add_argument('-p', '--parts', metavar='PROBLEM_PART',
                            type=part_argument, nargs='+', default=PROBLEM_PARTS,
                            help='only test the indicated parts of the problem.  Each PROBLEM_PART can be one of %s.' % PROBLEM_PARTS)
    arg_parser.add_argument('-b', '--bail_on_error',
                            action='store_true',
                            help='bail verification on first error')
    arg_parser.add_argument('-l', '--log_level',
                            default='warning',
                            help='set log level (debug, info, warning, error, critical)')
    arg_parser.add_argument('-e', '--werror',
                            action='store_true',
                            help='consider warnings as errors')
    arg_parser.add_argument('--max_additional_info',
                            type=int, default=15,
                            help='maximum number of lines of additional info (e.g. compiler output or validator feedback) to display about an error (set to 0 to disable additional info)')
    # One or more problem package directories to verify.
    arg_parser.add_argument('problemdir', nargs='+')
    return arg_parser
def default_args():
    """Return the default argument namespace, as used when check() is called
    without an explicit args object."""
    parser = argparser()
    # Parse with a single placeholder value for the required problemdir arg.
    return parser.parse_args([None])
def main():
    """Command line entry point: verify each given problem directory and
    exit with status 1 if any errors were found."""
    args = argparser().parse_args()
    ProblemAspect.max_additional_info = args.max_additional_info

    fmt = "%(levelname)s %(message)s"
    # Resolve the log level by attribute lookup rather than eval() so a
    # malformed --log_level value cannot execute arbitrary code; an unknown
    # level still fails with AttributeError, exactly as before.
    logging.basicConfig(stream=sys.stdout,
                        format=fmt,
                        level=getattr(logging, args.log_level.upper()))

    total_errors = 0
    for problemdir in args.problemdir:
        print('Loading problem %s' % os.path.basename(os.path.realpath(problemdir)))
        with Problem(problemdir) as prob:
            [errors, warnings] = prob.check(args)

            def p(x):
                # Pluralisation helper for the summary line.
                return '' if x == 1 else 's'

            print("%s tested: %d error%s, %d warning%s" % (prob.shortname, errors, p(errors), warnings, p(warnings)))
        total_errors += errors

    sys.exit(1 if total_errors > 0 else 0)
| 43.443798 | 254 | 0.577508 |
ace23d60a369472f4f389ad819258cb1593aa8f1 | 47,863 | py | Python | oasislmf/model_execution/bash.py | JBADougBurns/OasisLMF | df43fb6038cd9f5b485f945099855666e6403c2d | [
"BSD-3-Clause"
] | null | null | null | oasislmf/model_execution/bash.py | JBADougBurns/OasisLMF | df43fb6038cd9f5b485f945099855666e6403c2d | [
"BSD-3-Clause"
] | null | null | null | oasislmf/model_execution/bash.py | JBADougBurns/OasisLMF | df43fb6038cd9f5b485f945099855666e6403c2d | [
"BSD-3-Clause"
] | null | null | null | import io
import os
import random
import re
import string
from collections import Counter
RUNTYPE_GROUNDUP_LOSS = 'gul'
RUNTYPE_INSURED_LOSS = 'il'
RUNTYPE_REINSURANCE_LOSS = 'ri'
WAIT_PROCESSING_SWITCHES = {
'full_uncertainty_aep': '-F',
'wheatsheaf_aep': '-W',
'sample_mean_aep': '-S',
'full_uncertainty_oep': '-f',
'wheatsheaf_oep': '-w',
'sample_mean_oep': '-s',
'wheatsheaf_mean_aep': '-M',
'wheatsheaf_mean_oep': '-m',
}
def print_command(command_file, cmd):
"""
Writes the supplied command to the end of the generated script
:param command_file: File to append command to.
:param cmd: The command to append
"""
with io.open(command_file, "a", encoding='utf-8') as myfile:
myfile.writelines(cmd + "\n")
def leccalc_enabled(lec_options):
"""
Checks if leccalc is enabled in the leccalc options
:param lec_options: The leccalc options from the analysis settings
:type lec_options: dict
:return: True is leccalc is enables, False otherwise.
"""
# Note: Backwards compatibility of "outputs" in lec_options
if "outputs" in lec_options:
lec_options = lec_options["outputs"]
for option in lec_options:
if option in WAIT_PROCESSING_SWITCHES and lec_options[option]:
return True
return False
def do_post_wait_processing(
    runtype,
    analysis_settings,
    filename,
    process_counter,
    work_sub_dir='',
    output_dir='output/'
):
    """Append the aalcalc and leccalc commands that run after the main event
    loop has completed.

    Each emitted command is backgrounded and records its pid in an
    ``lpid<n>`` shell variable so the script can wait on it later;
    process_counter['lpid_monitor_count'] tracks the running count.
    """
    if '{}_summaries'.format(runtype) not in analysis_settings:
        return
    for summary in analysis_settings['{}_summaries'.format(runtype)]:
        if "id" in summary:
            summary_set = summary['id']
            if summary.get('aalcalc'):
                # Average annual loss from the per-process work directory.
                cmd = 'aalcalc -K{}{}_S{}_summaryaalcalc'.format(
                    work_sub_dir,
                    runtype,
                    summary_set
                )

                process_counter['lpid_monitor_count'] += 1
                cmd = '{} > {}{}_S{}_aalcalc.csv'.format(
                    cmd, output_dir, runtype, summary_set
                )
                cmd = '{} & lpid{}=$!'.format(cmd, process_counter['lpid_monitor_count'])
                print_command(filename, cmd)

            if summary.get('lec_output'):
                leccalc = summary.get('leccalc', {})
                if leccalc and leccalc_enabled(leccalc):
                    cmd = 'leccalc {} -K{}{}_S{}_summaryleccalc'.format(
                        '-r' if leccalc.get('return_period_file') else '',
                        work_sub_dir,
                        runtype,
                        summary_set
                    )

                    # Note: Backwards compatibility of "outputs" in lec_options
                    if "outputs" in leccalc:
                        leccalc = leccalc["outputs"]

                    process_counter['lpid_monitor_count'] += 1
                    # One output switch/csv per enabled leccalc option, in
                    # sorted option order for deterministic script output.
                    for option, active in sorted(leccalc.items()):
                        if active and option in WAIT_PROCESSING_SWITCHES:
                            switch = WAIT_PROCESSING_SWITCHES.get(option, '')
                            cmd = '{} {} {}{}_S{}_leccalc_{}.csv'.format(
                                cmd, switch, output_dir, runtype, summary_set,
                                option
                            )

                    cmd = '{} & lpid{}=$!'.format(cmd, process_counter['lpid_monitor_count'])
                    print_command(filename, cmd)
def do_fifos_exec(runtype, max_process_id, filename, fifo_dir, action='mkfifo'):
    """Emit one *action* (mkfifo/rm) line per process for the main loss
    stream fifos, followed by a blank separator line."""
    for pid in range(1, max_process_id + 1):
        print_command(filename, '{} {}{}_P{}'.format(action, fifo_dir, runtype, pid))
    print_command(filename, '')
def do_fifos_calc(runtype, analysis_settings, max_process_id,
                  filename, fifo_dir='fifo/', action='mkfifo'):
    """Emit fifo create/remove commands for every summary output stream.

    For each process and summary set this handles the main summary fifo plus,
    for every enabled per-event output (eltcalc/summarycalc/pltcalc), a pair
    of fifos: one feeding the calculation and one carrying its csv output.
    """
    summaries = analysis_settings.get('{}_summaries'.format(runtype))
    if not summaries:
        return

    for process_id in range(1, max_process_id + 1):
        for summary in summaries:
            if 'id' not in summary:
                continue
            summary_set = summary['id']
            print_command(filename, '{} {}{}_S{}_summary_P{}'.format(action, fifo_dir, runtype, summary_set, process_id))

            # The three optional per-event outputs all follow the same
            # fifo naming pattern.
            for calc in ('eltcalc', 'summarycalc', 'pltcalc'):
                if summary.get(calc):
                    print_command(
                        filename,
                        '{} {}{}_S{}_summary{}_P{}'.format(action, fifo_dir, runtype, summary_set, calc, process_id)
                    )
                    print_command(
                        filename,
                        '{} {}{}_S{}_{}_P{}'.format(action, fifo_dir, runtype, summary_set, calc, process_id)
                    )

    print_command(filename, '')
def create_workfolders(runtype, analysis_settings, filename, work_dir='work/'):
    """Emit mkdir commands for the per-summary-set work directories required
    by leccalc and aalcalc."""
    summaries = analysis_settings.get('{}_summaries'.format(runtype))
    if not summaries:
        return

    for summary in summaries:
        if 'id' not in summary:
            continue
        summary_set = summary['id']
        if summary.get('lec_output') and leccalc_enabled(summary['leccalc']):
            print_command(
                filename,
                'mkdir {}{}_S{}_summaryleccalc'.format(work_dir, runtype, summary_set)
            )
        if summary.get('aalcalc'):
            print_command(
                filename,
                'mkdir {}{}_S{}_summaryaalcalc'.format(work_dir, runtype, summary_set)
            )
def do_kats(
    runtype,
    analysis_settings,
    max_process_id,
    filename,
    process_counter,
    work_dir='work/kat/',
    output_dir='output/'
):
    """Emit kat commands that concatenate the per-process csv fragments of
    each enabled output (eltcalc, pltcalc, summarycalc) into the final csv.

    Each kat is backgrounded and its pid stored in a ``kpid<n>`` shell
    variable, counted via process_counter['kpid_monitor_count'].  Returns
    True if any kat command was written, False otherwise.
    """
    summaries = analysis_settings.get('{}_summaries'.format(runtype))
    if not summaries:
        return False

    anykats = False
    for summary in summaries:
        if 'id' not in summary:
            continue
        summary_set = summary['id']
        # Every enabled output is stitched together the same way; keep the
        # original emission order: eltcalc, pltcalc, summarycalc.
        for calc in ('eltcalc', 'pltcalc', 'summarycalc'):
            if not summary.get(calc):
                continue
            anykats = True
            cmd = 'kat'
            for process_id in range(1, max_process_id + 1):
                cmd = '{} {}{}_S{}_{}_P{}'.format(
                    cmd, work_dir, runtype, summary_set, calc, process_id
                )
            process_counter['kpid_monitor_count'] += 1
            cmd = '{} > {}{}_S{}_{}.csv & kpid{}=$!'.format(
                cmd, output_dir, runtype, summary_set, calc,
                process_counter['kpid_monitor_count']
            )
            print_command(filename, cmd)

    return anykats
def do_summarycalcs(
    runtype,
    analysis_settings,
    process_id,
    filename,
    fifo_dir='fifo/',
    stderr_guard=True,
    num_reinsurance_iterations=0,
    gul_alloc_rule=None,
):
    """Append one summarycalc command reading this process's loss stream fifo
    and fanning out to one summary fifo per summary set.

    The input-stream switch depends on the run type: -f for il/ri, and for
    gul either -i (item stream, when an alloc rule is set) or -g (coverage
    stream).  For ri the final inuring level's directory is selected via -p.
    """
    summaries = analysis_settings.get('{}_summaries'.format(runtype))
    if not summaries:
        return

    if process_id == 1:
        print_command(filename, '')

    summarycalc_switch = '-f'
    if runtype == RUNTYPE_GROUNDUP_LOSS:
        if gul_alloc_rule:
            # Accept item stream only
            summarycalc_switch = '-i'
        else:
            # gul coverage stream
            summarycalc_switch = '-g'

    summarycalc_directory_switch = ""
    if runtype == RUNTYPE_REINSURANCE_LOSS:
        i = num_reinsurance_iterations
        summarycalc_directory_switch = "-p RI_{0}".format(i)

    cmd = 'summarycalc {} {}'.format(summarycalc_switch, summarycalc_directory_switch)
    # One -<set> output fifo per summary set.
    for summary in summaries:
        if 'id' in summary:
            summary_set = summary['id']
            cmd = '{0} -{1} {4}{2}_S{1}_summary_P{3}'.format(cmd, summary_set, runtype, process_id, fifo_dir)

    cmd = '{0} < {1}{2}_P{3}'.format(cmd, fifo_dir, runtype, process_id)
    cmd = '( {0} ) 2>> log/stderror.err &'.format(cmd) if stderr_guard else '{0} &'.format(cmd)  # Wrap in subshell and pipe stderr to file
    print_command(filename, cmd)
def do_tees(runtype, analysis_settings, process_id, filename, process_counter, fifo_dir='fifo/', work_dir='work/'):
    """Append one tee command per summary set that duplicates the summary
    stream into the fifos/work files of every enabled downstream output.

    Each tee is backgrounded with its pid stored in a ``pid<n>`` shell
    variable, counted via process_counter['pid_monitor_count'].
    """
    summaries = analysis_settings.get('{}_summaries'.format(runtype))
    if not summaries:
        return

    if process_id == 1:
        print_command(filename, '')

    for summary in summaries:
        if 'id' in summary:
            process_counter['pid_monitor_count'] += 1
            summary_set = summary['id']
            cmd = 'tee < {}{}_S{}_summary_P{}'.format(fifo_dir, runtype, summary_set, process_id)

            # Fifos feed the per-event calculations; aalcalc/leccalc consume
            # binary files from the work directory after the run instead.
            if summary.get('eltcalc'):
                cmd = '{} {}{}_S{}_summaryeltcalc_P{}'.format(cmd, fifo_dir, runtype, summary_set, process_id)
            if summary.get('pltcalc'):
                cmd = '{} {}{}_S{}_summarypltcalc_P{}'.format(cmd, fifo_dir, runtype, summary_set, process_id)
            if summary.get('summarycalc'):
                cmd = '{} {}{}_S{}_summarysummarycalc_P{}'.format(cmd, fifo_dir, runtype, summary_set, process_id)
            if summary.get('aalcalc'):
                cmd = '{} {}{}_S{}_summaryaalcalc/P{}.bin'.format(cmd, work_dir, runtype, summary_set, process_id)
            if summary.get('lec_output') and leccalc_enabled(summary['leccalc']):
                cmd = '{} {}{}_S{}_summaryleccalc/P{}.bin'.format(cmd, work_dir, runtype, summary_set, process_id)

            cmd = '{} > /dev/null & pid{}=$!'.format(cmd, process_counter['pid_monitor_count'])
            print_command(filename, cmd)
def do_any(runtype, analysis_settings, process_id, filename, process_counter, fifo_dir='fifo/', work_dir='work/'):
    """Append the eltcalc/summarycalctocsv/pltcalc commands that consume the
    teed summary fifos and write per-process csv fragments under work/kat.

    Each command is backgrounded with its pid stored in a ``pid<n>`` shell
    variable, counted via process_counter['pid_monitor_count'].
    """
    summaries = analysis_settings.get('{}_summaries'.format(runtype))
    if not summaries:
        return

    if process_id == 1:
        print_command(filename, '')

    for summary in summaries:
        if 'id' in summary:
            summary_set = summary['id']
            if summary.get('eltcalc'):
                # Processes after the first run with -s — presumably so only
                # process 1 emits the csv header; confirm against ktools docs.
                cmd = 'eltcalc -s'
                if process_id == 1:
                    cmd = 'eltcalc'

                process_counter['pid_monitor_count'] += 1
                print_command(
                    filename,
                    "{3} < {5}{0}_S{1}_summaryeltcalc_P{2} > {6}kat/{0}_S{1}_eltcalc_P{2} & pid{4}=$!".format(
                        runtype, summary_set, process_id, cmd, process_counter['pid_monitor_count'], fifo_dir, work_dir
                    )
                )
            if summary.get("summarycalc"):
                cmd = 'summarycalctocsv -s'
                if process_id == 1:
                    cmd = 'summarycalctocsv'

                process_counter['pid_monitor_count'] += 1
                print_command(
                    filename,
                    '{3} < {5}{0}_S{1}_summarysummarycalc_P{2} > {6}kat/{0}_S{1}_summarycalc_P{2} & pid{4}=$!'.format(
                        runtype, summary_set, process_id, cmd, process_counter['pid_monitor_count'], fifo_dir, work_dir
                    )
                )
            if summary.get('pltcalc'):
                cmd = 'pltcalc -s'
                if process_id == 1:
                    cmd = 'pltcalc'

                process_counter['pid_monitor_count'] += 1
                print_command(
                    filename,
                    '{3} < {5}{0}_S{1}_summarypltcalc_P{2} > {6}kat/{0}_S{1}_pltcalc_P{2} & pid{4}=$!'.format(
                        runtype, summary_set, process_id, cmd, process_counter['pid_monitor_count'], fifo_dir, work_dir
                    )
                )
def ri(analysis_settings, max_process_id, filename, process_counter, num_reinsurance_iterations, fifo_dir='fifo/', work_dir='work/', stderr_guard=True):
    """
    Emit the reinsurance-loss compute stages for every process: all the
    calc commands first, then the tee fan-outs, then the summarycalcs.
    """
    pids = range(1, max_process_id + 1)
    for pid in pids:
        do_any(RUNTYPE_REINSURANCE_LOSS, analysis_settings, pid, filename, process_counter, fifo_dir, work_dir)
    for pid in pids:
        do_tees(RUNTYPE_REINSURANCE_LOSS, analysis_settings, pid, filename, process_counter, fifo_dir, work_dir)
    for pid in pids:
        do_summarycalcs(
            runtype=RUNTYPE_REINSURANCE_LOSS,
            analysis_settings=analysis_settings,
            process_id=pid,
            filename=filename,
            fifo_dir=fifo_dir,
            stderr_guard=stderr_guard,
            num_reinsurance_iterations=num_reinsurance_iterations,
        )
def il(analysis_settings, max_process_id, filename, process_counter, fifo_dir='fifo/', work_dir='work/', stderr_guard=True):
    """
    Emit the insured-loss compute stages for every process: all the calc
    commands first, then the tee fan-outs, then the summarycalcs.
    """
    pids = range(1, max_process_id + 1)
    for pid in pids:
        do_any(RUNTYPE_INSURED_LOSS, analysis_settings, pid, filename, process_counter, fifo_dir, work_dir)
    for pid in pids:
        do_tees(RUNTYPE_INSURED_LOSS, analysis_settings, pid, filename, process_counter, fifo_dir, work_dir)
    for pid in pids:
        do_summarycalcs(
            runtype=RUNTYPE_INSURED_LOSS,
            analysis_settings=analysis_settings,
            process_id=pid,
            filename=filename,
            fifo_dir=fifo_dir,
            stderr_guard=stderr_guard,
        )
def do_gul(
    analysis_settings,
    max_process_id,
    filename,
    process_counter,
    fifo_dir='fifo/',
    work_dir='work/',
    gul_alloc_rule=None,
    stderr_guard=True,
    full_correlation=False
):
    """
    Emit the ground-up-loss compute stages for every process: all the calc
    commands first, then the tee fan-outs, then the summarycalcs.

    `full_correlation` is accepted for interface parity with the caller but
    is not referenced in this body.
    """
    pids = range(1, max_process_id + 1)
    for pid in pids:
        do_any(RUNTYPE_GROUNDUP_LOSS, analysis_settings, pid, filename, process_counter, fifo_dir, work_dir)
    for pid in pids:
        do_tees(RUNTYPE_GROUNDUP_LOSS, analysis_settings, pid, filename, process_counter, fifo_dir, work_dir)
    for pid in pids:
        do_summarycalcs(
            runtype=RUNTYPE_GROUNDUP_LOSS,
            analysis_settings=analysis_settings,
            process_id=pid,
            filename=filename,
            gul_alloc_rule=gul_alloc_rule,
            fifo_dir=fifo_dir,
            stderr_guard=stderr_guard
        )
def do_waits(wait_variable, wait_count, filename):
    """
    Append a shell `wait` command for the numbered PID variables.

    :param wait_variable: The type of wait (the PID variable name prefix)
    :type wait_variable: str
    :param wait_count: The number of processes to wait for
    :type wait_count: int
    :param filename: Script to add waits to
    :type filename: str
    """
    if wait_count <= 0:
        return
    pid_refs = ''.join(
        ' ${}{}'.format(wait_variable, n) for n in range(1, wait_count + 1)
    )
    print_command(filename, 'wait' + pid_refs)
    print_command(filename, '')
def do_pwaits(filename, process_counter):
    """
    Add pwaits to the script: wait on every `pid<N>` background process.

    :param filename: script file to append the wait command to
    :type filename: str
    :param process_counter: counters of monitored processes; reads the
        'pid_monitor_count' entry
    :type process_counter: collections.Counter
    """
    do_waits('pid', process_counter['pid_monitor_count'], filename)
def do_awaits(filename, process_counter):
    """
    Add awaits to the script: wait on every `apid<N>` background process
    (used for aalcalc).

    :param filename: script file to append the wait command to
    :type filename: str
    :param process_counter: counters of monitored processes; reads the
        'apid_monitor_count' entry
    :type process_counter: collections.Counter
    """
    do_waits('apid', process_counter['apid_monitor_count'], filename)
def do_lwaits(filename, process_counter):
    """
    Add lwaits to the script: wait on every `lpid<N>` background process
    (used for leccalc).

    :param filename: script file to append the wait command to
    :type filename: str
    :param process_counter: counters of monitored processes; reads the
        'lpid_monitor_count' entry
    :type process_counter: collections.Counter
    """
    do_waits('lpid', process_counter['lpid_monitor_count'], filename)
def do_kwaits(filename, process_counter):
    """
    Add kwaits to the script: wait on every `kpid<N>` background process
    (used for the kat concatenation steps).

    :param filename: script file to append the wait command to
    :type filename: str
    :param process_counter: counters of monitored processes; reads the
        'kpid_monitor_count' entry
    :type process_counter: collections.Counter
    """
    do_waits('kpid', process_counter['kpid_monitor_count'], filename)
def do_fcwaits(filename, process_counter):
    """
    Add fcwaits to the script: wait on every `fcpid<N>` background process
    (used for the fully-correlated output streams).

    :param filename: script file to append the wait command to
    :type filename: str
    :param process_counter: counters of monitored processes; reads the
        'fcpid_monitor_count' entry
    :type process_counter: collections.Counter
    """
    do_waits('fcpid', process_counter['fcpid_monitor_count'], filename)
def get_getmodel_itm_cmd(
        number_of_samples, gul_threshold, use_random_number_file,
        gul_alloc_rule, item_output,
        process_id, max_process_id, correlated_output, **kwargs):
    """
    Build the getmodel ktools command (3.1.0+) using the gulcalc item stream.

    :param number_of_samples: The number of samples to run
    :type number_of_samples: int
    :param gul_threshold: The GUL threshold to use
    :type gul_threshold: float
    :param use_random_number_file: flag to use the random number file
    :type use_random_number_file: bool
    :param gul_alloc_rule: back allocation rule for gulcalc
    :type gul_alloc_rule: int
    :param item_output: The item output
    :type item_output: str
    :param correlated_output: destination for the correlated copy of the
        stream ('' to disable the `-j` option)
    :type correlated_output: str
    :return: The generated getmodel command
    """
    pieces = [
        'eve {} {} | getmodel | gulcalc -S{} -L{}'.format(
            process_id, max_process_id, number_of_samples, gul_threshold
        )
    ]
    if use_random_number_file:
        pieces.append('-r')
    if correlated_output != '':
        pieces.append('-j {}'.format(correlated_output))
    pieces.append('-a{} -i {}'.format(gul_alloc_rule, item_output))
    return ' '.join(pieces)
def get_getmodel_cov_cmd(
        number_of_samples, gul_threshold, use_random_number_file,
        coverage_output, item_output,
        process_id, max_process_id, **kwargs):
    """
    Build the getmodel ktools command (version < 3.0.8) using the gulcalc
    coverage stream.

    :param number_of_samples: The number of samples to run
    :type number_of_samples: int
    :param gul_threshold: The GUL threshold to use
    :type gul_threshold: float
    :param use_random_number_file: flag to use the random number file
    :type use_random_number_file: bool
    :param coverage_output: The coverage output ('' to omit `-c`)
    :type coverage_output: str
    :param item_output: The item output ('' to omit `-i`)
    :type item_output: str
    :return: The generated getmodel command
    """
    pieces = [
        'eve {} {} | getmodel | gulcalc -S{} -L{}'.format(
            process_id, max_process_id, number_of_samples, gul_threshold
        )
    ]
    if use_random_number_file:
        pieces.append('-r')
    if coverage_output != '':
        pieces.append('-c {}'.format(coverage_output))
    if item_output != '':
        pieces.append('-i {}'.format(item_output))
    return ' '.join(pieces)
def get_main_cmd_ri_stream(
    cmd,
    process_id,
    il_output,
    il_alloc_rule,
    ri_alloc_rule,
    num_reinsurance_iterations,
    fifo_dir='fifo/',
    stderr_guard=True,
    full_correlation=False,
    process_counter=None
):
    """
    Build the fmcalc ktools command for the reinsurance stream.

    :param cmd: either gulcalc command stream or correlated output file
    :type cmd: str
    :param process_id: ID corresponding to thread
    :type process_id: int
    :param il_output: If insured loss outputs required
    :type il_output: Boolean
    :param il_alloc_rule: insured loss allocation rule for fmcalc
    :type il_alloc_rule: int
    :param ri_alloc_rule: reinsurance allocation rule for fmcalc
    :type ri_alloc_rule: int
    :param num_reinsurance_iterations: number of reinsurance iterations
    :type num_reinsurance_iterations: int
    :param fifo_dir: path to fifo directory
    :type fifo_dir: str
    :param stderr_guard: send stderr output to log file
    :type stderr_guard: bool
    :param full_correlation: execute fmcalc on fully correlated data
    :type full_correlation: bool
    :param process_counter: process counter (mutated when full_correlation)
    :type process_counter: Counter
    :return: generated fmcalc command as str
    """
    # With full correlation the input is a file redirected into fmcalc,
    # otherwise the gulcalc stream is piped straight in.
    if full_correlation:
        main_cmd = 'fmcalc -a{1} < {0}'.format(cmd, il_alloc_rule)
    else:
        main_cmd = '{0} | fmcalc -a{1}'.format(cmd, il_alloc_rule)
    if il_output:
        main_cmd = "{0} | tee {1}il_P{2}".format(main_cmd, fifo_dir, process_id)
    # One chained fmcalc per reinsurance layer, each reading RI_<n> inputs.
    for layer in range(1, num_reinsurance_iterations + 1):
        main_cmd = "{0} | fmcalc -a{2} -n -p RI_{1}".format(
            main_cmd, layer, ri_alloc_rule
        )
    main_cmd = "{0} > {1}ri_P{2}".format(main_cmd, fifo_dir, process_id)
    if stderr_guard:
        main_cmd = '( {0} ) 2>> log/stderror.err &'.format(main_cmd)
    else:
        main_cmd = '{0} &'.format(main_cmd)
    if full_correlation:
        process_counter['fcpid_monitor_count'] += 1
        main_cmd = '{0} fcpid{1}=$!'.format(
            main_cmd, process_counter['fcpid_monitor_count']
        )
    return main_cmd
def get_main_cmd_il_stream(
    cmd,
    process_id,
    il_alloc_rule,
    fifo_dir='fifo/',
    stderr_guard=True,
    full_correlation=False,
    process_counter=None
):
    """
    Gets the fmcalc ktools command insured losses stream

    :param cmd: either gulcalc command stream or correlated output file
    :type cmd: str
    :param process_id: ID corresponding to thread
    :type process_id: int
    :param il_alloc_rule: insured loss allocation rule for fmcalc
    :type il_alloc_rule: int
    :param fifo_dir: path to fifo directory
    :type fifo_dir: str
    :param stderr_guard: send stderr output to log file
    :type stderr_guard: bool
    :param full_correlation: execute fmcalc on fully correlated data
    :type full_correlation: bool
    :param process_counter: process counter (mutated when full_correlation)
    :type process_counter: Counter
    :return: generated fmcalc command as str
    """
    if full_correlation:
        # BUGFIX: template previously read 'fmcalc-a{2}' (no space between
        # the binary name and its flag), which the shell would treat as a
        # single unknown command. Matches the spacing used by the
        # reinsurance stream ('fmcalc -a...').
        fm_cmd = 'fmcalc -a{2} < {1} > {3}il_P{0}'
    else:
        fm_cmd = '{1} | fmcalc -a{2} > {3}il_P{0} '
    main_cmd = fm_cmd.format(process_id, cmd, il_alloc_rule, fifo_dir)
    main_cmd = '( {0} ) 2>> log/stderror.err &'.format(main_cmd) if stderr_guard else '{0} &'.format(main_cmd)
    if full_correlation:
        process_counter['fcpid_monitor_count'] += 1
        main_cmd = '{0} fcpid{1}=$!'.format(
            main_cmd, process_counter['fcpid_monitor_count']
        )
    return main_cmd
def get_main_cmd_gul_stream(
    cmd,
    process_id,
    fifo_dir='fifo/',
    stderr_guard=True
):
    """
    Build the command that writes the ground-up-loss stream to its fifo.

    :param cmd: either gulcalc command stream or correlated output file
    :type cmd: str
    :param process_id: ID corresponding to thread
    :type process_id: int
    :param fifo_dir: path to fifo directory
    :type fifo_dir: str
    :param stderr_guard: send stderr output to log file
    :type stderr_guard: bool
    :return: generated command as str
    """
    main_cmd = '{1} > {2}gul_P{0} '.format(process_id, cmd, fifo_dir)
    if stderr_guard:
        return '( {0} ) 2>> log/stderror.err &'.format(main_cmd)
    return '{0} &'.format(main_cmd)
def do_computes(outputs):
    """
    Run each queued compute block: emit a section comment into its target
    script, then invoke its compute function with the stored arguments.
    """
    for output in outputs:
        target = output['compute_args']['filename']
        print_command(target, '')
        print_command(
            target,
            '# --- Do {} loss computes ---'.format(output['loss_type'])
        )
        output['compute_fun'](**output['compute_args'])
def genbash(
    max_process_id,
    analysis_settings,
    num_reinsurance_iterations=0,
    fifo_tmp_dir=True,
    gul_alloc_rule=None,
    il_alloc_rule=None,
    ri_alloc_rule=None,
    stderr_guard=True,
    bash_trace=False,
    filename='run_kools.sh',
    _get_getmodel_cmd=None,
    custom_args={}  # NOTE(review): mutable default — only read (via dict.update), never mutated here
):
    """
    Generates a bash script containing ktools calculation instructions for an
    Oasis model.

    :param max_process_id: The number of processes to create
    :type max_process_id: int
    :param analysis_settings: The analysis settings
    :type analysis_settings: dict
    :param filename: The output file name
    :type filename: string
    :param num_reinsurance_iterations: The number of reinsurance iterations
    :type num_reinsurance_iterations: int
    :param fifo_tmp_dir: When set to True, Create and use FIFO quese in `/tmp/[A-Z,0-9]/fifo`, if False run in './fifo'
    :type fifo_tmp_dir: boolean
    :param gul_alloc_rule: Allocation rule (None or 1) for gulcalc, if not set default to coverage stream
    :type gul_alloc_rule: Int
    :param il_alloc_rule: Allocation rule (0, 1 or 2) for fmcalc
    :type il_alloc_rule: Int
    :param ri_alloc_rule: Allocation rule (0, 1 or 2) for fmcalc
    :type ri_alloc_rule: Int
    :param stderr_guard: When True, wrap pipelines with stderr redirection to
        log/stderror.err and install a shell error handler/monitor
    :type stderr_guard: bool
    :param bash_trace: When True, redirect bash trace output to log/bash.log
        and enable `set -x`
    :type bash_trace: bool
    :param get_getmodel_cmd: Method for getting the getmodel command, by default
        ``GenerateLossesCmd.get_getmodel_cmd`` is used.
    :type get_getmodel_cmd: callable
    :param custom_args: Extra keyword arguments merged into the getmodel
        command arguments (read-only here)
    :type custom_args: dict

    NOTE(review): default `filename` is 'run_kools.sh' — possibly intended
    'run_ktools.sh'; confirm against callers before changing.
    """
    process_counter = Counter()
    use_random_number_file = False
    stderr_guard = stderr_guard
    # The gulcalc item stream (-i) is only used when an integer
    # gul_alloc_rule is supplied; otherwise the older coverage stream is used.
    gul_item_stream = (gul_alloc_rule and isinstance(gul_alloc_rule, int))
    full_correlation = False
    gul_output = False
    il_output = False
    ri_output = False
    fifo_queue_dir = ""
    fifo_full_correlation_dir = ""
    work_dir = 'work/'
    work_kat_dir = 'work/kat/'
    work_full_correlation_dir = 'work/full_correlation/'
    work_full_correlation_kat_dir = 'work/full_correlation/kat/'
    output_dir = 'output/'
    output_full_correlation_dir = 'output/full_correlation/'
    # remove the file if it already exists
    if os.path.exists(filename):
        os.remove(filename)
    gul_threshold = analysis_settings.get('gul_threshold', 0)
    number_of_samples = analysis_settings.get('number_of_samples', 0)
    if 'model_settings' in analysis_settings and analysis_settings['model_settings'].get('use_random_number_file'):
        use_random_number_file = True
    # Full correlation is only honoured for the default getmodel command
    # when the item stream is enabled.
    if 'full_correlation' in analysis_settings:
        if _get_getmodel_cmd is None and gul_item_stream:
            full_correlation = analysis_settings['full_correlation']
    if 'gul_output' in analysis_settings:
        gul_output = analysis_settings['gul_output']
    if 'il_output' in analysis_settings:
        il_output = analysis_settings['il_output']
    if 'ri_output' in analysis_settings:
        ri_output = analysis_settings['ri_output']
    print_command(filename, '#!/bin/bash')
    print_command(filename, 'SCRIPT=$(readlink -f "$0") && cd $(dirname "$SCRIPT")')
    print_command(filename, '')
    print_command(filename, '# --- Script Init ---')
    print_command(filename, '')
    print_command(filename, 'set -e')
    print_command(filename, 'set -o pipefail')
    print_command(filename, 'mkdir -p log')
    print_command(filename, 'rm -R -f log/*')
    print_command(filename, '')
    if bash_trace:
        print_command(filename, '# --- Redirect Bash trace to file ---')
        print_command(filename, 'exec > >(tee -ia log/bash.log)')
        print_command(filename, 'exec 2> >(tee -ia log/bash.log >& 2)')
        print_command(filename, 'exec 19> log/bash.log')
        print_command(filename, 'export BASH_XTRACEFD="19"')
        print_command(filename, '')
    if stderr_guard:
        # Emit an error handler that records diagnostics in log/killout.txt
        # and kills the remaining processes in the group/session on failure.
        print_command(filename, 'error_handler(){')
        print_command(filename, " echo 'Run Error - terminating'")
        print_command(filename, ' exit_code=$?')
        print_command(filename, ' set +x')
        print_command(filename, ' group_pid=$(ps -p $$ -o pgid --no-headers)')
        print_command(filename, ' sess_pid=$(ps -p $$ -o sess --no-headers)')
        print_command(filename, ' printf "Script PID:%d, GPID:%s, SPID:%d" $$ $group_pid $sess_pid >> log/killout.txt')
        print_command(filename, '')
        print_command(filename, ' if hash pstree 2>/dev/null; then')
        print_command(filename, '     pstree -pn $$ >> log/killout.txt')
        print_command(filename, '     PIDS_KILL=$(pstree -pn $$ | grep -o "([[:digit:]]*)" | grep -o "[[:digit:]]*")')
        print_command(filename, '     kill -9 $(echo "$PIDS_KILL" | grep -v $group_pid | grep -v $$) 2>/dev/null')
        print_command(filename, ' else')
        print_command(filename, '     ps f -g $sess_pid > log/subprocess_list')
        print_command(filename, '     PIDS_KILL=$(pgrep -a --pgroup $group_pid | grep -v celery | grep -v $group_pid | grep -v $$)')
        print_command(filename, '     echo "$PIDS_KILL" >> log/killout.txt')
        print_command(filename, '     kill -9 $(echo "$PIDS_KILL" | awk \'BEGIN { FS = "[ \\t\\n]+" }{ print $1 }\') 2>/dev/null')
        print_command(filename, ' fi')
        print_command(filename, ' exit $(( 1 > $exit_code ? 1 : $exit_code ))')
        print_command(filename, '}')
        print_command(filename, 'trap error_handler QUIT HUP INT KILL TERM ERR')
        print_command(filename, '')
        print_command(filename, 'touch log/stderror.err')
        print_command(filename, 'ktools_monitor.sh $$ & pid0=$!')
        print_command(filename, '')
    if bash_trace:
        print_command(filename, 'set -x')
    print_command(filename, '# --- Setup run dirs ---')
    print_command(filename, '')
    print_command(filename, "find output/* ! -name '*summary-info*' -type f -exec rm -f {} +")
    if full_correlation:
        print_command(filename, 'mkdir {}'.format(output_full_correlation_dir))
    print_command(filename, '')
    if not fifo_tmp_dir:
        fifo_queue_dir = 'fifo/'
        print_command(filename, 'rm -R -f {}*'.format(fifo_queue_dir))
        if full_correlation:
            fifo_full_correlation_dir = fifo_queue_dir + 'full_correlation/'
            print_command(
                filename, 'mkdir {}'.format(fifo_full_correlation_dir)
            )
    print_command(filename, 'rm -R -f {}*'.format(work_dir))
    print_command(filename, 'mkdir {}'.format(work_kat_dir))
    if full_correlation:
        print_command(filename, 'mkdir {}'.format(work_full_correlation_dir))
        print_command(
            filename, 'mkdir {}'.format(work_full_correlation_kat_dir)
        )
    print_command(filename, '')
    # Create FIFOS under /tmp/* (Windows support)
    if fifo_tmp_dir:
        # Random 10-character directory name avoids collisions between runs.
        fifo_queue_dir = '/tmp/{}/'.format(
            ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10))
        )
        print_command(filename, 'rm -R -f {}'.format(fifo_queue_dir))
        fifo_queue_dir = fifo_queue_dir + 'fifo/'
        print_command(filename, 'mkdir -p {}'.format(fifo_queue_dir))
        if full_correlation:
            fifo_full_correlation_dir = fifo_queue_dir + 'full_correlation/'
            print_command(
                filename, 'mkdir {}'.format(fifo_full_correlation_dir)
            )
    # Create workfolders
    if gul_output:
        create_workfolders(RUNTYPE_GROUNDUP_LOSS, analysis_settings, filename, work_dir)
        if full_correlation:
            create_workfolders(
                RUNTYPE_GROUNDUP_LOSS, analysis_settings,
                filename, work_full_correlation_dir
            )
    if il_output:
        create_workfolders(RUNTYPE_INSURED_LOSS, analysis_settings, filename, work_dir)
        if full_correlation:
            create_workfolders(
                RUNTYPE_INSURED_LOSS, analysis_settings,
                filename, work_full_correlation_dir
            )
    if ri_output:
        create_workfolders(RUNTYPE_REINSURANCE_LOSS, analysis_settings, filename, work_dir)
        if full_correlation:
            create_workfolders(
                RUNTYPE_REINSURANCE_LOSS, analysis_settings,
                filename, work_full_correlation_dir
            )
    print_command(filename, '')
    # Create Execution Pipeline FIFOs
    if gul_output:
        do_fifos_exec(RUNTYPE_GROUNDUP_LOSS, max_process_id, filename, fifo_queue_dir)
    if il_output:
        do_fifos_exec(RUNTYPE_INSURED_LOSS, max_process_id, filename, fifo_queue_dir)
    if ri_output:
        do_fifos_exec(RUNTYPE_REINSURANCE_LOSS, max_process_id, filename, fifo_queue_dir)
    # Create Summarycalc FIFOs
    if gul_output:
        do_fifos_calc(RUNTYPE_GROUNDUP_LOSS, analysis_settings, max_process_id,
                      filename, fifo_queue_dir)
    if il_output:
        do_fifos_calc(RUNTYPE_INSURED_LOSS, analysis_settings,
                      max_process_id, filename, fifo_queue_dir)
    if ri_output:
        do_fifos_calc(RUNTYPE_REINSURANCE_LOSS, analysis_settings,
                      max_process_id, filename, fifo_queue_dir)
    # Create Full correlation FIFO
    if full_correlation:
        if gul_output:
            do_fifos_calc(
                RUNTYPE_GROUNDUP_LOSS, analysis_settings,
                max_process_id, filename, fifo_full_correlation_dir
            )
        if il_output:
            do_fifos_calc(
                RUNTYPE_INSURED_LOSS, analysis_settings,
                max_process_id, filename, fifo_full_correlation_dir
            )
        if ri_output:
            do_fifos_calc(
                RUNTYPE_REINSURANCE_LOSS, analysis_settings,
                max_process_id, filename, fifo_full_correlation_dir
            )
    print_command(filename, '')
    # Queue the compute blocks (RI, then IL, then GUL) for the primary run.
    compute_outputs = []
    if ri_output:
        ri_computes = {
            'loss_type': 'reinsurance',
            'compute_fun': ri,
            'compute_args': {
                'analysis_settings': analysis_settings,
                'max_process_id': max_process_id,
                'filename': filename,
                'process_counter': process_counter,
                'num_reinsurance_iterations': num_reinsurance_iterations,
                'fifo_dir': fifo_queue_dir,
                'work_dir': work_dir,
                'stderr_guard': stderr_guard
            }
        }
        compute_outputs.append(ri_computes)
    if il_output:
        il_computes = {
            'loss_type': 'insured',
            'compute_fun': il,
            'compute_args': {
                'analysis_settings': analysis_settings,
                'max_process_id': max_process_id,
                'filename': filename,
                'process_counter': process_counter,
                'fifo_dir': fifo_queue_dir,
                'work_dir': work_dir,
                'stderr_guard': stderr_guard
            }
        }
        compute_outputs.append(il_computes)
    if gul_output:
        gul_computes = {
            'loss_type': 'ground up',
            'compute_fun': do_gul,
            'compute_args': {
                'analysis_settings': analysis_settings,
                'max_process_id': max_process_id,
                'filename': filename,
                'process_counter': process_counter,
                'fifo_dir': fifo_queue_dir,
                'work_dir': work_dir,
                'gul_alloc_rule': gul_alloc_rule,
                'stderr_guard': stderr_guard
            }
        }
        compute_outputs.append(gul_computes)
    do_computes(compute_outputs)
    print_command(filename, '')
    # Emit the main getmodel/gulcalc pipeline for each process.
    for process_id in range(1, max_process_id + 1):
        # gulcalc output file for fully correlated output
        if full_correlation:
            correlated_output_file = '{0}gul_P{1}'.format(
                fifo_full_correlation_dir,
                process_id
            )
        else:
            correlated_output_file = ''
        getmodel_args = {
            'number_of_samples': number_of_samples,
            'gul_threshold': gul_threshold,
            'use_random_number_file': use_random_number_file,
            'coverage_output': '{0}gul_P{1}'.format(fifo_queue_dir, process_id),
            'item_output': '-',
            'gul_alloc_rule': gul_alloc_rule,
            'process_id': process_id,
            'max_process_id': max_process_id,
            'correlated_output': correlated_output_file,
            'stderr_guard': stderr_guard
        }
        # GUL coverage & item stream (Older)
        if gul_item_stream:
            if gul_output:
                getmodel_args['item_output'] = '- | tee {0}gul_P{1}'.format(fifo_queue_dir, process_id)
            _get_getmodel_cmd = (_get_getmodel_cmd or get_getmodel_itm_cmd)
        else:
            if not gul_output:
                getmodel_args['coverage_output'] = ""
            _get_getmodel_cmd = (_get_getmodel_cmd or get_getmodel_cov_cmd)
        # ! Should be able to streamline the logic a little
        if num_reinsurance_iterations > 0 and ri_output:
            getmodel_args.update(custom_args)
            getmodel_cmd = _get_getmodel_cmd(**getmodel_args)
            main_cmd = get_main_cmd_ri_stream(
                getmodel_cmd,
                process_id,
                il_output,
                il_alloc_rule,
                ri_alloc_rule,
                num_reinsurance_iterations,
                fifo_queue_dir,
                stderr_guard
            )
            print_command(filename, main_cmd)
        elif gul_output and il_output:
            getmodel_args.update(custom_args)
            getmodel_cmd = _get_getmodel_cmd(**getmodel_args)
            main_cmd = get_main_cmd_il_stream(
                getmodel_cmd, process_id, il_alloc_rule, fifo_queue_dir,
                stderr_guard
            )
            print_command(filename, main_cmd)
        else:
            if gul_output and 'gul_summaries' in analysis_settings:
                getmodel_args['coverage_output'] = '-'
                getmodel_args['item_output'] = ''
                if gul_item_stream:
                    getmodel_args['item_output'] = '-'
                getmodel_args.update(custom_args)
                getmodel_cmd = _get_getmodel_cmd(**getmodel_args)
                main_cmd = get_main_cmd_gul_stream(
                    getmodel_cmd, process_id, fifo_queue_dir, stderr_guard
                )
                print_command(filename, main_cmd)
            if il_output and 'il_summaries' in analysis_settings:
                getmodel_args['coverage_output'] = ''
                getmodel_args['item_output'] = '-'
                getmodel_args.update(custom_args)
                getmodel_cmd = _get_getmodel_cmd(**getmodel_args)
                main_cmd = get_main_cmd_il_stream(
                    getmodel_cmd, process_id, il_alloc_rule, fifo_queue_dir,
                    stderr_guard
                )
                print_command(filename, main_cmd)
    print_command(filename, '')
    do_pwaits(filename, process_counter)
    if full_correlation:
        # Second pass: re-run the loss pipelines on the correlated gul files
        # produced above, then re-queue the compute blocks against the
        # full-correlation fifo/work directories.
        print_command(
            filename, '# --- Do computes for fully correlated output ---'
        )
        print_command(filename, '')
        for process_id in range(1, max_process_id + 1):
            # Set up file name for full correlation file
            correlated_output_file = '{0}gul_P{1}'.format(
                fifo_full_correlation_dir,
                process_id
            )
            if num_reinsurance_iterations > 0 and ri_output:
                main_cmd = get_main_cmd_ri_stream(
                    correlated_output_file,
                    process_id,
                    il_output,
                    il_alloc_rule,
                    ri_alloc_rule,
                    num_reinsurance_iterations,
                    fifo_full_correlation_dir,
                    stderr_guard,
                    full_correlation,
                    process_counter
                )
                print_command(filename, main_cmd)
            elif gul_output and il_output:
                main_cmd = get_main_cmd_il_stream(
                    correlated_output_file, process_id, il_alloc_rule,
                    fifo_full_correlation_dir, stderr_guard, full_correlation,
                    process_counter
                )
                print_command(filename, main_cmd)
            else:
                if il_output and 'il_summaries' in analysis_settings:
                    main_cmd = get_main_cmd_il_stream(
                        correlated_output_file, process_id, il_alloc_rule,
                        fifo_full_correlation_dir, stderr_guard,
                        full_correlation, process_counter
                    )
                    print_command(filename, main_cmd)
        print_command(filename, '')
        do_fcwaits(filename, process_counter)
        # Reset so the second compute pass numbers its pids from 1 again.
        process_counter['pid_monitor_count'] = 0
        compute_outputs = []
        if ri_output:
            ri_computes = {
                'loss_type': 'reinsurance',
                'compute_fun': ri,
                'compute_args': {
                    'analysis_settings': analysis_settings,
                    'max_process_id': max_process_id,
                    'filename': filename,
                    'process_counter': process_counter,
                    'num_reinsurance_iterations': num_reinsurance_iterations,
                    'fifo_dir': fifo_full_correlation_dir,
                    'work_dir': work_full_correlation_dir,
                    'stderr_guard': stderr_guard
                }
            }
            compute_outputs.append(ri_computes)
        if il_output:
            il_computes = {
                'loss_type': 'insured',
                'compute_fun': il,
                'compute_args': {
                    'analysis_settings': analysis_settings,
                    'max_process_id': max_process_id,
                    'filename': filename,
                    'process_counter': process_counter,
                    'fifo_dir': fifo_full_correlation_dir,
                    'work_dir': work_full_correlation_dir,
                    'stderr_guard': stderr_guard
                }
            }
            compute_outputs.append(il_computes)
        if gul_output:
            gul_computes = {
                'loss_type': 'ground up',
                'compute_fun': do_gul,
                'compute_args': {
                    'analysis_settings': analysis_settings,
                    'max_process_id': max_process_id,
                    'filename': filename,
                    'process_counter': process_counter,
                    'fifo_dir': fifo_full_correlation_dir,
                    'work_dir': work_full_correlation_dir,
                    'gul_alloc_rule': gul_alloc_rule,
                    'stderr_guard': stderr_guard,
                    'full_correlation': full_correlation
                }
            }
            compute_outputs.append(gul_computes)
        do_computes(compute_outputs)
        print_command(filename, '')
        do_pwaits(filename, process_counter)
    if ri_output:
        print_command(filename, '')
        print_command(filename, '# --- Do reinsurance loss kats ---')
        print_command(filename, '')
        do_kats(
            RUNTYPE_REINSURANCE_LOSS, analysis_settings, max_process_id,
            filename, process_counter, work_kat_dir, output_dir
        )
        if full_correlation:
            print_command(filename, '')
            print_command(
                filename,
                '# --- Do reinsurance loss kats for fully correlated output ---'
            )
            print_command(filename, '')
            do_kats(
                RUNTYPE_REINSURANCE_LOSS, analysis_settings, max_process_id,
                filename, process_counter, work_full_correlation_kat_dir,
                output_full_correlation_dir
            )
    if il_output:
        print_command(filename, '')
        print_command(filename, '# --- Do insured loss kats ---')
        print_command(filename, '')
        do_kats(
            RUNTYPE_INSURED_LOSS, analysis_settings, max_process_id, filename,
            process_counter, work_kat_dir, output_dir
        )
        if full_correlation:
            print_command(filename, '')
            print_command(
                filename,
                '# --- Do insured loss kats for fully correlated output ---'
            )
            print_command(filename, '')
            do_kats(
                RUNTYPE_INSURED_LOSS, analysis_settings, max_process_id,
                filename, process_counter, work_full_correlation_kat_dir,
                output_full_correlation_dir
            )
    if gul_output:
        print_command(filename, '')
        print_command(filename, '# --- Do ground up loss kats ---')
        print_command(filename, '')
        do_kats(
            RUNTYPE_GROUNDUP_LOSS, analysis_settings, max_process_id, filename,
            process_counter, work_kat_dir, output_dir
        )
        if full_correlation:
            print_command(filename, '')
            print_command(
                filename,
                '# --- Do ground up loss kats for fully correlated output ---'
            )
            print_command(filename, '')
            do_kats(
                RUNTYPE_GROUNDUP_LOSS, analysis_settings, max_process_id,
                filename, process_counter, work_full_correlation_kat_dir,
                output_full_correlation_dir
            )
    do_kwaits(filename, process_counter)
    print_command(filename, '')
    if ri_output:
        do_post_wait_processing(
            RUNTYPE_REINSURANCE_LOSS, analysis_settings, filename, process_counter,
            '', output_dir
        )
    if il_output:
        do_post_wait_processing(
            RUNTYPE_INSURED_LOSS, analysis_settings, filename, process_counter, '',
            output_dir
        )
    if gul_output:
        do_post_wait_processing(
            RUNTYPE_GROUNDUP_LOSS, analysis_settings, filename, process_counter, '',
            output_dir
        )
    if full_correlation:
        work_sub_dir = re.sub('^work/', '', work_full_correlation_dir)
        if ri_output:
            do_post_wait_processing(
                RUNTYPE_REINSURANCE_LOSS, analysis_settings, filename,
                process_counter, work_sub_dir, output_full_correlation_dir
            )
        if il_output:
            do_post_wait_processing(
                RUNTYPE_INSURED_LOSS, analysis_settings, filename, process_counter,
                work_sub_dir, output_full_correlation_dir
            )
        if gul_output:
            do_post_wait_processing(
                RUNTYPE_GROUNDUP_LOSS, analysis_settings, filename, process_counter,
                work_sub_dir, output_full_correlation_dir
            )
    do_awaits(filename, process_counter)  # waits for aalcalc
    do_lwaits(filename, process_counter)  # waits for leccalc
    # Clean up the work and fifo directories now that all outputs are written.
    print_command(filename, 'rm -R -f work/*')
    if fifo_tmp_dir:
        print_command(
            filename, 'rm -R -f {}'.format(re.sub('fifo/$', '', fifo_queue_dir))
        )
    else:
        print_command(filename, 'rm -R -f fifo/*')
    if stderr_guard:
        print_command(filename, '')
        print_command(filename, '# Stop ktools watcher')
        print_command(filename, 'kill -9 $pid0')
| 36.342445 | 152 | 0.596891 |
ace23ddf8e0bc9c174833c2e85098d54d936bae0 | 49 | py | Python | test/test_import.py | benvanwerkhoven/empty_python | d179e5db6a8c056930ad610d0ed3373860035ce9 | [
"Apache-2.0"
] | 1 | 2020-12-18T11:54:05.000Z | 2020-12-18T11:54:05.000Z | test/test_import.py | benvanwerkhoven/empty_python | d179e5db6a8c056930ad610d0ed3373860035ce9 | [
"Apache-2.0"
] | 1 | 2018-04-09T13:55:33.000Z | 2018-04-10T13:16:38.000Z | test/test_import.py | benvanwerkhoven/empty_python | d179e5db6a8c056930ad610d0ed3373860035ce9 | [
"Apache-2.0"
] | 3 | 2018-03-13T15:31:02.000Z | 2019-11-14T10:32:14.000Z | import empty_python
def test_empty():
pass
| 8.166667 | 19 | 0.714286 |
ace23e23adba9e29ca7cf0ae31b0a1c13609a521 | 22,660 | py | Python | tensorflow/tools/docs/generate_lib.py | aapeliv/tensorflow | cd2a135c126f209ccc943555b85ca436ea27ffe3 | [
"Apache-2.0"
] | 1 | 2018-08-18T16:54:37.000Z | 2018-08-18T16:54:37.000Z | tensorflow/tools/docs/generate_lib.py | liufengdb/tensorflow | 51100a8de57ef53e36a8a9f5a9829cbd33fbed04 | [
"Apache-2.0"
] | null | null | null | tensorflow/tools/docs/generate_lib.py | liufengdb/tensorflow | 51100a8de57ef53e36a8a9f5a9829cbd33fbed04 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate docs for the TensorFlow Python API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import fnmatch
import os
import shutil
import tempfile
import six
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.docs import doc_controls
from tensorflow.tools.docs import doc_generator_visitor
from tensorflow.tools.docs import parser
from tensorflow.tools.docs import pretty_docs
from tensorflow.tools.docs import py_guide_parser
def _is_free_function(py_object, full_name, index):
  """Return True if `py_object` is a function not owned by a class.

  A top-level name with no dot has no parent and is not treated as a free
  function here; otherwise the parent symbol is looked up in `index` and the
  object counts as free only when that parent is not a class.
  """
  if not tf_inspect.isfunction(py_object):
    return False
  # No parent symbol at all -> not considered a free function.
  if '.' not in full_name:
    return False
  parent = index[full_name.rsplit('.', 1)[0]]
  # Static methods look like functions in 2.7; exclude them via the parent.
  return not tf_inspect.isclass(parent)
def write_docs(output_dir,
               parser_config,
               yaml_toc,
               root_title='TensorFlow',
               search_hints=True,
               site_api_path=None):
  """Write previously extracted docs to disk.

  Write a docs page for each symbol included in the indices of parser_config to
  a tree of docs at `output_dir`.

  Symbols with multiple aliases will have only one page written about
  them, which is referenced for all aliases.

  Args:
    output_dir: Directory to write documentation markdown files to. Will be
      created if it doesn't exist.
    parser_config: A `parser.ParserConfig` object, containing all the necessary
      indices.
    yaml_toc: Set to `True` to generate a "_toc.yaml" file.
    root_title: The title name for the root level index.md.
    search_hints: (bool) include meta-data search hints at the top of each
      output file.
    site_api_path: Used to write the api-duplicates _redirects.yaml file. if
      None (the default) the file is not generated.

  Raises:
    ValueError: if `output_dir` is not an absolute path
  """
  # Make output_dir.
  if not os.path.isabs(output_dir):
    raise ValueError("'output_dir' must be an absolute path.\n"
                     "    output_dir='%s'" % output_dir)
  if not os.path.exists(output_dir):
    os.makedirs(output_dir)
  # These dictionaries are used for table-of-contents generation below
  # They will contain, after the for-loop below::
  #  - module name(string):classes and functions the module contains(list)
  module_children = {}
  #  - symbol name(string):pathname (string)
  symbol_to_file = {}
  # Collect redirects for an api _redirects.yaml file.
  redirects = []
  # Parse and write Markdown pages, resolving cross-links (@{symbol}).
  for full_name, py_object in six.iteritems(parser_config.index):
    parser_config.reference_resolver.current_doc_full_name = full_name
    # Aliases are handled via their canonical symbol only.
    if full_name in parser_config.duplicate_of:
      continue
    # Methods and some routines are documented only as part of their class.
    if not (tf_inspect.ismodule(py_object) or tf_inspect.isclass(py_object) or
            _is_free_function(py_object, full_name, parser_config.index)):
      continue
    if doc_controls.should_skip(py_object):
      continue
    sitepath = os.path.join('api_docs/python',
                            parser.documentation_path(full_name)[:-3])
    # For TOC, we need to store a mapping from full_name to the file
    # we're generating
    symbol_to_file[full_name] = sitepath
    # For a module, remember the module for the table-of-contents
    if tf_inspect.ismodule(py_object):
      if full_name in parser_config.tree:
        module_children.setdefault(full_name, [])
    # For something else that's documented,
    # figure out what module it lives in
    else:
      subname = str(full_name)
      # Walk up the dotted name until the nearest enclosing module is found.
      while True:
        subname = subname[:subname.rindex('.')]
        if tf_inspect.ismodule(parser_config.index[subname]):
          module_children.setdefault(subname, []).append(full_name)
          break
    # Generate docs for `py_object`, resolving references.
    page_info = parser.docs_for_object(full_name, py_object, parser_config)
    path = os.path.join(output_dir, parser.documentation_path(full_name))
    directory = os.path.dirname(path)
    try:
      if not os.path.exists(directory):
        os.makedirs(directory)
      # This function returns raw bytes in PY2 or unicode in PY3.
      if search_hints:
        content = [page_info.get_metadata_html()]
      else:
        content = ['']
      content.append(pretty_docs.build_md_page(page_info))
      text = '\n'.join(content)
      if six.PY3:
        text = text.encode('utf-8')
      with open(path, 'wb') as f:
        f.write(text)
    except OSError:
      raise OSError(
          'Cannot write documentation for %s to %s' % (full_name, directory))
    # Record one redirect per alias pointing at the canonical page.
    if site_api_path:
      duplicates = parser_config.duplicates.get(full_name, [])
      if not duplicates:
        continue
      duplicates = [item for item in duplicates if item != full_name]
      for dup in duplicates:
        from_path = os.path.join(site_api_path, dup.replace('.', '/'))
        to_path = os.path.join(site_api_path, full_name.replace('.', '/'))
        redirects.append((from_path, to_path))
  if site_api_path and redirects:
    redirects = sorted(redirects)
    template = ('- from: /{}\n'
                '  to: /{}\n')
    redirects = [template.format(f, t) for f, t in redirects]
    api_redirects_path = os.path.join(output_dir, '_redirects.yaml')
    with open(api_redirects_path, 'w') as redirect_file:
      redirect_file.write('redirects:\n')
      redirect_file.write(''.join(redirects))
  if yaml_toc:
    # Generate table of contents
    # Put modules in alphabetical order, case-insensitive
    modules = sorted(module_children.keys(), key=lambda a: a.upper())
    leftnav_path = os.path.join(output_dir, '_toc.yaml')
    with open(leftnav_path, 'w') as f:
      # Generate header
      f.write('# Automatically generated file; please do not edit\ntoc:\n')
      for module in modules:
        indent_num = module.count('.')
        # Don't list `tf.submodule` inside `tf`
        indent_num = max(indent_num, 1)
        indent = '  '*indent_num
        if indent_num > 1:
          # tf.contrib.baysflow.entropy will be under
          #   tf.contrib->baysflow->entropy
          title = module.split('.')[-1]
        else:
          title = module
        header = [
            '- title: ' + title,
            '  section:',
            '  - title: Overview',
            '    path: /TARGET_DOC_ROOT/VERSION/' + symbol_to_file[module]]
        header = ''.join([indent+line+'\n' for line in header])
        f.write(header)
        symbols_in_module = module_children.get(module, [])
        # Sort case-insensitive, if equal sort case sensitive (upper first)
        symbols_in_module.sort(key=lambda a: (a.upper(), a))
        for full_name in symbols_in_module:
          item = [
              '  - title: ' + full_name[len(module) + 1:],
              '    path: /TARGET_DOC_ROOT/VERSION/' + symbol_to_file[full_name]]
          item = ''.join([indent+line+'\n' for line in item])
          f.write(item)
  # Write a global index containing all full names with links.
  with open(os.path.join(output_dir, 'index.md'), 'w') as f:
    f.write(
        parser.generate_global_index(root_title, parser_config.index,
                                     parser_config.reference_resolver))
def add_dict_to_dict(add_from, add_to):
  """Merge `add_from` into `add_to`, extending lists on key collisions.

  Args:
    add_from: dict mapping keys to lists of values to merge in.
    add_to: dict mapping keys to lists; modified in place.
  """
  for key, values in add_from.items():
    if key in add_to:
      add_to[key].extend(values)
    else:
      # Store a copy: the original code aliased the same list object, so a
      # later in-place `extend` on `add_to` silently mutated `add_from`
      # (e.g. the shared default private/do-not-descend maps).
      add_to[key] = list(values)
# Exclude some libraries in contrib from the documentation altogether.
def _get_default_private_map():
return {
'tf.contrib.autograph': ['utils', 'operators'],
'tf.test': ['mock'],
'tf.compat': ['v1', 'v2'],
}
# Exclude members of some libraries.
def _get_default_do_not_descend_map():
# TODO(markdaoust): Use docs_controls decorators, locally, instead.
return {
'tf': ['cli', 'lib', 'wrappers'],
'tf.contrib': [
'compiler',
'grid_rnn',
# Block contrib.keras to de-clutter the docs
'keras',
'labeled_tensor',
'quantization',
'session_bundle',
'slim',
'solvers',
'specs',
'tensor_forest',
'tensorboard',
'testing',
'tfprof',
],
'tf.contrib.bayesflow': [
'special_math', 'stochastic_gradient_estimators',
'stochastic_variables'
],
'tf.contrib.ffmpeg': ['ffmpeg_ops'],
'tf.contrib.graph_editor': [
'edit', 'match', 'reroute', 'subgraph', 'transform', 'select', 'util'
],
'tf.contrib.keras': ['api', 'python'],
'tf.contrib.layers': ['feature_column', 'summaries'],
'tf.contrib.learn': [
'datasets',
'head',
'graph_actions',
'io',
'models',
'monitors',
'ops',
'preprocessing',
'utils',
],
'tf.contrib.util': ['loader'],
}
def extract(py_modules,
            private_map,
            do_not_descend_map,
            visitor_cls=doc_generator_visitor.DocGeneratorVisitor):
  """Extract docs from tf namespace and write them to disk."""
  root_name, root_module = py_modules[0]
  # Traverse the first module; it seeds the visitor and the API filter.
  visitor = visitor_cls(root_name)
  api_visitor = public_api.PublicAPIVisitor(visitor)
  api_visitor.set_root_name(root_name)
  add_dict_to_dict(private_map, api_visitor.private_map)
  add_dict_to_dict(do_not_descend_map, api_visitor.do_not_descend_map)
  traverse.traverse(root_module, api_visitor)
  # Traverse all py_modules after the first:
  for module_name, module in py_modules[1:]:
    visitor.set_root_name(module_name)
    api_visitor.set_root_name(module_name)
    traverse.traverse(module, api_visitor)
  return visitor
class _GetMarkdownTitle(py_guide_parser.PyGuideParser):
  """Extract the title from a .md file."""

  def __init__(self):
    py_guide_parser.PyGuideParser.__init__(self)
    self.title = None

  def process_title(self, _, title):
    # Only the first title encountered in the file is kept.
    if self.title is None:
      self.title = title
class _DocInfo(object):
"""A simple struct for holding a doc's url and title."""
def __init__(self, url, title):
self.url = url
self.title = title
def build_doc_index(src_dir):
  """Build an index from a keyword designating a doc to _DocInfo objects."""
  if not os.path.isabs(src_dir):
    raise ValueError("'src_dir' must be an absolute path.\n"
                     " src_dir='%s'" % src_dir)
  if not os.path.exists(src_dir):
    raise ValueError("'src_dir' path must exist.\n"
                     " src_dir='%s'" % src_dir)
  doc_index = {}
  for dirpath, _, filenames in os.walk(src_dir):
    suffix = os.path.relpath(path=dirpath, start=src_dir)
    md_files = [name for name in filenames if name.endswith('.md')]
    for base_name in md_files:
      full_path = os.path.join(dirpath, base_name)
      title_parser = _GetMarkdownTitle()
      title_parser.process(full_path)
      if title_parser.title is None:
        raise ValueError(
            '`{}` has no markdown title (# title)'.format(full_path))
      # Index under the file stem, collapsing 'index' onto its directory.
      key_parts = os.path.join(suffix, base_name[:-3]).split('/')
      if key_parts[-1] == 'index':
        key_parts = key_parts[:-1]
      doc_info = _DocInfo(os.path.join(suffix, base_name), title_parser.title)
      doc_index[key_parts[-1]] = doc_info
      # Also index under 'parent/name' to disambiguate duplicate stems.
      if len(key_parts) > 1:
        doc_index['/'.join(key_parts[-2:])] = doc_info
  return doc_index
class _GuideRef(object):
def __init__(self, base_name, title, section_title, section_tag):
self.url = 'api_guides/python/' + (('%s#%s' % (base_name, section_tag))
if section_tag else base_name)
self.link_text = (('%s > %s' % (title, section_title))
if section_title else title)
def make_md_link(self, url_prefix):
return '[%s](%s%s)' % (self.link_text, url_prefix, self.url)
class _GenerateGuideIndex(py_guide_parser.PyGuideParser):
  """Turn guide files into an index from symbol name to a list of _GuideRefs."""

  def __init__(self):
    py_guide_parser.PyGuideParser.__init__(self)
    self.index = {}

  def process(self, full_path, base_name):
    """Index a file, reading from `full_path`, with `base_name` as the link."""
    self.full_path = full_path
    self.base_name = base_name
    self.title = None
    self.section_title = None
    self.section_tag = None
    py_guide_parser.PyGuideParser.process(self, full_path)

  def process_title(self, _, title):
    # Only the first title of a guide is kept.
    if self.title is None:
      self.title = title

  def process_section(self, _, section_title, tag):
    self.section_title = section_title
    self.section_tag = tag

  def process_line(self, _, line):
    """Index @{symbol} references as in the current file & section."""
    for match in parser.SYMBOL_REFERENCE_RE.finditer(line):
      ref = _GuideRef(self.base_name, self.title, self.section_title,
                      self.section_tag)
      self.index.setdefault(match.group(1), []).append(ref)
def _build_guide_index(guide_src_dir):
  """Return dict: symbol name -> _GuideRef from the files in `guide_src_dir`."""
  index_generator = _GenerateGuideIndex()
  # A missing guide directory simply yields an empty index.
  if not os.path.exists(guide_src_dir):
    return index_generator.index
  for full_path, base_name in py_guide_parser.md_files_in_dir(guide_src_dir):
    index_generator.process(full_path, base_name)
  return index_generator.index
class _UpdateTags(py_guide_parser.PyGuideParser):
  """Rewrites a Python guide so that each section has an explicit id tag.

  "section" here refers to blocks delimited by second level headings.
  """

  def process_section(self, line_number, section_title, tag):
    heading = '<h2 id="%s">%s</h2>' % (tag, section_title)
    self.replace_line(line_number, heading)
def update_id_tags_inplace(src_dir):
  """Set explicit ids on all second-level headings to ensure back-links work.

  Args:
    src_dir: The directory of md-files to convert (inplace).
  """
  tag_updater = _UpdateTags()
  for dirpath, _, filenames in os.walk(src_dir):
    for base_name in filenames:
      if not base_name.endswith('.md'):
        continue
      # `dirpath` from os.walk already includes `src_dir` as a prefix, so
      # joining `src_dir` again (as the previous code did) produced a broken
      # path such as 'docs/docs/guide.md' whenever `src_dir` was relative.
      full_path = os.path.join(dirpath, base_name)
      # Tag updater loads the file, makes the replacements, and returns the
      # modified file contents
      content = tag_updater.process(full_path)
      with open(full_path, 'w') as f:
        f.write(content)
# File names that `replace_refs` neither processes nor copies.
EXCLUDED = set(['__init__.py', 'OWNERS', 'README.txt'])
def replace_refs(src_dir, output_dir, reference_resolver, file_pattern='*.md'):
  """Fix @{} references in all files under `src_dir` matching `file_pattern`.

  A matching directory structure, with the modified files is
  written to `output_dir`.
  `{"__init__.py","OWNERS","README.txt"}` are skipped.
  Files not matching `file_pattern` (using `fnmatch`) are copied with no change.
  Also, files in the `api_guides/python` directory get explicit ids set on all
  heading-2s to ensure back-links work.

  Args:
    src_dir: The directory to convert files from.
    output_dir: The root directory to write the resulting files to.
    reference_resolver: A `parser.ReferenceResolver` to make the replacements.
    file_pattern: Only replace references in files matching this pattern,
      using `fnmatch`. Non-matching files are copied unchanged.
  """
  # Iterate through all the source files and process them.
  for dirpath, _, filenames in os.walk(src_dir):
    # How to get from `dirpath` to api_docs/python/
    relative_path_to_root = os.path.relpath(
        path=os.path.join(src_dir, 'api_docs/python'), start=dirpath)
    # Make the directory under output_dir.
    new_dir = os.path.join(output_dir,
                           os.path.relpath(path=dirpath, start=src_dir))
    if not os.path.exists(new_dir):
      os.makedirs(new_dir)
    for base_name in filenames:
      if base_name in EXCLUDED:
        continue
      full_in_path = os.path.join(dirpath, base_name)
      # Set the `current_doc_full_name` so bad files can be reported on errors.
      reference_resolver.current_doc_full_name = full_in_path
      suffix = os.path.relpath(path=full_in_path, start=src_dir)
      full_out_path = os.path.join(output_dir, suffix)
      # Copy files that do not match the file_pattern, unmodified.
      if not fnmatch.fnmatch(base_name, file_pattern):
        shutil.copyfile(full_in_path, full_out_path)
        continue
      # Read/write as UTF-8 bytes so behavior matches on both PY2 and PY3.
      with open(full_in_path, 'rb') as f:
        content = f.read().decode('utf-8')
      content = reference_resolver.replace_references(content,
                                                      relative_path_to_root)
      with open(full_out_path, 'wb') as f:
        f.write(content.encode('utf-8'))
class DocGenerator(object):
  """Main entry point for generating docs."""

  def __init__(self):
    self.argument_parser = argparse.ArgumentParser()
    self._py_modules = None
    # Default visibility maps; callers can extend or replace them below.
    self._private_map = _get_default_private_map()
    self._do_not_descend_map = _get_default_do_not_descend_map()
    self.yaml_toc = True
    self.argument_parser.add_argument(
        '--no_search_hints',
        dest='search_hints',
        action='store_false',
        default=True)

  def add_output_dir_argument(self):
    """Add the required `--output_dir` flag to the argument parser."""
    self.argument_parser.add_argument(
        '--output_dir',
        type=str,
        default=None,
        required=True,
        help='Directory to write docs to.')

  def add_src_dir_argument(self):
    """Add the optional `--src_dir` flag (defaults to a fresh temp dir)."""
    self.argument_parser.add_argument(
        '--src_dir',
        type=str,
        default=tempfile.mkdtemp(),
        required=False,
        help='Optional directory of source docs to add api_docs links to')

  def add_base_dir_argument(self, default_base_dir):
    """Add the `--base_dir` flag used to shorten file names shown in docs."""
    self.argument_parser.add_argument(
        '--base_dir',
        type=str,
        default=default_base_dir,
        help='Base directory to strip from file names referenced in docs.')

  def parse_known_args(self):
    """Parse known CLI flags, silently ignoring unknown ones."""
    flags, _ = self.argument_parser.parse_known_args()
    return flags

  def add_to_private_map(self, d):
    """Merge `d` into the map of module -> private member names."""
    add_dict_to_dict(d, self._private_map)

  def add_to_do_not_descend_map(self, d):
    """Merge `d` into the map of module -> members not to descend into."""
    add_dict_to_dict(d, self._do_not_descend_map)

  def set_private_map(self, d):
    """Replace the private-members map entirely."""
    self._private_map = d

  def set_do_not_descend_map(self, d):
    """Replace the do-not-descend map entirely."""
    self._do_not_descend_map = d

  def set_py_modules(self, py_modules):
    """Set the list of `(name, module)` pairs to document."""
    self._py_modules = py_modules

  def py_module_names(self):
    """Return module names to document; requires `set_py_modules` first."""
    if self._py_modules is None:
      raise RuntimeError(
          'Must call set_py_modules() before running py_module_names().')
    return [name for (name, _) in self._py_modules]

  def make_reference_resolver(self, visitor, doc_index):
    """Build a `parser.ReferenceResolver` from a visitor and a doc index."""
    return parser.ReferenceResolver.from_visitor(
        visitor, doc_index, py_module_names=self.py_module_names())

  def make_parser_config(self, visitor, reference_resolver, guide_index,
                         base_dir):
    """Bundle the extraction results into a `parser.ParserConfig`."""
    return parser.ParserConfig(
        reference_resolver=reference_resolver,
        duplicates=visitor.duplicates,
        duplicate_of=visitor.duplicate_of,
        tree=visitor.tree,
        index=visitor.index,
        reverse_index=visitor.reverse_index,
        guide_index=guide_index,
        base_dir=base_dir)

  def run_extraction(self):
    """Run `extract` over the configured modules and visibility maps."""
    return extract(self._py_modules, self._private_map,
                   self._do_not_descend_map)

  def build(self, flags):
    """Build all the docs.

    This produces two outputs
    python api docs:
      * generated from modules set with `set_py_modules`.
      * written to '{FLAGS.output_dir}/api_docs/python/'
    non-api docs:
      * Everything in '{FLAGS.src_dir}' is copied to '{FLAGS.output_dir}'.
      * '@{}' references in '.md' files are replaced with links.
      * '.md' files under 'api_guides/python' have explicit ids set for their
        second level headings.

    Args:
      flags:
        * src_dir: Where to fetch the non-api-docs.
        * base_dir: Base of the docs directory (Used to build correct
          relative links).
        * output_dir: Where to write the resulting docs.

    Returns:
      The number of errors encountered while processing.
    """
    # Extract the python api from the _py_modules
    doc_index = build_doc_index(flags.src_dir)
    visitor = self.run_extraction()
    reference_resolver = self.make_reference_resolver(visitor, doc_index)
    # Build the guide_index for the api_docs back links.
    root_title = getattr(flags, 'root_title', 'TensorFlow')
    guide_index = _build_guide_index(
        os.path.join(flags.src_dir, 'api_guides/python'))
    # Write the api docs.
    parser_config = self.make_parser_config(visitor, reference_resolver,
                                            guide_index, flags.base_dir)
    output_dir = os.path.join(flags.output_dir, 'api_docs/python')
    write_docs(
        output_dir,
        parser_config,
        yaml_toc=self.yaml_toc,
        root_title=root_title,
        search_hints=getattr(flags, 'search_hints', True),
        site_api_path=getattr(flags, 'site_api_path', None))
    # Replace all the @{} references in files under `FLAGS.src_dir`
    replace_refs(flags.src_dir, flags.output_dir, reference_resolver, '*.md')
    # Fix the tags in the guide dir.
    guide_dir = os.path.join(flags.output_dir, 'api_guides/python')
    if os.path.exists(guide_dir):
      update_id_tags_inplace(guide_dir)
    # Report all errors found by the reference resolver, and return the error
    # code.
    parser_config.reference_resolver.log_errors()
    return parser_config.reference_resolver.num_errors()
| 34.024024 | 80 | 0.666858 |
ace23f9dc3b8fe8e4c9fac88d95b1d392948b492 | 2,692 | py | Python | src/ffp/norms.py | sebpuetz/ffp | 76649e5206a262afde3d7c1db41798cc5447ae89 | [
"BlueOak-1.0.0"
] | 2 | 2019-11-12T06:07:39.000Z | 2019-11-19T13:24:02.000Z | src/ffp/norms.py | sebpuetz/ffp | 76649e5206a262afde3d7c1db41798cc5447ae89 | [
"BlueOak-1.0.0"
] | 3 | 2019-11-14T11:53:23.000Z | 2020-04-19T21:57:15.000Z | src/ffp/norms.py | sebpuetz/ffp | 76649e5206a262afde3d7c1db41798cc5447ae89 | [
"BlueOak-1.0.0"
] | null | null | null | """
Norms module.
"""
import struct
from os import PathLike
from typing import BinaryIO, Union
import numpy as np
from ffp.io import Chunk, find_chunk, ChunkIdentifier, TypeId, _pad_float32, _read_binary, \
FinalfusionFormatError, _write_binary
class Norms(np.ndarray, Chunk):
    """
    Embedding Norms.

    Norms subclass `numpy.ndarray`, all typical numpy operations are available.

    The ith norm is expected to correspond to the l2 norm of the ith row in the
    storage before normalizing it. Therefore, Norms should have at most the
    same length as a given Storage and are expected to match the length of the
    Vocabulary.
    """
    def __new__(cls, array: np.ndarray):
        # Norms are serialized as little-endian float32, so only accept a 1-d
        # float32 array; `view` re-types the same buffer without copying.
        if array.dtype != np.float32 or array.ndim != 1:
            raise TypeError("expected 1-d float32 array")
        return array.view(cls)
    @staticmethod
    def chunk_identifier():
        # Identifies this chunk type inside a finalfusion file.
        return ChunkIdentifier.NdNorms
    @staticmethod
    def read_chunk(file: BinaryIO) -> 'Norms':
        # Chunk header: number of norms (u64) followed by the type id (u32).
        n_norms, type_id = _read_binary(file, "<QI")
        if int(TypeId.f32) != type_id:
            raise FinalfusionFormatError(
                f"Invalid Type, expected {TypeId.f32}, got {type_id}")
        # Skip the padding that aligns the float32 data to a 4-byte boundary.
        padding = _pad_float32(file.tell())
        file.seek(padding, 1)
        array = np.fromfile(file=file, count=n_norms, dtype=np.float32)
        return Norms(array)
    def write_chunk(self, file: BinaryIO):
        """Write this chunk: identifier, length/count/type header, f32 data."""
        _write_binary(file, "<I", int(self.chunk_identifier()))
        padding = _pad_float32(file.tell())
        # The declared chunk length covers header, padding and the raw data.
        chunk_len = struct.calcsize(
            "QI") + padding + self.size * struct.calcsize("f")
        _write_binary(file, f"<QQI{padding}x", chunk_len, self.size,
                      int(TypeId.f32))
        self.tofile(file)
    def __getitem__(self, key):
        # Slices keep the Norms type; scalar indexing returns a plain value.
        if isinstance(key, slice):
            return Norms(super().__getitem__(key))
        return super().__getitem__(key)
def load_norms(file: Union[str, bytes, int, PathLike]) -> Norms:
    """
    Load an Norms chunk from the given file.

    Parameters
    ----------
    file : str, bytes, int, PathLike
        Finalfusion file with a norms chunk.

    Returns
    -------
    storage : Norms
        The Norms from the file.

    Raises
    ------
    ValueError
        If the file did not contain an Norms chunk.
    """
    with open(file, "rb") as inf:
        found = find_chunk(inf, [ChunkIdentifier.NdNorms])
        if found is None:
            raise ValueError("File did not contain a Norms chunk")
        if found != ChunkIdentifier.NdNorms:
            raise ValueError(f"unexpected chunk: {str(found)}")
        return Norms.read_chunk(inf)
# Public API of this module.
__all__ = ['Norms', 'load_norms']
| 30.247191 | 95 | 0.63373 |
ace2420f2e1dc35c209ef46aa50de0b38e2d0e11 | 2,434 | py | Python | trip_registration.py | amkudr/roadbotproject | f3a0cd10b85bc43e8c823b8670c8b4801e9ed3ab | [
"MIT"
] | null | null | null | trip_registration.py | amkudr/roadbotproject | f3a0cd10b85bc43e8c823b8670c8b4801e9ed3ab | [
"MIT"
] | 1 | 2021-06-21T12:41:08.000Z | 2021-06-21T12:41:08.000Z | trip_registration.py | amkudr/roadbotproject | f3a0cd10b85bc43e8c823b8670c8b4801e9ed3ab | [
"MIT"
] | 1 | 2021-06-21T08:01:12.000Z | 2021-06-21T08:01:12.000Z | from telegram import ParseMode, ReplyKeyboardRemove
from telegram.ext import ConversationHandler
from datetime import datetime
from db import db_session
from models import Trip, Car
import telegramcalendar
def trip_registration_start(update, context):
    """Entry point of the trip registration conversation: ask for a date."""
    calendar_markup = telegramcalendar.create_calendar()
    update.message.reply_text(
        "Введите дату поездки DD.MM.YYYY",
        reply_markup=calendar_markup)
    return "calendar"
def inline_handler(update, context):
    """Handle inline calendar clicks; once a date is chosen, ask for a time."""
    bot = update.callback_query.message.bot
    selected, date = telegramcalendar.process_calendar_selection(bot, update)
    if not selected:
        # The user is still navigating the calendar widget.
        return "calendar"
    context.user_data["trip"] = {"date": date.strftime("%d/%m/%Y")}
    bot.send_message(
        chat_id=update.callback_query.from_user.id,
        text="Введите время отправления HH:MM",
        reply_markup=ReplyKeyboardRemove())
    return "time"
def trip_time(update, context):
    """Store the departure time and prompt for the departure place."""
    context.user_data["trip"]["time"] = update.message.text
    update.message.reply_text("Введите место отправления")
    return "arrival_point"
def trip_arrival_point(update, context):
    """Store the place the user typed and ask for the next one.

    NOTE(review): the value stored under "arrival_point" is the answer to the
    previous prompt ("Введите место отправления", i.e. the departure place),
    so the key names appear swapped relative to their meaning. The swap is
    applied consistently across the whole conversation — confirm with the
    schema before renaming anything.
    """
    context.user_data["trip"]["arrival_point"] = update.message.text
    update.message.reply_text(
        "Введите место прибытия "
    )
    return "departure_point"
def trip_departure_point(update, context):
    """Final conversation step: confirm the trip and persist it to the DB."""
    departure_point = update.message.text
    context.user_data["trip"]["departure_point"] = departure_point
    # HTML summary sent back to the user (telegram parse_mode=HTML).
    user_text = f"""
    <b>Дата поездки</b>: {context.user_data["trip"]["date"]}
    <b>Время отправления</b>: {context.user_data["trip"]["time"]}
    <b>Место отправления</b>: {context.user_data["trip"]["arrival_point"]}
    <b>Место прибытия</b>: {context.user_data["trip"]["departure_point"]}
    <b>Поездка успешно добавлена</b>
    """
    update.message.reply_text(user_text, parse_mode=ParseMode.HTML)
    # Date was stored as "DD/MM/YYYY" and time as "HH:MM" in earlier steps.
    # NOTE(review): a malformed time string raises ValueError here because the
    # user's input is never validated — confirm whether the ConversationHandler
    # has a fallback for that.
    day, month, year = map(int, context.user_data["trip"]["date"].split('/'))
    hour, minute = map(int, context.user_data["trip"]["time"].split(':'))
    trip = Trip(
        # NOTE(review): `.scalar()` returns None when the user has no Car,
        # which would raise AttributeError — confirm a car is required earlier.
        car_id=Car.query.filter(
            Car.driver_id == update.message.from_user['id']).scalar().id,
        date=datetime(year, month, day, hour, minute),
        arrival_point=context.user_data["trip"]["arrival_point"],
        departure_point=context.user_data["trip"]["departure_point"]
    )
    db_session.add(trip)
    db_session.commit()
    return ConversationHandler.END
| 35.275362 | 77 | 0.695974 |
ace2424f5348e6216e4912fbfd8bbce1cf69d814 | 3,468 | py | Python | code.py | tywtyw2002/python-keyboard | 534d1cb56099569993bc9296524392eaee94cbce | [
"MIT"
] | null | null | null | code.py | tywtyw2002/python-keyboard | 534d1cb56099569993bc9296524392eaee94cbce | [
"MIT"
] | null | null | null | code.py | tywtyw2002/python-keyboard | 534d1cb56099569993bc9296524392eaee94cbce | [
"MIT"
] | null | null | null |
from PYKB import *
class CKeyboard(Keyboard):
    """Keyboard subclass that advertises under a custom per-slot BLE name."""

    def change_bt(self, n):
        """Switch to bluetooth slot `n`, re-advertising as "CosHiM KB-NN".

        Overrides the PYKB behaviour so each BT slot gets a distinct,
        branded advertisement name.
        """
        changed = False
        # NOTE(review): usb_status == 3 appears to mean "output over USB" and
        # 1 "output over BLE" — confirm against the PYKB implementation.
        if self.usb_status == 3:
            self.usb_status = 1
            changed = True
        if n != self.ble_id:
            # New slot: rename the advertisement before advertising again.
            changed = True
            self.set_bt_id(n)
            self.ble.name = "CosHiM KB-%02d" % n
            self.advertisement.complete_name = self.ble.name
            self.start_advertising()
        elif not self.ble.connected and not self.ble._adapter.advertising:
            # Same slot, but neither connected nor advertising: resume.
            self.start_advertising()
        if changed:
            self.on_device_changed("BT{}".format(n))
# Keycode aliases used by the keymap below.
___ = TRANSPARENT
BOOT = BOOTLOADER
# Tap for the key itself, hold to activate layer 1 / 2.
L1 = LAYER_TAP(1)
L2 = LAYER_TAP(2)
# Tap-or-toggle access to layers 1 and 3.
FN2 =LAYER_TAP_TOGGLE(1)
FN3 =LAYER_TAP_TOGGLE(3)
# LSFT4 = LAYER_MODS(4, MODS(LSHIFT))
# RSFT4 = LAYER_MODS(4, MODS(RSHIFT))
# Semicolon & Ctrl
# SCC = MODS_TAP(MODS(RCTRL), ';')
CALC = APPLAUNCH_CALCULATOR
# NOTE(review): 0x80/0x81 look like volume keycodes consumed by PYKB — confirm.
VOLU = 0x80 # Keyboard Volume Up
VOLD = 0x81 # Keyboard Volume Down
# Keymap: one tuple per layer, keys listed row by row in matrix order.
keyboard.keymap = (
    # layer 0: base QWERTY layout
    (
        ESC, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, '-', '=', BACKSPACE,
        TAB, Q, W, E, R, T, Y, U, I, O, P, '[', ']', '|',
        LCTRL, A, S, D, F, G, H, J, K, L, ';', '"', ENTER,
        LSHIFT, Z, X, C, V, B, N, M, ',', '.', '/', RSHIFT,
        LCTRL, LALT, LGUI, SPACE, RGUI, L1, L2, GRAVE
    ),
    # layer 1: function keys, navigation and media controls
    (
        GRAVE, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, DEL,
        ___, ___, UP, ___, ___, ___, CALC, ___, INSERT, ___,PRTSCN,SCROLLLOCK,PAUSE,___,
        ___,LEFT,DOWN,RIGHT,___, ___, ___, ___, ___, ___, HOME, PGUP , ___,
        ___, ___, MENU, ___, ___, ___, VOLD,VOLU, MUTE, END, PGDN, ___,
        ___, ___, ___, ___, ___, ___, ___, ___
    ),
    # layer 2: mostly disabled; only exposes the FN2/FN3 toggles
    (
        NO , NO , NO , NO , NO , NO , NO , NO , NO , NO , NO , NO , NO , NO ,
        NO , NO , NO , NO , NO , NO , NO , NO , NO , NO , NO , NO , NO , NO ,
        NO , NO , NO , NO , NO , NO , NO , NO , NO , NO , NO , NO , NO ,
        NO , NO , NO , NO , NO , FN3, NO , NO , NO , NO , NO , NO ,
        NO , NO , NO , FN2, NO , NO , NO , NO
    ),
    # layer 3: bluetooth slot selection, USB toggle and power controls
    (
        BT_TOGGLE,BT1,BT2, BT3,BT4,BT5,BT6,BT7, BT8, BT9, BT0, NO , NO , NO ,
        NO , NO , NO , NO , NO , NO ,NO ,USB_TOGGLE,NO ,NO ,SHUTDOWN ,NO ,NO , NO ,
        NO , NO , SUSPEND, NO , NO , NO , NO , NO , NO , NO , NO , NO , NO ,
        NO , NO , NO , NO , NO , MACRO(10) , NO , NO , NO , NO , NO , NO ,
        NO , NO , NO , FN3 , NO , NO , NO , NO
    ),
)
def macro_handler(dev, n, is_down):
    """Handle press/release events for MACRO(n) keys.

    Args:
        dev: keyboard device; `send_text` types feedback on the host.
        n: macro number from the keymap (MACRO(10) reports battery level).
        is_down: True on key press, False on release.
    """
    if is_down:
        dev.send_text('You pressed macro #{}\n'.format(n))
    elif n == 10:
        # Fixed typo in the user-visible message ("Battary" -> "Battery").
        dev.send_text('Battery Lvl: {}%'.format(battery_level()))
    else:
        dev.send_text('You released macro #{}\n'.format(n))
def pairs_handler(dev, n):
    """Type a notification when pair-key combination `n` is triggered."""
    message = 'You just triggered pair keys #{}\n'.format(n)
    dev.send_text(message)
# Wire the callbacks into the keyboard.
keyboard.macro_handler = macro_handler
keyboard.pairs_handler = pairs_handler
# Pairs: J & K, U & I
keyboard.pairs = [{35, 36}, {20, 19}]
keyboard.verbose = True
# Hot fix ble name: replace the library default "PYKB NN" with "CosHiM KB-NN".
# NOTE(review): int(...) raises ValueError if the stored name does not start
# with "PYKB " (e.g. once this script has already renamed it) — confirm.
bt_idx = int(keyboard.ble.name.replace("PYKB ",""))
keyboard.ble.name = "CosHiM KB-%02d" % bt_idx
keyboard.advertisement.complete_name = keyboard.ble.name
keyboard.run()
ace242a0a4c9d8f5b7763ceb1c0526bb7c2571ad | 7,352 | py | Python | tests/routes/games_test.py | nyddogghr/6QuiPrend | 3e117d5a5c254d639d3794507681db43e0c37d9c | [
"MIT"
] | 1 | 2018-10-30T15:28:51.000Z | 2018-10-30T15:28:51.000Z | tests/routes/games_test.py | nyddogghr/SixQuiPrend | 3e117d5a5c254d639d3794507681db43e0c37d9c | [
"MIT"
] | null | null | null | tests/routes/games_test.py | nyddogghr/SixQuiPrend | 3e117d5a5c254d639d3794507681db43e0c37d9c | [
"MIT"
] | null | null | null | from flask import Flask
from passlib.hash import bcrypt
from sixquiprend.config import *
from sixquiprend.models.game import Game
from sixquiprend.models.user import User
from sixquiprend.sixquiprend import app, db
from sixquiprend.utils import *
import json
import unittest
class GamesTestCase(unittest.TestCase):
    """Integration tests for the /games routes of the SixQuiPrend app.

    Each test runs against a dedicated PostgreSQL test database that is
    created in setUp and dropped in tearDown.
    """

    # Credentials for the regular-player fixture account.
    USERNAME = 'User'
    PASSWORD = 'Password'
    # Credentials for the admin fixture account.
    ADMIN_USERNAME = 'Admin'
    ADMIN_PASSWORD = 'Password'

    def setUp(self):
        """Point the app at a test database and seed one player and one admin."""
        app.config['SERVER_NAME'] = 'localhost'
        app.config['WTF_CSRF_ENABLED'] = False
        app.config['DATABASE_NAME'] = 'sixquiprend_test'
        db_path = app.config['DATABASE_USER'] + ':' + app.config['DATABASE_PASSWORD']
        db_path += '@' + app.config['DATABASE_HOST'] + '/' + app.config['DATABASE_NAME']
        app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://' + db_path
        app.config['TESTING'] = True
        self.app = app.test_client()
        ctx = app.app_context()
        ctx.push()
        create_db()
        db.create_all()
        user = User(username=self.USERNAME,
                password=bcrypt.hash(self.PASSWORD),
                active=True)
        admin = User(username=self.ADMIN_USERNAME,
                password=bcrypt.hash(self.ADMIN_PASSWORD),
                active=True,
                urole=User.ROLE_ADMIN)
        db.session.add(user)
        db.session.add(admin)
        db.session.commit()

    def tearDown(self):
        """Drop all tables so every test starts from a clean schema."""
        db.session.remove()
        db.drop_all()

    def login(self):
        """Log the regular fixture user in through the HTTP API."""
        rv = self.app.post('/login', data=json.dumps(dict(
            username=self.USERNAME,
            password=self.PASSWORD,
        )), content_type='application/json')
        assert rv.status_code == 201

    def login_admin(self):
        """Log the admin fixture user in through the HTTP API."""
        rv = self.app.post('/login', data=json.dumps(dict(
            username=self.ADMIN_USERNAME,
            password=self.ADMIN_PASSWORD,
        )), content_type='application/json')
        assert rv.status_code == 201

    def get_current_user(self):
        """Return the logged-in User model, or None when anonymous."""
        rv = self.app.get('/users/current')
        assert rv.status_code == 200
        result = json.loads(rv.data)
        if result['user'] != {}:
            return User.find(result['user']['id'])

    def create_user(self, active=True, urole=User.ROLE_PLAYER):
        """Insert and return a new user with a generated unique username."""
        username = 'User #'+str(User.query.count())
        password = 'Password'
        user = User(username=username,
                password=bcrypt.hash(password),
                active=active,
                urole=urole)
        db.session.add(user)
        db.session.commit()
        return user

    def create_game(self, status=Game.STATUS_CREATED, users=[], owner_id=None):
        """Insert and return a game with the given status, users and owner.

        NOTE(review): the mutable default `users=[]` is shared across calls;
        it is only iterated here, so it is harmless as written.
        """
        game = Game(status=status)
        for user in users:
            game.users.append(user)
        game.owner_id = owner_id
        db.session.add(game)
        db.session.commit()
        return game

    ################################################################################
    ## Routes
    ################################################################################

    def test_get_games(self):
        """GET /games lists games and honors limit/offset pagination."""
        game1 = self.create_game()
        game2 = self.create_game()
        rv = self.app.get('/games')
        assert rv.status_code == 200
        games = json.loads(rv.data)['games']
        assert len(games) == 2
        assert games[0]['id'] == game1.id
        assert games[1]['id'] == game2.id
        # Test limit and offset
        rv = self.app.get('/games', query_string=dict(limit=1))
        assert rv.status_code == 200
        games = json.loads(rv.data)['games']
        assert len(games) == 1
        assert games[0]['id'] == game1.id
        rv = self.app.get('/games', query_string=dict(limit=1, offset=1))
        assert rv.status_code == 200
        games = json.loads(rv.data)['games']
        assert len(games) == 1
        assert games[0]['id'] == game2.id

    def test_count_games(self):
        """GET /games/count returns the number of games."""
        game1 = self.create_game()
        game2 = self.create_game()
        rv = self.app.get('/games/count')
        assert rv.status_code == 200
        count = json.loads(rv.data)['count']
        assert count == 2

    def test_get_game(self):
        """GET /games/<id> returns the requested game for a logged-in user."""
        game = self.create_game()
        self.login()
        rv = self.app.get('/games/' + str(game.id))
        assert rv.status_code == 200
        game_response = json.loads(rv.data)['game']
        assert game_response['id'] == game.id

    def test_create_game(self):
        """POST /games creates a game owned and joined by the current user."""
        self.login()
        rv = self.app.post('/games', content_type='application/json')
        assert rv.status_code == 201
        game = json.loads(rv.data)['game']
        assert game['status'] == Game.STATUS_CREATED
        assert game['users'][0]['username'] == self.USERNAME

    def test_delete_game(self):
        """DELETE /games/<id> removes the game (admin only)."""
        game = self.create_game()
        self.login_admin()
        rv = self.app.delete('/games/'+str(game.id))
        assert rv.status_code == 204
        game_db = Game.query.get(game.id)
        assert game_db == None

    def test_enter_game(self):
        """POST /games/<id>/enter adds the current user to a created game."""
        self.login()
        game = self.create_game(status=Game.STATUS_CREATED)
        rv = self.app.post('/games/' + str(game.id) + '/enter', content_type='application/json')
        assert rv.status_code == 201
        game = json.loads(rv.data)['game']
        assert game['status'] == Game.STATUS_CREATED
        assert game['users'][0]['username'] == self.USERNAME

    def test_get_available_bots_for_game(self):
        """GET /games/<id>/users/bots lists bots not yet in the game."""
        self.login()
        bot = self.create_user(urole=User.ROLE_BOT)
        game = self.create_game(status=Game.STATUS_CREATED,
                owner_id=self.get_current_user().id)
        rv = self.app.get('/games/' + str(game.id) + '/users/bots')
        assert rv.status_code == 200
        available_bots = json.loads(rv.data)['available_bots']
        assert available_bots == [bot.serialize()]

    def test_add_bot_to_game(self):
        """POST /games/<id>/users/<bot_id>/add adds a bot to the game."""
        self.login()
        game = self.create_game(status=Game.STATUS_CREATED,
                users=[self.get_current_user()],
                owner_id=self.get_current_user().id)
        bot = self.create_user(urole=User.ROLE_BOT)
        rv = self.app.post('/games/' + str(game.id) + '/users/' + str(bot.id) +
                '/add')
        assert rv.status_code == 201
        game = json.loads(rv.data)['game']
        assert game['users'] == [self.get_current_user().serialize(),
                bot.serialize()]

    def test_leave_game(self):
        """PUT /games/<id>/leave removes the current user from the game."""
        self.login()
        user = self.create_user()
        game = self.create_game(status=Game.STATUS_CREATED,
                users=[self.get_current_user(), user],
                owner_id=self.get_current_user().id)
        rv = self.app.put('/games/' + str(game.id) + '/leave')
        assert rv.status_code == 200
        game = json.loads(rv.data)['game']
        assert game['users'] == [user.serialize()]

    def test_start_game(self):
        """PUT /games/<id>/start moves the game to the STARTED status."""
        add_cards()
        self.login()
        user = self.create_user()
        game = self.create_game(status=Game.STATUS_CREATED,
                users=[self.get_current_user(), user],
                owner_id=self.get_current_user().id)
        rv = self.app.put('/games/' + str(game.id) + '/start')
        assert rv.status_code == 200
        game = json.loads(rv.data)['game']
        assert game['status'] == Game.STATUS_STARTED
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 36.39604 | 96 | 0.57685 |
ace242c68020784b4fc127ae4a9cf5a687ff09e7 | 5,695 | py | Python | aiohue/v2/controllers/groups.py | balloob/aiohue | f50478027ccf3b8ee6b804abb9fb48ea436e1561 | [
"Apache-2.0"
] | 14 | 2018-03-02T15:39:19.000Z | 2020-02-25T12:52:40.000Z | aiohue/v2/controllers/groups.py | balloob/aiohue | f50478027ccf3b8ee6b804abb9fb48ea436e1561 | [
"Apache-2.0"
] | 16 | 2018-03-19T15:02:18.000Z | 2020-02-08T02:06:07.000Z | aiohue/v2/controllers/groups.py | balloob/aiohue | f50478027ccf3b8ee6b804abb9fb48ea436e1561 | [
"Apache-2.0"
] | 14 | 2018-03-17T10:43:30.000Z | 2020-03-12T10:49:51.000Z | """Controller holding and managing HUE group resources."""
import asyncio
from typing import TYPE_CHECKING, List, Optional, Tuple, Type, Union
from ..models.feature import (
AlertEffectType,
AlertFeaturePut,
ColorFeaturePut,
ColorPoint,
ColorTemperatureFeaturePut,
DimmingFeaturePut,
DynamicsFeaturePut,
OnFeature,
)
from ..models.grouped_light import GroupedLight, GroupedLightPut
from ..models.light import Light
from ..models.resource import ResourceTypes
from ..models.room import Room
from ..models.scene import Scene
from ..models.zone import Zone
from .base import BaseResourcesController, GroupedControllerBase
if TYPE_CHECKING:
from .. import HueBridgeV2
class RoomController(BaseResourcesController[Type[Room]]):
    """Controller holding and managing HUE resources of type `room`."""

    item_type = ResourceTypes.ROOM
    item_cls = Room
    allow_parser_error = True

    def get_scenes(self, id: str) -> List[Scene]:
        """Get all scenes for this room."""
        return [
            scene for scene in self._bridge.scenes if scene.group.rid == id
        ]

    def get_lights(self, id: str) -> List[Light]:
        """Return all lights in given room."""
        if id not in self._items:
            return []
        lights: List[Light] = []
        # Rooms reference devices; each device may expose multiple lights.
        for device_id in self._items[id].devices:
            device = self._bridge.devices.get(device_id)
            if device is None:
                continue
            for light_id in device.lights:
                light = self._bridge.lights.get(light_id)
                if light is not None:
                    lights.append(light)
        return lights
class ZoneController(BaseResourcesController[Type[Zone]]):
    """Controller holding and managing HUE resources of type `zone`."""

    item_type = ResourceTypes.ZONE
    item_cls = Zone
    allow_parser_error = True

    def get_scenes(self, id: str) -> List[Scene]:
        """Get all scenes for this zone."""
        return [
            scene for scene in self._bridge.scenes if scene.group.rid == id
        ]

    def get_lights(self, id: str) -> List[Light]:
        """Return all lights in given zone."""
        if id not in self._items:
            return []
        # Zones reference lights directly through their children.
        member_ids = set()
        for child in self._items[id].children:
            if child.rtype == ResourceTypes.LIGHT:
                member_ids.add(child.rid)
        return [light for light in self._bridge.lights if light.id in member_ids]
class GroupedLightController(BaseResourcesController[Type[GroupedLight]]):
    """Controller holding and managing HUE resources of type `grouped_light`."""

    item_type = ResourceTypes.GROUPED_LIGHT
    item_cls = GroupedLight

    def get_zone(self, id: str) -> Union[Room, Zone, None]:
        """Get the zone or room connected to grouped light."""
        for group in self._bridge.groups:
            # Skip sibling grouped_light resources: only rooms/zones can
            # own the grouped_light we are resolving.
            if group.type == ResourceTypes.GROUPED_LIGHT:
                continue
            if group.grouped_light == id:
                return group
        return None

    def get_lights(self, id: str) -> List[Light]:
        """Return lights of the connected room/zone."""
        # Note that this is just a convenience method for backwards compatibility
        if zone := self.get_zone(id):
            if zone.type == ResourceTypes.ROOM:
                return self._bridge.groups.room.get_lights(zone.id)
            return self._bridge.groups.zone.get_lights(zone.id)
        return []

    async def set_flash(self, id: str, short: bool = False) -> None:
        """Send Flash command to grouped_light.

        A short flash is fanned out to the individual lights; a long flash
        maps to the group-level BREATHE alert effect.
        """
        if short:
            # redirect command to underlying lights; gather runs the
            # per-light requests concurrently.
            await asyncio.gather(
                *[
                    self._bridge.lights.set_flash(
                        id=light.id,
                        short=True,
                    )
                    for light in self.get_lights(id)
                ]
            )
            return
        await self.set_state(id, alert=AlertEffectType.BREATHE)

    async def set_state(
        self,
        id: str,
        on: Optional[bool] = None,
        brightness: Optional[float] = None,
        color_xy: Optional[Tuple[float, float]] = None,
        color_temp: Optional[int] = None,
        transition_time: Optional[int] = None,
        alert: Optional[AlertEffectType] = None,
    ) -> None:
        """Set supported feature(s) to grouped_light resource.

        Only the features whose argument is not None are included in the
        PUT payload sent to the bridge.
        """
        # Sending (color) commands to grouped_light was added in Bridge version 1.50.1950111030
        self._bridge.config.require_version("1.50.1950111030")
        update_obj = GroupedLightPut()
        if on is not None:
            update_obj.on = OnFeature(on=on)
        if brightness is not None:
            update_obj.dimming = DimmingFeaturePut(brightness=brightness)
        if color_xy is not None:
            update_obj.color = ColorFeaturePut(xy=ColorPoint(*color_xy))
        if color_temp is not None:
            update_obj.color_temperature = ColorTemperatureFeaturePut(mirek=color_temp)
        if transition_time is not None:
            update_obj.dynamics = DynamicsFeaturePut(duration=transition_time)
        if alert is not None:
            update_obj.alert = AlertFeaturePut(action=alert)
        await self.update(id, update_obj)
class GroupsController(GroupedControllerBase[Union[Room, Zone, GroupedLight]]):
    """Controller grouping resources of both room and zone."""

    def __init__(self, bridge: "HueBridgeV2") -> None:
        """Initialize instance."""
        self.grouped_light = GroupedLightController(bridge)
        self.room = RoomController(bridge)
        self.zone = ZoneController(bridge)
        # Register the three sub-controllers with the grouped base class.
        sub_controllers = [self.room, self.zone, self.grouped_light]
        super().__init__(bridge, sub_controllers)
| 35.59375 | 95 | 0.62511 |
ace24417f541a83f4d73689c26425577dc82d464 | 5,755 | py | Python | manhattan/tests/test_timerotating_log.py | cartlogic/manhattan | 069b5468baf3bc1f5faffe9bc90342254bad9a2d | [
"MIT"
] | 1 | 2020-06-07T23:43:14.000Z | 2020-06-07T23:43:14.000Z | manhattan/tests/test_timerotating_log.py | cartlogic/manhattan | 069b5468baf3bc1f5faffe9bc90342254bad9a2d | [
"MIT"
] | null | null | null | manhattan/tests/test_timerotating_log.py | cartlogic/manhattan | 069b5468baf3bc1f5faffe9bc90342254bad9a2d | [
"MIT"
] | 1 | 2018-03-03T16:13:50.000Z | 2018-03-03T16:13:50.000Z | from __future__ import absolute_import, division, print_function
import types
import time
from threading import Thread
from manhattan.record import Record, PageRecord, GoalRecord
from manhattan.log.timerotating import TimeRotatingLog
from .base import BaseTest, work_path
def set_fake_name(log, index):
    """Monkey-patch ``log.log_name_for`` so it always yields ``<path>.<index>``."""

    def fake_name(self, timestamp):
        # The timestamp is deliberately ignored; the suffix is pinned by index.
        return '{0}.{1}'.format(self.path, index)

    log.log_name_for = types.MethodType(fake_name, log)
def make_thread_consumer(log_r, process_from=None):
    """Start a daemon-style reader thread over ``log_r``.

    Returns (consumed_records, thread, last_pointer_container); the list and
    single-element container are mutated by the background thread.
    """
    consumed = []
    last_pointer_container = [None]
    log_r.sleep_delay = 0.001

    def consume(log):
        # Keep pulling records until the log is killed; remember the last
        # processed pointer so callers can resume later.
        for raw, pointer in log.process(stay_alive=True, process_from=process_from):
            consumed.append(Record.from_list(raw))
            last_pointer_container[0] = pointer

    reader_thread = Thread(target=consume, args=(log_r,))
    reader_thread.start()
    return consumed, reader_thread, last_pointer_container
class TimeRotatingLogTest(BaseTest):
    """Integration tests for TimeRotatingLog write/read behaviour."""

    def test_basic(self):
        """A single written record can be read back."""
        path = work_path('trl-basic')
        log_w = TimeRotatingLog(path)
        log_w.write(PageRecord(url='/foo').to_list())
        log_r = TimeRotatingLog(path)
        records = list(log_r.process(stay_alive=False))
        self.assertEqual(len(records), 1)
        rec = Record.from_list(records[0][0])
        self.assertEqual(rec.url, '/foo')

    def test_multiple_logs(self):
        """Records spread across rotated files are read back in order."""
        path = work_path('trl-multi')
        log_w = TimeRotatingLog(path)
        # Force writes into two distinct rotation files via fake names.
        set_fake_name(log_w, '001')
        log_w.write(PageRecord(url='/foo').to_list())
        set_fake_name(log_w, '004')
        log_w.write(PageRecord(url='/bar').to_list())
        log_r = TimeRotatingLog(path)
        records = list(log_r.process(stay_alive=False))
        self.assertEqual(len(records), 2)
        self.assertEqual(Record.from_list(records[0][0]).url, '/foo')
        self.assertEqual(Record.from_list(records[1][0]).url, '/bar')

    def test_stay_alive_single(self):
        """A live consumer picks up records written after it starts."""
        path = work_path('trl-stayalive')
        log_r = TimeRotatingLog(path)
        log_r.sleep_delay = 0.001
        consumed, consumer, _ = make_thread_consumer(log_r)
        try:
            self.assertEqual(len(consumed), 0)
            log_w = TimeRotatingLog(path)
            log_w.write(PageRecord(url='/baz').to_list())
            # Give the consumer thread a few poll cycles to catch up.
            time.sleep(log_r.sleep_delay * 10)
            self.assertEqual(len(consumed), 1)
            self.assertEqual(consumed[0].url, '/baz')
            log_w.write(PageRecord(url='/herp').to_list())
            time.sleep(log_r.sleep_delay * 10)
            self.assertEqual(len(consumed), 2)
            self.assertEqual(consumed[1].url, '/herp')
        finally:
            log_r.killed.set()

    def test_stay_alive_multiple(self):
        """A live consumer follows records across rotated files."""
        path = work_path('trl-stayalive-multi')
        log_r = TimeRotatingLog(path)
        log_r.sleep_delay = 0.001
        consumed, consumer, _ = make_thread_consumer(log_r)
        try:
            self.assertEqual(len(consumed), 0)
            log_w = TimeRotatingLog(path)
            set_fake_name(log_w, '357')
            log_w.write(PageRecord(url='/baz').to_list())
            time.sleep(log_r.sleep_delay * 10)
            self.assertEqual(len(consumed), 1)
            self.assertEqual(consumed[0].url, '/baz')
            # Switch to a new rotation file and write again.
            set_fake_name(log_w, '358')
            log_w.write(PageRecord(url='/herp').to_list())
            time.sleep(log_r.sleep_delay * 10)
            self.assertEqual(len(consumed), 2)
            self.assertEqual(consumed[1].url, '/herp')
        finally:
            log_r.killed.set()

    def test_stay_alive_nofiles(self):
        """Starting a consumer on a path with no log files must not raise."""
        log_r = TimeRotatingLog(work_path('trl-stayalive-none'))
        log_r.sleep_delay = 0.001
        consumed, consumer, _ = make_thread_consumer(log_r)
        log_r.killed.set()

    def test_unicode_names(self):
        """Non-ASCII goal names survive a write/read round trip."""
        path = work_path('trl-unicode')
        log_w = TimeRotatingLog(path)
        goal_name = u'Goo\xf6aa\xe1llll!!!'
        rec = GoalRecord(name=goal_name,
                         value='',
                         value_type='',
                         value_format='')
        log_w.write(rec.to_list())
        log_r = TimeRotatingLog(path)
        records = list(log_r.process(stay_alive=False))
        self.assertEqual(len(records), 1)
        rec = Record.from_list(records[0][0])
        self.assertEqual(rec.name, goal_name)

    def test_resume(self):
        """A new consumer resumes after the last pointer of a dead consumer."""
        path = work_path('trl-resume')
        log_w = TimeRotatingLog(path)
        # Create a thread consumer
        log_r1 = TimeRotatingLog(path)
        consumed, consumer, ptr_container = make_thread_consumer(log_r1)
        try:
            # Write one record
            log_w.write(PageRecord(url='/herp').to_list())
            time.sleep(log_r1.sleep_delay * 10)
            # Check that one record was read.
            self.assertEqual(len(consumed), 1)
            self.assertEqual(consumed[0].url, '/herp')
        finally:
            # Kill the thread
            log_r1.killed.set()
        # Wait for it to die.
        time.sleep(log_r1.sleep_delay * 10)
        last_pointer = ptr_container[0]
        self.assertIsNotNone(last_pointer)
        try:
            # Write one record
            log_w.write(PageRecord(url='/derp').to_list())
            time.sleep(log_r1.sleep_delay * 10)
            # Create a new thread consumer
            log_r2 = TimeRotatingLog(path)
            consumed, consumer, _ = \
                make_thread_consumer(log_r2, process_from=last_pointer)
            time.sleep(log_r2.sleep_delay * 10)
            # Check that the second record was read (and only that one).
            self.assertEqual(len(consumed), 1)
            self.assertEqual(consumed[0].url, '/derp')
        finally:
            log_r2.killed.set()
| 32.698864 | 78 | 0.607819 |
ace2445b116be9638b0c13f18a1aef232935b20b | 3,467 | py | Python | src/agoro_field_boundary_detector/google_earth_engine/visualisation.py | radix-ai/agoro-field-boundary-detector | 9dd911df096ce865471ed0330174044f4172cc66 | [
"MIT"
] | 13 | 2021-07-19T07:25:26.000Z | 2022-02-20T19:50:41.000Z | src/agoro_field_boundary_detector/google_earth_engine/visualisation.py | m0rp43us/agoro-field-boundary-detector | 9dd911df096ce865471ed0330174044f4172cc66 | [
"MIT"
] | 2 | 2021-07-27T19:58:51.000Z | 2021-09-19T04:01:26.000Z | src/agoro_field_boundary_detector/google_earth_engine/visualisation.py | m0rp43us/agoro-field-boundary-detector | 9dd911df096ce865471ed0330174044f4172cc66 | [
"MIT"
] | 7 | 2021-08-20T13:16:34.000Z | 2022-02-03T18:54:48.000Z | """Visualisation methods."""
from typing import Any, Dict, Tuple
import ee
import folium
def add_ee_layer(
    self: Any, ee_object: Any, vis_params: Dict[str, Any], name: str, show: bool = True
) -> None:
    """Display Earth Engine objects (images, collections, geometries) on a folium map.

    Args:
        self: The folium ``Map`` this function is monkey-patched onto.
        ee_object: An ``ee.Image``, ``ee.ImageCollection``, ``ee.Geometry``
            or ``ee.FeatureCollection`` to render.
        vis_params: Visualization parameters passed to ``getMapId`` (or used
            as the GeoJson ``style_function`` for geometries).
        name: Layer name shown in the folium layer control.
        show: Whether the layer is initially visible.
    """

    def _add_tile_layer(image: Any) -> None:
        # Shared path for everything renderable as image tiles: fetch the
        # map id and attach it as a folium TileLayer.
        map_id_dict = image.getMapId(vis_params)
        folium.raster_layers.TileLayer(
            tiles=map_id_dict["tile_fetcher"].url_format,
            attr="Google Earth Engine",
            name=name,
            overlay=True,
            control=True,
            show=show,
        ).add_to(self)

    try:
        if isinstance(ee_object, (ee.Image, ee.image.Image)):
            _add_tile_layer(ee.Image(ee_object))
        elif isinstance(ee_object, ee.imagecollection.ImageCollection):
            # Collections are flattened to a single mosaic image first.
            _add_tile_layer(ee.Image(ee_object.mosaic()))
        elif isinstance(ee_object, ee.geometry.Geometry):
            folium.GeoJson(
                data=ee_object.getInfo(),
                style_function=vis_params,
                name=name,
                overlay=True,
                control=True,
                show=show,
            ).add_to(self)
        elif isinstance(ee_object, ee.featurecollection.FeatureCollection):
            # Paint the feature outlines onto an empty image and render that.
            _add_tile_layer(ee.Image(ee.Image().paint(ee_object, 0, 2)))
    except Exception:
        # Any EE/folium failure simply skips this layer; previously this was
        # a bare ``except:`` which also swallowed KeyboardInterrupt/SystemExit.
        print(f"Could not display {name}")
# Add EE drawing method to folium.
folium.Map.add_ee_layer = add_ee_layer
def show_polygon(
    mp: Any,
    polygon: ee.Geometry.Polygon,
    color: str = "#ff0000",
    tag: str = "Bounding Box",
) -> Any:
    """Draw ``polygon`` on the folium map ``mp`` and return the map."""
    def _style(_feature):
        # Outline only: transparent fill in the requested color.
        return {"color": color, "fillOpacity": 0}

    mp.add_ee_layer(polygon, _style, tag)
    return mp
def show_point(
    mp: Any,
    point: ee.Geometry.Point,
    color: str = "#ff0000",
    tag: str = "Point",
) -> Any:
    """Draw ``point`` on the folium map ``mp`` and return the map."""
    def _style(_feature):
        return {"color": color}

    mp.add_ee_layer(point, _style, tag)
    return mp
def create_map(
    coordinate: Tuple[float, float],
    zoom: int = 15,
) -> Any:
    """Create a folium map centered on ``coordinate`` at the given zoom level."""
    map_instance = folium.Map(location=coordinate, zoom_start=zoom)
    return map_instance
| 29.381356 | 87 | 0.557254 |
ace244bc23687136b493117fce607c898c285d99 | 1,268 | py | Python | proj01_ifelse/proj01.py | wesleyhodge/vsa-programing | 7cd91a057bde938605390da27036896c976a4fc3 | [
"MIT"
] | 1 | 2018-12-07T01:40:33.000Z | 2018-12-07T01:40:33.000Z | proj01_ifelse/proj01.py | wesleyhodge/vsa-programing | 7cd91a057bde938605390da27036896c976a4fc3 | [
"MIT"
] | null | null | null | proj01_ifelse/proj01.py | wesleyhodge/vsa-programing | 7cd91a057bde938605390da27036896c976a4fc3 | [
"MIT"
] | null | null | null | # Name:
# Date:
# proj01: A Simple Program
# Part I:
# This program asks the user for his/her name and grade.
#Then, it prints out a sentence that says the number of years until they graduate.
# Part II:
# This program asks the user for his/her name and birth month.
# Then, it prints a sentence that says the number of days and months until their birthday
# If you complete extensions, describe your extensions here!
day=12
month=6
name=raw_input("what is your name?")
#firstletter=name[0].upper()
#name=name[1:].lower()
#grade=int(raw_input("what is your grade?"))
#print str(firstletter+name) + ", you will graduate from high school in " + str(13-grade) + " years."
birth_month=int(raw_input("enter your birth month as a number"))
birth_day=int(raw_input("what is the day of the month you were born?"))
age=int(raw_input("how old are you?"))
if birth_month >=month:
m=birth_month-month
else:
m=12-(month-birth_month)
if birth_day >=day:
d=birth_day-day
else:
m=m-1
d=31-day+birth_day
print str(name) + ", you have " + str(m) + " months and " + str(d) + " days till your birth day."
if age>=13 and age>=17:
print "you can see g, pg, pg-13, and R."
elif age>=13:
print "you can see g, pg, and pg-13."
else:
print "you can see g, and pg." | 30.926829 | 101 | 0.694795 |
ace24568f87e8ac5ba3627750d5cbc9369595361 | 3,188 | py | Python | benchmark/Vimeo90K.py | askerlee/rift | d4dbf42b82f1f83dfab18f8da8fe3a1d0a716fa2 | [
"MIT"
] | 11 | 2022-02-14T08:31:04.000Z | 2022-03-29T08:20:17.000Z | benchmark/Vimeo90K.py | askerlee/rift | d4dbf42b82f1f83dfab18f8da8fe3a1d0a716fa2 | [
"MIT"
] | 3 | 2022-02-14T11:19:15.000Z | 2022-03-19T05:11:25.000Z | benchmark/Vimeo90K.py | askerlee/rift | d4dbf42b82f1f83dfab18f8da8fe3a1d0a716fa2 | [
"MIT"
] | null | null | null | import os
import sys
sys.path.append('.')
import cv2
import math
import torch
import argparse
import numpy as np
from torch.nn import functional as F
from model.pytorch_msssim import ssim_matlab
from model.RIFT import RIFT
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
parser = argparse.ArgumentParser()
parser.add_argument('--oldmodel', dest='use_old_model', action='store_true',
help='Use the old model in the RIFE repo')
parser.add_argument('--hd', action='store_true', help='Use newer HD model')
parser.add_argument('--cp', type=str, default=None, help='Load checkpoint from this path')
parser.add_argument('--count', type=int, default=-1, help='Evaluate on the first count images')
parser.add_argument('--multi', dest='multi', default="8,8,4", type=str, metavar='M',
help='Output M groups of flow')
parser.add_argument('--each', dest='out_summary', action='store_false',
help='Output the scores of each frame instead of outputting summary only')
args = parser.parse_args()
args.multi = [ int(m) for m in args.multi.split(",") ]
if args.out_summary:
endl = "\r"
else:
endl = "\n"
print(f"Args:\n{args}")
if args.use_old_model:
model = RIFT(use_old_model=True)
model.load_model('checkpoints/rife.pth')
elif args.hd:
from model.rife_new.v4_0.RIFE_HDv3 import Model
model = Model()
if not hasattr(model, 'version'):
model.version = 0
# -1: rank. If rank <= 0, remove "module" prefix from state_dict keys.
model.load_model('checkpoints/rife-hd.pth', -1)
print("Loaded 3.x/4.x HD model.")
else:
model = RIFT(multi=args.multi)
model.load_model(args.cp)
model.eval()
model.device()
path = 'data/vimeo_triplet/'
testlist_path = path + 'tri_testlist.txt'
f = open(testlist_path, 'r')
psnr_list = []
ssim_list = []
# Don't count empty lines ("\n" or "\r\n")
total_triplets = sum(len(line) > 2 for line in open(testlist_path, 'r'))
for i, line in enumerate(f):
if args.count > 0 and i == args.count:
break
name = str(line).strip()
if(len(name) <= 1):
continue
# print(path + 'sequences/' + name + '/im1.png')
I0 = cv2.imread(path + 'sequences/' + name + '/im1.png')
I1 = cv2.imread(path + 'sequences/' + name + '/im2.png')
I2 = cv2.imread(path + 'sequences/' + name + '/im3.png')
I0 = (torch.tensor(I0.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
I2 = (torch.tensor(I2.transpose(2, 0, 1)).to(device) / 255.).unsqueeze(0)
mid = model.inference(I0, I2)[0]
ssim = ssim_matlab(torch.tensor(I1.transpose(2, 0, 1)).to(device).unsqueeze(0) / 255., torch.round(mid * 255).unsqueeze(0) / 255.).detach().cpu().numpy()
mid = np.round((mid * 255).detach().cpu().numpy()).astype('uint8').transpose(1, 2, 0) / 255.
I1 = I1 / 255.
psnr = -10 * math.log10(((I1 - mid) * (I1 - mid)).mean())
psnr_list.append(psnr)
ssim_list.append(ssim)
print("{}/{} {} PSNR {:.3f} Avg {:.3f}, SSIM {:.3f} Avg {:.3f}".format( \
i+1, total_triplets, name, psnr, np.mean(psnr_list), ssim, np.mean(ssim_list)), end=endl)
if args.out_summary:
print()
| 37.505882 | 157 | 0.6399 |
ace24644523d296ac2499ccab3425d46332b50c8 | 13,920 | py | Python | homeassistant/components/insteon/utils.py | tbarbette/core | 8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c | [
"Apache-2.0"
] | 11 | 2018-02-16T15:35:47.000Z | 2020-01-14T15:20:00.000Z | homeassistant/components/insteon/utils.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 383 | 2020-03-06T13:01:14.000Z | 2022-03-11T13:14:13.000Z | homeassistant/components/insteon/utils.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 14 | 2018-08-19T16:28:26.000Z | 2021-09-02T18:26:53.000Z | """Utilities used by insteon component."""
import asyncio
import logging
from pyinsteon import devices
from pyinsteon.address import Address
from pyinsteon.constants import ALDBStatus
from pyinsteon.events import OFF_EVENT, OFF_FAST_EVENT, ON_EVENT, ON_FAST_EVENT
from pyinsteon.managers.link_manager import (
async_enter_linking_mode,
async_enter_unlinking_mode,
)
from pyinsteon.managers.scene_manager import (
async_trigger_scene_off,
async_trigger_scene_on,
)
from pyinsteon.managers.x10_manager import (
async_x10_all_lights_off,
async_x10_all_lights_on,
async_x10_all_units_off,
)
from pyinsteon.x10_address import create as create_x10_address
from homeassistant.const import (
CONF_ADDRESS,
CONF_ENTITY_ID,
CONF_PLATFORM,
ENTITY_MATCH_ALL,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
dispatcher_send,
)
from .const import (
CONF_CAT,
CONF_DIM_STEPS,
CONF_HOUSECODE,
CONF_SUBCAT,
CONF_UNITCODE,
DOMAIN,
EVENT_CONF_BUTTON,
EVENT_GROUP_OFF,
EVENT_GROUP_OFF_FAST,
EVENT_GROUP_ON,
EVENT_GROUP_ON_FAST,
ON_OFF_EVENTS,
SIGNAL_ADD_DEFAULT_LINKS,
SIGNAL_ADD_DEVICE_OVERRIDE,
SIGNAL_ADD_ENTITIES,
SIGNAL_ADD_X10_DEVICE,
SIGNAL_LOAD_ALDB,
SIGNAL_PRINT_ALDB,
SIGNAL_REMOVE_DEVICE_OVERRIDE,
SIGNAL_REMOVE_ENTITY,
SIGNAL_REMOVE_X10_DEVICE,
SIGNAL_SAVE_DEVICES,
SRV_ADD_ALL_LINK,
SRV_ADD_DEFAULT_LINKS,
SRV_ALL_LINK_GROUP,
SRV_ALL_LINK_MODE,
SRV_CONTROLLER,
SRV_DEL_ALL_LINK,
SRV_HOUSECODE,
SRV_LOAD_ALDB,
SRV_LOAD_DB_RELOAD,
SRV_PRINT_ALDB,
SRV_PRINT_IM_ALDB,
SRV_SCENE_OFF,
SRV_SCENE_ON,
SRV_X10_ALL_LIGHTS_OFF,
SRV_X10_ALL_LIGHTS_ON,
SRV_X10_ALL_UNITS_OFF,
)
from .ipdb import get_device_platforms, get_platform_groups
from .schemas import (
ADD_ALL_LINK_SCHEMA,
ADD_DEFAULT_LINKS_SCHEMA,
DEL_ALL_LINK_SCHEMA,
LOAD_ALDB_SCHEMA,
PRINT_ALDB_SCHEMA,
TRIGGER_SCENE_SCHEMA,
X10_HOUSECODE_SCHEMA,
)
_LOGGER = logging.getLogger(__name__)
def add_on_off_event_device(hass, device):
    """Register an Insteon device as an on/off event device.

    Subscribes to the device's ON/OFF (and fast variants) group events and
    re-fires each one on the Home Assistant event bus.
    """
    # Map pyinsteon event names to Home Assistant bus event types. Using a
    # mapping (instead of the previous if-chain) also guarantees ``event``
    # can never be unbound if an unexpected name slips through.
    event_map = {
        ON_EVENT: EVENT_GROUP_ON,
        OFF_EVENT: EVENT_GROUP_OFF,
        ON_FAST_EVENT: EVENT_GROUP_ON_FAST,
        OFF_FAST_EVENT: EVENT_GROUP_OFF_FAST,
    }

    @callback
    def async_fire_group_on_off_event(name, address, group, button):
        """Fire a Home Assistant event for a device on/off group event."""
        event = event_map.get(name)
        if event is None:
            # Only the four names above are subscribed below; previously an
            # unexpected name would have raised NameError here.
            return
        schema = {CONF_ADDRESS: address}
        # Buttons look like "..._a"; expose only the trailing letter. The
        # length guard avoids an IndexError on one-character button names.
        if button and len(button) >= 2 and button[-2] == "_":
            schema[EVENT_CONF_BUTTON] = button[-1].lower()
        _LOGGER.debug("Firing event %s with %s", event, schema)
        hass.bus.async_fire(event, schema)

    for group in device.events:
        if not isinstance(group, int):
            continue
        for event in device.events[group]:
            if event in (OFF_EVENT, ON_EVENT, OFF_FAST_EVENT, ON_FAST_EVENT):
                _LOGGER.debug(
                    "Registering on/off event for %s %d %s",
                    str(device.address),
                    group,
                    event,
                )
                # force_strong_ref keeps the closure alive for the
                # subscription's lifetime (pyinsteon holds weak refs otherwise).
                device.events[group][event].subscribe(
                    async_fire_group_on_off_event, force_strong_ref=True
                )
def register_new_device_callback(hass):
    """Register callback for new Insteon device."""

    @callback
    def async_new_insteon_device(address=None):
        """Detect device from transport to be delegated to platform."""
        # Runs in the event loop; the actual work is scheduled as a task
        # because entity creation below is async.
        hass.async_create_task(async_create_new_entities(address))

    async def async_create_new_entities(address):
        """Persist the new device and dispatch it to its platform(s)."""
        _LOGGER.debug(
            "Adding new INSTEON device to Home Assistant with address %s", address
        )
        await devices.async_save(workdir=hass.config.config_dir)
        device = devices[address]
        await device.async_status()
        platforms = get_device_platforms(device)
        for platform in platforms:
            if platform == ON_OFF_EVENTS:
                # Event-only "platform": wire bus events instead of entities.
                add_on_off_event_device(hass, device)
            else:
                signal = f"{SIGNAL_ADD_ENTITIES}_{platform}"
                dispatcher_send(hass, signal, {"address": device.address})

    # force_strong_ref keeps the callback alive; pyinsteon otherwise holds
    # only a weak reference to subscribers.
    devices.subscribe(async_new_insteon_device, force_strong_ref=True)
@callback
def async_register_services(hass):
    """Register services used by insteon component."""
    # Serializes concurrent saves of the device configuration file.
    save_lock = asyncio.Lock()

    async def async_srv_add_all_link(service):
        """Add an INSTEON All-Link between two devices."""
        group = service.data.get(SRV_ALL_LINK_GROUP)
        mode = service.data.get(SRV_ALL_LINK_MODE)
        link_mode = mode.lower() == SRV_CONTROLLER
        await async_enter_linking_mode(link_mode, group)

    async def async_srv_del_all_link(service):
        """Delete an INSTEON All-Link between two devices."""
        group = service.data.get(SRV_ALL_LINK_GROUP)
        await async_enter_unlinking_mode(group)

    async def async_srv_load_aldb(service):
        """Load the device All-Link database."""
        entity_id = service.data[CONF_ENTITY_ID]
        reload = service.data[SRV_LOAD_DB_RELOAD]
        if entity_id.lower() == ENTITY_MATCH_ALL:
            await async_srv_load_aldb_all(reload)
        else:
            signal = f"{entity_id}_{SIGNAL_LOAD_ALDB}"
            async_dispatcher_send(hass, signal, reload)

    async def async_srv_load_aldb_all(reload):
        """Load the All-Link database for all devices."""
        # Cannot be done concurrently due to issues with the underlying protocol.
        for address in devices:
            device = devices[address]
            # Skip the modem itself and battery-operated (cat 0x03) devices.
            if device != devices.modem and device.cat != 0x03:
                await device.aldb.async_load(
                    refresh=reload, callback=async_srv_save_devices
                )

    async def async_srv_save_devices():
        """Write the Insteon device configuration to file."""
        async with save_lock:
            _LOGGER.debug("Saving Insteon devices")
            await devices.async_save(hass.config.config_dir)

    def print_aldb(service):
        """Print the All-Link Database for a device."""
        # For now this sends logs to the log file.
        # Future direction is to create an INSTEON control panel.
        entity_id = service.data[CONF_ENTITY_ID]
        signal = f"{entity_id}_{SIGNAL_PRINT_ALDB}"
        dispatcher_send(hass, signal)

    def print_im_aldb(service):
        """Print the All-Link Database of the Insteon modem."""
        # For now this sends logs to the log file.
        # Future direction is to create an INSTEON control panel.
        print_aldb_to_log(devices.modem.aldb)

    async def async_srv_x10_all_units_off(service):
        """Send the X10 All Units Off command."""
        housecode = service.data.get(SRV_HOUSECODE)
        await async_x10_all_units_off(housecode)

    async def async_srv_x10_all_lights_off(service):
        """Send the X10 All Lights Off command."""
        housecode = service.data.get(SRV_HOUSECODE)
        await async_x10_all_lights_off(housecode)

    async def async_srv_x10_all_lights_on(service):
        """Send the X10 All Lights On command."""
        housecode = service.data.get(SRV_HOUSECODE)
        await async_x10_all_lights_on(housecode)

    async def async_srv_scene_on(service):
        """Trigger an INSTEON scene ON."""
        group = service.data.get(SRV_ALL_LINK_GROUP)
        await async_trigger_scene_on(group)

    async def async_srv_scene_off(service):
        """Trigger an INSTEON scene OFF."""
        group = service.data.get(SRV_ALL_LINK_GROUP)
        await async_trigger_scene_off(group)

    @callback
    def async_add_default_links(service):
        """Add the default All-Link entries to a device."""
        entity_id = service.data[CONF_ENTITY_ID]
        signal = f"{entity_id}_{SIGNAL_ADD_DEFAULT_LINKS}"
        async_dispatcher_send(hass, signal)

    async def async_add_device_override(override):
        """Add a device override, replacing any existing device/entities."""
        address = Address(override[CONF_ADDRESS])
        await async_remove_device(address)
        devices.set_id(address, override[CONF_CAT], override[CONF_SUBCAT], 0)
        await async_srv_save_devices()

    async def async_remove_device_override(address):
        """Remove a device override and re-identify the device."""
        address = Address(address)
        await async_remove_device(address)
        devices.set_id(address, None, None, None)
        await devices.async_identify_device(address)
        await async_srv_save_devices()

    @callback
    def async_add_x10_device(x10_config):
        """Add X10 device."""
        housecode = x10_config[CONF_HOUSECODE]
        unitcode = x10_config[CONF_UNITCODE]
        platform = x10_config[CONF_PLATFORM]
        steps = x10_config.get(CONF_DIM_STEPS, 22)
        # Map the Home Assistant platform onto the pyinsteon X10 device type.
        x10_type = "on_off"
        if platform == "light":
            x10_type = "dimmable"
        elif platform == "binary_sensor":
            x10_type = "sensor"
        _LOGGER.debug(
            "Adding X10 device to Insteon: %s %d %s", housecode, unitcode, x10_type
        )
        # This must be run in the event loop
        devices.add_x10_device(housecode, unitcode, x10_type, steps)

    async def async_remove_x10_device(housecode, unitcode):
        """Remove an X10 device and associated entities."""
        address = create_x10_address(housecode, unitcode)
        devices.pop(address)
        await async_remove_device(address)

    async def async_remove_device(address):
        """Remove the device and all entities from hass."""
        signal = f"{address.id}_{SIGNAL_REMOVE_ENTITY}"
        async_dispatcher_send(hass, signal)
        dev_registry = await hass.helpers.device_registry.async_get_registry()
        device = dev_registry.async_get_device(identifiers={(DOMAIN, str(address))})
        if device:
            dev_registry.async_remove_device(device.id)

    # Register all service handlers with Home Assistant.
    hass.services.async_register(
        DOMAIN, SRV_ADD_ALL_LINK, async_srv_add_all_link, schema=ADD_ALL_LINK_SCHEMA
    )
    hass.services.async_register(
        DOMAIN, SRV_DEL_ALL_LINK, async_srv_del_all_link, schema=DEL_ALL_LINK_SCHEMA
    )
    hass.services.async_register(
        DOMAIN, SRV_LOAD_ALDB, async_srv_load_aldb, schema=LOAD_ALDB_SCHEMA
    )
    hass.services.async_register(
        DOMAIN, SRV_PRINT_ALDB, print_aldb, schema=PRINT_ALDB_SCHEMA
    )
    hass.services.async_register(DOMAIN, SRV_PRINT_IM_ALDB, print_im_aldb, schema=None)
    hass.services.async_register(
        DOMAIN,
        SRV_X10_ALL_UNITS_OFF,
        async_srv_x10_all_units_off,
        schema=X10_HOUSECODE_SCHEMA,
    )
    hass.services.async_register(
        DOMAIN,
        SRV_X10_ALL_LIGHTS_OFF,
        async_srv_x10_all_lights_off,
        schema=X10_HOUSECODE_SCHEMA,
    )
    hass.services.async_register(
        DOMAIN,
        SRV_X10_ALL_LIGHTS_ON,
        async_srv_x10_all_lights_on,
        schema=X10_HOUSECODE_SCHEMA,
    )
    hass.services.async_register(
        DOMAIN, SRV_SCENE_ON, async_srv_scene_on, schema=TRIGGER_SCENE_SCHEMA
    )
    hass.services.async_register(
        DOMAIN, SRV_SCENE_OFF, async_srv_scene_off, schema=TRIGGER_SCENE_SCHEMA
    )
    hass.services.async_register(
        DOMAIN,
        SRV_ADD_DEFAULT_LINKS,
        async_add_default_links,
        schema=ADD_DEFAULT_LINKS_SCHEMA,
    )
    # Internal dispatcher signals used by the config flow / options flow.
    async_dispatcher_connect(hass, SIGNAL_SAVE_DEVICES, async_srv_save_devices)
    async_dispatcher_connect(
        hass, SIGNAL_ADD_DEVICE_OVERRIDE, async_add_device_override
    )
    async_dispatcher_connect(
        hass, SIGNAL_REMOVE_DEVICE_OVERRIDE, async_remove_device_override
    )
    async_dispatcher_connect(hass, SIGNAL_ADD_X10_DEVICE, async_add_x10_device)
    async_dispatcher_connect(hass, SIGNAL_REMOVE_X10_DEVICE, async_remove_x10_device)
    _LOGGER.debug("Insteon Services registered")
def print_aldb_to_log(aldb):
    """Print the All-Link Database to the log file.

    Writes one formatted row per ALDB record to the ``<module>.links`` child
    logger. A warning is emitted (but printing continues) when the database
    is not fully loaded.
    """
    logger = logging.getLogger(f"{__name__}.links")
    logger.info("%s ALDB load status is %s", aldb.address, aldb.status.name)
    if aldb.status not in [ALDBStatus.LOADED, ALDBStatus.PARTIAL]:
        _LOGGER.warning("All-Link database not loaded")
    # Fixed-width column header matching the record format below.
    logger.info("RecID In Use Mode HWM Group Address Data 1 Data 2 Data 3")
    logger.info("----- ------ ---- --- ----- -------- ------ ------ ------")
    for mem_addr in aldb:
        rec = aldb[mem_addr]
        # For now we write this to the log
        # Roadmap is to create a configuration panel
        in_use = "Y" if rec.is_in_use else "N"
        mode = "C" if rec.is_controller else "R"
        hwm = "Y" if rec.is_high_water_mark else "N"
        log_msg = (
            f" {rec.mem_addr:04x}    {in_use:s}     {mode:s}    {hwm:s}    "
            f"{rec.group:3d} {str(rec.target):s}   {rec.data1:3d}   "
            f"{rec.data2:3d}   {rec.data3:3d}"
        )
        logger.info(log_msg)
@callback
def async_add_insteon_entities(
    hass, platform, entity_type, async_add_entities, discovery_info
):
    """Add Insteon devices to a platform."""
    # With discovery info we add a single device; otherwise all known devices.
    if discovery_info:
        addresses = [discovery_info.get("address")]
    else:
        addresses = devices
    entities = []
    for address in addresses:
        device = devices[address]
        entities.extend(
            entity_type(device, group)
            for group in get_platform_groups(device, platform)
        )
    if entities:
        async_add_entities(entities)
| 35.151515 | 87 | 0.670618 |
ace246582ccfdb928483e6c2bc13d70845a07889 | 5,313 | py | Python | src/main_app.py | DatDarkAlpaca/Buddy | 41550058fa18e6a4aef431bbc310eb87f5924c4c | [
"MIT"
] | null | null | null | src/main_app.py | DatDarkAlpaca/Buddy | 41550058fa18e6a4aef431bbc310eb87f5924c4c | [
"MIT"
] | null | null | null | src/main_app.py | DatDarkAlpaca/Buddy | 41550058fa18e6a4aef431bbc310eb87f5924c4c | [
"MIT"
] | null | null | null | from PySide6.QtWidgets import QMainWindow, QFileDialog, QMessageBox
from PySide6.QtCore import Qt, QPoint, QEvent
from PySide6.QtGui import QPixmap
from compiled_ui.main_window import Ui_MainWindow
from src.buddy_builder import BuddyBuilder
from src.mini_buddy import MiniBuddy
from src.settings import Settings
from src.serialization import load_buddy, load_icons
from os.path import basename
from pathlib import Path
class MainApplication(QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
super().__init__(parent)
# Mini Buddy:
self.mini_buddy = MiniBuddy(self)
self.mini_buddy.installEventFilter(self)
# Buddy Builder:
self.buddy_builder = BuddyBuilder(self)
# Settings:
self.settings = Settings(self)
self.settings.show()
self.dragging_window, self.loaded = False, False
self.offset = QPoint()
self.icons = []
self.initialize()
self.bind_buttons()
# Initialize:
def initialize(self):
self.setWindowFlags(Qt.FramelessWindowHint)
self.setupUi(self)
self.statusBar()
self.load_icons()
self.load_buddy()
# Name:
if self.buddy_name.text() == '':
self.buddy_name.setText('Buddy')
def bind_buttons(self):
# Minimize button:
self.minimize_button.setIcon(QPixmap(self.icons['minus']))
self.minimize_button.clicked.connect(self.showMinimized)
# Settings button:
self.settings_button.setIcon(QPixmap(self.icons['setting']))
# Import button:
self.import_button.setIcon(QPixmap(self.icons['import']))
self.import_button.clicked.connect(self.new_buddy_action)
# Close button:
self.close_button.setIcon(QPixmap(self.icons['close']))
self.close_button.clicked.connect(self.close)
# Action buttons::
self.feed_button.clicked.connect(self.feed_action)
self.play_button.clicked.connect(self.play_action)
self.sleep_button.clicked.connect(self.sleep_action)
# Events:
def mouseDoubleClickEvent(self, event):
child = self.childAt(event.position().toPoint())
if child:
if child.objectName() == 'buddy_display':
if event.button() == Qt.LeftButton:
if self.loaded:
self.show_mini_buddy()
def mousePressEvent(self, event):
self.offset = event.globalPosition().toPoint()
if event.button() == Qt.LeftButton:
self.dragging_window = True
def mouseReleaseEvent(self, event):
self.dragging_window = False
def mouseMoveEvent(self, event):
if self.dragging_window:
delta = QPoint(event.globalPosition().toPoint() - self.offset)
self.move(self.x() + delta.x(), self.y() + delta.y())
self.offset = event.globalPosition().toPoint()
def eventFilter(self, obj, event):
if event.type() == QEvent.MouseButtonDblClick and event.button() == Qt.LeftButton:
self.hide_mini_buddy()
return super().eventFilter(obj, event)
# Actions:
def feed_action(self):
self.change_output('Nom nom nom')
def play_action(self):
self.change_output('Yay! such playing')
def sleep_action(self):
self.change_output('Snore')
# Mini Buddy Methods:
def show_mini_buddy(self):
self.hide()
self.mini_buddy.show()
def hide_mini_buddy(self):
self.mini_buddy.hide()
self.show()
# Buddy Builder:
def new_buddy_action(self):
dialog = QMessageBox()
dialog.setText('Would you like to create or import a Buddy?')
dialog.setWindowTitle('Buddy')
dialog.addButton(dialog.Close)
dialog.addButton('Import', dialog.ActionRole)
dialog.addButton('Create', dialog.ActionRole)
button_option = dialog.exec()
if button_option == 0:
dialog.close()
self.import_buddy_action()
elif button_option == 1:
dialog.close()
self.buddy_builder_action()
    def buddy_builder_action(self):
        # Run the builder dialog modally, then load the file it saved.
        self.buddy_builder.exec()
        self.load_buddy(self.buddy_builder.file_save)
def import_buddy_action(self):
path, _ = QFileDialog.getOpenFileName(self, 'Open file',
str(Path().resolve()), 'Image files (*.buddy)')
self.load_buddy(basename(path))
# Helper:
def change_output(self, text):
self.buddy_output.setText('<center>' + text + '</center>')
    def load_buddy(self, path=None):
        """Load a buddy file and populate the UI from its contents.
        NOTE(review): this method shadows the module-level load_buddy()
        helper it calls on the first line (resolved correctly here because
        the method itself is only reachable via self) -- consider renaming.
        """
        buddy_file = load_buddy(path)
        if buddy_file:
            # Profile Picture:
            self.buddy_display.set_buddy(buddy_file.get('profile_picture'))
            # Buddy Picture:
            self.mini_buddy.mini_buddy_display.set_buddy(buddy_file.get('mini_buddy_picture'))
            # Name:
            self.buddy_name.setText(buddy_file.get('name'))
            self.change_output('Hello, my name is ' + self.buddy_name.text() + '!')
            self.loaded = True
    def load_icons(self):
        # Shadows the module-level load_icons() helper it calls; tries the
        # packaged .res bundle first, then falls back to the icons folder.
        self.icons = load_icons('./res/icons.res')
        if not self.icons:
            self.icons = load_icons('./res/icons')
| 30.534483 | 94 | 0.628647 |
ace247e2db4bc4209d2d45e5577fe7397aee930c | 10,237 | py | Python | mycroft/skills/skill_updater.py | assistent-cat/mycroft-core | 6f8bae6ba136c9dd66ca47aaadd75e214d006190 | [
"Apache-2.0"
] | 1 | 2021-06-14T10:01:32.000Z | 2021-06-14T10:01:32.000Z | mycroft/skills/skill_updater.py | assistent-cat/mycroft-core | 6f8bae6ba136c9dd66ca47aaadd75e214d006190 | [
"Apache-2.0"
] | 4 | 2021-06-08T22:45:08.000Z | 2022-03-12T00:51:26.000Z | mycroft/skills/skill_updater.py | assistent-cat/mycroft-core | 6f8bae6ba136c9dd66ca47aaadd75e214d006190 | [
"Apache-2.0"
] | 2 | 2020-09-28T01:38:34.000Z | 2020-12-03T03:14:32.000Z | # Copyright 2019 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Periodically run by skill manager to update skills and post the manifest."""
import os
import sys
from datetime import datetime
from time import time
from msm import MsmException
from mycroft.api import DeviceApi, is_paired
from mycroft.configuration import Configuration
from mycroft.util import connected
from mycroft.util.combo_lock import ComboLock
from mycroft.util.log import LOG
from .msm_wrapper import build_msm_config, create_msm
ONE_HOUR = 3600  # seconds
FIVE_MINUTES = 300  # number of seconds in five minutes (retry interval)
def skill_is_blacklisted(skill):
    """Return True when the skill's folder name or skill name is blacklisted."""
    blacklisted = Configuration.get()['skills']['blacklisted_skills']
    folder_name = os.path.basename(skill.path)
    return folder_name in blacklisted or skill.name in blacklisted
class SkillUpdater:
    """Class facilitating skill update / install actions.
    Arguments
        bus (MessageBusClient): Optional bus emitter Used to communicate
                                with the mycroft core system and handle
                                commands.
    """
    # Both caches are filled lazily by the properties of the same name.
    _installed_skills_file_path = None
    _msm = None
    def __init__(self, bus=None):
        # NOTE(review): ComboLock appears to serialize MSM access across
        # processes via the lock file -- confirm its semantics.
        self.msm_lock = ComboLock('/tmp/mycroft-msm.lck')
        self.install_retries = 0
        self.config = Configuration.get()
        update_interval = self.config['skills']['update_interval']
        # Config value is in hours; convert to seconds.
        self.update_interval = int(update_interval) * ONE_HOUR
        self.dot_msm_path = os.path.join(self.msm.skills_dir, '.msm')
        self.next_download = self._determine_next_download_time()
        self._log_next_download_time()
        self.installed_skills = set()
        self.default_skill_install_error = False
        if bus:
            self._register_bus_handlers()
    def _register_bus_handlers(self):
        """TODO: Register bus handlers for triggering updates and such."""
    def _determine_next_download_time(self):
        """Determine the initial values of the next/last download times.
        Update immediately if the .msm or installed skills file is missing
        otherwise use the timestamp on .msm as a basis.
        """
        msm_files_exist = (
            os.path.exists(self.dot_msm_path) and
            os.path.exists(self.installed_skills_file_path)
        )
        if msm_files_exist:
            mtime = os.path.getmtime(self.dot_msm_path)
            next_download = mtime + self.update_interval
        else:
            # Last update can't be found or the requirements don't seem to be
            # installed trigger update before skill loading
            next_download = time() - 1
        return next_download
    @property
    def installed_skills_file_path(self):
        """Property representing the path of the installed skills file."""
        if self._installed_skills_file_path is None:
            # Prefer the virtualenv root when it is writable; otherwise fall
            # back to the user's home directory.
            virtual_env_path = os.path.dirname(os.path.dirname(sys.executable))
            if os.access(virtual_env_path, os.W_OK | os.R_OK | os.X_OK):
                self._installed_skills_file_path = os.path.join(
                    virtual_env_path,
                    '.mycroft-skills'
                )
            else:
                self._installed_skills_file_path = os.path.expanduser(
                    '~/.mycroft/.mycroft-skills'
                )
        return self._installed_skills_file_path
    @property
    def msm(self):
        """Lazily-built Mycroft Skills Manager instance."""
        if self._msm is None:
            msm_config = build_msm_config(self.config)
            self._msm = create_msm(msm_config)
        return self._msm
    @property
    def default_skill_names(self) -> tuple:
        """Property representing the default skills expected to be installed"""
        default_skill_groups = dict(self.msm.repo.get_default_skill_names())
        default_skills = set(default_skill_groups['default'])
        # Platform-specific defaults are merged on top of the common set.
        platform_default_skills = default_skill_groups.get(self.msm.platform)
        if platform_default_skills is None:
            log_msg = 'No default skills found for platform {}'
            LOG.info(log_msg.format(self.msm.platform))
        else:
            default_skills.update(platform_default_skills)
        return tuple(default_skills)
    def _load_installed_skills(self):
        """Load the last known skill listing from a file."""
        if os.path.isfile(self.installed_skills_file_path):
            with open(self.installed_skills_file_path) as skills_file:
                # One skill name per line; skip blanks.
                self.installed_skills = {
                    i.strip() for i in skills_file.readlines() if i.strip()
                }
    def _save_installed_skills(self):
        """Save the skill listing after the download to a file."""
        with open(self.installed_skills_file_path, 'w') as skills_file:
            for skill_name in self.installed_skills:
                skills_file.write(skill_name + '\n')
    def update_skills(self, quick=False):
        """Invoke MSM to install default skills and/or update installed skills
        Args:
            quick (bool): Expedite the download by running with more threads?
        Returns:
            bool: True when the update ran to completion without errors.
        """
        LOG.info('Beginning skill update...')
        self.msm._device_skill_state = None # TODO: Proper msm method
        success = True
        if connected():
            self._load_installed_skills()
            with self.msm_lock, self.msm.lock:
                self._apply_install_or_update(quick)
            self._save_installed_skills()
            # Schedule retry in 5 minutes on failure, after 10 shorter periods
            # Go back to 60 minutes wait
            if self.default_skill_install_error and self.install_retries < 10:
                self._schedule_retry()
                success = False
            else:
                self.install_retries = 0
                self._update_download_time()
        else:
            self.handle_not_connected()
            success = False
        if success:
            LOG.info('Skill update complete')
        return success
    def handle_not_connected(self):
        """Notifications of the device not being connected to the internet"""
        LOG.error('msm failed, network connection not available')
        self.next_download = time() + FIVE_MINUTES
    def _apply_install_or_update(self, quick):
        """Invoke MSM to install or update a skill."""
        try:
            # Determine if all defaults are installed
            defaults = all(
                [s.is_local for s in self.msm.default_skills.values()]
            )
            # More threads when defaults are missing or a quick run was asked.
            num_threads = 20 if not defaults or quick else 2
            self.msm.apply(
                self.install_or_update,
                self.msm.list(),
                max_threads=num_threads
            )
            self.post_manifest()
        except MsmException as e:
            LOG.error('Failed to update skills: {}'.format(repr(e)))
    def post_manifest(self, reload_skills_manifest=False):
        """Post the manifest of the device's skills to the backend."""
        upload_allowed = self.config['skills'].get('upload_skill_manifest')
        if upload_allowed and is_paired():
            if reload_skills_manifest:
                self.msm.clear_cache()
            try:
                device_api = DeviceApi()
                device_api.upload_skills_data(self.msm.device_skill_state)
            except Exception:
                # Upload is best-effort; log and continue.
                LOG.exception('Could not upload skill manifest')
    def install_or_update(self, skill):
        """Install missing defaults and update existing skills"""
        if self._get_device_skill_state(skill.name).get('beta', False):
            skill.sha = None # Will update to latest head
        if skill.is_local:
            skill.update()
            if skill.name not in self.installed_skills:
                skill.update_deps()
        elif skill.name in self.default_skill_names:
            try:
                self.msm.install(skill, origin='default')
            except Exception:
                if skill.name in self.default_skill_names:
                    LOG.warning(
                        'Failed to install default skill: ' + skill.name
                    )
                    self.default_skill_install_error = True
                raise
        self.installed_skills.add(skill.name)
    def defaults_installed(self):
        """Check if all default skills are installed.
        Returns:
            True if all default skills are installed, else False.
        """
        defaults = []
        for skill in self.msm.default_skills.values():
            if not skill_is_blacklisted(skill):
                defaults.append(skill)
        return all([skill.is_local for skill in defaults])
    def _get_device_skill_state(self, skill_name):
        """Get skill data structure from name."""
        # Last matching entry wins; returns {} when the skill is unknown.
        device_skill_state = {}
        for msm_skill_state in self.msm.device_skill_state.get('skills', []):
            if msm_skill_state.get('name') == skill_name:
                device_skill_state = msm_skill_state
        return device_skill_state
    def _schedule_retry(self):
        """Schedule the next skill update in the event of a failure."""
        self.install_retries += 1
        self.next_download = time() + FIVE_MINUTES
        self._log_next_download_time()
        self.default_skill_install_error = False
    def _update_download_time(self):
        """Update timestamp on .msm file to be used when system is restarted"""
        # 'a' mode touches the file without truncating it.
        with open(self.dot_msm_path, 'a'):
            os.utime(self.dot_msm_path, None)
        self.next_download = time() + self.update_interval
        self._log_next_download_time()
    def _log_next_download_time(self):
        # Human-readable timestamp of the next scheduled update.
        LOG.info(
            'Next scheduled skill update: ' +
            str(datetime.fromtimestamp(self.next_download))
        )
| 38.340824 | 79 | 0.632705 |
ace24946ad00fbc4272c7b2963883eae83500fc9 | 4,141 | py | Python | demonstrator.py | splitstrument/demonstrator | 85b5552a6ca1f9a2473c7a58664e1e5265d5ca4a | [
"MIT"
] | null | null | null | demonstrator.py | splitstrument/demonstrator | 85b5552a6ca1f9a2473c7a58664e1e5265d5ca4a | [
"MIT"
] | null | null | null | demonstrator.py | splitstrument/demonstrator | 85b5552a6ca1f9a2473c7a58664e1e5265d5ca4a | [
"MIT"
] | null | null | null | import web
import librosa
import soundfile
import uuid
import io
import os
import yaml
import json
import pafy
from datetime import datetime
from unmix.source.api import prediction
from unmix.source.configuration import Configuration
# Template renderer and URL routing table for the web.py application.
render = web.template.render('templates/')
urls = (
    '/', 'Index',
    '/splitter', 'Splitter',
    '/result', 'Result'
)
# In-memory store of finished separations, keyed by prediction uuid.
predictions = {}
# Load the application configuration once at import time.
with open('conf.yaml') as config_file:
    config = yaml.load(config_file, Loader=yaml.FullLoader)
class Index:
    """Landing page: lists the trainings available for selection."""
    def GET(self):
        """Render the index page with the list of trainings."""
        trainings = self.load_trainings()
        return render.index(trainings)
    def load_trainings(self):
        """Collect the trainings found in the configured training folder.
        Each training folder is expected to contain a 'configuration.jsonc'
        with a 'collection' section; its optional 'name' entry overrides the
        folder name for display.
        """
        training_folder = config['training_folder']
        trainings = []
        for folder in os.listdir(training_folder):
            # Distinct names for the path and the open handle (the path
            # variable was previously shadowed by the file handle).
            configuration_path = os.path.join(training_folder, folder, 'configuration.jsonc')
            with open(configuration_path, 'r') as configuration_file:
                configuration = json.loads(configuration_file.read())
            collection = configuration['collection']
            name = collection.get('name', folder)
            trainings.append({
                'name': name,
                'folder': folder
            })
        return trainings
class Splitter:
    """Handles a separation request: fetch audio, run the model, store results."""
    def POST(self):
        """Run a source separation with the selected training and show results.
        NOTE(review): 'id' shadows the builtin here and throughout this class.
        """
        training = web.input().training
        engine = prediction.create_engine(os.path.join(config['training_folder'], training))
        stereo = Configuration.get('collection.stereo', optional=False)
        fft_length = Configuration.get('spectrogram_generation.fft_length', optional=False)
        sample_rate = Configuration.get('collection.sample_rate', optional=False)
        id = str(uuid.uuid4())
        song = self.load_song(id, sample_rate, stereo)
        instrument, rest = self.run_prediction(song, engine, fft_length, sample_rate)
        # Keep the rendered tracks in memory until fetched via /result.
        predictions[id] = {
            'instrument': instrument,
            'rest': rest,
            'timestamp': datetime.now()
        }
        return render.display(id)
    def run_prediction(self, song, engine, fft_length, sample_rate):
        """Compute the STFT, run the unmix engine, and encode the outputs."""
        stft = librosa.stft(song, fft_length)
        predicted_instrument, predicted_rest = prediction.run_prediction([stft], engine)
        return self.write_predictions(predicted_instrument, predicted_rest, sample_rate)
    def load_song(self, id, sample_rate, stereo):
        """Persist the uploaded/downloaded audio, decode it, and clean up."""
        file_path = self.save_song(id)
        song, _ = librosa.load(file_path, sr=sample_rate, mono=(not stereo))
        # The temp file is only needed for decoding.
        os.remove(file_path)
        return song
    def save_song(self, id):
        """Save the request's audio to a temp file and return its path.
        A YouTube link takes precedence over an uploaded file; with neither,
        the user is redirected back to the index.
        """
        upload = web.input(song={})
        youtube_link = upload.youtube
        uploaded_song = upload.song
        if len(youtube_link) > 0:
            video = pafy.new(youtube_link)
            bestaudio = video.getbestaudio()
            extension = bestaudio.extension
            file_path = self.build_temp_path(extension, id)
            bestaudio.download(file_path)
            return file_path
        elif len(uploaded_song.value) > 0:
            # Normalize Windows-style upload paths before extracting the name.
            filepath = uploaded_song.filename.replace('\\', '/')
            filename = filepath.split('/')[-1]
            extension = filename.split('.')[-1]
            file_path = self.build_temp_path(extension, id)
            fout = open(file_path, 'wb')
            fout.write(uploaded_song.file.read())
            fout.close()
            return file_path
        else:
            raise web.seeother('/')
    def build_temp_path(self, extension, id):
        # Temp file location: <tmp_directory>/<uuid>.<ext>
        return os.path.join(config['tmp_directory'], id + '.' + extension)
    def write_predictions(self, predicted_instrument, predicted_rest, sample_rate):
        """Encode both predicted tracks as in-memory WAV buffers."""
        instrument = io.BytesIO()
        soundfile.write(instrument, predicted_instrument[0], sample_rate, format='wav')
        rest = io.BytesIO()
        soundfile.write(rest, predicted_rest[0], sample_rate, format='wav')
        return instrument, rest
class Result:
    """Serves the audio tracks stored by Splitter under a prediction id."""
    def GET(self):
        """Return the requested track bytes for a prediction.
        Query params: ``id`` (prediction uuid) and ``type`` ('instrument' or
        'rest'). A track is removed from the in-memory store on retrieval.
        Unknown ids and missing/already-fetched tracks now answer 404 instead
        of raising an unhandled KeyError (HTTP 500).
        """
        i = web.input()
        if i.id in predictions:
            track = predictions[i.id].pop(i.type, None)
            if track is not None:
                return track.getvalue()
        raise web.notfound()
if __name__ == "__main__":
app = web.application(urls, globals())
app.run()
| 32.100775 | 93 | 0.629317 |
ace2498905fbe68cbe3e572d4acc8dc357c620cc | 2,461 | py | Python | dev/07_14_2018/SQL_Database_Controller.py | npwebste/UPS_Controller | a90ce2229108197fd48f956310ae2929e0fa5d9a | [
"AFL-1.1"
] | null | null | null | dev/07_14_2018/SQL_Database_Controller.py | npwebste/UPS_Controller | a90ce2229108197fd48f956310ae2929e0fa5d9a | [
"AFL-1.1"
] | null | null | null | dev/07_14_2018/SQL_Database_Controller.py | npwebste/UPS_Controller | a90ce2229108197fd48f956310ae2929e0fa5d9a | [
"AFL-1.1"
] | null | null | null | # Universal Power System Controller
# USAID Middle East Water Security Initiative
#
# Developed by: Nathan Webster
# Primary Investigator: Nathan Johnson
#
# Version History (mm_dd_yyyy)
# 1.00 07_13_2018_NW
#
######################################################
# Import Libraries
import Parameters
import sqlite3
import sched
import time
import logging
import PWM_Controller
from datetime import datetime
from VFD_Modbus_Wrapper import *
from VFD_Modbus_Registers import *
from Relay_Controller import *
# Setup event logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
fh = logging.FileHandler('UPS_Event.log')
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
def SQL_Database_Controller_Main(arg):
# Setup SQL scheduler
SQL_Sched = sched.scheduler(time.time, time.sleep)
while True:
# Schedule the SQL controller and run
SQL_Sched.enter(Parameters.SQL_Database_Controller_Interval, 1, SQL_Database_Controller, ("",))
SQL_Sched.run()
def SQL_Database_Controller(arg):
Solar_Voltage = PWM_Controller.Solar_Actual_Volts
DC_Link_Voltage = PWM_Controller.DC_Link_Actual_Volts
#Solar_Voltage = PWM_Measure_Voltage('Solar')/Parameters.Voltage_Multiplier
#DC_Link_Voltage = PWM_Measure_Voltage('DC_Link')/Parameters.Voltage_Multiplier
VFD_Freq = VFD.VFDRead(reg.get("ReadFunc", {}).get("Output_Frequency")) / 100
VFD_Volt = VFD.VFDRead(reg.get("ReadFunc", {}).get("Output_Voltage"))
VFD_Amps = VFD.VFDRead(reg.get("ReadFunc", {}).get("Output_Current")) / 100
VFD_Power = VFD.VFDRead(reg.get("ReadFunc", {}).get("Output_Power")) / 10
VFD_BusVolt = VFD.VFDRead(reg.get("ReadFunc", {}).get("Bus_Voltage"))
VFD_Temp = VFD.VFDRead(reg.get("ReadFunc", {}).get("Temperature"))
Currenttime = datetime.now()
try:
conn = sqlite3.connect('UPS_DB.db')
c = conn.cursor()
c.execute("INSERT INTO UPS_DB(Date,Solar_Voltage, DC_Link_Voltage, VFD_Freq, VFD_Volt, VFD_Amps, VFD_Power, VFD_BusVolt, VFD_Temp) VALUES(?,?,?,?,?,?,?,?,?)",(Currenttime, Solar_Voltage,DC_Link_Voltage,VFD_Freq,VFD_Volt,VFD_Amps,VFD_Power,VFD_BusVolt,VFD_Temp))
conn.commit()
except:
conn.rollback()
logger.error('Could not connect and write to SQL database')
conn.close() | 37.861538 | 269 | 0.717594 |
ace2499c159e7b3d28388886b31d812d1ba63a75 | 575 | py | Python | toph/banner/banner.py | borosilva/Toph-OSINT | 0aa49ffa405dee5ef0b358a041734cc15ca54523 | [
"MIT"
] | null | null | null | toph/banner/banner.py | borosilva/Toph-OSINT | 0aa49ffa405dee5ef0b358a041734cc15ca54523 | [
"MIT"
] | null | null | null | toph/banner/banner.py | borosilva/Toph-OSINT | 0aa49ffa405dee5ef0b358a041734cc15ca54523 | [
"MIT"
] | null | null | null | def printBanner():
print("""
████████╗ ██████╗ ██████╗ ██╗ ██╗
╚══██╔══╝██╔═══██╗██╔══██╗██║ ██║
██║ ██║ ██║██████╔╝███████║
██║ ██║ ██║██╔═══╝ ██╔══██║
██║ ╚██████╔╝██║ ██║ ██║
╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝
██████╗ ███████╗██╗███╗ ██╗████████╗
██╔═══██╗██╔════╝██║████╗ ██║╚══██╔══╝
██║ ██║███████╗██║██╔██╗ ██║ ██║
██║ ██║╚════██║██║██║╚██╗██║ ██║
╚██████╔╝███████║██║██║ ╚████║ ██║
╚═════╝ ╚══════╝╚═╝╚═╝ ╚═══╝ ╚═╝
Author: Borosilva
""") | 33.823529 | 39 | 0.05913 |
ace24a3c312f0146879c5c93960d79aeea58c7e6 | 385 | py | Python | parsec/commands/jobs/get_inputs.py | erasche/parsec | c2f1bda7ff776f9aa121c7b94d62e3da2fad93f6 | [
"Apache-2.0"
] | 8 | 2015-03-27T17:09:15.000Z | 2021-07-13T15:33:02.000Z | parsec/commands/jobs/get_inputs.py | erasche/parsec | c2f1bda7ff776f9aa121c7b94d62e3da2fad93f6 | [
"Apache-2.0"
] | 30 | 2015-02-27T21:21:47.000Z | 2021-08-31T14:19:55.000Z | parsec/commands/jobs/get_inputs.py | erasche/parsec | c2f1bda7ff776f9aa121c7b94d62e3da2fad93f6 | [
"Apache-2.0"
] | 12 | 2017-06-01T03:49:23.000Z | 2021-07-13T15:33:06.000Z | import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('get_inputs')
@click.argument("job_id", type=str)
@pass_context
@custom_exception
@json_output
def cli(ctx, job_id):
    """Get dataset inputs used by a job.
    Output:
        Inputs for the given job
    """
    # Thin delegation to the jobs client on the CLI context (ctx.gi);
    # @custom_exception translates errors and @json_output renders the result.
    # The docstring above doubles as the click help text, so it is left as-is.
    return ctx.gi.jobs.get_inputs(job_id)
ace24c1c4181d7732882690e4449d97f9aceed48 | 18,926 | py | Python | src/emuvim/test/unittests/test_emulator.py | RafaelSche/vim-emu | 6503ba9fcbe13ca73c94d318157a1ba78ef26b5b | [
"Apache-2.0"
] | 34 | 2016-09-05T06:11:12.000Z | 2021-12-24T08:45:24.000Z | src/emuvim/test/unittests/test_emulator.py | RafaelSche/vim-emu | 6503ba9fcbe13ca73c94d318157a1ba78ef26b5b | [
"Apache-2.0"
] | 89 | 2016-07-19T14:14:27.000Z | 2020-01-09T07:19:45.000Z | src/emuvim/test/unittests/test_emulator.py | RafaelSche/vim-emu | 6503ba9fcbe13ca73c94d318157a1ba78ef26b5b | [
"Apache-2.0"
] | 32 | 2016-07-19T14:58:06.000Z | 2020-05-05T13:30:01.000Z | # Copyright (c) 2015 SONATA-NFV and Paderborn University
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Paderborn University
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
import unittest
from emuvim.dcemulator.node import EmulatorCompute
from emuvim.test.base import SimpleTestTopology
from mininet.node import RemoteController
# @unittest.skip("disabled topology tests for development")
class testEmulatorTopology(SimpleTestTopology):
    """
    Tests to check the topology API of the emulator.
    Note: ping() returns the packet-loss percentage, so '<= 0.0' asserts
    full connectivity (presumably the Mininet convention -- confirm).
    """
    def testSingleDatacenter(self):
        """
        Create a single data center and add check if its switch is up
        by using manually added hosts. Tests especially the
        data center specific addLink method.
        """
        # create network
        self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0)
        # setup links
        self.net.addLink(self.dc[0], self.h[0])
        self.net.addLink(self.h[1], self.dc[0])
        # start Mininet network
        self.startNet()
        # check number of running nodes
        self.assertTrue(len(self.getContainernetContainers()) == 0)
        self.assertTrue(len(self.net.hosts) == 2)
        self.assertTrue(len(self.net.switches) == 1)
        # check connectivity by using ping
        self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
        # stop Mininet network
        self.stopNet()
    # @unittest.skip("disabled to test if CI fails because this is the first test.")
    def testMultipleDatacenterDirect(self):
        """
        Create a two data centers and interconnect them.
        """
        # create network
        self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0)
        # setup links
        self.net.addLink(self.dc[0], self.h[0])
        self.net.addLink(self.h[1], self.dc[1])
        self.net.addLink(self.dc[0], self.dc[1])
        # start Mininet network
        self.startNet()
        # check number of running nodes
        self.assertTrue(len(self.getContainernetContainers()) == 0)
        self.assertTrue(len(self.net.hosts) == 2)
        self.assertTrue(len(self.net.switches) == 2)
        # check connectivity by using ping
        self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
        # stop Mininet network
        self.stopNet()
    def testMultipleDatacenterWithIntermediateSwitches(self):
        """
        Create a two data centers and interconnect them with additional
        switches between them.
        """
        # create network
        self.createNet(
            nswitches=3, ndatacenter=2, nhosts=2, ndockers=0,
            autolinkswitches=True)
        # setup links
        self.net.addLink(self.dc[0], self.h[0])
        self.net.addLink(self.h[1], self.dc[1])
        self.net.addLink(self.dc[0], self.s[0])
        self.net.addLink(self.s[2], self.dc[1])
        # start Mininet network
        self.startNet()
        # check number of running nodes
        self.assertTrue(len(self.getContainernetContainers()) == 0)
        self.assertTrue(len(self.net.hosts) == 2)
        # 2 DC switches + 3 auto-linked intermediate switches
        self.assertTrue(len(self.net.switches) == 5)
        # check connectivity by using ping
        self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
        # stop Mininet network
        self.stopNet()
class testEmulatorNetworking(SimpleTestTopology):
    """
    Tests for SDN chaining between emulated data centers using an external
    (Ryu) controller. ping() returns the packet-loss percentage, so
    '<= 0.0' asserts connectivity and '> 0.0' asserts isolation.
    """
    def testSDNChainingSingleService_withLearning(self):
        """
        Create a two data centers and interconnect them with additional
        switches between them.
        Uses Ryu SDN controller.
        Connect the Docker hosts to different datacenters and setup the links between.
        """
        # create network
        self.createNet(
            nswitches=3, ndatacenter=2, nhosts=0, ndockers=0,
            autolinkswitches=True,
            controller=RemoteController,
            enable_learning=True)
        # setup links
        self.net.addLink(self.dc[0], self.s[0])
        self.net.addLink(self.s[2], self.dc[1])
        # start Mininet network
        self.startNet()
        # add compute resources
        vnf1 = self.dc[0].startCompute(
            "vnf1", network=[{'id': 'intf1', 'ip': '10.0.10.1/24'}])
        vnf2 = self.dc[1].startCompute(
            "vnf2", network=[{'id': 'intf2', 'ip': '10.0.10.2/24'}])
        # check number of running nodes
        self.assertTrue(len(self.getContainernetContainers()) == 2)
        self.assertTrue(len(self.net.hosts) == 2)
        self.assertTrue(len(self.net.switches) == 5)
        # check status
        # check get status
        s1 = self.dc[0].containers.get("vnf1").getStatus()
        print(s1)
        self.assertTrue(s1["name"] == "vnf1")
        self.assertTrue(s1["state"]["Running"])
        self.assertTrue(s1["network"][0]['intf_name'] == 'intf1')
        self.assertTrue(s1["network"][0]['ip'] == '10.0.10.1/24')
        s2 = self.dc[1].containers.get("vnf2").getStatus()
        print(s2)
        self.assertTrue(s2["name"] == "vnf2")
        self.assertTrue(s2["state"]["Running"])
        self.assertTrue(s2["network"][0]['intf_name'] == 'intf2')
        self.assertTrue(s2["network"][0]['ip'] == '10.0.10.2/24')
        # should be connected because learning = True
        self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
        # setup links
        self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2',
                          bidirectional=True, cmd='add-flow')
        # should still be connected
        self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
        # stop Mininet network
        self.stopNet()
    def testSDNChainingSingleService(self):
        """
        Create a two data centers and interconnect them with additional
        switches between them.
        Uses Ryu SDN controller.
        Connect the Docker hosts to different datacenters and setup the links between.
        With learning disabled, connectivity must only exist after an
        explicit chain is installed.
        """
        # create network
        self.createNet(
            nswitches=3, ndatacenter=2, nhosts=0, ndockers=0,
            autolinkswitches=True,
            controller=RemoteController,
            enable_learning=False)
        # setup links
        self.net.addLink(self.dc[0], self.s[0])
        self.net.addLink(self.s[2], self.dc[1])
        # start Mininet network
        self.startNet()
        # add compute resources
        vnf1 = self.dc[0].startCompute(
            "vnf1", network=[{'id': 'intf1', 'ip': '10.0.10.1/24'}])
        vnf2 = self.dc[1].startCompute(
            "vnf2", network=[{'id': 'intf2', 'ip': '10.0.10.2/24'}])
        # check number of running nodes
        self.assertTrue(len(self.getContainernetContainers()) == 2)
        self.assertTrue(len(self.net.hosts) == 2)
        self.assertTrue(len(self.net.switches) == 5)
        # check status
        # check get status
        s1 = self.dc[0].containers.get("vnf1").getStatus()
        print(s1)
        self.assertTrue(s1["name"] == "vnf1")
        self.assertTrue(s1["state"]["Running"])
        self.assertTrue(s1["network"][0]['intf_name'] == 'intf1')
        self.assertTrue(s1["network"][0]['ip'] == '10.0.10.1/24')
        s2 = self.dc[1].containers.get("vnf2").getStatus()
        print(s2)
        self.assertTrue(s2["name"] == "vnf2")
        self.assertTrue(s2["state"]["Running"])
        self.assertTrue(s2["network"][0]['intf_name'] == 'intf2')
        self.assertTrue(s2["network"][0]['ip'] == '10.0.10.2/24')
        # should be not not yet connected
        self.assertTrue(self.net.ping([vnf1, vnf2]) > 0.0)
        # setup links
        self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2',
                          bidirectional=True, cmd='add-flow')
        # check connectivity by using ping
        self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
        # stop Mininet network
        self.stopNet()
    def testSDNChainingMultiService(self):
        """
        Create a two data centers and interconnect them with additional
        switches between them.
        Uses Ryu SDN controller.
        Setup 2 services and setup isolated paths between them
        Delete only the first service, and check that other one still works
        """
        # create network
        self.createNet(
            nswitches=3, ndatacenter=2, nhosts=0, ndockers=0,
            autolinkswitches=True,
            controller=RemoteController,
            enable_learning=False)
        # setup links
        self.net.addLink(self.dc[0], self.s[0])
        self.net.addLink(self.s[2], self.dc[1])
        # start Mininet network
        self.startNet()
        # First Service
        # add compute resources
        vnf1 = self.dc[0].startCompute(
            "vnf1", network=[{'id': 'intf1', 'ip': '10.0.10.1/24'}])
        vnf2 = self.dc[1].startCompute(
            "vnf2", network=[{'id': 'intf2', 'ip': '10.0.10.2/24'}])
        # setup links (flow cookies distinguish the two service chains)
        self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2',
                          bidirectional=True, cmd='add-flow', cookie=1)
        # check connectivity by using ping
        self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
        # Second Service
        # add compute resources
        vnf11 = self.dc[0].startCompute(
            "vnf11", network=[{'id': 'intf1', 'ip': '10.0.20.1/24'}])
        vnf22 = self.dc[1].startCompute(
            "vnf22", network=[{'id': 'intf2', 'ip': '10.0.20.2/24'}])
        # check number of running nodes
        self.assertTrue(len(self.getContainernetContainers()) == 4)
        self.assertTrue(len(self.net.hosts) == 4)
        self.assertTrue(len(self.net.switches) == 5)
        # setup links
        self.net.setChain('vnf11', 'vnf22', 'intf1', 'intf2',
                          bidirectional=True, cmd='add-flow', cookie=2)
        # check connectivity by using ping
        self.assertTrue(self.net.ping([vnf11, vnf22]) <= 0.0)
        # check first service cannot ping second service
        self.assertTrue(self.net.ping([vnf1, vnf22]) > 0.0)
        self.assertTrue(self.net.ping([vnf2, vnf11]) > 0.0)
        # delete the first service chain
        self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2',
                          bidirectional=True, cmd='del-flows', cookie=1)
        # check connectivity of first service is down
        self.assertTrue(self.net.ping([vnf1, vnf2]) > 0.0)
        # time.sleep(100)
        # check connectivity of second service is still up
        self.assertTrue(self.net.ping([vnf11, vnf22]) <= 0.0)
        # stop Mininet network
        self.stopNet()
# @unittest.skip("disabled compute tests for development")
class testEmulatorCompute(SimpleTestTopology):
"""
Tests to check the emulator's API to add and remove
compute resources at runtime.
"""
def testAddSingleComputeSingleDC(self):
"""
Adds a single compute instance to
a single DC and checks its connectivity with a
manually added host.
"""
# create network
self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0)
# setup links
self.net.addLink(self.dc[0], self.h[0])
# start Mininet network
self.startNet()
# add compute resources
vnf1 = self.dc[0].startCompute("vnf1")
# check number of running nodes
self.assertTrue(len(self.getContainernetContainers()) == 1)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 1)
# check compute list result
self.assertTrue(len(self.dc[0].listCompute()) == 1)
self.assertTrue(isinstance(
self.dc[0].listCompute()[0], EmulatorCompute))
self.assertTrue(self.dc[0].listCompute()[0].name == "vnf1")
# check connectivity by using ping
self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0)
# stop Mininet network
self.stopNet()
def testRemoveSingleComputeSingleDC(self):
"""
Test stop method for compute instances.
Check that the instance is really removed.
"""
# create network
self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0)
# setup links
self.net.addLink(self.dc[0], self.h[0])
# start Mininet network
self.startNet()
# add compute resources
vnf1 = self.dc[0].startCompute("vnf1")
# check number of running nodes
self.assertTrue(len(self.getContainernetContainers()) == 1)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 1)
# check compute list result
self.assertTrue(len(self.dc[0].listCompute()) == 1)
# check connectivity by using ping
self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0)
# remove compute resources
self.dc[0].stopCompute("vnf1")
# check number of running nodes
self.assertTrue(len(self.getContainernetContainers()) == 0)
self.assertTrue(len(self.net.hosts) == 1)
self.assertTrue(len(self.net.switches) == 1)
# check compute list result
self.assertTrue(len(self.dc[0].listCompute()) == 0)
# stop Mininet network
self.stopNet()
def testGetStatusSingleComputeSingleDC(self):
"""
Check if the getStatus functionality of EmulatorCompute
objects works well.
"""
# create network
self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0)
# setup links
self.net.addLink(self.dc[0], self.h[0])
# start Mininet network
self.startNet()
# add compute resources
vnf1 = self.dc[0].startCompute("vnf1")
# check number of running nodes
self.assertTrue(len(self.getContainernetContainers()) == 1)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 1)
# check compute list result
self.assertTrue(len(self.dc[0].listCompute()) == 1)
self.assertTrue(isinstance(
self.dc[0].listCompute()[0], EmulatorCompute))
self.assertTrue(self.dc[0].listCompute()[0].name == "vnf1")
# check connectivity by using ping
self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0)
# check get status
s = self.dc[0].containers.get("vnf1").getStatus()
self.assertTrue(s["name"] == "vnf1")
self.assertTrue(s["state"]["Running"])
# stop Mininet network
self.stopNet()
def testConnectivityMultiDC(self):
"""
Test if compute instances started in different data centers
are able to talk to each other.
"""
# create network
self.createNet(
nswitches=3, ndatacenter=2, nhosts=0, ndockers=0,
autolinkswitches=True)
# setup links
self.net.addLink(self.dc[0], self.s[0])
self.net.addLink(self.dc[1], self.s[2])
# start Mininet network
self.startNet()
# add compute resources
vnf1 = self.dc[0].startCompute("vnf1")
vnf2 = self.dc[1].startCompute("vnf2")
# check number of running nodes
self.assertTrue(len(self.getContainernetContainers()) == 2)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 5)
# check compute list result
self.assertTrue(len(self.dc[0].listCompute()) == 1)
self.assertTrue(len(self.dc[1].listCompute()) == 1)
# check connectivity by using ping
self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
# stop Mininet network
self.stopNet()
def testInterleavedAddRemoveMultiDC(self):
"""
Test multiple, interleaved add and remove operations and ensure
that always all expected compute instances are reachable.
"""
# create network
self.createNet(
nswitches=3, ndatacenter=2, nhosts=0, ndockers=0,
autolinkswitches=True)
# setup links
self.net.addLink(self.dc[0], self.s[0])
self.net.addLink(self.dc[1], self.s[2])
# start Mininet network
self.startNet()
# add compute resources
vnf1 = self.dc[0].startCompute("vnf1")
vnf2 = self.dc[1].startCompute("vnf2")
# check number of running nodes
self.assertTrue(len(self.getContainernetContainers()) == 2)
self.assertTrue(len(self.net.hosts) == 2)
self.assertTrue(len(self.net.switches) == 5)
# check compute list result
self.assertTrue(len(self.dc[0].listCompute()) == 1)
self.assertTrue(len(self.dc[1].listCompute()) == 1)
# check connectivity by using ping
self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
# remove compute resources
self.dc[0].stopCompute("vnf1")
# check number of running nodes
self.assertTrue(len(self.getContainernetContainers()) == 1)
self.assertTrue(len(self.net.hosts) == 1)
self.assertTrue(len(self.net.switches) == 5)
# check compute list result
self.assertTrue(len(self.dc[0].listCompute()) == 0)
self.assertTrue(len(self.dc[1].listCompute()) == 1)
# add compute resources
vnf3 = self.dc[0].startCompute("vnf3")
vnf4 = self.dc[0].startCompute("vnf4")
# check compute list result
self.assertTrue(len(self.dc[0].listCompute()) == 2)
self.assertTrue(len(self.dc[1].listCompute()) == 1)
self.assertTrue(self.net.ping([vnf3, vnf2]) <= 0.0)
self.assertTrue(self.net.ping([vnf4, vnf2]) <= 0.0)
# remove compute resources
self.dc[0].stopCompute("vnf3")
self.dc[0].stopCompute("vnf4")
self.dc[1].stopCompute("vnf2")
# check compute list result
self.assertTrue(len(self.dc[0].listCompute()) == 0)
self.assertTrue(len(self.dc[1].listCompute()) == 0)
# stop Mininet network
self.stopNet()
# Run the full unittest suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 40.440171 | 86 | 0.606996 |
ace24c61e01e99bc9288b8bf16d589799ee2a907 | 433 | py | Python | plotly/validators/scattergeo/_uid.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 2 | 2018-12-03T15:20:42.000Z | 2018-12-03T15:20:47.000Z | plotly/validators/scattergeo/_uid.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/validators/scattergeo/_uid.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 2 | 2019-06-17T01:35:57.000Z | 2020-11-03T01:07:19.000Z | import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``uid`` attribute of ``scattergeo`` traces."""
    def __init__(self, plotly_name='uid', parent_name='scattergeo', **kwargs):
        # Defaults are only applied when the caller did not supply them;
        # kwargs.pop lets explicit overrides win while the remainder is
        # forwarded unchanged to the base StringValidator.
        # NOTE(review): 'plot' edit type / 'info' role semantics are defined
        # by _plotly_utils.basevalidators — confirm there if changing.
        super(UidValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop('edit_type', 'plot'),
            role=kwargs.pop('role', 'info'),
            **kwargs
        )
| 30.928571 | 78 | 0.644342 |
ace24cd08cf848498cf2491ca803091ff7bc9770 | 903 | py | Python | basiclive/core/lims/migrations/0017_create_coords_data.py | znarthur/basic-live | 79c194311de05af2e1bb21d1bc8c6c14dda356d0 | [
"BSD-3-Clause"
] | null | null | null | basiclive/core/lims/migrations/0017_create_coords_data.py | znarthur/basic-live | 79c194311de05af2e1bb21d1bc8c6c14dda356d0 | [
"BSD-3-Clause"
] | 1 | 2020-12-03T15:27:09.000Z | 2020-12-03T15:27:09.000Z | basiclive/core/lims/migrations/0017_create_coords_data.py | znarthur/basic-live | 79c194311de05af2e1bb21d1bc8c6c14dda356d0 | [
"BSD-3-Clause"
] | 1 | 2021-09-28T21:06:09.000Z | 2021-09-28T21:06:09.000Z | # Generated by Django 2.2.5 on 2019-09-14 22:05
from django.db import migrations
def create_coords_data(apps, schema_editor):
"""
Save Locations from layout into Location Object
"""
ContainerType = apps.get_model('lims', 'ContainerType')
LocationCoord = apps.get_model('lims', 'LocationCoord')
db_alias = schema_editor.connection.alias
to_create = []
for kind in ContainerType.objects.all():
for loc in kind.locations.all():
layout = kind.layout
x, y = layout.get('locations', {}).get(loc.name, [0.0, 0.0])
to_create.append(LocationCoord(kind=kind, location=loc, x=x, y=y))
LocationCoord.objects.using(db_alias).bulk_create(to_create)
class Migration(migrations.Migration):
dependencies = [
('lims', '0016_locationcoord'),
]
operations = [
migrations.RunPython(create_coords_data),
]
| 29.129032 | 78 | 0.660022 |
ace24dd615d0861940a6d1a1ae3685518cd6dbe3 | 1,409 | py | Python | linode_api4/common.py | borgeslucaz/linode_api4-python | d712d9e9f73754b39d0fa944b19f4095316b20e1 | [
"BSD-3-Clause"
] | null | null | null | linode_api4/common.py | borgeslucaz/linode_api4-python | d712d9e9f73754b39d0fa944b19f4095316b20e1 | [
"BSD-3-Clause"
] | null | null | null | linode_api4/common.py | borgeslucaz/linode_api4-python | d712d9e9f73754b39d0fa944b19f4095316b20e1 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import os
def load_and_validate_keys(authorized_keys):
"""
Loads authorized_keys as taken by :any:`instance_create`,
:any:`disk_create` or :any:`rebuild`, and loads in any keys from any files
provided.
:param authorized_keys: A list of keys or paths to keys, or a single key
:returns: A list of raw keys
:raises: ValueError if keys in authorized_keys don't appear to be a raw
key and can't be opened.
"""
if not authorized_keys:
return None
if not isinstance(authorized_keys, list):
authorized_keys = [authorized_keys]
ret = []
for k in authorized_keys:
accepted_types = ('ssh-dss', 'ssh-rsa', 'ecdsa-sha2-nistp', 'ssh-ed25519')
if any([ t for t in accepted_types if k.startswith(t) ]):
# this looks like a key, cool
ret.append(k)
else:
# it doesn't appear to be a key.. is it a path to the key?
k = os.path.expanduser(k)
if os.path.isfile(k):
with open(k) as f:
ret.append(f.read().rstrip())
else:
raise ValueError("authorized_keys must either be paths "
"to the key files or a list of raw "
"public key of one of these types: {}".format(accepted_types))
return ret
| 33.547619 | 95 | 0.584102 |
ace24df429ce60b714c5afe38214318206983aed | 25,378 | py | Python | CS696/Modelling/sentiment_score/finbert/finbert.py | tiwarikajal/Stock_fundamental_prediction | df55740bcd9568711fe05d25aaec3a1dd8864a66 | [
"MIT"
] | 1 | 2020-03-20T23:24:21.000Z | 2020-03-20T23:24:21.000Z | CS696/Modelling/sentiment_score/finbert/finbert.py | tiwarikajal/Stock_fundamental_prediction | df55740bcd9568711fe05d25aaec3a1dd8864a66 | [
"MIT"
] | null | null | null | CS696/Modelling/sentiment_score/finbert/finbert.py | tiwarikajal/Stock_fundamental_prediction | df55740bcd9568711fe05d25aaec3a1dd8864a66 | [
"MIT"
] | 1 | 2020-04-02T01:34:52.000Z | 2020-04-02T01:34:52.000Z | from __future__ import absolute_import, division, print_function
import random
import pandas as pd
from pytorch_pretrained_bert.tokenization import BertTokenizer
from torch.nn import MSELoss
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from tqdm import tqdm_notebook as tqdm
from tqdm import trange
from nltk.tokenize import sent_tokenize
from finbert.utils import *
import numpy as np
import logging
logger = logging.getLogger(__name__)
class Config(object):
    """The configuration class for training."""

    def __init__(self,
                 data_dir,
                 bert_model,
                 model_dir,
                 max_seq_length=64,
                 train_batch_size=32,
                 eval_batch_size=32,
                 learning_rate=5e-5,
                 num_train_epochs=10.0,
                 warm_up_proportion=0.1,
                 no_cuda=False,
                 do_lower_case=True,
                 seed=42,
                 local_rank=-1,
                 gradient_accumulation_steps=1,
                 fp16=False,
                 output_mode='classification',
                 discriminate=True,
                 gradual_unfreeze=True,
                 encoder_no=12):
        """
        Parameters
        ----------
        data_dir: str
            Path for the training and evaluation datasets.
        bert_model: BertModel
            The BERT model to be used. For example: BertForSequenceClassification.from_pretrained(...)
        model_dir: str
            The path where the resulting model will be saved.
        max_seq_length: int
            The maximum length of the sequence to be used. Default value is 64.
        train_batch_size: int
            The batch size for the training. Default value is 32.
        eval_batch_size: int
            The batch size for the evaluation. Default value is 32.
        learning_rate: float
            The learning rate. Default value is 5e-5.
        num_train_epochs: int
            Number of epochs to train. Default value is 10.
        warm_up_proportion: float
            During the training, the learning rate is linearly increased. This value determines when the learning rate
            reaches the intended learning rate. Default value is 0.1.
        no_cuda: bool
            Determines whether to use gpu. Default is False.
        do_lower_case: bool
            Determines whether to make all training and evaluation examples lower case. Default is True.
        seed: int
            Random seed. Defaults to 42.
        local_rank: int
            Used for number of gpu's that will be utilized. If set -1, no distributed training will be done. Default
            value is -1.
        gradient_accumulation_steps: int
            Number of gradient accumulations steps. Defaults to 1.
        fp16: bool
            Determines whether to use 16 bits for floats, instead of 32.
        output_mode: 'classification' or 'regression'
            Determines whether the task is classification or regression.
        discriminate: bool
            Determines whether to apply discriminative fine-tuning.
        gradual_unfreeze: bool
            Determines whether to gradually unfreeze lower and lower layers as the training goes on.
        encoder_no: int
            Starting from which layer the model is going to be finetuned. If set 12, whole model is going to be
            fine-tuned. If set, for example, 6, only the last 6 layers will be fine-tuned.
        """
        self.data_dir = data_dir
        self.bert_model = bert_model
        self.model_dir = model_dir
        self.do_lower_case = do_lower_case
        self.max_seq_length = max_seq_length
        self.train_batch_size = train_batch_size
        self.local_rank = local_rank
        self.eval_batch_size = eval_batch_size
        self.learning_rate = learning_rate
        self.num_train_epochs = num_train_epochs
        self.warm_up_proportion = warm_up_proportion
        self.no_cuda = no_cuda
        self.seed = seed
        self.gradient_accumulation_steps = gradient_accumulation_steps
        self.output_mode = output_mode
        self.fp16 = fp16
        self.discriminate = discriminate
        self.gradual_unfreeze = gradual_unfreeze
        self.encoder_no = encoder_no
class FinBert(object):
    """
    The main class for FinBERT.
    """
    def __init__(self,
                 config):
        # Store the training configuration; the remaining attributes
        # (device, tokenizer, processor, label_list, ...) are populated
        # later by prepare_model().
        self.config = config
    def prepare_model(self, label_list):
        """
        Sets some of the components of the model: Dataset processor, number of labels, usage of gpu and distributed
        training, gradient accumulation steps and tokenizer.
        Parameters
        ----------
        label_list: list
            The list of labels values in the dataset. For example: ['positive','negative','neutral']
        """
        # Registry of dataset processors; only the financial-sentiment
        # ("finsent") processor exists.
        self.processors = {
            "finsent": FinSentProcessor
        }
        self.num_labels_task = {
            'finsent': 2
        }
        # Device selection: single process (possibly multi-GPU) unless a
        # local_rank was given, in which case one GPU per process is used
        # with the NCCL distributed backend.
        if self.config.local_rank == -1 or self.config.no_cuda:
            self.device = torch.device("cuda" if torch.cuda.is_available() and not self.config.no_cuda else "cpu")
            self.n_gpu = torch.cuda.device_count()
        else:
            torch.cuda.set_device(self.config.local_rank)
            self.device = torch.device("cuda", self.config.local_rank)
            self.n_gpu = 1
            # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
            torch.distributed.init_process_group(backend='nccl')
        logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
            self.device, self.n_gpu, bool(self.config.local_rank != -1), self.config.fp16))
        if self.config.gradient_accumulation_steps < 1:
            raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                self.config.gradient_accumulation_steps))
        # Per-forward-pass batch size; gradients are accumulated so each
        # optimizer step still sees the configured train_batch_size examples.
        self.config.train_batch_size = self.config.train_batch_size // self.config.gradient_accumulation_steps
        # Seed every RNG (python, numpy, torch, cuda) for reproducibility.
        random.seed(self.config.seed)
        np.random.seed(self.config.seed)
        torch.manual_seed(self.config.seed)
        if self.n_gpu > 0:
            torch.cuda.manual_seed_all(self.config.seed)
        # Refuse to overwrite an existing, non-empty model directory.
        if os.path.exists(self.config.model_dir) and os.listdir(self.config.model_dir):
            raise ValueError("Output directory ({}) already exists and is not empty.".format(self.config.model_dir))
        if not os.path.exists(self.config.model_dir):
            os.makedirs(self.config.model_dir)
        self.processor = self.processors['finsent']()
        self.num_labels = len(label_list)
        self.label_list = label_list
        self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=self.config.do_lower_case)
    def get_data(self, phase):
        """
        Gets the data for training or evaluation. It returns the data in the format that pytorch will process. In the
        data directory, there should be a .csv file with the name <phase>.csv
        Parameters
        ----------
        phase: str
            Name of the dataset that will be used in that phase. For example if there is a 'train.csv' in the data
            folder, it should be set to 'train'.
        Returns
        -------
        examples: list
            A list of InputExample's. Each InputExample is an object that includes the information for each example;
            text, id, label...
        """
        self.num_train_optimization_steps = None
        examples = None
        examples = self.processor.get_examples(self.config.data_dir, phase)
        # Total optimizer steps = (examples / effective batch size) * epochs;
        # used later by the BertAdam warmup schedule.
        self.num_train_optimization_steps = int(
            len(
                examples) / self.config.train_batch_size / self.config.gradient_accumulation_steps) * self.config.num_train_epochs
        if phase=='train':
            # Tab-separated training file is re-read here to derive class
            # weights inversely proportional to class frequency, so the loss
            # is balanced across labels.
            train = pd.read_csv(os.path.join(self.config.data_dir, 'train.csv'),sep='\t',index_col=False)
            # NOTE(review): 'weights' below is assigned but never used.
            weights = list()
            labels = self.label_list
            class_weights = [train.shape[0] / train[train.label == label].shape[0] for label in labels]
            self.class_weights = torch.tensor(class_weights)
        return examples
    def create_the_model(self):
        """
        Creates the model. Sets the model to be trained and the optimizer.

        Returns
        -------
        model
            The model (moved to self.device) that will be trained; also sets
            self.optimizer (BertAdam with linear warmup).
        """
        model = self.config.bert_model
        model.to(self.device)
        # Prepare optimizer
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        lr = self.config.learning_rate
        # Discriminative fine-tuning factor: each layer further from the
        # output gets its learning rate divided by another factor of dft_rate.
        dft_rate = 1.2
        if self.config.discriminate:
            # apply the discriminative fine-tuning. discrimination rate is governed by dft_rate.
            encoder_params = []
            for i in range(12):
                # Two parameter groups per encoder layer: with and without
                # weight decay (biases and LayerNorm params are not decayed).
                encoder_decay = {
                    'params': [p for n, p in list(model.bert.encoder.layer[i].named_parameters()) if
                               not any(nd in n for nd in no_decay)],
                    'weight_decay': 0.01,
                    'lr': lr / (dft_rate ** (12 - i))}
                encoder_nodecay = {
                    'params': [p for n, p in list(model.bert.encoder.layer[i].named_parameters()) if
                               any(nd in n for nd in no_decay)],
                    'weight_decay': 0.0,
                    'lr': lr / (dft_rate ** (12 - i))}
                encoder_params.append(encoder_decay)
                encoder_params.append(encoder_nodecay)
            # Embeddings get the smallest lr (deepest layer); pooler and
            # classifier get the full base lr.
            optimizer_grouped_parameters = [
                {'params': [p for n, p in list(model.bert.embeddings.named_parameters()) if
                            not any(nd in n for nd in no_decay)],
                 'weight_decay': 0.01,
                 'lr': lr / (dft_rate ** 13)},
                {'params': [p for n, p in list(model.bert.embeddings.named_parameters()) if
                            any(nd in n for nd in no_decay)],
                 'weight_decay': 0.0,
                 'lr': lr / (dft_rate ** 13)},
                {'params': [p for n, p in list(model.bert.pooler.named_parameters()) if
                            not any(nd in n for nd in no_decay)],
                 'weight_decay': 0.01,
                 'lr': lr},
                {'params': [p for n, p in list(model.bert.pooler.named_parameters()) if
                            any(nd in n for nd in no_decay)],
                 'weight_decay': 0.0,
                 'lr': lr},
                {'params': [p for n, p in list(model.classifier.named_parameters()) if
                            not any(nd in n for nd in no_decay)],
                 'weight_decay': 0.01,
                 'lr': lr},
                {'params': [p for n, p in list(model.classifier.named_parameters()) if any(nd in n for nd in no_decay)],
                 'weight_decay': 0.0,
                 'lr': lr}]
            optimizer_grouped_parameters.extend(encoder_params)
        else:
            # No discrimination: just split decay / no-decay groups at the
            # shared base learning rate.
            param_optimizer = list(model.named_parameters())
            optimizer_grouped_parameters = [
                {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                 'weight_decay': 0.01},
                {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
            ]
        schedule = "warmup_linear"
        self.optimizer = BertAdam(optimizer_grouped_parameters,
                                  lr=self.config.learning_rate,
                                  warmup=self.config.warm_up_proportion,
                                  t_total=self.num_train_optimization_steps,
                                  schedule=schedule)
        return model
def get_loader(self, examples, phase):
"""
Creates a data loader object for a dataset.
Parameters
----------
examples: list
The list of InputExample's.
phase: 'train' or 'eval'
Determines whether to use random sampling or sequential sampling depending on the phase.
Returns
-------
dataloader: DataLoader
The data loader object.
"""
features = convert_examples_to_features(examples, self.label_list,
self.config.max_seq_length,
self.tokenizer,
self.config.output_mode)
# Log the necessasry information
logger.info("***** Loading data *****")
logger.info(" Num examples = %d", len(examples))
logger.info(" Batch size = %d", self.config.train_batch_size)
logger.info(" Num steps = %d", self.num_train_optimization_steps)
# Load the data, make it into TensorDataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
if self.config.output_mode == "classification":
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
elif self.config.output_mode == "regression":
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
try:
all_agree_ids = torch.tensor([f.agree for f in features], dtype=torch.long)
except:
all_agree_ids = torch.tensor([0.0 for f in features], dtype=torch.long)
data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_agree_ids)
# Distributed, if necessary
if phase == 'train':
my_sampler = RandomSampler(data)
elif phase == 'eval':
my_sampler = SequentialSampler(data)
dataloader = DataLoader(data, sampler=my_sampler, batch_size=self.config.train_batch_size)
return dataloader
def train(self, train_examples, model):
"""
Trains the model.
Parameters
----------
examples: list
Contains the data as a list of InputExample's
model: BertModel
The Bert model to be trained.
weights: list
Contains class weights.
Returns
-------
model: BertModel
The trained model.
"""
validation_examples = self.get_data('validation')
global_step = 0
self.validation_losses = []
# Training
train_dataloader = self.get_loader(train_examples, 'train')
model.train()
step_number = len(train_dataloader)
i = 0
for _ in trange(int(self.config.num_train_epochs), desc="Epoch"):
model.train()
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc='Iteration')):
if (self.config.gradual_unfreeze and i == 0):
for param in model.bert.parameters():
param.requires_grad = False
if (step % (step_number // 3)) == 0:
i += 1
if (self.config.gradual_unfreeze and i > 1 and i < self.config.encoder_no):
for k in range(i - 1):
try:
for param in model.bert.encoder.layer[self.config.encoder_no - 1 - k].parameters():
param.requires_grad = True
except:
pass
if (self.config.gradual_unfreeze and i > self.config.encoder_no + 1):
for param in model.bert.embeddings.parameters():
param.requires_grad = True
batch = tuple(t.to(self.device) for t in batch)
input_ids, input_mask, segment_ids, label_ids, agree_ids = batch
logits = model(input_ids, segment_ids, input_mask)
weights = self.class_weights.to(self.device)
if self.config.output_mode == "classification":
loss_fct = CrossEntropyLoss(weight = weights)
loss = loss_fct(logits.view(-1, self.num_labels), label_ids.view(-1))
elif self.config.output_mode == "regression":
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), label_ids.view(-1))
if self.config.gradient_accumulation_steps > 1:
loss = loss / self.config.gradient_accumulation_steps
else:
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
if (step + 1) % self.config.gradient_accumulation_steps == 0:
if self.config.fp16:
lr_this_step = self.config.learning_rate * warmup_linear(
global_step / self.num_train_optimization_steps, self.config.warmup_proportion)
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr_this_step
self.optimizer.step()
self.optimizer.zero_grad()
global_step += 1
# Validation
validation_loader = self.get_loader(validation_examples, phase='eval')
model.eval()
valid_loss, valid_accuracy = 0, 0
nb_valid_steps, nb_valid_examples = 0, 0
for input_ids, input_mask, segment_ids, label_ids, agree_ids in tqdm(validation_loader, desc="Validating"):
input_ids = input_ids.to(self.device)
input_mask = input_mask.to(self.device)
segment_ids = segment_ids.to(self.device)
label_ids = label_ids.to(self.device)
agree_ids = agree_ids.to(self.device)
with torch.no_grad():
logits = model(input_ids, segment_ids, input_mask)
if self.config.output_mode == "classification":
loss_fct = CrossEntropyLoss(weight=weights)
tmp_valid_loss = loss_fct(logits.view(-1, self.num_labels), label_ids.view(-1))
elif self.config.output_mode == "regression":
loss_fct = MSELoss()
tmp_valid_loss = loss_fct(logits.view(-1), label_ids.view(-1))
valid_loss += tmp_valid_loss.mean().item()
nb_valid_steps += 1
valid_loss = valid_loss / nb_valid_steps
self.validation_losses.append(valid_loss)
print("Validation losses: {}".format(self.validation_losses))
if valid_loss == min(self.validation_losses):
try:
os.remove(self.config.model_dir / ('temporary' + str(best_model)))
except:
print('No best model found')
torch.save({'epoch': str(i), 'state_dict': model.state_dict()},
self.config.model_dir / ('temporary' + str(i)))
best_model = i
# Save a trained model and the associated configuration
checkpoint = torch.load(self.config.model_dir / ('temporary' + str(best_model)))
model.load_state_dict(checkpoint['state_dict'])
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(self.config.model_dir, WEIGHTS_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
output_config_file = os.path.join(self.config.model_dir, CONFIG_NAME)
with open(output_config_file, 'w') as f:
f.write(model_to_save.config.to_json_string())
os.remove(self.config.model_dir / ('temporary' + str(best_model)))
return model
    def evaluate(self, model, examples):
        """
        Evaluate the model.
        Parameters
        ----------
        model: BertModel
            The model to be evaluated.
        examples: list
            Evaluation data as a list of InputExample's.
        Returns
        -------
        evaluation_df: pd.DataFrame
            A dataframe that includes for each example predicted probability and labels.
            (The 'predictions' column holds raw logits, not softmax probabilities.)
        """
        eval_loader = self.get_loader(examples, phase='eval')
        logger.info("***** Running evaluation ***** ")
        logger.info(" Num examples = %d", len(examples))
        logger.info(" Batch size = %d", self.config.eval_batch_size)
        model.eval()
        # NOTE(review): eval_loss/eval_accuracy are accumulated but never
        # returned; text_ids is collected but unused.
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        predictions = []
        labels = []
        agree_levels = []
        text_ids = []
        for input_ids, input_mask, segment_ids, label_ids, agree_ids in tqdm(eval_loader, desc="Testing"):
            input_ids = input_ids.to(self.device)
            input_mask = input_mask.to(self.device)
            segment_ids = segment_ids.to(self.device)
            label_ids = label_ids.to(self.device)
            agree_ids = agree_ids.to(self.device)
            # No gradients needed for evaluation.
            with torch.no_grad():
                logits = model(input_ids, segment_ids, input_mask)
                if self.config.output_mode == "classification":
                    loss_fct = CrossEntropyLoss()
                    tmp_eval_loss = loss_fct(logits.view(-1, self.num_labels), label_ids.view(-1))
                elif self.config.output_mode == "regression":
                    loss_fct = MSELoss()
                    tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))
                np_logits = logits.cpu().numpy()
                # Both branches are identical here: predictions are the raw
                # logits regardless of output mode.
                if self.config.output_mode == 'classification':
                    prediction = np.array(np_logits)
                elif self.config.output_mode == "regression":
                    prediction = np.array(np_logits)
                for agree_id in agree_ids:
                    agree_levels.append(agree_id.item())
                for label_id in label_ids:
                    labels.append(label_id.item())
                for pred in prediction:
                    predictions.append(pred)
                text_ids.append(input_ids)
                # tmp_eval_loss = loss_fct(logits.view(-1, self.num_labels), label_ids.view(-1))
                # tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
                eval_loss += tmp_eval_loss.mean().item()
                nb_eval_steps += 1
            # logits = logits.detach().cpu().numpy()
            # label_ids = label_ids.to('cpu').numpy()
            # tmp_eval_accuracy = accuracy(logits, label_ids)
            # eval_loss += tmp_eval_loss.mean().item()
            # eval_accuracy += tmp_eval_accuracy
        evaluation_df = pd.DataFrame({'predictions': predictions, 'labels': labels, "agree_levels": agree_levels})
        return evaluation_df
def predict(text, model, write_to_csv=False, path=None):
    """
    Predict sentiments of sentences in a given text. The function first tokenizes sentences, makes predictions and
    returns the mean sentiment score.
    Parameters
    ----------
    text: string
        text to be analyzed
    model: BertForSequenceClassification
        the classifier model
    write_to_csv (optional): bool
        NOTE(review): currently ignored — the per-sentence dataframe code
        below is commented out, so nothing is ever written.
    path (optional): string
        path to write the csv (ignored for the same reason)
    Returns
    -------
    float
        Mean over sentences of P(positive) - P(negative); in [-1, 1].
    """
    model.eval()
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    sentences = sent_tokenize(text)
    # NOTE(review): leftover debug prints below; consider replacing with
    # logger.debug calls.
    print("--------------------------------")
    print("Number of sentences")
    print(len(sentences))
    print("sentences")
    print(sentences)
    examples = [InputExample(str(i), sentence) for i,sentence in enumerate(sentences)]
    print("--------------------------------")
    print("examples")
    print(examples)
    label_list = ['positive', 'negative', 'neutral']
    features = convert_examples_to_features(examples, label_list, 64, tokenizer)
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    print("--------------------------------")
    print("all_input_ids")
    print(all_input_ids)
    with torch.no_grad():
        logits = model(all_input_ids, all_segment_ids, all_input_mask)
        logits = softmax(np.array(logits))
        # Column 0 is 'positive', column 1 is 'negative' (see label_list);
        # their difference is the per-sentence sentiment score.
        sentiment_score = pd.Series(logits[:,0] - logits[:,1])
        predictions = np.squeeze(np.argmax(logits, axis=1))
        result = sentiment_score.mean()
    """
    result = {'sentence': sentences,
              'logit': list(logits),
              'prediction': predictions,
              'sentiment_score':sentiment_score}
    label_dict = {0:'positive',1:'negative',2:'neutral'}
    result = pd.DataFrame(result)
    result['prediction'] = result.prediction.apply(lambda x: label_dict[x])
    if write_to_csv:
        result.to_csv(path,sep=',', index=False)
    """
    return result
| 39.71518 | 130 | 0.58003 |
ace24f977a33232163163f00c6d7432f8471e8a3 | 1,453 | py | Python | advent_of_code_2021/day-1.py | antoine-amara/advent-of-code-2021 | c76d88234a8723b67b624e9eecb6582dac7aed66 | [
"MIT"
] | null | null | null | advent_of_code_2021/day-1.py | antoine-amara/advent-of-code-2021 | c76d88234a8723b67b624e9eecb6582dac7aed66 | [
"MIT"
] | null | null | null | advent_of_code_2021/day-1.py | antoine-amara/advent-of-code-2021 | c76d88234a8723b67b624e9eecb6582dac7aed66 | [
"MIT"
] | null | null | null | from helpers.input_parser import parse_list_elements_to_int, read_input_lines
def read_input(input_name="example.txt"):
    """Read the day-1 puzzle input file and return its lines as integers."""
    return parse_list_elements_to_int(read_input_lines(day=1, input_name=input_name))
def count_larger_mesures_one_by_one(data):
    """Count how many measurements are strictly larger than the previous one.

    :param data: list of integer depth measurements.
    :returns: number of consecutive pairs where the later value is larger.
    """
    # Pairing each element with its successor via zip removes the index
    # arithmetic and naturally yields 0 for empty or single-element inputs
    # (the original enumerate/len version raised AssertionError on []).
    return sum(previous < current for previous, current in zip(data, data[1:]))
def count_larger_mesures_three_by_three(data):
    """Count increases between sums of overlapping three-measurement windows.

    Consecutive windows share two elements, so window i+1 sums larger than
    window i exactly when data[i + 3] > data[i]. Comparing elements three
    apart avoids materializing the window sums and, unlike the original
    asserts, copes with inputs shorter than four elements by returning 0.

    :param data: list of integer depth measurements.
    :returns: number of consecutive window pairs with a strictly larger sum.
    """
    return sum(earlier < later for earlier, later in zip(data, data[3:]))
def part1():
    """Solve part 1 against the real puzzle input and return the answer."""
    # should be = 1532
    data = read_input("input.txt")
    counter = count_larger_mesures_one_by_one(data)
    # Regression guard: known answer for this particular input file.
    assert counter == 1532
    return counter
def part2():
    """Solve part 2 against the real puzzle input and return the answer."""
    # should be = 1571
    data = read_input("input.txt")
    counter = count_larger_mesures_three_by_three(data)
    # Regression guard: known answer for this particular input file.
    assert counter == 1571
    return counter
def main():
    """Print the answers to both parts of the day-1 puzzle."""
    # part 1: compare elements one by one
    response_part1 = part1()
    print("Day 1 -- part 1")
    print(f"Response: {response_part1}\n")
    # part 2: compare elements with sum of three elements
    response_part2 = part2()
    print("Day 1 -- part 2")
    print(f"Response: {response_part2}")


# Guard the entry point so importing this module (e.g. from tests) does not
# immediately run the solver, which reads input files from disk.
if __name__ == "__main__":
    main()
| 26.418182 | 94 | 0.680661 |
ace24fb8c2201c8283ccf9ffef2ab57b637a52c3 | 12,434 | py | Python | tests/test_toy.py | samuelefiorini/vrpy | ad3232b9e9ee9276c9c799d16b4a4a8c2b41eef1 | [
"MIT"
] | null | null | null | tests/test_toy.py | samuelefiorini/vrpy | ad3232b9e9ee9276c9c799d16b4a4a8c2b41eef1 | [
"MIT"
] | null | null | null | tests/test_toy.py | samuelefiorini/vrpy | ad3232b9e9ee9276c9c799d16b4a4a8c2b41eef1 | [
"MIT"
] | null | null | null | from time import time
from networkx import DiGraph
from vrpy import VehicleRoutingProblem
from vrpy.preprocessing import get_num_stops_upper_bound
class TestsToy:
    def setup(self):
        """
        Creates a toy graph.
        """
        self.G = DiGraph()
        # Each customer node 1..5 is linked to the Source and the Sink with
        # identical cost/time, carries demand 5, a [5, 100] time window and
        # a service time of 1.
        for v in [1, 2, 3, 4, 5]:
            self.G.add_edge("Source", v, cost=10, time=20)
            self.G.add_edge(v, "Sink", cost=10, time=20)
            self.G.nodes[v]["demand"] = 5
            self.G.nodes[v]["upper"] = 100
            self.G.nodes[v]["lower"] = 5
            self.G.nodes[v]["service_time"] = 1
        # Node 2 gets a tighter window so time-window constraints can bind.
        self.G.nodes[2]["upper"] = 20
        self.G.nodes["Sink"]["upper"] = 100
        self.G.nodes["Source"]["upper"] = 100
        # Chain of inter-customer edges 1 -> 2 -> 3 -> 4 -> 5.
        self.G.add_edge(1, 2, cost=10, time=20)
        self.G.add_edge(2, 3, cost=10, time=20)
        self.G.add_edge(3, 4, cost=15, time=20)
        self.G.add_edge(4, 5, cost=10, time=25)
#################
# subsolve cspy #
#################
    def test_cspy_stops(self):
        """Tests column generation procedure on toy graph with stop constraints"""
        prob = VehicleRoutingProblem(self.G, num_stops=3)
        prob.solve()
        assert prob.best_value == 70
        # Two optimal routes exist; route 1 may be either of them.
        assert prob.best_routes[1] in [
            ["Source", 1, 2, 3, "Sink"],
            ["Source", 4, 5, "Sink"],
        ]
        assert set(prob.best_routes_cost.values()) == {30, 40}
        # The inexact (heuristic) labelling must find the same optimum.
        prob.solve(exact=False)
        assert prob.best_value == 70
    def test_cspy_stops_capacity(self):
        """Tests column generation procedure on toy graph
        with stop and capacity constraints
        """
        prob = VehicleRoutingProblem(self.G, num_stops=3, load_capacity=10)
        prob.solve()
        assert prob.best_value == 80
        # With demand 5 per node, each route carries either 5 (one stop)
        # or 10 (two stops, the capacity limit).
        assert set(prob.best_routes_load.values()) == {5, 10}
    def test_cspy_stops_capacity_duration(self):
        """Tests column generation procedure on toy graph
        with stop, capacity and duration constraints
        """
        prob = VehicleRoutingProblem(self.G,
                                     num_stops=3,
                                     load_capacity=10,
                                     duration=62)
        # exact=False: the heuristic labelling suffices for this instance.
        prob.solve(exact=False)
        assert prob.best_value == 85
        assert set(prob.best_routes_duration.values()) == {41, 62}
        # Load delivered by route 1 when it reaches the Sink.
        assert prob.node_load[1]["Sink"] in [5, 10]
    def test_cspy_stops_time_windows(self):
        """Tests column generation procedure on toy graph
        with stop, capacity and time_window constraints
        """
        prob = VehicleRoutingProblem(
            self.G,
            num_stops=3,
        )
        prob.solve()
        assert prob.best_value == 80
        # Route 1 leaves the depot at time 0.
        assert prob.departure_time[1]["Source"] == 0
        assert prob.arrival_time[1]["Sink"] in [41, 62]
    def test_cspy_schedule(self):
        """Tests if final schedule is time-window feasible
        """
        prob = VehicleRoutingProblem(
            self.G,
            num_stops=3,
            time_windows=True,
        )
        prob.solve()
        # Check departure times: every visit must fall inside the node's
        # [lower, upper] time window.
        for k1, v1 in prob.departure_time.items():
            for k2, v2 in v1.items():
                assert (self.G.nodes[k2]["lower"] <= v2)
                assert (v2 <= self.G.nodes[k2]["upper"])
        # Check arrival times against the same windows.
        for k1, v1 in prob.arrival_time.items():
            for k2, v2 in v1.items():
                assert (self.G.nodes[k2]["lower"] <= v2)
                assert (v2 <= self.G.nodes[k2]["upper"])
###############
# subsolve lp #
###############
    def test_LP_stops(self):
        """Tests column generation procedure on toy graph with stop constraints"""
        prob = VehicleRoutingProblem(self.G, num_stops=3)
        prob.solve(cspy=False)
        assert prob.best_value == 70
        # The BestEdges1 pricing heuristic must reach the same optimum.
        prob.solve(cspy=False, pricing_strategy="BestEdges1")
        assert prob.best_value == 70
def test_LP_stops_capacity(self):
"""Tests column generation procedure on toy graph"""
prob = VehicleRoutingProblem(self.G, num_stops=3, load_capacity=10)
prob.solve(cspy=False)
assert prob.best_value == 80
def test_LP_stops_capacity_duration(self):
"""Tests column generation procedure on toy graph"""
prob = VehicleRoutingProblem(
self.G,
num_stops=3,
load_capacity=10,
duration=62,
)
prob.solve(cspy=False)
assert prob.best_value == 85
def test_LP_stops_time_windows(self):
"""Tests column generation procedure on toy graph"""
prob = VehicleRoutingProblem(
self.G,
num_stops=3,
time_windows=True,
)
prob.solve(cspy=False)
assert prob.best_value == 80
def test_LP_schedule(self):
"""Tests column generation procedure on toy graph"""
prob = VehicleRoutingProblem(
self.G,
num_stops=3,
time_windows=True,
)
prob.solve(cspy=False)
# Check departure times
for k1, v1 in prob.departure_time.items():
for k2, v2 in v1.items():
assert (self.G.nodes[k2]["lower"] <= v2)
assert (v2 <= self.G.nodes[k2]["upper"])
# Check arrival times
for k1, v1 in prob.arrival_time.items():
for k2, v2 in v1.items():
assert (self.G.nodes[k2]["lower"] <= v2)
assert (v2 <= self.G.nodes[k2]["upper"])
def test_LP_stops_elementarity(self):
"""Tests column generation procedure on toy graph"""
self.G.add_edge(2, 1, cost=2)
prob = VehicleRoutingProblem(
self.G,
num_stops=3,
)
prob.solve(cspy=False)
assert prob.best_value == 67
######################
# Clarke Wright only #
######################
def test_clarke_wright(self):
"Tests use of initial heuristic only"
prob = VehicleRoutingProblem(self.G, num_stops=3)
prob.solve(heuristic_only=True)
assert prob.best_value == 70
assert prob.best_routes[0] in [
["Source", 4, 5, "Sink"],
["Source", 1, 2, 3, "Sink"],
]
#########
# other #
#########
def test_all(self):
prob = VehicleRoutingProblem(self.G,
num_stops=3,
time_windows=True,
duration=63,
load_capacity=10)
prob.solve(cspy=False)
lp_best = prob.best_value
prob.solve(cspy=True)
cspy_best = prob.best_value
assert int(lp_best) == int(cspy_best)
def test_initial_solution(self):
prob = VehicleRoutingProblem(self.G, num_stops=4)
routes = [
["Source", 1, "Sink"],
["Source", 2, 3, "Sink"],
["Source", 4, 5, "Sink"],
]
prob.solve(initial_routes=routes, cspy=False)
assert prob.best_value == 70
def test_knapsack(self):
self.G.nodes["Source"]["demand"] = 0
self.G.nodes["Sink"]["demand"] = 0
assert get_num_stops_upper_bound(self.G, 10) == 4
def test_pricing_strategies(self):
sol = []
for strategy in [
"Exact", "BestPaths", "BestEdges1", "BestEdges2", "Hyper"
]:
prob = VehicleRoutingProblem(self.G, num_stops=4)
prob.solve(pricing_strategy=strategy)
sol.append(prob.best_value)
assert len(set(sol)) == 1
def test_lock(self):
routes = [["Source", 3, "Sink"]]
prob = VehicleRoutingProblem(self.G, num_stops=4)
prob.solve(preassignments=routes)
assert prob.best_value == 80
def test_partial_lock(self):
routes = [["Source", 3]]
prob = VehicleRoutingProblem(self.G, num_stops=4)
prob.solve(preassignments=routes)
assert prob.best_value == 75
def test_complete_lock(self):
routes = [
["Source", 1, "Sink"],
["Source", 2, "Sink"],
["Source", 3, "Sink"],
["Source", 4, "Sink"],
["Source", 5, "Sink"],
]
prob = VehicleRoutingProblem(self.G)
prob.solve(preassignments=routes)
assert prob.best_value == 100
def test_extend_preassignment(self):
routes = [[2, 3]]
prob = VehicleRoutingProblem(self.G, num_stops=4)
prob.solve(preassignments=routes)
assert prob.best_value == 70
def test_pick_up_delivery(self):
self.G.nodes[2]["request"] = 5
self.G.nodes[2]["demand"] = 10
self.G.nodes[3]["demand"] = 10
self.G.nodes[3]["request"] = 4
self.G.nodes[4]["demand"] = -10
self.G.nodes[5]["demand"] = -10
self.G.add_edge(2, 5, cost=10)
self.G.remove_node(1)
prob = VehicleRoutingProblem(
self.G,
load_capacity=15,
pickup_delivery=True,
)
prob.solve(pricing_strategy="Exact", cspy=False)
assert prob.best_value == 65
def test_distribution_collection(self):
self.G.nodes[1]["collect"] = 12
self.G.nodes[4]["collect"] = 1
prob = VehicleRoutingProblem(
self.G,
load_capacity=15,
distribution_collection=True,
)
prob.solve(cspy=False)
lp_sol = prob.best_value
prob.solve(cspy=True)
cspy_sol = prob.best_value
assert lp_sol == cspy_sol
assert lp_sol == 80
def test_fixed_cost(self):
prob = VehicleRoutingProblem(self.G, num_stops=3, fixed_cost=100)
prob.solve()
assert prob.best_value == 70 + 200
assert set(prob.best_routes_cost.values()) == {30 + 100, 40 + 100}
def test_drop_nodes(self):
prob = VehicleRoutingProblem(self.G,
num_stops=3,
num_vehicles=1,
drop_penalty=100)
prob.solve()
assert prob.best_value == 240
assert prob.best_routes == {1: ["Source", 1, 2, 3, "Sink"]}
def test_num_vehicles_use_all(self):
prob = VehicleRoutingProblem(self.G,
num_stops=3,
num_vehicles=2,
use_all_vehicles=True,
drop_penalty=100)
prob.solve()
assert len(prob.best_routes) == 2
def test_periodic(self):
self.G.nodes[2]["frequency"] = 2
prob = VehicleRoutingProblem(self.G, num_stops=2, periodic=2)
prob.solve()
assert prob.best_value == 90
frequency = 0
for r in prob.best_routes:
if 2 in prob.best_routes[r]:
frequency += 1
assert frequency == 2
assert prob.schedule[0] in [[1], [1, 2]]
prob = VehicleRoutingProblem(self.G,
num_stops=2,
periodic=2,
num_vehicles=1)
prob.solve()
assert prob.schedule == {}
def test_mixed_fleet(self):
for (i, j) in self.G.edges():
self.G.edges[i, j]["cost"] = 2 * [self.G.edges[i, j]["cost"]]
prob = VehicleRoutingProblem(
self.G,
load_capacity=[10, 15],
fixed_cost=[10, 0],
num_vehicles=[5, 1],
mixed_fleet=True,
)
prob.solve()
assert prob.best_value == 80
assert set(prob.best_routes_type.values()) == {0, 1}
def test_time_limit(self):
prob = VehicleRoutingProblem(self.G, num_stops=3)
start = time()
prob.solve(cspy=False, time_limit=0.01)
comp_time = time() - start
assert comp_time < 0.01 + 0.15 # time_limit + time for mip
assert prob.best_value == 70
def test_dive(self):
for (i, j) in self.G.edges():
self.G.edges[i, j]["cost"] = 2 * [self.G.edges[i, j]["cost"]]
prob = VehicleRoutingProblem(
self.G,
load_capacity=[10, 15],
fixed_cost=[10, 0],
num_vehicles=[5, 1],
mixed_fleet=True,
)
prob.solve(dive=True)
assert prob.best_value == 80
| 33.972678 | 82 | 0.532572 |
ace25069e0bde9c62726c6c7c82903c923873b1b | 4,937 | py | Python | create-spl-token.py | cryptoloutre/create-spl-token-tool | f62ee617d538c43518fad51c272059d167f58785 | [
"MIT"
] | null | null | null | create-spl-token.py | cryptoloutre/create-spl-token-tool | f62ee617d538c43518fad51c272059d167f58785 | [
"MIT"
] | null | null | null | create-spl-token.py | cryptoloutre/create-spl-token-tool | f62ee617d538c43518fad51c272059d167f58785 | [
"MIT"
] | null | null | null | import sys
import subprocess
if __name__ == '__main__':
if len(sys.argv) < 7:
print("\nPlease provide, with the following order, the network, the number of tokens to mint, the path to your private key, your public key, the number of decimals of your token and if you want disable or enable future token mint!")
exit()
### Create the needed variables from the user inputs ###
network = sys.argv[1]
quantity = sys.argv[2]
path_keypair = sys.argv[3]
pubkey = sys.argv[4]
decimals = sys.argv[5]
mint = sys.argv[6]
token_address = ""
token_account = ""
if (network == "mainnet-beta" or network == "devnet") and (mint == "disable" or mint == "enable"):
if mint == "disable" or mint == "enable":
try:
quantity = int(quantity)
try:
decimals = int(decimals)
### Set the command to create a token ###
create_token_cmd = f'spl-token create-token --url {network} --fee-payer {path_keypair} --mint-authority {pubkey} --decimals {decimals}'
### Execute the command and get the output ###
create_token_result = subprocess.check_output(create_token_cmd, shell=True, universal_newlines=True)
### Verify if the transaction succeed ###
if "Signature:" in create_token_result:
print("\nToken created!")
### Get the address of the token created from the output ###
for i in range(15, 59):
token_address += create_token_result[i]
### Set the command to create the associated token account ###
create_token_account_cmd = f'spl-token create-account --url {network} --fee-payer {path_keypair} --owner {pubkey} {token_address}'
### Execute the command and get the output ###
create_token_account_result = subprocess.check_output(create_token_account_cmd, shell=True, universal_newlines=True)
### Verify if the transaction succeed ###
if "Signature:" in create_token_account_result:
print("\nToken account created!")
### Get the address of the associated token account from the output ###
for i in range(17, 61):
token_account += create_token_account_result[i]
### Set the command to mint token ###
mint_token_cmd = f'spl-token mint --url {network} --fee-payer {path_keypair} --mint-authority {path_keypair} {token_address} {quantity} {token_account}'
### Execute the command and get the output ###
mint_token_result = subprocess.check_output(mint_token_cmd, shell=True, universal_newlines=True)
### Verify if the transaction succeed ###
if "Signature:" in mint_token_result:
print("\nToken minted!")
if network == "mainnet-beta":
url = f'https://solscan.io/token/{token_address}'
else:
url = f'https://solscan.io/token/{token_address}?cluster=devnet'
print(f'\nSee your token at this url : {url}')
if mint == "disable":
### Set the command to disable mint authority ###
disable_mint_cmd = f'spl-token authorize --url {network} --fee-payer {path_keypair} --authority {path_keypair} {token_address} mint --disable'
### Execute the command and get the output ###
disable_mint_result = subprocess.check_output(disable_mint_cmd, shell=True, universal_newlines=True)
### Verify if the transaction succeed ###
if "Signature:" in disable_mint_result:
print("\nMint authority correctly disabled!")
except ValueError:
print("\nOops! It's not a valid number of decimals. Try again...")
except ValueError:
print("\nOops! It's not a valid number of tokens to mint. Try again...")
else:
print(f"\nOops! It's not a valid input for the mint authority")
else:
print("\nOops! It's not a valid network. Only mainnet-beta and devnet are allowed")
| 51.968421 | 241 | 0.511647 |
ace251fecc09a55a90b63a2e337f48d5aee60c83 | 11,696 | py | Python | tests/integration/test_transaction_integration.py | stevenj/PynamoDB | 362d98c241f84d6a43d53bf0cc9dcd055e84a2fa | [
"MIT"
] | 1 | 2021-04-27T11:42:10.000Z | 2021-04-27T11:42:10.000Z | tests/integration/test_transaction_integration.py | stevenj/PynamoDB | 362d98c241f84d6a43d53bf0cc9dcd055e84a2fa | [
"MIT"
] | 8 | 2020-02-12T02:42:15.000Z | 2020-04-04T23:35:19.000Z | tests/integration/test_transaction_integration.py | stevenj/PynamoDB | 362d98c241f84d6a43d53bf0cc9dcd055e84a2fa | [
"MIT"
] | 1 | 2020-10-14T18:42:27.000Z | 2020-10-14T18:42:27.000Z | import uuid
from datetime import datetime
import pytest
from pynamodb.connection import Connection
from pynamodb.exceptions import DoesNotExist, TransactWriteError, TransactGetError, InvalidStateError
from pynamodb.attributes import (
NumberAttribute, UnicodeAttribute, UTCDateTimeAttribute, BooleanAttribute, VersionAttribute
)
from pynamodb.transactions import TransactGet, TransactWrite
from pynamodb.models import Model
IDEMPOTENT_PARAMETER_MISMATCH = 'IdempotentParameterMismatchException'
PROVISIONED_THROUGHPUT_EXCEEDED = 'ProvisionedThroughputExceededException'
RESOURCE_NOT_FOUND = 'ResourceNotFoundException'
TRANSACTION_CANCELLED = 'TransactionCanceledException'
TRANSACTION_IN_PROGRESS = 'TransactionInProgressException'
VALIDATION_EXCEPTION = 'ValidationException'
class User(Model):
class Meta:
region = 'us-east-1'
table_name = 'user'
user_id = NumberAttribute(hash_key=True)
class BankStatement(Model):
class Meta:
region = 'us-east-1'
table_name = 'statement'
user_id = NumberAttribute(hash_key=True)
balance = NumberAttribute(default=0)
active = BooleanAttribute(default=True)
class LineItem(Model):
class Meta:
region = 'us-east-1'
table_name = 'line-item'
user_id = NumberAttribute(hash_key=True)
created_at = UTCDateTimeAttribute(range_key=True, default=datetime.now())
amount = NumberAttribute()
currency = UnicodeAttribute()
class DifferentRegion(Model):
class Meta:
region = 'us-east-2'
table_name = 'different-region'
entry_index = NumberAttribute(hash_key=True)
class Foo(Model):
class Meta:
region = 'us-east-1'
table_name = 'foo'
bar = NumberAttribute(hash_key=True)
star = UnicodeAttribute(null=True)
version = VersionAttribute()
TEST_MODELS = [
BankStatement,
DifferentRegion,
LineItem,
User,
Foo
]
@pytest.fixture(scope='module')
def connection(ddb_url):
yield Connection(host=ddb_url)
@pytest.fixture(scope='module', autouse=True)
def create_tables(ddb_url):
for m in TEST_MODELS:
m.Meta.host = ddb_url
m.create_table(
read_capacity_units=10,
write_capacity_units=10,
wait=True
)
yield
for m in TEST_MODELS:
if m.exists():
m.delete_table()
def get_error_code(error):
return error.cause.response['Error'].get('Code')
def get_error_message(error):
return error.cause.response['Error'].get('Message')
@pytest.mark.ddblocal
def test_transact_write__error__idempotent_parameter_mismatch(connection):
client_token = str(uuid.uuid4())
with TransactWrite(connection=connection, client_request_token=client_token) as transaction:
transaction.save(User(1))
transaction.save(User(2))
with pytest.raises(TransactWriteError) as exc_info:
# committing the first time, then adding more info and committing again
with TransactWrite(connection=connection, client_request_token=client_token) as transaction:
transaction.save(User(3))
assert get_error_code(exc_info.value) == IDEMPOTENT_PARAMETER_MISMATCH
# ensure that the first request succeeded in creating new users
assert User.get(1)
assert User.get(2)
with pytest.raises(DoesNotExist):
# ensure it did not create the user from second request
User.get(3)
@pytest.mark.ddblocal
def test_transact_write__error__different_regions(connection):
with pytest.raises(TransactWriteError) as exc_info:
with TransactWrite(connection=connection) as transact_write:
# creating a model in a table outside the region everyone else operates in
transact_write.save(DifferentRegion(entry_index=0))
transact_write.save(BankStatement(1))
transact_write.save(User(1))
assert get_error_code(exc_info.value) == RESOURCE_NOT_FOUND
@pytest.mark.ddblocal
def test_transact_write__error__transaction_cancelled__condition_check_failure(connection):
# create a users and a bank statements for them
User(1).save()
BankStatement(1).save()
# attempt to do this as a transaction with the condition that they don't already exist
with pytest.raises(TransactWriteError) as exc_info:
with TransactWrite(connection=connection) as transaction:
transaction.save(User(1), condition=(User.user_id.does_not_exist()))
transaction.save(BankStatement(1), condition=(BankStatement.user_id.does_not_exist()))
assert get_error_code(exc_info.value) == TRANSACTION_CANCELLED
assert 'ConditionalCheckFailed' in get_error_message(exc_info.value)
@pytest.mark.ddblocal
def test_transact_write__error__multiple_operations_on_same_record(connection):
BankStatement(1).save()
# attempt to do a transaction with multiple operations on the same record
with pytest.raises(TransactWriteError) as exc_info:
with TransactWrite(connection=connection) as transaction:
transaction.condition_check(BankStatement, 1, condition=(BankStatement.user_id.exists()))
transaction.update(BankStatement(1), actions=[(BankStatement.balance.add(10))])
assert get_error_code(exc_info.value) == VALIDATION_EXCEPTION
@pytest.mark.ddblocal
def test_transact_get(connection):
# making sure these entries exist, and with the expected info
User(1).save()
BankStatement(1).save()
User(2).save()
BankStatement(2, balance=100).save()
# get users and statements we just created and assign them to variables
with TransactGet(connection=connection) as transaction:
_user1_future = transaction.get(User, 1)
_statement1_future = transaction.get(BankStatement, 1)
_user2_future = transaction.get(User, 2)
_statement2_future = transaction.get(BankStatement, 2)
user1 = _user1_future.get()
statement1 = _statement1_future.get()
user2 = _user2_future.get()
statement2 = _statement2_future.get()
assert user1.user_id == statement1.user_id == 1
assert statement1.balance == 0
assert user2.user_id == statement2.user_id == 2
assert statement2.balance == 100
@pytest.mark.ddblocal
def test_transact_get__does_not_exist(connection):
with TransactGet(connection=connection) as transaction:
_user_future = transaction.get(User, 100)
with pytest.raises(User.DoesNotExist):
_user_future.get()
@pytest.mark.ddblocal
def test_transact_get__invalid_state(connection):
with TransactGet(connection=connection) as transaction:
_user_future = transaction.get(User, 100)
with pytest.raises(InvalidStateError):
_user_future.get()
@pytest.mark.ddblocal
def test_transact_write(connection):
# making sure these entries exist, and with the expected info
BankStatement(1, balance=0).save()
BankStatement(2, balance=100).save()
# assert values are what we think they should be
statement1 = BankStatement.get(1)
statement2 = BankStatement.get(2)
assert statement1.balance == 0
assert statement2.balance == 100
with TransactWrite(connection=connection) as transaction:
# let the users send money to one another
# create a credit line item to user 1's account
transaction.save(
LineItem(user_id=1, amount=50, currency='USD'),
condition=(LineItem.user_id.does_not_exist()),
)
# create a debit to user 2's account
transaction.save(
LineItem(user_id=2, amount=-50, currency='USD'),
condition=(LineItem.user_id.does_not_exist()),
)
# add credit to user 1's account
transaction.update(statement1, actions=[BankStatement.balance.add(50)])
# debit from user 2's account if they have enough in the bank
transaction.update(
statement2,
actions=[BankStatement.balance.add(-50)],
condition=(BankStatement.balance >= 50)
)
statement1.refresh()
statement2.refresh()
assert statement1.balance == statement2.balance == 50
@pytest.mark.ddblocal
def test_transact_write__one_of_each(connection):
User(1).save()
User(2).save()
statement = BankStatement(1, balance=100, active=True)
statement.save()
with TransactWrite(connection=connection) as transaction:
transaction.condition_check(User, 1, condition=(User.user_id.exists()))
transaction.delete(User(2))
transaction.save(LineItem(4, amount=100, currency='USD'), condition=(LineItem.user_id.does_not_exist()))
transaction.update(
statement,
actions=[
BankStatement.active.set(False),
BankStatement.balance.set(0),
]
)
# confirming transaction correct and successful
assert User.get(1)
with pytest.raises(DoesNotExist):
User.get(2)
new_line_item = next(LineItem.query(4, scan_index_forward=False, limit=1), None)
assert new_line_item
assert new_line_item.amount == 100
assert new_line_item.currency == 'USD'
statement.refresh()
assert not statement.active
assert statement.balance == 0
@pytest.mark.ddblocal
def test_transaction_write_with_version_attribute(connection):
foo1 = Foo(1)
foo1.save()
foo2 = Foo(2, star='bar')
foo2.save()
foo3 = Foo(3)
foo3.save()
with TransactWrite(connection=connection) as transaction:
transaction.condition_check(Foo, 1, condition=(Foo.bar.exists()))
transaction.delete(foo2)
transaction.save(Foo(4))
transaction.update(
foo3,
actions=[
Foo.star.set('birdistheword'),
]
)
assert Foo.get(1).version == 1
with pytest.raises(DoesNotExist):
Foo.get(2)
# Local object's version attribute is updated automatically.
assert foo3.version == 2
assert Foo.get(4).version == 1
@pytest.mark.ddblocal
def test_transaction_get_with_version_attribute(connection):
Foo(11).save()
Foo(12, star='bar').save()
with TransactGet(connection=connection) as transaction:
foo1_future = transaction.get(Foo, 11)
foo2_future = transaction.get(Foo, 12)
foo1 = foo1_future.get()
assert foo1.version == 1
foo2 = foo2_future.get()
assert foo2.version == 1
assert foo2.star == 'bar'
@pytest.mark.ddblocal
def test_transaction_write_with_version_attribute_condition_failure(connection):
foo = Foo(21)
foo.save()
foo2 = Foo(21)
with pytest.raises(TransactWriteError) as exc_info:
with TransactWrite(connection=connection) as transaction:
transaction.save(Foo(21))
assert get_error_code(exc_info.value) == TRANSACTION_CANCELLED
assert 'ConditionalCheckFailed' in get_error_message(exc_info.value)
with pytest.raises(TransactWriteError) as exc_info:
with TransactWrite(connection=connection) as transaction:
transaction.update(
foo2,
actions=[
Foo.star.set('birdistheword'),
]
)
assert get_error_code(exc_info.value) == TRANSACTION_CANCELLED
assert 'ConditionalCheckFailed' in get_error_message(exc_info.value)
# Version attribute is not updated on failure.
assert foo2.version is None
with pytest.raises(TransactWriteError) as exc_info:
with TransactWrite(connection=connection) as transaction:
transaction.delete(foo2)
assert get_error_code(exc_info.value) == TRANSACTION_CANCELLED
assert 'ConditionalCheckFailed' in get_error_message(exc_info.value)
| 32.043836 | 112 | 0.702633 |
ace252041e649d3663ffd2200a3fe8e2f72b892e | 960 | py | Python | src/tools/zdate.py | objcat/test-python | 97b3cb610a80b8e00b8a032ca94bb3fead4102fb | [
"MIT"
] | null | null | null | src/tools/zdate.py | objcat/test-python | 97b3cb610a80b8e00b8a032ca94bb3fead4102fb | [
"MIT"
] | null | null | null | src/tools/zdate.py | objcat/test-python | 97b3cb610a80b8e00b8a032ca94bb3fead4102fb | [
"MIT"
] | null | null | null | # description: zdate
# date: 2021/1/8 1:22 下午
# author: objcat
# version: 1.0
from datetime import datetime, timedelta
def date_to_str_d(date):
"""
日期转字符串
:param date: 日期
:return: 字符串日期 例 2021-01-18
"""
return datetime.strftime(date, "%d")
def date_to_str_md(date):
"""
日期转字符串
:param date: 日期
:return: 字符串日期 例 2021-01-18
"""
return datetime.strftime(date, "%m-%d")
def date_to_str_ymd(date):
"""
日期转字符串
:param date: 日期
:return: 字符串日期 例 2021-01-18
"""
return datetime.strftime(date, "%Y-%m-%d")
def str_to_date(str):
"""
字符串转日期
:param str: 日期字符串
:return: datetime
"""
return datetime.strptime(str, "%Y-%m-%d")
def nextday(date):
"""
明天
:param date: 日期对象
:return: 明天的日期对象
"""
return date + timedelta(days=1)
def lastday(date):
"""
昨天
:param date: 日期对象
:return: 昨天的日期对象
"""
return date - timedelta(days=1)
| 15.737705 | 46 | 0.578125 |
ace252ed9a9b605e65948f3860cea2a4a2b607fd | 27,068 | py | Python | mplc/scenario.py | arthurPignet/distributed-learning-contributivity | ecc3ea8c6f742876cccfe6131ecfa6478d435cab | [
"Apache-2.0"
] | null | null | null | mplc/scenario.py | arthurPignet/distributed-learning-contributivity | ecc3ea8c6f742876cccfe6131ecfa6478d435cab | [
"Apache-2.0"
] | null | null | null | mplc/scenario.py | arthurPignet/distributed-learning-contributivity | ecc3ea8c6f742876cccfe6131ecfa6478d435cab | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This enables to parameterize a desired scenario to mock a multi-partner ML project.
"""
import datetime
import re
import uuid
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from loguru import logger
from sklearn.preprocessing import LabelEncoder
from mplc.multi_partner_learning import MULTI_PARTNER_LEARNING_APPROACHES
from mplc.multi_partner_learning.utils import AGGREGATORS, Aggregator
from . import contributivity, constants
from . import dataset as dataset_module
from .corruption import Corruption, NoCorruption, IMPLEMENTED_CORRUPTION, Duplication
from .partner import Partner
from .splitter import Splitter, IMPLEMENTED_SPLITTERS
class Scenario:
def __init__(
self,
partners_count,
amounts_per_partner,
dataset=constants.MNIST,
dataset_proportion=1,
samples_split_option='random',
corruption_parameters=None,
init_model_from="random_initialization",
multi_partner_learning_approach="fedavg",
aggregation="data-volume",
gradient_updates_per_pass_count=constants.DEFAULT_GRADIENT_UPDATES_PER_PASS_COUNT,
minibatch_count=constants.DEFAULT_BATCH_COUNT,
epoch_count=constants.DEFAULT_EPOCH_COUNT,
is_early_stopping=True,
contributivity_methods=None,
is_quick_demo=False,
save_path=None,
scenario_id=1,
val_set='global',
test_set='global',
**kwargs,
):
"""
:param partners_count: int, number of partners. Example: partners_count = 3
:param amounts_per_partner: [float]. Fractions of the
original dataset each partner receives to mock a collaborative ML scenario where each partner provides data
for the ML training.
:param dataset: dataset.Dataset object, or its string identifier. Default is MNIST.
:param dataset_proportion: float (default: 1)
:param samples_split_option: Splitter object, or its string identifier (for instance 'random', or 'stratified')
Define the strategy to use to split the data samples between the partners.
Default, RandomSplitter.
:param corruption_parameters: list of Corruption object, or its string identifier, one for each partner.
Enable to artificially corrupt partner's data.
For instance: [Permutation(proportion=0.2), 'random', 'not-corrupted']
:param init_model_from: None (default) or path
:param multi_partner_learning_approach: 'fedavg' (default), 'seq-pure', 'seq-with-final-agg' or 'seqavg'
Define the multi-partner learning approach
:param aggregation:Aggregator object, or string identifier: 'data_volume' (default), 'uniform' or 'local_score'
:param gradient_updates_per_pass_count: int
:param minibatch_count: int
:param epoch_count: int
:param is_early_stopping: boolean. Stop the training if scores on val_set reach a plateau
:param contributivity_methods: A declarative list `[]` of the contributivity measurement methods to be executed.
:param is_quick_demo: boolean. Useful for debugging
:param save_path: path where to save the scenario outputs. By default, they are not saved!
:param scenario_id: str
:param **kwargs:
"""
# ---------------------------------------------------------------------
# Initialization of the dataset defined in the config of the experiment
# ---------------------------------------------------------------------
# Raise Exception if unknown parameters in the config of the scenario
params_known = [
"dataset",
"dataset_proportion",
"val_set",
"test_set"
] # Dataset related
params_known += [
"contributivity_methods",
"multi_partner_learning_approach",
"aggregation",
] # federated learning related
params_known += [
"partners_count",
"amounts_per_partner",
"corruption_parameters",
"samples_split_option",
"samples_split_configuration"
] # Partners related
params_known += [
"gradient_updates_per_pass_count",
"epoch_count",
"minibatch_count",
"is_early_stopping",
] # Computation related
params_known += ["init_model_from"] # Model related
params_known += ["is_quick_demo"]
params_known += ["save_path",
"scenario_name",
"repeat_count"]
unrecognised_parameters = [x for x in kwargs.keys() if (x not in params_known and not x.startswith('mpl_'))]
if len(unrecognised_parameters) > 0:
for x in unrecognised_parameters:
logger.debug(f"Unrecognised parameter: {x}")
raise Exception(
f"Unrecognised parameters {unrecognised_parameters}, check your configuration"
)
# Get and verify which dataset is configured
if isinstance(dataset, dataset_module.Dataset):
self.dataset = dataset
elif isinstance(dataset, str):
# Reference the object corresponding to the dataset selected and initialize it
if dataset == constants.MNIST: # default
self.dataset = dataset_module.Mnist()
elif dataset == constants.CIFAR10:
self.dataset = dataset_module.Cifar10()
elif dataset == constants.TITANIC:
self.dataset = dataset_module.Titanic()
elif dataset == constants.ESC50:
self.dataset = dataset_module.Esc50()
elif dataset == constants.IMDB:
self.dataset = dataset_module.Imdb()
else:
raise Exception(
f"Dataset named '{dataset}' is not supported (yet). You can construct your own "
f"dataset object, or even add it by contributing to the project !"
)
logger.debug(f"Dataset selected: {self.dataset.name}")
else:
raise AttributeError(f'The dataset parameter cannot be an {type(dataset)}.'
f' Please provides a Dataset instance or a string identifier')
# Proportion of the dataset the computation will used
self.dataset_proportion = dataset_proportion
assert (
self.dataset_proportion > 0
), "Error in the config file, dataset_proportion should be > 0"
assert (
self.dataset_proportion <= 1
), "Error in the config file, dataset_proportion should be <= 1"
if self.dataset_proportion < 1:
self.dataset.shorten_dataset_proportion(self.dataset_proportion)
else:
logger.debug("The full dataset will be used (dataset_proportion is configured to 1)")
logger.debug(
f"Computation use the full dataset for scenario #{scenario_id}"
)
# --------------------------------------
# Definition of collaborative scenarios
# --------------------------------------
# Partners mock different partners in a collaborative data science project
self.partners_list = [] # List of all partners defined in the scenario
self.partners_count = partners_count # Number of partners in the scenario
# For configuring the respective sizes of the partners' datasets
# (% of samples of the dataset for each partner, ...
# ... has to sum to 1, and number of items has to equal partners_count)
self.amounts_per_partner = amounts_per_partner
if np.sum(self.amounts_per_partner) != 1:
raise ValueError("The sum of the amount per partners you provided isn't equal to 1")
if len(self.amounts_per_partner) != self.partners_count:
raise AttributeError(f"The amounts_per_partner list should have a size ({len(self.amounts_per_partner)}) "
f"equals to partners_count ({self.partners_count})")
# To configure how validation set and test set will be organized.
if test_set in ['local', 'global']:
self.test_set = test_set
else:
raise ValueError(f'Test set can be \'local\' or \'global\' not {test_set}')
if val_set in ['local', 'global']:
self.val_set = val_set
else:
raise ValueError(f'Validation set can be \'local\' or \'global\' not {val_set}')
# To configure if data samples are split between partners randomly or in a stratified way...
# ... so that they cover distinct areas of the samples space
if isinstance(samples_split_option, Splitter):
if self.val_set != samples_split_option.val_set:
logger.warning('The validation set organisation (local/global) is differently configured between the '
'provided Splitter and Scenario')
if self.test_set != samples_split_option.test_set:
logger.warning('The test set organisation (local/global) is differently configured between the '
'provided Splitter and Scenario')
self.splitter = samples_split_option
else:
splitter_param = {'amounts_per_partner': self.amounts_per_partner,
'val_set': self.val_set,
'test_set': self.test_set,
}
if "samples_split_configuration" in kwargs.keys():
splitter_param.update({'configuration': kwargs["samples_split_configuration"]})
self.splitter = IMPLEMENTED_SPLITTERS[samples_split_option](**splitter_param)
# To configure if the data of the partners are corrupted or not (useful for testing contributivity measures)
if corruption_parameters:
self.corruption_parameters = list(
map(lambda x: x if isinstance(x, Corruption) else IMPLEMENTED_CORRUPTION[x](),
corruption_parameters))
else:
self.corruption_parameters = [NoCorruption() for _ in range(self.partners_count)] # default
# ---------------------------------------------------
# Configuration of the distributed learning approach
# ---------------------------------------------------
self.mpl = None
# Multi-partner learning approach
self.multi_partner_learning_approach = multi_partner_learning_approach
try:
self._multi_partner_learning_approach = MULTI_PARTNER_LEARNING_APPROACHES[
multi_partner_learning_approach]
except KeyError:
text_error = f"Multi-partner learning approach '{multi_partner_learning_approach}' is not a valid "
text_error += "approach. List of supported approach : "
for key in MULTI_PARTNER_LEARNING_APPROACHES.keys():
text_error += f"{key}, "
raise KeyError(text_error)
# Define how federated learning aggregation steps are weighted...
# ... Toggle between 'uniform' (default) and 'data_volume'
if isinstance(aggregation, Aggregator):
self.aggregation = aggregation
else:
try:
self.aggregation = AGGREGATORS[aggregation]
except KeyError:
raise ValueError(f"aggregation approach '{aggregation}' is not a valid approach. ")
# Number of epochs, mini-batches and fit_batches in ML training
self.epoch_count = epoch_count
assert (
self.epoch_count > 0
), "Error: in the provided config file, epoch_count should be > 0"
self.minibatch_count = minibatch_count
assert (
self.minibatch_count > 0
), "Error: in the provided config file, minibatch_count should be > 0"
self.gradient_updates_per_pass_count = gradient_updates_per_pass_count
assert self.gradient_updates_per_pass_count > 0, (
"Error: in the provided config file, "
"gradient_updates_per_pass_count should be > 0 "
)
# Early stopping stops ML training when performance increase is not significant anymore
# It is used to optimize the number of epochs and the execution time
self.is_early_stopping = is_early_stopping
# Model used to initialise model
self.init_model_from = init_model_from
if init_model_from == "random_initialization":
self.use_saved_weights = False
else:
self.use_saved_weights = True
# -----------------------------------------------------------------
# Configuration of contributivity measurement contributivity_methods to be tested
# -----------------------------------------------------------------
# List of contributivity measures selected and computed in the scenario
self.contributivity_list = []
# Contributivity methods
self.contributivity_methods = []
if contributivity_methods is not None:
for method in contributivity_methods:
if method in constants.CONTRIBUTIVITY_METHODS:
self.contributivity_methods.append(method)
else:
raise Exception(f"Contributivity method '{method}' is not in contributivity_methods list.")
# -------------
# Miscellaneous
# -------------
# Misc.
self.scenario_id = scenario_id
self.repeat_count = kwargs.get('repeat_count', 1)
# The quick demo parameters overwrites previously defined parameters to make the scenario faster to compute
self.is_quick_demo = is_quick_demo
if self.is_quick_demo and self.dataset_proportion < 1:
raise Exception("Don't start a quick_demo without the full dataset")
if self.is_quick_demo:
# Use less data and/or less epochs to speed up the computations
if len(self.dataset.x_train) > constants.TRAIN_SET_MAX_SIZE_QUICK_DEMO:
index_train = np.random.choice(
self.dataset.x_train.shape[0],
constants.TRAIN_SET_MAX_SIZE_QUICK_DEMO,
replace=False,
)
index_val = np.random.choice(
self.dataset.x_val.shape[0],
constants.VAL_SET_MAX_SIZE_QUICK_DEMO,
replace=False,
)
index_test = np.random.choice(
self.dataset.x_test.shape[0],
constants.TEST_SET_MAX_SIZE_QUICK_DEMO,
replace=False,
)
self.dataset.x_train = self.dataset.x_train[index_train]
self.dataset.y_train = self.dataset.y_train[index_train]
self.dataset.x_val = self.dataset.x_val[index_val]
self.dataset.y_val = self.dataset.y_val[index_val]
self.dataset.x_test = self.dataset.x_test[index_test]
self.dataset.y_test = self.dataset.y_test[index_test]
self.epoch_count = 3
self.minibatch_count = 2
# -----------------
# Output parameters
# -----------------
now = datetime.datetime.now()
now_str = now.strftime("%Y-%m-%d_%Hh%M")
self.scenario_name = kwargs.get('scenario_name',
f"scenario_{self.scenario_id}_repeat_{self.repeat_count}_{now_str}_"
f"{uuid.uuid4().hex[:3]}") # to distinguish identical names
if re.search(r'\s', self.scenario_name):
raise ValueError(
f'The scenario name "{self.scenario_name}"cannot be written with space character, please use '
f'underscore or dash.')
self.short_scenario_name = f"{self.partners_count}_{self.amounts_per_partner}"
if save_path is not None:
self.save_folder = Path(save_path) / self.scenario_name
else:
self.save_folder = None
# -------------------------------------------------------------------
# Select in the kwargs the parameters to be transferred to sub object
# -------------------------------------------------------------------
self.mpl_kwargs = {}
for key, value in kwargs.items():
if key.startswith('mpl_'):
self.mpl_kwargs[key.replace('mpl_', '')] = value
# -----------------------
# Provision the scenario
# -----------------------
self.instantiate_scenario_partners()
self.split_data()
self.compute_batch_sizes()
self.apply_data_alteration_configuration()
# ------------------------------------------------
# Print the description of the scenario configured
# ------------------------------------------------
self.log_scenario_description()
@property
def nb_samples_used(self):
if len(self.partners_list) == 0:
return len(self.dataset.x_train)
else:
return sum([p.final_nb_samples for p in self.partners_list])
@property
def final_relative_nb_samples(self):
return [p.final_nb_samples / self.nb_samples_used for p in self.partners_list]
def copy(self, **kwargs):
params = self.__dict__.copy()
for key in ['partners_list',
'_multi_partner_learning_approach',
'mpl',
'aggregation',
'use_saved_weights',
'contributivity_list',
'scenario_name',
'short_scenario_name',
'save_folder',
'splitter']:
del params[key]
if 'is_quick_demo' in kwargs and kwargs['is_quick_demo'] != self.is_quick_demo:
raise ValueError("Attribute 'is_quick_demo' cannot be modified between copies.")
if self.save_folder is not None:
params['save_path'] = self.save_folder.parents[0]
else:
params['save_path'] = None
params['samples_split_option'] = self.splitter.copy()
params['aggregation'] = self.aggregation.name
params.update(kwargs)
return Scenario(**params)
    def log_scenario_description(self):
        """Log a human-readable summary of the configured scenario and dataset."""
        # Describe scenario
        logger.info("Description of data scenario configured:")
        logger.info(f"   Number of partners defined: {self.partners_count}")
        logger.info(f"   Data distribution scenario chosen: {self.splitter}")
        logger.info(f"   Multi-partner learning approach: {self.multi_partner_learning_approach}")
        logger.info(f"   Weighting option: {self.aggregation.name}")
        logger.info(f"   Iterations parameters: "
                    f"{self.epoch_count} epochs > "
                    f"{self.minibatch_count} mini-batches > "
                    f"{self.gradient_updates_per_pass_count} gradient updates per pass")
        # Describe data
        logger.info(f"Data loaded: {self.dataset.name}")
        if self.is_quick_demo:
            logger.info("   Quick demo configuration: number of data samples and epochs "
                        "are limited to speed up the run")
        # Log the size of each split (train/val/test) of the dataset.
        logger.info(
            f"   {len(self.dataset.x_train)} train data with {len(self.dataset.y_train)} labels"
        )
        logger.info(
            f"   {len(self.dataset.x_val)} val data with {len(self.dataset.y_val)} labels"
        )
        logger.info(
            f"   {len(self.dataset.x_test)} test data with {len(self.dataset.y_test)} labels"
        )
    def append_contributivity(self, contributivity_method):
        """Record a computed contributivity result for later reporting."""
        self.contributivity_list.append(contributivity_method)
def instantiate_scenario_partners(self):
"""Create the partners_list"""
if len(self.partners_list) > 0:
raise Exception('Partners have already been initialized')
self.partners_list = [Partner(i, corruption=self.corruption_parameters[i]) for i in range(self.partners_count)]
    def split_data(self):
        # Delegate the distribution of the dataset across partners to the
        # configured Splitter; partners are populated in place.
        self.splitter.split(self.partners_list, self.dataset)
        # NOTE(review): legacy integer status return; callers appear to
        # ignore it — kept for backward compatibility.
        return 0
    def plot_data_distribution(self):
        """Plot per-partner label histograms and save them under save_folder/graphs.

        Assumes `self.save_folder` is set (the only visible caller, `run()`,
        invokes this inside an `if self.save_folder is not None` guard).
        """
        # Encode labels as integers; labels are stringified first so the
        # encoder works for both scalar and one-hot/array-like labels.
        lb = LabelEncoder().fit([str(y) for y in self.dataset.y_train])
        for i, partner in enumerate(self.partners_list):
            plt.subplot(self.partners_count, 1, i + 1)  # TODO share y axis
            data_count = np.bincount(lb.transform([str(y) for y in partner.y_train]))
            # Fill with 0
            while len(data_count) < self.dataset.num_classes:
                data_count = np.append(data_count, 0)
            plt.bar(np.arange(0, self.dataset.num_classes), data_count)
            plt.ylabel("partner " + str(partner.id))
        plt.suptitle("Data distribution")
        plt.xlabel("Digits")
        (self.save_folder / 'graphs').mkdir(exist_ok=True)
        plt.savefig(self.save_folder / "graphs" / "data_distribution.png")
        plt.close()
def compute_batch_sizes(self):
# For each partner we compute the batch size in multi-partner and single-partner setups
batch_size_min = 1
batch_size_max = constants.MAX_BATCH_SIZE
if self.partners_count == 1:
p = self.partners_list[0]
batch_size = int(len(p.x_train) / self.gradient_updates_per_pass_count)
p.batch_size = np.clip(batch_size, batch_size_min, batch_size_max)
else:
for p in self.partners_list:
batch_size = int(
len(p.x_train)
/ (self.minibatch_count * self.gradient_updates_per_pass_count)
)
p.batch_size = np.clip(batch_size, batch_size_min, batch_size_max)
for p in self.partners_list:
logger.debug(f" Compute batch sizes, partner #{p.id}: {p.batch_size}")
    def apply_data_alteration_configuration(self):
        """perform corruption on partner if needed"""
        for partner in self.partners_list:
            if isinstance(partner.corruption, Duplication):
                # NOTE(review): this truthiness check treats a configured
                # duplicated_partner_id of 0 as "unset" — partner ids start at
                # 0 (see instantiate_scenario_partners), so an explicit 0
                # would be silently overwritten below; `is None` is probably
                # intended — confirm against Duplication's default.
                if not partner.corruption.duplicated_partner_id:
                    # Pick a random partner (other than this one) whose data
                    # volume is large enough to supply the duplicated share.
                    data_volume = np.array([p.data_volume for p in self.partners_list if p.id != partner.id])
                    ids = np.array([p.id for p in self.partners_list if p.id != partner.id])
                    candidates = ids[data_volume >= partner.data_volume * partner.corruption.proportion]
                    partner.corruption.duplicated_partner_id = np.random.choice(candidates)
                partner.corruption.set_duplicated_partner(self.partners_list)
            # Apply the partner's configured corruption (NoCorruption is a no-op
            # by name — TODO confirm).
            partner.corrupt()
def to_dataframe(self):
df = pd.DataFrame()
dict_results = {}
# Scenario definition parameters
dict_results["scenario_name"] = self.scenario_name
dict_results["short_scenario_name"] = self.short_scenario_name
dict_results["dataset_name"] = self.dataset.name
dict_results["train_data_samples_count"] = len(self.dataset.x_train)
dict_results["test_data_samples_count"] = len(self.dataset.x_test)
dict_results["partners_count"] = self.partners_count
dict_results["dataset_fraction_per_partner"] = self.amounts_per_partner
dict_results["samples_split_option"] = str(self.splitter)
dict_results["nb_samples_used"] = self.nb_samples_used
dict_results["final_relative_nb_samples"] = self.final_relative_nb_samples
# Multi-partner learning approach parameters
dict_results["multi_partner_learning_approach"] = self.multi_partner_learning_approach
dict_results["aggregation_weighting"] = self.aggregation.name
dict_results["epoch_count"] = self.epoch_count
dict_results["minibatch_count"] = self.minibatch_count
dict_results["gradient_updates_per_pass_count"] = self.gradient_updates_per_pass_count
dict_results["is_early_stopping"] = self.is_early_stopping
dict_results["mpl_test_score"] = self.mpl.history.score
dict_results["mpl_nb_epochs_done"] = self.mpl.history.nb_epochs_done
dict_results["learning_computation_time_sec"] = self.mpl.learning_computation_time
if not self.contributivity_list:
df = df.append(dict_results, ignore_index=True)
for contrib in self.contributivity_list:
# Contributivity data
dict_results["contributivity_method"] = contrib.name
dict_results["contributivity_scores"] = contrib.contributivity_scores
dict_results["contributivity_stds"] = contrib.scores_std
dict_results["computation_time_sec"] = contrib.computation_time_sec
dict_results["first_characteristic_calls_count"] = contrib.first_charac_fct_calls_count
for i in range(self.partners_count):
# Partner-specific data
dict_results["partner_id"] = i
dict_results["dataset_fraction_of_partner"] = self.amounts_per_partner[i]
dict_results["contributivity_score"] = contrib.contributivity_scores[i]
dict_results["contributivity_std"] = contrib.scores_std[i]
df = df.append(dict_results, ignore_index=True)
return df
    def run(self):
        """Provision outputs, train the multi-partner model, then compute the
        configured contributivity measures."""
        # -----------------
        # Preliminary steps
        # -----------------
        if self.save_folder is not None:
            # mkdir() without exist_ok raises if the folder already exists —
            # presumably intended to avoid overwriting a previous run; confirm.
            self.save_folder.mkdir()
            self.plot_data_distribution()
        logger.info(f"Now starting running scenario {self.scenario_name}")
        # -----------------------------------------------------
        # Instantiate and run the distributed learning approach
        # -----------------------------------------------------
        self.mpl = self._multi_partner_learning_approach(self, custom_name='main_mpl', **self.mpl_kwargs)
        self.mpl.fit()
        # -------------------------------------------------------------------------
        # Instantiate and run the contributivity measurement contributivity_methods
        # -------------------------------------------------------------------------
        for method in self.contributivity_methods:
            logger.info(f"{method}")
            contrib = contributivity.Contributivity(scenario=self)
            contrib.compute_contributivity(method)
            self.append_contributivity(contrib)
            logger.info(f"Evaluating contributivity with {method}: {contrib}")
        # Legacy integer status return; callers appear to ignore it.
        return 0
| 46.034014 | 120 | 0.600525 |
ace2533973eff25bd86080ea55bb0b214b0d6b08 | 5,098 | py | Python | DictionaryOfNewZealandEnglish/headword/forms.py | eResearchSandpit/DictionaryOfNewZealandEnglish | cf3cec34aafc7a9a8bd0413883f5eeb314d46a48 | [
"BSD-3-Clause"
] | null | null | null | DictionaryOfNewZealandEnglish/headword/forms.py | eResearchSandpit/DictionaryOfNewZealandEnglish | cf3cec34aafc7a9a8bd0413883f5eeb314d46a48 | [
"BSD-3-Clause"
] | null | null | null | DictionaryOfNewZealandEnglish/headword/forms.py | eResearchSandpit/DictionaryOfNewZealandEnglish | cf3cec34aafc7a9a8bd0413883f5eeb314d46a48 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from flask_wtf import Form
from wtforms import *
from wtforms.validators import DataRequired, Length
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from DictionaryOfNewZealandEnglish.headword.models import *
from DictionaryOfNewZealandEnglish.headword.attribute.models import *
from DictionaryOfNewZealandEnglish.database import db
from sqlalchemy import asc, func
class SearchForm(Form):
    """Form for searching headwords and choosing how results are rendered."""
    headword = TextField('Headword', validators=[DataRequired(),
                                                 Length(max=50)])
    # How much detail to show for each matching headword.
    output = RadioField('Output',
                        choices=[('definition_only', 'definition only'),
                                 ('sample_citations', 'sample citations'),
                                 ('all_citations', 'all citations')],
                        default = 'sample_citations')
    def getattr(self, name):
        # NOTE(review): this method shadows the builtin `getattr` at class
        # level; inside the body the call still resolves to the builtin, so
        # it works, but the shadowing is confusing.
        return getattr(self, name)
    def __init__(self, *args, **kwargs):
        # `user` is initialised to None here; presumably set later by view
        # code — confirm against callers.
        super(SearchForm, self).__init__(*args, **kwargs)
        self.user = None
class HeadwordForm(Form):
    """Form for creating or editing a headword entry and its attributes."""
    headword = TextField('Headword', validators=[DataRequired(),
                                                 Length(max=50)])
    definition = TextAreaField('Definition', validators=[DataRequired()])
    see = TextField('See', validators=[])
    pronunciation = TextField('Pronunciation', validators=[])
    notes = TextAreaField('Notes', validators=[])
    archived = BooleanField('Archived')
    # Each QuerySelectField below lists the rows of one attribute table,
    # sorted case-insensitively by name, with a "none" blank option.
    # Region and Flag additionally exclude archived rows.
    word_class = QuerySelectField(
                     query_factory=lambda: db.session.query(Word_class)
                            .order_by(asc(func.lower(Word_class.name))).all(),
                     get_pk     =lambda a: a.id,
                     get_label  =lambda a: a.name,
                     allow_blank=True,
                     blank_text="none" )
    data_set = QuerySelectField(
                     query_factory=lambda: db.session.query(Data_set)
                            .order_by(asc(func.lower(Data_set.name))).all(),
                     get_pk     =lambda a: a.id,
                     get_label  =lambda a: a.name,
                     allow_blank=True,
                     blank_text="none")
    homonym_number = QuerySelectField(
                     query_factory=lambda: db.session.query(Homonym_number)
                            .order_by(asc(func.lower(Homonym_number.name))).all(),
                     get_pk     =lambda a: a.id,
                     get_label  =lambda a: a.name,
                     allow_blank=True,
                     blank_text="none" )
    sense_number = QuerySelectField(
                     query_factory=lambda: db.session.query(Sense_number)
                            .order_by(asc(func.lower(Sense_number.name))).all(),
                     get_pk     =lambda a: a.id,
                     get_label  =lambda a: a.name,
                     allow_blank=True,
                     blank_text="none" )
    origin = QuerySelectField(
                     query_factory=lambda: db.session.query(Origin)
                            .order_by(asc(func.lower(Origin.name))).all(),
                     get_pk     =lambda a: a.id,
                     get_label  =lambda a: a.name,
                     allow_blank=True,
                     blank_text="none" )
    register = QuerySelectField(
                     query_factory=lambda: db.session.query(Register)
                            .order_by(asc(func.lower(Register.name))).all(),
                     get_pk     =lambda a: a.id,
                     get_label  =lambda a: a.name,
                     allow_blank=True,
                     blank_text="none" )
    domain = QuerySelectField(
                     query_factory=lambda: db.session.query(Domain)
                            .order_by(asc(func.lower(Domain.name))).all(),
                     get_pk     =lambda a: a.id,
                     get_label  =lambda a: a.name,
                     allow_blank=True,
                     blank_text="none" )
    region = QuerySelectField(
                     query_factory=lambda: db.session.query(Region).filter_by(archived=False)
                            .order_by(asc(func.lower(Region.name))).all(),
                     get_pk     =lambda a: a.id,
                     get_label  =lambda a: a.name,
                     allow_blank=True,
                     blank_text="none" )
    flag = QuerySelectField(
                     query_factory=lambda: db.session.query(Flag).filter_by(archived=False)
                            .order_by(asc(func.lower(Flag.name))).all(),
                     get_pk     =lambda a: a.id,
                     get_label  =lambda a: a.name,
                     allow_blank=True,
                     blank_text="none" )
    def getattr(self, name):
        # NOTE(review): shadows the builtin `getattr` at class level; the call
        # inside the body resolves to the builtin, so it works.
        return getattr(self, name)
    def __init__(self, *args, **kwargs):
        # `user` is initialised to None; presumably set later by view code —
        # confirm against callers.
        super(HeadwordForm, self).__init__(*args, **kwargs)
        self.user = None
    def validate(self):
        # Defer entirely to the base-class validation; kept as an explicit
        # override, presumably as a hook for future custom checks.
        initial_validation = super(HeadwordForm, self).validate()
        if not initial_validation:
            return False
        return True
| 42.483333 | 91 | 0.528639 |
ace253d1666504e14194ec890d65d896c9add459 | 501 | py | Python | Mac/Tools/twit/mactwit_edit.py | 1byte2bytes/cpython | 7fbaeb819ca7b20dca048217ff585ec195e999ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | 1 | 2019-10-25T21:41:07.000Z | 2019-10-25T21:41:07.000Z | Mac/Tools/twit/mactwit_edit.py | 1byte2bytes/cpython | 7fbaeb819ca7b20dca048217ff585ec195e999ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | Mac/Tools/twit/mactwit_edit.py | 1byte2bytes/cpython | 7fbaeb819ca7b20dca048217ff585ec195e999ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | """Edit a file using the MetroWerks editor. Modify to suit your needs"""
import MacOS
import aetools
import Metrowerks_Shell_Suite
import Required_Suite
_talker = None
class MWShell(aetools.TalkTo,
              Metrowerks_Shell_Suite.Metrowerks_Shell_Suite,
              Required_Suite.Required_Suite):
    # AppleEvent proxy combining the Metrowerks shell and required suites to
    # talk to the CodeWarrior IDE.
    pass
def edit(file, line):
    """Open *file* in the CodeWarrior IDE ('CWIE') and jump to *line*.

    The AppleEvent connection is created lazily on first use and cached in
    the module-level `_talker`.
    """
    global _talker
    if _talker is None:
        _talker = MWShell('CWIE', start=1)
    try:
        _talker.open(file)
        _talker.Goto_Line(line)
    except (MacOS.Error, aetools.Error):
        # Fixed: the original `except "(MacOS.Error, aetools.Error)":` named a
        # string literal, which never matches any exception (string exceptions
        # were removed from Python), so these errors escaped unhandled.
        # Best effort: ignore AppleEvent/OS errors from the IDE.
        pass
| 20.04 | 72 | 0.744511 |
ace2542f190c76d49dd255908887f0fcbfa685e3 | 2,376 | py | Python | salt-get-api/src/resources/distance_resource.py | mdm373/salt-get | 72fd08f1c26e8812f2b3838085b6a7b935c4cc0d | [
"MIT"
] | null | null | null | salt-get-api/src/resources/distance_resource.py | mdm373/salt-get | 72fd08f1c26e8812f2b3838085b6a7b935c4cc0d | [
"MIT"
] | 4 | 2022-01-03T22:16:13.000Z | 2022-02-15T10:17:30.000Z | salt-get-api/src/resources/distance_resource.py | mdm373/salt-get | 72fd08f1c26e8812f2b3838085b6a7b935c4cc0d | [
"MIT"
] | null | null | null | from flask_restful import Resource, abort
from marshmallow import Schema, fields, ValidationError, post_load
from flask import request
from http import HTTPStatus
from db import make_connection, select_distances, insert_distance, delete_distance
from model import DistanceModel
from time import time_ns
class DistanceSchema(Schema):
    """(De)serialization schema for a single distance measurement."""
    distance = fields.Float(required=True)
    # Optional on input: the PUT handler stamps the timestamp server-side.
    timestamp = fields.Int(required=False)
    @post_load
    def make_object(self, data, **kwargs):
        """Build a DistanceModel from the validated payload."""
        model = DistanceModel()
        model.distance = data['distance']
        model.timestamp = 'timestamp' in data and data['timestamp'] or None
        return model
class DistanceResource(Resource):
    """REST resource for distance measurements: list (GET), record (PUT),
    delete (DELETE)."""
    schema = DistanceSchema()
    def delete(self):
        """Delete the measurement whose ``timestamp`` matches the query arg."""
        con = None
        try:
            con = make_connection()
            # Defaults to -1 when the query parameter is absent.
            timestamp = int("timestamp" in request.args and request.args["timestamp"] or -1)
            if timestamp == -1:
                # Fixed: was `HttpStatus.BAD_REQUEST` — a NameError at runtime
                # (the module imports `HTTPStatus` from `http`).
                # NOTE(review): abort() raises an exception that the broad
                # `except Exception` below converts to a 500 — confirm whether
                # a plain 400 response was intended here.
                abort(HTTPStatus.BAD_REQUEST)
            delete_distance(con, timestamp)
        except Exception as e:
            print(f"exception error: {e}")
            abort(HTTPStatus.INTERNAL_SERVER_ERROR)
        finally:
            con is not None and con.close()
    def put(self):
        """Record a new measurement, stamping it with the current time (ms)."""
        con = None
        try:
            con = make_connection()
            model = self.schema.load(request.get_json())
            # Convert the nanosecond clock to whole milliseconds.
            time_ms = int(round(0.000001 * time_ns(), 0))
            model.timestamp = time_ms
            insert_distance(con, model)
            return {}
        except ValidationError as ve:
            print(f"validation error: {ve}")
            abort(HTTPStatus.BAD_REQUEST)
        except Exception as e:
            print(f"exception error: {e}")
            abort(HTTPStatus.INTERNAL_SERVER_ERROR)
        finally:
            con is not None and con.close()
    def get(self):
        """Return up to ``limit`` (default 100) serialized measurements."""
        con = None
        try:
            con = make_connection()
            limit = int("limit" in request.args and request.args["limit"] or "100")
            distances = select_distances(con, limit)
            data = []
            for distance in distances:
                data.append(self.schema.dump(distance))
            return data
        except Exception as e:
            print(f"exception error: {e}")
            abort(HTTPStatus.INTERNAL_SERVER_ERROR)
        finally:
            con is not None and con.close()
| 33.942857 | 92 | 0.603956 |
ace2561fef88a1c1734b81e324dc6521e46cad6f | 30 | py | Python | __init__.py | kdevans2/LiDAR | 1ca68243beff8eaf004d60f260c419936bf0c446 | [
"Apache-2.0"
] | null | null | null | __init__.py | kdevans2/LiDAR | 1ca68243beff8eaf004d60f260c419936bf0c446 | [
"Apache-2.0"
] | 1 | 2019-04-04T19:44:14.000Z | 2019-04-04T19:44:14.000Z | __init__.py | kdevans2/LiDAR | 1ca68243beff8eaf004d60f260c419936bf0c446 | [
"Apache-2.0"
] | 1 | 2019-04-15T16:49:37.000Z | 2019-04-15T16:49:37.000Z | # pylint: disable=E0401, C0103 | 30 | 30 | 0.766667 |
ace25671633410e2e04bce01b217becf0e12ffd4 | 2,309 | py | Python | pyllusion/movement/motiontransparency.py | RebeccaHirst/Pyllusion | 9944076e38bced0eabb49c607482b71809150bdb | [
"MIT"
] | null | null | null | pyllusion/movement/motiontransparency.py | RebeccaHirst/Pyllusion | 9944076e38bced0eabb49c607482b71809150bdb | [
"MIT"
] | null | null | null | pyllusion/movement/motiontransparency.py | RebeccaHirst/Pyllusion | 9944076e38bced0eabb49c607482b71809150bdb | [
"MIT"
] | null | null | null | import numpy as np
from ..image import image_circle, image_circles
from .movement_matrix import movement_matrix
def motiontransparency_images(parameters=None, width=800, height=500, **kwargs):
    """Render the frames of a transparency-from-motion stimulus as PIL images.

    If *parameters* is None, they are generated from **kwargs via
    motiontransparency_parameters(). Note that the passed parameters dict is
    mutated in place (its "x" coordinates are rescaled for the screen ratio).

    >>> import pyllusion as ill
    >>>
    >>> parameters = ill.motiontransparency_parameters(angle=45, duration=4, n=100, fps=20, speed=2)
    >>> images = ill.motiontransparency_images(parameters) #doctest: +ELLIPSIS
    - 0.00% ...
    >>> # ill.images_to_gif(images, path="Transparency_From_Motion.gif", fps=parameters["FPS"])
    """
    if parameters is None:
        parameters = motiontransparency_parameters(**kwargs)
    # Adjust for screen ratio
    if width is not None and height is not None:
        parameters["x"] = parameters["x"] * (height / width)
    # Generate PIL images
    images = []
    for i in range(parameters["n_Frames"]):
        # Print progression
        if i % 10 == 0:
            print("- %.2f%%" % (i / parameters["n_Frames"] * 100))
        # Background circle
        image = image_circle(
            width=width,
            height=height,
            size=1,
            color="grey",
            outline=0,
            color_outline="red",
            background=(100, 100, 100),
            antialias=True,
        )
        # Draw points
        image = image_circles(
            image=image,
            n=parameters["n_Points"],
            x=parameters["x"][i],
            y=parameters["y"][i],
            color="black",
            background="grey",
            size=0.015,
            antialias=False,
        )
        images.append(image)
    return images
def motiontransparency_parameters(angle=None, n=200, duration=0.5, fps=60, speed=1):
    """Generate the parameters of a transparency-from-motion stimulus.

    Half the dots move along *angle* (random if None), the other half along
    the opposite direction (angle + 180), producing two transparent motion
    planes.

    Args:
        angle: Direction of the first dot plane in degrees, or None for random.
        n: Number of dots.
        duration: Clip duration in seconds.
        fps: Frames per second.
        speed: Dot speed passed to movement_matrix.

    Returns:
        Dict with the coordinate matrices and stimulus settings.
    """
    n_frames = int(duration * fps)
    if angle is None:
        angle = np.random.uniform(0, 360)
    # Use n // 2 and n - n // 2 so exactly n angles are produced even when n
    # is odd (the original `int(n / 2) + int(n / 2)` yielded n - 1 angles for
    # odd n, mismatching the n points passed to movement_matrix). For even n
    # the result is unchanged.
    half = n // 2
    angles = np.array([angle] * half + [180 + angle] * (n - half))
    x, y = movement_matrix(
        n=n,
        n_frames=n_frames,
        angle=angles,
        speed=speed,
        keep_in_window=False,
        keep_in_circle=0.5,
    )
    parameters = {
        "x": x,
        "y": y,
        "Angle": angle,
        "Speed": speed,
        "Duration": duration,
        "FPS": fps,
        "n_Frames": n_frames,
        "n_Points": n,
    }
    return parameters
| 26.238636 | 100 | 0.549589 |
ace256aa08c3a30da232188e68c0c3e186501ede | 348 | py | Python | only-test.py | tecdan/FrozenBert_Transformer | 0d1479d7b2bf8e7206bb472c2cb63e963736bbff | [
"MIT"
] | 1 | 2020-05-22T21:40:42.000Z | 2020-05-22T21:40:42.000Z | only-test.py | tecdan/FrozenBert_Transformer | 0d1479d7b2bf8e7206bb472c2cb63e963736bbff | [
"MIT"
] | null | null | null | only-test.py | tecdan/FrozenBert_Transformer | 0d1479d7b2bf8e7206bb472c2cb63e963736bbff | [
"MIT"
] | 1 | 2020-12-13T07:41:33.000Z | 2020-12-13T07:41:33.000Z | if opt.save_every > 0 and num_updates % opt.save_every == -1 % opt.save_every:
valid_loss = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
print('Validation perplexity: %g' % valid_ppl)
ep = float(epoch) - 1. + ((float(i) + 1.) / n_samples)
self.save(ep, valid_ppl, batch_order=batch_order, iteration=i) | 43.5 | 78 | 0.678161 |
ace256d9d900e2ef606def9066410beba79b35dd | 3,833 | py | Python | scripts/report-cu-util.py | Xilinx/graphanalytics | 7b8923b6494bc11708619edeac8d96eccfc76413 | [
"BSD-3-Clause"
] | 11 | 2021-01-31T17:50:55.000Z | 2022-03-23T12:40:39.000Z | scripts/report-cu-util.py | Xilinx/graphanalytics | 7b8923b6494bc11708619edeac8d96eccfc76413 | [
"BSD-3-Clause"
] | 1 | 2021-03-11T22:02:19.000Z | 2021-03-25T17:09:37.000Z | scripts/report-cu-util.py | Xilinx/graphanalytics | 7b8923b6494bc11708619edeac8d96eccfc76413 | [
"BSD-3-Clause"
] | 2 | 2021-03-02T18:41:47.000Z | 2021-11-14T06:54:08.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
* Copyright 2020-2021 Xilinx, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import sys
import re
from datetime import datetime
import tkinter as tk
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from pandas import DataFrame
FIGURE_DPI = 100
# Parse the log file given on the command line.
log_file = sys.argv[1]
with open(log_file, 'r', encoding='utf-8', errors='ignore') as fp:
    log_lines = fp.readlines()
#CuReport: 140441970509568::udf_cosinesim_ss_fpga req=1616448355459841300 lock=1616448355459841878 release=1616448355462792067
# Groups: 1=thread id, 2=kernel name, 3=request tick, 4=lock tick,
# 5=release tick (ticks in nanoseconds, per the conversions below).
re_cureport = re.compile('CuReport: (\S+)::(\S+) req=(\d+) lock=(\d+) release=(\d+)')
first_cu_line = True
prev_lock_tick_ms = 0
runtime_buckets = {} # bucket for each second
lock_wait_times = []
for line in log_lines:
    m = re_cureport.match(line)
    if m:
        # Convert nanosecond ticks to milliseconds.
        req_tick_ms = int(m.group(3))/1000000
        # One histogram bucket per wall-clock second.
        bucket_sec = req_tick_ms//1000
        lock_tick_ms = int(m.group(4))/1000000
        if first_cu_line:
            idle_time = 0
            first_cu_line = False
        else:
            # Gap since the previous request acquired the CU lock.
            idle_time = lock_tick_ms - prev_lock_tick_ms
        release_tick_ms = int(m.group(5))/1000000
        req_dt = datetime.fromtimestamp( req_tick_ms / 1000)
        lock_wait_ms = lock_tick_ms - req_tick_ms
        lock_wait_times.append(lock_wait_ms)
        # runtime including lock wait time
        runtime_ms = release_tick_ms - req_tick_ms
        # NOTE(review): req_time_str is computed but never used below.
        req_time_str = req_dt.strftime("%Y-%m-%d %H:%M:%S.%f")
        print(req_dt, lock_wait_ms, runtime_ms, idle_time)
        # Accumulate busy milliseconds into this request's second bucket.
        if runtime_buckets.get(bucket_sec) is None:
            runtime_buckets[bucket_sec] = runtime_ms
        else:
            runtime_buckets[bucket_sec] += runtime_ms
        prev_lock_tick_ms = lock_tick_ms
# Build the utilization history: busy ms per 1000 ms bucket, as a percentage.
time_hist = []
cu_util_hist = []
for k in sorted(runtime_buckets):
    time_hist.append(datetime.fromtimestamp(k).strftime("%Y-%m-%d %H:%M:%S"))
    cu_util_hist.append(runtime_buckets[k]*100/1000)
print('INFO: minimum wait time:', min(lock_wait_times))
print('INFO: average wait time:', sum(lock_wait_times)/len(lock_wait_times))
print('INFO: maximum wait time:', max(lock_wait_times))
# Build the Tk window that hosts the matplotlib plot.
root_window = tk.Tk()
root_window.geometry('1500x700+20+20')
root_window.title('CU Utilization Report')
root_window.grid_rowconfigure(0, weight=1)
root_window.grid_columnconfigure(0, weight=1)
cur_grid_row = 0
# plot row
figure_hist = plt.Figure(figsize=(10, 5), dpi=FIGURE_DPI)
plot_hist = figure_hist.add_subplot(111)
canvas_hist = FigureCanvasTkAgg(figure_hist, root_window)
canvas_hist.get_tk_widget().grid(row=cur_grid_row, columnspan=4, sticky='nsew')
cur_grid_row = cur_grid_row + 1
# Plot navigation toolbar
frame_toolbar = tk.Frame(root_window)
frame_toolbar.grid(row=cur_grid_row, columnspan=4)
toolbar_plot = NavigationToolbar2Tk(canvas_hist, frame_toolbar)
cur_grid_row = cur_grid_row + 1
# Plot utilization over time and hand control to the Tk main loop.
y_hist_dict = {'time': time_hist,
               'cu_util': cu_util_hist}
y_hist_df = DataFrame(y_hist_dict, columns=['time', 'cu_util'])
y_hist_df.plot(kind='line', legend=True, x='time', y='cu_util',
               ax=plot_hist, color='r', marker='.', fontsize=10)
plot_hist.set_ylabel('CU Utilization %')
plot_hist.set_title('CU Utilization (%) History')
canvas_hist.draw()
root_window.mainloop()
| 32.210084 | 126 | 0.717454 |
ace2595f301665ac55129e01c8872f3aac5456a7 | 1,785 | py | Python | park/api/delegate.py | galperins4/ARK-Python | 3a9bddfd605a6d4675cc1de00ab46c6304a7cf49 | [
"MIT"
] | 3 | 2017-12-22T06:27:57.000Z | 2018-01-09T18:18:35.000Z | park/api/delegate.py | faustbrian/ARK-Python-Client | 3a9bddfd605a6d4675cc1de00ab46c6304a7cf49 | [
"MIT"
] | 2 | 2018-03-22T04:37:19.000Z | 2018-05-04T03:16:47.000Z | park/api/delegate.py | faustbrian/ARK-Python-Client | 3a9bddfd605a6d4675cc1de00ab46c6304a7cf49 | [
"MIT"
] | 3 | 2017-12-22T19:13:49.000Z | 2018-01-20T20:28:14.000Z | #!/usr/bin/env python
from park.api.api import API
class Delegate(API):
    """Wrapper around the ARK node's delegate-related REST endpoints."""
    def count(self):
        """Return the total number of registered delegates."""
        return self.get('api/delegates/count')
    def search(self, query, parameters={}):
        """Search delegates by username fragment."""
        return self.get('api/delegates/search', {"q": query, **parameters})
    def voters(self, publicKey):
        """List the accounts voting for the delegate with *publicKey*."""
        return self.get('api/delegates/voters', {"publicKey": publicKey})
    def delegate(self, parameters={}):
        """Fetch a single delegate matching *parameters*."""
        return self.get('api/delegates/get', parameters)
    def delegates(self, parameters={}):
        """List delegates, optionally filtered by *parameters*."""
        return self.get('api/delegates', parameters)
    def fee(self):
        """Return the delegate-registration fee."""
        return self.get('api/delegates/fee')
    def forgedByAccount(self, generatorPublicKey):
        """Return forging totals for the delegate with *generatorPublicKey*."""
        return self.get('api/delegates/forging/getForgedByAccount',
                        {"generatorPublicKey": generatorPublicKey})
    def create(self, secret, username, secondSecret=None):
        """Build a delegate-registration transaction and broadcast it."""
        tx = self.client.delegateBuilder().create(secret, username, secondSecret)
        return self.client.transport().createTransaction(tx)
    def nextForgers(self):
        """Return the delegates scheduled to forge next."""
        return self.get('api/delegates/getNextForgers')
    def enableForging(self, secret, parameters={}):
        """Enable forging on the node for the delegate with *secret*."""
        payload = {"secret": secret, **parameters}
        return self.post('api/delegates/forging/enable', payload)
    def disableForging(self, secret, parameters={}):
        """Disable forging on the node for the delegate with *secret*."""
        payload = {"secret": secret, **parameters}
        return self.post('api/delegates/forging/disable', payload)
    def forgingStatus(self, publicKey, parameters={}):
        """Return the node's forging status for *publicKey*."""
        return self.get('api/delegates/forging/status',
                        {"publicKey": publicKey, **parameters})
ace259775f7c3706bd4575c8bdfc3111aa782183 | 6,585 | py | Python | robustness_metrics/projects/revisiting_calibration/figures/clean_imagenet_temp_scaling_bit_pretrain_comparison.py | Anselmoo/robustness_metrics | bf3bde93d0de60a288533469962b69f8f6fc09d5 | [
"Apache-2.0"
] | null | null | null | robustness_metrics/projects/revisiting_calibration/figures/clean_imagenet_temp_scaling_bit_pretrain_comparison.py | Anselmoo/robustness_metrics | bf3bde93d0de60a288533469962b69f8f6fc09d5 | [
"Apache-2.0"
] | 1 | 2022-02-01T08:48:18.000Z | 2022-02-01T08:48:18.000Z | robustness_metrics/projects/revisiting_calibration/figures/clean_imagenet_temp_scaling_bit_pretrain_comparison.py | Anselmoo/robustness_metrics | bf3bde93d0de60a288533469962b69f8f6fc09d5 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2022 The Robustness Metrics Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Figures for "Revisiting Calibration of Modern Neural Networks".
This module contains figures comparing ECE of BiT-models pretrained on different
datasets.
"""
from typing import List, Optional, Tuple
import matplotlib as mpl
from matplotlib import pyplot as plt
import pandas as pd
from robustness_metrics.projects.revisiting_calibration import display
from robustness_metrics.projects.revisiting_calibration import plotting
from robustness_metrics.projects.revisiting_calibration import utils
def plot(df_main: pd.DataFrame,
         gce_prefix: str = plotting.STD_GCE_PREFIX,
         rescaling_method: str = "temperature_scaling",
         add_guo: bool = False) -> mpl.figure.Figure:
  """Plots acc/calib and reliability diagrams on clean ImageNet (Figure 1).

  Builds a 3x2 grid: one column per BiT pretraining dataset
  (ImageNet / ImageNet-21k / JFT), top row unscaled ECE, bottom row ECE after
  `rescaling_method`, each vs. downstream ImageNet error.

  Args:
    df_main: Long-format results table; must contain the columns filtered on
      in `_get_data` (Metric, ModelName, DatasetName, rescaling_method,
      ModelFamily, downstream_error, MetricValue, ...).
    gce_prefix: Prefix selecting the calibration metric rows.
    rescaling_method: Second rescaling method to plot next to "none".
    add_guo: If True, adds the two reference models from Guo et al. (2017).

  Returns:
    The assembled matplotlib Figure.
  """
  rescaling_methods = ["none", rescaling_method]
  family_order = display.get_model_families_sorted(
      ["mixer", "vit", "bit", "simclr"])
  if add_guo:
    family_order.append("guo")
  # Set up figure:
  fig = plt.figure(figsize=(display.FULL_WIDTH/2, 2))
  spec = fig.add_gridspec(ncols=3, nrows=2)
  for col, bit_version in enumerate(
      ["BiT-ImageNet", "BiT-ImageNet21k", "BiT-JFT"]):
    # NOTE(review): this monkey-patches `display.get_standard_model_list` at
    # module level so `_get_data` only sees the BiT variant for this column.
    # The patch is not restored afterwards — the module stays patched with the
    # last (BiT-JFT) filter after plot() returns.
    # pylint: disable=g-long-lambda
    if bit_version == "BiT-ImageNet":
      display.get_standard_model_list = lambda: [
          m for m in display.MODEL_SIZE.keys()
          if not (m.startswith("bit-imagenet21k-") or m.startswith("bit-jft-"))
      ]
    elif bit_version == "BiT-ImageNet21k":
      display.get_standard_model_list = lambda: [
          m for m in display.MODEL_SIZE.keys()
          if not (m.startswith("bit-imagenet-") or m.startswith("bit-jft-"))
      ]
    elif bit_version == "BiT-JFT":
      display.get_standard_model_list = lambda: [
          m for m in display.MODEL_SIZE.keys() if not (m.startswith(
              "bit-imagenet-") or m.startswith("bit-imagenet21k-"))
      ]
    else:
      raise ValueError(f"Unknown BiT version: {bit_version}")
    # pylint: enable=g-long-lambda
    # NOTE: the loop variable below shadows the `rescaling_method` parameter;
    # the parameter's original value is still available via
    # `rescaling_methods[1]`.
    for row, rescaling_method in enumerate(rescaling_methods):
      df_plot, cmap = _get_data(df_main, gce_prefix, family_order,
                                rescaling_methods=[rescaling_method])
      ax = fig.add_subplot(spec[row, col])
      # `big_ax` keeps a handle on the last axes for the shared legend below.
      big_ax = ax
      for i, family in enumerate(family_order):
        if family == "guo":
          continue
        data_sub = df_plot[df_plot.ModelFamily == family]
        if data_sub.empty:
          continue
        ax.scatter(
            data_sub["downstream_error"],
            data_sub["MetricValue"],
            s=plotting.model_to_scatter_size(data_sub.model_size),
            c=data_sub.family_index,
            cmap=cmap,
            vmin=0,
            vmax=len(family_order),
            marker=utils.assert_and_get_constant(data_sub.family_marker),
            linewidth=0.5,
            alpha=1.0 if "bit" in family else 0.5,
            zorder=100 - i,  # Z-order is same as model family order.
            label=family)
      # Manually add Guo et al data:
      # From Table 1 and Table S2 in https://arxiv.org/pdf/1706.04599.pdf.
      # First model is DenseNet161, second is ResNet152.
      if add_guo:
        size = plotting.model_to_scatter_size(1)
        color = [len(family_order) - 1] * 2
        marker = "x"
        if rescaling_method == "none":
          ax.scatter([0.2257, 0.2231], [0.0628, 0.0548],
                     s=size, c=color, marker=marker, alpha=0.7, label="guo")
        if rescaling_method == "temperature_scaling":
          ax.scatter([0.2257, 0.2231], [0.0199, 0.0186],
                     s=size, c=color, marker=marker, alpha=0.7, label="guo")
      plotting.show_spines(ax)
      # Aspect ratios are tuned manually for display in the paper:
      ax.set_anchor("N")
      ax.grid(False, which="minor")
      ax.grid(True, axis="both")
      ax.yaxis.set_major_locator(mpl.ticker.MultipleLocator(0.01))
      ax.set_ylim(bottom=0.0, top=0.09)
      ax.set_xlim(0.05, 0.3)
      ax.set_xlabel(display.XLABEL_INET_ERROR)
      # NOTE(review): Axes.is_first_row/is_first_col were deprecated in
      # Matplotlib 3.6 and later removed — confirm the pinned matplotlib
      # version before upgrading.
      if ax.is_first_row():
        ax.set_title(bit_version, fontsize=6)
        ax.set_xlabel("")
        ax.set_xticklabels("")
      else:
        # Temp-scaled row gets a tighter y-range.
        ax.set_ylim(bottom=0.0, top=0.05)
      if ax.is_first_col():
        if rescaling_method == "none":
          ax.set_ylabel(display.YLABEL_ECE_UNSCALED)
        elif rescaling_method == "temperature_scaling":
          ax.set_ylabel(display.YLABEL_ECE_TEMP_SCALED)
      else:
        ax.set_yticklabels("")
  fig.tight_layout(pad=0.5)
  # Model family legend:
  handles, labels = plotting.get_model_family_legend(big_ax, family_order)
  plotting.apply_to_fig_text(fig, display.prettify)
  plotting.apply_to_fig_text(fig, lambda x: x.replace("EfficientNet", "EffNet"))
  legend = fig.axes[0].legend(
      handles=handles,
      labels=labels,
      loc="upper center",
      title="Model family",
      bbox_to_anchor=(0.55, -0.025),
      frameon=True,
      bbox_transform=fig.transFigure,
      ncol=len(family_order),
      handletextpad=0.1)
  legend.get_frame().set_linewidth(mpl.rcParams["axes.linewidth"])
  legend.get_frame().set_edgecolor("lightgray")
  plotting.apply_to_fig_text(fig, display.prettify)
  return fig
def _get_data(
    df_main: pd.DataFrame,
    gce_prefix: str,
    family_order: List[str],
    rescaling_methods: Optional[List[str]] = None,
    dataset_name: str = "imagenet(split='validation[20%:]')"
) -> Tuple[pd.DataFrame, mpl.colors.ListedColormap]:
  """Filters `df_main` down to the rows used for plotting.

  Keeps rows whose metric name starts with `gce_prefix`, whose model is in
  the standard model list, and whose dataset / rescaling method / model
  family match the requested values, then attaches display metadata.

  Returns:
    Tuple of (filtered copy of the DataFrame with display columns added,
    colormap for the model families).
  """
  methods = rescaling_methods or ["temperature_scaling"]
  row_filters = [
      df_main.Metric.str.startswith(gce_prefix),
      df_main.ModelName.isin(display.get_standard_model_list()),
      df_main.DatasetName.isin([dataset_name]),
      df_main.rescaling_method.isin(methods),
      df_main.ModelFamily.isin(family_order),
  ]
  combined = row_filters[0]
  for row_filter in row_filters[1:]:
    combined &= row_filter
  selected = df_main[combined].copy()
  return display.add_display_data(selected, family_order)
| 37.414773 | 80 | 0.668489 |
ace259a2776cee2e6424b256d7dc4416cf0c1c2b | 5,315 | py | Python | train.py | 530824679/CenterNet | e62352b9053eeca1ebc3eb49f9d5d80a8c3a8d5f | [
"MIT"
] | null | null | null | train.py | 530824679/CenterNet | e62352b9053eeca1ebc3eb49f9d5d80a8c3a8d5f | [
"MIT"
] | null | null | null | train.py | 530824679/CenterNet | e62352b9053eeca1ebc3eb49f9d5d80a8c3a8d5f | [
"MIT"
] | null | null | null | import os
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from cfg.config import *
from data.dataset import *
from model.network import *
def train():
    """Train the CenterNet model (TF1 graph mode).

    Reads paths and hyper-parameters from the global `path_params` /
    `solver_params` config dicts, builds the input pipeline, network, loss
    and optimizer, then runs the epoch loop, logging to TensorBoard and
    saving a checkpoint after every epoch.
    """
    dataset_path = path_params['train_data_path']
    log_dir = path_params['logs_path']
    batch_size = solver_params['batch_size']
    lr_type = solver_params['lr_type']
    # Configure the GPU (grow memory on demand instead of grabbing it all).
    gpu_options = tf.GPUOptions(allow_growth=True)
    config = tf.ConfigProto(gpu_options=gpu_options)
    # Parse the training samples and their annotations.
    # NOTE(review): the file handle from open() is never closed here.
    image_num = len(open(dataset_path, 'r').readlines())
    batch_num = int(math.ceil(float(image_num) / batch_size))
    dataset = create_dataset(dataset_path, batch_num, batch_size=batch_size, is_shuffle=True)
    iterator = dataset.make_one_shot_iterator()
    inputs, batch_hm, batch_wh, batch_reg, batch_reg_mask, batch_ind = iterator.get_next()
    # Pin static ranks so downstream ops can build their shapes.
    inputs.set_shape([None, None, None, 3])
    batch_hm.set_shape([None, None, None, None])
    batch_wh.set_shape([None, None, None])
    batch_reg.set_shape([None, None, None])
    batch_reg_mask.set_shape([None, None])
    batch_ind.set_shape([None, None])
    # Build the network (True -> training mode).
    model = CenterNet(True)
    pred_hm, pred_wh, pred_reg = model.build_model(inputs)
    # Compute the loss: loss_op is indexed below as
    # [0]=total, [1]=heatmap, [2]=width/height, [3]=offset regression.
    loss_op = model.calc_loss(pred_hm, pred_wh, pred_reg, batch_hm, batch_wh, batch_reg, batch_reg_mask, batch_ind)
    # Define the optimization scheme.
    if lr_type == "CosineAnnealing":
        # Linear warm-up followed by cosine annealing from init_lr to end_lr.
        global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
        warmup_steps = tf.constant(solver_params['warm_up_epochs'] * batch_num, dtype=tf.float64, name='warmup_steps')
        train_steps = tf.constant(solver_params['epochs'] * batch_num, dtype=tf.float64, name='train_steps')
        learning_rate = tf.cond(pred=global_step < warmup_steps,
                                true_fn=lambda: global_step / warmup_steps * solver_params['init_lr'],
                                false_fn=lambda: solver_params['end_lr'] + 0.5 * (solver_params['init_lr'] - solver_params['end_lr']) *
                                                 (1 + tf.cos((global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi))
                                )
        global_step_update = tf.assign_add(global_step, 1.0)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss_op[0])
        # Run batch-norm updates and the step counter together with the
        # optimizer step.
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            with tf.control_dependencies([optimizer, global_step_update]):
                train_op = tf.no_op()
    else:
        # Plain staircase exponential decay.
        global_step = tf.Variable(0, trainable=False)
        learning_rate = tf.train.exponential_decay(solver_params['lr'], global_step, solver_params['decay_steps'], solver_params['decay_rate'], staircase=True)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(loss_op[0], global_step=global_step)
    # Configure TensorBoard summaries.
    tf.summary.scalar("learning_rate", learning_rate)
    tf.summary.scalar("hm_loss", loss_op[1])
    tf.summary.scalar("wh_loss", loss_op[2])
    tf.summary.scalar("reg_loss", loss_op[3])
    tf.summary.scalar("total_loss", loss_op[0])
    summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(log_dir, graph=tf.get_default_graph(), flush_secs=60)
    # Model checkpointing.
    save_variable = tf.global_variables()
    saver = tf.train.Saver(save_variable, max_to_keep=50)
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        if solver_params['pre_train']:
            # Copy pretrained weights into variables whose names match.
            # NOTE(review): rstrip(':0') strips ANY trailing ':' and '0'
            # characters, not just the literal ':0' suffix — variable names
            # ending in '0' (e.g. 'conv10') would lose it; confirm intended.
            pretrained = np.load(path_params['pretrain_weights'], allow_pickle=True).item()
            for variable in tf.trainable_variables():
                for key in pretrained.keys():
                    key2 = variable.name.rstrip(':0')
                    if (key == key2):
                        sess.run(tf.assign(variable, pretrained[key]))
        summary_writer.add_graph(sess.graph)
        for epoch in range(1, 1 + solver_params['epochs']):
            train_epoch_loss, train_epoch_hm_loss, train_epoch_wh_loss, train_epoch_reg_loss = [], [], [], []
            for index in tqdm(range(batch_num)):
                _, summary, train_total_loss, train_hm_loss, train_wh_loss, train_reg_loss, global_step_val, lr = sess.run([train_op, summary_op, loss_op[0], loss_op[1], loss_op[2], loss_op[3], global_step, learning_rate])
                train_epoch_loss.append(train_total_loss)
                train_epoch_hm_loss.append(train_hm_loss)
                train_epoch_wh_loss.append(train_wh_loss)
                train_epoch_reg_loss.append(train_reg_loss)
                summary_writer.add_summary(summary, global_step_val)
            # Average the per-batch losses over the epoch for logging.
            train_epoch_loss, train_epoch_hm_loss, train_epoch_wh_loss, train_epoch_reg_loss = np.mean(train_epoch_loss), np.mean(train_epoch_hm_loss), np.mean(train_epoch_wh_loss), np.mean(train_epoch_reg_loss)
            print("Epoch: {}, global_step: {}, lr: {:.8f}, total_loss: {:.3f}, loss_hm: {:.3f}, loss_wh: {:.3f}, loss_reg: {:.3f}".format(epoch, global_step_val, lr, train_epoch_loss, train_epoch_hm_loss, train_epoch_wh_loss, train_epoch_reg_loss))
            saver.save(sess, os.path.join(path_params['checkpoints_path'], 'model.ckpt'), global_step=epoch)
        sess.close()
if __name__ == '__main__':
train() | 49.212963 | 248 | 0.685419 |
ace259bb3cdb85bd321b221251c949cb38543681 | 1,776 | py | Python | tests/terraform/checks/resource/oci/test_SecurityListUnrestrictedIngress22.py | peaudecastor/checkov | a4804b61c1b1390b7abd44ab53285fcbc3e7e80b | [
"Apache-2.0"
] | null | null | null | tests/terraform/checks/resource/oci/test_SecurityListUnrestrictedIngress22.py | peaudecastor/checkov | a4804b61c1b1390b7abd44ab53285fcbc3e7e80b | [
"Apache-2.0"
] | null | null | null | tests/terraform/checks/resource/oci/test_SecurityListUnrestrictedIngress22.py | peaudecastor/checkov | a4804b61c1b1390b7abd44ab53285fcbc3e7e80b | [
"Apache-2.0"
] | null | null | null | import os
import unittest
from checkov.runner_filter import RunnerFilter
from checkov.terraform.checks.resource.oci.SecurityListUnrestrictedIngress22 import check
from checkov.terraform.runner import Runner
class TestSecurityListUnrestrictedIngress22(unittest.TestCase):
    def test(self):
        """Scan the example terraform files and verify pass/fail resources."""
        examples_dir = os.path.dirname(os.path.realpath(__file__)) + "/example_SecurityListUnrestrictedIngress22"
        scan_report = Runner().run(
            root_folder=examples_dir,
            runner_filter=RunnerFilter(checks=[check.id]),
        )

        should_pass = {
            "oci_core_security_list.pass0",
            "oci_core_security_list.pass1",
            "oci_core_security_list.pass4",
            "oci_core_security_list.pass5",
            "oci_core_security_list.pass6",
        }
        should_fail = {
            "oci_core_security_list.fail",
            "oci_core_security_list.fail1",
            "oci_core_security_list.fail2",
            "oci_core_security_list.fail3",
            "oci_core_security_list.fail5",
        }

        actually_passed = {c.resource for c in scan_report.passed_checks}
        actually_failed = {c.resource for c in scan_report.failed_checks}
        summary = scan_report.get_summary()

        self.assertEqual(summary["passed"], len(should_pass))
        self.assertEqual(summary["failed"], len(should_fail))
        self.assertEqual(summary["skipped"], 0)
        self.assertEqual(summary["parsing_errors"], 0)
        self.assertEqual(should_pass, actually_passed)
        self.assertEqual(should_fail, actually_failed)
if __name__ == "__main__":
unittest.main()
| 37.787234 | 102 | 0.706081 |
ace259e7a1497b805ceab681a70fcb2e5db95e7b | 2,045 | py | Python | autotune.py | masahi/pytorch-ssd | a8c5b4574bcfdf9dac463298b57b8ec6f8485bb4 | [
"MIT"
] | 1 | 2021-07-31T15:56:12.000Z | 2021-07-31T15:56:12.000Z | autotune.py | masahi/pytorch-ssd | a8c5b4574bcfdf9dac463298b57b8ec6f8485bb4 | [
"MIT"
] | null | null | null | autotune.py | masahi/pytorch-ssd | a8c5b4574bcfdf9dac463298b57b8ec6f8485bb4 | [
"MIT"
] | null | null | null | import numpy as np
import onnx
import tvm
from tvm import relay, auto_scheduler
from tvm.runtime.vm import VirtualMachine
# Load the exported MobileNet-v1 SSD ONNX model from the working directory.
model = onnx.load("mb1-ssd.onnx")

# Input tensor shape (NCHW) and mapping from the ONNX graph's input name.
ishape = (1, 3, 300, 300)
shape_dict = {"input.1": ishape}
# Compilation target and the auto-scheduler tuning-log location.
target = "vulkan"
log_file = "logs/ssd-mb1-vulkan.log"
def auto_schedule(mod, params):
    """Run TVM auto-scheduler tuning on all tasks extracted from `mod`.

    Extracts the tunable tasks for the module-level `target`, prints each
    task's compute DAG, then tunes them jointly, appending measurement
    records to the module-level `log_file`.  Blocks until tuning finishes.
    """
    tasks, task_weights = auto_scheduler.extract_tasks(mod, params, target)

    for idx, task in enumerate(tasks):
        print("========== Task %d (workload key: %s) ==========" % (idx, task.workload_key))
        print(task.compute_dag)

    # Local RPC runner: one repeat, at least 300 ms of measured run time.
    measure_ctx = auto_scheduler.LocalRPCMeasureContext(repeat=1, min_repeat_ms=300, timeout=1000)
    tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
    # Uncomment to resume tuning from a previous log:
    # tuner = auto_scheduler.TaskScheduler(tasks, task_weights, load_log_file=log_file)
    tune_option = auto_scheduler.TuningOptions(
        num_measure_trials=100000,  # change this to 20000 to achieve the best performance
        runner=measure_ctx.runner,
        measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
    )

    tuner.tune(tune_option)
# One-time export — uncomment to regenerate the serialized module/params
# from the ONNX model loaded above:
# mod, params = relay.frontend.from_onnx(model, shape_dict, freeze_params=True)
# mod = relay.transform.DynamicToStatic()(mod)
# with open("ssd-mb1_mod.json", "w") as fo:
#     fo.write(tvm.ir.save_json(mod))
# with open("ssd-mb1.params", "wb") as fo:
#     fo.write(relay.save_param_dict(params))

# Load the previously serialized Relay module and parameters.
with open("ssd-mb1_mod.json", "r") as fi:
    mod = tvm.ir.load_json(fi.read())
with open("ssd-mb1.params", "rb") as fi:
    params = relay.load_param_dict(fi.read())

auto_schedule(mod, params)

# Number of timed repetitions for the benchmark below.
# Fix: `num_iters` was referenced in the time_evaluator call but never
# defined anywhere in this script, so the run crashed with a NameError
# immediately after tuning completed.
num_iters = 10

# The model expects float32 input; np.random.randn returns float64, which
# TVM rejects at set_input time — cast explicitly.
# NOTE(review): confirm the ONNX graph's input dtype is indeed float32.
inp = np.random.randn(1, 3, 300, 300).astype("float32")

# Compile with the tuned schedules applied.
with auto_scheduler.ApplyHistoryBest(log_file):
    with tvm.transform.PassContext(opt_level=3, config={"relay.backend.use_auto_scheduler": True}):
        vm_exec = relay.vm.compile(mod, target=target, params=params)

ctx = tvm.context(target, 0)
vm = VirtualMachine(vm_exec, ctx)
vm.set_input("main", **{"input.1": inp})
vm.run()

# Benchmark the end-to-end invocation and print timing statistics.
ftimer = vm.module.time_evaluator("invoke", ctx, number=1, repeat=num_iters)
print(ftimer("main"))
| 31.953125 | 99 | 0.708557 |
ace25a87b37aac3a0fbd4ce5a99600fcb6f37680 | 4,554 | py | Python | scripts/CsUtils.py | IntelEuclid/euclid_configuration_node | e46af8d31512805bd22136ab12460334cc3189bd | [
"BSD-3-Clause"
] | 1 | 2019-04-18T06:03:19.000Z | 2019-04-18T06:03:19.000Z | scripts/CsUtils.py | IntelEuclid/euclid_configuration_node | e46af8d31512805bd22136ab12460334cc3189bd | [
"BSD-3-Clause"
] | null | null | null | scripts/CsUtils.py | IntelEuclid/euclid_configuration_node | e46af8d31512805bd22136ab12460334cc3189bd | [
"BSD-3-Clause"
] | 2 | 2018-01-31T10:03:08.000Z | 2020-04-22T05:12:30.000Z | #!/usr/bin/env python
##################################################################################
#Copyright (c) 2016, Intel Corporation
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this
#list of conditions and the following disclaimer.
#
#2. Redistributions in binary form must reproduce the above copyright notice,
#this list of conditions and the following disclaimer in the documentation
#and/or other materials provided with the distribution.
#
#3. Neither the name of the copyright holder nor the names of its contributors
#may be used to endorse or promote products derived from this software without
#specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##################################################################################
import rosnode
import subprocess
import ast
from dynamic_reconfigure import client
from rospy.timer import sleep
import os
def IsNodeRunning(nodeName):
    """Return True if the ROS node answers a single ping within 1 second."""
    return rosnode.rosnode_ping(nodeName, 1, False)
def KillNode(nodeName):
    """Ask rosnode to kill the named node.

    Fire-and-forget: the subprocess is not waited on, so this returns
    immediately without confirming the node actually died.
    """
    subprocess.Popen(['rosnode', 'kill', nodeName])
#TODO, handle timeout exception if needed ..
def LaunchNode(packageName,launchFile,nodesList,timeOut=10):
    """Launch a ROS launch file and wait until all expected nodes respond.

    Starts `roslaunch packageName launchFile` with turtlebot environment
    overrides, then polls once per second (up to `timeOut` attempts) until
    every node in `nodesList` answers a ping.  Returns the roslaunch
    process pid regardless of whether all nodes came up in time.
    """
    print "launching: " + launchFile
    # Run roslaunch with turtlebot sensor/stack overrides in its environment.
    my_env = os.environ.copy()
    my_env["TURTLEBOT_3D_SENSOR"] = "commonsense"
    my_env["TURTLEBOT_STACKS"]="hexagons"
    proc = subprocess.Popen(['roslaunch', packageName, launchFile],env=my_env)
    allNodes = 0
    while allNodes != len(nodesList) and timeOut >0:
        # Re-count responsive nodes from scratch each second; stop counting
        # at the first unresponsive node and retry on the next iteration.
        allNodes = 0
        sleep(1)
        for node in nodesList:
            if rosnode.rosnode_ping(node,1,False) == True:
                allNodes = allNodes + 1
            else:
                break
        timeOut = timeOut -1
    print "Process pid: " + str(proc.pid)
    return proc.pid
#TODO: check refactoring
#TODO: timeout should be "#defined"
def GetConfigurationYamlFile(folderName, packageName, nodeName, fileName):
    """Dump a node's dynamic_reconfigure parameters into a YAML file.

    The raw dump is written to `fileName + ".tmp"` and then post-processed
    by the fixYaml.bash helper into `fileName`.  Returns True when the dump
    command exited with status 0.  (`folderName` and `packageName` are
    accepted for interface compatibility but unused.)
    """
    temp_dump = fileName + ".tmp"
    dump_status = subprocess.call(
        ["rosrun", "dynamic_reconfigure", "dynparam", "dump", nodeName, temp_dump, "-t 3"])
    subprocess.call(
        ['/intel/euclid/oobe-utils/configuration/fixYaml.bash', temp_dump, fileName])
    return dump_status == 0
def LoadConfiguration(nodeName,fileName):
    """Load dynamic_reconfigure parameters for a node from a YAML file.

    Returns True when the `dynparam load` command exited with status 0.
    """
    outputFile = fileName
    print "Got: " + nodeName + ", " + fileName
    retVal = subprocess.call(["rosrun", "dynamic_reconfigure", "dynparam", "load",nodeName, outputFile,"-t 5"])
    return retVal == 0
def SetParam(nodeName, param, value):
    """Set one dynamic_reconfigure parameter on a ROS node.

    Runs `rosrun dynamic_reconfigure dynparam set` and scans its stdout for
    the known failure message; returns True when no failure line was seen.
    """
    command = ["rosrun", "dynamic_reconfigure", "dynparam", "set", nodeName, param, value, "-t 3"]
    proc = subprocess.Popen(command, stdout=subprocess.PIPE)
    # Short-circuits to False on the first line containing the error marker,
    # exactly like the original early return.
    return all(line.find("couldn't set parameters") == -1 for line in proc.stdout)
def GetParams(nodeName):
    """Fetch a node's dynamic_reconfigure parameters as a dict.

    Runs `dynparam get` and literal-evaluates its output.  Returns the dict
    parsed from the FIRST stdout line only (the loop returns immediately),
    or None implicitly if the command produced no output.
    """
    proc = subprocess.Popen(['rosrun','dynamic_reconfigure','dynparam','get',nodeName],stdout=subprocess.PIPE)
    for data in proc.stdout:
        dic = ast.literal_eval(data)
        # NOTE(review): returns after parsing the first line — presumably the
        # whole dict fits on one line; verify against dynparam's output.
        return dic
# def Reboot():
# subprocess.call(['reboot'])
# def Shutdown():
# subprocess.call(['shutdown','-P','0'])
# def RestartOOBE():
# subprocess.call(['service','oobe-init','restart-oobe'])
# def GenerateArduinoLibrary():
# retVal = subprocess.call(['sh','/intel/euclid/oobe-utils/generateArduinoLibrary/generateArduinoLibrary.sh'])
# return retVal == 0
# def exportSettings():
# retVal = subprocess.call(['sh','/intel/euclid/oobe-utils/exportImportSettings/exportSettings.sh'])
# return retVal == 0
if __name__ == '__main__':
print IsNodeRunning('/RealsenseNodelet')
| 38.923077 | 133 | 0.688625 |
ace25bd23f034d25a08263a2c17d3b9e871c4839 | 14,840 | py | Python | askdata/integrations/facebook_api.py | AskdataInc/askdata-api-python-client | 82d63e5aad68e109cafe54aab29cf98cb5587588 | [
"Apache-2.0"
] | null | null | null | askdata/integrations/facebook_api.py | AskdataInc/askdata-api-python-client | 82d63e5aad68e109cafe54aab29cf98cb5587588 | [
"Apache-2.0"
] | null | null | null | askdata/integrations/facebook_api.py | AskdataInc/askdata-api-python-client | 82d63e5aad68e109cafe54aab29cf98cb5587588 | [
"Apache-2.0"
] | null | null | null | ##required !pip install facebook_business -q
from facebook_business.api import FacebookAdsApi
from facebook_business.adobjects.business import Business
from facebook_business.adobjects.adaccountuser import AdAccountUser
from facebook_business.adobjects.adsinsights import AdsInsights
from facebook_business.adobjects.adreportrun import AdReportRun
from facebook_business.adobjects.adaccount import AdAccount
from facebook_business.adobjects.adset import AdSet
import datetime
import time
import sys
import pandas as pd
import _locale
#return pandas dataframe
#See here for documentation https://developers.facebook.com/docs/marketing-api/insights/parameters/
"""
MANDATORY INPUT: my_app_id, my_app_secret, my_access_token, start_date, end_date,
"""
def get_fb_ads(my_app_id, my_app_secret, my_access_token, start_date, end_date, account_name):
    """Pull daily, campaign-level Facebook Ads insights into a DataFrame.

    Args:
        my_app_id: Facebook app id used to initialise the Marketing API.
        my_app_secret: Facebook app secret.
        my_access_token: Access token for the Marketing API.
        start_date: Report start date (string accepted by the API time_range).
        end_date: Report end date.
        account_name: Dict mapping a human-readable account name to its
            ad-account id; one async insights report is requested per entry.
            Accounts whose report request fails are skipped.

    Returns:
        pandas.DataFrame with one row per (campaign, day).  Column names and
        order follow the original export schema — note that the
        'actions_post_reaction ' column keeps its historical trailing space
        for backward compatibility with downstream consumers.
    """
    # Force an English default locale (some environments otherwise break the
    # SDK's locale handling).
    _locale._getdefaultlocale = (lambda *args: ['en_US', 'UTF-8'])

    # Start the connection to the facebook API.
    FacebookAdsApi.init(my_app_id, my_app_secret, my_access_token, api_version='v13.0')
    # Kept from the original implementation for parity.
    business = Business('WAF')

    # Default report timestamp applied when the API row carries none.
    report_date = datetime.datetime.now()

    # Enumerate ad accounts on the token's user; kept for parity with the
    # original code (also implicitly validates the token).
    me = AdAccountUser(fbid='me')
    accounts = list(me.get_ad_accounts())

    # See https://developers.facebook.com/docs/marketing-api/insights/parameters/
    params = {
        "time_range" : {"since": start_date, "until": end_date},
        'fields': [
            AdsInsights.Field.account_id,
            AdsInsights.Field.account_name,
            AdsInsights.Field.action_values,
            AdsInsights.Field.actions,
            AdsInsights.Field.ad_id,
            AdsInsights.Field.ad_name,
            AdsInsights.Field.adset_id,
            AdsInsights.Field.adset_name,
            AdsInsights.Field.campaign_id,
            AdsInsights.Field.campaign_name,
            AdsInsights.Field.cost_per_action_type,
            AdsInsights.Field.cost_per_inline_link_click,
            AdsInsights.Field.cost_per_inline_post_engagement,
            AdsInsights.Field.cost_per_unique_click,
            AdsInsights.Field.cpm,
            AdsInsights.Field.cpp,
            AdsInsights.Field.ctr,
            AdsInsights.Field.date_start,
            AdsInsights.Field.date_stop,
            AdsInsights.Field.frequency,
            AdsInsights.Field.impressions,
            AdsInsights.Field.inline_link_clicks,
            AdsInsights.Field.inline_post_engagement,
            AdsInsights.Field.reach,
            AdsInsights.Field.spend,
            AdsInsights.Field.unique_clicks,
            AdsInsights.Field.unique_ctr,
            AdsInsights.Field.video_p100_watched_actions,
            AdsInsights.Field.video_p25_watched_actions,
            AdsInsights.Field.video_p50_watched_actions,
            AdsInsights.Field.video_p75_watched_actions,
            AdsInsights.Field.video_p95_watched_actions,
            AdsInsights.Field.website_ctr,
            AdsInsights.Field.clicks
        ],
        'level': 'campaign',
        'time_increment': 1
    }

    def wait_for_async_job(async_job):
        """Poll the async report every 5 seconds until it is fully complete."""
        async_job.api_get()
        while async_job[AdReportRun.Field.async_status] != 'Job Completed' or async_job[AdReportRun.Field.async_percent_completion] < 100:
            time.sleep(5)
            async_job.api_get()

    def _simple(ad, field):
        """Return ad[field], or '' when the field is absent (original default)."""
        return ad[field] if field in ad else ""

    def _entries(ad, field):
        """Return the list under ad[field], or [] when absent."""
        return ad[field] if field in ad else []

    def _pick(entries, action_type):
        """Return the value of the matching action_type entry, '' if none.

        Scans all entries so that, like the original code, the LAST match
        wins if the API ever returns duplicates.
        """
        value = ""
        for entry in entries:
            if entry['action_type'] == action_type:
                value = entry['value']
        return value

    records = []
    for name, account_id in account_name.items():
        print("Account: '{}', Id: '{}'".format(name, account_id))
        account = AdAccount(account_id)
        try:
            job = account.get_insights_async(fields=params.get('fields'), params=params)
            wait_for_async_job(job)
            ads = job.get_result()
        except Exception:
            # Bug fix: the original bare `except: pass` left the result
            # variable unbound (NameError on the first account) or stale
            # (silently re-appending the previous account's rows).  Skip
            # accounts whose report request failed instead.
            continue

        for ad in ads:
            action_values = _entries(ad, 'action_values')
            actions = _entries(ad, 'actions')
            costs = _entries(ad, 'cost_per_action_type')

            # Build the record in the original column order so the resulting
            # DataFrame keeps the historical schema.
            records.append({
                "account_id": _simple(ad, 'account_id'),
                "account_name": _simple(ad, 'account_name'),
                "fb_pixel_lead": _pick(action_values, 'offsite_conversion.fb_pixel_lead'),
                "lead": _pick(action_values, 'lead'),
                "actions_link_click": _pick(actions, 'link_click'),
                # Trailing space kept deliberately — downstream consumers
                # reference this exact column name.
                "actions_post_reaction ": _pick(actions, 'post_reaction'),
                "actions_post": _pick(actions, 'post'),
                "actions_onsite_conversion_post_save": _pick(actions, 'onsite_conversion.post_save'),
                "actions_page_engagement": _pick(actions, 'page_engagement'),
                "actions_post_engagement": _pick(actions, 'post_engagement'),
                "ad_id": _simple(ad, 'ad_id'),
                "ad_name": _simple(ad, 'ad_name'),
                "adset_id": _simple(ad, 'adset_id'),
                "adset_name": _simple(ad, 'adset_name'),
                "campaign_id": _simple(ad, 'campaign_id'),
                "campaign_name": _simple(ad, 'campaign_name'),
                "cost_per_action_type_link_click": _pick(costs, 'link_click'),
                "cost_per_action_type_post_reaction": _pick(costs, 'post_reaction'),
                "cost_per_action_type_post": _pick(costs, 'post'),
                "cost_per_action_type_onsite_conversion_post_save": _pick(costs, 'onsite_conversion.post_save'),
                "cost_per_action_type_page_engagement": _pick(costs, 'page_engagement'),
                "cost_per_action_type_post_engagement": _pick(costs, 'post_engagement'),
                "cost_per_inline_link_click": _simple(ad, 'cost_per_inline_link_click'),
                "cost_per_inline_post_engagement": _simple(ad, 'cost_per_inline_post_engagement'),
                "cost_per_unique_click": _simple(ad, 'cost_per_unique_click'),
                "cpm": _simple(ad, 'cpm'),
                "cpp": _simple(ad, 'cpp'),
                "ctr": _simple(ad, 'ctr'),
                "date_start": _simple(ad, 'date_start'),
                "date_stop": _simple(ad, 'date_stop'),
                "frequency": _simple(ad, 'frequency'),
                "impressions": _simple(ad, 'impressions'),
                "inline_link_clicks": _simple(ad, 'inline_link_clicks'),
                "inline_post_engagement": _simple(ad, 'inline_post_engagement'),
                "reach": _simple(ad, 'reach'),
                "spend": _simple(ad, 'spend'),
                "unique_clicks": _simple(ad, 'unique_clicks'),
                "unique_ctr": _simple(ad, 'unique_ctr'),
                "video_p100_watched_actions": _pick(_entries(ad, 'video_p100_watched_actions'), 'video_view'),
                "video_p25_watched_actions": _pick(_entries(ad, 'video_p25_watched_actions'), 'video_view'),
                "video_p50_watched_actions": _pick(_entries(ad, 'video_p50_watched_actions'), 'video_view'),
                "video_p75_watched_actions": _pick(_entries(ad, 'video_p75_watched_actions'), 'video_view'),
                "video_p95_watched_actions": _pick(_entries(ad, 'video_p95_watched_actions'), 'video_view'),
                "website_ctr_link_click": _pick(_entries(ad, 'website_ctr'), 'link_click'),
                "clicks": _simple(ad, 'clicks'),
                "date_of_report": ad['date_of_report'] if 'date_of_report' in ad else report_date,
            })

    fb_ads = pd.DataFrame(records)
    return fb_ads
| 42.52149 | 138 | 0.546765 |
ace25bde4ca8ba663b0f69b6147fb6fa4b05d35a | 848 | py | Python | 18-Years-in-a-Range/main.py | PawelZabinski/ocr-code-challenges-files | 24d30de694a00f2190790003778c6d65b8b2554b | [
"MIT"
] | null | null | null | 18-Years-in-a-Range/main.py | PawelZabinski/ocr-code-challenges-files | 24d30de694a00f2190790003778c6d65b8b2554b | [
"MIT"
] | null | null | null | 18-Years-in-a-Range/main.py | PawelZabinski/ocr-code-challenges-files | 24d30de694a00f2190790003778c6d65b8b2554b | [
"MIT"
] | null | null | null | # Years in a Range
# Write a program to count the number years in a range that has a repeated digit.
def characterOccurance(text):
    """Count how many times each character occurs in *text*.

    Args:
        text: An iterable of characters (typically a string).

    Returns:
        A dict mapping each character to its number of occurrences.
    """
    characterCount = dict()
    for char in text:
        # dict.get with a default collapses the original if/else branches
        # into a single idiomatic increment.
        characterCount[char] = characterCount.get(char, 0) + 1
    return characterCount
def checkRepeatingChar(text):
    """Return True if any character appears more than once in *text*.

    Equivalent to scanning the per-character counts for a value >= 2, but a
    set comparison does it in one pass: duplicates exist exactly when the
    number of distinct characters is smaller than the total length.
    """
    return len(set(text)) != len(text)
# Read the (inclusive lower, exclusive upper) year bounds from the user.
lbYear = int(input('Enter the lower bound: '))
upYear = int(input('Enter the upper bound: '))

# Collect every year in [lbYear, upYear) that contains a repeated digit.
years = [year for year in range(lbYear, upYear) if checkRepeatingChar(str(year))]

print('DISCLAIMER: The range used is exclusive meaning the upper bound is not taken into consideration')
print(f'There are {len(years)} years in the range, which have a repeating digit.') | 28.266667 | 104 | 0.720519 |
ace25c89a358bae1d848bc9420bf5c7f66de462a | 3,329 | py | Python | Assignment3_for_students/DependencyTree.py | jay-z007/Natural-Language-Processing | 377b90dea92280ba0740715443dbca232db99ee9 | [
"MIT"
] | null | null | null | Assignment3_for_students/DependencyTree.py | jay-z007/Natural-Language-Processing | 377b90dea92280ba0740715443dbca232db99ee9 | [
"MIT"
] | null | null | null | Assignment3_for_students/DependencyTree.py | jay-z007/Natural-Language-Processing | 377b90dea92280ba0740715443dbca232db99ee9 | [
"MIT"
] | 1 | 2019-11-14T17:08:52.000Z | 2019-11-14T17:08:52.000Z | import Config
"""
Represents a partial or complete dependency parse of a sentence, and
provides convenience methods for analyzing the parse.
Author: Danqi Chen
Modified by: Heeyoung Kwon
"""
class DependencyTree:
    """Represents a partial or complete dependency parse of a sentence, and
    provides convenience methods for analyzing the parse.

    Tokens are 1-indexed; index 0 is the artificial ROOT node.

    Author: Danqi Chen
    Modified by: Heeyoung Kwon

    Fix: print_tree used Python 2 print statements, which are a syntax error
    on Python 3; it now uses the print() function. The bare class-body
    strings that described each method were also moved inside the methods so
    they become real docstrings.
    """

    def __init__(self):
        self.n = 0  # number of tokens added so far
        self.head = [Config.NONEXIST]  # head[i] is the head index of token i
        self.label = [Config.UNKNOWN]  # label[i] is the relation label of token i
        self.counter = -1  # scratch counter used by the projectivity check

    def add(self, h, l):
        """Add the next token to the parse.

        h: Head of the next token
        l: Dependency relation label between this node and its head
        """
        self.n += 1
        self.head.append(h)
        self.label.append(l)

    def set(self, k, h, l):
        """Establish a labeled dependency relation between the two given nodes.

        k: Index of the dependent node
        h: Index of the head node
        l: Label of the dependency relation
        """
        self.head[k] = h
        self.label[k] = l

    def getHead(self, k):
        """Return the head index of token k, or Config.NONEXIST when out of range."""
        if k <= 0 or k > self.n:
            return Config.NONEXIST
        else:
            return self.head[k]

    def getLabel(self, k):
        """Return the relation label of token k, or Config.NULL when out of range."""
        if k <= 0 or k > self.n:
            return Config.NULL
        else:
            return self.label[k]

    def getRoot(self):
        """Get the index of the node which is the root of the parse
        (i.e., the node which has the ROOT node as its head), or 0 if none.
        """
        for k in range(1, self.n + 1):
            if self.getHead(k) == 0:
                return k
        return 0

    def isSingleRoot(self):
        """Check if this parse has exactly one root."""
        roots = 0
        for k in range(1, self.n + 1):
            if self.getHead(k) == 0:
                roots += 1
        return roots == 1

    def isTree(self):
        """Check if the tree is legal: all heads in range and no cycles."""
        h = []
        h.append(-1)
        for i in range(1, self.n + 1):
            if self.getHead(i) < 0 or self.getHead(i) > self.n:
                return False
            h.append(-1)
        for i in range(1, self.n + 1):
            # Walk the head chain from token i, marking visited nodes with i;
            # revisiting a node already marked i means a cycle.
            k = i
            while k > 0:
                if h[k] >= 0 and h[k] < i:
                    break  # already validated by an earlier chain
                if h[k] == i:
                    return False
                h[k] = i
                k = self.getHead(k)
        return True

    def isProjective(self):
        """Check if the tree is projective (no crossing dependency arcs)."""
        if self.isTree() == False:
            return False
        self.counter = -1
        return self.visitTree(0)

    def visitTree(self, w):
        """Inner recursive helper for checking projectivity of the tree."""
        for i in range(1, w):
            if self.getHead(i) == w and self.visitTree(i) == False:
                return False
        self.counter += 1
        # In a projective tree, an in-order traversal visits nodes in
        # left-to-right index order, so w must equal the visit counter.
        if w != self.counter:
            return False
        for i in range(w + 1, self.n + 1):
            if self.getHead(i) == w and self.visitTree(i) == False:
                return False
        return True

    def equal(self, t):
        """Return True when this parse has the same heads and labels as *t*."""
        if t.n != self.n:
            return False
        for i in range(1, self.n + 1):
            if self.getHead(i) != t.getHead(i):
                return False
            if self.getLabel(i) != t.getLabel(i):
                return False
        return True

    def print_tree(self):
        """Print one 'index head label' line per token, then a blank line."""
        for i in range(1, self.n + 1):
            print(str(i) + " " + str(self.getHead(i)) + " " + self.getLabel(i))
        print()
| 25.219697 | 79 | 0.506158 |
ace25cd2d43d3bcc3d3f1dfd6fd52c6242495225 | 1,538 | py | Python | DeepLearningExamples/TensorFlow/Recommendation/WideAndDeep/utils/hooks/benchmark_hooks.py | puririshi98/benchmark | 79f554f1e1cf36f62994c78e0e6e5b360f554022 | [
"BSD-3-Clause"
] | null | null | null | DeepLearningExamples/TensorFlow/Recommendation/WideAndDeep/utils/hooks/benchmark_hooks.py | puririshi98/benchmark | 79f554f1e1cf36f62994c78e0e6e5b360f554022 | [
"BSD-3-Clause"
] | null | null | null | DeepLearningExamples/TensorFlow/Recommendation/WideAndDeep/utils/hooks/benchmark_hooks.py | puririshi98/benchmark | 79f554f1e1cf36f62994c78e0e6e5b360f554022 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dllogger
import tensorflow as tf
import time
from .training_hooks import MeanAccumulator
__all__ = ['BenchmarkLoggingHook']
class BenchmarkLoggingHook(tf.train.SessionRunHook):
    """Session hook that logs per-step training throughput via dllogger.

    Throughput (samples per second) is reported for every step; steps at or
    beyond the warmup window are additionally folded into a running mean
    kept in ``self.mean_throughput``.
    """

    def __init__(self, global_batch_size, warmup_steps=100):
        self.warmup_steps = warmup_steps
        self.global_batch_size = global_batch_size
        self.current_step = 0
        self.t0 = None  # wall-clock stamp taken just before each step
        self.mean_throughput = MeanAccumulator()

    def before_run(self, run_context):
        # Stamp the time immediately before the step executes.
        self.t0 = time.time()

    def after_run(self, run_context, run_values):
        elapsed = time.time() - self.t0
        throughput = self.global_batch_size / elapsed
        # Log every step, but only accumulate into the mean after warmup.
        dllogger.log(data={"samplesps": throughput}, step=(0, self.current_step))
        if self.current_step >= self.warmup_steps:
            self.mean_throughput.consume(throughput)
        self.current_step += 1
| 32.723404 | 84 | 0.714564 |
ace25fea14899d3dd6c9ce45c0178e3bd41042f1 | 1,026 | py | Python | Additional/win.py | AakashSYadav/SpriD | d8f1bc6641a8120339715821c53d548e23b8d32e | [
"MIT"
] | 1 | 2019-04-11T10:26:20.000Z | 2019-04-11T10:26:20.000Z | Additional/win.py | AakashSYadav/DME | d8f1bc6641a8120339715821c53d548e23b8d32e | [
"MIT"
] | null | null | null | Additional/win.py | AakashSYadav/DME | d8f1bc6641a8120339715821c53d548e23b8d32e | [
"MIT"
] | null | null | null | # https://stackoverflow.com/questions/34611100/is-it-possible-to-keep-the-same-window-for-every-class-in-pythons-tkinter
from tkinter import *
class Window1:
    """First screen: a dummy button plus an "Example" button that swaps this
    screen for Window2 inside the same Tk root window."""

    def __init__(self, master):
        # Keep the shared Tk root so it can be handed to Window2 later.
        self.master = master
        master.title("Welcome to Aakash")
        master.geometry('350x200')
        dummy = Button(self.master, text="Click Me")
        dummy.pack()
        self.label = Button(self.master, text="Example", command=self.load_new)
        self.label.pack()

    def load_new(self):
        # Remove the trigger button, then reuse the same root for Window2.
        self.label.destroy()
        self.another = Window2(self.master)
class Window2:
    """Second screen: a single "Example" label in the shared Tk root window."""

    def __init__(self, master):
        self.master = master  # keep `root` available on the instance
        self.label = Label(master, text="Example")
        self.label.pack()
# Build the root window, attach the first screen, and enter the Tk event loop.
root = Tk()
run = Window1(root)
root.mainloop()
| 24.428571 | 120 | 0.615984 |
ace260f8de8ebd7fb6cfc4fcd67dfd8d2fc498e5 | 3,787 | py | Python | python/tests/test_linked_list.py | Mmarcos01/data-structures-and-algorithms | c43922d759718219075a568e8507719375e430c5 | [
"MIT"
] | null | null | null | python/tests/test_linked_list.py | Mmarcos01/data-structures-and-algorithms | c43922d759718219075a568e8507719375e430c5 | [
"MIT"
] | 10 | 2021-04-03T04:27:18.000Z | 2021-07-07T04:32:13.000Z | python/tests/test_linked_list.py | Mmarcos01/data-structures-and-algorithms | c43922d759718219075a568e8507719375e430c5 | [
"MIT"
] | 1 | 2022-03-31T04:33:24.000Z | 2022-03-31T04:33:24.000Z | import pytest
from linked_list.linked_list import LinkedList, Node, zipLists
def test_import():
    # Smoke test: the LinkedList class imported successfully and is truthy.
    assert LinkedList
def test_empty_linked_list():
    # NOTE(review): this function name is redefined later in this module, so
    # under pytest only the later definition is collected — rename one of them.
    empty = LinkedList()
    assert empty
def test_instantiate_node():
    """A Node keeps the value it was built with and its next pointer."""
    node = Node('green', None)
    assert node.value == 'green'
    assert node.next is None
def test_insert():
    """insert() places the new value at the head of the list."""
    ll = LinkedList()
    ll.insert("a")
    assert ll.head.value == "a"
def test_includes_and_inserts():
    """includes() reports True for a value that was inserted earlier."""
    ll = LinkedList()
    for color in ('red', 'blue', 'green'):
        ll.insert(color)
    assert ll.includes('red')
def test_to_string():
    """__str__ renders the list head-to-tail, terminated by None.

    insert() prepends, so the most recent insertion appears first.
    """
    ll = LinkedList()
    for color in ('red', 'blue', 'green'):
        ll.insert(color)
    assert str(ll) == "{'green'} -> {'blue'} -> {'red'} -> None"
def test_append():
    """append() adds values at the tail and supports call chaining."""
    ll = LinkedList()
    ll.append('orange').append('purple')
    assert str(ll) == "{'orange'} -> {'purple'} -> None"
def test_insertAfter():
    """insertAfter() places the new node directly after the target value."""
    ll = LinkedList()
    for color in ('green', 'blue', 'red'):
        ll.append(color)
    ll.insertAfter('blue', 'orange')
    assert str(ll) == "{'green'} -> {'blue'} -> {'orange'} -> {'red'} -> None"
def test_insertBefore():
    """insertBefore() places the new node directly before the target value."""
    ll = LinkedList()
    for color in ('green', 'blue', 'red'):
        ll.append(color)
    ll.insertBefore('blue', 'orange')
    assert str(ll) == "{'green'} -> {'orange'} -> {'blue'} -> {'red'} -> None"
def test_empty_linked_list():
    # NOTE(review): duplicate of the definition near the top of this module;
    # this later one shadows it, so pytest only collects one of the two.
    empty = LinkedList()
    assert empty
def test_k_is_greater_than_ll_length():
    # NOTE(review): the name claims k exceeds the list length, but the body
    # uses k=2 on a 5-element list — the name and scenario disagree; consider
    # renaming or changing k. (Renaming is left to the test-suite owner.)
    ll1 = LinkedList()
    ll1.append("a").append("b").append("c").append("d").append("e")
    actual = ll1.kth_from_the_end(2)
    expected = "c"
    assert actual == expected
def test_k_and_the_length_of_the_list_are_the_same():
    """When k equals the list length, the first element comes back."""
    ll = LinkedList()
    for ch in ("a", "b", "c", "d", "e"):
        ll.append(ch)
    assert ll.kth_from_the_end(5) == "a"
def test_k_is_negative():
    """A negative k yields the sentinel error string."""
    ll = LinkedList()
    for ch in ("a", "b", "c", "d", "e"):
        ll.append(ch)
    assert ll.kth_from_the_end(-5) == "K is negative"
def test_linked_list_is_of_a_size_1():
    """kth_from_the_end(1) on a single-element list returns that element."""
    ll = LinkedList()
    ll.append("a")
    assert ll.kth_from_the_end(1) == "a"
def test_k_is_somewhere_in_the_middle_of_the_linked_list():
    """A mid-range k counts back from the tail of the list."""
    ll = LinkedList()
    for ch in ("a", "b", "c", "d", "e"):
        ll.append(ch)
    assert ll.kth_from_the_end(3) == "b"
def test_zip_two_lists():
    """zipLists() interleaves two equal-length lists, first list leading."""
    odds = LinkedList()
    evens = LinkedList()
    for ch in ("a", "c", "e"):
        odds.append(ch)
    for ch in ("b", "d", "f"):
        evens.append(ch)
    zipped = zipLists(odds, evens)
    assert str(zipped) == "{'a'} -> {'b'} -> {'c'} -> {'d'} -> {'e'} -> {'f'} -> None"
def test_zip_two_lists_different_sizes():
    """When the second list is shorter, the first list's leftovers are kept."""
    longer = LinkedList()
    shorter = LinkedList()
    for ch in ("a", "c", "e"):
        longer.append(ch)
    shorter.append("b")
    assert str(zipLists(longer, shorter)) == "{'a'} -> {'b'} -> {'c'} -> {'e'} -> None"
def test2_zip_two_lists_different_sizes():
    """When the second list is longer, its trailing extra node is dropped
    (expected output ends at 'f', omitting 'g')."""
    shorter = LinkedList()
    longer = LinkedList()
    for ch in ("a", "c", "e"):
        shorter.append(ch)
    for ch in ("b", "d", "f", "g"):
        longer.append(ch)
    assert str(zipLists(shorter, longer)) == "{'a'} -> {'b'} -> {'c'} -> {'d'} -> {'e'} -> {'f'} -> None"
| 28.908397 | 75 | 0.616055 |
ace26110a936c1ebde9cd7b3a8d93aab8a4c012e | 10,146 | py | Python | python_modules/libraries/dagster-k8s/dagster_k8s/executor.py | dehume/dagster | 3b55c4e864775b7a70ed8ff539629317a1202505 | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagster-k8s/dagster_k8s/executor.py | dehume/dagster | 3b55c4e864775b7a70ed8ff539629317a1202505 | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagster-k8s/dagster_k8s/executor.py | dehume/dagster | 3b55c4e864775b7a70ed8ff539629317a1202505 | [
"Apache-2.0"
] | null | null | null | import kubernetes
from dagster_k8s.launcher import K8sRunLauncher
from dagster import Field, StringSource, check, executor
from dagster.core.definitions.executor_definition import multiple_process_executor_requirements
from dagster.core.errors import DagsterUnmetExecutorRequirementsError
from dagster.core.events import DagsterEvent, DagsterEventType, EngineEventData, MetadataEntry
from dagster.core.execution.plan.objects import StepFailureData
from dagster.core.execution.retries import RetryMode, get_retries_config
from dagster.core.executor.base import Executor
from dagster.core.executor.init import InitExecutorContext
from dagster.core.executor.step_delegating import StepDelegatingExecutor
from dagster.core.executor.step_delegating.step_handler import StepHandler
from dagster.core.executor.step_delegating.step_handler.base import StepHandlerContext
from dagster.core.types.dagster_type import Optional
from dagster.utils import frozentags, merge_dicts
from .container_context import K8sContainerContext
from .job import (
DagsterK8sJobConfig,
construct_dagster_k8s_job,
get_k8s_job_name,
get_user_defined_k8s_config,
)
from .utils import delete_job
@executor(
    name="k8s",
    config_schema=merge_dicts(
        DagsterK8sJobConfig.config_type_job(),
        {"job_namespace": Field(StringSource, is_required=False)},
        {"retries": get_retries_config()},
    ),
    requirements=multiple_process_executor_requirements(),
)
def k8s_job_executor(init_context: InitExecutorContext) -> Executor:
    """
    Executor which launches steps as Kubernetes Jobs.

    To use the `k8s_job_executor`, set it as the `executor_def` when defining a job:

    .. literalinclude:: ../../../../../../python_modules/libraries/dagster-k8s/dagster_k8s_tests/unit_tests/test_example_executor_mode_def.py
       :start-after: start_marker
       :end-before: end_marker
       :language: python

    Then you can configure the executor with run config as follows:

    .. code-block:: YAML

        execution:
          config:
            job_namespace: 'some-namespace'
            image_pull_policy: ...
            image_pull_secrets: ...
            service_account_name: ...
            env_config_maps: ...
            env_secrets: ...
            env_vars: ...
            job_image: ... # leave out if using userDeployments

    Configuration set on the Kubernetes Jobs and Pods created by the `K8sRunLauncher` will also be
    set on Kubernetes Jobs and Pods created by the `k8s_job_executor`.
    """
    run_launcher = init_context.instance.run_launcher
    # The executor reuses the K8sRunLauncher's cluster configuration
    # (in-cluster config / kubeconfig below), so it refuses to start on an
    # instance configured with any other run launcher.
    if not isinstance(run_launcher, K8sRunLauncher):
        raise DagsterUnmetExecutorRequirementsError(
            "This engine is only compatible with a K8sRunLauncher; configure the "
            "K8sRunLauncher on your instance to use it.",
        )

    exc_cfg = init_context.executor_config

    # Executor-level k8s settings; the K8sStepHandler merges these over the
    # run-level container context when launching each step.
    k8s_container_context = K8sContainerContext(
        image_pull_policy=exc_cfg.get("image_pull_policy"),
        image_pull_secrets=exc_cfg.get("image_pull_secrets"),
        service_account_name=exc_cfg.get("service_account_name"),
        env_config_maps=exc_cfg.get("env_config_maps"),
        env_secrets=exc_cfg.get("env_secrets"),
        env_vars=exc_cfg.get("env_vars"),
        volume_mounts=exc_cfg.get("volume_mounts"),
        volumes=exc_cfg.get("volumes"),
        labels=exc_cfg.get("labels"),
        namespace=exc_cfg.get("job_namespace"),
    )

    return StepDelegatingExecutor(
        K8sStepHandler(
            image=exc_cfg.get("job_image"),
            container_context=k8s_container_context,
            load_incluster_config=run_launcher.load_incluster_config,
            kubeconfig_file=run_launcher.kubeconfig_file,
        ),
        retries=RetryMode.from_config(init_context.executor_config["retries"]),
        should_verify_step=True,
    )
class K8sStepHandler(StepHandler):
    """StepHandler that launches, polls, and terminates one Kubernetes Job
    per execution plan step, on behalf of the StepDelegatingExecutor."""

    @property
    def name(self):
        # Human-readable handler name (surfaced in engine events/logs).
        return "K8sStepHandler"

    def __init__(
        self,
        image: Optional[str],
        container_context: K8sContainerContext,
        load_incluster_config: bool,
        kubeconfig_file: Optional[str],
        k8s_client_batch_api=None,
    ):
        """
        image: default container image for step jobs; may be None, in which
            case launch_step falls back to the image from the run's
            repository origin.
        container_context: executor-level k8s configuration; merged over the
            run-level configuration for each step.
        load_incluster_config: when True, load the in-cluster service-account
            config (mutually exclusive with kubeconfig_file).
        kubeconfig_file: path to a kubeconfig; only valid when
            load_incluster_config is False.
        k8s_client_batch_api: optional BatchV1Api override — presumably for
            tests; TODO confirm.
        """
        super().__init__()

        self._executor_image = check.opt_str_param(image, "image")
        self._executor_container_context = check.inst_param(
            container_context, "container_context", K8sContainerContext
        )

        self._fixed_k8s_client_batch_api = k8s_client_batch_api

        if load_incluster_config:
            check.invariant(
                kubeconfig_file is None,
                "`kubeconfig_file` is set but `load_incluster_config` is True.",
            )
            kubernetes.config.load_incluster_config()
        else:
            check.opt_str_param(kubeconfig_file, "kubeconfig_file")
            kubernetes.config.load_kube_config(kubeconfig_file)

    def _get_container_context(self, step_handler_context: StepHandlerContext):
        # Run-level context (from the run + run launcher) is the base;
        # executor-level settings from this handler are merged on top.
        run_target = K8sContainerContext.create_for_run(
            step_handler_context.pipeline_run, step_handler_context.instance.run_launcher
        )
        return run_target.merge(self._executor_container_context)

    @property
    def _batch_api(self):
        # Prefer the injected client; otherwise build a fresh BatchV1Api.
        return self._fixed_k8s_client_batch_api or kubernetes.client.BatchV1Api()

    def _get_k8s_step_job_name(self, step_handler_context):
        """Derive a deterministic, retry-aware Job name for the single step."""
        step_key = step_handler_context.execute_step_args.step_keys_to_execute[0]
        name_key = get_k8s_job_name(
            step_handler_context.execute_step_args.pipeline_run_id,
            step_key,
        )

        if step_handler_context.execute_step_args.known_state:
            retry_state = step_handler_context.execute_step_args.known_state.get_retry_state()
            if retry_state.get_attempt_count(step_key):
                # Suffix the attempt count so retried steps get fresh Job names.
                return "dagster-step-%s-%d" % (name_key, retry_state.get_attempt_count(step_key))

        return "dagster-step-%s" % (name_key)

    def launch_step(self, step_handler_context: StepHandlerContext):
        """Create a Kubernetes Job for the single step; return engine events."""
        events = []

        assert (
            len(step_handler_context.execute_step_args.step_keys_to_execute) == 1
        ), "Launching multiple steps is not currently supported"
        step_key = step_handler_context.execute_step_args.step_keys_to_execute[0]

        job_name = self._get_k8s_step_job_name(step_handler_context)
        pod_name = job_name

        args = step_handler_context.execute_step_args.get_command_args()

        container_context = self._get_container_context(step_handler_context)

        job_config = container_context.get_k8s_job_config(
            self._executor_image, step_handler_context.instance.run_launcher
        )

        if not job_config.job_image:
            # Fall back to the image recorded on the code location's origin.
            job_config = job_config.with_image(
                step_handler_context.execute_step_args.pipeline_origin.repository_origin.container_image
            )

        if not job_config.job_image:
            raise Exception("No image included in either executor config or the job")

        # Per-step k8s overrides come from the step's tags.
        user_defined_k8s_config = get_user_defined_k8s_config(
            frozentags(step_handler_context.step_tags[step_key])
        )

        job = construct_dagster_k8s_job(
            job_config=job_config,
            args=args,
            job_name=job_name,
            pod_name=pod_name,
            component="step_worker",
            user_defined_k8s_config=user_defined_k8s_config,
            labels={
                "dagster/job": step_handler_context.execute_step_args.pipeline_origin.pipeline_name,
                "dagster/op": step_key,
                "dagster/run-id": step_handler_context.execute_step_args.pipeline_run_id,
            },
        )

        events.append(
            DagsterEvent(
                event_type_value=DagsterEventType.ENGINE_EVENT.value,
                pipeline_name=step_handler_context.execute_step_args.pipeline_origin.pipeline_name,
                step_key=step_key,
                message=f"Executing step {step_key} in Kubernetes job {job_name}",
                event_specific_data=EngineEventData(
                    [
                        MetadataEntry("Step key", value=step_key),
                        MetadataEntry("Kubernetes Job name", value=job_name),
                    ],
                ),
            )
        )

        self._batch_api.create_namespaced_job(body=job, namespace=container_context.namespace)

        return events

    def check_step_health(self, step_handler_context: StepHandlerContext):
        """Poll the step's Job; emit a STEP_FAILURE event if the Job failed."""
        assert (
            len(step_handler_context.execute_step_args.step_keys_to_execute) == 1
        ), "Launching multiple steps is not currently supported"
        step_key = step_handler_context.execute_step_args.step_keys_to_execute[0]

        job_name = self._get_k8s_step_job_name(step_handler_context)

        container_context = self._get_container_context(step_handler_context)

        job = self._batch_api.read_namespaced_job(
            namespace=container_context.namespace, name=job_name
        )
        if job.status.failed:
            return [
                DagsterEvent(
                    event_type_value=DagsterEventType.STEP_FAILURE.value,
                    pipeline_name=step_handler_context.execute_step_args.pipeline_origin.pipeline_name,
                    step_key=step_key,
                    message=f"Discovered failed Kubernetes job {job_name} for step {step_key}",
                    event_specific_data=StepFailureData(
                        error=None,
                        user_failure_data=None,
                    ),
                )
            ]
        return []

    def terminate_step(self, step_handler_context: StepHandlerContext):
        """Delete the Kubernetes Job backing the single step."""
        assert (
            len(step_handler_context.execute_step_args.step_keys_to_execute) == 1
        ), "Launching multiple steps is not currently supported"
        job_name = self._get_k8s_step_job_name(step_handler_context)

        container_context = self._get_container_context(step_handler_context)

        delete_job(job_name=job_name, namespace=container_context.namespace)
        return []
| 39.023077 | 141 | 0.686477 |
ace2611ed4bd1c4014d8eb90901fef85a20fce58 | 1,561 | py | Python | DailyProgrammer/DP20121030C.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | 2 | 2020-12-23T18:59:22.000Z | 2021-04-14T13:16:09.000Z | DailyProgrammer/DP20121030C.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | null | null | null | DailyProgrammer/DP20121030C.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | null | null | null | """
[10/30/2012] Challenge #109 [Difficult] Death Mountains
https://www.reddit.com/r/dailyprogrammer/comments/12csl5/10302012_challenge_109_difficult_death_mountains/
**Description:**
You are a proud explorer, walking towards a range of mountains. These mountains, as they appear to you, are a series of
isosceles triangles all clustered on the horizon. [Check out this example image](http://imgur.com/a/lyhMt), sketched by
your awesome aid nint22 (smiling-mountain not important). Your goal, given the position of the base of these triangles,
how tall they are, and their base-width, is to compute the overall unique area. Note that you should not count areas
that have overlapping mountains - you only care about what you can see (i.e. only count the purple areas once in the
[example image](http://imgur.com/a/lyhMt)).
**Formal Inputs & Outputs:**
*Input Description:*
Integer n - The number of triangles
Array of triangles T - An array of triangles, where each triangle has a position (float x), a base-length (float
width), and a triangle-height (float height).
*Output Description:*
Print the area of the triangles you see (without measuring overlap more than once), accurate to the second decimal
digit.
**Sample Inputs & Outputs:**
Todo... will have to solve this myself (which is pretty dang hard).
**Notes:**
It is critically important to NOT count overlapped triangle areas more than once. Again, only count the purple areas
once in the [example image](http://imgur.com/a/lyhMt)..
"""
def main():
    """Placeholder entry point — the challenge solution is unimplemented."""
    # TODO: compute the visible (non-overlapping) area of the mountain
    # triangles described in the module docstring above.
    pass
# Script entry point.
if __name__ == "__main__":
    main()
| 44.6 | 119 | 0.759769 |
ace2612cc71ffc242e9102c65907aaec13e4511d | 563 | py | Python | hackerearth/Algorithms/Caesar's Cipher/test.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | 4 | 2020-07-24T01:59:50.000Z | 2021-07-24T15:14:08.000Z | hackerearth/Algorithms/Caesar's Cipher/test.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | hackerearth/Algorithms/Caesar's Cipher/test.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
    """Black-box test for solution.py: run it with canned stdin, check stdout."""

    @patch('builtins.input', side_effect=[
        '2',
        'ABC',
        'DEF',
        'AAA',
        'PQR',
    ])
    def test_case_0(self, input_mock=None):
        # Importing `solution` executes the script under test; every input()
        # call it makes is fed the canned lines above, and everything it
        # prints is captured into text_trap for comparison.
        text_trap = io.StringIO()
        with redirect_stdout(text_trap):
            import solution
        self.assertEqual(text_trap.getvalue(),
                         '3\n' +
                         '-1\n')
if __name__ == '__main__':
    # Running this file directly invokes the unittest runner.
    unittest.main()
| 21.653846 | 46 | 0.552398 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.