Dataset schema (column, dtype, observed range):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 3 – 1.03M |
| ext | string (10 classes) | — |
| lang | string (1 class) | — |
| max_stars_repo_path | string | length 3–972 |
| max_stars_repo_name | string | length 6–130 |
| max_stars_repo_head_hexsha | string | length 40–78 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 (nullable) | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 3–972 |
| max_issues_repo_name | string | length 6–130 |
| max_issues_repo_head_hexsha | string | length 40–78 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 (nullable) | 1 – 116k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 3–972 |
| max_forks_repo_name | string | length 6–130 |
| max_forks_repo_head_hexsha | string | length 40–78 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 (nullable) | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 3 – 1.03M |
| avg_line_length | float64 | 1.13 – 941k |
| max_line_length | int64 | 2 – 941k |
| alphanum_fraction | float64 | 0 – 1 |

Sample rows (metadata followed by file content):
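If this preview comes from a Hugging Face dataset with the schema above, rows can be streamed with the `datasets` library. The dataset path below is a placeholder, not the actual dataset name:

```python
# Sketch: iterate rows of a dataset with this schema using Hugging Face `datasets`.
# "user/dataset-name" is a placeholder -- substitute the real dataset path.
from datasets import load_dataset

ds = load_dataset("user/dataset-name", split="train", streaming=True)
for row in ds:
    # Each row carries the file content plus repo/star/issue/fork metadata.
    print(row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])
    break
```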
hexsha: 797620ddca2cd20916d834d86186eb177e9a7497
size: 3,213 · ext: py · lang: Python
repo path: angr/knowledge_plugins/cfg/memory_data.py
repo: Alexeyan/angr @ 445fa2036584598d310ffd58436566847bbc7e1c
licenses: ["BSD-2-Clause"]
max_stars_count / max_issues_count / max_forks_count: null (no star/issue/fork event datetimes; repo path, name, head hexsha and licenses are identical across the stars/issues/forks column groups)
content:
# pylint:disable=no-member
from ...protos import cfg_pb2
from ...serializable import Serializable


class MemoryDataSort:
    Unspecified = None
    Unknown = "unknown"
    Integer = "integer"
    PointerArray = "pointer-array"
    String = "string"
    UnicodeString = "unicode"
    SegmentBoundary = "segment-boundary"
    CodeReference = "code reference"
    GOTPLTEntry = "GOT PLT Entry"
    ELFHeader = 'elf-header'


_SORT_TO_IDX = {
    MemoryDataSort.Unspecified: cfg_pb2.MemoryData.Unspecified,
    MemoryDataSort.Unknown: cfg_pb2.MemoryData.UnknownDataType,
    MemoryDataSort.Integer: cfg_pb2.MemoryData.Integer,
    MemoryDataSort.PointerArray: cfg_pb2.MemoryData.PointerArray,
    MemoryDataSort.String: cfg_pb2.MemoryData.String,
    MemoryDataSort.UnicodeString: cfg_pb2.MemoryData.UnicodeString,
    MemoryDataSort.SegmentBoundary: cfg_pb2.MemoryData.SegmentBoundary,
    MemoryDataSort.CodeReference: cfg_pb2.MemoryData.CodeReference,
    MemoryDataSort.GOTPLTEntry: cfg_pb2.MemoryData.GOTPLTEntry,
    MemoryDataSort.ELFHeader: cfg_pb2.MemoryData.ELFHeader,
}

_IDX_TO_SORT = dict((v, k) for k, v in _SORT_TO_IDX.items())


class MemoryData(Serializable):
    """
    MemoryData describes the syntactic content of a single address of memory.
    """

    __slots__ = ('addr', 'size', 'sort', 'max_size', 'pointer_addr', 'content', )

    def __init__(self, address, size, sort, pointer_addr=None, max_size=None):
        self.addr = address
        self.size = size
        self.sort = sort
        self.max_size = max_size
        self.pointer_addr = pointer_addr
        self.content = None  # optional

    @property
    def address(self):
        return self.addr

    def __repr__(self):
        return "\\%#x, %s, %s/" % (self.address,
                                   "%d bytes" % self.size if self.size is not None else "size unknown",
                                   self.sort
                                   )

    def copy(self):
        """
        Make a copy of the MemoryData.

        :return: A copy of the MemoryData instance.
        :rtype: MemoryData
        """
        s = MemoryData(self.address, self.size, self.sort, pointer_addr=self.pointer_addr, max_size=self.max_size)
        s.content = self.content
        return s

    def fill_content(self, loader):
        """
        Load data to fill self.content.

        :param loader: The project loader.
        :return: None
        """
        if self.sort == MemoryDataSort.String:
            self.content = loader.memory.load(self.addr, self.size)
            if self.content.endswith(b"\x00"):
                self.content = self.content.strip(b"\x00")
        else:
            # FIXME: Other types are not supported yet
            return

    #
    # Serialization
    #

    @classmethod
    def _get_cmsg(cls):
        return cfg_pb2.MemoryData()

    def serialize_to_cmessage(self):
        cmsg = self._get_cmsg()
        cmsg.ea = self.addr
        cmsg.size = self.size
        cmsg.type = _SORT_TO_IDX[self.sort]
        return cmsg

    @classmethod
    def parse_from_cmessage(cls, cmsg, **kwargs):
        md = cls(cmsg.ea, cmsg.size, _IDX_TO_SORT[cmsg.type])
        return md
avg_line_length: 29.75 · max_line_length: 114 · alphanum_fraction: 0.630875
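For reference, a minimal sketch of how the MemoryData class above round-trips through its protobuf message, assuming an angr checkout where the generated cfg_pb2 module is importable; the address and size are made up for illustration:

```python
# Hypothetical round-trip example (assumes angr and its generated cfg_pb2 are importable).
from angr.knowledge_plugins.cfg.memory_data import MemoryData, MemoryDataSort

md = MemoryData(0x400100, 16, MemoryDataSort.String)   # describe 16 bytes at 0x400100
cmsg = md.serialize_to_cmessage()                       # -> cfg_pb2.MemoryData protobuf message
restored = MemoryData.parse_from_cmessage(cmsg)         # rebuild a MemoryData from the message

assert (restored.addr, restored.size, restored.sort) == (md.addr, md.size, md.sort)
```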
hexsha: f2a5011518e607bc483e1d5e191da6a752d0904e
size: 8,134 · ext: py · lang: Python
repo path: 0x0C-python-almost_a_circle/tests/test_models/test_base.py
repo: BennettDixon/holbertonschool-higher_level_programming @ 3fbcd5e009548aab5539ce8610b4113f005964c4
licenses: ["MIT"]
max_stars_count: 1 (stars events: 2022-02-07T12:13:18.000Z) · max_issues_count: null · max_forks_count: 1 (forks events: 2021-12-06T18:15:54.000Z)
content:
#!/usr/bin/python3
"""module for use in testing
base class
"""
import os
import unittest
from models.base import Base
from models.rectangle import Rectangle
from models.square import Square


class TestBase(unittest.TestCase):
    """class for test case for base class
    """

    def test_basic(self):
        """tests basic functionality
        """
        b = Base()
        b2 = Base()
        b3 = Base()
        self.assertEqual(b2.id + 1, b3.id)

    def test_given_id(self):
        """tests id being set when given and not upticking default
        """
        b = Base()
        b2 = Base(24)
        b3 = Base(45)
        b4 = Base()
        self.assertEqual(45, b3.id)
        self.assertEqual(b.id + 1, b4.id)

    def test_json_method(self):
        """tests Base's to_json_string method
        """
        r1 = Rectangle(4, 5, 6, 7, 8)
        r2 = Rectangle(10, 11, 12, 13, 14)
        dictionary = r1.to_dictionary()
        d2 = r2.to_dictionary()
        json_dict = Base.to_json_string([dictionary, d2])
        j_d = eval(json_dict)
        self.assertEqual(j_d[0]['id'], 8)
        self.assertEqual(j_d[1]['x'], 12)

    def test_write_file_basic(self):
        """tests write to file basic capabilities, given 1 type of class
        """
        s = Square(3, 1, 1, 10)
        s2 = Square(4, 2, 2, 20)
        r1 = Rectangle(5, 6, 3, 3, 30)
        r2 = Rectangle(7, 8, 4, 4, 40)
        Base.save_to_file([s, s2])
        with open('Square.json', 'r', encoding='utf-8') as myFile:
            text = myFile.read()
        list_of_dicts = eval(text)
        self.assertEqual(list_of_dicts[0]['id'], 10)
        self.assertEqual(list_of_dicts[1]['x'], 2)
        Base.save_to_file([r1, r2])
        with open('Rectangle.json', 'r', encoding='utf-8') as myFile:
            text = myFile.read()
        list_of_dicts = eval(text)
        self.assertEqual(list_of_dicts[0]['id'], 30)
        self.assertEqual(list_of_dicts[1]['x'], 4)

    def test_write_file_complex(self):
        """tests writing a file with harder inputs
        """
        s = Square(3, 1, 1, 10)
        s2 = Square(4, 2, 2, 20)
        r1 = Rectangle(5, 6, 3, 3, 30)
        r2 = Rectangle(7, 8, 4, 4, 40)
        Base.save_to_file(["hello", 42, "more garb", True, s, s2])
        with open('Square.json', 'r', encoding='utf-8') as myFile:
            text = myFile.read()
        list_of_dicts = eval(text)
        self.assertEqual(list_of_dicts[0]['id'], 10)
        self.assertEqual(list_of_dicts[1]['x'], 2)
        Base.save_to_file([s, 89, r1, "garb", 42, s2, r2])
        with open('Rectangle.json', 'r', encoding='utf-8') as myFile:
            text = myFile.read()
        list_of_dicts = eval(text)
        self.assertEqual(list_of_dicts[1]['id'], 30)
        self.assertEqual(list_of_dicts[3]['x'], 4)

    def test_write_file_empty(self):
        """tests empty list is written to correct default file
        """
        Base.save_to_file([])
        with open('Rectangle.json', 'r', encoding='utf-8') as myFile:
            text = myFile.read()
        self.assertEqual(text, "[]")

    def test_from_json(self):
        """tests bases from_json_string method to convert string to
        -> list of dictionaries
        """
        s = Square(4, 8, 9, 2)
        r = Rectangle(9, 2, 3, 4)
        r_d = r.to_dictionary()
        s_d = s.to_dictionary()
        json_d = Base.to_json_string([s_d, r_d])
        d_list = Base.from_json_string(json_d)
        self.assertEqual(d_list[0]['id'], 2)
        self.assertEqual(d_list[1]['width'], 9)
        self.assertEqual(len(d_list), 2)

    def test_from_json_empty(self):
        """tests base's from_json_string method with empty inputs
        """
        d_list = Base.from_json_string("")
        self.assertEqual(len(d_list), 0)
        d_list = Base.from_json_string(None)
        self.assertEqual(len(d_list), 0)

    def test_create_inst(self):
        r = Rectangle(9, 2, 3, 4, 45)
        s = Square(4, 8, 9, 2)
        r_d = r.to_dictionary()
        s_d = s.to_dictionary()
        r2 = Rectangle.create(**r_d)
        s2 = Square.create(**s_d)
        self.assertEqual(s.id, s2.id)
        self.assertEqual(r.id, r2.id)
        self.assertEqual(s.y, s2.y)
        self.assertEqual(s.x, s2.x)
        self.assertEqual(r.width, r2.width)
        self.assertEqual(s.size, s2.size)

    def test_read_from_file(self):
        """tests the base class method read from file, for use in
        -> Rectangle and Square
        """
        r1 = Rectangle(10, 7, 2, 8)
        r2 = Rectangle(2, 4)
        list_rectangles_input = [r1, r2]
        Rectangle.save_to_file(list_rectangles_input)
        list_rectangles_output = Rectangle.load_from_file()
        self.assertEqual(list_rectangles_output[0].y, 8)
        self.assertEqual(list_rectangles_output[1].height, 4)

    def test_read_from_file_basic(self):
        """tests the base class method to read from json files when
        -> input is basic
        """
        r1 = Rectangle(10, 7, 8, 3, 44)
        r2 = Rectangle(24, 23, 5, 1, 99)
        Rectangle.save_to_file([r1, r2])
        with open('Rectangle.json', 'r', encoding='utf-8') as myFile:
            text = myFile.read()
        rects = Rectangle.load_from_file()
        self.assertEqual(rects[0].width, 10)
        self.assertEqual(rects[1].id, 99)
        self.assertEqual(rects[1].x, 5)

    def test_read_from_file_empty(self):
        """tests the base class method to read from json files when
        -> empty
        """
        try:
            os.remove('Square.json')
        except:
            pass
        list_output = Square.load_from_file()
        self.assertEqual(len(list_output), 0)
        self.assertEqual(list, type(list_output))

    def test_write_csv_basic(self):
        """tests the base class method to write instances as csv
        """
        r1 = Rectangle(10, 7, 2, 8, 33)
        r2 = Rectangle(10, 8, 4, 9, 44)
        Rectangle.save_to_file_csv([r1, r2])
        with open('Rectangle.csv', 'r', encoding='utf-8') as myFile:
            text = myFile.readlines()
        self.assertEqual(text[0][0] + text[0][1], "33")
        self.assertEqual(text[1][0] + text[1][1], "44")

    def test_write_csv_complex(self):
        """tests the base class method to write instances as csv
        -> with bad input etc
        """
        r1 = Rectangle(10, 7, 2, 4, 33)
        r2 = Rectangle(10, 8, 4, 9, 44)
        s1 = Square(10, 8, 4, 109)
        s2 = Square(11, 4, 3, 120)
        bs = ["bs", 42, True]
        more_bs = 45.34
        Rectangle.save_to_file_csv([bs, s1, s2, more_bs, r2, r1])
        with open('Rectangle.csv', 'r', encoding='utf-8') as myFile:
            text = myFile.readlines()
        self.assertEqual(text[0][0] + text[0][1] + text[0][2], "109")
        self.assertEqual(text[3][0] + text[3][1], "33")

    def test_read_csv_basic(self):
        """tests the base class method to read from csv
        -> basic input
        """
        r1 = Rectangle(10, 7, 2, 8)
        r2 = Rectangle(2, 4)
        list_rectangles_input = [r1, r2]
        Rectangle.save_to_file_csv(list_rectangles_input)
        list_output = Rectangle.load_from_file_csv()
        self.assertEqual(8, list_output[0].y)
        self.assertEqual(4, list_output[1].height)

    def test_read_csv_complex(self):
        """tests the base class method to read from csv
        -> complex input, can contain squares in rectangle file
        -> squares should be returned as rectangles
        """
        r1 = Rectangle(10, 7, 2, 8)
        s1 = Square(2, 4)
        list_rectangles_input = [r1, s1]
        Rectangle.save_to_file_csv(list_rectangles_input)
        list_output = Rectangle.load_from_file_csv()
        self.assertEqual(8, list_output[0].y)
        self.assertEqual(4, list_output[1].height)

    def test_read_csv_empty(self):
        try:
            os.remove('Square.csv')
        except:
            pass
        list_output = Square.load_from_file_csv()
        self.assertEqual(0, len(list_output))
        self.assertEqual(list, type(list_output))
avg_line_length: 35.060345 · max_line_length: 72 · alphanum_fraction: 0.57401
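The serialization flow these tests exercise (instance → dictionary → JSON string → dictionaries → new instance) can be summarised in a few lines. This is a sketch under the assumption that the same Holberton `models` package the tests import is on the path; the dimensions are made up:

```python
# Hypothetical round-trip using the same API the tests exercise
# (assumes the Holberton `models` package is importable).
from models.base import Base
from models.rectangle import Rectangle

r = Rectangle(10, 7, 2, 8, 99)                    # width, height, x, y, id
json_str = Base.to_json_string([r.to_dictionary()])
dicts = Base.from_json_string(json_str)           # back to a list of dicts
clone = Rectangle.create(**dicts[0])              # rebuild an instance from a dict
assert clone.to_dictionary() == r.to_dictionary()
```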
hexsha: a64ee290e673b2f4b2884de5491df958776104d8
size: 482 · ext: py · lang: Python
repo path: src/checkMonotone.py
repo: denghz/Probabilistic-Programming @ fa505a75c4558e507fd3effd2737c63537bfe50d
licenses: ["BSD-3-Clause"]
max_stars_count / max_issues_count / max_forks_count: null
content:
from wolframclient.evaluation.kernel.localsession import WolframLanguageSession
from nnDiff import Func, sys

if __name__ == "__main__":
    exp = Func(sys.argv[1:])
    # exp = Func(["D", "x", "x"])
    with WolframLanguageSession() as session:
        session.evaluate("Inv[zzz_] := 1/zzz")
        d = "D[" + str(exp.arg1) + ", " + str(exp.arg1) + "]"
        res = session.evaluate("Reduce[" + d + "<= 0 || " + d + " >= 0 ]")
        print(res, file=sys.stderr)
avg_line_length: 25.368421 · max_line_length: 79 · alphanum_fraction: 0.562241
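checkMonotone.py poses a monotonicity question as a Wolfram Language `Reduce` query over the symbolic derivative. A minimal standalone sketch of the same idea with an explicit expression string, assuming `wolframclient` and a local Wolfram Engine kernel are available (the expression and variable names are made up):

```python
# Hypothetical standalone check in the style of checkMonotone.py
# (assumes wolframclient and a local Wolfram Engine kernel are installed).
from wolframclient.evaluation import WolframLanguageSession

expr, var = "x^3 + x", "x"
with WolframLanguageSession() as session:
    d = "D[%s, %s]" % (expr, var)
    # Reduce describes where the derivative is nonnegative or nonpositive.
    res = session.evaluate("Reduce[%s >= 0 || %s <= 0, %s, Reals]" % (d, d, var))
    print(res)
```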
hexsha: f98de3c28feef6f0cda1372f0e7a7efd5f30a094
size: 60,545 · ext: py · lang: Python
repo path: webresume/frontend/views.py
repo: cmput401-fall2018/web-app-ci-cd-with-travis-ci-carrolji @ 75a7b828f7a47e32e9d9b500a57c2d3f0a62d0cb
licenses: ["MIT"]
max_stars_count: null · max_issues_count: 3 (issues events: 2020-02-11T23:15:05.000Z – 2021-06-10T20:50:21.000Z) · max_forks_count: null
content:
from django.shortcuts import render
from django.http import HttpResponse
import base64


def index(request):
    return render(request, 'index.html', {})


# Example for returning a file from an endpoint
def text_file(request):
    return HttpResponse("Hello World", content_type="text/plain")


# FIX THIS to return the content as:
image = base64.b64decode('/9j/4AAQSkZJRgABAQEASABIAAD...')  # base64-encoded JPEG payload, truncated in this extract
OeEUpRIwe6A1uF9/MOOOPSgyClPXqQYhw/X4NYgOExfz+QGDKOvGX3AS0UwH5X6iAPzWjTLCZZjR40aE/mpD2kLRB9jkIwvYCWhIfijJGA+xCe0Vt9rD91//xAAsEAACAgECBQIGAwEBAAAAAAAAAREhMUFREGFxgaEgkUBQscHR8DBg4fFw/9oACAEBAAE/EODJrikWalaWyWn+oXZf0adlU7FAShcA0BjaE34ZskUBS1myOCH/APmGxElo1J9xMARAJiDCCpqFw1+YuA1RJj+MGvXn169evXrxuAOVtzgGWR5SfFKB7MZdkOxn4TATgABgBhAiJXozaECAqQ5JcN4YDGkIMw9cGB9xpRlcJkxkphCUNj5g2iAmRqyxtXpGaJWTT6GIbjhhpJFcajGSmDiUND9Z/IAmR2LGqTJiEkPvsQyhDAmphyQobGqzD3IhrCBEPLfyBjVN1yWWEqGaf8iAt2aT6IjoJqklnc0SnTqzImyMur1ID4E2ib4haCT48igALiQ5RBAAAQZf4Ohf4AyAb/8AIRwxHwAApI2DAAAEqFMAr7EjQQzRohcrADACAt1+rBARcUtLgBS7EiIVAQADkhjt9goCYAMG1hH4BDMAAT6KakAAAPrSoObogQACazeAAAYCeY7sEhFhS0ALPLdBZeygAAcDJ5TKBwAA3HhJlQCAIRZ/UFQYAAApVOBxAgrADIWq6kKAAKE5R8vj3QJPJhmkk1BSMKAAAW3L6YACIBKU/TtpqlLhUnn5HTPnh3fmEgAOcw7DxCQDIiijiCAMUTtox6NACfZpAAEAALxIZgIA2UlmCEQAK8BW1uUnILB2ERAAdXoSdTmSDBBFn60/wYBDjABBIcGAEPEC5lyKABNODwJOBAfevWEEIAJJK4KMx1IObQYqwTWwEQQFJVzNCBIBML9BzXMalm6RsDI7j3YRSsMggDKY4pZ69gIEuPF/AlxAPeIznU0Gj2CQOyCIEAxsjY0RzmV1AHdq1gHAWA4xJyHAHhhhwNXSgRyAAFbYLgLQDVG2axOB2AgI0+OjEaesvcqEZwZqSRkAALqdlgeiAAtgLct8lAG8a5cNMscI34QAIyVpsZgDdB18ReCAAiBELalsBD+IAV36gCEAAyEEJAR+cR5BCIABMdhIQFEaqmICCAJOOqARwJsD8eBSEAIADx5ggBAAANDECIHgAAnhjx2AYBnGk13oCIFkCyDwEbiwBU5KsWADsOYW3ViUgI0jWDICmlc/01TGmTI4YyunCo3sEag5RNnRKF5NxoTWjTT7gHIKjpuK0hIQG3kRdmb8BCbyQCKRi/BHNtnVIjVoa0QvY6dipa/IWupEKU0LBk2NDr8nX5OvyT3nudXk6/J1+Tr8nX5OvydfkhIqXpxSwQZqLHkoHCsDqRhSwkkKNjqK0AEwEEcuACAjLHjdwgAAGfiLyg/gBQZHIXISRxCAWHX02AAAoZobSQcAAAnHCwEYAAI3VPdMkQC81bsX4CgTwBiGiOAAHsoU5JiCIDkiaApACQPvOIFVHMPHoj5JGCA21OWvQHKE5QnKE5QnKE5QnKE5QnKE5Qmo5RXs/Qy0MrxEysicAJVBphlTmUZaDy2O2NQX+DLWB17gAkUUOCADVD2QMtjzZNPZg4O3RpC4VHGfnblUiN2S4fj+j6E4puHAJKu2hASaqUaf0WIIGVGIWdBEpNxIKhAAPcjEhqn9FAmg3MFwZSCkH4AF8AQBAACGF45MTGOYf0UaDzKJ2FdA78TEAACEAQARurCQADQqbTcw5/0PQjIaSLDAWy36MHeyZMgAAESBRxe8An8BrqAGnuBpXxE/LGthUW884DcO3gbvuXFZgLQQeRlk5oswWDKClQLFJOoCUccmRaH2L+opgTvREbI58RgDbnZtUzUqBrwAAsBAqUVdjyuGvDQjVoQk2obfuHFRgIhobUjpER0xkxoAoui0H8eIkVIeWxchliEjUBKGVUlSIGpl8i1roQ/kj+4J/eH6+oCCEMDgp2SJlgHgHQ4bDyBfRACljDPWVCGkIkgfVe9VwMAADB71cpjQ8wAA3GGSYAxhAAB8zLIwlCDSADSBmfQThggEOoCGIF8dhECFgEiIFFoABn1TnBFCAAnfjqxQAIJUUpvyOdYAGxXjYLZdlZG1FdiC+QaByGwBgvHLj7CgB2njxE1ZCNREtGNqdQgAJHgIogrbqhh0dPwSJIPbA/aSRVh3YGgAPf8A72lBmoELh5xFwYgF3RXbwCASABmPLmkwBBAefGxXCHYvmFNubQpjkaAM0zxulqYykAAOatK0frQGQHfcXGBqrAGGSAUpdEYASSV8DvCgJiABCMqsUIAA6L9TT3qAFoDNcCb/ACLQnt7LCONhJ+BMeorb6TO49QypApFpMBs1AQIYs7ymwRqDcCHdKkaQBAJIP4JyggCAGTU7jVac5kAl0061dg7WE9mMejJjYjKgBOwAAxcl0KAAAAy+CIDQIJckmm0QuAIdGEqxkG/yNqhQ4JAevDxNAI3QdQ5OlQ+3QPGwtIYdRNgGNuV4SpoaADQZfWD6bItQoADtswQrNWwSG20H2176hKABjXKmGR1RLgAQGWTW4kloNvAdJwNSJZEgkhO6IsEsJj/DMVpugUhkaAIPhI9RABpkIEUXwIAHXBAx7MFgABLguS4QIAgFliVmgIAdsw/NgqIwEvoDUUQggo3MkOX8i/M4cOELApadTQqFMOmNqqdwZrnqnBEtnQoOQqAIUZF0EUCuLO/Sc8xQyJEISdC1atgI2DyAJJLAjBsI4KxLLgClgcQAREz9zCxB3gjuFhY0VVIpaSSNEZ0Jpj7VMCph1AJD5B4IPqIBO10aQCxfUCBend0YqAG4pDUaOtoEAKGNbAnpnaABPjFwMD9hgAgezgg5AADKzqordZENhIJq0Ku+dsgJXoJAMdLX0KiKoAjmipSwA0OIeZIoKHvcgiGG+eFAM4pBBQLSPHkJD/AUABbuuithkDKCQr7fJAI/IICJIO0EAaa3AArfrWe4Pkag2JiskQIM1KBAvWfER4JQMksrMkkCVnQPB0Bdi1NuliCBMD/ihxEkgFJM0qEPArUoMYVWN+fgZEtABIn0ORshuvkUqk3Jo/Am02ROQ8RMIT2tCjPCFxqBtbCdUJDTmtuDSeR4dWqLkEQkitO5Sb8jcldUG2Gb/kk9dBJv2ERZ59ELbjr8kfowlXQQIGqaYXt0ZwEAMSGQuk/xQQafik7qRQIA/tUAwjcRqAKDt8TUBBgPcTRKKtJL2PTQHVeYrxRjJKCDjAmm/OMBG0QTGT7xnjkQBjPsZQm6gFgjVwbCAZgABLBU/VCjCH1x6CeD0iSdJVVZMQNjEQUFWSSjCS1QNgHOt+pgDkCgwDhIemcZgIeDpAgcV7/MWYCSZ0CMMjGaRhPVl0fmCjC/hgwAoyre7YRICalPSNIbJPsLCSu5/atWb0IYDyzosyhoAEyRNkJm40pl4FwizWluRAbBLm76ZkiIb7/KdCaMLL+OpyAlMJKjV3OAFyGKw9X5O5EBjYEQog1cBgwABWCpYH0ARAQ
BZaDhh3HAEAD2gRktMGwTsphsU5Q4ABBABKx/SEwugIoBjiAkUAQIQY4WsKQmdSIYAQggmE1KWC2+EWfK3CZBSD7EeQUyABcsCTldfQA2EJph76QCA+BRVPeFVexgj9mIwGWBaj9pkoBI2Jn6IDVwCu4wERosCBAi6svXsjL9AWSDDkfQs78ASIpg8LBcd0ACvDj5ToIF+iBRBgSstVA1RHVrwAE7iRoxPYj7JIGwDZZc2kgaTmOqB3vTEOG0CwgBpc2qGNg3NIA2VEUZv1xOgqRlxhQWp68TD8HIAHANDNwAAACYkzAUCH5AAj1ZomxuoAHIooRkqBAEloogBAIh6rA6qmhIhP1LtMH0EwLM7bJMmAIGg8RA5EEAdtoizNXq0kZIAibLNWtURQAC62niNkg9lL3yMIgAA/8ABsyRgGQClI8wCdADg/gUN4dIJHcRm3dQCACP0GVpwKQGBZ7oRMOIAFsLQlp/pIAQs3lafKq6E9xUnMjUdUJZi2TPsxIbfI0yTrwjJAi+MVZIbxicDtrbaISgSkaoquNjrxSMgHrhbelpy+hEz0NPRFyZYsELRnM2HJiMGyUkSpwMTJzoPVtZTRz4xkO5GnBq1yQ8rOP5kvIIu3EB7AAfUr8QsBR9gAzsAIkBCheqGAQhdwdfOAmx0/4BcwSLxBIA8JsmTYSkiyQwB+oVAvFBJHeH/ZCUsCkihry1sQDJNh+IKAGQAQlgt5pAbgtQGk+JBP8AEAkP1UpxEgSClBiX1nImxEtGCHIDAyAW2q8eMp38AcJJ4zwlbk8yb4Nw2J/G/NaHmBABNNK1wlbk2LuZwCQ5gESAKD9BSSY8pYawJGAGRfVhjuKiFhhKrAGGGmX1AJ8qJdgI26YCbfKbwLNGsAG0pEjd2xsvuSoAMI8aQIAH4A0T/EAbVU4SUAO4ZsQToQCwCORUoKQoBkUFlxEIi2AP+mAY+SA8Ctw5pHaIPSCLE3wN07L0etwHACbJW5K3RMoaXWeU8Ohds6jfrieOABvLhPP1ZDu50x+ACARBvBjsGYGi24CAIoVQowU5wLO5ygsawrRSQh1Ang5s/TgVqToxALiUZSUrLgGlI8lKAE4rKOWapMZHY8nCti6D5uHDohOIgABi1CKF1HFDA7HT1Q30UQh3pJ5KJQE8rcQIX+AMAa9nspwAAY1t3o8ggjhP3q7snUBDeV3AsNAQxoBty3ZCzVHMAyptvSG6KV0ACqVFMuk3hqORwKA78oIJoDD+3VJ0wBAA0f8AUCUYQA5WzxyYAECA+E0Ly+5IAI2ykPFwRBuPAADTgIgJAB0z+u2ACCAvacZYoYcjWBQYJz0WuFECMAiMsfeIEAABfYVb4xmGaGSMg1LMqjA9VMiCwM16BQS2CE+TncjAIXU8jBSADkKSo1Opc70iDAIKu8lLVQsAv4sRGQmrIEHBWWX96cgWm9JxNyYIACAQceUYAnQABtIAYgAD0H5gv+RwBRpMWWl6mwFAAhXk7JUjzgABAD4SzhB4gbgCQXE6ZJAADN9IBIigAAGS7lSCt1E1CQz9ok1Y+wnCAEjQhAgAADyS81Uq6BArbL2KgCq67BT52ABznOBAAB4W3Kn6go3rCCM4gRG69OgopDgIEEANq1iBoYAEgpPgWypEx9gTbeschAlDl9n8OYYAL60EoAAOgFlgiCAD7sTe1zEABBGZREHcAIut1fYCgACIllIYw8wLNcENA2AAWLHVe1QSA1sJaMXQ05GIHGpIxXUXEOYAH2DltVABoDXrTwoNCd0aECpNkMIAELoySIETzAFa4AggkC18VYCAgEYEtyJCACC2eUesgBOQAcv3IByWW4AA2T9GCCgTAAkPcnBEYvYAAOZQV1IZgKKEmtVRcw5gAdmUopTdyDkAUq4JAGQggAGjr6D9iBABNCT29BUDUIDeWnQ8zgAAHUTLPGugAAnV7DGTjFkBkA6K8KSSQAEHVXQrigGXJgqgBkSX8xHcXtwyuq8QgEFxA5DIJkBZCy0MO2bWTAABTEe5iJAOdIdVHZwBtfqF5BAbC5KUFDXIPgCyUhAfkAgGpA04aGQQlQaVAIAOZnQn4a5hhgDAAhGGquWG4AAh0z1pIDAgBoYDyBGLOwIQLo8l4w5I1A6L96NCiXARgHyAUQAFxOBGPLCYmIgu3XygAGVgAvbiWdoJan7i/MmVYlCAChq4EEABiY4aRAieYA3fVJGrAhpqGaemOEUIsjg1VCULi1Shq/54WxHrj1R6YWy9MfEt2LL2HEAYCeDli/8ArIiUBvYnmaDEZg87BV9wTK3+fZQ/XDoL+QDQvpivyAESJ7BAZsOl7HvASvsBQHkACQ6OGVqIBAAUn+jA6FAEB188swMg9IAAVR1JAJ/0ALgXN88xDAB0fwiziUH7wyFFL0NPnr4tf/fgYABZXtTFbCG4RNk0+h3HEK4c00AiqOXVf2CQAgvDejDAgBDoFIYyicc4H7AAeH9JpfQCCp0OAlVvn7TmuObXALAIk5gRpZFIkOEQKICO/g8wCABUlCGeSZNBuqoiD99aJAwMhbfP4X5BxuMO2e+oDBK3F3MjEMTK4IM3bYOtQjidDOoDTg2ktBVf9cEvYQIrhUpHUelgUVGAn04YKi4rTsWT1E+jDFF0GGQNqEabYRMekAM3d7iXcgxLeglJrxzd3uJdyDEt6CUmvyBRu8q4AOlzAAO3LOqIE6MsAAuF7bBP3AABKI+HIAyEKHNIJeMgXkAQACa6OPoaw6cO1KZBAB7hAQ1nGQb/AHIHvpcsRAQAATh5JvkIAEHIGgLhDsXzEeXYAqOjIFJFZHBZIHh+9Z/4Al7xRAilFeoA39zeaAUWRgCN64gE7A57iWBUnEPAWEBDAEsK001oJJMm5BxgAFVLIWGrAfgQWs+oAIaVjFWQNFeTZ2qK3CAgAMJ4REFFQcgYkCet/IMFgADMtipcdoOQQbDRevjCgADoQlaHACWLmgg0N6pzEB7go5MrC8cIcLAppiGpZpBkQARwEihZuSYhIZCdGcL9KG+xjsERcU/BSAHQn4bVY5gvcgKVDxAUOgQWGljWq19UchgGwJ2aKCHFsjkn4bVY5gvcgKVDxAUOkQWGljWQSnUchgGwTFzAANjYgAI9ufCLgANPQ9IIMpm04G9UsvfEAAV5c0CR8VQQAL6IbdDUlZyQBn7ccSUAxwCB4gqACAEJUn95CUncAIZPrwDAAwELh6SBLHoaCuFDesGAICVoxLJNbntzgzooCGwDNohQYBOhATIDwDQBoJITuiAVKaD0DVp7IB7q6mxXdzCAAALIs1HjgQAByY/jgiQB8y7xIOzgBR1tsKgwJJGQH1SmCiBoAAAgIpp0gM0IpBA5Ylp7vkEAh7sAvoRDQEc3YROYj7hzYlNgxiw1AChVTayyDKYJxm5g42yMMM38cPSQahgWi5QhjSOoAFtptY8iPqAeRlz9IRL6yEOJRRqX2BO5K/mbKX7ICAQCY7T9MFDn6AiX1kIcSijUvsCdyX5vWUv3QECIZjnCGf7Mk8eKEhjAaUmDKkCDblWhtMgQhSOXxQSWQe
K7gwMixqoiKZEf8AoIh0F2l0REDJbsxQ8KADWGtHIEBSAPyVAs40xhBopMm8aiok1EUAznsBozEH/KFQAjBYJRBnSzGtARV/JvWKAQUCQwVS0KpUACEqQrMDdEVAFVFtgeAAB0AnJ2Z4ROUmwSAhNOggDBW1YKUEAv7gBAg2D9HIkFMw7nOESSYVFwHB2gZuAEjfg9ZOwATTNOAoT7AGnUNORVOCJKSWxMGagbsayM5t5WwXUBiGDiBsSYAoBDHMEqQEn9AJBA+fqdEDBBu3M4eQEEczwQwwA5CHg1ZKktQRsJDSA5goIbgqX9zTKMRAcACwTgqAAAACA07UwqZ6gzkOmNgDyAYpqwUhPYCFAR/LAoqEABGDjyUAwi0HYl0xsAeQDFNWCkJ7AQoCKmH81UIACyGZxjBiw0BMsZmBOIAo2RDywA2CVjfCNLQ44DuwLl9SjiklkDT4lOR4EWaqlMZND1AnBncHnA9a9DfBV8FEwJalS+EqTKMGWdCL7GkG5SRTQ8C1Q4IrcuP8MoocK1SHM8qGaIeUOU3KfZL6wFamTYpLjqUkNSRgOL2SJ5aFQ5FS+fxOgsC7mU9APC/NDvtM9AQbuMHAjUHkH9GewkwU8rZCDcJy4jCIEAHC4gRAAAGn9FW5QkpdESSuHFuABhICAABGUMIAFROJgQJ0GijWTK/ojbhLcdBN2Pgpsejgf8CAAE7oZZmOahbSURhf8Ahzcbczc0vhHr06dOnXr169evXr169evXprCGwfHw9evXr169enXr069enSp069evXr06devTr06t+DU6VOnXr169evXr169evXr1igSi5cVaFWSBQgEKnt3qFEBoGjvSw8AMBDZHdMYAIJdWLPciQBAz8F9A0A5gEKEWkhSXIAJPFT/89QIBgWFwYQhna1DOPcKZRGwwJAGJqmYYZgAAJv0rEwCDYiDFdlaSZydAvS87wmgaRliDD0OwQLtVzEPfc6BMQPybyAhZJoJQRtkJDerphZzAdyNP69BGAsJQGaDZ0RiEVbpACty9AmHypzqAC3CIjlBLsaA6xHcKEBpSzAbB3CPP1PxoshqJGgCSHQc3AGn+aEIfPIGPi0KJ/u3JAQB+kDYEBYCTM01ZBMWrYJOwNWIRUjaI1AYq4s11Zach9W3K2CgAAaDOOIH6EA2cEtSgAgNRX/3TBCuYQdtPAEuecN21ER5MIUDx0NEswJsRrsPPwW1q2AQi30EM4ogsIiM8ptj3A0QXpImsmRoAFDDsZRqBqhQBYdmzaJQk/ooDQVgQpQCDaghYXBqvlcuuhaBMgT44gor/AIAdXqOMdijcn0CEXJBEgBqE6jX0+d4TWr6CEGVr2XdxgDVQQSFQABORAdgg2Wch6JAABAABoAwgAAAdh4SgCA/1wmyUDNAOOCggKEAAGwDAIAAAfCO0q6qZWwWnoARDAAJFCAAAAqoxCgCRTLqMHECAhgAAAIAgIAE+BADAmNOk2DjkpQQAAQEAAABMo6BHogxVAADAMgIAAOAB3ZAAQAB9yFuIkJIJAAECAgAFAABAuOuoikowR6EnOgggAWAoEgASKWAIAxliMIsNV8C8MQ0jZTgTlRnkDVF1AyNOxew38eyQoAIrOEHH9hIE6x8eUF0PABrLW6DHypj0mqxYaPUKyGRzOleT6iCCDdK7I0x8/UcmKj3/AOZ7gOxuLokV/wCRsbupdXXX6IoK1Po87wlC5MrAIAM0sQAC0AAQMwoADyQEHkNBOb44pIgwAgAWAkAEAIBwQwgAAdiXvgjeH0HLirVEgEBAACUAIAAAAAgTjaXM4S7RAY4HxggAANhBAAgAEJQAIQMHLRBiHoCgAgQAAIEDAAAbSOBArs5DuTg2QMAAkAEgAEio4kP9gDgcDoDAADxKgQIgABqh5wMVk35ABqabFycASxVAAHAwoAEACIwCSKBAQzzgALAUBCBAAAQBTE9IZwDuIhyv4F4Ll7u9xNeGfGpVawZmoDsA4xcl6Kj7RAcEoiYw0IJ3AvDW2wnYIgFUQPjwC5Ho0G9GLuMvQdRG4kKxICcIH3E8LuGc5CEYDECT0HvNohIOAiHFkG2JVp3Ni/NM6S0zL/SBYejzvCS4MkMMLVsjpCj2cxYAYdHmCfCiEADABJpBDwJQAA+RA5gQA0QCCypk2gODhYkh4AdKSiCEQAFpWMJh9AEGXJEl/wBxgEABKkSDQBbSAcuGAJGANZLJCfMu8Cueip+hCgdDtgAAmMKW3CrJobAZBr2tkFzAFfAlJkB6EoqNEJCkoMONAAAPiQm5gIIEOoQKAQzJbKtJLUfcGP18hIAF4vdXEwIAirJpsOQpacWkwNOKCh7BngQgweBAEviUAggUvU+RPECBOfjYWyIUzF/wed+azfkEsX9GaaaYYYaYaaaaaYaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaKYFQkWOPc14zmpHnmZRkes8G8ZHhGr5xgJ2qOHNYIPE5gIASgEAEpIzGQoAAMFkfTkZlkteQh0+BHvMFFkIGTxAwASUw+8GQ3kLTAClzH2Af6Vw8DDAAAaUpr6lQDAWOAO4ZBAAAQdgEngQIAJC0NVyYGEJ3AB608OCHUgEAaCR5D2NECATX2lAAiDgAETOWQhugNZ3+cMqq1GZsAkAGkws7QCAkWBdugtSErOoAFSz5JHQROwARHBUYByADIQQdqMAWAcad4fUdhNSkai6MCjIKQ7ByMpanYuOO0BgDECQNuEpTEF0H4AR0WjiYKrBEMgQWiV5zeQQAC0RAoo/QgCw3G10BiszcwAqLtQTAUewaGXrJIMTjAAHz0ISFBKmP6JDj+GUxQlT/BkYW/zR52TE6N+30FoQlCEFpTiRbp+vQiTto0KYSxKLyZIjyMJwI3iAaKADQaerNDhAAPwsAApyrl5FQAn102uGGGX3GlODXFK0i4yuUdZb2DN/KME1Cgn+0vjGkaBD0wK63dQ8hWvWvozr2QtAJ3nRdjBA7dsTawAivXY0Vq0Do+GIIQF0Rs6h1Y05letsblCMrEYHNK0I97ewknLDIOh8pA6hDuUR/BD0If8AGm5h/wAGX/cv/9k=')
# Hint: content_type is a MIME type and it should be "image/jpeg"
# The content is a base64 encoded image, one way of storing images in a database
def image_file(request):
return HttpResponse(image, content_type="text/plain")
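# Editor's note: the lines below are an added, hedged sketch and were not part of
# the original file. Per the hint above, a browser-renderable response would decode
# the stored base64 string and serve the raw bytes as "image/jpeg". This assumes
# the module-level `image` variable holds the base64 string defined above; the
# helper name image_file_jpeg is hypothetical.
import base64
from django.http import HttpResponse
def image_file_jpeg(request):
    # Decode the base64 payload so the browser can render it as a JPEG image.
    return HttpResponse(base64.b64decode(image), content_type="image/jpeg")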
| 3,027.25
| 59,988
| 0.969708
|
3fca25f07a105f4a0e4fb1ae27d883ae133186a7
| 1,138
|
py
|
Python
|
src/addresses/migrations/0001_initial.py
|
utshab-1603022/Ecommerce_site
|
34c1bda73879f841dd77eb0b8b17a5edb9a7c6a3
|
[
"bzip2-1.0.6"
] | 2
|
2020-07-29T15:05:13.000Z
|
2020-07-29T16:17:01.000Z
|
src/addresses/migrations/0001_initial.py
|
utshabkg/Ecommerce_site_Django
|
34c1bda73879f841dd77eb0b8b17a5edb9a7c6a3
|
[
"bzip2-1.0.6"
] | null | null | null |
src/addresses/migrations/0001_initial.py
|
utshabkg/Ecommerce_site_Django
|
34c1bda73879f841dd77eb0b8b17a5edb9a7c6a3
|
[
"bzip2-1.0.6"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-12-19 16:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('billing', '0002_auto_20191219_1620'),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address_type', models.CharField(max_length=120)),
('address_line_1', models.CharField(blank=True, max_length=120, null=True)),
('city', models.CharField(max_length=120)),
('country', models.CharField(default='Bangladesh', max_length=120)),
('state', models.CharField(max_length=120)),
('postal_code', models.CharField(max_length=120)),
('billing_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='billing.BillingProfile')),
],
),
]
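# Editor's note: added illustrative comment, not part of the original migration.
# The CreateModel operation above corresponds roughly to a model declaration that
# would normally live in src/addresses/models.py. Field options are inferred from
# the migration itself; BillingProfile is referenced lazily by app label so no
# import path has to be assumed:
#
#     from django.db import models
#
#     class Address(models.Model):
#         address_type = models.CharField(max_length=120)
#         address_line_1 = models.CharField(max_length=120, blank=True, null=True)
#         city = models.CharField(max_length=120)
#         country = models.CharField(max_length=120, default='Bangladesh')
#         state = models.CharField(max_length=120)
#         postal_code = models.CharField(max_length=120)
#         billing_profile = models.ForeignKey('billing.BillingProfile',
#                                             on_delete=models.CASCADE)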
| 35.5625
| 129
| 0.615993
|
044ec903197ba1ce318ef29acc25c0582eed0a0b
| 34,760
|
py
|
Python
|
Lib/test/test_java_integration.py
|
omunroe-com/jython
|
c86dbc315bd20fc649ae8d3d946e94c4f63fb32c
|
[
"CNRI-Jython"
] | null | null | null |
Lib/test/test_java_integration.py
|
omunroe-com/jython
|
c86dbc315bd20fc649ae8d3d946e94c4f63fb32c
|
[
"CNRI-Jython"
] | 1
|
2019-01-15T07:33:46.000Z
|
2019-01-15T07:33:46.000Z
|
Lib/test/test_java_integration.py
|
omunroe-com/jython
|
c86dbc315bd20fc649ae8d3d946e94c4f63fb32c
|
[
"CNRI-Jython"
] | null | null | null |
import copy
import glob
import importlib
import operator
import os
import os.path
import unittest
import shutil
import subprocess
import sys
import tempfile
import re
from collections import deque
from test import test_support
from distutils.spawn import find_executable
from java.lang import (
ClassCastException, ExceptionInInitializerError, UnsupportedOperationException,
String, Runnable, System, Runtime, Math, Byte)
from java.math import BigDecimal, BigInteger
from java.net import URI
from java.io import (ByteArrayInputStream, ByteArrayOutputStream, File, FileInputStream,
FileNotFoundException, FileOutputStream, FileWriter, ObjectInputStream,
ObjectOutputStream, OutputStreamWriter, UnsupportedEncodingException)
from java.util import ArrayList, Date, HashMap, Hashtable, StringTokenizer, Vector
from java.util.concurrent import Executors
from java.awt import Dimension, Color, Component, Container
from java.awt.event import ComponentEvent
from javax.swing.tree import TreePath
from javax.tools import SimpleJavaFileObject, JavaFileObject, ToolProvider
from org.python.core.util import FileUtil
from org.python.compiler import CustomMaker
from org.python.tests import (BeanImplementation, Child, Child2,
CustomizableMapHolder, Listenable, ToUnicode)
from org.python.tests.mro import (ConfusedOnGetitemAdd, FirstPredefinedGetitem, GetitemAdder)
from org.python.util import PythonInterpreter
import java
import org.python.core.Options
from javatests import Issue1833
from javatests.ProxyTests import NullToString, Person
from clamp import SerializableProxies
class InstantiationTest(unittest.TestCase):
def test_cant_instantiate_abstract(self):
self.assertRaises(TypeError, Component)
def test_no_public_constructors(self):
self.assertRaises(TypeError, Math)
def test_invalid_self_to_java_constructor(self):
self.assertRaises(TypeError, Color.__init__, 10, 10, 10)
def test_str_doesnt_coerce_to_int(self):
self.assertRaises(TypeError, Date, '99-01-01', 1, 1)
def test_class_in_failed_constructor(self):
try:
Dimension(123, 456, 789)
except TypeError, exc:
self.failUnless("java.awt.Dimension" in exc.message)
class BeanTest(unittest.TestCase):
def test_shared_names(self):
self.failUnless(callable(Vector.size),
'size method should be preferred to writeonly field')
def test_multiple_listeners(self):
'''Check that multiple BEP can be assigned to a single cast listener'''
m = Listenable()
called = []
def f(evt, called=called):
called.append(0)
m.componentShown = f
m.componentHidden = f
m.fireComponentShown(ComponentEvent(Container(), 0))
self.assertEquals(1, len(called))
m.fireComponentHidden(ComponentEvent(Container(), 0))
self.assertEquals(2, len(called))
def test_bean_interface(self):
b = BeanImplementation()
self.assertEquals("name", b.getName())
self.assertEquals("name", b.name)
# Tests for #610576
class SubBean(BeanImplementation):
def __init__(bself):
self.assertEquals("name", bself.getName())
SubBean()
def test_inheriting_half_bean(self):
c = Child()
self.assertEquals("blah", c.value)
c.value = "bleh"
self.assertEquals("bleh", c.value)
self.assertEquals(7, c.id)
c.id = 16
self.assertEquals(16, c.id)
def test_inheriting_half_bean_issue1333(self):
# http://bugs.jython.org/issue1333
c = Child2()
self.assertEquals("blah", c.value)
c.value = "bleh"
self.assertEquals("Child2 bleh", c.value)
def test_awt_hack(self):
# We ignore several deprecated methods in java.awt.* in favor of bean properties that were
        # added in Java 1.1. This tests that one of those bean properties is visible.
c = Container()
c.size = 400, 300
self.assertEquals(Dimension(400, 300), c.size)
class SysIntegrationTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
def tearDown(self):
sys.stdout = self.orig_stdout
def test_stdout_outputstream(self):
out = FileOutputStream(test_support.TESTFN)
sys.stdout = out
print 'hello',
out.close()
f = open(test_support.TESTFN)
self.assertEquals('hello', f.read())
f.close()
class IOTest(unittest.TestCase):
def test_io_errors(self):
"Check that IOException isn't mangled into an IOError"
self.assertRaises(UnsupportedEncodingException, OutputStreamWriter, System.out, "garbage")
def test_fileio_error(self):
self.assertRaises(FileNotFoundException, FileInputStream, "garbage")
def fileutil_is_helper(self, mode, expected):
old_linesep = System.setProperty("line.separator", "\r\n")
try:
inputstream = ByteArrayInputStream(bytearray('1\r\n2\r\n3\r\n'))
inputfile = FileUtil.wrap(inputstream, mode)
actual = inputfile.readlines()
inputfile.close()
self.assertEquals(expected, actual)
finally:
System.setProperty("line.separator", old_linesep)
def test_fileutil_wrap_inputstream(self):
self.fileutil_is_helper('r', ['1\n', '2\n', '3\n'])
def test_fileutil_wrap_inputstream_binary(self):
self.fileutil_is_helper('rb', ['1\r\n', '2\r\n', '3\r\n'])
def fileutil_os_helper(self, mode, expected):
old_linesep = System.setProperty("line.separator", "\r\n")
try:
outputstream = ByteArrayOutputStream()
outputfile = FileUtil.wrap(outputstream, mode)
outputfile.writelines(["1\n", "2\n", "3\n"])
outputfile.close()
self.assertEquals(bytearray(outputstream.toByteArray()), expected)
finally:
System.setProperty("line.separator", old_linesep)
def test_fileutil_wrap_outputstream_default_textmode(self):
self.fileutil_os_helper("w", bytearray("1\r\n2\r\n3\r\n"))
def test_fileutil_wrap_outputstream_binary(self):
self.fileutil_os_helper("wb", bytearray("1\n2\n3\n"))
def test_unsupported_tell(self):
fp = FileUtil.wrap(System.out)
self.assertRaises(IOError, fp.tell)
class JavaReservedNamesTest(unittest.TestCase):
"Access to reserved words"
def test_system_in(self):
s = System.in
self.assert_("method" in str(s.read))
def test_runtime_exec(self):
e = Runtime.getRuntime().exec
self.assert_(re.search("method .*exec", str(e)) is not None)
def test_byte_class(self):
b = Byte(10)
self.assert_("java.lang.Byte" in str(b.class))
class Keywords(object):
pass
Keywords.in = lambda self: "in"
Keywords.exec = lambda self: "exec"
Keywords.class = lambda self: "class"
Keywords.print = lambda self: "print"
Keywords.and = lambda self: "and"
Keywords.as = lambda self: "as"
Keywords.assert = lambda self: "assert"
Keywords.break = lambda self: "break"
Keywords.continue = lambda self: "continue"
Keywords.def = lambda self: "def"
Keywords.del = lambda self: "del"
Keywords.elif = lambda self: "elif"
Keywords.else = lambda self: "else"
Keywords.except = lambda self: "except"
Keywords.finally = lambda self: "finally"
Keywords.from = lambda self: "from"
Keywords.for = lambda self: "for"
Keywords.global = lambda self: "global"
Keywords.if = lambda self: "if"
Keywords.import = lambda self: "import"
Keywords.is = lambda self: "is"
Keywords.lambda = lambda self: "lambda"
Keywords.pass = lambda self: "pass"
Keywords.print = lambda self: "print"
Keywords.raise = lambda self: "raise"
Keywords.return = lambda self: "return"
Keywords.try = lambda self: "try"
Keywords.while = lambda self: "while"
Keywords.with = lambda self: "with"
Keywords.yield = lambda self: "yield"
class PyReservedNamesTest(unittest.TestCase):
"Access to reserved words"
def setUp(self):
self.kws = Keywords()
def test_in(self):
self.assertEquals(self.kws.in(), "in")
def test_exec(self):
self.assertEquals(self.kws.exec(), "exec")
def test_class(self):
self.assertEquals(self.kws.class(), "class")
def test_print(self):
self.assertEquals(self.kws.print(), "print")
def test_and(self):
self.assertEquals(self.kws.and(), "and")
def test_as(self):
self.assertEquals(self.kws.as(), "as")
def test_assert(self):
self.assertEquals(self.kws.assert(), "assert")
def test_break(self):
self.assertEquals(self.kws.break(), "break")
def test_continue(self):
self.assertEquals(self.kws.continue(), "continue")
def test_def(self):
self.assertEquals(self.kws.def(), "def")
def test_del(self):
self.assertEquals(self.kws.del(), "del")
def test_elif(self):
self.assertEquals(self.kws.elif(), "elif")
def test_else(self):
self.assertEquals(self.kws.else(), "else")
def test_except(self):
self.assertEquals(self.kws.except(), "except")
def test_finally(self):
self.assertEquals(self.kws.finally(), "finally")
def test_from(self):
self.assertEquals(self.kws.from(), "from")
def test_for(self):
self.assertEquals(self.kws.for(), "for")
def test_global(self):
self.assertEquals(self.kws.global(), "global")
def test_if(self):
self.assertEquals(self.kws.if(), "if")
def test_import(self):
self.assertEquals(self.kws.import(), "import")
def test_is(self):
self.assertEquals(self.kws.is(), "is")
def test_lambda(self):
self.assertEquals(self.kws.lambda(), "lambda")
def test_pass(self):
self.assertEquals(self.kws.pass(), "pass")
def test_print(self):
self.assertEquals(self.kws.print(), "print")
def test_raise(self):
self.assertEquals(self.kws.raise(), "raise")
def test_return(self):
self.assertEquals(self.kws.return(), "return")
def test_try(self):
self.assertEquals(self.kws.try(), "try")
def test_while(self):
self.assertEquals(self.kws.while(), "while")
def test_with(self):
self.assertEquals(self.kws.with(), "with")
def test_yield(self):
self.assertEquals(self.kws.yield(), "yield")
class ImportTest(unittest.TestCase):
def test_bad_input_exception(self):
self.assertRaises(ValueError, __import__, '')
def test_broken_static_initializer(self):
self.assertRaises(ExceptionInInitializerError, __import__, "org.python.tests.BadStaticInitializer")
class ColorTest(unittest.TestCase):
def test_assigning_over_method(self):
self.assertRaises(TypeError, setattr, Color.RED, "getRGB", 4)
def test_static_fields(self):
self.assertEquals(Color(255, 0, 0), Color.RED)
# The bean accessor for getRed should be active on instances, but the static field red
# should be visible on the class
self.assertEquals(255, Color.red.red)
self.assertEquals(Color(0, 0, 255), Color.blue)
def test_is_operator(self):
red = Color.red
self.assert_(red is red)
self.assert_(red is Color.red)
class TreePathTest(unittest.TestCase):
def test_overloading(self):
treePath = TreePath([1,2,3])
self.assertEquals(len(treePath.path), 3, "Object[] not passed correctly")
self.assertEquals(TreePath(treePath.path).path, treePath.path, "Object[] not passed and returned correctly")
class BigNumberTest(unittest.TestCase):
def test_coerced_bigdecimal(self):
from javatests import BigDecimalTest
x = BigDecimal("123.4321")
y = BigDecimalTest().asBigDecimal()
self.assertEqual(type(x), type(y), "BigDecimal coerced")
self.assertEqual(x, y, "coerced BigDecimal not equal to directly created version")
def test_biginteger_in_long(self):
'''Checks for #608628, that long can take a BigInteger in its constructor'''
ns = '10000000000'
self.assertEquals(ns, str(long(BigInteger(ns))))
class JavaStringTest(unittest.TestCase):
def test_string_not_iterable(self):
x = String('test')
self.assertRaises(TypeError, list, x)
class JavaDelegationTest(unittest.TestCase):
def test_list_delegation(self):
for c in ArrayList, Vector:
a = c()
self.assertRaises(IndexError, a.__getitem__, 0)
a.add("blah")
self.assertTrue("blah" in a)
self.assertEquals(1, len(a))
n = 0
for i in a:
n += 1
self.assertEquals("blah", i)
self.assertEquals(1, n)
self.assertEquals("blah", a[0])
a[0] = "bleh"
del a[0]
self.assertEquals(0, len(a))
def test_map_delegation(self):
m = HashMap()
m["a"] = "b"
self.assertTrue("a" in m)
self.assertEquals("b", m["a"])
n = 0
for k in m:
n += 1
self.assertEquals("a", k)
self.assertEquals(1, n)
del m["a"]
self.assertEquals(0, len(m))
def test_enumerable_delegation(self):
tokenizer = StringTokenizer('foo bar')
self.assertEquals(list(iter(tokenizer)), ['foo', 'bar'])
def test_vector_delegation(self):
class X(Runnable):
pass
v = Vector()
v.addElement(1)
        v.indexOf(X())  # Compares the Java object in the vector to a Python subclass
for i in v:
pass
def test_comparable_delegation(self):
first_file = File("a")
first_date = Date(100)
for a, b, c in [(first_file, File("b"), File("c")), (first_date, Date(1000), Date())]:
self.assertTrue(a.compareTo(b) < 0)
self.assertEquals(-1, cmp(a, b))
self.assertTrue(a.compareTo(c) < 0)
self.assertEquals(-1, cmp(a, c))
self.assertEquals(0, a.compareTo(a))
self.assertEquals(0, cmp(a, a))
self.assertTrue(b.compareTo(a) > 0)
self.assertEquals(1, cmp(b, a))
self.assertTrue(c.compareTo(b) > 0)
self.assertEquals(1, cmp(c, b))
self.assertTrue(a < b)
self.assertTrue(a <= a)
self.assertTrue(b > a)
self.assertTrue(c >= a)
self.assertTrue(a != b)
l = [b, c, a]
self.assertEquals(a, min(l))
self.assertEquals(c, max(l))
l.sort()
self.assertEquals([a, b, c], l)
        # Check that we fall back to the default comparison (class name) instead of using compareTo
# on non-Comparable types
self.assertRaises(ClassCastException, first_file.compareTo, first_date)
self.assertEquals(-1, cmp(first_file, first_date))
self.assertTrue(first_file < first_date)
self.assertTrue(first_file <= first_date)
self.assertTrue(first_date > first_file)
self.assertTrue(first_date >= first_file)
def test_equals(self):
# Test for bug #1338
a = range(5)
x = ArrayList()
x.addAll(a)
y = Vector()
y.addAll(a)
z = ArrayList()
z.addAll(range(1, 6))
self.assertTrue(x.equals(y))
self.assertEquals(x, y)
self.assertTrue(not (x != y))
self.assertTrue(not x.equals(z))
self.assertNotEquals(x, z)
self.assertTrue(not (x == z))
class SecurityManagerTest(unittest.TestCase):
def test_nonexistent_import_with_security(self):
script = test_support.findfile("import_nonexistent.py")
home = os.path.realpath(sys.prefix)
if not os.path.commonprefix((home, os.path.realpath(script))) == home:
# script must lie within python.home for this test to work
return
policy = test_support.findfile("python_home.policy")
self.assertEquals(
subprocess.call([sys.executable,
"-J-Dpython.cachedir.skip=true",
"-J-Djava.security.manager",
"-J-Djava.security.policy=%s" % policy, script]),
0)
def test_import_signal_fails_with_import_error_using_security(self):
policy = test_support.findfile("python_home.policy")
with self.assertRaises(subprocess.CalledProcessError) as cm:
subprocess.check_output(
[sys.executable,
"-J-Dpython.cachedir.skip=true",
"-J-Djava.security.manager",
"-J-Djava.security.policy=%s" % policy,
"-c", "import signal"],
stderr=subprocess.STDOUT)
self.assertIn(
'ImportError: signal module requires sun.misc.Signal, which is not allowed by your security profile',
cm.exception.output)
class JavaWrapperCustomizationTest(unittest.TestCase):
def tearDown(self):
CustomizableMapHolder.clearAdditions()
def test_adding_item_access(self):
m = CustomizableMapHolder()
self.assertRaises(TypeError, operator.getitem, m, "initial")
CustomizableMapHolder.addGetitem()
self.assertEquals(m.held["initial"], m["initial"])
# dict would throw a KeyError here, but Map returns null for a missing key
self.assertEquals(None, m["nonexistent"])
self.assertRaises(TypeError, operator.setitem, m, "initial")
CustomizableMapHolder.addSetitem()
m["initial"] = 12
self.assertEquals(12, m["initial"])
def test_adding_attributes(self):
m = CustomizableMapHolder()
self.assertRaises(AttributeError, getattr, m, "initial")
CustomizableMapHolder.addGetattribute()
self.assertEquals(7, m.held["initial"], "Existing fields should still be accessible")
self.assertEquals(7, m.initial)
self.assertEquals(None, m.nonexistent, "Nonexistent fields should be passed on to the Map")
def test_adding_on_interface(self):
GetitemAdder.addPredefined()
class UsesInterfaceMethod(FirstPredefinedGetitem):
pass
self.assertEquals("key", UsesInterfaceMethod()["key"])
def test_add_on_mro_conflict(self):
"""Adding same-named methods to Java classes with MRO conflicts produces TypeError"""
GetitemAdder.addPredefined()
self.assertRaises(TypeError, __import__, "org.python.tests.mro.ConfusedOnImport")
self.assertRaises(TypeError, GetitemAdder.addPostdefined)
def test_null_tostring(self):
# http://bugs.jython.org/issue1819
nts = NullToString()
self.assertEqual(repr(nts), '')
self.assertEqual(str(nts), '')
self.assertEqual(unicode(nts), '')
def test_diamond_inheritance_of_iterable_and_map(self):
"""Test deeply nested diamond inheritance of Iterable and Map, as see in some Clojure classes"""
# http://bugs.jython.org/issue1878
from javatests import DiamondIterableMapMRO # this will raise a TypeError re MRO conflict without the fix
# Verify the correct MRO is generated - order is of course *important*;
        # the types used below are implemented as empty interfaces/abstract classes, but they match the inheritance graph
# and naming of Clojure/Storm.
#
# Also instead of directly importing, which would cause annoying bloat in javatests by making lots of little files,
# just match using str - this will still be stable/robust.
self.assertEqual(
str(DiamondIterableMapMRO.__mro__),
"(<type 'javatests.DiamondIterableMapMRO'>, <type 'javatests.ILookup'>, <type 'javatests.IPersistentMap'>, <type 'java.lang.Iterable'>, <type 'javatests.Associative'>, <type 'javatests.IPersistentCollection'>, <type 'javatests.Seqable'>, <type 'javatests.Counted'>, <type 'java.util.Map'>, <type 'javatests.AFn'>, <type 'javatests.IFn'>, <type 'java.util.concurrent.Callable'>, <type 'java.lang.Runnable'>, <type 'java.lang.Object'>, <type 'object'>)")
# And usable with __iter__ and map functionality
m = DiamondIterableMapMRO()
m["abc"] = 42
m["xyz"] = 47
self.assertEqual(set(m), set(["abc", "xyz"]))
self.assertEqual(m["abc"], 42)
def roundtrip_serialization(obj):
"""Returns a deep copy of an object, via serializing it
see http://weblogs.java.net/blog/emcmanus/archive/2007/04/cloning_java_ob.html
"""
output = ByteArrayOutputStream()
serializer = CloneOutput(output)
serializer.writeObject(obj)
serializer.close()
input = ByteArrayInputStream(output.toByteArray())
unserializer = CloneInput(input, serializer) # to get the list of classes seen, in order
return unserializer.readObject()
class CloneOutput(ObjectOutputStream):
def __init__(self, output):
ObjectOutputStream.__init__(self, output)
self.classQueue = deque()
def annotateClass(self, c):
self.classQueue.append(c)
def annotateProxyClass(self, c):
self.classQueue.append(c)
class CloneInput(ObjectInputStream):
def __init__(self, input, output):
ObjectInputStream.__init__(self, input)
self.output = output
def resolveClass(self, obj_stream_class):
return self.output.classQueue.popleft()
def resolveProxyClass(self, interfaceNames):
return self.output.classQueue.popleft()
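# Editor's note: added explanatory comment, not part of the original module.
# Minimal use of the helpers above (the SerializationTest cases below exercise them
# for real): roundtrip_serialization([Date(), Date()]) returns a deep copy produced
# purely through Java serialization. CloneInput resolves classes in the exact order
# CloneOutput recorded them, so the same Class objects are reused instead of being
# looked up by name during deserialization.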
def find_jython_jars():
# Uses the same classpath resolution as bin/jython
jython_bin = os.path.normpath(os.path.dirname(sys.executable))
jython_top = os.path.dirname(jython_bin)
jython_jar_path = os.path.join(jython_top, 'jython.jar')
jython_jar_dev_path = os.path.join(jython_top, 'jython-dev.jar')
if os.path.exists(jython_jar_dev_path):
jars = [jython_jar_dev_path]
jars.extend(glob.glob(os.path.join(jython_top, 'javalib', '*.jar')))
elif os.path.exists(jython_jar_path):
jars = [jython_jar_path]
else:
raise Exception("Cannot find jython jar")
return jars
class JavaSource(SimpleJavaFileObject):
def __init__(self, name, source):
self._name = name
self._source = source
SimpleJavaFileObject.__init__(
self,
URI.create("string:///" + name.replace(".", "/") + JavaFileObject.Kind.SOURCE.extension),
JavaFileObject.Kind.SOURCE)
def getName(self):
return self._name
def getCharContent(self, ignore):
return self._source
@unittest.skipIf(ToolProvider.getSystemJavaCompiler() is None,
"No Java compiler available. Is JAVA_HOME pointing to a JDK?")
def compile_java_source(options, class_name, source):
"""Compiles a single java source "file" contained in the string source
Use options, specifically -d DESTDIR, to control where the class
file is emitted. Note that we could use forwarding managers to
avoid tempdirs, but this is overkill here given that we still need
to run the emitted Java class.
"""
f = JavaSource(class_name, source)
compiler = ToolProvider.getSystemJavaCompiler()
task = compiler.getTask(None, None, None, options, None, [f])
task.call()
class SerializationTest(unittest.TestCase):
def test_java_serialization(self):
date_list = [Date(), Date()]
self.assertEqual(date_list, roundtrip_serialization(date_list))
def test_java_serialization_pycode(self):
def universal_answer():
return 42
serialized_code = roundtrip_serialization(universal_answer.func_code)
self.assertEqual(eval(serialized_code), universal_answer())
def test_java_serialization_pyfunction(self):
# Not directly supported due to lack of general utility
# (globals will usually be in the function object in
# func_globals), and problems with unserialization
# vulnerabilities. Users can always subclass from PyFunction
# for specific cases, as seen in PyCascading
import new
def f():
return 6 * 7 + max(0, 1, 2)
# However, using the new module, it's possible to create a
# function with no globals, which means the globals will come
# from the current context
g = new.function(f.func_code, {}, "g")
# But still forbid Java deserialization of this function
# object. Use pickling or other support instead.
with self.assertRaises(UnsupportedOperationException):
roundtrip_serialization(g)
def test_builtin_names(self):
import __builtin__
names = [x for x in dir(__builtin__)]
self.assertEqual(names, roundtrip_serialization(names))
@unittest.skipUnless(find_executable('jar'), 'Need the jar command to run')
def test_proxy_serialization(self):
# Proxies can be deserializable in a fresh JVM, including being able
# to "findPython" to get a PySystemState.
# tempdir gets combined with unicode paths derived from class names,
# so make it a unicode object.
tempdir = tempfile.mkdtemp().decode(sys.getfilesystemencoding())
old_proxy_debug_dir = org.python.core.Options.proxyDebugDirectory
try:
# Generate a proxy for Cat class;
org.python.core.Options.proxyDebugDirectory = tempdir
from pounce import Cat
cat = Cat()
self.assertEqual(cat.whoami(), "Socks")
# Create a jar file containing the Cat proxy; could use Java to do this; do it the easy way for now
proxies_jar_path = os.path.join(tempdir, "proxies.jar")
subprocess.check_call(["jar", "cf", proxies_jar_path, "-C", tempdir,
"org" + os.path.sep])
# Serialize our cat
output = ByteArrayOutputStream()
serializer = CloneOutput(output)
serializer.writeObject(cat)
serializer.close()
cat_path = os.path.join(tempdir, "serialized-cat")
with open(cat_path, "wb") as f:
f.write(output.toByteArray())
# Then in a completely different JVM running
# ProxyDeserialization, verify we get "meow" printed to
# stdout, which in turn ensures that PySystemState (and
# Jython runtime) is initialized for the proxy
jars = find_jython_jars()
jars.append(proxies_jar_path)
classpath = os.pathsep.join(jars)
cmd = [os.path.join(System.getProperty("java.home"), "bin", "java"),
"-Dpython.path=" + os.path.dirname(__file__),
"-classpath", classpath,
"javatests.ProxyDeserialization",
cat_path]
self.assertEqual(subprocess.check_output(cmd, universal_newlines=True), "meow\n")
finally:
org.python.core.Options.proxyDebugDirectory = old_proxy_debug_dir
shutil.rmtree(tempdir)
@unittest.skipUnless(find_executable('jar'), 'Need the jar command to run')
def test_custom_proxymaker(self):
# Verify custom proxymaker supports direct usage of Python code in Java
# tempdir gets combined with unicode paths derived from class names,
# so make it a unicode object.
tempdir = tempfile.mkdtemp().decode(sys.getfilesystemencoding())
try:
SerializableProxies.serialized_path = tempdir
import bark
dog = bark.Dog()
self.assertEqual(dog.whoami(), "Rover")
self.assertEqual(dog.serialVersionUID, 1)
self.assertEqual(dog, roundtrip_serialization(dog))
# Create a jar file containing the org.python.test.Dog proxy
proxies_jar_path = os.path.join(tempdir, "proxies.jar")
subprocess.check_call(["jar", "cf", proxies_jar_path, "-C", tempdir,
"org" + os.path.sep])
# Build a Java class importing Dog
source = """
import org.python.test.bark.Dog; // yes, it's that simple
public class BarkTheDog {
public static void main(String[] args) {
Dog dog = new Dog();
try {
Boolean b = (Boolean)(dog.call());
if (!b) {
throw new RuntimeException("Expected site module to be imported");
}
}
catch(Exception e) {
System.err.println(e);
}
}
}
"""
jars = find_jython_jars()
jars.append(proxies_jar_path)
classpath = os.pathsep.join(jars)
compile_java_source(
["-classpath", classpath, "-d", tempdir],
"BarkTheDog", source)
# Then in a completely different JVM running our
# BarkTheDog code, verify we get an appropriate bark
# message printed to stdout, which in turn ensures that
# PySystemState (and Jython runtime) is initialized for
# the proxy
classpath += os.pathsep + tempdir
cmd = [os.path.join(System.getProperty("java.home"), "bin", "java"),
"-Dpython.path=" + os.path.dirname(__file__),
"-classpath", classpath, "BarkTheDog"]
self.assertRegexpMatches(
subprocess.check_output(cmd, universal_newlines=True,
stderr=subprocess.STDOUT),
r"^Class defined on CLASSPATH <type 'org.python.test.bark.Dog'>\n"
"Rover barks 42 times$")
finally:
pass
# print "Will not remove", tempdir
#shutil.rmtree(tempdir)
class CopyTest(unittest.TestCase):
def test_copy(self):
fruits = ArrayList(["apple", "banana"])
fruits_copy = copy.copy(fruits)
self.assertEqual(fruits, fruits_copy)
self.assertNotEqual(id(fruits), id(fruits_copy))
def test_deepcopy(self):
items = ArrayList([ArrayList(["apple", "banana"]),
ArrayList(["trs80", "vic20"])])
items_copy = copy.deepcopy(items)
self.assertEqual(items, items_copy)
self.assertNotEqual(id(items), id(items_copy))
self.assertNotEqual(id(items[0]), id(items_copy[0]))
self.assertNotEqual(id(items[1]), id(items_copy[1]))
def test_copy_when_not_cloneable(self):
bdfl = Person("Guido", "von Rossum")
self.assertRaises(TypeError, copy.copy, bdfl)
# monkeypatching in a __copy__ should now work
Person.__copy__ = lambda p: Person(p.firstName, p.lastName)
copy_bdfl = copy.copy(bdfl)
self.assertEqual(str(bdfl), str(copy_bdfl))
def test_copy_when_not_serializable(self):
bdfl = Person("Guido", "von Rossum")
self.assertRaises(TypeError, copy.deepcopy, bdfl)
# monkeypatching in a __deepcopy__ should now work
Person.__deepcopy__ = lambda p, memo: Person(p.firstName, p.lastName)
copy_bdfl = copy.deepcopy(bdfl)
self.assertEqual(str(bdfl), str(copy_bdfl))
def test_immutable(self):
abc = String("abc")
abc_copy = copy.copy(abc)
self.assertEqual(id(abc), id(abc_copy))
fruits = ArrayList([String("apple"), String("banana")])
fruits_copy = copy.copy(fruits)
self.assertEqual(fruits, fruits_copy)
self.assertNotEqual(id(fruits), id(fruits_copy))
class UnicodeTest(unittest.TestCase):
def test_unicode_conversion(self):
test = unicode(ToUnicode())
self.assertEqual(type(test), unicode)
self.assertEqual(test, u"Circle is 360\u00B0")
class BeanPropertyTest(unittest.TestCase):
def test_issue1833(self):
class TargetClass(object):
def _getattribute(self):
return self.__attribute
def _setattribute(self, value):
self.__attribute = value
attribute = property(_getattribute, _setattribute)
target = TargetClass()
test = Issue1833(target=target)
value = ('bleh', 'blah')
test.value = value
self.assertEqual(target.attribute, value)
class WrappedUp(object):
def __init__(self):
self.data = list()
def doit(self):
self.data.append(42)
class CallableObject(object):
def __init__(self):
self.data = list()
def __call__(self):
self.data.append(42)
class SingleMethodInterfaceTest(unittest.TestCase):
def setUp(self):
self.executor = Executors.newSingleThreadExecutor()
def tearDown(self):
self.executor.shutdown()
def test_function(self):
x = list()
def f():
x.append(42)
future = self.executor.submit(f)
future.get()
self.assertEqual(x, [42])
@unittest.skip("FIXME: not working; see http://bugs.jython.org/issue2115")
def test_callable_object(self):
callable_obj = CallableObject()
future = self.executor.submit(callable_obj)
future.get()
self.assertEqual(callable_obj.data, [42])
def test_bound_method(self):
obj = WrappedUp()
future = self.executor.submit(obj.doit)
future.get()
self.assertEqual(obj.data, [42])
def test_unbound_method(self):
with self.assertRaises(TypeError) as exc:
future = self.executor.submit(WrappedUp.doit) # not a bound method
self.assertIsInstance(
exc.exception, TypeError,
"submit(): 1st arg can't be coerced to java.util.concurrent.Callable, java.lang.Runnable")
def test_some_noncallable_object(self):
obj = WrappedUp()
with self.assertRaises(TypeError) as exc:
future = self.executor.submit(obj)
self.assertIsInstance(
exc.exception, TypeError,
"submit(): 1st arg can't be coerced to java.util.concurrent.Callable, java.lang.Runnable")
def test_main():
test_support.run_unittest(
BeanPropertyTest,
BeanTest,
BigNumberTest,
ColorTest,
CopyTest,
IOTest,
ImportTest,
InstantiationTest,
JavaDelegationTest,
JavaReservedNamesTest,
JavaStringTest,
JavaWrapperCustomizationTest,
PyReservedNamesTest,
SecurityManagerTest,
SerializationTest,
SysIntegrationTest,
TreePathTest,
UnicodeTest,
SingleMethodInterfaceTest,
)
if __name__ == "__main__":
test_main()
| 36.170656
| 464
| 0.640736
|
d2724955281fce5c2eac602ad7fdbfd9deca01ee
| 458
|
py
|
Python
|
Chap08/sets.py
|
RiddhiDamani/Python
|
06cba66aeafd9dc0fa849ec2112c0786a3e8f001
|
[
"MIT"
] | null | null | null |
Chap08/sets.py
|
RiddhiDamani/Python
|
06cba66aeafd9dc0fa849ec2112c0786a3e8f001
|
[
"MIT"
] | null | null | null |
Chap08/sets.py
|
RiddhiDamani/Python
|
06cba66aeafd9dc0fa849ec2112c0786a3e8f001
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
def main():
a = set("We're gonna need a bigger boat.")
b = set("I'm sorry, Dave. I'm afraid I can't do that.")
print_set(sorted(a))
print_set(sorted(b))
print_set(a - b)
print_set(a | b)
print_set(a ^ b)
print_set(a & b)
def print_set(o):
print('{', end='')
for x in o:
print(x, end='')
print('}')
if __name__ == '__main__':
main()
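# Editor's note: added clarifying comment, not part of the original file.
# The six print_set calls in main() show, in order: the two sorted source sets,
# then a - b (difference: characters in a but not in b), a | b (union),
# a ^ b (symmetric difference: characters in exactly one set) and
# a & b (intersection: characters in both). For example, in plain Python:
#     >>> sorted(set("ab") - set("bc"))
#     ['a']
#     >>> sorted(set("ab") & set("bc"))
#     ['b']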
| 18.32
| 59
| 0.554585
|
4722efdc678273ff6e6c59ad105761e7d7ca2dc7
| 6,592
|
py
|
Python
|
src/pymyinstall/setuphelper/ipython_helper.py
|
sdpython/pymyinstall
|
72b3a56a29def0694e34ccae910bf288a95cf4a5
|
[
"MIT"
] | 8
|
2015-08-24T21:01:49.000Z
|
2018-01-04T06:34:51.000Z
|
src/pymyinstall/setuphelper/ipython_helper.py
|
sdpython/pymyinstall
|
72b3a56a29def0694e34ccae910bf288a95cf4a5
|
[
"MIT"
] | 66
|
2015-06-14T22:04:58.000Z
|
2021-11-11T13:46:03.000Z
|
src/pymyinstall/setuphelper/ipython_helper.py
|
sdpython/pymyinstall
|
72b3a56a29def0694e34ccae910bf288a95cf4a5
|
[
"MIT"
] | 5
|
2016-09-13T18:14:46.000Z
|
2021-08-23T12:03:28.000Z
|
"""
@file
@brief Change Jupyter configuration
"""
import sys
import os
import re
from ..installhelper.link_shortcuts import add_shortcut_to_desktop, suffix
def noLOG(*args, **kwargs):
pass
def install_mathjax():
"""
install a local copy of mathjax
"""
from IPython.external import mathjax # pylint: disable=E1101
mathjax.install_mathjax()
def setup_ipython(current_path=None, additional_path=None, apply_modification=True,
shortcut=True, browser=None):
"""
The function applies the modification suggested in this blog post:
`Travailler avec IPython notebook <http://www.xavierdupre.fr/blog/2014-02-24_nojs.html>`_ (written in French).
@param additional_path additional paths to add to jupyter (a list)
@param current_path change the current path when running a notebook
@param apply_modification apply the modification, otherwise, just create the profile
    @param shortcut add a shortcut to the desktop
    @param browser if not None, tries to switch the notebook to that browser if it is found (IE, Chrome, Firefox, on Windows)
@return path the config file
If you need to create a shortcut with the appropriate paths,
you can use the following instructions
to open :epkg:`Jupyter` notebook on a specific folder:
::
set path=%path%;c:\\Python370_x64;c:\\Python370_x64\\Scripts
jupyter-notebook --notebook-dir=_doc\\notebooks
"""
if sys.platform.startswith("win"):
user_profile = os.environ['USERPROFILE']
profile = os.path.join(user_profile, ".jupyter")
ipython_notebook_config = os.path.join(
profile,
"jupyter_notebook_config.py")
checkpath = os.path.join(profile, "checkspoints")
if not os.path.exists(checkpath):
os.mkdir(checkpath)
if not os.path.exists(ipython_notebook_config):
from ..installhelper.install_cmd_helper import run_cmd
exe_exe = os.path.dirname(sys.executable)
if exe_exe.lower().endswith("scripts"):
exe = os.path.join(exe_exe, "jupyter-notebook.exe")
else:
exe = os.path.join(exe_exe, "Scripts", "jupyter-notebook.exe")
cmd = exe + " -y --generate-config"
out, err = run_cmd(cmd, wait=True, fLOG=noLOG)
if not os.path.exists(ipython_notebook_config):
raise Exception(
"unable to create jupyter configuration in \n'{0}'\nbecause of:\n{1}\nERR-4:\n{2}\ncmd={3}".format(
ipython_notebook_config, out, err, cmd))
with open(ipython_notebook_config, "r") as f:
text = f.read()
# change current path and pylab configuration
for var in ["IPKernelApp.file_to_run",
"ProfileDir.location",
"FileNotebookManager.checkpoint_dir",
"NotebookManager.notebook_dir",
"NotebookApp.ipython_dir",
"IPKernelApp.pylab"]:
reg = re.compile("(#? *c.{0} =.*)".format(var))
alls = reg.findall(text)
if len(alls) == 1 and current_path is not None:
if "pylab" in var:
text = text.replace(
alls[0],
"c.{0} = 'inline'".format(var))
elif "checkpoint_dir" in var:
text = text.replace(
alls[0],
"c.{0} = r'{1}'".format(
var,
checkpath))
elif "file_to_run" not in var:
text = text.replace(
alls[0],
"c.{1} = r'{0}'".format(
current_path,
var))
else:
text = text.replace(
alls[0],
"c.{1} = '{0}\\jupyter_startup.py'".format(
current_path,
var))
# browser
if browser is not None:
if sys.platform.startswith("win"):
paths = {"firefox": "C:\\Program Files (x86)\\Mozilla Firefox\\firefox.exe",
"ie": "C:\\Program Files\\Internet Explorer\\iexplore.exe",
"chrome": "C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe",
}
browser = browser.lower()
if browser not in paths:
raise KeyError(
"unable to find browser {0} in [{1}]".format(
browser, ",".join(
paths.keys())))
subtext = """
import webbrowser
webbrowser.register('{0}', None, webbrowser.GenericBrowser(r'{1}'))
c.NotebookApp.browser = '{0}'
""".format(browser, paths[browser]).replace(" ", "")
text += subtext
else:
raise NotImplementedError("implemented only on Windows")
if apply_modification:
# write modification
with open(ipython_notebook_config, "w") as f:
f.write(text)
# write jupyter_startup.py
rows = ["import sys"]
if additional_path is not None:
for p in additional_path:
if not os.path.exists(p):
raise FileNotFoundError(p)
rows.append("sys.path.append(r'{0}')".format(p))
s = "\n".join(rows)
if apply_modification:
with open(os.path.join(current_path, "jupyter_startup.py"), "w") as f:
f.write(s)
return [ipython_notebook_config]
else:
raise NotImplementedError("implemented only for Windows")
def add_shortcut_to_desktop_for_ipython(folder):
"""
create a shortcut on your desktop
@param folder notebook dir
@return filename
"""
file = os.path.join(
os.path.split(
sys.executable)[0],
"Scripts",
"jupyter-notebook")
arguments = " --notebook-dir=" + folder
ver = suffix()
return add_shortcut_to_desktop(
file, "notebook." + ver, "Jupyter Notebook {1} ({0})".format(folder, ver), arguments)
if __name__ == "__main__":
setup_ipython("C:\\temp", [], apply_modification=False)
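# Editor's note: added illustrative comment, not part of the original module.
# A fuller Windows invocation might look like the following; the paths and the
# browser choice are placeholders and must exist on the machine, and the package
# is assumed to be importable as pymyinstall.setuphelper.ipython_helper (its
# location in this repository):
#     from pymyinstall.setuphelper.ipython_helper import (
#         setup_ipython, add_shortcut_to_desktop_for_ipython)
#     config_files = setup_ipython(current_path="C:\\notebooks",
#                                  additional_path=["C:\\my_modules"],
#                                  apply_modification=True, browser="chrome")
#     add_shortcut_to_desktop_for_ipython("C:\\notebooks")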
| 37.668571
| 119
| 0.530188
|
285fad6ab5c23e5fc717c68fbf387d80a2508838
| 3,294
|
py
|
Python
|
twitchapi.py
|
Winter259/twitch-statistics
|
4d65a76ed5a9ea15fdd4e67bd6281523113b8d60
|
[
"MIT"
] | 3
|
2020-09-10T03:35:51.000Z
|
2021-02-16T16:34:31.000Z
|
twitchapi.py
|
Winter259/twitch-statistics
|
4d65a76ed5a9ea15fdd4e67bd6281523113b8d60
|
[
"MIT"
] | 1
|
2016-02-17T15:14:49.000Z
|
2016-04-03T17:36:31.000Z
|
twitchapi.py
|
Winter259/twitch-statistics
|
4d65a76ed5a9ea15fdd4e67bd6281523113b8d60
|
[
"MIT"
] | 2
|
2016-06-29T01:11:35.000Z
|
2019-04-02T07:26:44.000Z
|
import requests
from datetime import datetime
class APIStreamsRequest:
def __init__(self, game_url_name, game_full_names, client_id, timeout=10, verbose=False):
self.game_url_name = game_url_name
self.game_full_names = game_full_names
self.client_id = client_id
self.json_url = 'https://api.twitch.tv/kraken/streams'
self.timeout = timeout
self.last_status_code = 0
self.streams_data = []
self.verbose = verbose
def print(self, string=''):
if self.verbose:
print(string)
def make_request(self, url):
self.print('[INFO] Sending a request to: {}'.format(url))
try:
response = requests.get(
url=url,
timeout=self.timeout,
headers={'Client-ID': self.client_id})
except Exception as e:
# TODO: Don't return None :(
return None
self.last_status_code = response.status_code
self.print('[INFO] Status code returned: {}'.format(self.last_status_code))
# try to parse the JSON
try:
json_data = response.json()
except Exception as e:
self.print('Unable to parse JSON:')
print(e)
return None
return json_data
def last_request_successful(self):
return self.last_status_code == 200
def request_all_game_data(self):
url = self.json_url + '?game=' + self.game_url_name
response_data = self.make_request(url=url)
if response_data is None:
raise Exception('No data returned in the request')
streams_data = response_data['streams']
link_to_next = response_data['_links']['next']
while not len(streams_data) == 0:
self.streams_data.extend(streams_data)
response_data = self.make_request(url=link_to_next)
if response_data is not None and self.last_status_code == 200:
streams_data = response_data['streams']
link_to_next = response_data['_links']['next']
"""
# Easy way to check whether the total count and the stream amount received match up
print(response_data['_total'])
print(len(self.streams_data))
"""
def return_streams_data(self):
return self.streams_data
def clear_streams_data(self):
self.streams_data = []
def return_required_data(self):
if not self.streams_data:
self.print('[ERROR] No data is present. Have you requested the data yet?')
return None
# create a timestamp string for now
timestamp = '{}-{}-{} {}:{}:{}'.format(
datetime.now().year,
datetime.now().month,
datetime.now().day,
datetime.now().hour,
datetime.now().minute,
datetime.now().second
)
return [(
stream['channel']['name'],
stream['viewers'],
stream['channel']['followers'],
1 if stream['channel']['partner'] else 0, # 1 if true, 0 if false
timestamp
) for stream in self.streams_data if stream['game'] in self.game_full_names
]
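# Editor's note: the block below is an added, hedged usage sketch and was not part
# of the original file. The game names and client ID are placeholders, and the call
# performs real HTTP requests against the (now legacy) kraken endpoint.
if __name__ == '__main__':
    api = APIStreamsRequest(game_url_name='Rocket+League',      # placeholder, URL-encoded game name
                            game_full_names=['Rocket League'],  # placeholder display name(s)
                            client_id='YOUR_TWITCH_CLIENT_ID',  # placeholder credential
                            verbose=True)
    api.request_all_game_data()
    if api.last_request_successful():
        rows = api.return_required_data()  # (name, viewers, followers, partner_flag, timestamp) tuples
        print('Collected {} live streams'.format(len(rows) if rows else 0))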
| 36.6
| 93
| 0.576503
|
19f2b353a472ec503c65b70bbf6cc3f2d88b0856
| 756
|
py
|
Python
|
RLBotPack/Rocketnoodles/src/strategy/coaches/mr_cover.py
|
L0laapk3/RLBotPack
|
f54038475d2a57428f3784560755f96bfcf8015f
|
[
"MIT"
] | 13
|
2019-05-25T20:25:51.000Z
|
2022-03-19T13:36:23.000Z
|
RLBotPack/Rocketnoodles/src/strategy/coaches/mr_cover.py
|
L0laapk3/RLBotPack
|
f54038475d2a57428f3784560755f96bfcf8015f
|
[
"MIT"
] | 53
|
2019-06-07T13:31:59.000Z
|
2022-03-28T22:53:47.000Z
|
RLBotPack/Rocketnoodles/src/strategy/coaches/mr_cover.py
|
L0laapk3/RLBotPack
|
f54038475d2a57428f3784560755f96bfcf8015f
|
[
"MIT"
] | 78
|
2019-06-30T08:42:13.000Z
|
2022-03-23T20:11:42.000Z
|
from strategy.base_ccp import BaseCoach
from strategy.captains import *
from strategy.players import Cover
class MrCover(BaseCoach):
""""This class calls the captain that makes all drones cover, standing between the ball and your own goal."""
def __init__(self):
# Initial role assignment!
for drone in self.drones:
drone.flush_actions()
drone.assign(Cover())
def step(self):
"""Return current state containing status.
:return: Current State.
:rtype: bool
"""
for drone in self.drones:
            done = drone.step()  # step() returns True once the drone's current state is no longer pending (fail or success).
if done:
drone.assign(Cover())
| 28
| 113
| 0.624339
|
ecccf339b914f678bde7ae8aa0e690e3bf1e7a9f
| 5,080
|
py
|
Python
|
evaluation_scripts/test_wic.py
|
cambridgeltl/MirrorWiC
|
a0b6ed5ff980f0ed92d84640f07729b6e02f1f6f
|
[
"MIT"
] | 3
|
2021-11-01T07:14:47.000Z
|
2022-03-28T02:52:51.000Z
|
evaluation_scripts/test_wic.py
|
cambridgeltl/MirrorWiC
|
a0b6ed5ff980f0ed92d84640f07729b6e02f1f6f
|
[
"MIT"
] | 1
|
2021-10-03T09:27:07.000Z
|
2022-02-16T14:28:11.000Z
|
evaluation_scripts/test_wic.py
|
cambridgeltl/MirrorWiC
|
a0b6ed5ff980f0ed92d84640f07729b6e02f1f6f
|
[
"MIT"
] | null | null | null |
import sys
import os
import numpy as np
import torch
from transformers import AutoTokenizer, AutoModel
from tqdm.auto import tqdm
from sklearn import metrics
from src.helper import *
def eval_wic_cosine(scores_pred,golds,thres=None):
scores_pred,golds=np.array(scores_pred),np.array(golds)
if thres:
scores_pred_label = np.array(['F'] * len(scores_pred))
scores_true_indexes = np.where(scores_pred > thres)
scores_pred_label[scores_true_indexes] = 'T'
corrects_true = np.where((np.array(scores_pred_label) == 'T') & (np.array(golds) == 'T'))[0]
corrects_false = np.where((np.array(scores_pred_label) == 'F') & (np.array(golds) == 'F'))[0]
num_corrects = len(corrects_true) + len(corrects_false)
acc=num_corrects/len(scores_pred)
print ('==WIC RESULTS==: thres: {0}, num of correct: {1}, percentage: {2}'.format(thres,num_corrects,num_corrects/len(scores_pred)))
else:
thres=thres_search(scores_pred,golds)
thres,scores_pred_label,acc=eval_wic_cosine(scores_pred,golds,thres)
return thres,scores_pred_label,acc
def thres_search(scores_pred,golds):
thres=scores_pred[np.argmax(scores_pred)]
thres_min=scores_pred[np.argmin(scores_pred)]
num_corrects_prevmax=-1
num_corrects=0
thres_max=0
while thres>=thres_min:
if num_corrects>num_corrects_prevmax:
num_corrects_prevmax=num_corrects
thres_max=thres
scores_pred_label = np.array(['F'] * len(scores_pred))
thres-=0.01
scores_true_indexes = np.where(scores_pred>thres)
scores_pred_label[scores_true_indexes]='T'
corrects_true = np.where((np.array(scores_pred_label) == 'T') & (np.array(golds) == 'T'))[0]
corrects_false=np.where((np.array(scores_pred_label) == 'F') & (np.array(golds) == 'F'))[0]
num_corrects=len(corrects_true)+len(corrects_false)
return thres_max
def wic_scores(lines,tokenizer,model,flag,layer_start,layer_end,maxlen):
src,tgt,label=list(zip(*lines))
src,tgt,label=list(src),list(tgt),list(label)
string_features1, string_features2 = [], []
for i in tqdm(np.arange(0, len(src), bsz)):
np_feature_mean_tok=get_embed(src[i:i+bsz],tokenizer,model,flag,layer_start,layer_end,maxlen)
string_features1.append(np_feature_mean_tok)
string_features1_stacked = np.concatenate(string_features1, 0)
for i in tqdm(np.arange(0, len(tgt), bsz)):
np_feature_mean_tok=get_embed(tgt[i:i+bsz],tokenizer,model,flag,layer_start,layer_end,maxlen)
string_features2.append(np_feature_mean_tok)
string_features2_stacked = np.concatenate(string_features2, 0)
string_features1_stacked,string_features2_stacked=torch.from_numpy(string_features1_stacked),torch.from_numpy(string_features2_stacked)
scores_pred=produce_cosine_list(string_features1_stacked,string_features2_stacked)
return scores_pred,label
if __name__=='__main__':
bsz = 128
model_name=sys.argv[1]
datadir=sys.argv[2]
cuda=sys.argv[4]
flag=sys.argv[3]
maxlen=int(sys.argv[5])
layers,layer_start,layer_end=None,None,None
if flag.startswith('token') or flag=='mean':
layers=sys.argv[6]
layer_start,layer_end=int(layers.split('~')[0]),int(layers.split('~')[1])
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)
model.cuda()
model.eval()
wic_train=os.path.join(datadir,'train.tsv')
wic_test=os.path.join(datadir,'test.tsv')
wic_dev=os.path.join(datadir,'dev.tsv')
test_lines=[line.strip().replace('<word>','[ ').replace('</word>',' ]').split('\t') for line in open(wic_test).readlines()[1:]]
dev_lines=[line.strip().replace('<word>','[ ').replace('</word>',' ]').split('\t') for line in open(wic_dev).readlines()[1:]]
dev_scores_pred,dev_label=wic_scores(dev_lines,tokenizer,model,flag,layer_start,layer_end,maxlen)
print ('=======dev set accuracy=======')
dev_thres, dev_pred,dev_acc=eval_wic_cosine(dev_scores_pred,dev_label,thres=None)
test_scores_pred,test_label=wic_scores(test_lines,tokenizer,model,flag,layer_start,layer_end,maxlen)
    print ('=======test set accuracy=======')
print ('For WiC and WiC-tsv, the result here is a placeholder. \n You need to upload the predicted test file to their codalab competition pages.')
_, test_pred,test_acc=eval_wic_cosine(test_scores_pred,test_label,thres=dev_thres)
print ('=======auc=======')
fpr, tpr, thresholds = metrics.roc_curve(dev_label, dev_scores_pred, pos_label='T')
print ('dev auc: ',metrics.auc(fpr, tpr))
fpr, tpr, thresholds = metrics.roc_curve(test_label, test_scores_pred, pos_label='T')
print ('test auc: ',metrics.auc(fpr, tpr))
with open(wic_test+'.preds','w') as f:
for score in test_pred:
f.write(str(score)+'\n')
with open(wic_dev+'.preds','w') as f:
for i,score in enumerate(dev_pred):
f.write(str(score)+','+str(dev_scores_pred[i])+'\n')
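# A minimal invocation sketch for the script above, following the sys.argv order it reads
# (model_name, datadir, flag, cuda, maxlen, layers); the concrete values are hypothetical,
# not taken from the MirrorWiC repo:
#   python evaluation_scripts/test_wic.py bert-base-uncased data/wic mean 0 64 9~12
# 'layers' is only required when flag is 'mean' or starts with 'token', and is parsed as
# "start~end" into layer_start and layer_end.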
| 40.967742 | 150 | 0.69252 |
142d54de66e85c0adea9f01f40ea213710265f4b | 3,406 | py | Python | scripts/appveyor_mingw_find.py | dejbug/lovec | 12c80f6526b5683e2e62449fd6a197148e655b44 | ["MIT"] | null | null | null | scripts/appveyor_mingw_find.py | dejbug/lovec | 12c80f6526b5683e2e62449fd6a197148e655b44 | ["MIT"] | null | null | null | scripts/appveyor_mingw_find.py | dejbug/lovec | 12c80f6526b5683e2e62449fd6a197148e655b44 | ["MIT"] | null | null | null |
## Designed with <3 by dejbug.
import os, os.path
import re
import sys
from argparse import ArgumentParser
from collections import namedtuple
ROOTS = (
ur"C:\mingw-w64",
ur"mingw-w64",
)
"""https://sourceforge.net/projects/mingw-w64/files/mingw-w64/mingw-w64-release/
"""
SAMPLE_DIR_STDOUT = """
i686-5.3.0-posix-dwarf-rt_v4-rev0
i686-6.3.0-posix-dwarf-rt_v5-rev1
x86_64-6.3.0-posix-seh-rt_v5-rev1
x86_64-7.2.0-posix-seh-rt_v5-rev1
x86_64-7.3.0-posix-seh-rt_v5-rev0
"""
KEYS = ("arch", "version", "threads", "eh", "rt", "rev", )
Version = namedtuple("Version", "path info")
def find_versions(recurse=False, roots=ROOTS):
for root in roots:
for t,dd,nn in os.walk(root):
for d in dd:
x = parse_mingw_distro_name(d)
if x:
yield Version(os.path.abspath(os.path.join(t, d)), x)
if not recurse:
break
def parse_mingw_distro_name(text):
r = re.match(r"""(?x)
(?P<arch>x86_64|i686|[^-]+)
-(?P<version>\d+\.\d+\.\d+)
(?:-(?P<threads>posix|win32))?
(?:-(?P<eh>sjlj|seh|dwarf))?
(?:-rt_(?P<rt>v\d+))?
(?:-rev(?P<rev>\d+))?
""", text)
if r: return r.groupdict()
return {}
def test_parse_mingw_distro_name():
for line in iter_lines(SAMPLE_DIR_STDOUT):
yield parse_mingw_distro_name(line)
def iter_lines(text):
for r in re.finditer(r'(?m)^\s*(\S+)\s*$', text):
yield r.group(1)
def filter_versions(versions, **kk):
if not kk:
return versions
def filter_func(version):
for k,v in kk.items():
if k in version.info:
if hasattr(v, "__iter__"):
if version.info[k] in v: return True
else:
if v == version.info[k]: return True
return False
return filter(filter_func, versions)
def get_highest_version(**kk):
versions = find_versions()
versions = filter_versions(versions, **kk)
versions = sorted(versions, key=lambda v: v.info["version"])
versions = reversed(versions)
try:
return versions.next()
except StopIteration:
pass
def get_choices(versions, key):
return tuple(set(v.info[key] for v in versions if key in v.info))
def get_choices_dict(versions):
versions = tuple(versions)
d = {}
for key in KEYS:
d[key] = get_choices(versions, key)
return d
def parse_args(argv=sys.argv, choices={}):
info = "Find the highest version of MinGW on the system."
note = ""
p = ArgumentParser(description=info, epilog=note)
# p.add_argument("root", nargs="*", help="folders to search")
g = p.add_argument_group("filters", "If any of these is passed, only versions matching the given values will be considered. The possible values for each option are listed in braces to the right. If no options are listed at all, no MinGW was found.")
for key in KEYS:
g.add_argument('--%s' % key, choices=choices[key], help="Search only MinGW versions matching value.")
a = p.parse_args(argv[1:])
return p, a
def dict_from_namespace(ns):
d = {}
for key in KEYS:
value = getattr(ns, key)
if value: d[key] = value
return d
def main(argv=sys.argv):
# version = get_highest_version(arch=("i686", "x86_64"), rt="v4")
versions = find_versions()
choices = get_choices_dict(versions)
parser, args = parse_args(sys.argv, choices)
version = get_highest_version(**dict_from_namespace(args))
if version:
print version.path
else:
print "."
exit(1)
if "__main__" == __name__:
main()
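# A minimal usage sketch of the filter options described in parse_args above; the directory
# layout and the printed path are hypothetical:
#   > python appveyor_mingw_find.py --arch x86_64 --threads posix
#   C:\mingw-w64\x86_64-7.3.0-posix-seh-rt_v5-rev0
# With no filters, the highest version found under the ROOTS folders is printed; "." is
# printed and the exit code is 1 when nothing matches.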
| 26 | 251 | 0.658837 |
e06eabf29fdbdfd3728bc03c553e56fa02ba2870 | 12,849 | py | Python | tests/test_coordinate_analysis.py | theavey/ParaTemp | 6d6c92064ec36640063cc271a245f21b95ddee96 | ["Apache-2.0"] | 12 | 2018-03-01T21:49:11.000Z | 2021-09-02T15:47:40.000Z | tests/test_coordinate_analysis.py | theavey/ParaTemp | 6d6c92064ec36640063cc271a245f21b95ddee96 | ["Apache-2.0"] | 34 | 2017-12-18T02:32:57.000Z | 2019-10-11T12:08:57.000Z | tests/test_coordinate_analysis.py | theavey/ParaTemp | 6d6c92064ec36640063cc271a245f21b95ddee96 | ["Apache-2.0"] | 3 | 2019-05-08T05:29:13.000Z | 2022-02-14T09:21:42.000Z |
"""This contains a set of tests for paratemp.coordinate_analysis"""
########################################################################
# #
# This test was written by Thomas Heavey in 2018. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2017-18 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
from __future__ import absolute_import
import shutil
import matplotlib
import numpy as np
import pandas as pd
import pytest
from paratemp import cd
matplotlib.use('agg')
def test_matplotlib_testing_backend():
# Travis should fail if this isn't true, but hopefully this makes it
# clearer as to why it failed.
assert matplotlib.get_backend() == 'agg'
class TestXTCUniverse(object):
def test_import(self):
from paratemp import coordinate_analysis as ca
from paratemp import Universe
assert ca.Universe == Universe
from MDAnalysis import Universe as MdUniverse
assert issubclass(Universe, MdUniverse)
assert issubclass(ca.Universe, MdUniverse)
@pytest.fixture
def universe_class(self) -> type:
from paratemp import Universe
return Universe
@pytest.fixture
def univ(self, tmp_path, path_test_data, universe_class):
gro = path_test_data / 'spc2.gro'
traj = path_test_data / 't-spc2-traj.xtc'
shutil.copy(gro, tmp_path)
shutil.copy(traj, tmp_path)
with cd(tmp_path):
_univ = universe_class(gro.name,
traj.name,
temp=205.)
return _univ
@pytest.fixture
def univ_w_a(self, univ):
univ.calculate_distances(a='4 5',
read_data=False, save_data=False)
return univ
@pytest.fixture
def univ_pbc(self, tmp_path, path_test_data, universe_class):
gro = path_test_data / 'spc2.gro'
traj = path_test_data / 'spc2-traj-pbc.xtc'
shutil.copy(gro, tmp_path)
shutil.copy(traj, tmp_path)
with cd(tmp_path):
_univ = universe_class(gro.name,
traj.name,
temp=205.)
return _univ
@pytest.fixture
def ref_a_pbc_dists(self, path_ref_data):
import pandas
return pandas.read_csv(path_ref_data / 'spc2-a-pbc-dists.csv',
index_col=0)
def test_distance_str(self, univ, ref_a_dists):
univ.calculate_distances(a='4 5',
read_data=False, save_data=False)
assert np.isclose(ref_a_dists, univ.data['a']).all()
def test_distance_list_int(self, univ, ref_a_dists):
univ.calculate_distances(a=[4, 5],
read_data=False, save_data=False)
assert np.isclose(ref_a_dists, univ.data['a']).all()
def test_distance_list_str(self, univ, ref_a_dists):
univ.calculate_distances(a=['4', '5'],
read_data=False, save_data=False)
assert np.isclose(ref_a_dists, univ.data['a']).all()
def test_calculate_distances_no_recalc(self, univ_w_a, capsys):
univ_w_a.calculate_distances(a=[4, 5],
read_data=False, save_data=False)
out, err = capsys.readouterr()
assert out == 'Nothing (new) to calculate here.\n'
def test_calculate_distances_yes_recalc(self, univ_w_a):
"""
:type univ_w_a: paratemp.coordinate_analysis.Universe
"""
univ_w_a.calculate_distances(a='5 5', recalculate=True,
read_data=False, save_data=False)
assert (np.array([0., 0.]) == univ_w_a.data['a']).all()
def test_distance_pbc(self, univ_pbc, ref_a_pbc_dists):
univ_pbc.calculate_distances(a='4 5',
read_data=False, save_data=False)
assert np.isclose(ref_a_pbc_dists['a'], univ_pbc.data['a']).all()
def test_distances_com(self, univ, ref_g_dists):
univ.calculate_distances(
read_data=False, save_data=False,
g=((1, 2), (3, 4)))
assert np.isclose(ref_g_dists, univ.data).all()
def test_calculate_distance_raises(self, univ):
with pytest.raises(SyntaxError):
univ.calculate_distances(1, read_data=False, save_data=False)
with pytest.raises(SyntaxError):
univ.calculate_distances(a=['0', '5'],
read_data=False, save_data=False)
with pytest.raises(SyntaxError):
univ.calculate_distances(a=['1', '2', '5'],
read_data=False, save_data=False)
with pytest.raises(NotImplementedError):
univ.calculate_distances(a=['fail', 'here'],
read_data=False, save_data=False)
def test_calculate_distance_warns(self, univ):
with pytest.warns(UserWarning,
match='following positional arguments were given'):
univ.calculate_distances('fail', read_data=False, save_data=False)
def test_fes_1d_data_str(self, univ_w_a, ref_delta_g, ref_bins):
"""
:type univ_w_a: paratemp.coordinate_analysis.Universe
:type ref_delta_g: np.ndarray
:type ref_bins: np.ndarray
"""
delta_g_str, bins_str, lines_str, fig_str, ax_str = \
univ_w_a.fes_1d('a')
assert np.allclose(delta_g_str, ref_delta_g)
assert np.allclose(bins_str, ref_bins)
def test_fes_1d_data_data(self, univ_w_a, ref_delta_g, ref_bins):
"""
:type univ_w_a: paratemp.coordinate_analysis.Universe
:type ref_delta_g: np.ndarray
:type ref_bins: np.ndarray
"""
delta_g_data, bins_data, lines_data, fig_data, ax_data = \
univ_w_a.fes_1d(univ_w_a.data['a'])
assert np.allclose(delta_g_data, ref_delta_g)
assert np.allclose(bins_data, ref_bins)
def test_final_time_str(self, univ):
assert univ.final_time_str == '2ps'
univ._last_time = 1001.0
assert univ.final_time_str == '1ns'
univ._last_time = 32111222.12
assert univ.final_time_str == '32us'
univ._last_time = 5.1e12
assert univ.final_time_str == '5100ms'
def test_save_data(self, univ_w_a, tmp_path, capsys):
time = 'time_' + str(int(univ_w_a._last_time / 1000)) + 'ns'
f_name = univ_w_a.trajectory.filename.replace('xtc', 'h5')
with cd(tmp_path):
univ_w_a.save_data()
out, err = capsys.readouterr()
assert (tmp_path / f_name).exists()
with pd.HDFStore(f_name) as store:
df = store[time]
assert out == 'Saved data to {f_name}[{time}]\n'.format(
f_name=f_name, time=time)
assert np.allclose(df, univ_w_a.data)
def test_save_data_no_new(self, univ_w_a, tmp_path, capsys):
time = 'time_' + str(int(univ_w_a._last_time / 1000)) + 'ns'
f_name = univ_w_a.trajectory.filename.replace('xtc', 'h5')
with cd(tmp_path):
univ_w_a.save_data()
capsys.readouterr()
univ_w_a.save_data()
out, err = capsys.readouterr()
assert (tmp_path / f_name).exists()
with pd.HDFStore(f_name) as store:
df = store[time]
assert out == 'No data added to {f_name}[{time}]\n'.format(
f_name=f_name, time=time)
assert np.allclose(df, univ_w_a.data)
def test_save_data_add_new(self, univ, univ_w_a, tmp_path, capsys):
time = 'time_' + str(int(univ_w_a._last_time / 1000)) + 'ns'
f_name = univ_w_a.trajectory.filename.replace('xtc', 'h5')
with cd(tmp_path):
univ_w_a.save_data()
capsys.readouterr()
univ.calculate_distances(b='4 5', save_data=False)
univ.save_data()
out, err = capsys.readouterr()
assert out == 'Saved data to {f_name}[{time}]\n'.format(
f_name=f_name, time=time)
def test_read_data(self, univ, univ_w_a, tmp_path, capsys):
"""
:type univ_w_a: paratemp.Universe
:type univ: paratemp.Universe
"""
with cd(tmp_path):
univ_w_a.save_data()
capsys.readouterr() # just so it doesn't print
univ.read_data()
assert (univ_w_a.data == univ.data).all().all()
def test_read_data_no_data(self, univ, tmp_path, capsys):
"""
:type univ: paratemp.Universe
"""
time = 'time_' + str(int(univ._last_time / 1000)) + 'ns'
f_name = univ.trajectory.filename.replace('xtc', 'h5')
with cd(tmp_path):
with pytest.raises(IOError, match=r'This data does not exist!\n'
r'{}\[{}\]'.format(f_name,
time)):
univ.read_data()
univ.read_data(ignore_no_data=True)
out, err = capsys.readouterr()
assert out == 'No data to read in {}[{}]\n'.format(f_name, time)
def test_calculate_distances_save(self, univ, tmp_path, capsys):
"""
:type univ: paratemp.Universe
"""
time = 'time_' + str(int(univ._last_time / 1000)) + 'ns'
f_name = univ.trajectory.filename.replace('xtc', 'h5')
with cd(tmp_path):
univ.calculate_distances(a='4 5')
out, err = capsys.readouterr()
assert (tmp_path / f_name).exists()
with pd.HDFStore(f_name) as store:
df = store[time]
assert out == 'Saved data to {f_name}[{time}]\n'.format(
f_name=f_name, time=time)
assert np.allclose(df, univ.data)
def test_calculate_distances_read(self, univ_w_a, tmp_path, capsys):
"""
:type univ_w_a: paratemp.Universe
"""
with cd(tmp_path):
univ_w_a.save_data()
capsys.readouterr()
univ_w_a._data = univ_w_a._init_dataframe()
univ_w_a.calculate_distances(a='4 5')
out, err = capsys.readouterr()
assert out == 'Nothing (new) to calculate here.\n'
def test_select_frames(self, univ_pbc, capsys):
u = univ_pbc
u.calculate_distances(a='4 5',
read_data=False, save_data=False)
frames = u.select_frames({'a': (0.1, 0.75)}, 'short')
out, err = capsys.readouterr()
assert out == 'These criteria include 1 frame\n'
assert (u.data['short'] == [False, True]).all()
assert (frames == [1]).all()
def test_update_num_frames(self, univ, capsys, path_test_data):
old_lt, old_nf = univ._last_time, univ._num_frames
univ.load_new([str(path_test_data / 't-spc2-traj.xtc'),
str(path_test_data / 'spc2-traj-pbc.xtc')])
univ.update_num_frames()
out, err = capsys.readouterr()
assert old_lt != univ._last_time
assert old_nf != univ._num_frames
assert out == 'Updating num of frames from {} to {}'.format(
old_nf, univ._num_frames) + '\nand the final time.\n'
class TestXTCTaddol(TestXTCUniverse):
@pytest.fixture
def universe_class(self) -> type:
from paratemp.coordinate_analysis import Taddol
return Taddol
# TODO add further Universe tests
# ignore_file_change=True
# fes_2d
# calculate_dihedrals
# figure from fes_1d
# figure from fes_2d
| 40.661392 | 78 | 0.55981 |
02797e119f38ff2abc27264e935b5e9d9bea0f01 | 3,760 | py | Python | src/ctc/toolbox/backend_utils/backend_crud.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | ["MIT"] | 94 | 2022-02-15T19:34:49.000Z | 2022-03-26T19:26:22.000Z | src/ctc/toolbox/backend_utils/backend_crud.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | ["MIT"] | 7 | 2022-03-03T02:58:47.000Z | 2022-03-11T18:41:05.000Z | src/ctc/toolbox/backend_utils/backend_crud.py | fei-protocol/checkthechain | ec838f3d0d44af228f45394d9ba8d8eb7f677520 | ["MIT"] | 7 | 2022-02-15T17:53:07.000Z | 2022-03-17T19:14:17.000Z |
"""this code is fragile and hacky, it needs to get replaced in future"""
from __future__ import annotations
import typing
from . import backend_exceptions
T = typing.TypeVar('T')
def get_backend_order(
backend: str | None = None,
backend_order: typing.Sequence[str] | None = None,
) -> typing.Sequence[str]:
if backend_order is None and backend is None:
return ['filesystem', 'node']
elif backend is not None:
return [backend]
elif backend_order is not None:
return backend_order
else:
raise Exception('specify backend or backend_order')
def run_on_backend(
backend_functions: typing.Mapping[str, typing.Callable[..., T]],
backend: str | None = None,
backend_order: typing.Sequence[str] | None = None,
**function_kwargs: typing.Any,
) -> T:
backend_order = get_backend_order(backend, backend_order)
for backend in backend_order:
try:
function = backend_functions.get(backend)
if function is None:
raise Exception('unknown backend: ' + str(backend))
return function(**function_kwargs)
except backend_exceptions.DataNotFound:
pass
else:
raise Exception('could not execute any of: ' + str(backend_functions))
async def async_run_on_backend(
backend_functions: typing.Mapping[
str, typing.Callable[..., typing.Coroutine[typing.Any, typing.Any, T]]
],
backend: str | None = None,
backend_order: typing.Sequence[str] | None = None,
**function_kwargs: typing.Any,
) -> T:
backend_order = get_backend_order(backend, backend_order)
for backend in backend_order:
try:
function = backend_functions.get(backend)
if function is None:
raise Exception('unknown backend: ' + str(backend))
return await function(**function_kwargs)
except backend_exceptions.DataNotFound:
pass
else:
raise Exception('could not execute any of: ' + str(backend_functions))
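# A minimal usage sketch of the fallback pattern implemented by run_on_backend; the two
# backend functions below are hypothetical stand-ins, not part of ctc:
#
# def _get_from_filesystem(key):
#     raise backend_exceptions.DataNotFound()  # simulate a cache miss
#
# def _get_from_node(key):
#     return {'key': key, 'source': 'node'}
#
# value = run_on_backend(
#     {'filesystem': _get_from_filesystem, 'node': _get_from_node},
#     key='example',  # forwarded to the chosen backend function via **function_kwargs
# )
# # 'filesystem' is tried first (the default backend order); 'node' is used on DataNotFound.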
def transfer_backends(
get: typing.Callable[..., typing.Any],
save: typing.Callable[..., typing.Any],
from_backend: str,
to_backend: str,
get_kwargs: typing.Mapping[str, typing.Any] | None = None,
save_kwargs: typing.Mapping[str, typing.Any] | None = None,
common_kwargs: typing.Mapping[str, typing.Any] | None = None,
**more_common_kwargs: typing.Any,
) -> typing.Any:
if common_kwargs is None:
common_kwargs = {}
common_kwargs = dict(common_kwargs, **more_common_kwargs)
if get_kwargs is None:
get_kwargs = {}
get_kwargs = dict(common_kwargs, **get_kwargs)
result = get(backend=from_backend, **get_kwargs)
if save_kwargs is None:
save_kwargs = {}
    save_kwargs = dict(common_kwargs, **save_kwargs)
return save(result, backend=to_backend, **save_kwargs)
async def async_transfer_backends(
get: typing.Callable[..., typing.Any],
save: typing.Callable[..., typing.Any],
from_backend: str,
to_backend: str,
get_kwargs: typing.Mapping[str, typing.Any] | None = None,
save_kwargs: typing.Mapping[str, typing.Any] | None = None,
common_kwargs: typing.Mapping[str, typing.Any] | None = None,
**more_common_kwargs: typing.Any,
) -> typing.Any:
if common_kwargs is None:
common_kwargs = {}
common_kwargs = dict(common_kwargs, **more_common_kwargs)
if get_kwargs is None:
get_kwargs = {}
get_kwargs = dict(common_kwargs, **get_kwargs)
result = await get(backend=from_backend, **get_kwargs)
if save_kwargs is None:
save_kwargs = {}
    save_kwargs = dict(common_kwargs, **save_kwargs)
return await save(result, backend=to_backend, **save_kwargs)
| 32.413793 | 78 | 0.664362 |
2567f375f219a5a1259c4d6f7c63e7cd36154c3e | 53,070 | py | Python | sklearn/tests/test_pipeline.py | Raieen/scikit-learn | 94747d2e553c36305f33e677001660e5f4b0be85 | ["BSD-3-Clause"] | null | null | null | sklearn/tests/test_pipeline.py | Raieen/scikit-learn | 94747d2e553c36305f33e677001660e5f4b0be85 | ["BSD-3-Clause"] | null | null | null | sklearn/tests/test_pipeline.py | Raieen/scikit-learn | 94747d2e553c36305f33e677001660e5f4b0be85 | ["BSD-3-Clause"] | null | null | null |
"""
Test the pipeline module.
"""
from tempfile import mkdtemp
import shutil
import time
import re
import itertools
import pytest
import numpy as np
from scipy import sparse
import joblib
from sklearn.utils.fixes import parse_version
from sklearn.utils._testing import (
assert_allclose,
assert_array_equal,
assert_array_almost_equal,
MinimalClassifier,
MinimalRegressor,
MinimalTransformer,
)
from sklearn.exceptions import NotFittedError
from sklearn.utils.validation import check_is_fitted
from sklearn.base import clone, is_classifier, BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.neighbors import LocalOutlierFactor
from sklearn.linear_model import LogisticRegression, Lasso
from sklearn.linear_model import LinearRegression
from sklearn.metrics import accuracy_score, r2_score
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.dummy import DummyRegressor
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.impute import SimpleImputer
iris = load_iris()
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class NoFit:
"""Small class to test parameter dispatching."""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class NoTrans(NoFit):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {"a": self.a, "b": self.b}
def set_params(self, **params):
self.a = params["a"]
return self
class NoInvTransf(NoTrans):
def transform(self, X):
return X
class Transf(NoInvTransf):
def transform(self, X):
return X
def inverse_transform(self, X):
return X
class TransfFitParams(Transf):
def fit(self, X, y, **fit_params):
self.fit_params = fit_params
return self
class Mult(BaseEstimator):
def __init__(self, mult=1):
self.mult = mult
def fit(self, X, y):
return self
def transform(self, X):
return np.asarray(X) * self.mult
def inverse_transform(self, X):
return np.asarray(X) / self.mult
def predict(self, X):
return (np.asarray(X) * self.mult).sum(axis=1)
predict_proba = predict_log_proba = decision_function = predict
def score(self, X, y=None):
return np.sum(X)
class FitParamT(BaseEstimator):
"""Mock classifier"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def fit_predict(self, X, y, should_succeed=False):
self.fit(X, y, should_succeed=should_succeed)
return self.predict(X)
def score(self, X, y=None, sample_weight=None):
if sample_weight is not None:
X = X * sample_weight
return np.sum(X)
class DummyTransf(Transf):
"""Transformer which store the column means"""
def fit(self, X, y):
self.means_ = np.mean(X, axis=0)
# store timestamp to figure out whether the result of 'fit' has been
# cached or not
self.timestamp_ = time.time()
return self
class DummyEstimatorParams(BaseEstimator):
"""Mock classifier that takes params on predict"""
def fit(self, X, y):
return self
def predict(self, X, got_attribute=False):
self.got_attribute = got_attribute
return self
def predict_proba(self, X, got_attribute=False):
self.got_attribute = got_attribute
return self
def predict_log_proba(self, X, got_attribute=False):
self.got_attribute = got_attribute
return self
def test_pipeline_invalid_parameters():
# Test the various init parameters of the pipeline in fit
# method
pipeline = Pipeline([(1, 1)])
with pytest.raises(TypeError):
pipeline.fit([[1]], [1])
# Check that we can't fit pipelines with objects without fit
# method
msg = (
"Last step of Pipeline should implement fit "
"or be the string 'passthrough'"
".*NoFit.*"
)
pipeline = Pipeline([("clf", NoFit())])
with pytest.raises(TypeError, match=msg):
pipeline.fit([[1]], [1])
# Smoke test with only an estimator
clf = NoTrans()
pipe = Pipeline([("svc", clf)])
assert pipe.get_params(deep=True) == dict(
svc__a=None, svc__b=None, svc=clf, **pipe.get_params(deep=False)
)
# Check that params are set
pipe.set_params(svc__a=0.1)
assert clf.a == 0.1
assert clf.b is None
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([("anova", filter1), ("svc", clf)])
# Check that estimators are not cloned on pipeline construction
assert pipe.named_steps["anova"] is filter1
assert pipe.named_steps["svc"] is clf
# Check that we can't fit with non-transformers on the way
# Note that NoTrans implements fit, but not transform
msg = "All intermediate steps should be transformers.*\\bNoTrans\\b.*"
pipeline = Pipeline([("t", NoTrans()), ("svc", clf)])
with pytest.raises(TypeError, match=msg):
pipeline.fit([[1]], [1])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert clf.C == 0.1
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
msg = re.escape(
"Invalid parameter 'C' for estimator SelectKBest(). Valid parameters are: ['k',"
" 'score_func']."
)
with pytest.raises(ValueError, match=msg):
pipe.set_params(anova__C=0.1)
# Test clone
with pytest.warns(None):
pipe2 = clone(pipe)
assert not pipe.named_steps["svc"] is pipe2.named_steps["svc"]
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop("svc")
params.pop("anova")
params2.pop("svc")
params2.pop("anova")
assert params == params2
def test_pipeline_init_tuple():
# Pipeline accepts steps as tuple
X = np.array([[1, 2]])
pipe = Pipeline((("transf", Transf()), ("clf", FitParamT())))
pipe.fit(X, y=None)
pipe.score(X)
pipe.set_params(transf="passthrough")
pipe.fit(X, y=None)
pipe.score(X)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([("anova", filter1), ("logistic", clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([("transf", Transf()), ("clf", FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert pipe.predict(None)
# and transformer params should not be changed
assert pipe.named_steps["transf"].a is None
assert pipe.named_steps["transf"].b is None
# invalid parameters should raise an error message
msg = re.escape("fit() got an unexpected keyword argument 'bad'")
with pytest.raises(TypeError, match=msg):
pipe.fit(None, None, clf__bad=True)
def test_pipeline_sample_weight_supported():
# Pipeline should pass sample_weight
X = np.array([[1, 2]])
pipe = Pipeline([("transf", Transf()), ("clf", FitParamT())])
pipe.fit(X, y=None)
assert pipe.score(X) == 3
assert pipe.score(X, y=None) == 3
assert pipe.score(X, y=None, sample_weight=None) == 3
assert pipe.score(X, sample_weight=np.array([2, 3])) == 8
def test_pipeline_sample_weight_unsupported():
# When sample_weight is None it shouldn't be passed
X = np.array([[1, 2]])
pipe = Pipeline([("transf", Transf()), ("clf", Mult())])
pipe.fit(X, y=None)
assert pipe.score(X) == 3
assert pipe.score(X, sample_weight=None) == 3
msg = re.escape("score() got an unexpected keyword argument 'sample_weight'")
with pytest.raises(TypeError, match=msg):
pipe.score(X, sample_weight=np.array([2, 3]))
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([("cls", LinearRegression())])
# expected error message
error_msg = re.escape(
"Invalid parameter 'fake' for estimator Pipeline(steps=[('cls',"
" LinearRegression())]). Valid parameters are: ['memory', 'steps', 'verbose']."
)
with pytest.raises(ValueError, match=error_msg):
pipe.set_params(fake="nope")
# invalid outer parameter name for compound parameter: the expected error message
# is the same as above.
with pytest.raises(ValueError, match=error_msg):
pipe.set_params(fake__estimator="nope")
# expected error message for invalid inner parameter
error_msg = re.escape(
"Invalid parameter 'invalid_param' for estimator LinearRegression(). Valid"
" parameters are: ['copy_X', 'fit_intercept', 'n_jobs', 'normalize',"
" 'positive']."
)
with pytest.raises(ValueError, match=error_msg):
pipe.set_params(cls__invalid_param="nope")
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(svd_solver="full", n_components="mle", whiten=True)
pipe = Pipeline([("pca", pca), ("svc", clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_score_samples_pca_lof():
X = iris.data
# Test that the score_samples method is implemented on a pipeline.
# Test that the score_samples method on pipeline yields same results as
# applying transform and score_samples steps separately.
pca = PCA(svd_solver="full", n_components="mle", whiten=True)
lof = LocalOutlierFactor(novelty=True)
pipe = Pipeline([("pca", pca), ("lof", lof)])
pipe.fit(X)
# Check the shapes
assert pipe.score_samples(X).shape == (X.shape[0],)
# Check the values
lof.fit(pca.fit_transform(X))
assert_allclose(pipe.score_samples(X), lof.score_samples(pca.transform(X)))
def test_score_samples_on_pipeline_without_score_samples():
X = np.array([[1], [2]])
y = np.array([1, 2])
# Test that a pipeline does not have score_samples method when the final
# step of the pipeline does not have score_samples defined.
pipe = make_pipeline(LogisticRegression())
pipe.fit(X, y)
with pytest.raises(
AttributeError,
match="'LogisticRegression' object has no attribute 'score_samples'",
):
pipe.score_samples(X)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = PCA(n_components=2, svd_solver="randomized", whiten=True)
clf = SVC(probability=True, random_state=0, decision_function_shape="ovr")
for preprocessing in [scaler, pca]:
pipe = Pipeline([("preprocess", preprocessing), ("svc", clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert predict.shape == (n_samples,)
proba = pipe.predict_proba(X)
assert proba.shape == (n_samples, n_classes)
log_proba = pipe.predict_log_proba(X)
assert log_proba.shape == (n_samples, n_classes)
decision_function = pipe.decision_function(X)
assert decision_function.shape == (n_samples, n_classes)
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
scaler = StandardScaler()
km = KMeans(random_state=0)
# As pipeline doesn't clone estimators on construction,
# it must have its own estimators
scaler_for_pipeline = StandardScaler()
km_for_pipeline = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([("scaler", scaler_for_pipeline), ("Kmeans", km_for_pipeline)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA(svd_solver="full")
pipe = Pipeline([("scaler", scaler), ("pca", pca)])
msg = "'PCA' object has no attribute 'fit_predict'"
with pytest.raises(AttributeError, match=msg):
getattr(pipe, "fit_predict")
def test_fit_predict_with_intermediate_fit_params():
# tests that Pipeline passes fit_params to intermediate steps
# when fit_predict is invoked
pipe = Pipeline([("transf", TransfFitParams()), ("clf", FitParamT())])
pipe.fit_predict(
X=None, y=None, transf__should_get_this=True, clf__should_succeed=True
)
assert pipe.named_steps["transf"].fit_params["should_get_this"]
assert pipe.named_steps["clf"].successful
assert "should_succeed" not in pipe.named_steps["transf"].fit_params
@pytest.mark.parametrize(
"method_name", ["predict", "predict_proba", "predict_log_proba"]
)
def test_predict_methods_with_predict_params(method_name):
# tests that Pipeline passes predict_* to the final estimator
# when predict_* is invoked
pipe = Pipeline([("transf", Transf()), ("clf", DummyEstimatorParams())])
pipe.fit(None, None)
method = getattr(pipe, method_name)
method(X=None, got_attribute=True)
assert pipe.named_steps["clf"].got_attribute
def test_feature_union():
# basic sanity check for feature union
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert X_transformed.shape == (X.shape[0], 3)
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1], select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# Test clone
with pytest.warns(None):
fs2 = clone(fs)
assert fs.transformer_list[0][1] is not fs2.transformer_list[0][1]
# test setting parameters
fs.set_params(select__k=2)
assert fs.fit_transform(X, y).shape == (X.shape[0], 4)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert X_transformed.shape == (X.shape[0], 8)
# test error if some elements do not support transform
msg = "All estimators should implement fit and transform.*\\bNoTrans\\b"
fs = FeatureUnion([("transform", Transf()), ("no_transform", NoTrans())])
with pytest.raises(TypeError, match=msg):
fs.fit(X)
# test that init accepts tuples
fs = FeatureUnion((("svd", svd), ("select", select)))
fs.fit(X, y)
def test_make_union():
pca = PCA(svd_solver="full")
mock = Transf()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert names == ("pca", "transf")
assert transformers == (pca, mock)
def test_make_union_kwargs():
pca = PCA(svd_solver="full")
mock = Transf()
fu = make_union(pca, mock, n_jobs=3)
assert fu.transformer_list == make_union(pca, mock).transformer_list
assert 3 == fu.n_jobs
# invalid keyword parameters should raise an error message
msg = re.escape(
"make_union() got an unexpected keyword argument 'transformer_weights'"
)
with pytest.raises(TypeError, match=msg):
make_union(pca, mock, transformer_weights={"pca": 10, "Transf": 1})
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
X = iris.data
pca = PCA(n_components=2, svd_solver="full")
pipeline = Pipeline([("pca", pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
X = iris.data
y = iris.target
transf = Transf()
pipeline = Pipeline([("mock", transf)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transf.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
@pytest.mark.parametrize(
"start, end", [(0, 1), (0, 2), (1, 2), (1, 3), (None, 1), (1, None), (None, None)]
)
def test_pipeline_slice(start, end):
pipe = Pipeline(
[("transf1", Transf()), ("transf2", Transf()), ("clf", FitParamT())],
memory="123",
verbose=True,
)
pipe_slice = pipe[start:end]
# Test class
assert isinstance(pipe_slice, Pipeline)
# Test steps
assert pipe_slice.steps == pipe.steps[start:end]
# Test named_steps attribute
assert (
list(pipe_slice.named_steps.items())
== list(pipe.named_steps.items())[start:end]
)
# Test the rest of the parameters
pipe_params = pipe.get_params(deep=False)
pipe_slice_params = pipe_slice.get_params(deep=False)
del pipe_params["steps"]
del pipe_slice_params["steps"]
assert pipe_params == pipe_slice_params
# Test exception
msg = "Pipeline slicing only supports a step of 1"
with pytest.raises(ValueError, match=msg):
pipe[start:end:-1]
def test_pipeline_index():
transf = Transf()
clf = FitParamT()
pipe = Pipeline([("transf", transf), ("clf", clf)])
assert pipe[0] == transf
assert pipe["transf"] == transf
assert pipe[-1] == clf
assert pipe["clf"] == clf
# should raise an error if slicing out of range
with pytest.raises(IndexError):
pipe[3]
# should raise an error if indexing with wrong element name
with pytest.raises(KeyError):
pipe["foobar"]
def test_set_pipeline_steps():
transf1 = Transf()
transf2 = Transf()
pipeline = Pipeline([("mock", transf1)])
assert pipeline.named_steps["mock"] is transf1
# Directly setting attr
pipeline.steps = [("mock2", transf2)]
assert "mock" not in pipeline.named_steps
assert pipeline.named_steps["mock2"] is transf2
assert [("mock2", transf2)] == pipeline.steps
# Using set_params
pipeline.set_params(steps=[("mock", transf1)])
assert [("mock", transf1)] == pipeline.steps
# Using set_params to replace single step
pipeline.set_params(mock=transf2)
assert [("mock", transf2)] == pipeline.steps
# With invalid data
pipeline.set_params(steps=[("junk", ())])
msg = re.escape(
"Last step of Pipeline should implement fit or be the string 'passthrough'."
)
with pytest.raises(TypeError, match=msg):
pipeline.fit([[1]], [1])
with pytest.raises(TypeError, match=msg):
pipeline.fit_transform([[1]], [1])
def test_pipeline_named_steps():
transf = Transf()
mult2 = Mult(mult=2)
pipeline = Pipeline([("mock", transf), ("mult", mult2)])
# Test access via named_steps bunch object
assert "mock" in pipeline.named_steps
assert "mock2" not in pipeline.named_steps
assert pipeline.named_steps.mock is transf
assert pipeline.named_steps.mult is mult2
# Test bunch with conflict attribute of dict
pipeline = Pipeline([("values", transf), ("mult", mult2)])
assert pipeline.named_steps.values is not transf
assert pipeline.named_steps.mult is mult2
@pytest.mark.parametrize("passthrough", [None, "passthrough"])
def test_pipeline_correctly_adjusts_steps(passthrough):
X = np.array([[1]])
y = np.array([1])
mult2 = Mult(mult=2)
mult3 = Mult(mult=3)
mult5 = Mult(mult=5)
pipeline = Pipeline(
[("m2", mult2), ("bad", passthrough), ("m3", mult3), ("m5", mult5)]
)
pipeline.fit(X, y)
expected_names = ["m2", "bad", "m3", "m5"]
actual_names = [name for name, _ in pipeline.steps]
assert expected_names == actual_names
@pytest.mark.parametrize("passthrough", [None, "passthrough"])
def test_set_pipeline_step_passthrough(passthrough):
X = np.array([[1]])
y = np.array([1])
mult2 = Mult(mult=2)
mult3 = Mult(mult=3)
mult5 = Mult(mult=5)
def make():
return Pipeline([("m2", mult2), ("m3", mult3), ("last", mult5)])
pipeline = make()
exp = 2 * 3 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline.set_params(m3=passthrough)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert pipeline.get_params(deep=True) == {
"steps": pipeline.steps,
"m2": mult2,
"m3": passthrough,
"last": mult5,
"memory": None,
"m2__mult": 2,
"last__mult": 5,
"verbose": False,
}
pipeline.set_params(m2=passthrough)
exp = 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
# for other methods, ensure no AttributeErrors on None:
other_methods = [
"predict_proba",
"predict_log_proba",
"decision_function",
"transform",
"score",
]
for method in other_methods:
getattr(pipeline, method)(X)
pipeline.set_params(m2=mult2)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline = make()
pipeline.set_params(last=passthrough)
# mult2 and mult3 are active
exp = 6
assert_array_equal([[exp]], pipeline.fit(X, y).transform(X))
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
msg = "'str' object has no attribute 'predict'"
with pytest.raises(AttributeError, match=msg):
getattr(pipeline, "predict")
# Check 'passthrough' step at construction time
exp = 2 * 5
pipeline = Pipeline([("m2", mult2), ("m3", passthrough), ("last", mult5)])
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
def test_pipeline_ducktyping():
pipeline = make_pipeline(Mult(5))
pipeline.predict
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf())
assert not hasattr(pipeline, "predict")
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline("passthrough")
assert pipeline.steps[0] == ("passthrough", "passthrough")
assert not hasattr(pipeline, "predict")
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf(), NoInvTransf())
assert not hasattr(pipeline, "predict")
pipeline.transform
assert not hasattr(pipeline, "inverse_transform")
pipeline = make_pipeline(NoInvTransf(), Transf())
assert not hasattr(pipeline, "predict")
pipeline.transform
assert not hasattr(pipeline, "inverse_transform")
def test_make_pipeline():
t1 = Transf()
t2 = Transf()
pipe = make_pipeline(t1, t2)
assert isinstance(pipe, Pipeline)
assert pipe.steps[0][0] == "transf-1"
assert pipe.steps[1][0] == "transf-2"
pipe = make_pipeline(t1, t2, FitParamT())
assert isinstance(pipe, Pipeline)
assert pipe.steps[0][0] == "transf-1"
assert pipe.steps[1][0] == "transf-2"
assert pipe.steps[2][0] == "fitparamt"
def test_feature_union_weights():
# test feature union with transformer weights
X = iris.data
y = iris.target
pca = PCA(n_components=2, svd_solver="randomized", random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion(
[("pca", pca), ("select", select)], transformer_weights={"pca": 10}
)
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion(
[("pca", pca), ("select", select)], transformer_weights={"pca": 10}
)
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion(
[("mock", Transf()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10},
)
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1], select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1], select.fit_transform(X, y).ravel())
assert X_fit_transformed_wo_method.shape == (X.shape[0], 7)
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion(
[
("words", CountVectorizer(analyzer="word")),
("chars", CountVectorizer(analyzer="char")),
]
)
fs_parallel = FeatureUnion(
[
("words", CountVectorizer(analyzer="word")),
("chars", CountVectorizer(analyzer="char")),
],
n_jobs=2,
)
fs_parallel2 = FeatureUnion(
[
("words", CountVectorizer(analyzer="word")),
("chars", CountVectorizer(analyzer="char")),
],
n_jobs=2,
)
fs.fit(X)
X_transformed = fs.transform(X)
assert X_transformed.shape[0] == len(X)
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert X_transformed.shape == X_transformed_parallel.shape
assert_array_equal(X_transformed.toarray(), X_transformed_parallel.toarray())
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(X_transformed.toarray(), X_transformed_parallel2.toarray())
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(X_transformed.toarray(), X_transformed_parallel2.toarray())
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_feature_union_feature_names(get_names):
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = getattr(ft, get_names)()
for feat in feature_names:
assert "chars__" in feat or "words__" in feat
assert len(feature_names) == 35
ft = FeatureUnion([("tr1", Transf())]).fit([[1]])
msg = re.escape(f"Transformer tr1 (type Transf) does not provide {get_names}")
with pytest.raises(AttributeError, match=msg):
getattr(ft, get_names)()
def test_classes_property():
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
with pytest.raises(AttributeError):
getattr(reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
with pytest.raises(AttributeError):
getattr(clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_set_feature_union_steps(get_names):
mult2 = Mult(2)
mult3 = Mult(3)
mult5 = Mult(5)
if get_names == "get_feature_names":
mult3.get_feature_names = lambda: ["x3"]
mult2.get_feature_names = lambda: ["x2"]
mult5.get_feature_names = lambda: ["x5"]
else: # get_feature_names_out
mult3.get_feature_names_out = lambda input_features: ["x3"]
mult2.get_feature_names_out = lambda input_features: ["x2"]
mult5.get_feature_names_out = lambda input_features: ["x5"]
ft = FeatureUnion([("m2", mult2), ("m3", mult3)])
assert_array_equal([[2, 3]], ft.transform(np.asarray([[1]])))
assert_array_equal(["m2__x2", "m3__x3"], getattr(ft, get_names)())
# Directly setting attr
ft.transformer_list = [("m5", mult5)]
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_array_equal(["m5__x5"], getattr(ft, get_names)())
# Using set_params
ft.set_params(transformer_list=[("mock", mult3)])
assert_array_equal([[3]], ft.transform(np.asarray([[1]])))
assert_array_equal(["mock__x3"], getattr(ft, get_names)())
# Using set_params to replace single step
ft.set_params(mock=mult5)
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_array_equal(["mock__x5"], getattr(ft, get_names)())
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_set_feature_union_step_drop(get_names):
mult2 = Mult(2)
mult3 = Mult(3)
if get_names == "get_feature_names":
mult2.get_feature_names = lambda: ["x2"]
mult3.get_feature_names = lambda: ["x3"]
else: # get_feature_names_out
mult2.get_feature_names_out = lambda input_features: ["x2"]
mult3.get_feature_names_out = lambda input_features: ["x3"]
X = np.asarray([[1]])
ft = FeatureUnion([("m2", mult2), ("m3", mult3)])
assert_array_equal([[2, 3]], ft.fit(X).transform(X))
assert_array_equal([[2, 3]], ft.fit_transform(X))
assert_array_equal(["m2__x2", "m3__x3"], getattr(ft, get_names)())
with pytest.warns(None) as record:
ft.set_params(m2="drop")
assert_array_equal([[3]], ft.fit(X).transform(X))
assert_array_equal([[3]], ft.fit_transform(X))
assert_array_equal(["m3__x3"], getattr(ft, get_names)())
assert not record
with pytest.warns(None) as record:
ft.set_params(m3="drop")
assert_array_equal([[]], ft.fit(X).transform(X))
assert_array_equal([[]], ft.fit_transform(X))
assert_array_equal([], getattr(ft, get_names)())
assert not record
with pytest.warns(None) as record:
# check we can change back
ft.set_params(m3=mult3)
assert_array_equal([[3]], ft.fit(X).transform(X))
assert not record
with pytest.warns(None) as record:
# Check 'drop' step at construction time
ft = FeatureUnion([("m2", "drop"), ("m3", mult3)])
assert_array_equal([[3]], ft.fit(X).transform(X))
assert_array_equal([[3]], ft.fit_transform(X))
assert_array_equal(["m3__x3"], getattr(ft, get_names)())
assert not record
def test_set_feature_union_passthrough():
"""Check the behaviour of setting a transformer to `"passthrough"`."""
mult2 = Mult(2)
mult3 = Mult(3)
X = np.asarray([[1]])
ft = FeatureUnion([("m2", mult2), ("m3", mult3)])
assert_array_equal([[2, 3]], ft.fit(X).transform(X))
assert_array_equal([[2, 3]], ft.fit_transform(X))
ft.set_params(m2="passthrough")
assert_array_equal([[1, 3]], ft.fit(X).transform(X))
assert_array_equal([[1, 3]], ft.fit_transform(X))
ft.set_params(m3="passthrough")
assert_array_equal([[1, 1]], ft.fit(X).transform(X))
assert_array_equal([[1, 1]], ft.fit_transform(X))
# check we can change back
ft.set_params(m3=mult3)
assert_array_equal([[1, 3]], ft.fit(X).transform(X))
assert_array_equal([[1, 3]], ft.fit_transform(X))
# Check 'passthrough' step at construction time
ft = FeatureUnion([("m2", "passthrough"), ("m3", mult3)])
assert_array_equal([[1, 3]], ft.fit(X).transform(X))
assert_array_equal([[1, 3]], ft.fit_transform(X))
X = iris.data
columns = X.shape[1]
pca = PCA(n_components=2, svd_solver="randomized", random_state=0)
ft = FeatureUnion([("passthrough", "passthrough"), ("pca", pca)])
assert_array_equal(X, ft.fit(X).transform(X)[:, :columns])
assert_array_equal(X, ft.fit_transform(X)[:, :columns])
ft.set_params(pca="passthrough")
X_ft = ft.fit(X).transform(X)
assert_array_equal(X_ft, np.hstack([X, X]))
X_ft = ft.fit_transform(X)
assert_array_equal(X_ft, np.hstack([X, X]))
ft.set_params(passthrough=pca)
assert_array_equal(X, ft.fit(X).transform(X)[:, -columns:])
assert_array_equal(X, ft.fit_transform(X)[:, -columns:])
ft = FeatureUnion(
[("passthrough", "passthrough"), ("pca", pca)],
transformer_weights={"passthrough": 2},
)
assert_array_equal(X * 2, ft.fit(X).transform(X)[:, :columns])
assert_array_equal(X * 2, ft.fit_transform(X)[:, :columns])
def test_step_name_validation():
error_message_1 = r"Estimator names must not contain __: got \['a__q'\]"
error_message_2 = r"Names provided are not unique: \['a', 'a'\]"
error_message_3 = r"Estimator names conflict with constructor arguments: \['%s'\]"
bad_steps1 = [("a__q", Mult(2)), ("b", Mult(3))]
bad_steps2 = [("a", Mult(2)), ("a", Mult(3))]
for cls, param in [(Pipeline, "steps"), (FeatureUnion, "transformer_list")]:
# we validate in construction (despite scikit-learn convention)
bad_steps3 = [("a", Mult(2)), (param, Mult(3))]
for bad_steps, message in [
(bad_steps1, error_message_1),
(bad_steps2, error_message_2),
(bad_steps3, error_message_3 % param),
]:
# three ways to make invalid:
# - construction
with pytest.raises(ValueError, match=message):
cls(**{param: bad_steps}).fit([[1]], [1])
# - setattr
est = cls(**{param: [("a", Mult(1))]})
setattr(est, param, bad_steps)
with pytest.raises(ValueError, match=message):
est.fit([[1]], [1])
with pytest.raises(ValueError, match=message):
est.fit_transform([[1]], [1])
# - set_params
est = cls(**{param: [("a", Mult(1))]})
est.set_params(**{param: bad_steps})
with pytest.raises(ValueError, match=message):
est.fit([[1]], [1])
with pytest.raises(ValueError, match=message):
est.fit_transform([[1]], [1])
def test_set_params_nested_pipeline():
estimator = Pipeline([("a", Pipeline([("b", DummyRegressor())]))])
estimator.set_params(a__b__alpha=0.001, a__b=Lasso())
estimator.set_params(a__steps=[("b", LogisticRegression())], a__b__C=5)
def test_pipeline_wrong_memory():
# Test that an error is raised when memory is not a string or a Memory
# instance
X = iris.data
y = iris.target
# Define memory as an integer
memory = 1
cached_pipe = Pipeline([("transf", DummyTransf()), ("svc", SVC())], memory=memory)
msg = re.escape(
"'memory' should be None, a string or have the same interface "
"as joblib.Memory. Got memory='1' instead."
)
with pytest.raises(ValueError, match=msg):
cached_pipe.fit(X, y)
class DummyMemory:
def cache(self, func):
return func
class WrongDummyMemory:
pass
def test_pipeline_with_cache_attribute():
X = np.array([[1, 2]])
pipe = Pipeline([("transf", Transf()), ("clf", Mult())], memory=DummyMemory())
pipe.fit(X, y=None)
dummy = WrongDummyMemory()
pipe = Pipeline([("transf", Transf()), ("clf", Mult())], memory=dummy)
msg = re.escape(
"'memory' should be None, a string or have the same interface "
f"as joblib.Memory. Got memory='{dummy}' instead."
)
with pytest.raises(ValueError, match=msg):
pipe.fit(X)
def test_pipeline_memory():
X = iris.data
y = iris.target
cachedir = mkdtemp()
try:
if parse_version(joblib.__version__) < parse_version("0.12"):
# Deal with change of API in joblib
memory = joblib.Memory(cachedir=cachedir, verbose=10)
else:
memory = joblib.Memory(location=cachedir, verbose=10)
# Test with Transformer + SVC
clf = SVC(probability=True, random_state=0)
transf = DummyTransf()
pipe = Pipeline([("transf", clone(transf)), ("svc", clf)])
cached_pipe = Pipeline([("transf", transf), ("svc", clf)], memory=memory)
# Memoize the transformer at the first fit
cached_pipe.fit(X, y)
pipe.fit(X, y)
# Get the time stamp of the transformer in the cached pipeline
ts = cached_pipe.named_steps["transf"].timestamp_
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X), cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(
pipe.named_steps["transf"].means_, cached_pipe.named_steps["transf"].means_
)
assert not hasattr(transf, "means_")
# Check that we are reading the cache while fitting
# a second time
cached_pipe.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X), cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(
pipe.named_steps["transf"].means_, cached_pipe.named_steps["transf"].means_
)
assert ts == cached_pipe.named_steps["transf"].timestamp_
# Create a new pipeline with cloned estimators
# Check that even changing the name step does not affect the cache hit
clf_2 = SVC(probability=True, random_state=0)
transf_2 = DummyTransf()
cached_pipe_2 = Pipeline(
[("transf_2", transf_2), ("svc", clf_2)], memory=memory
)
cached_pipe_2.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe_2.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe_2.predict_proba(X))
assert_array_equal(
pipe.predict_log_proba(X), cached_pipe_2.predict_log_proba(X)
)
assert_array_equal(pipe.score(X, y), cached_pipe_2.score(X, y))
assert_array_equal(
pipe.named_steps["transf"].means_,
cached_pipe_2.named_steps["transf_2"].means_,
)
assert ts == cached_pipe_2.named_steps["transf_2"].timestamp_
finally:
shutil.rmtree(cachedir)
def test_make_pipeline_memory():
cachedir = mkdtemp()
if parse_version(joblib.__version__) < parse_version("0.12"):
# Deal with change of API in joblib
memory = joblib.Memory(cachedir=cachedir, verbose=10)
else:
memory = joblib.Memory(location=cachedir, verbose=10)
pipeline = make_pipeline(DummyTransf(), SVC(), memory=memory)
assert pipeline.memory is memory
pipeline = make_pipeline(DummyTransf(), SVC())
assert pipeline.memory is None
assert len(pipeline) == 2
shutil.rmtree(cachedir)
class FeatureNameSaver(BaseEstimator):
def fit(self, X, y=None):
self._check_feature_names(X, reset=True)
return self
def transform(self, X, y=None):
return X
def get_feature_names_out(self, input_features=None):
return input_features
def test_features_names_passthrough():
"""Check pipeline.get_feature_names_out with passthrough"""
pipe = Pipeline(
steps=[
("names", FeatureNameSaver()),
("pass", "passthrough"),
("clf", LogisticRegression()),
]
)
iris = load_iris()
pipe.fit(iris.data, iris.target)
assert_array_equal(
pipe[:-1].get_feature_names_out(iris.feature_names), iris.feature_names
)
def test_feature_names_count_vectorizer():
"""Check pipeline.get_feature_names_out with vectorizers"""
pipe = Pipeline(steps=[("vect", CountVectorizer()), ("clf", LogisticRegression())])
y = ["pizza" in x for x in JUNK_FOOD_DOCS]
pipe.fit(JUNK_FOOD_DOCS, y)
assert_array_equal(
pipe[:-1].get_feature_names_out(),
["beer", "burger", "coke", "copyright", "pizza", "the"],
)
assert_array_equal(
pipe[:-1].get_feature_names_out("nonsense_is_ignored"),
["beer", "burger", "coke", "copyright", "pizza", "the"],
)
def test_pipeline_feature_names_out_error_without_definition():
"""Check that error is raised when a transformer does not define
`get_feature_names_out`."""
pipe = Pipeline(steps=[("notrans", NoTrans())])
iris = load_iris()
pipe.fit(iris.data, iris.target)
msg = "does not provide get_feature_names_out"
with pytest.raises(AttributeError, match=msg):
pipe.get_feature_names_out()
def test_pipeline_param_error():
clf = make_pipeline(LogisticRegression())
with pytest.raises(
ValueError, match="Pipeline.fit does not accept the sample_weight parameter"
):
clf.fit([[0], [0]], [0, 1], sample_weight=[1, 1])
parameter_grid_test_verbose = (
(est, pattern, method)
for (est, pattern), method in itertools.product(
[
(
Pipeline([("transf", Transf()), ("clf", FitParamT())]),
r"\[Pipeline\].*\(step 1 of 2\) Processing transf.* total=.*\n"
r"\[Pipeline\].*\(step 2 of 2\) Processing clf.* total=.*\n$",
),
(
Pipeline([("transf", Transf()), ("noop", None), ("clf", FitParamT())]),
r"\[Pipeline\].*\(step 1 of 3\) Processing transf.* total=.*\n"
r"\[Pipeline\].*\(step 2 of 3\) Processing noop.* total=.*\n"
r"\[Pipeline\].*\(step 3 of 3\) Processing clf.* total=.*\n$",
),
(
Pipeline(
[
("transf", Transf()),
("noop", "passthrough"),
("clf", FitParamT()),
]
),
r"\[Pipeline\].*\(step 1 of 3\) Processing transf.* total=.*\n"
r"\[Pipeline\].*\(step 2 of 3\) Processing noop.* total=.*\n"
r"\[Pipeline\].*\(step 3 of 3\) Processing clf.* total=.*\n$",
),
(
Pipeline([("transf", Transf()), ("clf", None)]),
r"\[Pipeline\].*\(step 1 of 2\) Processing transf.* total=.*\n"
r"\[Pipeline\].*\(step 2 of 2\) Processing clf.* total=.*\n$",
),
(
Pipeline([("transf", None), ("mult", Mult())]),
r"\[Pipeline\].*\(step 1 of 2\) Processing transf.* total=.*\n"
r"\[Pipeline\].*\(step 2 of 2\) Processing mult.* total=.*\n$",
),
(
Pipeline([("transf", "passthrough"), ("mult", Mult())]),
r"\[Pipeline\].*\(step 1 of 2\) Processing transf.* total=.*\n"
r"\[Pipeline\].*\(step 2 of 2\) Processing mult.* total=.*\n$",
),
(
FeatureUnion([("mult1", Mult()), ("mult2", Mult())]),
r"\[FeatureUnion\].*\(step 1 of 2\) Processing mult1.* total=.*\n"
r"\[FeatureUnion\].*\(step 2 of 2\) Processing mult2.* total=.*\n$",
),
(
FeatureUnion([("mult1", "drop"), ("mult2", Mult()), ("mult3", "drop")]),
r"\[FeatureUnion\].*\(step 1 of 1\) Processing mult2.* total=.*\n$",
),
],
["fit", "fit_transform", "fit_predict"],
)
if hasattr(est, method)
and not (
method == "fit_transform"
and hasattr(est, "steps")
and isinstance(est.steps[-1][1], FitParamT)
)
)
@pytest.mark.parametrize("est, pattern, method", parameter_grid_test_verbose)
def test_verbose(est, method, pattern, capsys):
func = getattr(est, method)
X = [[1, 2, 3], [4, 5, 6]]
y = [[7], [8]]
est.set_params(verbose=False)
func(X, y)
assert not capsys.readouterr().out, "Got output for verbose=False"
est.set_params(verbose=True)
func(X, y)
assert re.match(pattern, capsys.readouterr().out)
def test_n_features_in_pipeline():
# make sure pipelines delegate n_features_in to the first step
X = [[1, 2], [3, 4], [5, 6]]
y = [0, 1, 2]
ss = StandardScaler()
gbdt = HistGradientBoostingClassifier()
pipe = make_pipeline(ss, gbdt)
assert not hasattr(pipe, "n_features_in_")
pipe.fit(X, y)
assert pipe.n_features_in_ == ss.n_features_in_ == 2
# if the first step has the n_features_in attribute then the pipeline also
# has it, even though it isn't fitted.
ss = StandardScaler()
gbdt = HistGradientBoostingClassifier()
pipe = make_pipeline(ss, gbdt)
ss.fit(X, y)
assert pipe.n_features_in_ == ss.n_features_in_ == 2
assert not hasattr(gbdt, "n_features_in_")
def test_n_features_in_feature_union():
# make sure FeatureUnion delegates n_features_in to the first transformer
X = [[1, 2], [3, 4], [5, 6]]
y = [0, 1, 2]
ss = StandardScaler()
fu = make_union(ss)
assert not hasattr(fu, "n_features_in_")
fu.fit(X, y)
assert fu.n_features_in_ == ss.n_features_in_ == 2
# if the first step has the n_features_in attribute then the feature_union
# also has it, even though it isn't fitted.
ss = StandardScaler()
fu = make_union(ss)
ss.fit(X, y)
assert fu.n_features_in_ == ss.n_features_in_ == 2
def test_feature_union_fit_params():
# Regression test for issue: #15117
class Dummy(TransformerMixin, BaseEstimator):
def fit(self, X, y=None, **fit_params):
if fit_params != {"a": 0}:
raise ValueError
return self
def transform(self, X, y=None):
return X
X, y = iris.data, iris.target
t = FeatureUnion([("dummy0", Dummy()), ("dummy1", Dummy())])
with pytest.raises(ValueError):
t.fit(X, y)
with pytest.raises(ValueError):
t.fit_transform(X, y)
t.fit(X, y, a=0)
t.fit_transform(X, y, a=0)
def test_pipeline_missing_values_leniency():
# check that the pipeline leaves missing-value validation to
# the underlying transformers and predictors.
X, y = iris.data, iris.target
mask = np.random.choice([1, 0], X.shape, p=[0.1, 0.9]).astype(bool)
X[mask] = np.nan
pipe = make_pipeline(SimpleImputer(), LogisticRegression())
assert pipe.fit(X, y).score(X, y) > 0.4
def test_feature_union_warns_unknown_transformer_weight():
# Warn user when transformer_weights contains a key not present in
# transformer_list
X = [[1, 2], [3, 4], [5, 6]]
y = [0, 1, 2]
transformer_list = [("transf", Transf())]
# Transformer weights dictionary with incorrect name
weights = {"transformer": 1}
expected_msg = (
'Attempting to weight transformer "transformer", '
"but it is not present in transformer_list."
)
union = FeatureUnion(transformer_list, transformer_weights=weights)
with pytest.raises(ValueError, match=expected_msg):
union.fit(X, y)
# TODO: Remove in 1.2 when get_feature_names is removed
def test_feature_union_get_feature_names_deprecated():
"""Check that get_feature_names is deprecated"""
msg = "get_feature_names is deprecated in 1.0"
mult2 = Mult(2)
mult2.get_feature_names = lambda: ["x2"]
ft = FeatureUnion([("m2", mult2)])
with pytest.warns(FutureWarning, match=msg):
ft.get_feature_names()
@pytest.mark.parametrize("passthrough", [None, "passthrough"])
def test_pipeline_get_tags_none(passthrough):
# Checks that tags are set correctly when the first transformer is None or
# 'passthrough'
# Non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/18815
pipe = make_pipeline(passthrough, SVC())
assert not pipe._get_tags()["pairwise"]
# FIXME: Replace this test with a full `check_estimator` once we have API only
# checks.
@pytest.mark.parametrize("Predictor", [MinimalRegressor, MinimalClassifier])
def test_search_cv_using_minimal_compatible_estimator(Predictor):
# Check that third-party library estimators can be part of a pipeline
# and tuned by grid-search without inheriting from BaseEstimator.
rng = np.random.RandomState(0)
X, y = rng.randn(25, 2), np.array([0] * 5 + [1] * 20)
model = Pipeline(
[("transformer", MinimalTransformer()), ("predictor", Predictor())]
)
model.fit(X, y)
y_pred = model.predict(X)
if is_classifier(model):
assert_array_equal(y_pred, 1)
assert model.score(X, y) == pytest.approx(accuracy_score(y, y_pred))
else:
assert_allclose(y_pred, y.mean())
assert model.score(X, y) == pytest.approx(r2_score(y, y_pred))
def test_pipeline_check_if_fitted():
class Estimator(BaseEstimator):
def fit(self, X, y):
self.fitted_ = True
return self
pipeline = Pipeline([("clf", Estimator())])
with pytest.raises(NotFittedError):
check_is_fitted(pipeline)
pipeline.fit(iris.data, iris.target)
check_is_fitted(pipeline)
def test_pipeline_get_feature_names_out_passes_names_through():
"""Check that pipeline passes names through.
Non-regression test for #21349.
"""
X, y = iris.data, iris.target
class AddPrefixStandardScalar(StandardScaler):
def get_feature_names_out(self, input_features=None):
names = super().get_feature_names_out(input_features=input_features)
return np.asarray([f"my_prefix_{name}" for name in names], dtype=object)
pipe = make_pipeline(AddPrefixStandardScalar(), StandardScaler())
pipe.fit(X, y)
input_names = iris.feature_names
feature_names_out = pipe.get_feature_names_out(input_names)
assert_array_equal(feature_names_out, [f"my_prefix_{name}" for name in input_names])
| 34.084778
| 88
| 0.650386
|
ffd98cfc7935f076db06a07aab426e25846391fa
| 4,554
|
py
|
Python
|
drf_admin/apps/system/views/jobs.py
|
DingGuodong/drf_admin
|
1a2a5d0a568ddd37718ccfc169677e6e31070641
|
[
"MIT"
] | 228
|
2020-06-20T10:07:03.000Z
|
2022-03-29T07:11:01.000Z
|
drf_admin/apps/system/views/jobs.py
|
DingGuodong/drf_admin
|
1a2a5d0a568ddd37718ccfc169677e6e31070641
|
[
"MIT"
] | 25
|
2020-07-16T12:29:04.000Z
|
2022-02-16T06:31:06.000Z
|
drf_admin/apps/system/views/jobs.py
|
DingGuodong/drf_admin
|
1a2a5d0a568ddd37718ccfc169677e6e31070641
|
[
"MIT"
] | 82
|
2020-10-26T07:14:15.000Z
|
2022-03-29T07:53:23.000Z
|
# -*- coding: utf-8 -*-
"""
@author : Wang Meng
@github : https://github.com/tianpangji
@software : PyCharm
@file : jobs.py
@create : 2020/11/25 20:56
"""
from inspect import isfunction, getmembers
from apscheduler.jobstores.base import JobLookupError
from django_apscheduler.models import DjangoJobExecution
from django_filters.rest_framework import DjangoFilterBackend
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status
from rest_framework.filters import SearchFilter
from rest_framework.generics import ListAPIView, ListCreateAPIView, DestroyAPIView
from rest_framework.response import Response
from rest_framework import mixins
from system.jobs import tasks
from system.jobs.run import scheduler
from system.serializers.jobs import JobsListSerializer, JobCreateSerializer, JobFunctionsSerializer, \
JobUpdateSerializer, JobExecutionsSerializer
class JobFunctionsListAPIView(ListAPIView):
"""
get:
Task scheduling -- schedulable task function list
Get the list of schedulable task functions, status: 200 (success), return: task function list
"""
serializer_class = JobFunctionsSerializer
filter_backends = (SearchFilter,)
search_fields = ('name', 'desc')
def get_queryset(self):
return list(filter(None, [obj if isfunction(obj[1]) and obj[0] != 'single_task' else None for obj in
getmembers(tasks)]))
def filter_queryset(self, queryset):
search_params = self.request.query_params.get('search')
if search_params:
obj_list = list()
for obj in queryset:
doc = '' if obj[1].__doc__ is None else obj[1].__doc__
if search_params in obj[0] or search_params in doc:
obj_list.append(obj)
return obj_list
else:
return queryset
class JobsListCreateAPIView(ListCreateAPIView):
"""
get:
Task scheduling -- job list
Get the list of scheduled jobs, status: 200 (success), return: job list
post:
Task scheduling -- create
Create a job, status: 201 (success), return: the created job's info
delete:
Task scheduling -- clear all jobs
Clear all jobs, status: 204 (success), return: None
"""
filter_backends = (SearchFilter,)
search_fields = ('name', 'desc')
def filter_queryset(self, queryset):
search_params = self.request.query_params.get('search')
if search_params:
obj_list = list()
for obj in queryset:
doc = tasks.__dict__.get(obj.name).__doc__
if search_params in obj.name or search_params in doc:
obj_list.append(obj)
return obj_list
else:
return queryset
def get_queryset(self):
return scheduler.get_jobs()
def get_serializer_class(self):
if self.request.method.lower() == 'get':
return JobsListSerializer
else:
return JobCreateSerializer
@swagger_auto_schema(operation_id='system_jobs_deletes')
def delete(self, request, *args, **kwargs):
scheduler.remove_all_jobs()
return Response(status=status.HTTP_204_NO_CONTENT)
class JobUpdateDestroyAPIView(mixins.UpdateModelMixin, DestroyAPIView):
"""
patch:
Task scheduling -- start/stop a job
Start/stop a job, status: 200 (success), return: job list
delete:
Task scheduling -- delete
Delete a job, status: 204 (success), return: None
"""
serializer_class = JobUpdateSerializer
def get_queryset(self):
return scheduler.get_jobs()
def get_job_id(self):
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
return self.kwargs[lookup_url_kwarg]
def patch(self, request, *args, **kwargs):
job = scheduler.get_job(self.get_job_id())
if not job:
return Response(data={'detail': '调度任务不存在'}, status=status.HTTP_400_BAD_REQUEST)
serializer = self.get_serializer(job, data=request.data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(data=serializer.data, status=status.HTTP_200_OK)
def delete(self, request, *args, **kwargs):
try:
scheduler.remove_job(self.get_job_id())
except JobLookupError:
return Response(data={'detail': '调度任务不存在'}, status=status.HTTP_400_BAD_REQUEST)
return Response(status=status.HTTP_204_NO_CONTENT)
class JobExecutionsListAPIView(ListAPIView):
"""
get:
Task scheduling -- job execution history
Get the job execution history, status: 200 (success), return: job execution records
"""
serializer_class = JobExecutionsSerializer
queryset = DjangoJobExecution.objects.all()
filter_backends = (DjangoFilterBackend,)
filter_fields = ['job__id']
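# Illustrative sketch: the views above assume that ``system.jobs.tasks`` exposes plain
# module-level functions (anything except ``single_task``); JobFunctionsListAPIView lists
# them and JobsListCreateAPIView registers them with APScheduler. A hypothetical task in
# system/jobs/tasks.py might look like:
#
#     def example_task():
#         """Example task: periodic cleanup."""
#         ...
#
# and registering it as a recurring job would be roughly:
#
#     scheduler.add_job(example_task, trigger='interval', minutes=30, id='example_task')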
| 30.15894
| 108
| 0.669082
|
b862a05de4a03db72986267b62e056d40e9e6374
| 1,996
|
py
|
Python
|
lib/python2.7/keyword.py
|
eladkarako/1st_font_webkit_gen
|
7e91abda7988b7b1beec28021e43f0dab5ca199c
|
[
"Unlicense"
] | 3
|
2018-12-25T15:33:48.000Z
|
2019-12-04T20:32:32.000Z
|
lib/python2.7/keyword.py
|
eladkarako/1st_font_webkit_gen
|
7e91abda7988b7b1beec28021e43f0dab5ca199c
|
[
"Unlicense"
] | null | null | null |
lib/python2.7/keyword.py
|
eladkarako/1st_font_webkit_gen
|
7e91abda7988b7b1beec28021e43f0dab5ca199c
|
[
"Unlicense"
] | 1
|
2020-11-04T07:54:34.000Z
|
2020-11-04T07:54:34.000Z
|
#!/usr/bin/env python2
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
./python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]
kwlist = [
#--start keywords--
'and',
'as',
'assert',
'break',
'class',
'continue',
'def',
'del',
'elif',
'else',
'except',
'exec',
'finally',
'for',
'from',
'global',
'if',
'import',
'in',
'is',
'lambda',
'not',
'or',
'pass',
'print',
'raise',
'return',
'try',
'while',
'with',
'yield',
#--end keywords--
]
iskeyword = frozenset(kwlist).__contains__
def main():
import sys, re
args = sys.argv[1:]
iptfile = args and args[0] or "Python/graminit.c"
if len(args) > 1: optfile = args[1]
else: optfile = "Lib/keyword.py"
# scan the source file for keywords
fp = open(iptfile)
strprog = re.compile('"([^"]+)"')
lines = []
for line in fp:
if '{1, "' in line:
match = strprog.search(line)
if match:
lines.append(" '" + match.group(1) + "',\n")
fp.close()
lines.sort()
# load the output skeleton from the target
fp = open(optfile)
format = fp.readlines()
fp.close()
# insert the lines of keywords
try:
start = format.index("#--start keywords--\n") + 1
end = format.index("#--end keywords--\n")
format[start:end] = lines
except ValueError:
sys.stderr.write("target does not contain format markers\n")
sys.exit(1)
# write the output file
fp = open(optfile, 'w')
fp.write(''.join(format))
fp.close()
if __name__ == "__main__":
main()
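# Illustrative sketch of the public API defined above (Python 2 semantics, as in this file):
#
#     >>> iskeyword('lambda')
#     True
#     >>> iskeyword('print')   # 'print' is still a keyword in Python 2
#     True
#     >>> 'yield' in kwlist
#     True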
| 21.234043
| 68
| 0.507014
|
f7168e80f5e8ed5edf7f7288316f9a3d0d8192e3
| 5,675
|
py
|
Python
|
nimiqclient/models/block.py
|
rraallvv/python-client
|
65d0c3f835ed8ce3ba6bfa2565cac61f7da6b748
|
[
"Apache-2.0"
] | 4
|
2020-11-03T21:13:13.000Z
|
2022-01-18T08:40:27.000Z
|
nimiqclient/models/block.py
|
rraallvv/python-client
|
65d0c3f835ed8ce3ba6bfa2565cac61f7da6b748
|
[
"Apache-2.0"
] | 1
|
2020-08-09T21:36:02.000Z
|
2020-08-09T21:36:02.000Z
|
nimiqclient/models/block.py
|
rraallvv/python-client
|
65d0c3f835ed8ce3ba6bfa2565cac61f7da6b748
|
[
"Apache-2.0"
] | 1
|
2020-08-03T01:05:44.000Z
|
2020-08-03T01:05:44.000Z
|
__all__ = ["Block", "BlockTemplateHeader", "BlockTemplateBody", "BlockTemplate"]
from .transaction import Transaction
class Block:
"""
Block returned by the server.
:param number: Height of the block.
:type number: int
:param hash: Hex-encoded 32-byte hash of the block.
:type hash: str
:param pow: Hex-encoded 32-byte Proof-of-Work hash of the block.
:type pow: str
:param parentHash: Hex-encoded 32-byte hash of the predecessor block.
:type parentHash: str
:param nonce: The nonce of the block used to fulfill the Proof-of-Work.
:type nonce: int
:param bodyHash: Hex-encoded 32-byte hash of the block body Merkle root.
:type bodyHash: str
:param accountsHash: Hex-encoded 32-byte hash of the accounts tree root.
:type accountsHash: str
:param difficulty: Block difficulty, encoded as decimal number in string.
:type difficulty: str
:param timestamp: UNIX timestamp of the block.
:type timestamp: int
:param confirmations: Number of confirmations for this block (number of blocks mined on top of it).
:type confirmations: int
:param miner: Hex-encoded 20 byte address of the miner of the block.
:type miner: str
:param minerAddress: User friendly address (NQ-address) of the miner of the block.
:type minerAddress: str
:param extraData: Hex-encoded value of the extra data field, maximum of 255 bytes.
:type extraData: str
:param size: Block size in byte.
:type size: int
:param transactions: List of transactions. Either represented by the transaction hash or a Transaction object.
:type transactions: list of (Transaction or str)
"""
def __init__(
self,
number,
hash,
pow,
parentHash,
nonce,
bodyHash,
accountsHash,
difficulty,
timestamp,
confirmations,
miner,
minerAddress,
extraData,
size,
transactions,
):
self.number = number
self.hash = hash
self.pow = pow
self.parentHash = parentHash
self.nonce = nonce
self.bodyHash = bodyHash
self.accountsHash = accountsHash
self.difficulty = difficulty
self.timestamp = timestamp
self.confirmations = confirmations
self.miner = miner
self.minerAddress = minerAddress
self.extraData = extraData
self.size = size
for index, transaction in enumerate(transactions):
tt = type(transaction)
if tt is not str and tt is not Transaction:
if tt is dict:
transactions[index] = Transaction(**transaction)
else:
from ..nimiq_client import InternalErrorException
raise InternalErrorException(
"Couldn't parse Transaction {0}".format(transaction)
)
self.transactions = transactions
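# Illustrative sketch: Block objects are typically built straight from the JSON dict
# returned by the node, since the field names map 1:1 onto the constructor arguments
# documented above. The values below are hypothetical placeholders showing the shape only.
def _example_block():
    payload = {
        "number": 1, "hash": "00" * 32, "pow": "00" * 32, "parentHash": "00" * 32,
        "nonce": 0, "bodyHash": "00" * 32, "accountsHash": "00" * 32,
        "difficulty": "1", "timestamp": 0, "confirmations": 10,
        "miner": "00" * 20, "minerAddress": "NQ00 EXAMPLE", "extraData": "",
        "size": 0, "transactions": [],
    }
    return Block(**payload)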
class BlockTemplateHeader:
"""
Block template header returned by the server.
:param version: Version in block header.
:type version: int
:param prevHash: 32-byte hex-encoded hash of the previous block.
:type prevHash: str
:param interlinkHash: 32-byte hex-encoded hash of the interlink.
:type interlinkHash: str
:param accountsHash: 32-byte hex-encoded hash of the accounts tree.
:type accountsHash: str
:param nBits: Compact form of the hash target for this block.
:type nBits: int
:param height: Height of the block in the block chain (also known as block number).
:type height: int
"""
def __init__(self, version, prevHash, interlinkHash, accountsHash, nBits, height):
self.version = version
self.prevHash = prevHash
self.interlinkHash = interlinkHash
self.accountsHash = accountsHash
self.nBits = nBits
self.height = height
class BlockTemplateBody:
"""
Block template body returned by the server.
:param hash: 32-byte hex-encoded hash of the block body.
:type hash: str
:param minerAddr: 20-byte hex-encoded miner address.
:type minerAddr: str
:param extraData: Hex-encoded value of the extra data field.
:type extraData: str
:param transactions: List of hex-encoded transactions for this block.
:type transactions: str
:param prunedAccounts: List of hex-encoded pruned accounts for this block.
:type prunedAccounts: str
:param merkleHashes: List of hex-encoded hashes that verify the path of the miner address in the merkle tree. This can be used to change the miner address easily.
:type merkleHashes: str
"""
def __init__(
self, hash, minerAddr, extraData, transactions, prunedAccounts, merkleHashes
):
self.hash = hash
self.minerAddr = minerAddr
self.extraData = extraData
self.transactions = transactions
self.prunedAccounts = prunedAccounts
self.merkleHashes = merkleHashes
class BlockTemplate:
"""
Block template returned by the server.
:param header: Block template header returned by the server.
:type header: BlockTemplateHeader
:param interlink: Hex-encoded interlink.
:type interlink: str
:param body: Block template body returned by the server.
:type body: BlockTemplateBody
:param target: Compact form of the hash target to submit a block to this client.
:type target: int
"""
def __init__(self, header, interlink, body, target):
self.header = header
self.interlink = interlink
self.body = body
self.target = target
| 34.815951
| 166
| 0.66185
|
7aefd6f8acbcfef3744c9277d0b9b7f9f45cc77a
| 571
|
py
|
Python
|
chores/create_weekly_lists.py
|
beegisfay/home-automation
|
4423b36500c946182619f73d07b483623f920ea4
|
[
"MIT"
] | null | null | null |
chores/create_weekly_lists.py
|
beegisfay/home-automation
|
4423b36500c946182619f73d07b483623f920ea4
|
[
"MIT"
] | 1
|
2020-07-02T18:24:25.000Z
|
2020-07-02T18:24:25.000Z
|
chores/create_weekly_lists.py
|
beegisfay/home-automation
|
4423b36500c946182619f73d07b483623f920ea4
|
[
"MIT"
] | null | null | null |
# This code sample uses the 'requests' library:
# http://docs.python-requests.org
import requests
import os
url = "https://api.trello.com/1/lists"
board_pos = int(os.environ.get("DAYS_OUT")) + 2
query = {
'key': os.environ.get("TRELLO_KEY"),
'token': os.environ.get("TRELLO_TOKEN"),
'name': os.environ.get("FORMATTED_DATE"),
'idBoard': os.environ.get("TRELLO_BOARD"),
'idListSource': os.environ.get("TRELLO_DAILYTEMPLATE_LIST"),
'pos': board_pos
}
print(query)
response = requests.request(
"POST",
url,
params=query
)
print(response.text)
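# Illustrative sketch (hypothetical hardening, using the same ``requests`` response
# object): check the status before trusting the body.
#
#     if not response.ok:
#         raise SystemExit("Trello list creation failed: %s" % response.text)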
| 21.148148
| 63
| 0.684764
|
40d96461ac318310c5658d37611b850977c5203b
| 1,711
|
py
|
Python
|
src/models/appbasemodel.py
|
solnsumei/properties
|
45361b7d46a5ac34931f3ed24bb6c5eb7fc8a81b
|
[
"MIT"
] | null | null | null |
src/models/appbasemodel.py
|
solnsumei/properties
|
45361b7d46a5ac34931f3ed24bb6c5eb7fc8a81b
|
[
"MIT"
] | null | null | null |
src/models/appbasemodel.py
|
solnsumei/properties
|
45361b7d46a5ac34931f3ed24bb6c5eb7fc8a81b
|
[
"MIT"
] | null | null | null |
from src.utils.status import Status
from slugify import slugify
from tortoise import fields, models
class AppBaseModel(models.Model):
id = fields.IntField(pk=True)
status = fields.CharEnumField(Status, default=Status.ACTIVE)
created_at = fields.DatetimeField(null=True, auto_now_add=True)
modified_at = fields.DatetimeField(null=True, auto_now=True)
""" Database methods """
@classmethod
async def create_one(cls, item):
return await cls.create(**item.dict())
@classmethod
async def find_by(cls, **kwargs):
return await cls.filter(**kwargs).all()
@classmethod
async def find_one(cls, **kwargs):
return await cls.filter(**kwargs).first()
@classmethod
async def update_one(cls, _id: int, item):
await cls.filter(id=_id).update(**item.dict(exclude_unset=True))
return await cls.get(id=_id)
@classmethod
async def delete_one(cls, _id: int) -> int:
deleted_count = await cls.filter(id=_id).delete()
return deleted_count
class Meta:
__abstract__ = True
class SluggableModel(AppBaseModel):
slug = fields.CharField(max_length=70)
""" Database methods """
@classmethod
async def create_one(cls, item):
return await cls.create(**item.dict(), slug=cls.make_slug(item.name))
@classmethod
async def update_one(cls, _id: int, item):
await cls.filter(id=_id).update(
**item.dict(exclude_unset=True),
slug=cls.make_slug(item.name)
)
return await cls.get(id=_id)
""" Utility methods """
@classmethod
def make_slug(cls, title: str):
return slugify(title)
class Meta:
__abstract__ = True
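# Illustrative sketch: a concrete model only needs to subclass one of the abstract bases
# above to inherit the CRUD helpers. ``Category`` is hypothetical (left as a comment so
# that importing this module does not register an extra model with Tortoise):
#
#     class Category(SluggableModel):
#         name = fields.CharField(max_length=70)
#
# Typical use inside an async handler, given a Pydantic-style ``item`` with ``.dict()``:
#
#     category = await Category.create_one(item)  # create_one also derives ``slug`` from ``item.name``
#     match = await Category.find_one(slug=category.slug)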
| 27.596774
| 77
| 0.653419
|
b8a8d3cc8dd894229616164399685c0a3821e63c
| 1,364
|
py
|
Python
|
project/max_subarray.py
|
gradeawarrior/python-interview-problems
|
ede738df98f979c45b6657aa6147f0fd5cbfc3dc
|
[
"Apache-2.0"
] | null | null | null |
project/max_subarray.py
|
gradeawarrior/python-interview-problems
|
ede738df98f979c45b6657aa6147f0fd5cbfc3dc
|
[
"Apache-2.0"
] | null | null | null |
project/max_subarray.py
|
gradeawarrior/python-interview-problems
|
ede738df98f979c45b6657aa6147f0fd5cbfc3dc
|
[
"Apache-2.0"
] | null | null | null |
"""
Given an integer array nums, find the contiguous subarray (containing at least one number) which
has the largest sum and return its sum.
Example:
Input: [-2,1,-3,4,-1,2,1,-5,4],
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6.
Follow up:
If you have figured out the O(n) solution, try coding another solution using the divide and conquer
approach, which is more subtle.
"""
class Solution(object):
def maxSubArrayOptimized(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums: raise ValueError()
elif len(nums) == 1: return nums[0]
result = max(nums)
dp = [nums[0]]
for i in xrange(1, len(nums)):
dp.append(nums[i] + (dp[i-1] if dp[i-1] > 0 else 0))
if dp[i] > result: result = dp[i]
return result
def maxSubArrayUnOptimized(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums: raise ValueError()
elif len(nums) == 1: return nums[0]
result = max(nums)
idx1 = 0
while idx1 < len(nums)-1:
idx2 = idx1 + 1
while idx2 < len(nums):
current = sum(nums[idx1:idx2+1])
if current > result: result = current
idx2 += 1
idx1 += 1
return result
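# Illustrative sketch: the docstring's follow-up asks for a divide-and-conquer variant.
# A common O(n log n) formulation (compatible with the Python 2 style used above) takes
# the best of the left half, the right half, and the best run crossing the middle.
def max_sub_array_divide_conquer(nums, lo=0, hi=None):
    if hi is None: hi = len(nums) - 1
    if lo == hi: return nums[lo]
    mid = (lo + hi) // 2
    # best sum of a suffix ending at mid
    best_left, total = nums[mid], 0
    for i in range(mid, lo - 1, -1):
        total += nums[i]
        best_left = max(best_left, total)
    # best sum of a prefix starting at mid + 1
    best_right, total = nums[mid + 1], 0
    for i in range(mid + 1, hi + 1):
        total += nums[i]
        best_right = max(best_right, total)
    crossing = best_left + best_right
    return max(max_sub_array_divide_conquer(nums, lo, mid),
               max_sub_array_divide_conquer(nums, mid + 1, hi),
               crossing)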
| 26.745098
| 99
| 0.542522
|
5759f359dbd15dd951857b7935603e74a42c789f
| 10,080
|
py
|
Python
|
pychron/experiment/image_browser.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 31
|
2016-03-07T02:38:17.000Z
|
2022-02-14T18:23:43.000Z
|
pychron/experiment/image_browser.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 1,626
|
2015-01-07T04:52:35.000Z
|
2022-03-25T19:15:59.000Z
|
pychron/experiment/image_browser.py
|
UIllinoisHALPychron/pychron
|
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
|
[
"Apache-2.0"
] | 26
|
2015-05-23T00:10:06.000Z
|
2022-03-07T16:51:57.000Z
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
from chaco.api import ArrayPlotData, Plot, HPlotContainer
from chaco.tools.api import ZoomTool, PanTool
from chaco.tools.image_inspector_tool import ImageInspectorOverlay, ImageInspectorTool
from enable.component import Component
from enable.component_editor import ComponentEditor
from traits.api import (
HasTraits,
Instance,
List,
Str,
Bool,
on_trait_change,
String,
Button,
Dict,
Any,
)
from traitsui.api import (
View,
Item,
ListStrEditor,
HGroup,
VGroup,
spring,
VSplit,
Group,
)
# ============= standard library imports ========================
import Image
from numpy import array
import os
import six.moves.http_client
# ============= local library imports ==========================
from pychron.core.ui.custom_label_editor import CustomLabel
from pychron.database.isotope_database_manager import IsotopeDatabaseManager
from pychron.paths import paths
PORT = 8083
# TEST_IMAGE = Image.open(open('/Users/ross/Sandbox/snapshot001.jpg'))
# TEST_IMAGE = ImageData.fromfile('/Users/ross/Sandbox/foo.png')
class ImageContainer(HasTraits):
container = Instance(HPlotContainer, ())
name = String
def traits_view(self):
v = View(
VGroup(
HGroup(
spring,
CustomLabel(
"name",
color="maroon",
size=16,
height=-25,
width=100,
),
spring,
),
Item("container", show_label=False, editor=ComponentEditor()),
)
)
return v
class ImageSpec(HasTraits):
name = Str
note = Str
def traits_view(self):
v = View(
VGroup(
Item("name"),
Group(
Item("note", style="custom", show_label=False),
show_border=True,
label="Note",
),
)
)
return v
class ImageEditor(HasTraits):
names = List
selected = Str
save_db = Button("Save to DB")
image_spec = Instance(ImageSpec)
image_specs = Dict
db = Any
# ===============================================================================
# handlers
# ===============================================================================
def _selected_changed(self):
if self.selected in self.image_specs:
spec = self.image_specs[self.selected]
else:
spec = ImageSpec(name=self.selected)
self.image_specs[self.selected] = spec
self.image_spec = spec
def _save_db_fired(self):
db = self.db
print(db)
def traits_view(self):
v = View(
VSplit(
Item(
"names",
show_label=False,
editor=ListStrEditor(
editable=False, selected="selected", operations=[]
),
height=0.6,
),
Item("image_spec", show_label=False, style="custom", height=0.4),
),
Item("save_db", show_label=False),
)
return v
class ImageBrowser(IsotopeDatabaseManager):
# db = Instance(IsotopeAdapter)
image_container = Instance(ImageContainer, ())
image_editor = Instance(ImageEditor)
plot = Instance(Component)
# names = List
# selected = Str
use_cache = Bool(True)
cache_dir = paths.image_cache_dir
_conn = None
def _image_editor_default(self):
im = ImageEditor(db=self.db)
return im
def _is_cached(self, p):
p = os.path.join(self.cache_dir, p)
return os.path.isfile(p)
def load_from_remote_source(self, name):
if self._is_cached(name):
data = self._get_cached(name)
else:
data = self._get_remote_file(name)
self._load_image_data(data)
def load_remote_directory(self, name):
self.info("retrieve contents of remote directory {}".format(name))
resp = self._get(name)
if resp:
htxt = resp.read()
for li in htxt.split("\n"):
if li.startswith("<li>"):
args = li[4:].split(">")
name, _tail = args[1].split("<")
self.image_editor.names.append(name)
return True
def _connection_factory(self, reset=False):
if reset or self._conn is None:
host, port = "localhost", 8081
url = "{}:{}".format(host, port)
conn = six.moves.http_client.HTTPConnection(url)
else:
conn = self._conn
self._conn = conn
return conn
# def _get(self, name):
# conn = self._connection_factory()
# conn.request('GET', '/{}'.format(name))
# return conn.getresponse()
# def _get_remote_file(self, name):
# self.info('retrieve {} from remote directory'.format(name))
# resp = self._get(name)
#
# buf = StringIO()
# buf.write(resp.read())
# buf.seek(0)
# im = Image.open(buf)
# im = im.convert('RGB')
#
# if self.use_cache:
# buf.seek(0)
# if os.path.isdir(self.cache_dir):
# with open(os.path.join(self.cache_dir, name), 'w') as fp:
# fp.write(buf.read())
# else:
# self.info('cache directory does not exist. {}'.format(self.cache_dir))
#
# buf.close()
#
# return array(im)
def _get_cached(self, name):
self.info("retrieve {} from cache directory".format(name))
p = os.path.join(self.cache_dir, name)
with open(p, "r") as rfile:
im = Image.open(rfile)
im = im.convert("RGB")
return array(im)
def _load_image_data(self, data):
cont = HPlotContainer()
pd = ArrayPlotData()
plot = Plot(data=pd, padding=[30, 5, 5, 30], default_origin="top left")
pd.set_data("img", data)
img_plot = plot.img_plot(
"img",
)[0]
self._add_inspector(img_plot)
self._add_tools(img_plot)
cont.add(plot)
cont.request_redraw()
self.image_container.container = cont
def _add_inspector(self, img_plot):
imgtool = ImageInspectorTool(img_plot)
img_plot.tools.append(imgtool)
overlay = ImageInspectorOverlay(
component=img_plot,
image_inspector=imgtool,
bgcolor="white",
border_visible=True,
)
img_plot.overlays.append(overlay)
#
def _add_tools(self, img_plot):
zoom = ZoomTool(component=img_plot, tool_mode="box", always_on=False)
pan = PanTool(component=img_plot, restrict_to_data=True)
img_plot.tools.append(pan)
img_plot.overlays.append(zoom)
# ===============================================================================
# handlers
# ===============================================================================
@on_trait_change("image_editor:selected")
def _selected_changed(self):
sel = self.image_editor.selected
if sel:
self.load_from_remote_source(sel)
self.image_container.name = sel
def traits_view(self):
v = View(
HGroup(
Item("image_editor", show_label=False, style="custom", width=0.3),
# Item('names', show_label=False, editor=ListStrEditor(editable=False,
# selected='selected',
# operations=[]
# ),
# width=0.3,
# ),
Item("image_container", style="custom", width=0.7, show_label=False),
),
# Item('container', show_label=False,
# width=0.7,
# editor=ComponentEditor())),
resizable=True,
height=800,
width=900,
)
return v
if __name__ == "__main__":
from pychron.core.helpers.logger_setup import logging_setup
logging_setup("image_viewer")
im = ImageBrowser(cache_dir="/Users/ross/Sandbox/cache")
im.load_remote_directory("")
# im.load_from_remote_source('raster2.png')
# im.load_remote_directory()
# im.names = 'snapshot001.jpg,snapshot002.jpg,snapshot003.jpg,snapshot004.jpg'.split(',')
# im.load_from_remote_source('foo')
# im.load_image_from_file('/Users/ross/Sandbox/diodefailsnapshot.jpg')
im.configure_traits()
# ============= EOF =============================================
| 31.401869
| 110
| 0.510417
|
a33579293e13253cb2362f054b14c586224a12b7
| 640
|
py
|
Python
|
SMS_18/main/migrations/0004_currenttime.py
|
ujjwalgandhi/sms18
|
78d1476367bb255c8c13f19b13a103905a36014f
|
[
"MIT"
] | 8
|
2018-02-24T22:32:39.000Z
|
2022-03-18T18:28:41.000Z
|
SMS_18/main/migrations/0004_currenttime.py
|
ujjwalgandhi/sms18
|
78d1476367bb255c8c13f19b13a103905a36014f
|
[
"MIT"
] | 3
|
2020-06-06T00:41:08.000Z
|
2021-06-10T22:34:45.000Z
|
SMS_18/main/migrations/0004_currenttime.py
|
ujjwalgandhi/sms18
|
78d1476367bb255c8c13f19b13a103905a36014f
|
[
"MIT"
] | 3
|
2019-01-16T09:20:44.000Z
|
2019-01-22T07:05:38.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-21 10:20
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0003_remove_stock_stocks_sold'),
]
operations = [
migrations.CreateModel(
name='CurrentTime',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('current_time', models.DateTimeField(blank=True, default=datetime.datetime.now)),
],
),
]
| 26.666667
| 114
| 0.625
|
c1915944767eff1b28c2c7e03da56efda8de42e3
| 881
|
py
|
Python
|
bot.py
|
damiankw/gitlib-bot
|
8c576a03c66e8332284887a708cd0ff49181f8ff
|
[
"MIT"
] | null | null | null |
bot.py
|
damiankw/gitlib-bot
|
8c576a03c66e8332284887a708cd0ff49181f8ff
|
[
"MIT"
] | null | null | null |
bot.py
|
damiankw/gitlib-bot
|
8c576a03c66e8332284887a708cd0ff49181f8ff
|
[
"MIT"
] | null | null | null |
import logging
import threading
import time
import ezclib
log = logging.getLogger(__name__)
def main():
client = ezclib.EzcapechatRTMPProtocol(ezclib.CONFIG.ROOM, ezclib.CONFIG.USER, ezclib.CONFIG.EMAIL, ezclib.CONFIG.PASSWORD)
t = threading.Thread(target=client.connect)
t.daemon = True
t.start()
while not client.is_connected:
time.sleep(2)
while client.is_connected:
chat_msg = raw_input()
client.send_public(chat_msg)
if __name__ == '__main__':
formatter = '%(asctime)s : %(levelname)s : %(filename)s : %(lineno)d : %(funcName)s() : %(name)s : %(message)s'
if ezclib.CONFIG.DEBUG_TO_FILE:
logging.basicConfig(filename='ezclib_debug.log', level=ezclib.CONFIG.FILE_DEBUG_LEVEL, format=formatter)
else:
log.addHandler(logging.NullHandler())
log.info('Starting ezcapechat client.')
main()
| 25.911765
| 127
| 0.69126
|
4051efb4d36eaabd90005d714014088e60b5d367
| 3,113
|
py
|
Python
|
Chatbot-Emotion-Recognition/topic_extraction.py
|
NareshCJ/Deep-Learning-Team-2-Final-Project
|
caae1aadcc24f35a733688810c89dd30b9324286
|
[
"MIT"
] | null | null | null |
Chatbot-Emotion-Recognition/topic_extraction.py
|
NareshCJ/Deep-Learning-Team-2-Final-Project
|
caae1aadcc24f35a733688810c89dd30b9324286
|
[
"MIT"
] | null | null | null |
Chatbot-Emotion-Recognition/topic_extraction.py
|
NareshCJ/Deep-Learning-Team-2-Final-Project
|
caae1aadcc24f35a733688810c89dd30b9324286
|
[
"MIT"
] | null | null | null |
# Code based on work from:
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck
# Chyi-Kwei Yau <chyikwei.yau@gmail.com>
# License: BSD 3 clause
# URL: http://scikit-learn.org/stable/auto_examples/applications/topics_extraction_with_nmf_lda.html
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
import random
import os
class TopicExtraction:
def __init__(self):
self.n_top_words = 1
self.n_samples = 2000
self.n_features = 1000
self.n_topics = 2
self.previous_conversation = []
def load_history(self, username):
check = os.path.isfile('history/' + username + '_history')
if check:
with open('history/' + username + '_history') as f:
for line in f:
self.previous_conversation.append(line)
def get_top_topics(self, input_sentence):
self.append_to_history(input_sentence)
(nmf, lda, nmf_words, lda_words) = self.find_topics()
top_words = self.print_top_words(nmf, nmf_words)
return top_words
def append_to_history(self, conversation):
self.previous_conversation.append(conversation)
def print_top_words(self, model, feature_names):
topics=[]
for topic_idx, topic in enumerate(model.components_):
top = " ".join([feature_names[i]
for i in topic.argsort()[:-self.n_top_words - 1:-1]])
topics.append(top)
return topics
def find_topics(self):
print("Loading dataset...")
random.shuffle(self.previous_conversation)
data_samples = self.previous_conversation
tfidf_vectorizer = TfidfVectorizer(max_df=1.0, min_df=0.0,
max_features=self.n_features,
stop_words='english', encoding='ascii')
tfidf = tfidf_vectorizer.fit_transform(data_samples)
tf_vectorizer = CountVectorizer(max_df=1.0, min_df=0.0,
max_features=self.n_features,
stop_words='english', encoding='ascii')
tf = tf_vectorizer.fit_transform(data_samples)
# Fit the NMF model
nmf = NMF(n_components=self.n_topics, random_state=1,
alpha=.1, l1_ratio=.5).fit(tfidf)
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
lda = LatentDirichletAllocation(n_topics=self.n_topics, max_iter=5,
learning_method='online',
learning_offset=50.,
random_state=0)
lda.fit(tf)
tf_feature_names = tf_vectorizer.get_feature_names()
return nmf, lda, tfidf_feature_names, tf_feature_names
def write_history(self, username):
with open('history/' + username + '_history', "w+") as f:
for line in self.previous_conversation:
f.write(line+' ')
f.close()
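# Illustrative sketch of a typical call sequence for the class above; "alice" and the
# sentence are hypothetical, and a history/ directory must exist for write_history().
#
#     extractor = TopicExtraction()
#     extractor.load_history("alice")     # reads history/alice_history if it exists
#     topics = extractor.get_top_topics("I love hiking and camping in the mountains")
#     print(topics)                       # two single-word NMF topics (n_topics=2, n_top_words=1)
#     extractor.write_history("alice")    # persists the accumulated conversation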
| 37.963415
| 100
| 0.610022
|
f687b8e87e8b76982a849c057c614f4e49f0660e
| 251,912
|
py
|
Python
|
python/ccxt/huobi.py
|
petardudas/ccxt
|
f82b54ab688768cbe7a884ad92d2ad71986451ed
|
[
"MIT"
] | null | null | null |
python/ccxt/huobi.py
|
petardudas/ccxt
|
f82b54ab688768cbe7a884ad92d2ad71986451ed
|
[
"MIT"
] | null | null | null |
python/ccxt/huobi.py
|
petardudas/ccxt
|
f82b54ab688768cbe7a884ad92d2ad71986451ed
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountNotEnabled
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NotSupported
from ccxt.base.errors import NetworkError
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class huobi(Exchange):
def describe(self):
return self.deep_extend(super(huobi, self).describe(), {
'id': 'huobi',
'name': 'Huobi',
'countries': ['CN'],
'rateLimit': 100,
'userAgent': self.userAgents['chrome39'],
'certified': True,
'version': 'v1',
'accounts': None,
'accountsById': None,
'hostname': 'api.huobi.pro', # api.testnet.huobi.pro
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': True,
'swap': True,
'future': True,
'option': None,
'addMargin': None,
'cancelAllOrders': True,
'cancelOrder': True,
'cancelOrders': True,
'createDepositAddress': None,
'createOrder': True,
'createReduceOnlyOrder': False,
'fetchAccounts': True,
'fetchBalance': True,
'fetchBidsAsks': None,
'fetchBorrowInterest': True,
'fetchBorrowRate': None,
'fetchBorrowRateHistories': None,
'fetchBorrowRateHistory': None,
'fetchBorrowRates': True,
'fetchBorrowRatesPerSymbol': True,
'fetchCanceledOrders': None,
'fetchClosedOrder': None,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDeposit': None,
'fetchDepositAddress': True,
'fetchDepositAddresses': None,
'fetchDepositAddressesByNetwork': True,
'fetchDeposits': True,
'fetchFundingFee': None,
'fetchFundingFees': None,
'fetchFundingHistory': True,
'fetchFundingRate': True,
'fetchFundingRateHistory': True,
'fetchFundingRates': True,
'fetchIndexOHLCV': True,
'fetchL3OrderBook': None,
'fetchLedger': True,
'fetchLedgerEntry': None,
'fetchLeverage': False,
'fetchLeverageTiers': True,
'fetchMarketLeverageTiers': True,
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMyBuys': None,
'fetchMySells': None,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrder': None,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrderBooks': None,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchPosition': True,
'fetchPositions': True,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': True,
'fetchStatus': None,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': True,
'fetchTradingFees': False,
'fetchTradingLimits': True,
'fetchTransactions': None,
'fetchTransfers': None,
'fetchWithdrawAddressesByNetwork': True,
'fetchWithdrawal': None,
'fetchWithdrawals': True,
'fetchWithdrawalWhitelist': None,
'reduceMargin': None,
'setLeverage': True,
'setMarginMode': False,
'setPositionMode': False,
'signIn': None,
'transfer': True,
'withdraw': True,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '60min',
'4h': '4hour',
'1d': '1day',
'1w': '1week',
'1M': '1mon',
'1y': '1year',
},
'urls': {
# 'test': {
# 'market': 'https://api.testnet.huobi.pro',
# 'public': 'https://api.testnet.huobi.pro',
# 'private': 'https://api.testnet.huobi.pro',
# },
'logo': 'https://user-images.githubusercontent.com/1294454/76137448-22748a80-604e-11ea-8069-6e389271911d.jpg',
'hostnames': {
'contract': 'api.hbdm.com',
'spot': 'api.huobi.pro',
# recommended for AWS
# 'contract': 'api.hbdm.vn',
# 'spot': 'api-aws.huobi.pro',
},
'api': {
'contract': 'https://{hostname}',
'spot': 'https://{hostname}',
'market': 'https://{hostname}',
'public': 'https://{hostname}',
'private': 'https://{hostname}',
'v2Public': 'https://{hostname}',
'v2Private': 'https://{hostname}',
},
'www': 'https://www.huobi.com',
'referral': {
'url': 'https://www.huobi.com/en-us/topic/double-reward/?invite_code=6rmm2223',
'discount': 0.15,
},
'doc': [
'https://huobiapi.github.io/docs/spot/v1/cn/',
'https://huobiapi.github.io/docs/dm/v1/cn/',
'https://huobiapi.github.io/docs/coin_margined_swap/v1/cn/',
'https://huobiapi.github.io/docs/usdt_swap/v1/cn/',
'https://huobiapi.github.io/docs/option/v1/cn/',
],
'fees': 'https://www.huobi.com/about/fee/',
},
'api': {
# ------------------------------------------------------------
# old api definitions
'v2Public': {
'get': {
'reference/currencies': 1, # currency and chain reference information
'market-status': 1, # get the current market status
},
},
'v2Private': {
'get': {
'account/ledger': 1,
'account/withdraw/quota': 1,
'account/withdraw/address': 1, # query withdrawal addresses (parent user only)
'account/deposit/address': 1,
'account/repayment': 5, # query repayment transaction records
'reference/transact-fee-rate': 1,
'account/asset-valuation': 0.2, # get account asset valuation
'point/account': 5, # query point card balance
'sub-user/user-list': 1, # get the list of sub-users
'sub-user/user-state': 1, # get the state of a specific sub-user
'sub-user/account-list': 1, # get the account list of a specific sub-user
'sub-user/deposit-address': 1, # query a sub-user's deposit address
'sub-user/query-deposit': 1, # query a sub-user's deposit records
'user/api-key': 1, # query API key information for parent and sub-users
'user/uid': 1, # get the user UID for parent and sub-users
'algo-orders/opening': 1, # query untriggered OPEN algo orders
'algo-orders/history': 1, # query algo order history
'algo-orders/specific': 1, # query a specific algo order
'c2c/offers': 1, # query lending/borrowing orders
'c2c/offer': 1, # query a specific lending/borrowing order and its trades
'c2c/transactions': 1, # query lending/borrowing transaction records
'c2c/repayment': 1, # query repayment transaction records
'c2c/account': 1, # query account balance
'etp/reference': 1, # basic reference information
'etp/transactions': 5, # get leveraged ETP creation/redemption records
'etp/transaction': 5, # get a specific leveraged ETP creation/redemption record
'etp/rebalance': 1, # get leveraged ETP rebalance records
'etp/limit': 1, # get ETP position limits
},
'post': {
'account/transfer': 1,
'account/repayment': 5, # repay borrowed coins (cross and isolated margin)
'point/transfer': 5, # point card transfer
'sub-user/management': 1, # freeze/unfreeze a sub-user
'sub-user/creation': 1, # create a sub-user
'sub-user/tradable-market': 1, # set a sub-user's trading permissions
'sub-user/transferability': 1, # set a sub-user's asset transfer-out permission
'sub-user/api-key-generation': 1, # create a sub-user API key
'sub-user/api-key-modification': 1, # modify a sub-user API key
'sub-user/api-key-deletion': 1, # delete a sub-user API key
'sub-user/deduct-mode': 1, # set a sub-user's fee deduction mode
'algo-orders': 1, # place an algo order
'algo-orders/cancel-all-after': 1, # automatically cancel orders after a timeout
'algo-orders/cancellation': 1, # cancel an algo order (before it triggers)
'c2c/offer': 1, # place a lending/borrowing order
'c2c/cancellation': 1, # cancel a lending/borrowing order
'c2c/cancel-all': 1, # cancel all lending/borrowing orders
'c2c/repayment': 1, # repay borrowed coins
'c2c/transfer': 1, # asset transfer
'etp/creation': 5, # leveraged ETP creation
'etp/redemption': 5, # leveraged ETP redemption
'etp/{transactId}/cancel': 10, # cancel a single leveraged ETP order
'etp/batch-cancel': 50, # batch cancel leveraged ETP orders
},
},
'market': {
'get': {
'history/kline': 1, # get candlestick (kline) data
'detail/merged': 1, # get aggregated ticker
'depth': 1, # get market depth data
'trade': 1, # get trade detail data
'history/trade': 1, # batch get recent trade records
'detail': 1, # get market detail 24-hour volume data
'tickers': 1,
'etp': 1, # get real-time leveraged ETP net value
},
},
'public': {
'get': {
'common/symbols': 1, # query all trading pairs supported by the system
'common/currencys': 1, # query all currencies supported by the system
'common/timestamp': 1, # query the current system time
'common/exchange': 1, # order limits
'settings/currencys': 1, # ?language=en-US
},
},
'private': {
'get': {
'account/accounts': 0.2, # query all accounts of the current user (i.e. account-id)
'account/accounts/{id}/balance': 0.2, # query the balance of a specified account
'account/accounts/{sub-uid}': 1,
'account/history': 4,
'cross-margin/loan-info': 1,
'margin/loan-info': 1, # query borrowing interest rates and quotas
'fee/fee-rate/get': 1,
'order/openOrders': 0.4,
'order/orders': 0.4,
'order/orders/{id}': 0.4, # query the details of an order
'order/orders/{id}/matchresults': 0.4, # query the match results of an order
'order/orders/getClientOrder': 0.4,
'order/history': 1, # query current and historical orders
'order/matchresults': 1, # query current and historical trades
# 'dw/withdraw-virtual/addresses', # query crypto withdrawal addresses(Deprecated)
'query/deposit-withdraw': 1,
# 'margin/loan-info', # duplicate
'margin/loan-orders': 0.2, # margin loan orders
'margin/accounts/balance': 0.2, # margin account details
'cross-margin/loan-orders': 1, # query borrow orders
'cross-margin/accounts/balance': 1, # borrow account details
'points/actions': 1,
'points/orders': 1,
'subuser/aggregate-balance': 10,
'stable-coin/exchange_rate': 1,
'stable-coin/quote': 1,
},
'post': {
'account/transfer': 1, # asset transfer (generic endpoint for transfers between parent and sub-users)
'futures/transfer': 1,
'order/batch-orders': 0.4,
'order/orders/place': 0.2, # create and execute a new order in one step (recommended)
'order/orders/submitCancelClientOrder': 0.2,
'order/orders/batchCancelOpenOrders': 0.4,
# 'order/orders', # create a new order request (create only, do not execute)
# 'order/orders/{id}/place', # execute an order (only an already-created one)
'order/orders/{id}/submitcancel': 0.2, # submit a cancellation request for an order
'order/orders/batchcancel': 0.4, # batch cancel orders
# 'dw/balance/transfer', # asset transfer
'dw/withdraw/api/create': 1, # request a crypto withdrawal
# 'dw/withdraw-virtual/create', # request a crypto withdrawal
# 'dw/withdraw-virtual/{id}/place', # confirm a crypto withdrawal request(Deprecated)
'dw/withdraw-virtual/{id}/cancel': 1, # cancel a crypto withdrawal request
'dw/transfer-in/margin': 10, # transfer from the spot account into the margin account
'dw/transfer-out/margin': 10, # transfer from the margin account out to the spot account
'margin/orders': 10, # request a margin loan
'margin/orders/{id}/repay': 10, # repay a margin loan
'cross-margin/transfer-in': 1, # asset transfer in
'cross-margin/transfer-out': 1, # asset transfer out
'cross-margin/orders': 1, # request to borrow
'cross-margin/orders/{id}/repay': 1, # repay borrowed coins
'stable-coin/exchange': 1,
'subuser/transfer': 10,
},
},
# ------------------------------------------------------------
# new api definitions
# 'https://status.huobigroup.com/api/v2/summary.json': 1,
# 'https://status-dm.huobigroup.com/api/v2/summary.json': 1,
# 'https://status-swap.huobigroup.com/api/v2/summary.json': 1,
# 'https://status-linear-swap.huobigroup.com/api/v2/summary.json': 1,
'spot': {
'public': {
'get': {
'v2/market-status': 1,
'v1/common/symbols': 1,
'v1/common/currencys': 1,
'v2/reference/currencies': 1,
'v1/common/timestamp': 1,
'v1/common/exchange': 1, # order limits
# Market Data
'market/history/candles': 1,
'market/history/kline': 1,
'market/detail/merged': 1,
'market/tickers': 1,
'market/depth': 1,
'market/trade': 1,
'market/history/trade': 1,
'market/detail/': 1,
'market/etp': 1,
# ETP
'v2/etp/reference': 1,
'v2/etp/rebalance': 1,
},
},
'private': {
'get': {
# Account
'v1/account/accounts': 0.2,
'v1/account/accounts/{account-id}/balance': 0.2,
'v2/account/valuation': 1,
'v2/account/asset-valuation': 0.2,
'v1/account/history': 4,
'v2/account/ledger': 1,
'v2/point/account': 5,
# Wallet(Deposit and Withdraw)
'v2/account/deposit/address': 1,
'v2/account/withdraw/quota': 1,
'v2/account/withdraw/address': 1,
'v2/reference/currencies': 1,
'v1/query/deposit-withdraw': 1,
# Sub user management
'v2/user/api-key': 1,
'v2/user/uid': 1,
'v2/sub-user/user-list': 1,
'v2/sub-user/user-state': 1,
'v2/sub-user/account-list': 1,
'v2/sub-user/deposit-address': 1,
'v2/sub-user/query-deposit': 1,
'v1/subuser/aggregate-balance': 10,
'v1/account/accounts/{sub-uid}': 1,
# Trading
'v1/order/openOrders': 0.4,
'v1/order/orders/{order-id}': 0.4,
'v1/order/orders/getClientOrder': 0.4,
'v1/order/orders/{order-id}/matchresults': 0.4,
'v1/order/orders': 0.4,
'v1/order/history': 1,
'v1/order/matchresults': 1,
'v2/reference/transact-fee-rate': 1,
# Conditional Order
'v2/algo-orders/opening': 1,
'v2/algo-orders/history': 1,
'v2/algo-orders/specific': 1,
# Margin Loan(Cross/Isolated)
'v1/margin/loan-info': 1,
'v1/margin/loan-orders': 0.2,
'v1/margin/accounts/balance': 0.2,
'v1/cross-margin/loan-info': 1,
'v1/cross-margin/loan-orders': 1,
'v1/cross-margin/accounts/balance': 1,
'v2/account/repayment': 5,
# Stable Coin Exchange
'v1/stable-coin/quote': 1,
# ETP
'v2/etp/transactions': 5,
'v2/etp/transaction': 5,
'v2/etp/limit': 1,
},
'post': {
# Account
'v1/account/transfer': 1,
'v1/futures/transfer': 1, # future transfers
'v2/point/transfer': 5,
'v2/account/transfer': 1, # swap transfers
# Wallet(Deposit and Withdraw)
'v1/dw/withdraw/api/create': 1,
'v1/dw/withdraw-virtual/{withdraw-id}/cancel': 1,
# Sub user management
'v2/sub-user/deduct-mode': 1,
'v2/sub-user/creation': 1,
'v2/sub-user/management': 1,
'v2/sub-user/tradable-market': 1,
'v2/sub-user/transferability': 1,
'v2/sub-user/api-key-generation': 1,
'v2/sub-user/api-key-modification': 1,
'v2/sub-user/api-key-deletion': 1,
'v1/subuser/transfer': 10,
# Trading
'v1/order/orders/place': 0.2,
'v1/order/batch-orders': 0.4,
'v1/order/orders/{order-id}/submitcancel': 0.2,
'v1/order/orders/submitCancelClientOrder': 0.2,
'v1/order/orders/batchCancelOpenOrders': 0.4,
'v1/order/orders/batchcancel': 0.4,
'v2/algo-orders/cancel-all-after': 1,
# Conditional Order
'v2/algo-orders': 1,
'v2/algo-orders/cancellation': 1,
# Margin Loan(Cross/Isolated)
'v2/account/repayment': 5,
'v1/dw/transfer-in/margin': 10,
'v1/dw/transfer-out/margin': 10,
'v1/margin/orders': 10,
'v1/margin/orders/{order-id}/repay': 10,
'v1/cross-margin/transfer-in': 1,
'v1/cross-margin/transfer-out': 1,
'v1/cross-margin/orders': 1,
'v1/cross-margin/orders/{order-id}/repay': 1,
# Stable Coin Exchange
'v1/stable-coin/exchange': 1,
# ETP
'v2/etp/creation': 5,
'v2/etp/redemption': 5,
'v2/etp/{transactId}/cancel': 10,
'v2/etp/batch-cancel': 50,
},
},
},
'contract': {
'public': {
'get': {
'api/v1/timestamp': 1,
# Future Market Data interface
'api/v1/contract_contract_info': 1,
'api/v1/contract_index': 1,
'api/v1/contract_price_limit': 1,
'api/v1/contract_open_interest': 1,
'api/v1/contract_delivery_price': 1,
'market/depth': 1,
'market/bbo': 1,
'market/history/kline': 1,
'index/market/history/mark_price_kline': 1,
'market/detail/merged': 1,
'market/detail/batch_merged': 1,
'market/trade': 1,
'market/history/trade': 1,
'api/v1/contract_risk_info': 1,
'api/v1/contract_insurance_fund': 1,
'api/v1/contract_adjustfactor': 1,
'api/v1/contract_his_open_interest': 1,
'api/v1/contract_ladder_margin': 1,
'api/v1/contract_api_state': 1,
'api/v1/contract_elite_account_ratio': 1,
'api/v1/contract_elite_position_ratio': 1,
'api/v1/contract_liquidation_orders': 1,
'api/v1/contract_settlement_records': 1,
'index/market/history/index': 1,
'index/market/history/basis': 1,
'api/v1/contract_estimated_settlement_price': 1,
# Swap Market Data interface
'swap-api/v1/swap_contract_info': 1,
'swap-api/v1/swap_index': 1,
'swap-api/v1/swap_price_limit': 1,
'swap-api/v1/swap_open_interest': 1,
'swap-ex/market/depth': 1,
'swap-ex/market/bbo': 1,
'swap-ex/market/history/kline': 1,
'index/market/history/swap_mark_price_kline': 1,
'swap-ex/market/detail/merged': 1,
'swap-ex/market/detail/batch_merged': 1,
'swap-ex/market/trade': 1,
'swap-ex/market/history/trade': 1,
'swap-api/v1/swap_risk_info': 1,
'swap-api/v1/swap_insurance_fund': 1,
'swap-api/v1/swap_adjustfactor': 1,
'swap-api/v1/swap_his_open_interest': 1,
'swap-api/v1/swap_ladder_margin': 1,
'swap-api/v1/swap_api_state': 1,
'swap-api/v1/swap_elite_account_ratio': 1,
'swap-api/v1/swap_elite_position_ratio': 1,
'swap-api/v1/swap_estimated_settlement_price': 1,
'swap-api/v1/swap_liquidation_orders': 1,
'swap-api/v1/swap_settlement_records': 1,
'swap-api/v1/swap_funding_rate': 1,
'swap-api/v1/swap_batch_funding_rate': 1,
'swap-api/v1/swap_historical_funding_rate': 1,
'index/market/history/swap_premium_index_kline': 1,
'index/market/history/swap_estimated_rate_kline': 1,
'index/market/history/swap_basis': 1,
                    # Linear Swap Market Data interface
'linear-swap-api/v1/swap_contract_info': 1,
'linear-swap-api/v1/swap_index': 1,
'linear-swap-api/v1/swap_price_limit': 1,
'linear-swap-api/v1/swap_open_interest': 1,
'linear-swap-ex/market/depth': 1,
'linear-swap-ex/market/bbo': 1,
'linear-swap-ex/market/history/kline': 1,
'index/market/history/linear_swap_mark_price_kline': 1,
'linear-swap-ex/market/detail/merged': 1,
'linear-swap-ex/market/detail/batch_merged': 1,
'linear-swap-ex/market/trade': 1,
'linear-swap-ex/market/history/trade': 1,
'linear-swap-api/v1/swap_risk_info': 1,
                    'linear-swap-api/v1/swap_insurance_fund': 1,
'linear-swap-api/v1/swap_adjustfactor': 1,
'linear-swap-api/v1/swap_cross_adjustfactor': 1,
'linear-swap-api/v1/swap_his_open_interest': 1,
'linear-swap-api/v1/swap_ladder_margin': 1,
'linear-swap-api/v1/swap_cross_ladder_margin': 1,
'linear-swap-api/v1/swap_api_state': 1,
'linear-swap-api/v1/swap_cross_transfer_state': 1,
'linear-swap-api/v1/swap_cross_trade_state': 1,
'linear-swap-api/v1/swap_elite_account_ratio': 1,
'linear-swap-api/v1/swap_elite_position_ratio': 1,
'linear-swap-api/v1/swap_liquidation_orders': 1,
'linear-swap-api/v1/swap_settlement_records': 1,
'linear-swap-api/v1/swap_funding_rate': 1,
'linear-swap-api/v1/swap_batch_funding_rate': 1,
'linear-swap-api/v1/swap_historical_funding_rate': 1,
'index/market/history/linear_swap_premium_index_kline': 1,
'index/market/history/linear_swap_estimated_rate_kline': 1,
'index/market/history/linear_swap_basis': 1,
'linear-swap-api/v1/swap_estimated_settlement_price': 1,
},
},
'private': {
'get': {
# Future Account Interface
'api/v1/contract_api_trading_status': 1,
# Swap Account Interface
'swap-api/v1/swap_api_trading_status': 1,
                    # Linear Swap Account Interface
'linear-swap-api/v1/swap_api_trading_status': 1,
},
'post': {
# Future Account Interface
'api/v1/contract_balance_valuation': 1,
'api/v1/contract_account_info': 1,
'api/v1/contract_position_info': 1,
'api/v1/contract_sub_auth': 1,
'api/v1/contract_sub_account_list': 1,
'api/v1/contract_sub_account_info_list': 1,
'api/v1/contract_sub_account_info': 1,
'api/v1/contract_sub_position_info': 1,
'api/v1/contract_financial_record': 1,
'api/v1/contract_financial_record_exact': 1,
'api/v1/contract_user_settlement_records': 1,
'api/v1/contract_order_limit': 1,
'api/v1/contract_fee': 1,
'api/v1/contract_transfer_limit': 1,
'api/v1/contract_position_limit': 1,
'api/v1/contract_account_position_info': 1,
'api/v1/contract_master_sub_transfer': 1,
'api/v1/contract_master_sub_transfer_record': 1,
'api/v1/contract_available_level_rate': 1,
# Future Trade Interface
'api/v1/contract_order': 1,
                    'api/v1/contract_batchorder': 1,
'api/v1/contract_cancel': 1,
'api/v1/contract_cancelall': 1,
'api/v1/contract_switch_lever_rate': 1,
'api/v1/lightning_close_position': 1,
'api/v1/contract_order_info': 1,
'api/v1/contract_order_detail': 1,
'api/v1/contract_openorders': 1,
'api/v1/contract_hisorders': 1,
'api/v1/contract_hisorders_exact': 1,
'api/v1/contract_matchresults': 1,
'api/v1/contract_matchresults_exact': 1,
# Contract Strategy Order Interface
'api/v1/contract_trigger_order': 1,
'api/v1/contract_trigger_cancel': 1,
'api/v1/contract_trigger_cancelall': 1,
'api/v1/contract_trigger_openorders': 1,
'api/v1/contract_trigger_hisorders': 1,
'api/v1/contract_tpsl_order': 1,
'api/v1/contract_tpsl_cancel': 1,
'api/v1/contract_tpsl_cancelall': 1,
'api/v1/contract_tpsl_openorders': 1,
'api/v1/contract_tpsl_hisorders': 1,
'api/v1/contract_relation_tpsl_order': 1,
'api/v1/contract_track_order': 1,
'api/v1/contract_track_cancel': 1,
'api/v1/contract_track_cancelall': 1,
'api/v1/contract_track_openorders': 1,
'api/v1/contract_track_hisorders': 1,
# Swap Account Interface
'swap-api/v1/swap_balance_valuation': 1,
'swap-api/v1/swap_account_info': 1,
'swap-api/v1/swap_position_info': 1,
'swap-api/v1/swap_account_position_info': 1,
'swap-api/v1/swap_sub_auth': 1,
'swap-api/v1/swap_sub_account_list': 1,
'swap-api/v1/swap_sub_account_info_list': 1,
'swap-api/v1/swap_sub_account_info': 1,
'swap-api/v1/swap_sub_position_info': 1,
'swap-api/v1/swap_financial_record': 1,
'swap-api/v1/swap_financial_record_exact': 1,
'swap-api/v1/swap_user_settlement_records': 1,
'swap-api/v1/swap_available_level_rate': 1,
'swap-api/v1/swap_order_limit': 1,
'swap-api/v1/swap_fee': 1,
'swap-api/v1/swap_transfer_limit': 1,
'swap-api/v1/swap_position_limit': 1,
'swap-api/v1/swap_master_sub_transfer': 1,
'swap-api/v1/swap_master_sub_transfer_record': 1,
# Swap Trade Interface
'swap-api/v1/swap_order': 1,
'swap-api/v1/swap_batchorder': 1,
'swap-api/v1/swap_cancel': 1,
'swap-api/v1/swap_cancelall': 1,
'swap-api/v1/swap_lightning_close_position': 1,
'swap-api/v1/swap_switch_lever_rate': 1,
'swap-api/v1/swap_order_info': 1,
'swap-api/v1/swap_order_detail': 1,
'swap-api/v1/swap_openorders': 1,
'swap-api/v1/swap_hisorders': 1,
'swap-api/v1/swap_hisorders_exact': 1,
'swap-api/v1/swap_matchresults': 1,
'swap-api/v1/swap_matchresults_exact': 1,
# Swap Strategy Order Interface
'swap-api/v1/swap_trigger_order': 1,
'swap-api/v1/swap_trigger_cancel': 1,
'swap-api/v1/swap_trigger_cancelall': 1,
'swap-api/v1/swap_trigger_openorders': 1,
'swap-api/v1/swap_trigger_hisorders': 1,
'swap-api/v1/swap_tpsl_order': 1,
'swap-api/v1/swap_tpsl_cancel': 1,
'swap-api/v1/swap_tpsl_cancelall': 1,
'swap-api/v1/swap_tpsl_openorders': 1,
'swap-api/v1/swap_tpsl_hisorders': 1,
'swap-api/v1/swap_relation_tpsl_order': 1,
'swap-api/v1/swap_track_order': 1,
'swap-api/v1/swap_track_cancel': 1,
'swap-api/v1/swap_track_cancelall': 1,
'swap-api/v1/swap_track_openorders': 1,
'swap-api/v1/swap_track_hisorders': 1,
                    # Linear Swap Account Interface
'linear-swap-api/v1/swap_lever_position_limit': 1,
'linear-swap-api/v1/swap_cross_lever_position_limit': 1,
'linear-swap-api/v1/swap_balance_valuation': 1,
'linear-swap-api/v1/swap_account_info': 1,
'linear-swap-api/v1/swap_cross_account_info': 1,
'linear-swap-api/v1/swap_position_info': 1,
'linear-swap-api/v1/swap_cross_position_info': 1,
'linear-swap-api/v1/swap_account_position_info': 1,
'linear-swap-api/v1/swap_cross_account_position_info': 1,
'linear-swap-api/v1/swap_sub_auth': 1,
'linear-swap-api/v1/swap_sub_account_list': 1,
'linear-swap-api/v1/swap_cross_sub_account_list': 1,
'linear-swap-api/v1/swap_sub_account_info_list': 1,
'linear-swap-api/v1/swap_cross_sub_account_info_list': 1,
'linear-swap-api/v1/swap_sub_account_info': 1,
'linear-swap-api/v1/swap_cross_sub_account_info': 1,
'linear-swap-api/v1/swap_sub_position_info': 1,
'linear-swap-api/v1/swap_cross_sub_position_info': 1,
'linear-swap-api/v1/swap_financial_record': 1,
'linear-swap-api/v1/swap_financial_record_exact': 1,
'linear-swap-api/v1/swap_user_settlement_records': 1,
'linear-swap-api/v1/swap_cross_user_settlement_records': 1,
'linear-swap-api/v1/swap_available_level_rate': 1,
'linear-swap-api/v1/swap_cross_available_level_rate': 1,
'linear-swap-api/v1/swap_order_limit': 1,
'linear-swap-api/v1/swap_fee': 1,
'linear-swap-api/v1/swap_transfer_limit': 1,
'linear-swap-api/v1/swap_cross_transfer_limit': 1,
'linear-swap-api/v1/swap_position_limit': 1,
'linear-swap-api/v1/swap_cross_position_limit': 1,
'linear-swap-api/v1/swap_master_sub_transfer': 1,
'linear-swap-api/v1/swap_master_sub_transfer_record': 1,
'linear-swap-api/v1/swap_transfer_inner': 1,
                    # Linear Swap Trade Interface
'linear-swap-api/v1/swap_order': 1,
'linear-swap-api/v1/swap_cross_order': 1,
'linear-swap-api/v1/swap_batchorder': 1,
'linear-swap-api/v1/swap_cross_batchorder': 1,
'linear-swap-api/v1/swap_cancel': 1,
'linear-swap-api/v1/swap_cross_cancel': 1,
'linear-swap-api/v1/swap_cancelall': 1,
'linear-swap-api/v1/swap_cross_cancelall': 1,
'linear-swap-api/v1/swap_switch_lever_rate': 1,
'linear-swap-api/v1/swap_cross_switch_lever_rate': 1,
'linear-swap-api/v1/swap_lightning_close_position': 1,
'linear-swap-api/v1/swap_cross_lightning_close_position': 1,
'linear-swap-api/v1/swap_order_info': 1,
'linear-swap-api/v1/swap_cross_order_info': 1,
'linear-swap-api/v1/swap_order_detail': 1,
'linear-swap-api/v1/swap_cross_order_detail': 1,
'linear-swap-api/v1/swap_openorders': 1,
'linear-swap-api/v1/swap_cross_openorders': 1,
'linear-swap-api/v1/swap_hisorders': 1,
'linear-swap-api/v1/swap_cross_hisorders': 1,
'linear-swap-api/v1/swap_hisorders_exact': 1,
'linear-swap-api/v1/swap_cross_hisorders_exact': 1,
'linear-swap-api/v1/swap_matchresults': 1,
'linear-swap-api/v1/swap_cross_matchresults': 1,
'linear-swap-api/v1/swap_matchresults_exact': 1,
'linear-swap-api/v1/swap_cross_matchresults_exact': 1,
'linear-swap-api/v1/swap_switch_position_mode': 1,
'linear-swap-api/v1/swap_cross_switch_position_mode': 1,
                    # Linear Swap Strategy Order Interface
'linear-swap-api/v1/swap_trigger_order': 1,
'linear-swap-api/v1/swap_cross_trigger_order': 1,
'linear-swap-api/v1/swap_trigger_cancel': 1,
'linear-swap-api/v1/swap_cross_trigger_cancel': 1,
'linear-swap-api/v1/swap_trigger_cancelall': 1,
'linear-swap-api/v1/swap_cross_trigger_cancelall': 1,
'linear-swap-api/v1/swap_trigger_openorders': 1,
'linear-swap-api/v1/swap_cross_trigger_openorders': 1,
'linear-swap-api/v1/swap_trigger_hisorders': 1,
'linear-swap-api/v1/swap_cross_trigger_hisorders': 1,
'linear-swap-api/v1/swap_tpsl_order': 1,
'linear-swap-api/v1/swap_cross_tpsl_order': 1,
'linear-swap-api/v1/swap_tpsl_cancel': 1,
'linear-swap-api/v1/swap_cross_tpsl_cancel': 1,
'linear-swap-api/v1/swap_tpsl_cancelall': 1,
'linear-swap-api/v1/swap_cross_tpsl_cancelall': 1,
'linear-swap-api/v1/swap_tpsl_openorders': 1,
'linear-swap-api/v1/swap_cross_tpsl_openorders': 1,
'linear-swap-api/v1/swap_tpsl_hisorders': 1,
'linear-swap-api/v1/swap_cross_tpsl_hisorders': 1,
'linear-swap-api/v1/swap_relation_tpsl_order': 1,
'linear-swap-api/v1/swap_cross_relation_tpsl_order': 1,
'linear-swap-api/v1/swap_track_order': 1,
'linear-swap-api/v1/swap_cross_track_order': 1,
'linear-swap-api/v1/swap_track_cancel': 1,
'linear-swap-api/v1/swap_cross_track_cancel': 1,
'linear-swap-api/v1/swap_track_cancelall': 1,
'linear-swap-api/v1/swap_cross_track_cancelall': 1,
'linear-swap-api/v1/swap_track_openorders': 1,
'linear-swap-api/v1/swap_cross_track_openorders': 1,
'linear-swap-api/v1/swap_track_hisorders': 1,
'linear-swap-api/v1/swap_cross_track_hisorders': 1,
},
},
},
},
'fees': {
'trading': {
'feeSide': 'get',
'tierBased': False,
'percentage': True,
'maker': self.parse_number('0.002'),
'taker': self.parse_number('0.002'),
},
},
'exceptions': {
'broad': {
'contract is restricted of closing positions on API. Please contact customer service': OnMaintenance,
'maintain': OnMaintenance,
},
'exact': {
# err-code
'1010': AccountNotEnabled, # {"status":"error","err_code":1010,"err_msg":"Account doesnt exist.","ts":1648137970490}
'1017': OrderNotFound, # {"status":"error","err_code":1017,"err_msg":"Order doesnt exist.","ts":1640550859242}
'1034': InvalidOrder, # {"status":"error","err_code":1034,"err_msg":"Incorrect field of order price type.","ts":1643802870182}
'1036': InvalidOrder, # {"status":"error","err_code":1036,"err_msg":"Incorrect field of open long form.","ts":1643802518986}
'1039': InvalidOrder, # {"status":"error","err_code":1039,"err_msg":"Buy price must be lower than 39270.9USDT. Sell price must exceed 37731USDT.","ts":1643802374403}
'1041': InvalidOrder, # {"status":"error","err_code":1041,"err_msg":"The order amount exceeds the limit(170000Cont), please modify and order again.","ts":1643802784940}
'1047': InsufficientFunds, # {"status":"error","err_code":1047,"err_msg":"Insufficient margin available.","ts":1643802672652}
'1066': BadSymbol, # {"status":"error","err_code":1066,"err_msg":"The symbol field cannot be empty. Please re-enter.","ts":1640550819147}
'1067': InvalidOrder, # {"status":"error","err_code":1067,"err_msg":"The client_order_id field is invalid. Please re-enter.","ts":1643802119413}
'1013': BadSymbol, # {"status":"error","err_code":1013,"err_msg":"This contract symbol doesnt exist.","ts":1640550459583}
'1094': InvalidOrder, # {"status":"error","err_code":1094,"err_msg":"The leverage cannot be empty, please switch the leverage or contact customer service","ts":1640496946243}
'1220': AccountNotEnabled, # {"status":"error","err_code":1220,"err_msg":"You don’t have access permission as you have not opened contracts trading.","ts":1645096660718}
'bad-request': BadRequest,
'validation-format-error': BadRequest, # {"status":"error","err-code":"validation-format-error","err-msg":"Format Error: order-id.","data":null}
'validation-constraints-required': BadRequest, # {"status":"error","err-code":"validation-constraints-required","err-msg":"Field is missing: client-order-id.","data":null}
'base-date-limit-error': BadRequest, # {"status":"error","err-code":"base-date-limit-error","err-msg":"date less than system limit","data":null}
'api-not-support-temp-addr': PermissionDenied, # {"status":"error","err-code":"api-not-support-temp-addr","err-msg":"API withdrawal does not support temporary addresses","data":null}
'timeout': RequestTimeout, # {"ts":1571653730865,"status":"error","err-code":"timeout","err-msg":"Request Timeout"}
'gateway-internal-error': ExchangeNotAvailable, # {"status":"error","err-code":"gateway-internal-error","err-msg":"Failed to load data. Try again later.","data":null}
'account-frozen-balance-insufficient-error': InsufficientFunds, # {"status":"error","err-code":"account-frozen-balance-insufficient-error","err-msg":"trade account balance is not enough, left: `0.0027`","data":null}
'invalid-amount': InvalidOrder, # eg "Paramemter `amount` is invalid."
'order-limitorder-amount-min-error': InvalidOrder, # limit order amount error, min: `0.001`
'order-limitorder-amount-max-error': InvalidOrder, # market order amount error, max: `1000000`
'order-marketorder-amount-min-error': InvalidOrder, # market order amount error, min: `0.01`
'order-limitorder-price-min-error': InvalidOrder, # limit order price error
'order-limitorder-price-max-error': InvalidOrder, # limit order price error
                'order-holding-limit-failed': InvalidOrder, # {"status":"error","err-code":"order-holding-limit-failed","err-msg":"Order failed, exceeded the holding limit of this currency","data":null}
'order-orderprice-precision-error': InvalidOrder, # {"status":"error","err-code":"order-orderprice-precision-error","err-msg":"order price precision error, scale: `4`","data":null}
'order-etp-nav-price-max-error': InvalidOrder, # {"status":"error","err-code":"order-etp-nav-price-max-error","err-msg":"Order price cannot be higher than 5% of NAV","data":null}
'order-orderstate-error': OrderNotFound, # canceling an already canceled order
'order-queryorder-invalid': OrderNotFound, # querying a non-existent order
'order-update-error': ExchangeNotAvailable, # undocumented error
'api-signature-check-failed': AuthenticationError,
'api-signature-not-valid': AuthenticationError, # {"status":"error","err-code":"api-signature-not-valid","err-msg":"Signature not valid: Incorrect Access key [Access key错误]","data":null}
'base-record-invalid': OrderNotFound, # https://github.com/ccxt/ccxt/issues/5750
                'base-symbol-trade-disabled': BadSymbol, # {"status":"error","err-code":"base-symbol-trade-disabled","err-msg":"Trading is disabled for this symbol","data":null}
'base-symbol-error': BadSymbol, # {"status":"error","err-code":"base-symbol-error","err-msg":"The symbol is invalid","data":null}
'system-maintenance': OnMaintenance, # {"status": "error", "err-code": "system-maintenance", "err-msg": "System is in maintenance!", "data": null}
'base-request-exceed-frequency-limit': RateLimitExceeded, # {"status":"error","err-code":"base-request-exceed-frequency-limit","err-msg":"Frequency of requests has exceeded the limit, please try again later","data":null}
# err-msg
'invalid symbol': BadSymbol, # {"ts":1568813334794,"status":"error","err-code":"invalid-parameter","err-msg":"invalid symbol"}
'symbol trade not open now': BadSymbol, # {"ts":1576210479343,"status":"error","err-code":"invalid-parameter","err-msg":"symbol trade not open now"}
'require-symbol': BadSymbol, # {"status":"error","err-code":"require-symbol","err-msg":"Parameter `symbol` is required.","data":null}
},
},
'precisionMode': TICK_SIZE,
'options': {
'fetchMarkets': {
'types': {
'spot': True,
'future': {
'linear': True,
'inverse': True,
},
'swap': {
'linear': True,
'inverse': True,
},
},
},
'defaultType': 'spot', # spot, future, swap
'defaultSubType': 'inverse', # inverse, linear
'defaultNetwork': 'ERC20',
'networks': {
'ETH': 'erc20',
'TRX': 'trc20',
'HRC20': 'hrc20',
'HECO': 'hrc20',
'HT': 'hrc20',
'ALGO': 'algo',
'OMNI': '',
},
# https://github.com/ccxt/ccxt/issues/5376
'fetchOrdersByStatesMethod': 'spot_private_get_v1_order_orders', # 'spot_private_get_v1_order_history' # https://github.com/ccxt/ccxt/pull/5392
'createMarketBuyOrderRequiresPrice': True,
'language': 'en-US',
'broker': {
'id': 'AA03022abc',
},
'accountsByType': {
'spot': 'pro',
'funding': 'pro',
'future': 'futures',
},
'typesByAccount': {
'pro': 'spot',
'futures': 'future',
},
'spot': {
'stopOrderTypes': {
'stop-limit': True,
'buy-stop-limit': True,
'sell-stop-limit': True,
'stop-limit-fok': True,
'buy-stop-limit-fok': True,
'sell-stop-limit-fok': True,
},
'limitOrderTypes': {
'limit': True,
'buy-limit': True,
'sell-limit': True,
'ioc': True,
'buy-ioc': True,
'sell-ioc': True,
'limit-maker': True,
'buy-limit-maker': True,
'sell-limit-maker': True,
'stop-limit': True,
'buy-stop-limit': True,
'sell-stop-limit': True,
'limit-fok': True,
'buy-limit-fok': True,
'sell-limit-fok': True,
'stop-limit-fok': True,
'buy-stop-limit-fok': True,
'sell-stop-limit-fok': True,
},
},
},
'commonCurrencies': {
# https://github.com/ccxt/ccxt/issues/6081
# https://github.com/ccxt/ccxt/issues/3365
# https://github.com/ccxt/ccxt/issues/2873
'GET': 'Themis', # conflict with GET(Guaranteed Entrance Token, GET Protocol)
'GTC': 'Game.com', # conflict with Gitcoin and Gastrocoin
'HIT': 'HitChain',
'HOT': 'Hydro Protocol', # conflict with HOT(Holo) https://github.com/ccxt/ccxt/issues/4929
# https://github.com/ccxt/ccxt/issues/7399
# https://coinmarketcap.com/currencies/pnetwork/
# https://coinmarketcap.com/currencies/penta/markets/
# https://en.cryptonomist.ch/blog/eidoo/the-edo-to-pnt-upgrade-what-you-need-to-know-updated/
'PNT': 'Penta',
'SBTC': 'Super Bitcoin',
'BIFI': 'Bitcoin File', # conflict with Beefy.Finance https://github.com/ccxt/ccxt/issues/8706
},
})
def fetch_time(self, params={}):
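        # resolves the server timestamp from either the spot or the contract public API,
        # depending on the 'type' option or param('spot' by default; 'future' and 'swap' use the contract endpoint)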
options = self.safe_value(self.options, 'fetchTime', {})
defaultType = self.safe_string(self.options, 'defaultType', 'spot')
type = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', type)
method = 'spotPublicGetV1CommonTimestamp'
if (type == 'future') or (type == 'swap'):
method = 'contractPublicGetApiV1Timestamp'
response = getattr(self, method)(params)
#
# spot
#
# {"status":"ok","data":1637504261099}
#
# future, swap
#
# {"status":"ok","ts":1637504164707}
#
return self.safe_integer_2(response, 'data', 'ts')
def parse_trading_fee(self, fee, market=None):
#
# {
# "symbol":"btcusdt",
# "actualMakerRate":"0.002",
# "actualTakerRate":"0.002",
# "takerFeeRate":"0.002",
# "makerFeeRate":"0.002"
# }
#
marketId = self.safe_string(fee, 'symbol')
return {
'info': fee,
'symbol': self.safe_symbol(marketId, market),
'maker': self.safe_number(fee, 'actualMakerRate'),
'taker': self.safe_number(fee, 'actualTakerRate'),
}
def fetch_trading_fee(self, symbol, params={}):
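        # fetches the maker/taker fee rates for a single symbol from the v2 reference transact-fee-rate endpoint
        # the endpoint accepts a comma-separated list of symbols, but only the first returned entry is parsed here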
self.load_markets()
market = self.market(symbol)
request = {
'symbols': market['id'], # trading symbols comma-separated
}
response = self.spotPrivateGetV2ReferenceTransactFeeRate(self.extend(request, params))
#
# {
# "code":200,
# "data":[
# {
# "symbol":"btcusdt",
# "actualMakerRate":"0.002",
# "actualTakerRate":"0.002",
# "takerFeeRate":"0.002",
# "makerFeeRate":"0.002"
# }
# ],
# "success":true
# }
#
data = self.safe_value(response, 'data', [])
first = self.safe_value(data, 0, {})
return self.parse_trading_fee(first, market)
def fetch_trading_limits(self, symbols=None, params={}):
        # this method should not be called directly, use loadTradingLimits() instead
        # by default it will try to load the trading limits of all symbols(with separate requests)
        # however if you define symbols = ['ETH/BTC', 'LTC/BTC'] in args it will only load those
self.load_markets()
if symbols is None:
symbols = self.symbols
result = {}
for i in range(0, len(symbols)):
symbol = symbols[i]
result[symbol] = self.fetch_trading_limits_by_id(self.market_id(symbol), params)
return result
def fetch_trading_limits_by_id(self, id, params={}):
request = {
'symbol': id,
}
response = self.spotPublicGetV1CommonExchange(self.extend(request, params))
#
# {status: "ok",
# data: { symbol: "aidocbtc",
# 'buy-limit-must-less-than': 1.1,
# 'sell-limit-must-greater-than': 0.9,
# 'limit-order-must-greater-than': 1,
# 'limit-order-must-less-than': 5000000,
# 'market-buy-order-must-greater-than': 0.0001,
# 'market-buy-order-must-less-than': 100,
# 'market-sell-order-must-greater-than': 1,
# 'market-sell-order-must-less-than': 500000,
# 'circuit-break-when-greater-than': 10000,
# 'circuit-break-when-less-than': 10,
# 'market-sell-order-rate-must-less-than': 0.1,
# 'market-buy-order-rate-must-less-than': 0.1 }}
#
return self.parse_trading_limits(self.safe_value(response, 'data', {}))
def parse_trading_limits(self, limits, symbol=None, params={}):
#
# { symbol: "aidocbtc",
# 'buy-limit-must-less-than': 1.1,
# 'sell-limit-must-greater-than': 0.9,
# 'limit-order-must-greater-than': 1,
# 'limit-order-must-less-than': 5000000,
# 'market-buy-order-must-greater-than': 0.0001,
# 'market-buy-order-must-less-than': 100,
# 'market-sell-order-must-greater-than': 1,
# 'market-sell-order-must-less-than': 500000,
# 'circuit-break-when-greater-than': 10000,
# 'circuit-break-when-less-than': 10,
# 'market-sell-order-rate-must-less-than': 0.1,
# 'market-buy-order-rate-must-less-than': 0.1 }
#
return {
'info': limits,
'limits': {
'amount': {
'min': self.safe_number(limits, 'limit-order-must-greater-than'),
'max': self.safe_number(limits, 'limit-order-must-less-than'),
},
},
}
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['cost'], self.precisionMode)
def fetch_markets(self, params={}):
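        # iterates the options['fetchMarkets']['types'] map and aggregates the markets of every
        # enabled type(spot) and subType(linear/inverse futures and swaps) into a single list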
options = self.safe_value(self.options, 'fetchMarkets', {})
types = self.safe_value(options, 'types', {})
allMarkets = []
promises = []
keys = list(types.keys())
for i in range(0, len(keys)):
type = keys[i]
value = self.safe_value(types, type)
if value is True:
promises.append(self.fetch_markets_by_type_and_sub_type(type, None, params))
else:
subKeys = list(value.keys())
for j in range(0, len(subKeys)):
subType = subKeys[j]
subValue = self.safe_value(value, subType)
if subValue:
promises.append(self.fetch_markets_by_type_and_sub_type(type, subType, params))
for i in range(0, len(promises)):
allMarkets = self.array_concat(allMarkets, promises[i])
return allMarkets
def fetch_markets_by_type_and_sub_type(self, type, subType, params={}):
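        # selects the listing endpoint by market type: spot symbols, linear contract info
        # (with business_type=futures for linear futures), inverse future contract info, or inverse swap contract info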
method = 'spotPublicGetV1CommonSymbols'
query = self.omit(params, ['type', 'subType'])
spot = (type == 'spot')
contract = (type != 'spot')
future = (type == 'future')
swap = (type == 'swap')
linear = None
inverse = None
request = {}
if contract:
linear = (subType == 'linear')
inverse = (subType == 'inverse')
if linear:
method = 'contractPublicGetLinearSwapApiV1SwapContractInfo'
if future:
request['business_type'] = 'futures'
elif inverse:
if future:
method = 'contractPublicGetApiV1ContractContractInfo'
elif swap:
method = 'contractPublicGetSwapApiV1SwapContractInfo'
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
# {
# "status":"ok",
# "data":[
# {
# "base-currency":"xrp3s",
# "quote-currency":"usdt",
# "price-precision":4,
# "amount-precision":4,
# "symbol-partition":"innovation",
# "symbol":"xrp3susdt",
# "state":"online",
# "value-precision":8,
# "min-order-amt":0.01,
# "max-order-amt":1616.4353,
# "min-order-value":5,
# "limit-order-min-order-amt":0.01,
# "limit-order-max-order-amt":1616.4353,
# "limit-order-max-buy-amt":1616.4353,
# "limit-order-max-sell-amt":1616.4353,
# "sell-market-min-order-amt":0.01,
# "sell-market-max-order-amt":1616.4353,
# "buy-market-max-order-value":2500,
# "max-order-value":2500,
# "underlying":"xrpusdt",
# "mgmt-fee-rate":0.035000000000000000,
# "charge-time":"23:55:00",
# "rebal-time":"00:00:00",
# "rebal-threshold":-5,
# "init-nav":10.000000000000000000,
# "api-trading":"enabled",
# "tags":"etp,nav,holdinglimit"
# },
# ]
# }
#
# inverse future
#
# {
# "status":"ok",
# "data":[
# {
# "symbol":"BTC",
# "contract_code":"BTC211126",
        #             "contract_type":"this_week",
# "contract_size":100.000000000000000000,
# "price_tick":0.010000000000000000,
# "delivery_date":"20211126",
# "delivery_time":"1637913600000",
# "create_date":"20211112",
# "contract_status":1,
# "settlement_time":"1637481600000"
# },
# ],
# "ts":1637474595140
# }
#
# linear futures
#
# {
# "status":"ok",
# "data":[
# {
# "symbol":"BTC",
# "contract_code":"BTC-USDT-211231",
# "contract_size":0.001000000000000000,
# "price_tick":0.100000000000000000,
# "delivery_date":"20211231",
# "delivery_time":"1640937600000",
# "create_date":"20211228",
# "contract_status":1,
# "settlement_date":"1640764800000",
# "support_margin_mode":"cross",
# "business_type":"futures",
# "pair":"BTC-USDT",
        #                 "contract_type":"this_week"  # next_week, quarter
# },
# ],
# "ts":1640736207263
# }
#
# swaps
#
# {
# "status":"ok",
# "data":[
# {
# "symbol":"BTC",
# "contract_code":"BTC-USDT",
# "contract_size":0.001000000000000000,
# "price_tick":0.100000000000000000,
# "delivery_time":"",
# "create_date":"20201021",
# "contract_status":1,
# "settlement_date":"1637481600000",
# "support_margin_mode":"all", # isolated
# },
# ],
# "ts":1637474774467
# }
#
markets = self.safe_value(response, 'data')
numMarkets = len(markets)
if numMarkets < 1:
raise NetworkError(self.id + ' fetchMarkets() returned an empty response: ' + self.json(markets))
result = []
for i in range(0, len(markets)):
market = markets[i]
baseId = None
quoteId = None
settleId = None
id = None
lowercaseId = None
lowercaseBaseId = None
if contract:
id = self.safe_string(market, 'contract_code')
lowercaseId = id.lower()
if swap:
parts = id.split('-')
baseId = self.safe_string(market, 'symbol')
lowercaseBaseId = baseId.lower()
quoteId = self.safe_string_lower(parts, 1)
settleId = baseId if inverse else quoteId
elif future:
baseId = self.safe_string(market, 'symbol')
lowercaseBaseId = baseId.lower()
if inverse:
quoteId = 'USD'
settleId = baseId
else:
pair = self.safe_string(market, 'pair')
parts = pair.split('-')
quoteId = self.safe_string(parts, 1)
settleId = quoteId
else:
baseId = self.safe_string(market, 'base-currency')
lowercaseBaseId = baseId.lower()
quoteId = self.safe_string(market, 'quote-currency')
id = baseId + quoteId
lowercaseId = id.lower()
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
settle = self.safe_currency_code(settleId)
symbol = base + '/' + quote
expiry = None
if contract:
if inverse:
symbol += ':' + base
elif linear:
symbol += ':' + quote
if future:
expiry = self.safe_integer(market, 'delivery_time')
symbol += '-' + self.yymmdd(expiry)
contractSize = self.safe_number(market, 'contract_size')
pricePrecision = None
amountPrecision = None
costPrecision = None
if spot:
pricePrecision = self.safe_string(market, 'price-precision')
pricePrecision = self.parse_number('1e-' + pricePrecision)
amountPrecision = self.safe_string(market, 'amount-precision')
amountPrecision = self.parse_number('1e-' + amountPrecision)
costPrecision = self.safe_string(market, 'value-precision')
costPrecision = self.parse_number('1e-' + costPrecision)
else:
pricePrecision = self.safe_number(market, 'price_tick')
amountPrecision = 1
maker = None
taker = None
if spot:
maker = 0 if (base == 'OMG') else 0.2 / 100
taker = 0 if (base == 'OMG') else 0.2 / 100
minAmount = self.safe_number(market, 'min-order-amt')
maxAmount = self.safe_number(market, 'max-order-amt')
minCost = self.safe_number(market, 'min-order-value', 0)
active = None
if spot:
state = self.safe_string(market, 'state')
active = (state == 'online')
elif contract:
contractStatus = self.safe_integer(market, 'contract_status')
active = (contractStatus == 1)
leverageRatio = self.safe_string(market, 'leverage-ratio', '1')
superLeverageRatio = self.safe_string(market, 'super-margin-leverage-ratio', '1')
hasLeverage = Precise.string_gt(leverageRatio, '1') or Precise.string_gt(superLeverageRatio, '1')
# 0 Delisting
# 1 Listing
# 2 Pending Listing
# 3 Suspension
# 4 Suspending of Listing
# 5 In Settlement
# 6 Delivering
# 7 Settlement Completed
# 8 Delivered
# 9 Suspending of Trade
result.append({
'id': id,
'lowercaseId': lowercaseId,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'lowercaseBaseId': lowercaseBaseId,
'quoteId': quoteId,
'settleId': settleId,
'type': type,
'spot': spot,
'margin': (spot and hasLeverage),
'swap': swap,
'future': future,
'option': False,
'active': active,
'contract': contract,
'linear': linear,
'inverse': inverse,
'taker': taker,
'maker': maker,
'contractSize': contractSize,
'expiry': expiry,
'expiryDatetime': self.iso8601(expiry),
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': pricePrecision,
'cost': costPrecision,
},
'limits': {
'leverage': {
'min': self.parse_number('1'),
'max': self.parse_number(leverageRatio),
'superMax': self.parse_number(superLeverageRatio),
},
'amount': {
'min': minAmount,
'max': maxAmount,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': minCost,
'max': None,
},
},
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "amount": 26228.672978342216,
# "open": 9078.95,
# "close": 9146.86,
# "high": 9155.41,
# "id": 209988544334,
# "count": 265846,
# "low": 8988.0,
# "version": 209988544334,
# "ask": [9146.87, 0.156134],
# "vol": 2.3822168242201668E8,
# "bid": [9146.86, 0.080758],
# }
#
# fetchTickers
#
# {
# symbol: "bhdht",
# open: 2.3938,
# high: 2.4151,
# low: 2.3323,
# close: 2.3909,
# amount: 628.992,
# vol: 1493.71841095,
# count: 2088,
# bid: 2.3643,
# bidSize: 0.7136,
# ask: 2.4061,
# askSize: 0.4156
# }
#
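        # merged detail tickers report bid/ask as [price, size] arrays, while the spot tickers list
        # reports scalar bid/ask values with separate bidSize/askSize fields, hence the branching below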
marketId = self.safe_string_2(ticker, 'symbol', 'contract_code')
symbol = self.safe_symbol(marketId, market)
timestamp = self.safe_integer(ticker, 'ts')
bid = None
bidVolume = None
ask = None
askVolume = None
if 'bid' in ticker:
if isinstance(ticker['bid'], list):
bid = self.safe_string(ticker['bid'], 0)
bidVolume = self.safe_string(ticker['bid'], 1)
else:
bid = self.safe_string(ticker, 'bid')
bidVolume = self.safe_string(ticker, 'bidSize')
if 'ask' in ticker:
if isinstance(ticker['ask'], list):
ask = self.safe_string(ticker['ask'], 0)
askVolume = self.safe_string(ticker['ask'], 1)
else:
ask = self.safe_string(ticker, 'ask')
askVolume = self.safe_string(ticker, 'askSize')
open = self.safe_string(ticker, 'open')
close = self.safe_string(ticker, 'close')
baseVolume = self.safe_string(ticker, 'amount')
quoteVolume = self.safe_string(ticker, 'vol')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': bid,
'bidVolume': bidVolume,
'ask': ask,
'askVolume': askVolume,
'vwap': None,
'open': open,
'close': close,
'last': close,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market, False)
def fetch_ticker(self, symbol, params={}):
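        # linear contracts and inverse swaps are queried by 'contract_code', spot and inverse futures by 'symbol'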
self.load_markets()
market = self.market(symbol)
request = {}
fieldName = 'symbol'
method = 'spotPublicGetMarketDetailMerged'
if market['linear']:
method = 'contractPublicGetLinearSwapExMarketDetailMerged'
fieldName = 'contract_code'
elif market['inverse']:
if market['future']:
method = 'contractPublicGetMarketDetailMerged'
elif market['swap']:
method = 'contractPublicGetSwapExMarketDetailMerged'
fieldName = 'contract_code'
request[fieldName] = market['id']
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status": "ok",
# "ch": "market.btcusdt.detail.merged",
# "ts": 1583494336669,
# "tick": {
# "amount": 26228.672978342216,
# "open": 9078.95,
# "close": 9146.86,
# "high": 9155.41,
# "id": 209988544334,
# "count": 265846,
# "low": 8988.0,
# "version": 209988544334,
# "ask": [9146.87, 0.156134],
# "vol": 2.3822168242201668E8,
# "bid": [9146.86, 0.080758],
# }
# }
#
# future, swap
#
# {
# "ch":"market.BTC211126.detail.merged",
# "status":"ok",
# "tick":{
# "amount":"669.3385682049668320322569544150680718474",
# "ask":[59117.44,48],
# "bid":[59082,48],
# "close":"59087.97",
# "count":5947,
# "high":"59892.62",
# "id":1637502670,
# "low":"57402.87",
# "open":"57638",
# "ts":1637502670059,
# "vol":"394598"
# },
# "ts":1637502670059
# }
#
tick = self.safe_value(response, 'tick', {})
ticker = self.parse_ticker(tick, market)
timestamp = self.safe_integer(response, 'ts')
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
return ticker
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
options = self.safe_value(self.options, 'fetchTickers', {})
defaultType = self.safe_string(self.options, 'defaultType', 'spot')
type = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', type)
method = 'spotPublicGetMarketTickers'
defaultSubType = self.safe_string(self.options, 'defaultSubType', 'inverse')
subType = self.safe_string(options, 'subType', defaultSubType)
subType = self.safe_string(params, 'subType', subType)
request = {}
future = (type == 'future')
swap = (type == 'swap')
linear = (subType == 'linear')
inverse = (subType == 'inverse')
if linear:
method = 'contractPublicGetLinearSwapExMarketDetailBatchMerged'
if future:
request['business_type'] = 'futures'
elif inverse:
if future:
method = 'contractPublicGetMarketDetailBatchMerged'
elif swap:
method = 'contractPublicGetSwapExMarketDetailBatchMerged'
params = self.omit(params, ['type', 'subType'])
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "data":[
# {
# "symbol":"hbcbtc",
# "open":5.313E-5,
# "high":5.34E-5,
# "low":5.112E-5,
# "close":5.175E-5,
# "amount":1183.87,
# "vol":0.0618599229,
# "count":205,
# "bid":5.126E-5,
# "bidSize":5.25,
# "ask":5.214E-5,
# "askSize":150.0
# },
# ],
# "status":"ok",
# "ts":1639547261293
# }
#
# inverse swaps, linear swaps, inverse futures
#
# {
# "status":"ok",
# "ticks":[
# {
# "id":1637504679,
# "ts":1637504679372,
# "ask":[0.10644,100],
# "bid":[0.10624,26],
# "symbol":"TRX_CW",
# "open":"0.10233",
# "close":"0.10644",
# "low":"0.1017",
# "high":"0.10725",
# "amount":"2340267.415144052378486261756692535687481566",
# "count":882,
# "vol":"24706"
# }
# ],
# "ts":1637504679376
# }
#
# linear futures
#
# {
# "status":"ok",
# "ticks":[
# {
# "id":1640745627,
# "ts":1640745627957,
# "ask":[48079.1,20],
# "bid":[47713.8,125],
# "business_type":"futures",
# "contract_code":"BTC-USDT-CW",
# "open":"49011.8",
# "close":"47934",
# "low":"47292.3",
# "high":"49011.8",
# "amount":"17.398",
# "count":1515,
# "vol":"17398",
# "trade_turnover":"840726.5048"
# }
# ],
# "ts":1640745627988
# }
#
tickers = self.safe_value_2(response, 'data', 'ticks', [])
timestamp = self.safe_integer(response, 'ts')
result = {}
for i in range(0, len(tickers)):
ticker = self.parse_ticker(tickers[i])
            # the market ids for linear futures are non-standard and differ from all the other endpoints,
            # so we match them against the loaded markets with a linear scan here
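            # the batch endpoint reports ids like BTC-USDT-CW / -NW / -CQ / -NQ for
            # this_week / next_week / this_quarter / next_quarter contracts respectively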
if future and linear:
for j in range(0, len(self.symbols)):
symbol = self.symbols[j]
market = self.market(symbol)
contractType = self.safe_string(market['info'], 'contract_type')
if (contractType == 'this_week') and (ticker['symbol'] == (market['baseId'] + '-' + market['quoteId'] + '-CW')):
ticker['symbol'] = market['symbol']
break
elif (contractType == 'next_week') and (ticker['symbol'] == (market['baseId'] + '-' + market['quoteId'] + '-NW')):
ticker['symbol'] = market['symbol']
break
elif (contractType == 'this_quarter') and (ticker['symbol'] == (market['baseId'] + '-' + market['quoteId'] + '-CQ')):
ticker['symbol'] = market['symbol']
break
elif (contractType == 'next_quarter') and (ticker['symbol'] == (market['baseId'] + '-' + market['quoteId'] + '-NQ')):
ticker['symbol'] = market['symbol']
break
symbol = ticker['symbol']
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
#
# from the API docs
#
            # to get depth data within step 150, use step0, step1, step2, step3, step4, step5, step14, step15(merged depth data 0-5, 14-15); when step is 0, depth data will not be merged
            # to get depth data within step 20, use step6, step7, step8, step9, step10, step11, step12, step13(merged depth data 7-13); when step is 6, depth data will not be merged
#
'type': 'step0',
# 'symbol': market['id'], # spot, future
# 'contract_code': market['id'], # swap
}
fieldName = 'symbol'
method = 'spotPublicGetMarketDepth'
if market['linear']:
method = 'contractPublicGetLinearSwapExMarketDepth'
fieldName = 'contract_code'
elif market['inverse']:
if market['future']:
method = 'contractPublicGetMarketDepth'
elif market['swap']:
method = 'contractPublicGetSwapExMarketDepth'
fieldName = 'contract_code'
else:
if limit is not None:
# Valid depths are 5, 10, 20 or empty https://huobiapi.github.io/docs/spot/v1/en/#get-market-depth
if (limit != 5) and (limit != 10) and (limit != 20) and (limit != 150):
raise BadRequest(self.id + ' fetchOrderBook() limit argument must be None, 5, 10, 20, or 150, default is 150')
# only set the depth if it is not 150
# 150 is the implicit default on the exchange side for step0 and no orderbook aggregation
# it is not accepted by the exchange if you set it explicitly
if limit != 150:
request['depth'] = limit
request[fieldName] = market['id']
response = getattr(self, method)(self.extend(request, params))
#
# spot, future, swap
#
# {
# "status": "ok",
# "ch": "market.btcusdt.depth.step0",
# "ts": 1583474832790,
# "tick": {
# "bids": [
# [9100.290000000000000000, 0.200000000000000000],
# [9099.820000000000000000, 0.200000000000000000],
# [9099.610000000000000000, 0.205000000000000000],
# ],
# "asks": [
# [9100.640000000000000000, 0.005904000000000000],
# [9101.010000000000000000, 0.287311000000000000],
# [9101.030000000000000000, 0.012121000000000000],
# ],
# "ch":"market.BTC-USD.depth.step0",
# "ts":1583474832008,
# "id":1637554816,
# "mrid":121654491624,
# "version":104999698780
# }
# }
#
if 'tick' in response:
if not response['tick']:
raise BadSymbol(self.id + ' fetchOrderBook() returned empty response: ' + self.json(response))
tick = self.safe_value(response, 'tick')
timestamp = self.safe_integer(tick, 'ts', self.safe_integer(response, 'ts'))
result = self.parse_order_book(tick, symbol, timestamp)
result['nonce'] = self.safe_integer(tick, 'version')
return result
raise ExchangeError(self.id + ' fetchOrderBook() returned unrecognized response: ' + self.json(response))
def parse_trade(self, trade, market=None):
#
# spot fetchTrades(public)
#
# {
# "amount": 0.010411000000000000,
# "trade-id": 102090736910,
# "ts": 1583497692182,
# "id": 10500517034273194594947,
# "price": 9096.050000000000000000,
# "direction": "sell"
# }
#
# spot fetchMyTrades(private)
#
# {
# 'symbol': 'swftcbtc',
# 'fee-currency': 'swftc',
# 'filled-fees': '0',
# 'source': 'spot-api',
# 'id': 83789509854000,
# 'type': 'buy-limit',
# 'order-id': 83711103204909,
# 'filled-points': '0.005826843283532154',
# 'fee-deduct-currency': 'ht',
# 'filled-amount': '45941.53',
# 'price': '0.0000001401',
# 'created-at': 1597933260729,
# 'match-id': 100087455560,
# 'role': 'maker',
# 'trade-id': 100050305348
# }
#
# linear swap isolated margin fetchOrder details
#
# {
# "trade_id": 131560927,
# "trade_price": 13059.800000000000000000,
# "trade_volume": 1.000000000000000000,
# "trade_turnover": 13.059800000000000000,
# "trade_fee": -0.005223920000000000,
# "created_at": 1603703614715,
# "role": "taker",
# "fee_asset": "USDT",
# "profit": 0,
# "real_profit": 0,
# "id": "131560927-770334322963152896-1"
# }
#
# inverse swap cross margin fetchMyTrades
#
# {
# "contract_type":"swap",
# "pair":"O3-USDT",
# "business_type":"swap",
# "query_id":652123190,
# "match_id":28306009409,
# "order_id":941137865226903553,
# "symbol":"O3",
# "contract_code":"O3-USDT",
# "direction":"sell",
# "offset":"open",
# "trade_volume":100.000000000000000000,
# "trade_price":0.398500000000000000,
# "trade_turnover":39.850000000000000000,
# "trade_fee":-0.007970000000000000,
# "offset_profitloss":0E-18,
# "create_date":1644426352999,
# "role":"Maker",
# "order_source":"api",
# "order_id_str":"941137865226903553",
# "id":"28306009409-941137865226903553-1",
# "fee_asset":"USDT",
# "margin_mode":"cross",
# "margin_account":"USDT",
# "real_profit":0E-18,
# "trade_partition":"USDT"
# }
#
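        # spot private trades encode the side and the order type in a single 'type' field such as 'buy-limit',
        # and fees deducted in points are reported via 'filled-points' / 'fee-deduct-currency' instead of 'filled-fees'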
marketId = self.safe_string_2(trade, 'contract_code', 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
timestamp = self.safe_integer_2(trade, 'ts', 'created-at')
timestamp = self.safe_integer_2(trade, 'created_at', 'create_date', timestamp)
order = self.safe_string_2(trade, 'order-id', 'order_id')
side = self.safe_string(trade, 'direction')
type = self.safe_string(trade, 'type')
if type is not None:
typeParts = type.split('-')
side = typeParts[0]
type = typeParts[1]
takerOrMaker = self.safe_string_lower(trade, 'role')
priceString = self.safe_string_2(trade, 'price', 'trade_price')
amountString = self.safe_string_2(trade, 'filled-amount', 'amount')
amountString = self.safe_string(trade, 'trade_volume', amountString)
costString = self.safe_string(trade, 'trade_turnover')
fee = None
feeCost = self.safe_string_2(trade, 'filled-fees', 'trade_fee')
feeCurrencyId = self.safe_string_2(trade, 'fee-currency', 'fee_asset')
feeCurrency = self.safe_currency_code(feeCurrencyId)
filledPoints = self.safe_string(trade, 'filled-points')
if filledPoints is not None:
if (feeCost is None) or Precise.string_equals(feeCost, '0'):
feeDeductCurrency = self.safe_string(trade, 'fee-deduct-currency')
if feeDeductCurrency != '':
feeCost = filledPoints
feeCurrency = self.safe_currency_code(feeDeductCurrency)
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
tradeId = self.safe_string_2(trade, 'trade-id', 'tradeId')
id = self.safe_string_2(trade, 'trade_id', 'id', tradeId)
return self.safe_trade({
'id': id,
'info': trade,
'order': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': costString,
'fee': fee,
}, market)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
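        # only spot orders are currently routed here, the contract branches of the mapping below are commented out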
marketType = None
marketType, params = self.handle_market_type_and_params('fetchOrderTrades', None, params)
method = self.get_supported_mapping(marketType, {
'spot': 'fetchSpotOrderTrades',
# 'swap': 'fetchContractOrderTrades',
# 'future': 'fetchContractOrderTrades',
})
return getattr(self, method)(id, symbol, since, limit, params)
def fetch_spot_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
'order-id': id,
}
response = self.spotPrivateGetV1OrderOrdersOrderIdMatchresults(self.extend(request, params))
return self.parse_trades(response['data'], None, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
marketType = None
marketType, params = self.handle_market_type_and_params('fetchMyTrades', None, params)
request = {
# spot -----------------------------------------------------------
# 'symbol': market['id'],
# 'types': 'buy-market,sell-market,buy-limit,sell-limit,buy-ioc,sell-ioc,buy-limit-maker,sell-limit-maker,buy-stop-limit,sell-stop-limit',
# 'start-time': since, # max 48 hours within 120 days
# 'end-time': self.milliseconds(), # max 48 hours within 120 days
            # 'from': 'id', # string False N/A Search internal id to begin with; if searching the next page, this should be the last id(not trade-id) of the last page; if searching the previous page, this should be the first id(not trade-id) of the last page
# 'direct': 'next', # next, prev
# 'size': limit, # default 100, max 500 The number of orders to return [1-500]
# contracts ------------------------------------------------------
# 'symbol': market['settleId'], # required
# 'trade_type': 0, # required, 0 all, 1 open long, 2 open short, 3 close short, 4 close long, 5 liquidate long positions, 6 liquidate short positions
# 'contract_code': market['id'],
# 'start_time': since, # max 48 hours within 120 days
# 'end_time': self.milliseconds(), # max 48 hours within 120 days
            # 'from_id': 'id', # string False N/A Search internal id to begin with; if searching the next page, this should be the last id(not trade-id) of the last page; if searching the previous page, this should be the first id(not trade-id) of the last page
# 'direct': 'prev', # next, prev
# 'size': limit, # default 20, max 50
}
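        # spot trades come from GET v1/order/matchresults, contract trades from the POST *matchresults_exact endpoints,
        # selected by margin mode for linear markets(isolated/cross) and by market type for inverse markets(future/swap)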
method = None
market = None
if marketType == 'spot':
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['size'] = limit # default 100, max 500
if since is not None:
request['start-time'] = since # a date within 120 days from today
# request['end-time'] = self.sum(since, 172800000) # 48 hours window
method = 'spotPrivateGetV1OrderMatchresults'
else:
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol for ' + marketType + ' orders')
market = self.market(symbol)
request['contract_code'] = market['id']
request['trade_type'] = 0 # 0 all, 1 open long, 2 open short, 3 close short, 4 close long, 5 liquidate long positions, 6 liquidate short positions
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
if marginType == 'isolated':
method = 'contractPrivatePostLinearSwapApiV1SwapMatchresultsExact'
elif marginType == 'cross':
method = 'contractPrivatePostLinearSwapApiV1SwapCrossMatchresultsExact'
elif market['inverse']:
if marketType == 'future':
method = 'contractPrivatePostApiV1ContractMatchresultsExact'
request['symbol'] = market['settleId']
elif marketType == 'swap':
method = 'contractPrivatePostSwapApiV1SwapMatchresultsExact'
else:
raise NotSupported(self.id + ' fetchMyTrades() does not support ' + marketType + ' markets')
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status": "ok",
# "data": [
# {
# "symbol": "polyusdt",
# "fee-currency": "poly",
# "source": "spot-web",
# "price": "0.338",
# "created-at": 1629443051839,
# "role": "taker",
# "order-id": 345487249132375,
# "match-id": 5014,
# "trade-id": 1085,
# "filled-amount": "147.928994082840236",
# "filled-fees": "0",
# "filled-points": "0.1",
# "fee-deduct-currency": "hbpoint",
# "fee-deduct-state": "done",
# "id": 313288753120940,
# "type": "buy-market"
# }
# ]
# }
#
# contracts
#
# {
# "status": "ok",
# "data": {
# "trades": [
# {
# "query_id": 2424420723,
# "match_id": 113891764710,
# "order_id": 773135295142658048,
# "symbol": "ADA",
# "contract_type": "quarter", # swap
# "business_type": "futures", # swap
# "contract_code": "ADA201225",
# "direction": "buy",
# "offset": "open",
# "trade_volume": 1,
# "trade_price": 0.092,
# "trade_turnover": 10,
# "trade_fee": -0.021739130434782608,
# "offset_profitloss": 0,
# "create_date": 1604371703183,
# "role": "Maker",
# "order_source": "web",
# "order_id_str": "773135295142658048",
# "fee_asset": "ADA",
# "margin_mode": "isolated", # cross
# "margin_account": "BTC-USDT",
# "real_profit": 0,
# "id": "113891764710-773135295142658048-1",
# "trade_partition":"USDT",
# }
# ],
# "remain_size": 15,
# "next_id": 2424413094
# },
# "ts": 1604372202243
# }
#
trades = self.safe_value(response, 'data')
if not isinstance(trades, list):
trades = self.safe_value(trades, 'trades')
return self.parse_trades(trades, market, since, limit)
def fetch_trades(self, symbol, since=None, limit=1000, params={}):
self.load_markets()
market = self.market(symbol)
request = {
# 'symbol': market['id'], # spot, future
# 'contract_code': market['id'], # swap
}
fieldName = 'symbol'
method = 'spotPublicGetMarketHistoryTrade'
if market['future']:
if market['inverse']:
method = 'contractPublicGetMarketHistoryTrade'
elif market['linear']:
method = 'contractPublicGetLinearSwapExMarketHistoryTrade'
fieldName = 'contract_code'
elif market['swap']:
if market['inverse']:
method = 'contractPublicGetSwapExMarketHistoryTrade'
elif market['linear']:
method = 'contractPublicGetLinearSwapExMarketHistoryTrade'
fieldName = 'contract_code'
request[fieldName] = market['id']
if limit is not None:
request['size'] = limit # max 2000
response = getattr(self, method)(self.extend(request, params))
#
# {
# "status": "ok",
# "ch": "market.btcusdt.trade.detail",
# "ts": 1583497692365,
# "data": [
# {
# "id": 105005170342,
# "ts": 1583497692182,
# "data": [
# {
# "amount": 0.010411000000000000,
# "trade-id": 102090736910,
# "ts": 1583497692182,
# "id": 10500517034273194594947,
# "price": 9096.050000000000000000,
# "direction": "sell"
# }
# ]
# },
# # ...
# ]
# }
#
data = self.safe_value(response, 'data')
result = []
for i in range(0, len(data)):
trades = self.safe_value(data[i], 'data', [])
for j in range(0, len(trades)):
trade = self.parse_trade(trades[j], market)
result.append(trade)
result = self.sort_by(result, 'timestamp')
return self.filter_by_symbol_since_limit(result, market['symbol'], since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "amount":1.2082,
# "open":0.025096,
# "close":0.025095,
# "high":0.025096,
# "id":1591515300,
# "count":6,
# "low":0.025095,
# "vol":0.0303205097
# }
#
return [
self.safe_timestamp(ohlcv, 'id'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'amount'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'period': self.timeframes[timeframe],
# 'symbol': market['id'], # spot, future
# 'contract_code': market['id'], # swap
# 'size': 1000, # max 1000 for spot, 2000 for contracts
# 'from': int(since / 1000), spot only
# 'to': self.seconds(), spot only
}
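        # an optional 'price' param('mark', 'index' or 'premiumIndex') switches contract markets
        # to the corresponding mark-price, index or premium-index kline endpoints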
fieldName = 'symbol'
price = self.safe_string(params, 'price')
params = self.omit(params, 'price')
method = 'spotPublicGetMarketHistoryCandles'
if market['spot']:
if since is not None:
request['from'] = int(since / 1000)
if limit is not None:
request['size'] = limit # max 2000
elif market['future']:
if market['inverse']:
if price == 'mark':
method = 'contractPublicGetIndexMarketHistoryMarkPriceKline'
elif price == 'index':
method = 'contractPublicGetIndexMarketHistoryIndex'
elif price == 'premiumIndex':
raise BadRequest(self.id + ' ' + market['type'] + ' has no api endpoint for ' + price + ' kline data')
else:
method = 'contractPublicGetMarketHistoryKline'
elif market['linear']:
if price == 'mark':
method = 'contractPublicGetIndexMarketHistoryLinearSwapMarkPriceKline'
elif price == 'index':
raise BadRequest(self.id + ' ' + market['type'] + ' has no api endpoint for ' + price + ' kline data')
elif price == 'premiumIndex':
method = 'contractPublicGetIndexMarketHistoryLinearSwapPremiumIndexKline'
else:
method = 'contractPublicGetLinearSwapExMarketHistoryKline'
fieldName = 'contract_code'
elif market['swap']:
if market['inverse']:
if price == 'mark':
method = 'contractPublicGetIndexMarketHistorySwapMarkPriceKline'
elif price == 'index':
raise BadRequest(self.id + ' ' + market['type'] + ' has no api endpoint for ' + price + ' kline data')
elif price == 'premiumIndex':
method = 'contractPublicGetIndexMarketHistorySwapPremiumIndexKline'
else:
method = 'contractPublicGetSwapExMarketHistoryKline'
elif market['linear']:
if price == 'mark':
method = 'contractPublicGetIndexMarketHistoryLinearSwapMarkPriceKline'
elif price == 'index':
raise BadRequest(self.id + ' ' + market['type'] + ' has no api endpoint for ' + price + ' kline data')
elif price == 'premiumIndex':
method = 'contractPublicGetIndexMarketHistoryLinearSwapPremiumIndexKline'
else:
method = 'contractPublicGetLinearSwapExMarketHistoryKline'
fieldName = 'contract_code'
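        # for contract markets a from/to window in seconds is derived from the timeframe duration
        # and the requested limit(default 2000), unless a special 'price' kline type was requested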
if market['contract']:
if limit is None:
limit = 2000
if price is None:
duration = self.parse_timeframe(timeframe)
if since is None:
now = self.seconds()
request['from'] = now - duration * (limit - 1)
request['to'] = now
else:
start = int(since / 1000)
request['from'] = start
request['to'] = self.sum(start, duration * (limit - 1))
request[fieldName] = market['id']
response = getattr(self, method)(self.extend(request, params))
#
# {
# "status":"ok",
# "ch":"market.ethbtc.kline.1min",
# "ts":1591515374371,
# "data":[
# {"amount":0.0,"open":0.025095,"close":0.025095,"high":0.025095,"id":1591515360,"count":0,"low":0.025095,"vol":0.0},
# {"amount":1.2082,"open":0.025096,"close":0.025095,"high":0.025096,"id":1591515300,"count":6,"low":0.025095,"vol":0.0303205097},
# {"amount":0.0648,"open":0.025096,"close":0.025096,"high":0.025096,"id":1591515240,"count":2,"low":0.025096,"vol":0.0016262208},
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
def fetch_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'index',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def fetch_mark_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'mark',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def fetch_premium_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
request = {
'price': 'premiumIndex',
}
return self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def fetch_accounts(self, params={}):
self.load_markets()
response = self.spotPrivateGetV1AccountAccounts(params)
#
# {
# "status":"ok",
# "data":[
# {"id":5202591,"type":"point","subtype":"","state":"working"},
# {"id":1528640,"type":"spot","subtype":"","state":"working"},
# ]
# }
#
return response['data']
def fetch_account_id_by_type(self, type, params={}):
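        # returns the explicit 'account-id' param if provided, otherwise the id of the loaded
        # account whose type matches, falling back to the first account overall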
accounts = self.load_accounts()
accountId = self.safe_value(params, 'account-id')
if accountId is not None:
return accountId
indexedAccounts = self.index_by(accounts, 'type')
defaultAccount = self.safe_value(accounts, 0, {})
account = self.safe_value(indexedAccounts, type, defaultAccount)
return self.safe_string(account, 'id')
def fetch_currencies(self, params={}):
response = self.spotPublicGetV2ReferenceCurrencies()
# {
# "code": 200,
# "data": [
# {
# "currency": "sxp",
# "assetType": "1",
# "chains": [
# {
# "chain": "sxp",
# "displayName": "ERC20",
# "baseChain": "ETH",
# "baseChainProtocol": "ERC20",
# "isDynamic": True,
# "numOfConfirmations": "12",
# "numOfFastConfirmations": "12",
# "depositStatus": "allowed",
# "minDepositAmt": "0.23",
# "withdrawStatus": "allowed",
# "minWithdrawAmt": "0.23",
# "withdrawPrecision": "8",
# "maxWithdrawAmt": "227000.000000000000000000",
# "withdrawQuotaPerDay": "227000.000000000000000000",
# "withdrawQuotaPerYear": null,
# "withdrawQuotaTotal": null,
# "withdrawFeeType": "fixed",
# "transactFeeWithdraw": "11.1653",
# "addrWithTag": False,
# "addrDepositTag": False
# }
# ],
# "instStatus": "normal"
# }
# ]
# }
#
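        # a currency can be available on several chains, so the per-chain limits, fees and deposit/withdraw
        # statuses are collected into 'networks', and the currency-level precision is the coarsest tick across chains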
data = self.safe_value(response, 'data', [])
result = {}
for i in range(0, len(data)):
entry = data[i]
currencyId = self.safe_string(entry, 'currency')
code = self.safe_currency_code(currencyId)
chains = self.safe_value(entry, 'chains', [])
networks = {}
instStatus = self.safe_string(entry, 'instStatus')
currencyActive = instStatus == 'normal'
fee = None
minPrecision = None
minWithdraw = None
maxWithdraw = None
deposit = None
withdraw = None
for j in range(0, len(chains)):
chain = chains[j]
networkId = self.safe_string(chain, 'chain')
baseChainProtocol = self.safe_string(chain, 'baseChainProtocol')
huobiToken = 'h' + currencyId
if baseChainProtocol is None:
if huobiToken == networkId:
baseChainProtocol = 'ERC20'
else:
baseChainProtocol = self.safe_string(chain, 'displayName')
network = self.safe_network(baseChainProtocol)
minWithdraw = self.safe_number(chain, 'minWithdrawAmt')
maxWithdraw = self.safe_number(chain, 'maxWithdrawAmt')
withdrawStatus = self.safe_string(chain, 'withdrawStatus')
depositStatus = self.safe_string(chain, 'depositStatus')
withdrawEnabled = (withdrawStatus == 'allowed')
depositEnabled = (depositStatus == 'allowed')
active = withdrawEnabled and depositEnabled
precision = self.safe_string(chain, 'withdrawPrecision')
if precision is not None:
precision = self.parse_number('1e-' + precision)
minPrecision = precision if (minPrecision is None) else max(precision, minPrecision)
if withdrawEnabled and not withdraw:
withdraw = True
elif not withdrawEnabled:
withdraw = False
if depositEnabled and not deposit:
deposit = True
elif not depositEnabled:
deposit = False
fee = self.safe_number(chain, 'transactFeeWithdraw')
networks[network] = {
'info': chain,
'id': networkId,
'network': network,
'limits': {
'withdraw': {
'min': minWithdraw,
'max': maxWithdraw,
},
},
'active': active,
'deposit': depositEnabled,
'withdraw': withdrawEnabled,
'fee': fee,
'precision': precision,
}
networksKeys = list(networks.keys())
networkLength = len(networksKeys)
result[code] = {
'info': entry,
'code': code,
'id': currencyId,
'active': currencyActive,
'deposit': deposit,
'withdraw': withdraw,
'fee': fee if (networkLength <= 1) else None,
'name': None,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': minWithdraw if (networkLength <= 1) else None,
'max': maxWithdraw if (networkLength <= 1) else None,
},
},
'precision': minPrecision,
'networks': networks,
}
return result
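        #
        # illustrative usage sketch, assumption only: each entry built above keeps chain-level
        # limits under 'networks', while the top-level 'fee' and withdraw limits are populated
        # only for single-chain currencies; 'USDT' below is just an example code
        #
        #     currencies = exchange.fetch_currencies()
        #     usdt = currencies.get('USDT', {})
        #     for network_code, network in usdt.get('networks', {}).items():
        #         print(network_code, network['withdraw'], network['limits']['withdraw']['min'])
        #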
def fetch_balance(self, params={}):
self.load_markets()
        options = self.safe_value(self.options, 'fetchBalance', {})
defaultType = self.safe_string(self.options, 'defaultType', 'spot')
type = self.safe_string(options, 'type', defaultType)
type = self.safe_string(params, 'type', type)
params = self.omit(params, 'type')
request = {}
method = None
spot = (type == 'spot')
future = (type == 'future')
swap = (type == 'swap')
defaultSubType = self.safe_string(self.options, 'defaultSubType', 'inverse')
subType = self.safe_string(options, 'subType', defaultSubType)
subType = self.safe_string(params, 'subType', subType)
inverse = (subType == 'inverse')
linear = (subType == 'linear')
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', 'isolated')
isolated = (marginType == 'isolated')
cross = (marginType == 'cross')
if spot:
self.load_accounts()
accountId = self.fetch_account_id_by_type(type, params)
request['account-id'] = accountId
method = 'spotPrivateGetV1AccountAccountsAccountIdBalance'
elif linear:
if marginType == 'isolated':
method = 'contractPrivatePostLinearSwapApiV1SwapAccountInfo'
else:
method = 'contractPrivatePostLinearSwapApiV1SwapCrossAccountInfo'
elif inverse:
if future:
method = 'contractPrivatePostApiV1ContractAccountInfo'
elif swap:
method = 'contractPrivatePostSwapApiV1SwapAccountInfo'
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status":"ok",
# "data":{
# "id":1528640,
# "type":"spot",
# "state":"working",
# "list":[
# {"currency":"lun","type":"trade","balance":"0","seq-num":"0"},
# {"currency":"lun","type":"frozen","balance":"0","seq-num":"0"},
# {"currency":"ht","type":"frozen","balance":"0","seq-num":"145"},
# ]
# },
# "ts":1637644827566
# }
#
# future, swap isolated
#
# {
# "status":"ok",
# "data":[
# {
# "symbol":"BTC",
# "margin_balance":0,
# "margin_position":0E-18,
# "margin_frozen":0,
# "margin_available":0E-18,
# "profit_real":0,
# "profit_unreal":0,
# "risk_rate":null,
# "withdraw_available":0,
# "liquidation_price":null,
# "lever_rate":5,
# "adjust_factor":0.025000000000000000,
# "margin_static":0,
# "is_debit":0, # future only
# "contract_code":"BTC-USD", # swap only
# "margin_asset":"USDT", # linear only
# "margin_mode":"isolated", # linear only
# "margin_account":"BTC-USDT" # linear only
# "transfer_profit_ratio":null # inverse only
# },
# ],
# "ts":1637644827566
# }
#
# linear cross futures and linear cross swap
#
# {
# "status":"ok",
# "data":[
# {
# "futures_contract_detail":[
# {
# "symbol":"ETH",
# "contract_code":"ETH-USDT-220325",
# "margin_position":0,
# "margin_frozen":0,
# "margin_available":200.000000000000000000,
# "profit_unreal":0E-18,
# "liquidation_price":null,
# "lever_rate":5,
# "adjust_factor":0.060000000000000000,
# "contract_type":"quarter",
# "pair":"ETH-USDT",
# "business_type":"futures"
# },
# ],
# "margin_mode":"cross",
# "margin_account":"USDT",
# "margin_asset":"USDT",
# "margin_balance":200.000000000000000000,
# "margin_static":200.000000000000000000,
# "margin_position":0,
# "margin_frozen":0,
# "profit_real":0E-18,
# "profit_unreal":0,
# "withdraw_available":2E+2,
# "risk_rate":null,
# "contract_detail":[
# {
# "symbol":"MANA",
# "contract_code":"MANA-USDT",
# "margin_position":0,
# "margin_frozen":0,
# "margin_available":200.000000000000000000,
# "profit_unreal":0E-18,
# "liquidation_price":null,
# "lever_rate":5,
# "adjust_factor":0.100000000000000000,
# "contract_type":"swap",
# "pair":"MANA-USDT",
# "business_type":"swap"
# },
# ]
# }
# ],
# "ts":1640915104870
# }
#
result = {'info': response}
data = self.safe_value(response, 'data')
if spot:
balances = self.safe_value(data, 'list', [])
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = None
if code in result:
account = result[code]
else:
account = self.account()
if balance['type'] == 'trade':
account['free'] = self.safe_string(balance, 'balance')
if balance['type'] == 'frozen':
account['used'] = self.safe_string(balance, 'balance')
result[code] = account
elif linear:
first = self.safe_value(data, 0, {})
if cross:
account = self.account()
                account['free'] = self.safe_string_2(first, 'margin_balance', 'margin_available')
account['used'] = self.safe_string(first, 'margin_frozen')
currencyId = self.safe_string_2(first, 'margin_asset', 'symbol')
code = self.safe_currency_code(currencyId)
result[code] = account
elif isolated:
for i in range(0, len(data)):
balance = data[i]
marketId = self.safe_string_2(balance, 'contract_code', 'margin_account')
market = self.safe_market(marketId)
account = self.account()
account['free'] = self.safe_string(balance, 'margin_balance')
account['used'] = self.safe_string(balance, 'margin_frozen')
code = market['settle']
accountsByCode = {}
accountsByCode[code] = account
symbol = market['symbol']
result[symbol] = self.safe_balance(accountsByCode)
return result
elif inverse:
for i in range(0, len(data)):
balance = data[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'margin_available')
account['used'] = self.safe_string(balance, 'margin_frozen')
result[code] = account
return self.safe_balance(result)
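        #
        # illustrative usage sketch, assumption only: the endpoint used by fetchBalance is driven
        # by 'type'(spot, future, swap), 'subType'(linear, inverse) and the configured margin mode,
        # taken from params or from self.options defaults; credentials below are placeholders
        #
        #     import ccxt
        #     exchange = ccxt.huobi({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
        #     spot_balance = exchange.fetch_balance({'type': 'spot'})
        #     linear_swap_balance = exchange.fetch_balance({'type': 'swap', 'subType': 'linear'})
        #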
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
marketType = None
marketType, params = self.handle_market_type_and_params('fetchOrder', None, params)
request = {
# spot -----------------------------------------------------------
# 'order-id': 'id',
# 'symbol': market['id'],
# 'client-order-id': clientOrderId,
# 'clientOrderId': clientOrderId,
# contracts ------------------------------------------------------
# 'order_id': id,
# 'client_order_id': clientOrderId,
# 'contract_code': market['id'],
# 'pair': 'BTC-USDT',
            # 'contract_type': 'this_week', # swap, this_week, next_week, quarter, next_quarter
}
method = None
market = None
if marketType == 'spot':
clientOrderId = self.safe_string(params, 'clientOrderId')
method = 'spotPrivateGetV1OrderOrdersOrderId'
if clientOrderId is not None:
method = 'spotPrivateGetV1OrderOrdersGetClientOrder'
# will be filled below in self.extend()
# they expect clientOrderId instead of client-order-id
# request['clientOrderId'] = clientOrderId
else:
request['order-id'] = id
else:
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol for ' + marketType + ' orders')
market = self.market(symbol)
request['contract_code'] = market['id']
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
if marginType == 'isolated':
method = 'contractPrivatePostLinearSwapApiV1SwapOrderInfo'
elif marginType == 'cross':
method = 'contractPrivatePostLinearSwapApiV1SwapCrossOrderInfo'
elif market['inverse']:
if marketType == 'future':
method = 'contractPrivatePostApiV1ContractOrderInfo'
request['symbol'] = market['settleId']
elif marketType == 'swap':
method = 'contractPrivatePostSwapApiV1SwapOrderInfo'
else:
raise NotSupported(self.id + ' fetchOrder() does not support ' + marketType + ' markets')
clientOrderId = self.safe_string_2(params, 'client_order_id', 'clientOrderId')
if clientOrderId is None:
request['order_id'] = id
else:
request['client_order_id'] = clientOrderId
params = self.omit(params, ['client_order_id', 'clientOrderId'])
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status":"ok",
# "data":{
# "id":438398393065481,
# "symbol":"ethusdt",
# "account-id":1528640,
# "client-order-id":"AA03022abc2163433e-006b-480e-9ad1-d4781478c5e7",
# "amount":"0.100000000000000000",
# "price":"3000.000000000000000000",
# "created-at":1640549994642,
# "type":"buy-limit",
# "field-amount":"0.0",
# "field-cash-amount":"0.0",
# "field-fees":"0.0",
# "finished-at":0,
# "source":"spot-api",
# "state":"submitted",
# "canceled-at":0
# }
# }
#
# linear swap cross margin
#
# {
# "status":"ok",
# "data":[
# {
# "business_type":"swap",
# "contract_type":"swap",
# "pair":"BTC-USDT",
# "symbol":"BTC",
# "contract_code":"BTC-USDT",
# "volume":1,
# "price":3000,
# "order_price_type":"limit",
# "order_type":1,
# "direction":"buy",
# "offset":"open",
# "lever_rate":1,
# "order_id":924912513206878210,
# "client_order_id":null,
# "created_at":1640557927189,
# "trade_volume":0,
# "trade_turnover":0,
# "fee":0,
# "trade_avg_price":null,
# "margin_frozen":3.000000000000000000,
# "profit":0,
# "status":3,
# "order_source":"api",
# "order_id_str":"924912513206878210",
# "fee_asset":"USDT",
# "liquidation_type":"0",
# "canceled_at":0,
# "margin_asset":"USDT",
# "margin_account":"USDT",
# "margin_mode":"cross",
# "is_tpsl":0,
# "real_profit":0
# }
# ],
# "ts":1640557982556
# }
#
# linear swap isolated margin detail
#
# {
# "status": "ok",
# "data": {
# "symbol": "BTC",
# "contract_code": "BTC-USDT",
# "instrument_price": 0,
# "final_interest": 0,
# "adjust_value": 0,
# "lever_rate": 10,
# "direction": "sell",
# "offset": "open",
# "volume": 1.000000000000000000,
# "price": 13059.800000000000000000,
# "created_at": 1603703614712,
# "canceled_at": 0,
# "order_source": "api",
# "order_price_type": "opponent",
# "margin_frozen": 0,
# "profit": 0,
# "trades": [
# {
# "trade_id": 131560927,
# "trade_price": 13059.800000000000000000,
# "trade_volume": 1.000000000000000000,
# "trade_turnover": 13.059800000000000000,
# "trade_fee": -0.005223920000000000,
# "created_at": 1603703614715,
# "role": "taker",
# "fee_asset": "USDT",
# "profit": 0,
# "real_profit": 0,
# "id": "131560927-770334322963152896-1"
# }
# ],
# "total_page": 1,
# "current_page": 1,
# "total_size": 1,
# "liquidation_type": "0",
# "fee_asset": "USDT",
# "fee": -0.005223920000000000,
# "order_id": 770334322963152896,
# "order_id_str": "770334322963152896",
# "client_order_id": 57012021045,
# "order_type": "1",
# "status": 6,
# "trade_avg_price": 13059.800000000000000000,
# "trade_turnover": 13.059800000000000000,
# "trade_volume": 1.000000000000000000,
# "margin_asset": "USDT",
# "margin_mode": "isolated",
# "margin_account": "BTC-USDT",
# "real_profit": 0,
# "is_tpsl": 0
# },
# "ts": 1603703678477
# }
order = self.safe_value(response, 'data')
if isinstance(order, list):
order = self.safe_value(order, 0)
return self.parse_order(order)
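        #
        # illustrative usage sketch, assumption only: spot lookups switch to the client-order
        # endpoint when a clientOrderId is supplied, while contract lookups require a symbol to
        # select the right endpoint; ids and symbols below are placeholders
        #
        #     order = exchange.fetch_order('438398393065481', 'ETH/USDT')
        #     by_client_id = exchange.fetch_order(None, 'ETH/USDT', {'clientOrderId': 'myOrder123'})
        #     swap_order = exchange.fetch_order('924912513206878210', 'BTC/USDT:USDT')
        #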
def fetch_spot_orders_by_states(self, states, symbol=None, since=None, limit=None, params={}):
method = self.safe_string(self.options, 'fetchOrdersByStatesMethod', 'spot_private_get_v1_order_orders') # spot_private_get_v1_order_history
if method == 'spot_private_get_v1_order_orders':
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
self.load_markets()
market = None
request = {
# spot_private_get_v1_order_orders GET /v1/order/orders ----------
# 'symbol': market['id'], # required
# 'types': 'buy-market,sell-market,buy-limit,sell-limit,buy-ioc,sell-ioc,buy-stop-limit,sell-stop-limit,buy-limit-fok,sell-limit-fok,buy-stop-limit-fok,sell-stop-limit-fok',
# 'start-time': since, # max window of 48h within a range of 180 days, within past 2 hours for cancelled orders
# 'end-time': self.milliseconds(),
'states': states, # filled, partial-canceled, canceled
# 'from': order['id'],
# 'direct': 'next', # next, prev, used with from
# 'size': 100, # max 100
# spot_private_get_v1_order_history GET /v1/order/history --------
# 'symbol': market['id'], # optional
# 'start-time': since, # max window of 48h within a range of 180 days, within past 2 hours for cancelled orders
# 'end-time': self.milliseconds(),
# 'direct': 'next', # next, prev, used with from
# 'size': 100, # max 100
}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['start-time'] = since # a window of 48 hours within 180 days
request['end-time'] = self.sum(since, 48 * 60 * 60 * 1000)
if limit is not None:
request['size'] = limit
response = getattr(self, method)(self.extend(request, params))
#
# spot_private_get_v1_order_orders GET /v1/order/orders
#
# {
# status: "ok",
# data: [
# {
# id: 13997833014,
# symbol: "ethbtc",
# 'account-id': 3398321,
# 'client-order-id': "23456",
# amount: "0.045000000000000000",
# price: "0.034014000000000000",
# 'created-at': 1545836976871,
# type: "sell-limit",
# 'field-amount': "0.045000000000000000",
# 'field-cash-amount': "0.001530630000000000",
# 'field-fees': "0.000003061260000000",
# 'finished-at': 1545837948214,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_orders(data, market, since, limit)
def fetch_spot_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_spot_orders_by_states('pre-submitted,submitted,partial-filled,filled,partial-canceled,canceled', symbol, since, limit, params)
def fetch_closed_spot_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_spot_orders_by_states('filled,partial-canceled,canceled', symbol, since, limit, params)
def fetch_contract_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchContractOrders() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
marketType = None
marketType, params = self.handle_market_type_and_params('fetchOrders', market, params)
request = {
# POST /api/v1/contract_hisorders inverse futures ----------------
# 'symbol': market['settleId'], # BTC, ETH, ...
            # 'order_type': '1', # 1 limit, 3 opponent, 4 lightning, 5 trigger order, 6 post_only, 7 optimal_5, 8 optimal_10, 9 optimal_20, 10 fok, 11 ioc
# POST /swap-api/v1/swap_hisorders inverse swap ------------------
# POST /linear-swap-api/v1/swap_hisorders linear isolated --------
# POST /linear-swap-api/v1/swap_cross_hisorders linear cross -----
'contract_code': market['id'],
            'trade_type': 0, # 0 all, 1 buy long, 2 sell short, 3 buy short, 4 sell long, 5 sell liquidation, 6 buy liquidation, 7 delivery long, 8 delivery short, 11 reduce positions to close long, 12 reduce positions to close short
'type': 1, # 1 all orders, 2 finished orders
'status': '0', # comma separated, 0 all, 3 submitted orders, 4 partially matched, 5 partially cancelled, 6 fully matched and closed, 7 canceled
'create_date': 90, # in days?
# 'page_index': 1,
# 'page_size': limit, # default 20, max 50
# 'sort_by': 'create_date', # create_date descending, update_time descending
}
method = None
request['contract_code'] = market['id']
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
method = self.get_supported_mapping(marginType, {
'isolated': 'contractPrivatePostLinearSwapApiV1SwapHisorders',
'cross': 'contractPrivatePostLinearSwapApiV1SwapCrossHisorders',
})
elif market['inverse']:
method = self.get_supported_mapping(marketType, {
'future': 'contractPrivatePostApiV1ContractHisorders',
'swap': 'contractPrivatePostSwapApiV1SwapHisorders',
})
if marketType == 'future':
request['symbol'] = market['settleId']
if limit is not None:
request['page_size'] = limit
response = getattr(self, method)(self.extend(request, params))
#
# {
# "status": "ok",
# "data": {
# "orders": [
# {
# "order_id": 773131315209248768,
# "contract_code": "ADA201225",
# "symbol": "ADA",
# "lever_rate": 20,
# "direction": "buy",
# "offset": "close",
# "volume": 1,
# "price": 0.0925,
# "create_date": 1604370469629,
# "update_time": 1603704221118,
# "order_source": "web",
# "order_price_type": 6,
# "order_type": 1,
# "margin_frozen": 0,
# "profit": 0,
# "contract_type": "quarter",
# "trade_volume": 0,
# "trade_turnover": 0,
# "fee": 0,
# "trade_avg_price": 0,
# "status": 3,
# "order_id_str": "773131315209248768",
# "fee_asset": "ADA",
# "liquidation_type": "0",
# "is_tpsl": 0,
# "real_profit": 0
# "pair": "BTC-USDT",
# "business_type": "futures",
# "margin_asset": "USDT",
# "margin_mode": "cross",
# "margin_account": "USDT",
# }
# ],
# "total_page": 19,
# "current_page": 1,
# "total_size": 19
# },
# "ts": 1604370617322
# }
#
data = self.safe_value(response, 'data', {})
orders = self.safe_value(data, 'orders', [])
return self.parse_orders(orders, market, since, limit)
def fetch_closed_contract_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'status': '5,6,7', # comma separated, 0 all, 3 submitted orders, 4 partially matched, 5 partially cancelled, 6 fully matched and closed, 7 canceled
}
return self.fetch_contract_orders(symbol, since, limit, self.extend(request, params))
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
marketType = None
marketType, params = self.handle_market_type_and_params('fetchOrders', None, params)
method = self.get_supported_mapping(marketType, {
'spot': 'fetchSpotOrders',
'swap': 'fetchContractOrders',
'future': 'fetchContractOrders',
})
if method is None:
raise NotSupported(self.id + ' fetchOrders does not support ' + marketType + ' markets yet')
contract = (marketType == 'swap') or (marketType == 'future')
if contract and (symbol is None):
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument for ' + marketType + ' orders')
return getattr(self, method)(symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
marketType = None
marketType, params = self.handle_market_type_and_params('fetchClosedOrders', None, params)
method = self.get_supported_mapping(marketType, {
'spot': 'fetchClosedSpotOrders',
'swap': 'fetchClosedContractOrders',
'future': 'fetchClosedContractOrders',
})
if method is None:
raise NotSupported(self.id + ' fetchClosedOrders does not support ' + marketType + ' markets yet')
return getattr(self, method)(symbol, since, limit, params)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
marketType = None
marketType, params = self.handle_market_type_and_params('fetchOpenOrders', None, params)
request = {
# spot -----------------------------------------------------------
# 'account-id': account['id'],
# 'symbol': market['id'],
# 'side': 'buy', # buy, sell
# 'from': 'id', # order id to begin with
# 'direct': 'prev', # prev, next, mandatory if from is defined
# 'size': 100, # default 100, max 500
# futures --------------------------------------------------------
# 'symbol': market['settleId'],
# 'page_index': 1, # default 1
# 'page_size': limit, # default 20, max 50
# 'sort_by': 'created_at', # created_at, update_time, descending sorting field
# 'trade_type': 0, # 0 all, 1 buy long, 2 sell short, 3 buy short, 4 sell long
}
method = None
market = None
if marketType == 'spot':
method = 'spotPrivateGetV1OrderOpenOrders'
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
# todo replace with fetchAccountIdByType
accountId = self.safe_string(params, 'account-id')
if accountId is None:
# pick the first account
self.load_accounts()
for i in range(0, len(self.accounts)):
account = self.accounts[i]
if account['type'] == 'spot':
accountId = self.safe_string(account, 'id')
if accountId is not None:
break
request['account-id'] = accountId
if limit is not None:
request['size'] = limit
params = self.omit(params, 'account-id')
else:
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol for ' + marketType + ' orders')
market = self.market(symbol)
request['contract_code'] = market['id']
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
if marginType == 'isolated':
method = 'contractPrivatePostLinearSwapApiV1SwapOpenorders'
elif marginType == 'cross':
method = 'contractPrivatePostLinearSwapApiV1SwapCrossOpenorders'
elif market['inverse']:
if market['future']:
method = 'contractPrivatePostApiV1ContractOpenorders'
request['symbol'] = market['settleId']
elif market['swap']:
method = 'contractPrivatePostSwapApiV1SwapOpenorders'
if limit is not None:
request['page_size'] = limit
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status":"ok",
# "data":[
# {
# "symbol":"ethusdt",
# "source":"api",
# "amount":"0.010000000000000000",
# "account-id":1528640,
# "created-at":1561597491963,
# "price":"400.000000000000000000",
# "filled-amount":"0.0",
# "filled-cash-amount":"0.0",
# "filled-fees":"0.0",
# "id":38477101630,
# "state":"submitted",
# "type":"sell-limit"
# }
# ]
# }
#
# futures
#
# {
# "status": "ok",
# "data": {
# "orders": [
# {
# "symbol": "ADA",
# "contract_code": "ADA201225",
# "contract_type": "quarter",
# "volume": 1,
# "price": 0.0925,
# "order_price_type": "post_only",
# "order_type": 1,
# "direction": "buy",
# "offset": "close",
# "lever_rate": 20,
# "order_id": 773131315209248768,
# "client_order_id": null,
# "created_at": 1604370469629,
# "trade_volume": 0,
# "trade_turnover": 0,
# "fee": 0,
# "trade_avg_price": null,
# "margin_frozen": 0,
# "profit": 0,
# "status": 3,
# "order_source": "web",
# "order_id_str": "773131315209248768",
# "fee_asset": "ADA",
# "liquidation_type": null,
# "canceled_at": null,
# "is_tpsl": 0,
# "update_time": 1606975980467,
# "real_profit": 0
# }
# ],
# "total_page": 1,
# "current_page": 1,
# "total_size": 1
# },
# "ts": 1604370488518
# }
#
orders = self.safe_value(response, 'data')
if not isinstance(orders, list):
orders = self.safe_value(orders, 'orders', [])
return self.parse_orders(orders, market, since, limit)
def parse_order_status(self, status):
statuses = {
# spot
'partial-filled': 'open',
'partial-canceled': 'canceled',
'filled': 'closed',
'canceled': 'canceled',
'submitted': 'open',
'created': 'open', # For stop orders
# contract
'1': 'open',
'2': 'open',
'3': 'open',
'4': 'open',
            '5': 'canceled', # partially canceled
'6': 'closed',
'7': 'canceled',
'11': 'canceling',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# spot
#
# {
# id: 13997833014,
# symbol: "ethbtc",
# 'account-id': 3398321,
# amount: "0.045000000000000000",
# price: "0.034014000000000000",
# 'created-at': 1545836976871,
# type: "sell-limit",
# 'field-amount': "0.045000000000000000", # they have fixed it for filled-amount
# 'field-cash-amount': "0.001530630000000000", # they have fixed it for filled-cash-amount
# 'field-fees': "0.000003061260000000", # they have fixed it for filled-fees
# 'finished-at': 1545837948214,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0
# }
#
# {
# id: 20395337822,
# symbol: "ethbtc",
# 'account-id': 5685075,
# amount: "0.001000000000000000",
# price: "0.0",
# 'created-at': 1545831584023,
# type: "buy-market",
# 'field-amount': "0.029100000000000000", # they have fixed it for filled-amount
# 'field-cash-amount': "0.000999788700000000", # they have fixed it for filled-cash-amount
# 'field-fees': "0.000058200000000000", # they have fixed it for filled-fees
# 'finished-at': 1545831584181,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0
# }
#
# linear swap cross margin createOrder
#
# {
# "order_id":924660854912552960,
# "order_id_str":"924660854912552960"
# }
#
# contracts fetchOrder
#
# {
# "business_type":"swap",
# "contract_type":"swap",
# "pair":"BTC-USDT",
# "symbol":"BTC",
# "contract_code":"BTC-USDT",
# "volume":1,
# "price":3000,
# "order_price_type":"limit",
# "order_type":1,
# "direction":"buy",
# "offset":"open",
# "lever_rate":1,
# "order_id":924912513206878210,
# "client_order_id":null,
# "created_at":1640557927189,
# "trade_volume":0,
# "trade_turnover":0,
# "fee":0,
# "trade_avg_price":null,
# "margin_frozen":3.000000000000000000,
# "profit":0,
# "status":3,
# "order_source":"api",
# "order_id_str":"924912513206878210",
# "fee_asset":"USDT",
# "liquidation_type":"0",
# "canceled_at":0,
# "margin_asset":"USDT",
# "margin_account":"USDT",
# "margin_mode":"cross",
# "is_tpsl":0,
# "real_profit":0
# }
#
# contracts fetchOrder detailed
#
# {
# "status": "ok",
# "data": {
# "symbol": "BTC",
# "contract_code": "BTC-USDT",
# "instrument_price": 0,
# "final_interest": 0,
# "adjust_value": 0,
# "lever_rate": 10,
# "direction": "sell",
# "offset": "open",
# "volume": 1.000000000000000000,
# "price": 13059.800000000000000000,
# "created_at": 1603703614712,
# "canceled_at": 0,
# "order_source": "api",
# "order_price_type": "opponent",
# "margin_frozen": 0,
# "profit": 0,
# "trades": [
# {
# "trade_id": 131560927,
# "trade_price": 13059.800000000000000000,
# "trade_volume": 1.000000000000000000,
# "trade_turnover": 13.059800000000000000,
# "trade_fee": -0.005223920000000000,
# "created_at": 1603703614715,
# "role": "taker",
# "fee_asset": "USDT",
# "profit": 0,
# "real_profit": 0,
# "id": "131560927-770334322963152896-1"
# }
# ],
# "total_page": 1,
# "current_page": 1,
# "total_size": 1,
# "liquidation_type": "0",
# "fee_asset": "USDT",
# "fee": -0.005223920000000000,
# "order_id": 770334322963152896,
# "order_id_str": "770334322963152896",
# "client_order_id": 57012021045,
# "order_type": "1",
# "status": 6,
# "trade_avg_price": 13059.800000000000000000,
# "trade_turnover": 13.059800000000000000,
# "trade_volume": 1.000000000000000000,
# "margin_asset": "USDT",
# "margin_mode": "isolated",
# "margin_account": "BTC-USDT",
# "real_profit": 0,
# "is_tpsl": 0
# },
# "ts": 1603703678477
# }
#
id = self.safe_string_2(order, 'id', 'order_id_str')
side = self.safe_string(order, 'direction')
type = self.safe_string(order, 'order_price_type')
if 'type' in order:
orderType = order['type'].split('-')
side = orderType[0]
type = orderType[1]
status = self.parse_order_status(self.safe_string_2(order, 'state', 'status'))
marketId = self.safe_string_2(order, 'contract_code', 'symbol')
market = self.safe_market(marketId, market)
timestamp = self.safe_integer_2(order, 'created_at', 'created-at')
clientOrderId = self.safe_string_2(order, 'client_order_id', 'client-order-id')
amount = self.safe_string_2(order, 'volume', 'amount')
filled = self.safe_string_2(order, 'filled-amount', 'field-amount') # typo in their API, filled amount
filled = self.safe_string(order, 'trade_volume', filled)
price = self.safe_string(order, 'price')
cost = self.safe_string_2(order, 'filled-cash-amount', 'field-cash-amount') # same typo
cost = self.safe_string(order, 'trade_turnover', cost)
        feeCost = self.safe_string_2(order, 'filled-fees', 'field-fees') # typo in their API, filled fees
feeCost = self.safe_string(order, 'fee', feeCost)
fee = None
if feeCost is not None:
feeCurrency = None
feeCurrencyId = self.safe_string(order, 'fee_asset')
if feeCurrencyId is not None:
feeCurrency = self.safe_currency_code(feeCurrencyId)
else:
feeCurrency = market['quote'] if (side == 'sell') else market['base']
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
stopPrice = self.safe_string(order, 'stop-price')
average = self.safe_string(order, 'trade_avg_price')
trades = self.safe_value(order, 'trades')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': market['symbol'],
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': stopPrice,
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': None,
'status': status,
'fee': fee,
'trades': trades,
}, market)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
marketType, query = self.handle_market_type_and_params('createOrder', market, params)
method = self.get_supported_mapping(marketType, {
'spot': 'createSpotOrder',
'swap': 'createContractOrder',
'future': 'createContractOrder',
})
if method is None:
raise NotSupported(self.id + ' createOrder does not support ' + marketType + ' markets yet')
return getattr(self, method)(symbol, type, side, amount, price, query)
def create_spot_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
self.load_accounts()
market = self.market(symbol)
accountId = self.fetch_account_id_by_type(market['type'])
request = {
# spot -----------------------------------------------------------
'account-id': accountId,
'symbol': market['id'],
# 'type': side + '-' + type, # buy-market, sell-market, buy-limit, sell-limit, buy-ioc, sell-ioc, buy-limit-maker, sell-limit-maker, buy-stop-limit, sell-stop-limit, buy-limit-fok, sell-limit-fok, buy-stop-limit-fok, sell-stop-limit-fok
# 'amount': self.amount_to_precision(symbol, amount), # for buy market orders it's the order cost
# 'price': self.price_to_precision(symbol, price),
# 'source': 'spot-api', # optional, spot-api, margin-api = isolated margin, super-margin-api = cross margin, c2c-margin-api
# 'client-order-id': clientOrderId, # optional, max 64 chars, must be unique within 8 hours
# 'stop-price': self.price_to_precision(symbol, stopPrice), # trigger price for stop limit orders
# 'operator': 'gte', # gte, lte, trigger price condition
}
orderType = type.replace('buy-', '')
orderType = orderType.replace('sell-', '')
options = self.safe_value(self.options, market['type'], {})
stopPrice = self.safe_string_2(params, 'stopPrice', 'stop-price')
if stopPrice is None:
stopOrderTypes = self.safe_value(options, 'stopOrderTypes', {})
if orderType in stopOrderTypes:
raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice or a stop-price parameter for a stop order')
else:
stopOperator = self.safe_string(params, 'operator')
if stopOperator is None:
raise ArgumentsRequired(self.id + ' createOrder() requires an operator parameter "gte" or "lte" for a stop order')
params = self.omit(params, ['stopPrice', 'stop-price'])
request['stop-price'] = self.price_to_precision(symbol, stopPrice)
request['operator'] = stopOperator
if (orderType == 'limit') or (orderType == 'limit-fok'):
orderType = 'stop-' + orderType
elif (orderType != 'stop-limit') and (orderType != 'stop-limit-fok'):
raise NotSupported(self.id + ' createOrder() does not support ' + type + ' orders')
postOnly = self.safe_value(params, 'postOnly', False)
if postOnly:
orderType = 'limit-maker'
request['type'] = side + '-' + orderType
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client-order-id') # must be 64 chars max and unique within 24 hours
if clientOrderId is None:
broker = self.safe_value(self.options, 'broker', {})
brokerId = self.safe_string(broker, 'id')
request['client-order-id'] = brokerId + self.uuid()
else:
request['client-order-id'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'client-order-id', 'postOnly'])
if (orderType == 'market') and (side == 'buy'):
if self.options['createMarketBuyOrderRequiresPrice']:
if price is None:
raise InvalidOrder(self.id + " market buy order requires price argument to calculate cost(total amount of quote currency to spend for buying, amount * price). To switch off self warning exception and specify cost in the amount argument, set .options['createMarketBuyOrderRequiresPrice'] = False. Make sure you know what you're doing.")
else:
                    # for market buy orders the exchange expects the cost(amount * price, in quote currency) in the 'amount' field
                    # more about it here:
                    # https://github.com/ccxt/ccxt/pull/4395
                    # https://github.com/ccxt/ccxt/issues/7611
                    # we use costToPrecision here because the value supplied is a cost in quote currency
request['amount'] = self.cost_to_precision(symbol, float(amount) * float(price))
else:
request['amount'] = self.cost_to_precision(symbol, amount)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
limitOrderTypes = self.safe_value(options, 'limitOrderTypes', {})
if orderType in limitOrderTypes:
request['price'] = self.price_to_precision(symbol, price)
response = self.spotPrivatePostV1OrderOrdersPlace(self.extend(request, params))
#
# spot
#
# {"status":"ok","data":"438398393065481"}
#
id = self.safe_string(response, 'data')
return {
'info': response,
'id': id,
'timestamp': None,
'datetime': None,
'lastTradeTimestamp': None,
'status': None,
'symbol': None,
'type': None,
'side': None,
'price': None,
'amount': None,
'filled': None,
'remaining': None,
'cost': None,
'trades': None,
'fee': None,
'clientOrderId': None,
'average': None,
}
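        #
        # illustrative usage sketch, assumption only: spot market buys expect the quote-currency
        # cost, so a price is needed unless createMarketBuyOrderRequiresPrice is disabled, and
        # stop orders need both a stop price and an 'operator'; all values below are placeholders
        #
        #     limit_order = exchange.create_order('ETH/USDT', 'limit', 'buy', 0.1, 3000)
        #     market_buy = exchange.create_order('ETH/USDT', 'market', 'buy', 0.1, 3000)  # cost = 0.1 * 3000
        #     stop_limit = exchange.create_order('ETH/USDT', 'stop-limit', 'sell', 0.1, 2500, {'stopPrice': 2600, 'operator': 'lte'})
        #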
def create_contract_order(self, symbol, type, side, amount, price=None, params={}):
offset = self.safe_string(params, 'offset')
if offset is None:
raise ArgumentsRequired(self.id + ' createOrder() requires a string offset parameter for contract orders, open or close')
stopPrice = self.safe_string(params, 'stopPrice')
if stopPrice is not None:
            raise NotSupported(self.id + ' createOrder() supports tp_trigger_price + tp_order_price for take profit orders and/or sl_trigger_price + sl_order_price for stop loss orders; stop orders are supported only with open long orders and open short orders')
market = self.market(symbol)
request = {
# 'symbol': 'BTC', # optional, case-insenstive, both uppercase and lowercase are supported, "BTC", "ETH", ...
            # 'contract_type': 'this_week', # optional, this_week, next_week, quarter, next_quarter
'contract_code': market['id'], # optional BTC180914
# 'client_order_id': clientOrderId, # optional, must be less than 9223372036854775807
# 'price': self.price_to_precision(symbol, price), # optional
'volume': self.amount_to_precision(symbol, amount),
'direction': side, # buy, sell
'offset': offset, # open, close
#
# direction buy, offset open = open long
# direction sell, offset close = close long
# direction sell, offset open = open short
# direction buy, offset close = close short
#
# 'reduce_only': 0, # 1 or 0, in hedge mode it is invalid, and in one-way mode its value is 0 when not filled
'lever_rate': 1, # required, using leverage greater than 20x requires prior approval of high-leverage agreement
# 'order_price_type': 'limit', # required
#
# order_price_type can be:
#
# limit
# opponent # BBO
# post_only
# optimal_5
# optimal_10
# optimal_20
# ioc
# fok
# opponent_ioc # IOC order using the BBO price
# optimal_5_ioc
# optimal_10_ioc
# optimal_20_ioc
            # opponent_fok # FOK order using the BBO price
# optimal_5_fok
# optimal_10_fok
# optimal_20_fok
#
# 'tp_trigger_price': self.price_to_precision(symbol, triggerPrice),
# 'tp_order_price': self.price_to_precision(symbol, price),
# 'tp_order_price_type': 'limit', # limit,optimal_5,optimal_10,optimal_20
# 'sl_trigger_price': self.price_to_precision(symbol, stopLossPrice),
# 'sl_order_price': self.price_to_precision(symbol, price),
# 'sl_order_price_type': 'limit', # limit,optimal_5,optimal_10,optimal_20
}
stopLossOrderPrice = self.safe_string(params, 'sl_order_price')
stopLossTriggerPrice = self.safe_string(params, 'sl_trigger_price')
takeProfitOrderPrice = self.safe_string(params, 'tp_order_price')
takeProfitTriggerPrice = self.safe_string(params, 'tp_trigger_price')
isOpenOrder = (offset == 'open')
isStopOrder = False
if stopLossTriggerPrice is not None:
request['sl_trigger_price'] = self.price_to_precision(symbol, stopLossTriggerPrice)
isStopOrder = True
if price is not None:
request['sl_order_price'] = self.price_to_precision(symbol, price)
if stopLossOrderPrice is not None:
request['sl_order_price'] = self.price_to_precision(symbol, stopLossOrderPrice)
isStopOrder = True
if takeProfitTriggerPrice is not None:
request['tp_trigger_price'] = self.price_to_precision(symbol, takeProfitTriggerPrice)
isStopOrder = True
if price is not None:
request['tp_order_price'] = self.price_to_precision(symbol, price)
if takeProfitOrderPrice is not None:
request['tp_order_price'] = self.price_to_precision(symbol, takeProfitOrderPrice)
isStopOrder = True
if isStopOrder and not isOpenOrder:
            raise NotSupported(self.id + ' createOrder() supports tp_trigger_price + tp_order_price for take profit orders and/or sl_trigger_price + sl_order_price for stop loss orders; stop orders are supported only with open long orders and open short orders')
params = self.omit(params, ['sl_order_price', 'sl_trigger_price', 'tp_order_price', 'tp_trigger_price'])
postOnly = self.safe_value(params, 'postOnly', False)
if postOnly:
type = 'post_only'
if type == 'limit' or type == 'ioc' or type == 'fok' or type == 'post_only':
request['price'] = self.price_to_precision(symbol, price)
request['order_price_type'] = type
broker = self.safe_value(self.options, 'broker', {})
brokerId = self.safe_string(broker, 'id')
request['channel_code'] = brokerId
clientOrderId = self.safe_string_2(params, 'client_order_id', 'clientOrderId')
if clientOrderId is not None:
request['client_order_id'] = clientOrderId
params = self.omit(params, ['client_order_id', 'clientOrderId'])
method = None
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
if marginType == 'isolated':
method = 'contractPrivatePostLinearSwapApiV1SwapOrder'
elif marginType == 'cross':
method = 'contractPrivatePostLinearSwapApiV1SwapCrossOrder'
elif market['inverse']:
if market['swap']:
method = 'contractPrivatePostSwapApiV1SwapOrder'
elif market['future']:
method = 'contractPrivatePostApiV1ContractOrder'
response = getattr(self, method)(self.extend(request, params))
#
# linear swap cross margin
#
# {
# "status":"ok",
# "data":{
# "order_id":924660854912552960,
# "order_id_str":"924660854912552960"
# },
# "ts":1640497927185
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_order(data, market)
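        #
        # illustrative usage sketch, assumption only: contract orders combine 'direction' and
        # 'offset'(buy + open = open long, sell + close = close long, sell + open = open short,
        # buy + close = close short), and attached tp/sl triggers are only accepted when opening;
        # symbols, prices and leverage below are placeholders
        #
        #     open_long = exchange.create_order('BTC/USDT:USDT', 'limit', 'buy', 1, 30000, {'offset': 'open', 'lever_rate': 5})
        #     close_long = exchange.create_order('BTC/USDT:USDT', 'limit', 'sell', 1, 31000, {'offset': 'close'})
        #     with_stop = exchange.create_order('BTC/USDT:USDT', 'limit', 'buy', 1, 30000, {'offset': 'open', 'sl_trigger_price': 28000, 'sl_order_price': 27900})
        #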
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
marketType = None
marketType, params = self.handle_market_type_and_params('cancelOrder', None, params)
request = {
# spot -----------------------------------------------------------
# 'order-id': 'id',
# 'symbol': market['id'],
# 'client-order-id': clientOrderId,
# contracts ------------------------------------------------------
# 'order_id': id,
# 'client_order_id': clientOrderId,
# 'contract_code': market['id'],
# 'pair': 'BTC-USDT',
            # 'contract_type': 'this_week', # swap, this_week, next_week, quarter, next_quarter
}
method = None
market = None
if marketType == 'spot':
clientOrderId = self.safe_string_2(params, 'client-order-id', 'clientOrderId')
method = 'spotPrivatePostV1OrderOrdersOrderIdSubmitcancel'
if clientOrderId is None:
request['order-id'] = id
else:
request['client-order-id'] = clientOrderId
method = 'spotPrivatePostV1OrderOrdersSubmitCancelClientOrder'
params = self.omit(params, ['client-order-id', 'clientOrderId'])
else:
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol for ' + marketType + ' orders')
market = self.market(symbol)
request['contract_code'] = market['id']
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
if marginType == 'isolated':
method = 'contractPrivatePostLinearSwapApiV1SwapCancel'
elif marginType == 'cross':
method = 'contractPrivatePostLinearSwapApiV1SwapCrossCancel'
elif market['inverse']:
if market['future']:
method = 'contractPrivatePostApiV1ContractCancel'
request['symbol'] = market['settleId']
elif market['swap']:
method = 'contractPrivatePostSwapApiV1SwapCancel'
else:
raise NotSupported(self.id + ' cancelOrder() does not support ' + marketType + ' markets')
clientOrderId = self.safe_string_2(params, 'client_order_id', 'clientOrderId')
if clientOrderId is None:
request['order_id'] = id
else:
request['client_order_id'] = clientOrderId
params = self.omit(params, ['client_order_id', 'clientOrderId'])
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# 'status': 'ok',
# 'data': '10138899000',
# }
#
# linear swap cross margin
#
# {
# "status":"ok",
# "data":{
# "errors":[],
# "successes":"924660854912552960"
# },
# "ts":1640504486089
# }
#
return self.extend(self.parse_order(response, market), {
'id': id,
'status': 'canceled',
})
def cancel_orders(self, ids, symbol=None, params={}):
self.load_markets()
marketType = None
marketType, params = self.handle_market_type_and_params('cancelOrder', None, params)
request = {
# spot -----------------------------------------------------------
            # 'order-ids': ','.join(ids), # max 50
# 'client-order-ids': ','.join(ids), # max 50
# contracts ------------------------------------------------------
# 'order_id': id, # comma separated, max 10
# 'client_order_id': clientOrderId, # comma separated, max 10
# 'contract_code': market['id'],
# 'symbol': market['settleId'],
}
method = None
if marketType == 'spot':
clientOrderIds = self.safe_value_2(params, 'client-order-id', 'clientOrderId')
clientOrderIds = self.safe_value_2(params, 'client-order-ids', 'clientOrderIds', clientOrderIds)
if clientOrderIds is None:
                if isinstance(ids, str):
request['order-ids'] = ids
else:
request['order-ids'] = ','.join(ids)
else:
if isinstance(clientOrderIds, str):
request['client-order-ids'] = clientOrderIds
else:
request['client-order-ids'] = ','.join(clientOrderIds)
params = self.omit(params, ['client-order-id', 'client-order-ids', 'clientOrderId', 'clientOrderIds'])
method = 'spotPrivatePostV1OrderOrdersBatchcancel'
else:
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrders() requires a symbol for ' + marketType + ' orders')
market = self.market(symbol)
request['contract_code'] = market['id']
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
if marginType == 'isolated':
method = 'contractPrivatePostLinearSwapApiV1SwapCancel'
elif marginType == 'cross':
method = 'contractPrivatePostLinearSwapApiV1SwapCrossCancel'
elif market['inverse']:
if market['future']:
method = 'contractPrivatePostApiV1ContractCancel'
request['symbol'] = market['settleId']
elif market['swap']:
method = 'contractPrivatePostSwapApiV1SwapCancel'
else:
raise NotSupported(self.id + ' cancelOrders() does not support ' + marketType + ' markets')
clientOrderIds = self.safe_string_2(params, 'client_order_id', 'clientOrderId')
clientOrderIds = self.safe_string_2(params, 'client_order_ids', 'clientOrderIds', clientOrderIds)
if clientOrderIds is None:
request['order_id'] = ','.join(ids)
else:
request['client_order_id'] = clientOrderIds
params = self.omit(params, ['client_order_id', 'client_order_ids', 'clientOrderId', 'clientOrderIds'])
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "status": "ok",
# "data": {
# "success": [
# "5983466"
# ],
# "failed": [
# {
# "err-msg": "Incorrect order state",
# "order-state": 7,
# "order-id": "",
# "err-code": "order-orderstate-error",
# "client-order-id": "first"
# },
# {
# "err-msg": "Incorrect order state",
# "order-state": 7,
# "order-id": "",
# "err-code": "order-orderstate-error",
# "client-order-id": "second"
# },
# {
# "err-msg": "The record is not found.",
# "order-id": "",
# "err-code": "base-not-found",
# "client-order-id": "third"
# }
# ]
# }
# }
#
# contracts
#
# {
# "status": "ok",
# "data": {
# "errors": [
# {
# "order_id": "769206471845261312",
# "err_code": 1061,
# "err_msg": "This order doesnt exist."
# }
# ],
# "successes": "773120304138219520"
# },
# "ts": 1604367997451
# }
#
return response
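        #
        # illustrative usage sketch, assumption only: spot batch cancels take comma-joined order
        # ids or client order ids(max 50), contract batch cancels need a symbol and take up to
        # 10 ids; the ids and symbols below are placeholders
        #
        #     exchange.cancel_orders(['5983466', '5983467'], 'ETH/USDT')
        #     exchange.cancel_orders([], 'ETH/USDT', {'client-order-ids': ['first', 'second']})
        #     exchange.cancel_orders(['769206471845261312'], 'BTC/USDT:USDT')
        #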
def cancel_all_orders(self, symbol=None, params={}):
self.load_markets()
marketType = None
marketType, params = self.handle_market_type_and_params('cancelOrder', None, params)
request = {
# spot -----------------------------------------------------------
# 'account-id': account['id'],
# 'symbol': market['id'], # a list of comma-separated symbols, all symbols by default
            # 'types': 'string', # buy-market, sell-market, buy-limit, sell-limit, buy-ioc, sell-ioc, buy-stop-limit, sell-stop-limit, buy-limit-fok, sell-limit-fok, buy-stop-limit-fok, sell-stop-limit-fok
# 'side': 'buy', # or 'sell'
# 'size': 100, # the number of orders to cancel 1-100
# contract -------------------------------------------------------
# 'symbol': market['settleId'], # required
# 'contract_code': market['id'],
            # 'contract_type': 'this_week', # swap, this_week, next_week, quarter, next_quarter
            # 'direction': 'buy', # buy, sell
# 'offset': 'open', # open, close
}
market = None
method = None
if marketType == 'spot':
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
method = 'spotPrivatePostV1OrderOrdersBatchCancelOpenOrders'
else:
if symbol is None:
                raise ArgumentsRequired(self.id + ' cancelAllOrders() requires a symbol for ' + marketType + ' orders')
market = self.market(symbol)
request['contract_code'] = market['id']
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
if marginType == 'isolated':
                    method = 'contractPrivatePostLinearSwapApiV1SwapCancelall'
elif marginType == 'cross':
method = 'contractPrivatePostLinearSwapApiV1SwapCrossCancelall'
elif market['inverse']:
if marketType == 'future':
method = 'contractPrivatePostApiV1ContractCancelall'
request['symbol'] = market['settleId']
elif marketType == 'swap':
method = 'contractPrivatePostSwapApiV1SwapCancelall'
else:
                raise NotSupported(self.id + ' cancelAllOrders() does not support ' + marketType + ' markets')
response = getattr(self, method)(self.extend(request, params))
#
# {
# code: 200,
# data: {
# "success-count": 2,
# "failed-count": 0,
# "next-id": 5454600
# }
# }
#
return response
def currency_to_precision(self, currency, fee):
return self.decimal_to_precision(fee, 0, self.currencies[currency]['precision'])
def safe_network(self, networkId):
lastCharacterIndex = len(networkId) - 1
lastCharacter = networkId[lastCharacterIndex]
if lastCharacter == '1':
networkId = networkId[0:lastCharacterIndex]
networksById = {}
return self.safe_string(networksById, networkId, networkId)
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# currency: "usdt",
# address: "0xf7292eb9ba7bc50358e27f0e025a4d225a64127b",
# addressTag: "",
# chain: "usdterc20", # trc20usdt, hrc20usdt, usdt, algousdt
# }
#
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string(depositAddress, 'addressTag')
if tag == '':
tag = None
currencyId = self.safe_string(depositAddress, 'currency')
currency = self.safe_currency(currencyId, currency)
code = self.safe_currency_code(currencyId, currency)
networkId = self.safe_string(depositAddress, 'chain')
networks = self.safe_value(currency, 'networks', {})
networksById = self.index_by(networks, 'id')
networkValue = self.safe_value(networksById, networkId, networkId)
network = self.safe_string(networkValue, 'network')
note = self.safe_string(depositAddress, 'note')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': network,
'note': note,
'info': depositAddress,
}
def fetch_deposit_addresses_by_network(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.spotPrivateGetV2AccountDepositAddress(self.extend(request, params))
#
# {
# code: 200,
# data: [
# {
# currency: "eth",
# address: "0xf7292eb9ba7bc50358e27f0e025a4d225a64127b",
# addressTag: "",
# chain: "eth"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
parsed = self.parse_deposit_addresses(data, [code], False)
return self.index_by(parsed, 'network')
def fetch_deposit_address(self, code, params={}):
rawNetwork = self.safe_string_upper(params, 'network')
networks = self.safe_value(self.options, 'networks', {})
network = self.safe_string_upper(networks, rawNetwork, rawNetwork)
params = self.omit(params, 'network')
response = self.fetch_deposit_addresses_by_network(code, params)
result = None
if network is None:
result = self.safe_value(response, code)
if result is None:
alias = self.safe_string(networks, code, code)
result = self.safe_value(response, alias)
if result is None:
defaultNetwork = self.safe_string(self.options, 'defaultNetwork', 'ERC20')
result = self.safe_value(response, defaultNetwork)
if result is None:
values = list(response.values())
result = self.safe_value(values, 0)
if result is None:
raise InvalidAddress(self.id + ' fetchDepositAddress() cannot find deposit address for ' + code)
return result
result = self.safe_value(response, network)
if result is None:
raise InvalidAddress(self.id + ' fetchDepositAddress() cannot find ' + network + ' deposit address for ' + code)
return result
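        #
        # illustrative usage sketch, assumption only: without a 'network' param the code above
        # falls back through the currency code, its alias, the configured defaultNetwork and
        # finally the first available entry, so naming the chain explicitly is the predictable path
        #
        #     usdt_default = exchange.fetch_deposit_address('USDT')
        #     usdt_trc20 = exchange.fetch_deposit_address('USDT', {'network': 'TRC20'})
        #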
def fetch_withdraw_addresses_by_network(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.spotPrivateGetV2AccountWithdrawAddress(self.extend(request, params))
#
# {
# code: 200,
# data: [
# {
# currency: "eth",
# chain: "eth"
# note: "Binance - TRC20",
# addressTag: "",
# address: "0xf7292eb9ba7bc50358e27f0e025a4d225a64127b",
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
parsed = self.parse_deposit_addresses(data, [code], False)
return self.index_by(parsed, 'network')
def fetch_withdraw_address(self, code, params={}):
rawNetwork = self.safe_string_upper(params, 'network')
networks = self.safe_value(self.options, 'networks', {})
network = self.safe_string_upper(networks, rawNetwork, rawNetwork)
params = self.omit(params, 'network')
response = self.fetch_withdraw_addresses_by_network(code, params)
result = None
if network is None:
result = self.safe_value(response, code)
if result is None:
alias = self.safe_string(networks, code, code)
result = self.safe_value(response, alias)
if result is None:
defaultNetwork = self.safe_string(self.options, 'defaultNetwork', 'ERC20')
result = self.safe_value(response, defaultNetwork)
if result is None:
values = list(response.values())
result = self.safe_value(values, 0)
if result is None:
raise InvalidAddress(self.id + ' fetchWithdrawAddress() cannot find withdraw address for ' + code)
return result
result = self.safe_value(response, network)
if result is None:
raise InvalidAddress(self.id + ' fetchWithdrawAddress() cannot find ' + network + ' withdraw address for ' + code)
return result
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
if limit is None or limit > 100:
limit = 100
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
'type': 'deposit',
'from': 0, # From 'id' ... if you want to get results after a particular transaction id, pass the id in params.from
}
if currency is not None:
request['currency'] = currency['id']
if limit is not None:
request['size'] = limit # max 100
response = self.spotPrivateGetV1QueryDepositWithdraw(self.extend(request, params))
# return response
return self.parse_transactions(response['data'], currency, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
if limit is None or limit > 100:
limit = 100
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
'type': 'withdraw',
'from': 0, # From 'id' ... if you want to get results after a particular transaction id, pass the id in params.from
}
if currency is not None:
request['currency'] = currency['id']
if limit is not None:
request['size'] = limit # max 100
response = self.spotPrivateGetV1QueryDepositWithdraw(self.extend(request, params))
# return response
return self.parse_transactions(response['data'], currency, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# 'id': 8211029,
# 'type': 'deposit',
# 'currency': 'eth',
# 'chain': 'eth',
# 'tx-hash': 'bd315....',
# 'amount': 0.81162421,
# 'address': '4b8b....',
# 'address-tag': '',
# 'fee': 0,
# 'state': 'safe',
# 'created-at': 1542180380965,
# 'updated-at': 1542180788077
# }
#
# fetchWithdrawals
#
# {
# 'id': 6908275,
# 'type': 'withdraw',
# 'currency': 'btc',
# 'chain': 'btc',
# 'tx-hash': 'c1a1a....',
# 'amount': 0.80257005,
# 'address': '1QR....',
# 'address-tag': '',
# 'fee': 0.0005,
# 'state': 'confirmed',
# 'created-at': 1552107295685,
# 'updated-at': 1552108032859
# }
#
timestamp = self.safe_integer(transaction, 'created-at')
updated = self.safe_integer(transaction, 'updated-at')
code = self.safe_currency_code(self.safe_string(transaction, 'currency'))
type = self.safe_string(transaction, 'type')
if type == 'withdraw':
type = 'withdrawal'
status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
tag = self.safe_string(transaction, 'address-tag')
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
feeCost = abs(feeCost)
address = self.safe_string(transaction, 'address')
network = self.safe_string_upper(transaction, 'chain')
return {
'info': transaction,
'id': self.safe_string(transaction, 'id'),
'txid': self.safe_string(transaction, 'tx-hash'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'network': network,
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': tag,
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': self.safe_number(transaction, 'amount'),
'currency': code,
'status': status,
'updated': updated,
'fee': {
'currency': code,
'cost': feeCost,
'rate': None,
},
}
def parse_transaction_status(self, status):
statuses = {
# deposit statuses
'unknown': 'failed',
'confirming': 'pending',
'confirmed': 'ok',
'safe': 'ok',
'orphan': 'failed',
# withdrawal statuses
'submitted': 'pending',
'canceled': 'canceled',
'reexamine': 'pending',
'reject': 'failed',
'pass': 'pending',
'wallet-reject': 'failed',
# 'confirmed': 'ok', # present in deposit statuses
'confirm-error': 'failed',
'repealed': 'failed',
'wallet-transfer': 'pending',
'pre-transfer': 'pending',
}
return self.safe_string(statuses, status, status)
def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.load_markets()
self.check_address(address)
currency = self.currency(code)
request = {
'address': address, # only supports existing addresses in your withdraw address list
'amount': amount,
'currency': currency['id'].lower(),
}
if tag is not None:
request['addr-tag'] = tag # only for XRP?
networks = self.safe_value(self.options, 'networks', {})
        network = self.safe_string_upper(params, 'network') # this line allows the user to specify either ERC20 or ETH
network = self.safe_string_lower(networks, network, network) # handle ETH>ERC20 alias
if network is not None:
# possible chains - usdterc20, trc20usdt, hrc20usdt, usdt, algousdt
if network == 'erc20':
request['chain'] = currency['id'] + network
else:
request['chain'] = network + currency['id']
params = self.omit(params, 'network')
response = self.spotPrivatePostV1DwWithdrawApiCreate(self.extend(request, params))
id = self.safe_string(response, 'data')
return {
'info': response,
'id': id,
}
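        #
        # illustrative usage sketch, assumption only: the chain id sent to the exchange is built
        # from the network alias, with 'erc20' appended after the currency id and other networks
        # prepended, e.g. 'usdterc20' vs 'trc20usdt'; the address below is a placeholder that
        # must already exist in the account's withdraw address list
        #
        #     exchange.withdraw('USDT', 50, 'TYourWhitelistedTronAddress', None, {'network': 'TRC20'})
        #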
def parse_transfer(self, transfer, currency=None):
#
# transfer
#
# {
# "data": 12345,
# "status": "ok"
# }
#
id = self.safe_string(transfer, 'data')
code = self.safe_currency_code(None, currency)
return {
'info': transfer,
'id': id,
'timestamp': None,
'datetime': None,
'currency': code,
'amount': None,
'fromAccount': None,
'toAccount': None,
'status': None,
}
def transfer(self, code, amount, fromAccount, toAccount, params={}):
self.load_markets()
currency = self.currency(code)
type = self.safe_string(params, 'type')
if type is None:
accountsByType = self.safe_value(self.options, 'accountsByType', {})
fromAccount = fromAccount.lower() # pro, futures
toAccount = toAccount.lower() # pro, futures
fromId = self.safe_string(accountsByType, fromAccount)
toId = self.safe_string(accountsByType, toAccount)
if fromId is None:
keys = list(accountsByType.keys())
raise ExchangeError(self.id + ' fromAccount must be one of ' + ', '.join(keys))
if toId is None:
keys = list(accountsByType.keys())
raise ExchangeError(self.id + ' toAccount must be one of ' + ', '.join(keys))
type = fromAccount + '-to-' + toAccount
request = {
'currency': currency['id'],
'amount': float(self.currency_to_precision(code, amount)),
'type': type,
}
response = self.spotPrivatePostFuturesTransfer(self.extend(request, params))
#
# {
# "data": 12345,
# "status": "ok"
# }
#
transfer = self.parse_transfer(response, currency)
return self.extend(transfer, {
'amount': amount,
'currency': code,
'fromAccount': fromAccount,
'toAccount': toAccount,
})
def fetch_borrow_rates_per_symbol(self, params={}):
self.load_markets()
response = self.spotPrivateGetV1MarginLoanInfo(params)
# {
# "status": "ok",
# "data": [
# {
# "symbol": "1inchusdt",
# "currencies": [
# {
# "currency": "1inch",
# "interest-rate": "0.00098",
# "min-loan-amt": "90.000000000000000000",
# "max-loan-amt": "1000.000000000000000000",
# "loanable-amt": "0.0",
# "actual-rate": "0.00098"
# },
# {
# "currency": "usdt",
# "interest-rate": "0.00098",
# "min-loan-amt": "100.000000000000000000",
# "max-loan-amt": "1000.000000000000000000",
# "loanable-amt": "0.0",
# "actual-rate": "0.00098"
# }
# ]
# },
# ...
# ]
# }
timestamp = self.milliseconds()
data = self.safe_value(response, 'data')
rates = {
'info': response,
}
for i in range(0, len(data)):
rate = data[i]
currencies = self.safe_value(rate, 'currencies')
symbolRates = {}
for j in range(0, len(currencies)):
currency = currencies[j]
currencyId = self.safe_string(currency, 'currency')
                code = self.safe_currency_code(currencyId)
symbolRates[code] = {
'currency': code,
'rate': self.safe_number(currency, 'actual-rate'),
'span': 86400000,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
}
market = self.markets_by_id[self.safe_string(rate, 'symbol')]
symbol = market['symbol']
rates[symbol] = symbolRates
return rates
def fetch_borrow_rates(self, params={}):
self.load_markets()
response = self.spotPrivateGetV1MarginLoanInfo(params)
# {
# "status": "ok",
# "data": [
# {
# "symbol": "1inchusdt",
# "currencies": [
# {
# "currency": "1inch",
# "interest-rate": "0.00098",
# "min-loan-amt": "90.000000000000000000",
# "max-loan-amt": "1000.000000000000000000",
# "loanable-amt": "0.0",
# "actual-rate": "0.00098"
# },
# {
# "currency": "usdt",
# "interest-rate": "0.00098",
# "min-loan-amt": "100.000000000000000000",
# "max-loan-amt": "1000.000000000000000000",
# "loanable-amt": "0.0",
# "actual-rate": "0.00098"
# }
# ]
# },
# ...
# ]
# }
timestamp = self.milliseconds()
data = self.safe_value(response, 'data')
rates = {}
for i in range(0, len(data)):
market = data[i]
currencies = self.safe_value(market, 'currencies')
for j in range(0, len(currencies)):
currency = currencies[j]
currencyId = self.safe_string(currency, 'currency')
                code = self.safe_currency_code(currencyId)
rates[code] = {
'currency': code,
'rate': self.safe_number(currency, 'actual-rate'),
'span': 86400000,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'info': None,
}
return rates
def fetch_funding_rate_history(self, symbol=None, since=None, limit=None, params={}):
#
# Gets a history of funding rates with their timestamps
# (param) symbol: Future currency pair
# (param) limit: not used by huobi
# (param) since: not used by huobi
# (param) params: Object containing more params for the request
# return: [{symbol, fundingRate, timestamp, dateTime}]
#
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchFundingRateHistory() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'contract_code': market['id'],
}
method = None
if market['inverse']:
method = 'contractPublicGetSwapApiV1SwapHistoricalFundingRate'
elif market['linear']:
method = 'contractPublicGetLinearSwapApiV1SwapHistoricalFundingRate'
else:
raise NotSupported(self.id + ' fetchFundingRateHistory() supports inverse and linear swaps only')
response = getattr(self, method)(self.extend(request, params))
#
# {
# "status": "ok",
# "data": {
# "total_page": 62,
# "current_page": 1,
# "total_size": 1237,
# "data": [
# {
# "avg_premium_index": "-0.000208064395065541",
# "funding_rate": "0.000100000000000000",
# "realized_rate": "0.000100000000000000",
# "funding_time": "1638921600000",
# "contract_code": "BTC-USDT",
# "symbol": "BTC",
# "fee_asset": "USDT"
# },
# ]
# },
# "ts": 1638939294277
# }
#
data = self.safe_value(response, 'data')
result = self.safe_value(data, 'data')
rates = []
for i in range(0, len(result)):
entry = result[i]
marketId = self.safe_string(entry, 'contract_code')
symbol = self.safe_symbol(marketId)
            timestamp = self.safe_integer(entry, 'funding_time')
rates.append({
'info': entry,
'symbol': symbol,
'fundingRate': self.safe_number(entry, 'funding_rate'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
sorted = self.sort_by(rates, 'timestamp')
return self.filter_by_symbol_since_limit(sorted, market['symbol'], since, limit)
def parse_funding_rate(self, fundingRate, market=None):
#
# {
# "status": "ok",
# "data": {
# "estimated_rate": "0.000100000000000000",
# "funding_rate": "0.000100000000000000",
# "contract_code": "BCH-USD",
# "symbol": "BCH",
# "fee_asset": "BCH",
# "funding_time": "1639094400000",
# "next_funding_time": "1639123200000"
# },
# "ts": 1639085854775
# }
#
nextFundingRate = self.safe_number(fundingRate, 'estimated_rate')
fundingTimestamp = self.safe_integer(fundingRate, 'funding_time')
nextFundingTimestamp = self.safe_integer(fundingRate, 'next_funding_time')
marketId = self.safe_string(fundingRate, 'contract_code')
symbol = self.safe_symbol(marketId, market)
return {
'info': fundingRate,
'symbol': symbol,
'markPrice': None,
'indexPrice': None,
'interestRate': None,
'estimatedSettlePrice': None,
'timestamp': None,
'datetime': None,
'fundingRate': self.safe_number(fundingRate, 'funding_rate'),
'fundingTimestamp': fundingTimestamp,
'fundingDatetime': self.iso8601(fundingTimestamp),
'nextFundingRate': nextFundingRate,
'nextFundingTimestamp': nextFundingTimestamp,
'nextFundingDatetime': self.iso8601(nextFundingTimestamp),
'previousFundingRate': None,
'previousFundingTimestamp': None,
'previousFundingDatetime': None,
}
def fetch_funding_rate(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
method = None
if market['inverse']:
method = 'contractPublicGetSwapApiV1SwapFundingRate'
elif market['linear']:
method = 'contractPublicGetLinearSwapApiV1SwapFundingRate'
else:
            raise NotSupported(self.id + ' fetchFundingRate() supports inverse and linear swaps only')
request = {
'contract_code': market['id'],
}
response = getattr(self, method)(self.extend(request, params))
#
# {
# "status": "ok",
# "data": {
# "estimated_rate": "0.000100000000000000",
# "funding_rate": "0.000100000000000000",
# "contract_code": "BTC-USDT",
# "symbol": "BTC",
# "fee_asset": "USDT",
# "funding_time": "1603699200000",
# "next_funding_time": "1603728000000"
# },
# "ts": 1603696494714
# }
#
result = self.safe_value(response, 'data', {})
return self.parse_funding_rate(result, market)
def fetch_funding_rates(self, symbols, params={}):
self.load_markets()
options = self.safe_value(self.options, 'fetchFundingRates', {})
defaultSubType = self.safe_string(self.options, 'defaultSubType', 'inverse')
subType = self.safe_string(options, 'subType', defaultSubType)
subType = self.safe_string(params, 'subType', subType)
request = {
# 'contract_code': market['id'],
}
method = self.get_supported_mapping(subType, {
'linear': 'contractPublicGetLinearSwapApiV1SwapBatchFundingRate',
'inverse': 'contractPublicGetSwapApiV1SwapBatchFundingRate',
})
params = self.omit(params, 'subType')
response = getattr(self, method)(self.extend(request, params))
#
# {
# "status": "ok",
# "data": [
# {
# "estimated_rate": "0.000100000000000000",
# "funding_rate": "0.000100000000000000",
# "contract_code": "MANA-USDT",
# "symbol": "MANA",
# "fee_asset": "USDT",
# "funding_time": "1643356800000",
# "next_funding_time": "1643385600000",
# "trade_partition":"USDT"
# },
# ],
# "ts": 1643346173103
# }
#
data = self.safe_value(response, 'data', [])
result = self.parse_funding_rates(data)
return self.filter_by_array(result, 'symbol', symbols)
def fetch_borrow_interest(self, code=None, symbol=None, since=None, limit=None, params={}):
self.load_markets()
defaultMargin = self.safe_string(params, 'marginType', 'cross') # cross or isolated
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
request = {}
if since is not None:
request['start-date'] = self.yyyymmdd(since)
if limit is not None:
request['size'] = limit
market = None
method = None
if marginType == 'isolated':
method = 'privateGetMarginLoanOrders'
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
else: # Cross
method = 'privateGetCrossMarginLoanOrders'
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
response = getattr(self, method)(self.extend(request, params))
#
# {
# "status":"ok",
# "data":[
# {
# "loan-balance":"0.100000000000000000",
# "interest-balance":"0.000200000000000000",
# "loan-amount":"0.100000000000000000",
# "accrued-at":1511169724531,
# "interest-amount":"0.000200000000000000",
# "filled-points":"0.2",
# "filled-ht":"0.2",
# "currency":"btc",
# "id":394,
# "state":"accrual",
# "account-id":17747,
# "user-id":119913,
# "created-at":1511169724531
# }
# ]
# }
#
data = self.safe_value(response, 'data')
interest = self.parse_borrow_interests(data, marginType, market)
return self.filter_by_currency_since_limit(interest, code, since, limit)
def parse_borrow_interests(self, response, marginType, market=None):
interest = []
for i in range(0, len(response)):
row = response[i]
interest.append(self.parse_borrow_interest(row, marginType, market))
return interest
def parse_borrow_interest(self, info, marginType, market=None):
# isolated
# {
# "interest-rate":"0.000040830000000000",
# "user-id":35930539,
# "account-id":48916071,
# "updated-at":1649320794195,
# "deduct-rate":"1",
# "day-interest-rate":"0.000980000000000000",
# "hour-interest-rate":"0.000040830000000000",
# "loan-balance":"100.790000000000000000",
# "interest-balance":"0.004115260000000000",
# "loan-amount":"100.790000000000000000",
# "paid-coin":"0.000000000000000000",
# "accrued-at":1649320794148,
# "created-at":1649320794148,
# "interest-amount":"0.004115260000000000",
# "deduct-amount":"0",
# "deduct-currency":"",
# "paid-point":"0.000000000000000000",
# "currency":"usdt",
# "symbol":"ltcusdt",
# "id":20242721,
# }
#
# cross
# {
# "id":3416576,
# "user-id":35930539,
# "account-id":48956839,
# "currency":"usdt",
# "loan-amount":"102",
# "loan-balance":"102",
# "interest-amount":"0.00416466",
# "interest-balance":"0.00416466",
# "created-at":1649322735333,
# "accrued-at":1649322735382,
# "state":"accrual",
# "filled-points":"0",
# "filled-ht":"0"
# }
#
marketId = self.safe_string(info, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
account = marginType if (marginType == 'cross') else symbol
        timestamp = self.safe_integer(info, 'accrued-at')
return {
'account': account, # isolated symbol, will not be returned for crossed margin
'currency': self.safe_currency_code(self.safe_string(info, 'currency')),
'interest': self.safe_number(info, 'interest-amount'),
'interestRate': self.safe_number(info, 'interest-rate'),
'amountBorrowed': self.safe_number(info, 'loan-amount'),
'timestamp': timestamp, # Interest accrued time
'datetime': self.iso8601(timestamp),
'info': info,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/'
query = self.omit(params, self.extract_params(path))
if isinstance(api, str):
# signing implementation for the old endpoints
if api == 'market':
url += api
elif (api == 'public') or (api == 'private'):
url += self.version
elif (api == 'v2Public') or (api == 'v2Private'):
url += 'v2'
url += '/' + self.implode_params(path, params)
if api == 'private' or api == 'v2Private':
self.check_required_credentials()
timestamp = self.ymdhms(self.milliseconds(), 'T')
request = {
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'AccessKeyId': self.apiKey,
'Timestamp': timestamp,
}
if method != 'POST':
request = self.extend(request, query)
request = self.keysort(request)
auth = self.urlencode(request)
# unfortunately, PHP demands double quotes for the escaped newline symbol
payload = "\n".join([method, self.hostname, url, auth]) # eslint-disable-line quotes
signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64')
auth += '&' + self.urlencode({'Signature': signature})
url += '?' + auth
if method == 'POST':
body = self.json(query)
headers = {
'Content-Type': 'application/json',
}
else:
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
else:
if query:
url += '?' + self.urlencode(query)
url = self.implode_params(self.urls['api'][api], {
'hostname': self.hostname,
}) + url
else:
# signing implementation for the new endpoints
# type, access = api
type = self.safe_string(api, 0)
access = self.safe_string(api, 1)
url += self.implode_params(path, params)
hostname = self.safe_string(self.urls['hostnames'], type)
if access == 'public':
if query:
url += '?' + self.urlencode(query)
elif access == 'private':
self.check_required_credentials()
timestamp = self.ymdhms(self.milliseconds(), 'T')
request = {
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'AccessKeyId': self.apiKey,
'Timestamp': timestamp,
}
if method != 'POST':
request = self.extend(request, query)
request = self.keysort(request)
auth = self.urlencode(request)
# unfortunately, PHP demands double quotes for the escaped newline symbol
payload = "\n".join([method, hostname, url, auth]) # eslint-disable-line quotes
signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64')
auth += '&' + self.urlencode({'Signature': signature})
url += '?' + auth
if method == 'POST':
body = self.json(query)
headers = {
'Content-Type': 'application/json',
}
else:
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
url = self.implode_params(self.urls['api'][type], {
'hostname': hostname,
}) + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
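# The private-endpoint signing performed in sign() above follows the "SignatureVersion 2"
# scheme visible in the request dict: sort the query parameters, join the HTTP method,
# host, path and encoded query with newlines, and base64-encode the SHA256 HMAC of that
# payload. The helper below is a self-contained, illustrative sketch of that flow
# (the function name and arguments are ours, not part of this class).
import base64
import hmac
from urllib.parse import urlencode

def sign_request_sketch(method, host, path, params, secret):
    # params is expected to already contain AccessKeyId, SignatureMethod,
    # SignatureVersion and Timestamp, mirroring the request dict built in sign()
    auth = urlencode(sorted(params.items()))
    payload = "\n".join([method, host, path, auth])
    digest = hmac.new(secret.encode(), payload.encode(), hashlib.sha256).digest()
    return base64.b64encode(digest).decode()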
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
return self.safe_integer(config, 'cost', 1)
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if 'status' in response:
#
# {"status":"error","err-code":"order-limitorder-amount-min-error","err-msg":"limit order amount error, min: `0.001`","data":null}
#
status = self.safe_string(response, 'status')
if status == 'error':
code = self.safe_string_2(response, 'err-code', 'err_code')
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
message = self.safe_string_2(response, 'err-msg', 'err_msg')
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
raise ExchangeError(feedback)
def fetch_funding_history(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
marketType, query = self.handle_market_type_and_params('fetchFundingHistory', market, params)
method = None
request = {
'type': '30,31',
}
if market['linear']:
method = 'contractPrivatePostLinearSwapApiV1SwapFinancialRecordExact'
#
# {
# status: 'ok',
# data: {
# financial_record: [
# {
# id: '1320088022',
# type: '30',
# amount: '0.004732510000000000',
# ts: '1641168019321',
# contract_code: 'BTC-USDT',
# asset: 'USDT',
# margin_account: 'BTC-USDT',
# face_margin_account: ''
# },
# ],
# remain_size: '0',
# next_id: null
# },
# ts: '1641189898425'
# }
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
if marginType == 'isolated':
request['margin_account'] = market['id']
else:
request['margin_account'] = market['quoteId']
else:
if marketType == 'swap':
method = 'contractPrivatePostSwapApiV1SwapFinancialRecordExact'
request['contract_code'] = market['id']
else:
raise ExchangeError(self.id + ' fetchFundingHistory() only makes sense for swap contracts')
#
# swap
# {
# status: 'ok',
# data: {
# financial_record: [
# {
# id: '1667436164',
# symbol: 'BTC',
# type: '30',
# amount: '3.9755491985E-8',
# ts: '1641168097323',
# contract_code: 'BTC-USD'
# },
# ],
# remain_size: '0',
# next_id: null
# },
# ts: '1641190296379'
# }
#
response = getattr(self, method)(self.extend(request, query))
data = self.safe_value(response, 'data', {})
financialRecord = self.safe_value(data, 'financial_record', [])
return self.parse_incomes(financialRecord, market, since, limit)
def set_leverage(self, leverage, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
marketType, query = self.handle_market_type_and_params('fetchPosition', market, params)
method = None
if market['linear']:
defaultMargin = 'cross' if market['future'] else 'isolated'
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', defaultMargin)
method = self.get_supported_mapping(marginType, {
'isolated': 'contractPrivatePostLinearSwapApiV1SwapSwitchLeverRate',
'cross': 'contractPrivatePostLinearSwapApiV1SwapCrossSwitchLeverRate',
})
#
# {
# status: 'ok',
# data: {
# contract_code: 'BTC-USDT',
# lever_rate: '100',
# margin_mode: 'isolated'
# },
# ts: '1641184710649'
# }
#
else:
method = self.get_supported_mapping(marketType, {
'future': 'contractPrivatePostApiV1ContractSwitchLeverRate',
'swap': 'contractPrivatePostSwapApiV1SwapSwitchLeverRate',
})
#
# future
# {
# status: 'ok',
# data: {symbol: 'BTC', lever_rate: 5},
# ts: 1641184578678
# }
#
# swap
#
# {
# status: 'ok',
# data: {contract_code: 'BTC-USD', lever_rate: '5'},
# ts: '1641184652979'
# }
#
request = {
'lever_rate': leverage,
}
if marketType == 'future' and market['inverse']:
request['symbol'] = market['settleId']
else:
request['contract_code'] = market['id']
response = getattr(self, method)(self.extend(request, query))
return response
def parse_income(self, income, market=None):
#
# {
# id: '1667161118',
# symbol: 'BTC',
# type: '31',
# amount: '-2.11306593188E-7',
# ts: '1641139308983',
# contract_code: 'BTC-USD'
# }
#
marketId = self.safe_string(income, 'contract_code')
symbol = self.safe_symbol(marketId, market)
amount = self.safe_number(income, 'amount')
timestamp = self.safe_integer(income, 'ts')
id = self.safe_string(income, 'id')
currencyId = self.safe_string_2(income, 'symbol', 'asset')
code = self.safe_currency_code(currencyId)
return {
'info': income,
'symbol': symbol,
'code': code,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'id': id,
'amount': amount,
}
def parse_incomes(self, incomes, market=None, since=None, limit=None):
result = []
for i in range(0, len(incomes)):
entry = incomes[i]
parsed = self.parse_income(entry, market)
result.append(parsed)
sorted = self.sort_by(result, 'timestamp')
return self.filter_by_since_limit(sorted, since, limit, 'timestamp')
def parse_position(self, position, market=None):
#
# {
# symbol: 'BTC',
# contract_code: 'BTC-USDT',
# volume: '1.000000000000000000',
# available: '1.000000000000000000',
# frozen: '0E-18',
# cost_open: '47162.000000000000000000',
# cost_hold: '47151.300000000000000000',
# profit_unreal: '0.007300000000000000',
# profit_rate: '-0.000144183876850008',
# lever_rate: '2',
# position_margin: '23.579300000000000000',
# direction: 'buy',
# profit: '-0.003400000000000000',
# last_price: '47158.6',
# margin_asset: 'USDT',
# margin_mode: 'isolated',
# margin_account: 'BTC-USDT',
# margin_balance: '24.973020070000000000',
# margin_position: '23.579300000000000000',
# margin_frozen: '0',
# margin_available: '1.393720070000000000',
# profit_real: '0E-18',
# risk_rate: '1.044107779705080303',
# withdraw_available: '1.386420070000000000000000000000000000',
# liquidation_price: '22353.229148614609571788',
# adjust_factor: '0.015000000000000000',
# margin_static: '24.965720070000000000'
# }
#
market = self.safe_market(self.safe_string(position, 'contract_code'))
symbol = market['symbol']
contracts = self.safe_string(position, 'volume')
contractSize = self.safe_value(market, 'contractSize')
contractSizeString = self.number_to_string(contractSize)
entryPrice = self.safe_number(position, 'cost_hold')
initialMargin = self.safe_string(position, 'position_margin')
rawSide = self.safe_string(position, 'direction')
side = 'long' if (rawSide == 'buy') else 'short'
unrealizedProfit = self.safe_number(position, 'profit_unreal')
marginType = self.safe_string(position, 'margin_mode')
leverage = self.safe_string(position, 'lever_rate')
percentage = Precise.string_mul(self.safe_string(position, 'profit_rate'), '100')
lastPrice = self.safe_string(position, 'last_price')
faceValue = Precise.string_mul(contracts, contractSizeString)
notional = None
if market['linear']:
notional = Precise.string_mul(faceValue, lastPrice)
else:
notional = Precise.string_div(faceValue, lastPrice)
marginType = 'cross'
        initialMarginPercentage = Precise.string_div(initialMargin, notional)
collateral = self.safe_string(position, 'margin_balance')
liquidationPrice = self.safe_number(position, 'liquidation_price')
adjustmentFactor = self.safe_string(position, 'adjust_factor')
maintenanceMarginPercentage = Precise.string_div(adjustmentFactor, leverage)
maintenanceMargin = Precise.string_mul(maintenanceMarginPercentage, notional)
marginRatio = Precise.string_div(maintenanceMargin, collateral)
return {
'info': position,
'symbol': symbol,
'contracts': self.parse_number(contracts),
'contractSize': contractSize,
'entryPrice': entryPrice,
'collateral': self.parse_number(collateral),
'side': side,
'unrealizedProfit': unrealizedProfit,
'leverage': self.parse_number(leverage),
'percentage': self.parse_number(percentage),
'marginType': marginType,
'notional': self.parse_number(notional),
'markPrice': None,
'liquidationPrice': liquidationPrice,
'initialMargin': self.parse_number(initialMargin),
            'initialMarginPercentage': self.parse_number(initialMarginPercentage),
'maintenanceMargin': self.parse_number(maintenanceMargin),
'maintenanceMarginPercentage': self.parse_number(maintenanceMarginPercentage),
'marginRatio': self.parse_number(marginRatio),
'timestamp': None,
'datetime': None,
}
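# A plain-number sketch of the derived fields computed in parse_position() above, using
# values from the sample response in the comment (lever_rate 2, adjust_factor 0.015,
# last_price 47158.6, volume 1, margin_balance 24.973...). The contract_size of 0.001 is
# an assumption made only for this illustration; the real value comes from the market
# structure.
def position_math_sketch(contracts=1.0, contract_size=0.001, last_price=47158.6,
                         adjust_factor=0.015, leverage=2.0, collateral=24.97302007):
    notional = contracts * contract_size * last_price    # linear contracts: face value * price
    maintenance_margin_pct = adjust_factor / leverage    # 0.015 / 2 = 0.0075
    maintenance_margin = maintenance_margin_pct * notional
    margin_ratio = maintenance_margin / collateral
    return notional, maintenance_margin_pct, maintenance_margin, margin_ratio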
def fetch_positions(self, symbols=None, params={}):
self.load_markets()
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', 'isolated')
defaultSubType = self.safe_string(self.options, 'defaultSubType', 'inverse')
marketType, query = self.handle_market_type_and_params('fetchPositions', None, params)
method = None
if defaultSubType == 'linear':
method = self.get_supported_mapping(marginType, {
'isolated': 'contractPrivatePostLinearSwapApiV1SwapPositionInfo',
'cross': 'contractPrivatePostLinearSwapApiV1SwapCrossPositionInfo',
})
#
# {
# status: 'ok',
# data: [
# {
# symbol: 'BTC',
# contract_code: 'BTC-USDT',
# volume: '1.000000000000000000',
# available: '1.000000000000000000',
# frozen: '0E-18',
# cost_open: '47162.000000000000000000',
# cost_hold: '47162.000000000000000000',
# profit_unreal: '0.047300000000000000',
# profit_rate: '0.002005852169119206',
# lever_rate: '2',
# position_margin: '23.604650000000000000',
# direction: 'buy',
# profit: '0.047300000000000000',
# last_price: '47209.3',
# margin_asset: 'USDT',
# margin_mode: 'isolated',
# margin_account: 'BTC-USDT'
# }
# ],
# ts: '1641108676768'
# }
#
else:
method = self.get_supported_mapping(marketType, {
'future': 'contractPrivatePostApiV1ContractPositionInfo',
'swap': 'contractPrivatePostSwapApiV1SwapPositionInfo',
})
#
# future
# {
# status: 'ok',
# data: [
# {
# symbol: 'BTC',
# contract_code: 'BTC220624',
# contract_type: 'next_quarter',
# volume: '1.000000000000000000',
# available: '1.000000000000000000',
# frozen: '0E-18',
# cost_open: '49018.880000000009853343',
# cost_hold: '49018.880000000009853343',
# profit_unreal: '-8.62360608500000000000000000000000000000000000000E-7',
# profit_rate: '-0.000845439023678622',
# lever_rate: '2',
# position_margin: '0.001019583964880634',
# direction: 'sell',
# profit: '-8.62360608500000000000000000000000000000000000000E-7',
# last_price: '49039.61'
# }
# ],
# ts: '1641109895199'
# }
#
# swap
# {
# status: 'ok',
# data: [
# {
# symbol: 'BTC',
# contract_code: 'BTC-USD',
# volume: '1.000000000000000000',
# available: '1.000000000000000000',
# frozen: '0E-18',
# cost_open: '47150.000000000012353300',
# cost_hold: '47150.000000000012353300',
# profit_unreal: '0E-54',
# profit_rate: '-7.86E-16',
# lever_rate: '3',
# position_margin: '0.000706963591375044',
# direction: 'buy',
# profit: '0E-54',
# last_price: '47150'
# }
# ],
# ts: '1641109636572'
# }
#
response = getattr(self, method)(query)
data = self.safe_value(response, 'data')
timestamp = self.safe_integer(response, 'ts')
result = []
for i in range(0, len(data)):
position = data[i]
parsed = self.parse_position(position)
result.append(self.extend(parsed, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
}))
return self.filter_by_array(result, 'symbol', symbols, False)
def fetch_position(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
marginType = self.safe_string_2(self.options, 'defaultMarginType', 'marginType', 'isolated')
marginType = self.safe_string_2(params, 'marginType', 'defaultMarginType', marginType)
params = self.omit(params, ['defaultMarginType', 'marginType'])
marketType, query = self.handle_market_type_and_params('fetchPosition', market, params)
method = None
if market['linear']:
method = self.get_supported_mapping(marginType, {
'isolated': 'contractPrivatePostLinearSwapApiV1SwapAccountPositionInfo',
'cross': 'contractPrivatePostLinearSwapApiV1SwapCrossAccountPositionInfo',
})
#
# {
# status: 'ok',
# data: [
# {
# positions: [
# {
# symbol: 'BTC',
# contract_code: 'BTC-USDT',
# volume: 1,
# available: 1,
# frozen: 0,
# cost_open: 47027.1,
# cost_hold: 47324.4,
# profit_unreal: 0.1705,
# profit_rate: -0.269631765513927,
# lever_rate: 100,
# position_margin: 0.471539,
# direction: 'sell',
# profit: -0.1268,
# last_price: 47153.9,
# margin_asset: 'USDT',
# margin_mode: 'isolated',
# margin_account: 'BTC-USDT'
# }
# ],
# symbol: 'BTC',
# margin_balance: 8.01274699,
# margin_position: 0.471539,
# margin_frozen: 0,
# margin_available: 7.54120799,
# profit_real: 0,
# profit_unreal: 0.1705,
# risk_rate: 16.442755615124092,
# withdraw_available: 7.37070799,
# liquidation_price: 54864.89009448036,
# lever_rate: 100,
# adjust_factor: 0.55,
# margin_static: 7.84224699,
# contract_code: 'BTC-USDT',
# margin_asset: 'USDT',
# margin_mode: 'isolated',
# margin_account: 'BTC-USDT'
# }
# ],
# ts: 1641162539767
# }
#
else:
method = self.get_supported_mapping(marketType, {
'future': 'contractPrivatePostApiV1ContractAccountPositionInfo',
'swap': 'contractPrivatePostSwapApiV1SwapAccountPositionInfo',
})
# future
# {
# status: 'ok',
# data: [
# {
# symbol: 'BTC',
# contract_code: 'BTC-USD',
# margin_balance: 0.000752347253890835,
# margin_position: 0.000705870726835087,
# margin_frozen: 0,
# margin_available: 0.000046476527055748,
# profit_real: 0,
# profit_unreal: -0.000004546248622,
# risk_rate: 1.0508428311146076,
# withdraw_available: 0.000046476527055748,
# liquidation_price: 35017.91655851386,
# lever_rate: 3,
# adjust_factor: 0.015,
# margin_static: 0.000756893502512835,
# positions: [
# {
# symbol: 'BTC',
# contract_code: 'BTC-USD',
# volume: 1,
# available: 1,
# frozen: 0,
# cost_open: 47150.000000000015,
# cost_hold: 47324.6,
# profit_unreal: -0.000004546248622,
# profit_rate: 0.00463757067530574,
# lever_rate: 3,
# position_margin: 0.000705870726835087,
# direction: 'buy',
# profit: 0.0000032785936199,
# last_price: 47223
# }
# ]
# }
# ],
# ts: 1641162795228
# }
#
# swap
# {
# status: 'ok',
# data: [
# {
# positions: [
# {
# symbol: 'BTC',
# contract_code: 'BTC-USDT',
# volume: 1,
# available: 1,
# frozen: 0,
# cost_open: 47027.1,
# cost_hold: 47324.4,
# profit_unreal: 0.1705,
# profit_rate: -0.269631765513927,
# lever_rate: 100,
# position_margin: 0.471539,
# direction: 'sell',
# profit: -0.1268,
# last_price: 47153.9,
# margin_asset: 'USDT',
# margin_mode: 'isolated',
# margin_account: 'BTC-USDT'
# }
# ],
# symbol: 'BTC',
# margin_balance: 8.01274699,
# margin_position: 0.471539,
# margin_frozen: 0,
# margin_available: 7.54120799,
# profit_real: 0,
# profit_unreal: 0.1705,
# risk_rate: 16.442755615124092,
# withdraw_available: 7.37070799,
# liquidation_price: 54864.89009448036,
# lever_rate: 100,
# adjust_factor: 0.55,
# margin_static: 7.84224699,
# contract_code: 'BTC-USDT',
# margin_asset: 'USDT',
# margin_mode: 'isolated',
# margin_account: 'BTC-USDT'
# }
# ],
# ts: 1641162539767
# }
# cross usdt swap
# {
# "status":"ok",
# "data":{
# "positions":[
# ],
# "futures_contract_detail":[
# (...)
# ]
# "margin_mode":"cross",
# "margin_account":"USDT",
# "margin_asset":"USDT",
# "margin_balance":"1.000000000000000000",
# "margin_static":"1.000000000000000000",
# "margin_position":"0",
# "margin_frozen":"1.000000000000000000",
# "profit_real":"0E-18",
# "profit_unreal":"0",
# "withdraw_available":"0",
# "risk_rate":"15.666666666666666666",
# "contract_detail":[
# (...)
# ]
# },
# "ts":"1645521118946"
# }
#
request = {}
if market['future'] and market['inverse']:
request['symbol'] = market['settleId']
else:
if marginType == 'cross':
request['margin_account'] = 'USDT' # only allowed value
request['contract_code'] = market['id']
response = getattr(self, method)(self.extend(request, query))
data = self.safe_value(response, 'data')
account = None
if marginType == 'cross':
account = data
else:
account = self.safe_value(data, 0)
omitted = self.omit(account, ['positions'])
positions = self.safe_value(account, 'positions')
position = None
if market['future'] and market['inverse']:
for i in range(0, len(positions)):
entry = positions[i]
if entry['contract_code'] == market['id']:
position = entry
break
else:
position = self.safe_value(positions, 0)
timestamp = self.safe_integer(response, 'ts')
parsed = self.parse_position(self.extend(position, omitted))
return self.extend(parsed, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
def parse_ledger_entry_type(self, type):
types = {
'trade': 'trade',
'etf': 'trade',
'transact-fee': 'fee',
'fee-deduction': 'fee',
'transfer': 'transfer',
'credit': 'credit',
'liquidation': 'trade',
'interest': 'credit',
'deposit': 'deposit',
'withdraw': 'withdrawal',
'withdraw-fee': 'fee',
'exchange': 'exchange',
'other-types': 'transfer',
'rebate': 'rebate',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# {
# "accountId": 10000001,
# "currency": "usdt",
# "transactAmt": 10.000000000000000000,
# "transactType": "transfer",
# "transferType": "margin-transfer-out",
# "transactId": 0,
# "transactTime": 1629882331066,
# "transferer": 28483123,
# "transferee": 13496526
# }
#
id = self.safe_string(item, 'transactId')
currencyId = self.safe_string(item, 'currency')
code = self.safe_currency_code(currencyId, currency)
amount = self.safe_number(item, 'transactAmt')
transferType = self.safe_string(item, 'transferType')
type = self.parse_ledger_entry_type(transferType)
direction = self.safe_string(item, 'direction')
timestamp = self.safe_integer(item, 'transactTime')
datetime = self.iso8601(timestamp)
account = self.safe_string(item, 'accountId')
return {
'id': id,
'direction': direction,
'account': account,
'referenceId': id,
'referenceAccount': account,
'type': type,
'currency': code,
'amount': amount,
'timestamp': timestamp,
'datetime': datetime,
'before': None,
'after': None,
'status': None,
'fee': None,
'info': item,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
accountId = self.fetch_account_id_by_type('spot', params)
request = {
'accountId': accountId,
# 'currency': code,
# 'transactTypes': 'all', # default all
# 'startTime': 1546272000000,
# 'endTime': 1546272000000,
# 'sort': asc, # asc, desc
# 'limit': 100, # range 1-500
            # 'fromId': 323 # first record ID in this query for pagination
}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit # max 500
response = self.spotPrivateGetV2AccountLedger(self.extend(request, params))
#
# {
# "code": 200,
# "message": "success",
# "data": [
# {
# "accountId": 10000001,
# "currency": "usdt",
# "transactAmt": 10.000000000000000000,
# "transactType": "transfer",
# "transferType": "margin-transfer-out",
# "transactId": 0,
# "transactTime": 1629882331066,
# "transferer": 28483123,
# "transferee": 13496526
# },
# {
# "accountId": 10000001,
# "currency": "usdt",
# "transactAmt": -10.000000000000000000,
# "transactType": "transfer",
# "transferType": "margin-transfer-in",
# "transactId": 0,
# "transactTime": 1629882096562,
# "transferer": 13496526,
# "transferee": 28483123
# }
# ],
# "nextId": 1624316679,
# "ok": True
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ledger(data, currency, since, limit)
def fetch_leverage_tiers(self, symbols=None, params={}):
self.load_markets()
response = self.contractPublicGetLinearSwapApiV1SwapAdjustfactor(params)
#
# {
# "status": "ok",
# "data": [
# {
# "symbol": "MANA",
# "contract_code": "MANA-USDT",
# "margin_mode": "isolated",
# "trade_partition": "USDT",
# "list": [
# {
# "lever_rate": 75,
# "ladders": [
# {
# "ladder": 0,
# "min_size": 0,
# "max_size": 999,
# "adjust_factor": 0.7
# },
# ...
# ]
# }
# ...
# ]
# },
# ...
# ]
# }
#
data = self.safe_value(response, 'data')
return self.parse_leverage_tiers(data, symbols, 'contract_code')
def fetch_market_leverage_tiers(self, symbol, params={}):
self.load_markets()
request = {}
if symbol is not None:
market = self.market(symbol)
if not market['contract']:
                raise BadRequest(self.id + ' fetchMarketLeverageTiers() symbol supports contract markets only')
request['contract_code'] = market['id']
response = self.contractPublicGetLinearSwapApiV1SwapAdjustfactor(self.extend(request, params))
#
# {
# "status": "ok",
# "data": [
# {
# "symbol": "MANA",
# "contract_code": "MANA-USDT",
# "margin_mode": "isolated",
# "trade_partition": "USDT",
# "list": [
# {
# "lever_rate": 75,
# "ladders": [
# {
# "ladder": 0,
# "min_size": 0,
# "max_size": 999,
# "adjust_factor": 0.7
# },
# ...
# ]
# }
# ...
# ]
# },
# ...
# ]
# }
#
data = self.safe_value(response, 'data')
tiers = self.parse_leverage_tiers(data, [symbol], 'contract_code')
return self.safe_value(tiers, symbol)
def parse_leverage_tiers(self, response, symbols, marketIdKey):
result = {}
for i in range(0, len(response)):
item = response[i]
list = self.safe_value(item, 'list', [])
tiers = []
currency = self.safe_string(item, 'trade_partition')
id = self.safe_string(item, marketIdKey)
symbol = self.safe_symbol(id)
            if (symbols is None) or self.in_array(symbol, symbols):
for j in range(0, len(list)):
obj = list[j]
leverage = self.safe_string(obj, 'lever_rate')
ladders = self.safe_value(obj, 'ladders', [])
for k in range(0, len(ladders)):
bracket = ladders[k]
adjustFactor = self.safe_string(bracket, 'adjust_factor')
tiers.append({
'tier': self.safe_integer(bracket, 'ladder'),
'currency': self.safe_currency_code(currency),
'notionalFloor': self.safe_number(bracket, 'min_size'),
'notionalCap': self.safe_number(bracket, 'max_size'),
'maintenanceMarginRate': self.parse_number(Precise.string_div(adjustFactor, leverage)),
'maxLeverage': self.parse_number(leverage),
'info': bracket,
})
result[symbol] = tiers
return result
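# A minimal sketch of the per-ladder math in parse_leverage_tiers() above: each tier's
# maintenanceMarginRate is the ladder's adjust_factor divided by the tier's lever_rate,
# and maxLeverage is the lever_rate itself. The numbers mirror the sample response in
# the comments (lever_rate 75, adjust_factor 0.7) and are illustrative only.
def leverage_tier_sketch(lever_rate=75, adjust_factor=0.7, min_size=0, max_size=999):
    return {
        'notionalFloor': min_size,
        'notionalCap': max_size,
        'maintenanceMarginRate': adjust_factor / lever_rate,  # 0.7 / 75 ~= 0.00933
        'maxLeverage': lever_rate,
    }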
| avg_line_length: 46.928465 | max_line_length: 355 | alphanum_fraction: 0.468255 |
| hexsha: 36ea6c23b7f4805eaa168075ed6c798fc0a50c09 | size: 4,207 | ext: py | lang: Python |
| max_stars_repo_path: tests/test_querier.py | max_stars_repo_name: Jeketam/supertokens-flask | max_stars_repo_head_hexsha: 36a4b798f81e03caac8b0985c8c6f736b0aa186b | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 5 | max_stars_repo_stars_event_min_datetime: 2020-06-01T13:27:42.000Z | max_stars_repo_stars_event_max_datetime: 2021-05-03T16:19:28.000Z |
| max_issues_repo_path: tests/test_querier.py | max_issues_repo_name: Jeketam/supertokens-flask | max_issues_repo_head_hexsha: 36a4b798f81e03caac8b0985c8c6f736b0aa186b | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 8 | max_issues_repo_issues_event_min_datetime: 2020-09-15T20:26:55.000Z | max_issues_repo_issues_event_max_datetime: 2020-11-02T04:13:43.000Z |
| max_forks_repo_path: tests/test_querier.py | max_forks_repo_name: Jeketam/supertokens-flask | max_forks_repo_head_hexsha: 36a4b798f81e03caac8b0985c8c6f736b0aa186b | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 8 | max_forks_repo_forks_event_min_datetime: 2020-10-05T11:27:24.000Z | max_forks_repo_forks_event_max_datetime: 2020-10-21T13:39:23.000Z |
"""
Copyright (c) 2020, VRAI Labs and/or its affiliates. All rights reserved.
This software is licensed under the Apache License, Version 2.0 (the
"License") as published by the Apache Software Foundation.
You may not use this file except in compliance with the License. You may
obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
from supertokens_flask.querier import Querier
from supertokens_flask.utils import find_max_version
from supertokens_flask.exceptions import SuperTokensGeneralError
from .utils import (
reset, setup_st, clean_st, start_st,
API_VERSION_TEST_NON_SUPPORTED_SV,
API_VERSION_TEST_NON_SUPPORTED_CV,
API_VERSION_TEST_SINGLE_SUPPORTED_SV,
API_VERSION_TEST_SINGLE_SUPPORTED_CV,
API_VERSION_TEST_MULTIPLE_SUPPORTED_SV,
API_VERSION_TEST_MULTIPLE_SUPPORTED_CV,
API_VERSION_TEST_SINGLE_SUPPORTED_RESULT,
API_VERSION_TEST_MULTIPLE_SUPPORTED_RESULT,
SUPPORTED_CORE_DRIVER_INTERFACE_FILE
)
from json import load
from supertokens_flask.constants import (
HELLO,
SUPPORTED_CDI_VERSIONS
)
def setup_function(f):
reset()
clean_st()
setup_st()
def teardown_function(f):
reset()
clean_st()
def test_get_api_version():
try:
Querier.get_instance().get_api_version()
assert False
except SuperTokensGeneralError:
assert True
start_st()
cv = API_VERSION_TEST_SINGLE_SUPPORTED_CV
sv = API_VERSION_TEST_SINGLE_SUPPORTED_SV
assert find_max_version(cv, sv) == API_VERSION_TEST_SINGLE_SUPPORTED_RESULT
cv = API_VERSION_TEST_MULTIPLE_SUPPORTED_CV
sv = API_VERSION_TEST_MULTIPLE_SUPPORTED_SV
assert find_max_version(
cv, sv) == API_VERSION_TEST_MULTIPLE_SUPPORTED_RESULT
cv = API_VERSION_TEST_NON_SUPPORTED_CV
sv = API_VERSION_TEST_NON_SUPPORTED_SV
assert find_max_version(cv, sv) is None
def test_check_supported_core_driver_interface_versions():
f = open(SUPPORTED_CORE_DRIVER_INTERFACE_FILE, 'r')
sv = set(load(f)['versions'])
f.close()
assert sv == set(SUPPORTED_CDI_VERSIONS)
def test_core_not_available():
try:
querier = Querier.get_instance()
querier.send_get_request('/', [])
assert False
except SuperTokensGeneralError:
assert True
def test_three_cores_and_round_robin():
start_st()
start_st('localhost', 3568)
start_st('localhost', 3569)
Querier.init_instance('http://localhost:3567;http://localhost:3568/;http://localhost:3569', None)
querier = Querier.get_instance()
assert querier.send_get_request(HELLO, []) == 'Hello\n'
assert querier.send_get_request(HELLO, []) == 'Hello\n'
assert querier.send_get_request(HELLO, []) == 'Hello\n'
assert len(querier.get_hosts_alive_for_testing()) == 3
assert querier.send_delete_request(HELLO, []) == 'Hello\n'
assert len(querier.get_hosts_alive_for_testing()) == 3
assert 'http://localhost:3567' in querier.get_hosts_alive_for_testing()
assert 'http://localhost:3568' in querier.get_hosts_alive_for_testing()
assert 'http://localhost:3569' in querier.get_hosts_alive_for_testing()
def test_three_cores_one_dead_and_round_robin():
start_st()
start_st('localhost', 3568)
Querier.init_instance('http://localhost:3567;http://localhost:3568/;http://localhost:3569', None)
querier = Querier.get_instance()
assert querier.send_get_request(HELLO, []) == 'Hello\n'
assert querier.send_get_request(HELLO, []) == 'Hello\n'
assert len(querier.get_hosts_alive_for_testing()) == 2
assert querier.send_delete_request(HELLO, []) == 'Hello\n'
assert len(querier.get_hosts_alive_for_testing()) == 2
assert 'http://localhost:3567' in querier.get_hosts_alive_for_testing()
assert 'http://localhost:3568' in querier.get_hosts_alive_for_testing()
assert 'http://localhost:3569' not in querier.get_hosts_alive_for_testing()
| avg_line_length: 36.582609 | max_line_length: 101 | alphanum_fraction: 0.754932 |
| hexsha: 1c5da41aefe207fe784a69f570788f7916feae63 | size: 554 | ext: py | lang: Python |
| max_stars_repo_path: nlu_bert_crosswoz/util/nlu.py | max_stars_repo_name: huakeda1/Basic-algorithm-and-framework-study-for-AI | max_stars_repo_head_hexsha: 8776dc500772a6c1f28be9c4a426ed9eca2ec775 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2020-11-24T02:58:24.000Z | max_stars_repo_stars_event_max_datetime: 2021-08-18T06:50:28.000Z |
| max_issues_repo_path: nlu_bert_crosswoz/util/nlu.py | max_issues_repo_name: huakeda1/Basic-algorithm-and-framework-study-for-AI | max_issues_repo_head_hexsha: 8776dc500772a6c1f28be9c4a426ed9eca2ec775 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: nlu_bert_crosswoz/util/nlu.py | max_forks_repo_name: huakeda1/Basic-algorithm-and-framework-study-for-AI | max_forks_repo_head_hexsha: 8776dc500772a6c1f28be9c4a426ed9eca2ec775 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
"""Natural language understanding interface."""
from util.module import Module
class NLU(Module):
"""NLU module interface."""
def predict(self, utterance, context=list()):
"""Predict the dialog act of a natural language utterance.
Args:
utterance (string):
A natural language utterance.
context (list of string):
Previous utterances.
Returns:
action (list of list):
The dialog act of utterance.
"""
return []
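# A minimal, hypothetical implementation of the interface above, included only to
# illustrate the expected call pattern; EchoNLU and its canned dialog act are not
# part of the original module.
class EchoNLU(NLU):
    """Toy NLU that returns a fixed dialog act for any non-empty utterance."""
    def predict(self, utterance, context=list()):
        # a real model would classify the intent and extract slot values here
        if not utterance:
            return []
        return [['greet', 'General', 'none', 'none']]  # illustrative act structure
# usage sketch (Module's constructor is not shown here, so instantiation stays commented):
# nlu = EchoNLU()
# acts = nlu.predict('hello', context=[])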
| avg_line_length: 25.181818 | max_line_length: 66 | alphanum_fraction: 0.555957 |
| hexsha: bb8353d0f5d93d7b633fc2ee6815c5f703045ef0 | size: 1,263 | ext: py | lang: Python |
| max_stars_repo_path: Implementation/ANN/section_three/main.py | max_stars_repo_name: FarshidNooshi/CI_ANN_Fruit_Detector | max_stars_repo_head_hexsha: e027ab95e744f6421dee1ca9a96cac80ff89f881 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2022-02-08T18:01:10.000Z | max_stars_repo_stars_event_max_datetime: 2022-02-08T18:01:10.000Z |
| max_issues_repo_path: Implementation/ANN/section_three/main.py | max_issues_repo_name: FarshidNooshi/CI_ANN_Fruit_Detector | max_issues_repo_head_hexsha: e027ab95e744f6421dee1ca9a96cac80ff89f881 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: Implementation/ANN/section_three/main.py | max_forks_repo_name: FarshidNooshi/CI_ANN_Fruit_Detector | max_forks_repo_head_hexsha: e027ab95e744f6421dee1ca9a96cac80ff89f881 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
import time
import numpy as np
from ANN.section_one.credentials import get_path_of_Datasets, get_path_of_documents
from ANN.section_one.utils.utilsV1 import load_data
from ANN.section_three.utils.utilsV3 import L_layer_model
def run_program(file):
path = get_path_of_Datasets()
train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig = load_data(path)
x_train = np.zeros((102, 1962))
y_train = np.zeros((4, 1962))
for i in range(1962):
for j in range(102):
x_train[j, i] = train_set_x_orig[i][j]
for i in range(1962):
for j in range(4):
y_train[j, i] = train_set_y_orig[i][j]
x_section_three = x_train[:, 0:200]
y_section_three = y_train[:, 0:200]
start_time = time.time()
val, parameters = L_layer_model(x_section_three, y_section_three, [102, 150, 60, 4], num_epochs=20, print_cost=True, file=file)
file.write("\n--- %s seconds ---" % (time.time() - start_time))
return val
with open(f"{get_path_of_documents()}/section three/report.txt", "w") as f:
sum_of_costs = 0
for i in range(10):
f.write(f"\nrunning program with i = {i}\n")
sum_of_costs += run_program(f)
f.write(f"\n\n----average cost is {sum_of_costs / 200}-----")
| avg_line_length: 35.083333 | max_line_length: 131 | alphanum_fraction: 0.672209 |
| hexsha: 435f52c8fbd4fb57fce9932cf02c52a4ab8a5a0c | size: 1,324 | ext: py | lang: Python |
| max_stars_repo_path: sample/mt5Server.py | max_stars_repo_name: jettom/JtSpider | max_stars_repo_head_hexsha: 7e2cb32415ca5d439b117c0277a7f7b2b27fa0bf | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2019-01-25T12:54:24.000Z | max_stars_repo_stars_event_max_datetime: 2019-01-25T12:54:24.000Z |
| max_issues_repo_path: sample/mt5Server.py | max_issues_repo_name: jettom/JtSpider | max_issues_repo_head_hexsha: 7e2cb32415ca5d439b117c0277a7f7b2b27fa0bf | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: sample/mt5Server.py | max_forks_repo_name: jettom/JtSpider | max_forks_repo_head_hexsha: 7e2cb32415ca5d439b117c0277a7f7b2b27fa0bf | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-05-16T01:18:25.000Z | max_forks_repo_forks_event_max_datetime: 2020-05-16T01:18:25.000Z |
import socket, numpy as np
from sklearn.linear_model import LinearRegression
class socketserver:
def __init__(self, address='', port=9090):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.address = address
self.port = port
self.sock.bind((self.address, self.port))
self.cummdata = ''
def recvmsg(self):
self.sock.listen(1)
self.conn, self.addr = self.sock.accept()
print('connected to', self.addr)
self.cummdata = ''
while True:
data = self.conn.recv(10000)
self.cummdata += data.decode("utf-8")
if not data:
break
#self.conn.send(bytes(calcregr(self.cummdata), "utf-8"))
return self.cummdata
def __del__(self):
self.sock.close()
def calcregr(msg = ''):
chartdata = np.fromstring(msg, dtype=float, sep= ' ')
Y = np.array(chartdata).reshape(-1,1)
X = np.array(np.arange(len(chartdata))).reshape(-1,1)
lr = LinearRegression()
lr.fit(X, Y)
Y_pred = lr.predict(X)
P = Y_pred.astype(str).item(-1) + ' ' + Y_pred.astype(str).item(0)
print(P)
return str(P)
if __name__ == "__main__":
serv = socketserver('127.0.0.1', 9090)
while True:
msg = serv.recvmsg()
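# A minimal, hypothetical client for exercising the server above (host, port and the
# sample price series are illustrative). It sends the space-separated values that
# calcregr() parses; note that the server loop as written only accumulates the data,
# since the conn.send(...) reply line is commented out.
def client_sketch(host='127.0.0.1', port=9090):
    prices = '1.1010 1.1015 1.1020 1.1030'
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    client.sendall(prices.encode('utf-8'))
    client.close()
    # the regression endpoints can also be computed directly: calcregr(prices)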
| avg_line_length: 26.48 | max_line_length: 70 | alphanum_fraction: 0.586103 |
| hexsha: 0ce81275e9705650e0e210341b126b81d83b245a | size: 5,035 | ext: py | lang: Python |
| max_stars_repo_path: k_run.py | max_stars_repo_name: dewyeon/patchcore | max_stars_repo_head_hexsha: ca68cce8d814357178668de5663d027337c999b6 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null |
| max_issues_repo_path: k_run.py | max_issues_repo_name: dewyeon/patchcore | max_issues_repo_head_hexsha: ca68cce8d814357178668de5663d027337c999b6 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: k_run.py | max_forks_repo_name: dewyeon/patchcore | max_forks_repo_head_hexsha: ca68cce8d814357178668de5663d027337c999b6 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
import subprocess
import argparse
parser=argparse.ArgumentParser(description="run")
parser.add_argument("--n", type=int)
args = parser.parse_args()
n = args.n
root = 'results_k'
coreset = 0.01
if args.n == 0:
for cl in ['bottle', 'cable', 'wood', 'capsule', 'carpet', 'grid', 'hazelnut','zipper', 'leather', 'transistor', 'metal_nut', 'pill', 'screw', 'tile', 'toothbrush']:
for load in [512]:
for inp in [224]:
log_name = cl + str(coreset) + str(inp) + str(load)
subprocess.call(f"python train.py --phase train --load_size {load} --input_size {inp} --project_root_path {root} --coreset_sampling_ratio {coreset} --category {cl} --gpu {n} > logs/{log_name}.txt", shell=True)
if args.n == 1:
for cl in ['bottle', 'cable', 'wood', 'capsule', 'carpet', 'grid', 'hazelnut','zipper', 'leather', 'transistor', 'metal_nut', 'pill', 'screw', 'tile', 'toothbrush']:
for load in [512]:
for inp in [336]:
log_name = cl + str(coreset) + str(inp) + str(load)
subprocess.call(f"python train.py --phase train --load_size {load} --input_size {inp} --project_root_path {root} --coreset_sampling_ratio {coreset} --category {cl} --gpu {n} > logs/{log_name}.txt", shell=True)
if args.n == 2:
for cl in ['bottle', 'cable', 'wood', 'capsule', 'carpet', 'grid', 'hazelnut','zipper', 'leather', 'transistor', 'metal_nut', 'pill', 'screw', 'tile', 'toothbrush']:
for load in [512]:
for inp in [448]:
log_name = cl + str(coreset) + str(inp) + str(load)
subprocess.call(f"python train.py --phase train --load_size {load} --input_size {inp} --project_root_path {root} --coreset_sampling_ratio {coreset} --category {cl} --gpu {n} > logs/{log_name}.txt", shell=True)
if args.n == 3:
for cl in ['bottle', 'cable', 'wood', 'capsule', 'carpet', 'grid', 'hazelnut','zipper', 'leather', 'transistor', 'metal_nut', 'pill', 'screw', 'tile', 'toothbrush']:
for load in [256]:
for inp in [224]:
log_name = cl + str(coreset) + str(inp) + str(load)
subprocess.call(f"python train.py --phase train --load_size {load} --input_size {inp} --project_root_path {root} --coreset_sampling_ratio {coreset} --category {cl} --gpu {n} > logs/{log_name}.txt", shell=True)
if args.n == 4:
for cl in ['bottle', 'cable', 'wood', 'capsule', 'carpet', 'grid', 'hazelnut','zipper', 'leather', 'transistor', 'metal_nut', 'pill', 'screw', 'tile', 'toothbrush']:
for load in [256]:
for inp in [448]:
log_name = cl + str(coreset) + str(inp) + str(load)
subprocess.call(f"python train.py --phase train --load_size {load} --input_size {inp} --project_root_path {root} --coreset_sampling_ratio {coreset} --category {cl} --gpu {n} > logs/{log_name}.txt", shell=True)
if args.n == 5:
for cl in ['bottle', 'cable', 'wood', 'capsule', 'carpet', 'grid', 'hazelnut','zipper', 'leather', 'transistor', 'metal_nut', 'pill', 'screw', 'tile', 'toothbrush']:
for load in [256]:
for inp in [336]:
log_name = cl + str(coreset) + str(inp) + str(load)
subprocess.call(f"python train.py --phase train --load_size {load} --input_size {inp} --project_root_path {root} --coreset_sampling_ratio {coreset} --category {cl} --gpu {n} > logs/{log_name}.txt", shell=True)
if args.n == 6:
for cl in ['bottle', 'cable', 'wood', 'capsule', 'carpet', 'grid', 'hazelnut','zipper', 'leather', 'transistor', 'metal_nut', 'pill', 'screw', 'tile', 'toothbrush']:
for load in [128]:
for inp in [224]:
log_name = cl + str(coreset) + str(inp) + str(load)
subprocess.call(f"python train.py --phase train --load_size {load} --input_size {inp} --project_root_path {root} --coreset_sampling_ratio {coreset} --category {cl} --gpu {n} > logs/{log_name}.txt", shell=True)
if args.n == 7:
for cl in ['bottle', 'cable', 'wood', 'capsule', 'carpet', 'grid', 'hazelnut','zipper', 'leather', 'transistor', 'metal_nut', 'pill', 'screw', 'tile', 'toothbrush']:
for load in [128]:
for inp in [336]:
log_name = cl + str(coreset) + str(inp) + str(load)
subprocess.call(f"python train.py --phase train --load_size {load} --input_size {inp} --project_root_path {root} --coreset_sampling_ratio {coreset} --category {cl} --gpu {n} > logs/{log_name}.txt", shell=True)
if args.n == 8:
for cl in ['bottle', 'cable', 'wood', 'capsule', 'carpet', 'grid', 'hazelnut','zipper', 'leather', 'transistor', 'metal_nut', 'pill', 'screw', 'tile', 'toothbrush']:
for load in [128]:
for inp in [448]:
log_name = cl + str(coreset) + str(inp) + str(load)
subprocess.call(f"python train.py --phase train --load_size {load} --input_size {inp} --project_root_path {root} --coreset_sampling_ratio {coreset} --category {cl} --gpu {n} > logs/{log_name}.txt", shell=True)
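# The nine branches above differ only in the (load_size, input_size) pair selected by
# --n. The helper below is a suggested, equivalent reformulation and is not called
# anywhere; it is included purely as an illustration of the same sweep.
def run_sweep_compact(n):
    classes = ['bottle', 'cable', 'wood', 'capsule', 'carpet', 'grid', 'hazelnut', 'zipper',
               'leather', 'transistor', 'metal_nut', 'pill', 'screw', 'tile', 'toothbrush']
    configs = {0: (512, 224), 1: (512, 336), 2: (512, 448),
               3: (256, 224), 4: (256, 448), 5: (256, 336),
               6: (128, 224), 7: (128, 336), 8: (128, 448)}
    load, inp = configs[n]
    for cl in classes:
        log_name = cl + str(coreset) + str(inp) + str(load)
        subprocess.call(f"python train.py --phase train --load_size {load} --input_size {inp} "
                        f"--project_root_path {root} --coreset_sampling_ratio {coreset} "
                        f"--category {cl} --gpu {n} > logs/{log_name}.txt", shell=True)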
| avg_line_length: 69.930556 | max_line_length: 225 | alphanum_fraction: 0.597617 |
| hexsha: e937e790415aa60e52e95e0fcf3c8bfae5182ea4 | size: 3,122 | ext: py | lang: Python |
| max_stars_repo_path: app/app/settings.py | max_stars_repo_name: devansh204/django-app-tutorial | max_stars_repo_head_hexsha: be5053ad24733d888e56e967b2f515b4771def7c | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null |
| max_issues_repo_path: app/app/settings.py | max_issues_repo_name: devansh204/django-app-tutorial | max_issues_repo_head_hexsha: be5053ad24733d888e56e967b2f515b4771def7c | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: app/app/settings.py | max_forks_repo_name: devansh204/django-app-tutorial | max_forks_repo_head_hexsha: be5053ad24733d888e56e967b2f515b4771def7c | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x)qe5r#3-9850@0bf@=zx6y$-__)&sapc!($g=c=&d1qws@f-_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
| avg_line_length: 25.177419 | max_line_length: 91 | alphanum_fraction: 0.692184 |
| hexsha: b442687dda5a6d1b8828235e15e61cec5e108582 | size: 1,531 | ext: py | lang: Python |
| max_stars_repo_path: tests/test_utils.py | max_stars_repo_name: ericmand/lambdo | max_stars_repo_head_hexsha: 48cbfac1429f19a0830f33535922247409230bac | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-02-24T09:06:32.000Z | max_stars_repo_stars_event_max_datetime: 2021-02-24T09:06:32.000Z |
| max_issues_repo_path: tests/test_utils.py | max_issues_repo_name: wangchengrong/lambdo | max_issues_repo_head_hexsha: 7de0e4bd61ffa6d03f23290c198f08a22c3fcf28 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: tests/test_utils.py | max_forks_repo_name: wangchengrong/lambdo | max_forks_repo_head_hexsha: 7de0e4bd61ffa6d03f23290c198f08a22c3fcf28 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
import unittest
from lambdo.Workflow import *
class TablesTestCase(unittest.TestCase):
def setUp(self):
pass
def test_imports(self):
wf_json = {
"id": "My workflow",
"imports": ["tests.udf", "os.path"],
"tables": [
{
"id": "My table",
"columns": [
{
"id": "A",
"inputs": ["A"],
"window": "1",
"extensions": [
{"function": "tests.udf:user_import_fn", "outputs": "Success"}
]
}
]
}
]
}
wf = Workflow(wf_json)
self.assertEqual(len(wf.modules), 2)
self.assertTrue(hasattr(wf.modules[0], 'user_import_fn'))
# Provide data directly (without table population)
data = {'A': [1, 2, 3]}
df = pd.DataFrame(data)
tb = wf.tables[0]
tb.data = df
wf.execute()
self.assertEqual(wf.tables[0].data['Success'][0], 'Success')
self.assertEqual(wf.tables[0].data['Success'].nunique(), 1)
def test_getset_pkl(self):
value = "Value to be stored"
json_field = "$file:_test_.pkl"
set_value(json_field, value)
value2 = get_value(json_field)
self.assertEqual(value, value2)
if __name__ == '__main__':
unittest.main()
| avg_line_length: 25.949153 | max_line_length: 94 | alphanum_fraction: 0.444807 |
| hexsha: 6faa0c35f8864a255612bed8f388617ebb6096d3 | size: 28,924 | ext: py | lang: Python |
| max_stars_repo_path: tests/unit/test_observe_unit.py | max_stars_repo_name: svedel/greattunes | max_stars_repo_head_hexsha: e241d0f6a30479b600d85aafabf27058d3fd1072 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null |
| max_issues_repo_path: tests/unit/test_observe_unit.py | max_issues_repo_name: svedel/greattunes | max_issues_repo_head_hexsha: e241d0f6a30479b600d85aafabf27058d3fd1072 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 20 | max_issues_repo_issues_event_min_datetime: 2021-07-14T06:44:56.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-17T05:06:23.000Z |
| max_forks_repo_path: tests/unit/test_observe_unit.py | max_forks_repo_name: svedel/greattunes | max_forks_repo_head_hexsha: e241d0f6a30479b600d85aafabf27058d3fd1072 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
import pandas as pd
import pytest
import torch
import greattunes.utils
from greattunes.data_format_mappings import tensor2pretty_covariate
@pytest.mark.parametrize("method, tmp_val",
[
["functions", 1.0],
["iterative", 2.0]
])
def test_observe_get_and_verify_response_input_unit(tmp_observe_class, method, tmp_val, monkeypatch):
"""
    test that _get_and_verify_response_input works for self.sampling["method"] = "iterative" or "functions". Leverage
monkeypatching and create false class to mock that greattunes._observe will be called inside
TuneSession class in greattunes.__init__. Rely on manual input for "iterative" option
"""
# # define class
cls = tmp_observe_class
cls.sampling["method"] = method
# monkeypatch the "support" functions _get_response_function_input, _read_response_manual_input
def mock_get_response_function_input():
return torch.tensor([[tmp_val]], dtype=torch.double,
device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
monkeypatch.setattr(
cls, "_get_response_function_input", mock_get_response_function_input
)
manual_tmp_val = tmp_val + 1.0
def mock_read_response_manual_input(additional_text):
return torch.tensor([[manual_tmp_val]], dtype=torch.double,
device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
monkeypatch.setattr(
cls, "_read_response_manual_input", mock_read_response_manual_input
)
# set kwarg response to None (so manually provided input is used)
kwarg_response = None
# run test
output = cls._get_and_verify_response_input(response=kwarg_response)
if method == "functions":
assert output[0].item() == tmp_val
elif method == "iterative":
assert output[0].item() == manual_tmp_val
@pytest.mark.parametrize("method", ["WRONG", None])
def test_observe_get_and_verify_response_input_fail_unit(tmp_observe_class, method):
"""
test that _get_and_verify_response_input fails for self.sampling["method"] not equal to "iterative" or "functions".
"""
# # define class
cls = tmp_observe_class
cls.sampling["method"] = method
# set kwarg response to None (so manually provided input is used)
kwarg_response = None
with pytest.raises(Exception) as e:
assert cls._get_and_verify_response_input(response=kwarg_response)
assert str(e.value) == "greattunes._observe._get_and_verify_response_input: class attribute " \
"self.sampling['method'] has non-permissable value " + str(method) + ", must be in " \
"['iterative', 'functions']."
@pytest.mark.parametrize(
"kwarg_response",
[
[1.2],
torch.tensor([[1.2]], dtype=torch.double)
]
)
def test_get_and_verify_response_input_kwarg_input_works(tmp_observe_class, kwarg_response, monkeypatch):
"""
test that _get_and_verify_response_input works for self.sampling["method"] = "iterative" with programmatically
provided input. Leverage monkeypatching for utils.__get_covars_from_kwargs and create false class to mock that
greattunes._observe will be called inside TuneSession class in greattunes.__init__
"""
# set device for torch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# # define class
cls = tmp_observe_class
cls.sampling["method"] = "iterative"
# monkeypatch "__get_covars_from_kwargs"
def mock__get_covars_from_kwargs(x):
if isinstance(kwarg_response, list):
return torch.tensor([kwarg_response], dtype=torch.double, device=device)
else:
return kwarg_response
monkeypatch.setattr(greattunes.utils, "__get_covars_from_kwargs", mock__get_covars_from_kwargs)
# run test
output = cls._get_and_verify_response_input(response=kwarg_response)
# assert
if isinstance(kwarg_response, list):
assert output[0].item() == kwarg_response[0]
elif isinstance(kwarg_response, torch.DoubleTensor):
assert output[0].item() == kwarg_response[0].item()
@pytest.mark.parametrize("FLAG_TRAINING_DATA", [True, False])
def test_observe_get_response_function_input_unit(tmp_observe_class, training_data_covar_complex, FLAG_TRAINING_DATA):
"""
test _get_response_function_input for pass and fail
"""
# temp class for test
cls = tmp_observe_class
# data
train_X = training_data_covar_complex[1]
covar_details = training_data_covar_complex[3]
# set attributes on class, required for test
cls.train_X = None
cls.covar_details = covar_details
if FLAG_TRAINING_DATA:
cls.train_X = train_X
# add simple response function
tmp_val = 2.2
def mock_response_function(covar):
"""
test response function
:param covar: torch.tensor (num_obs X num_covariates)
:return:
"""
return tmp_val
cls.sampling["response_func"] = mock_response_function
# assert
if FLAG_TRAINING_DATA:
# run test
output = cls._get_response_function_input()
assert output[0].item() == tmp_val
else:
with pytest.raises(Exception) as e:
assert output == cls._get_response_function_input()
assert str(e.value) == "'NoneType' object has no attribute 'shape'"
@pytest.mark.parametrize(
"response, kwarg_response, error_msg",
[
[torch.tensor([[2]], dtype=torch.double), ['a'], "too many dimensions 'str'"],
[torch.tensor([[2]], dtype=torch.double), [1, 2], "greattunes._observe._get_and_verify_response_input: incorrect number of variables provided. Was expecting input of size (1,1) but received torch.Size([1, 2])"],
[torch.tensor([[2]], dtype=torch.double), [1, 'a'], "must be real number, not str"],
[torch.tensor([[2, 3]], dtype=torch.double), None, "greattunes._observe._get_and_verify_response_input: incorrect number of variables provided. Was expecting input of size (1,1) but received torch.Size([1, 2])"],
[torch.tensor([[2]], dtype=torch.double), torch.tensor([[1, 2]], dtype=torch.double), "greattunes.utils.__get_response_from_kwargs: dimension mismatch in provided 'response'. Was expecting torch tensor of size (1,1) but received one of size (1, 2)."],
]
)
def test_get_and_verify_response_input_fails_wrong_input(tmp_observe_class, response, kwarg_response, error_msg,
monkeypatch):
"""
test that _get_and_verify_response_input fails for wrong inputs. Use only the "iterative" sampling option for this
test
"""
# set device for torch
mydevice = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# # define class
cls = tmp_observe_class
cls.sampling["method"] = "iterative"
cls.device = mydevice
# monkeypatch "__get_response_from_kwargs"
def mock__get_response_from_kwargs(x, device):
if isinstance(kwarg_response, list):
return torch.tensor([kwarg_response], dtype=torch.double, device=mydevice)
else:
return kwarg_response
monkeypatch.setattr(greattunes.utils, "__get_response_from_kwargs", mock__get_response_from_kwargs)
# monkeypatch _read_response_manual_input
def mock_read_response_manual_input(additional_text):
return response
monkeypatch.setattr(
cls, "_read_response_manual_input", mock_read_response_manual_input
)
# run test
with pytest.raises(Exception) as e:
output = cls._get_and_verify_response_input(response=kwarg_response)
assert str(e.value) == error_msg
@pytest.mark.parametrize("additional_text, input_data",
[
["temp", [1.1, 2.2]],
["try again", [3.1, -12.2]],
["simple try", [4.5]],
]
)
def test_observe_read_response_manual_input_unit(tmp_observe_class, additional_text, input_data, monkeypatch):
"""
test _read_response_manual_input, monkeypatching the "input" function call in the method
"""
# temp class for test
cls = tmp_observe_class
# set attribute
cls.model = {"covars_proposed_iter": 0}
# monkeypatching "input"
monkeypatch_output = ", ".join([str(x) for x in input_data]) # match data from "input" function
monkeypatch.setattr("builtins.input", lambda _: monkeypatch_output)
# run function
output = cls._read_response_manual_input(additional_text)
# assert
for it in range(len(input_data)):
assert output[0, it].item() == input_data[it]
@pytest.mark.parametrize(
"candidate",
[
torch.tensor([[2.2]], dtype=torch.double),
torch.tensor([[2.2, 3.3, -1]], dtype=torch.double),
]
)
def test_observe_print_candidate_to_prompt_works_unit(tmp_observe_class, candidate):
"""
test that given a candidate, the right string is written by the method _print_candidate_to_prompt
:param candidate (torch tensor): one-row tensor of new datapoint to be investigated
"""
# temporary class to run the test
cls = tmp_observe_class
# extend with required attributes
tmp_covars_proposed_iter = 2
cls.model = {"covars_proposed_iter": tmp_covars_proposed_iter}
# add covariate details to tmp_observe_class
covar_details = {}
for i in range(candidate.size()[1]):
key = "covar" + str(i)
val = candidate[0,i].item()
covar_details[key] = {"guess": val, "min": val-1.0, "max": val+1.0, "type": float, "columns": i}
cls.covar_details = covar_details
# run the method: generate the string to be printed
input_request = cls._print_candidate_to_prompt(candidate=candidate)
# build expected output
cand_pretty = tensor2pretty_covariate(train_X_sample=candidate, covar_details=covar_details)
new_cand_names = [i + " (" + str(covar_details[i]["type"]) + ")" for i in list(cand_pretty.columns)]
cand_pretty.columns = new_cand_names
outtext = "\tNEW datapoint to sample:\n\t" + cand_pretty.to_string(index=False).replace("\n", "\n\t")
# assert
assert input_request == outtext
@pytest.mark.parametrize(
"candidate, error_msg",
[
[torch.tensor([], dtype=torch.double), "greattunes.greattunes._observe._print_candidate_to_prompt: provided input 'candidate' is empty. Expecting torch tensor of size 1 X num_covariates"],
[None, "greattunes.greattunes._observe._print_candidate_to_prompt: provided input 'candidate' is incorrect datatype. Expecting to be of type torch.Tensor"]
]
)
def test_observe_print_candidate_to_prompt_fails_unit(tmp_observe_class, candidate, error_msg):
"""
test that _print_candidate_to_prompt throws the right error for the two cases
    :param candidate: normally a one-row torch tensor holding the new datapoint to investigate; here deliberately
        replaced by invalid values to trigger the errors
"""
# temporary class to run the test
cls = tmp_observe_class
# run _print_candidate_to_prompt method and ensure correct error returned
with pytest.raises(Exception) as e:
# run the method: generate the string to be printed
input_request = cls._print_candidate_to_prompt(candidate=candidate)
assert str(e.value) == error_msg
@pytest.mark.parametrize(
"additional_text", ["testing function", "12345_ygh", None, 22.0, [1.0, 4.4], torch.tensor([[2.2]], dtype=torch.double)]
)
def test_read_covars_manual_input(tmp_observe_class,
covar_details_mapped_covar_mapped_names_tmp_observe_class,
additional_text, monkeypatch):
"""
test reading of covars from manual input by user. Monkeypatches reliance on function 'input'
"""
covariates = [1.1, 2.2, 200, -1.7]
# temp class to execute the test
cls = tmp_observe_class
# add attribute 'initial_guess' required for '_read_covars_manual'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
covar_tensor = torch.tensor([covariates], dtype=torch.double, device=device)
cls.initial_guess = covar_tensor
# add proposed_X attributed required for '_read_covars_manual'
cls.proposed_X = covar_tensor
# add attributes defining the covariate expectation
cls.covar_details = covar_details_mapped_covar_mapped_names_tmp_observe_class[0]
cls.covar_mapped_names = covar_details_mapped_covar_mapped_names_tmp_observe_class[1]
cls.sorted_pandas_columns = covar_details_mapped_covar_mapped_names_tmp_observe_class[2]
# monkeypatch
def mock_input(x): # mock function to replace 'input' for unit testing purposes
return ", ".join([str(x) for x in covariates])
monkeypatch.setattr("builtins.input", mock_input)
# run the test
# different tests for cases where it's supposed to pass vs fail
if isinstance(additional_text, str):
covars_candidate_float_tensor = cls._read_covars_manual_input(additional_text)
print(covars_candidate_float_tensor)
# assert that the right elements are returned in 'covars_candidate_float_tensor'
for i in range(covars_candidate_float_tensor.size()[1]):
assert covars_candidate_float_tensor[0, i].item() == covariates[i]
    # cases where the type of additional_text should make the test fail
else:
with pytest.raises(AssertionError) as e:
covars_candidate_float_tensor = cls._read_covars_manual_input(additional_text)
assert str(e.value) == "greattunes._observe._read_covars_manual_input: wrong datatype of parameter 'additional_text'. Was expecting 'str' but received " + str(type(additional_text))
def test_get_and_verify_covars_input_works(tmp_observe_class, monkeypatch):
"""
test that _get_and_verify_covars_input works when providing the correct data. Monkeypatching methods
"_read_covars_manual_input" and "__validate_num_covars"
"""
# covariates to sample
covariates = [1.1, 2.2, 200, -1.7]
# device for torch tensor definitions
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# temp class to execute the test
cls = tmp_observe_class
# monkeypatch "_read_covars_manual_input"
def mock_read_covars_manual_input(x):
return torch.tensor([covariates], dtype=torch.double, device=device)
monkeypatch.setattr(cls, "_read_covars_manual_input", mock_read_covars_manual_input)
# monkeypatch "_Validators__validate_num_covars"
def mock_Validators__validate_num_covars(x):
return True
monkeypatch.setattr(cls, "_Validators__validate_num_covars", mock_Validators__validate_num_covars)
# covariate kwargs is set to None so input-based method is used
kwarg_covariates = None
# run method
covars_candidate_float_tensor = cls._get_and_verify_covars_input(covars=kwarg_covariates)
# assert the output
# assert that the right elements are returned in 'covars_candidate_float_tensor'
for i in range(covars_candidate_float_tensor.size()[1]):
assert covars_candidate_float_tensor[0, i].item() == covariates[i]
@pytest.mark.parametrize(
"covars",
[
[1.1, 2.2, 200, -1.7],
torch.tensor([[1.1, 2.2, 200, -1.7]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")),
]
)
def test_get_and_verify_covars_programmatic_works(tmp_observe_class,
covar_details_mapped_covar_mapped_names_tmp_observe_class,
covars, monkeypatch):
"""
test that _get_and_verify_covars_input works when providing the correct data programmatically. Monkeypatching
method "__validate_num_covars" and helper function "utils.__get_covars_from_kwargs"
"""
# device for torch tensor definitions
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# temp class to execute the test
cls = tmp_observe_class
# attributes
cls.covar_details = covar_details_mapped_covar_mapped_names_tmp_observe_class[0]
cls.covar_mapped_names = covar_details_mapped_covar_mapped_names_tmp_observe_class[1]
# monkeypatch "__get_covars_from_kwargs"
def mock__get_covars_from_kwargs(x):
if isinstance(covars, list):
return torch.tensor([covars], dtype=torch.double, device=device)
else:
return covars
monkeypatch.setattr(greattunes.utils, "__get_covars_from_kwargs", mock__get_covars_from_kwargs)
# monkeypatch "_Validators__validate_num_covars"
def mock_Validators__validate_num_covars(x):
return True
monkeypatch.setattr(cls, "_Validators__validate_num_covars", mock_Validators__validate_num_covars)
# run method
covars_candidate_float_tensor = cls._get_and_verify_covars_input(covars=covars)
# assert the output
# assert that the right elements are returned in 'covars_candidate_float_tensor'
for i in range(covars_candidate_float_tensor.size()[1]):
if isinstance(covars, list):
assert covars_candidate_float_tensor[0, i].item() == covars[i]
else:
assert covars_candidate_float_tensor[0, i].item() == covars[0, i].item()
@pytest.mark.parametrize(
"proposed_X",
[torch.tensor([[1.1, 2.2]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")),
None]
)
def test_get_and_verify_covars_input_fails(tmp_observe_class, proposed_X, monkeypatch):
"""
    test that _get_and_verify_covars_input fails, in both parametrized cases, when incorrect data is provided. Monkeypatching methods
"_read_covars_manual_input" and "__validate_num_covars"
"""
# device for torch tensor definitions
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# covariates to sample
covariates = [1.1, 2.2, 200, -1.7]
covars_tensor = torch.tensor([covariates], dtype=torch.double, device=device)
# temp class to execute the test
cls = tmp_observe_class
# set proposed_X attribute (required for method to work)
cls.proposed_X = proposed_X
# monkeypatch "_read_covars_manual_input"
def mock_read_covars_manual_input(x):
return covars_tensor
monkeypatch.setattr(cls, "_read_covars_manual_input", mock_read_covars_manual_input)
# monkeypatch "_Validators__validate_num_covars"
def mock_Validators__validate_num_covars(x):
return False
monkeypatch.setattr(cls, "_Validators__validate_num_covars", mock_Validators__validate_num_covars)
# expected error message returned
add_text = ""
if cls.proposed_X is not None:
add_text = " Was expecting something like '" + str(cls.proposed_X[-1]) + "', but got '" + str(covars_tensor) + "'"
error_msg = "greattunes._observe._get_and_verify_covars_input: unable to get acceptable covariate input in 3 iterations." + add_text
# covariate kwargs is set to None so input-based method is used
kwarg_covariates = None
# run method
with pytest.raises(Exception) as e:
covars_candidate_float_tensor = cls._get_and_verify_covars_input(covars=kwarg_covariates)
assert str(e.value) == error_msg
# negative tests for _get_and_verify_covars for kwargs input
@pytest.mark.parametrize(
"covars, error_msg",
[
[[1.1, 2.2, 200, -1.7], "greattunes._observe._get_and_verify_covars_input: unable to get acceptable covariate input in 3 iterations."],
[torch.tensor([[1.1, 2.2, 200, -1.7]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), "greattunes._observe._get_and_verify_covars_input: unable to get acceptable covariate input in 3 iterations."],
[torch.tensor([1.1, 2.2, 200, -1.7], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), "greattunes.utils.__get_covars_from_kwargs: dimension mismatch in provided 'covars'. Was expecting torch tensor of size (1,<num_covariates>) but received one of size (4)."], # this one fails in utils.__get_covars_from_kwargs because of wrong size of input tensor
]
)
def test_get_and_verify_covars_programmatic_fails(tmp_observe_class,
covar_details_mapped_covar_mapped_names_tmp_observe_class,
covars, error_msg, monkeypatch):
"""
test that _get_and_verify_covars_input fails when providing incorrect data programmatically. Monkeypatching
method "__validate_num_covars". Expected error is related to wrong number of elements returned
"""
# device for torch tensor definitions
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# temp class to execute the test
cls = tmp_observe_class
cls.covar_details = covar_details_mapped_covar_mapped_names_tmp_observe_class[0]
cls.covar_mapped_names = covar_details_mapped_covar_mapped_names_tmp_observe_class[1]
# monkeypatch "__get_covars_from_kwargs"
def mock__get_covars_from_kwargs(x):
if isinstance(covars, list):
return torch.tensor([covars], dtype=torch.double, device=device)
else:
return covars
monkeypatch.setattr(greattunes.utils, "__get_covars_from_kwargs", mock__get_covars_from_kwargs)
# monkeypatch "_Validators__validate_num_covars"
def mock_Validators__validate_num_covars(x):
return False
monkeypatch.setattr(cls, "_Validators__validate_num_covars", mock_Validators__validate_num_covars)
# run method
with pytest.raises(Exception) as e:
covars_candidate_float_tensor = cls._get_and_verify_covars_input(covars=covars)
assert str(e.value) == error_msg
@pytest.mark.parametrize(
"train_X, x_data, covars_proposed_iter, covars_sampled_iter, kwarg_covariates",
[
[torch.tensor([[0.1, 2.5, 12, 0.22]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), pd.DataFrame({"covar0": [0.1], "covar1": [2.5], "covar2": [12], "covar3": [0.22]}), 2, 1, None],
[torch.tensor([[0.1, 2.5, 12, 0.22]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), pd.DataFrame({"covar0": [0.1], "covar1": [2.5], "covar2": [12], "covar3": [0.22]}), 2, 1, torch.tensor([[0.1, 2.5, 12, 0.22]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))],
[torch.tensor([[0.1, 2.5, 12, 0.22]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), pd.DataFrame({"covar0": [0.1], "covar1": [2.5], "covar2": [12], "covar3": [0.22]}), 2, 1, pd.DataFrame({"covar0": [0.1], "covar1": [2.5], "covar2": [12], "covar3": [0.22]})],
[torch.tensor([[0.1, 2.5, 12, 0.22]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), pd.DataFrame({"covar0": [0.1], "covar1": [2.5], "covar2": [12], "covar3": [0.22]}), 1, 1, None],
]
)
def test_covars_datapoint_observation_unit(tmp_observe_class, train_X, x_data, covars_proposed_iter,
covars_sampled_iter, kwarg_covariates,
covar_details_mapped_covar_mapped_names_tmp_observe_class,
monkeypatch):
"""
    test that _get_covars_datapoint works. Monkeypatching method "_get_and_verify_covars_input". Also test that this
    works both when covars is provided as a kwarg and when it is not (when the covariates kwarg is set to None, a
    different method is used in _get_and_verify_covars_input; since we're monkeypatching anyway this shouldn't change
    the outcome, but we test it anyway).
"""
# device for torch tensor definitions
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# covariates to sample
covariates = [1.1, 2.2, 200, -1.7]
covars_tensor = torch.tensor([covariates], dtype=torch.double, device=device)
# temp class to execute the test
cls = tmp_observe_class
# set proposed_X attribute (required for method to work)
cls.proposed_X = train_X
cls.train_X = train_X
cls.model = {"covars_proposed_iter": covars_proposed_iter,
"covars_sampled_iter": covars_sampled_iter}
cls.x_data = x_data
cls.covar_details = covar_details_mapped_covar_mapped_names_tmp_observe_class[0]
# monkeypatch "_get_and_verify_covars_input"
def mock_get_and_verify_covars_input(covars):
return covars_tensor
monkeypatch.setattr(cls, "_get_and_verify_covars_input", mock_get_and_verify_covars_input)
# run the method being tested
cls._get_covars_datapoint(covars=kwarg_covariates)
# assert the right elements have been added
for i in range(cls.train_X.size()[1]):
assert cls.train_X[-1, i].item() == covariates[i]
# assert that counter has been updated
assert cls.model["covars_sampled_iter"] == cls.model["covars_proposed_iter"]
# only if covars_proposed_iter is ahead of sampled
if covars_proposed_iter > covars_sampled_iter:
# assert that new row has been added
assert cls.train_X.size()[0] == train_X.size()[0] + 1
elif train_X is None:
# assert that cls.train_X has been initiated
assert cls.train_X.size()[0] == 1
else:
# assert that no new row has been added
assert cls.train_X.size()[0] == train_X.size()[0]
@pytest.mark.parametrize(
"train_Y, y_data, covars_proposed_iter, response_sampled_iter, kwarg_response",
[
[torch.tensor([[0.2]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), pd.DataFrame({"Response": [0.2]}), 2, 1, None],
[torch.tensor([[0.2]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), pd.DataFrame({"Response": [0.2]}), 2, 1, [0.2]],
[torch.tensor([[0.2]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), pd.DataFrame({"Response": [0.2]}), 2, 1, torch.tensor([[0.2]], dtype=torch.double)],
[torch.tensor([[0.2]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), pd.DataFrame({"Response": [0.2]}), 2, 1, pd.DataFrame({"Response": [0.2]})],
[torch.tensor([[0.2]], dtype=torch.double, device=torch.device("cuda" if torch.cuda.is_available() else "cpu")), pd.DataFrame({"Response": [0.2]}), 1, 1, None],
[None, None, 0, 0, None]
]
)
def test_response_datapoint_observation_unit(tmp_observe_class, train_Y, y_data, covars_proposed_iter,
response_sampled_iter, kwarg_response, monkeypatch):
"""
    test that _get_response_datapoint works. Monkeypatching method "_get_and_verify_response_input". For iterative
    sampling, tests that it works both when the response is provided as a kwarg and when it is not. For kwarg-based
    input, tests that it works for all three types: list, torch tensor and pandas dataframe.
"""
# device for torch tensor definitions
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# covariates to sample
resp = [1.1]
resp_tensor = torch.tensor([resp], dtype=torch.double, device=device)
# temp class to execute the test
cls = tmp_observe_class
# adding attributes required for test to work
cls.train_Y = train_Y
cls.model = {"covars_proposed_iter": covars_proposed_iter,
"response_sampled_iter": response_sampled_iter}
cls.y_data = y_data
# monkeypatch "_get_and_verify_covars_input"
def mock_get_and_verify_response_input(response):
return resp_tensor
monkeypatch.setattr(cls, "_get_and_verify_response_input", mock_get_and_verify_response_input)
# run the method being tested
cls._get_response_datapoint(response=kwarg_response)
    # assert the right element has been added
assert cls.train_Y[-1].item() == resp[0]
assert cls.y_data["Response"].iloc[-1] == resp[0]
# assert that counter has been updated
assert cls.model["response_sampled_iter"] == cls.model["covars_proposed_iter"]
# only if covars_proposed_iter is ahead of sampled
if covars_proposed_iter > response_sampled_iter:
# assert that new row has been added
assert cls.train_Y.size()[0] == train_Y.size()[0] + 1
assert cls.y_data.shape[0] == train_Y.size()[0] + 1
elif train_Y is None:
# assert that cls.train_X has been initiated
assert cls.train_Y.size()[0] == 1
assert cls.y_data.shape[0] == 1
else:
# assert that no new row has been added
assert cls.train_Y.size()[0] == train_Y.size()[0]
assert cls.y_data.shape[0] == y_data.shape[0]
| 44.158779
| 398
| 0.696031
|
2ba07f2ab754ce5007b7f012408566167f503464
| 40,856
|
py
|
Python
|
src/sage/geometry/polyhedron/representation.py
|
fredstro/sage
|
c936d2cda81ec7ec3552a3bdb29c994b40d1bb24
|
[
"BSL-1.0"
] | null | null | null |
src/sage/geometry/polyhedron/representation.py
|
fredstro/sage
|
c936d2cda81ec7ec3552a3bdb29c994b40d1bb24
|
[
"BSL-1.0"
] | null | null | null |
src/sage/geometry/polyhedron/representation.py
|
fredstro/sage
|
c936d2cda81ec7ec3552a3bdb29c994b40d1bb24
|
[
"BSL-1.0"
] | null | null | null |
"""
H(yperplane) and V(ertex) representation objects for polyhedra
"""
#*****************************************************************************
# Copyright (C) 2008 Marshall Hampton <hamptonio@gmail.com>
# Copyright (C) 2011 Volker Braun <vbraun.name@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.structure.sage_object import SageObject
from sage.structure.element import is_Vector
from sage.rings.all import QQ, ZZ, RDF
from sage.modules.free_module_element import vector
#########################################################################
# PolyhedronRepresentation
# / \
# / \
# Hrepresentation Vrepresentation
# / \ / | \
# / \ / | \
# Inequality Equation Vertex Ray Line
class PolyhedronRepresentation(SageObject):
"""
The internal base class for all representation objects of
    ``Polyhedron`` (vertices/rays/lines and inequalities/equations)
.. note::
You should not (and cannot) instantiate it yourself. You can
only obtain them from a Polyhedron() class.
TESTS::
sage: import sage.geometry.polyhedron.representation as P
sage: P.PolyhedronRepresentation()
<class 'sage.geometry.polyhedron.representation.PolyhedronRepresentation'>
"""
# Numeric values for the output of the type() method
INEQUALITY = 0
EQUATION = 1
VERTEX = 2
RAY = 3
LINE = 4
def __len__(self):
"""
Returns the length of the representation data.
TESTS::
sage: p = Polyhedron(vertices=[[1,2,3]])
sage: v = p.Vrepresentation(0)
sage: v.__len__()
3
"""
return self._vector.degree()
def __getitem__(self, i):
"""
Supports indexing.
TESTS::
sage: p = Polyhedron(vertices=[[1,2,3]])
sage: v = p.Vrepresentation(0)
sage: v.__getitem__(1)
2
"""
return self._vector[i]
def __hash__(self):
r"""
TESTS::
sage: from sage.geometry.polyhedron.representation import Hrepresentation
sage: pr = Hrepresentation(Polyhedron(vertices = [[1,2,3]]).parent())
sage: hash(pr)
1647257843 # 32-bit
4686581268940269811 # 64-bit
"""
        # TODO: ideally the attribute self._vector should be immutable, so that
        # the line below could simply be hash(self._vector). The mutability is
        # kept because this object might be reused (see e.g.
        # Hrepresentation._set_data below).
return hash(tuple(self._vector))
def __cmp__(self, other):
"""
Compare two representation objects
They are equal if and only if they define the same
vertex/ray/line or inequality/equation in the ambient space,
regardless of the polyhedron that they belong to.
INPUT:
- ``other`` -- anything.
OUTPUT:
        One of `-1`, `0`, `+1`. The result is `0` if and only if ``other`` represents
        the same H/V-representation object.
EXAMPLES::
sage: triangle = Polyhedron([(0,0), (1,0), (0,1)])
sage: ieq = next(triangle.inequality_generator()); ieq
An inequality (1, 0) x + 0 >= 0
sage: ieq == copy(ieq)
True
sage: cmp(ieq, copy(ieq))
0
sage: cmp(ieq, 'a string')
-1
sage: square = Polyhedron([(0,0), (1,0), (0,1), (1,1)], base_ring=QQ)
sage: cmp(square.Vrepresentation(0), triangle.Vrepresentation(0))
0
sage: ieq = square.Hrepresentation(0); ieq.vector()
(0, 1, 0)
sage: abs(cmp(ieq, Polyhedron([(0,1,0)]).Vrepresentation(0)))
1
"""
if not isinstance(other, PolyhedronRepresentation):
return -1
return cmp(type(self), type(other)) or cmp(self._vector, other._vector)
def vector(self, base_ring=None):
"""
Returns the vector representation of the H/V-representation object.
INPUT:
- ``base_ring`` -- the base ring of the vector.
OUTPUT:
For a V-representation object, a vector of length
:meth:`~sage.geometry.polyhedron.base.Polyhedron_base.ambient_dim`. For
a H-representation object, a vector of length
:meth:`~sage.geometry.polyhedron.base.Polyhedron_base.ambient_dim`
+ 1.
EXAMPLES::
sage: s = polytopes.cuboctahedron()
sage: v = next(s.vertex_generator())
sage: v
A vertex at (-1, -1, 0)
sage: v.vector()
(-1, -1, 0)
sage: v()
(-1, -1, 0)
sage: type(v())
<type 'sage.modules.vector_integer_dense.Vector_integer_dense'>
Conversion to a different base ring can be forced with the optional argument::
sage: v.vector(RDF)
(-1.0, -1.0, 0.0)
sage: vector(RDF, v)
(-1.0, -1.0, 0.0)
"""
if (base_ring is None) or (base_ring is self._base_ring):
return self._vector
else:
return vector(base_ring, self._vector)
_vector_ = vector
def polyhedron(self):
"""
Returns the underlying polyhedron.
TESTS::
sage: p = Polyhedron(vertices=[[1,2,3]])
sage: v = p.Vrepresentation(0)
sage: v.polyhedron()
A 0-dimensional polyhedron in ZZ^3 defined as the convex hull of 1 vertex
"""
return self._polyhedron
def __call__(self):
"""
Returns the vector representation of the representation
object. Shorthand for the vector() method.
TESTS::
sage: p = Polyhedron(vertices=[[1,2,3]])
sage: v = p.Vrepresentation(0)
sage: v.__call__()
(1, 2, 3)
"""
return self._vector
def index(self):
"""
Returns an arbitrary but fixed number according to the internal
storage order.
NOTES:
H-representation and V-representation objects are enumerated
independently. That is, amongst all vertices/rays/lines there
        will be one with ``index()==0``, and amongst all
inequalities/equations there will be one with ``index()==0``,
unless the polyhedron is empty or spans the whole space.
EXAMPLES::
sage: s = Polyhedron(vertices=[[1],[-1]])
sage: first_vertex = next(s.vertex_generator())
sage: first_vertex.index()
0
sage: first_vertex == s.Vrepresentation(0)
True
"""
return self._index
def __add__(self, coordinate_list):
"""
Return the coordinates concatenated with ``coordinate_list``.
INPUT:
- ``coordinate_list`` -- a list.
OUTPUT:
The coordinates of ``self`` concatenated with ``coordinate_list``.
EXAMPLES::
sage: p = Polyhedron(ieqs = [[0,1,0],[0,0,1],[1,-1,0,],[1,0,-1]])
sage: v = p.Vrepresentation(0); v
A vertex at (1, 0)
sage: v + [4,5]
[1, 0, 4, 5]
"""
if not isinstance(coordinate_list, list):
raise TypeError('Can only concatenate with a list of coordinates')
return list(self) + coordinate_list
def __radd__(self, coordinate_list):
"""
Return ``coordinate_list`` concatenated with the coordinates.
INPUT:
- ``coordinate_list`` -- a list.
OUTPUT:
``coordinate_list`` concatenated with the coordinates of ``self``.
EXAMPLES::
sage: p = Polyhedron(ieqs = [[0,1,0],[0,0,1],[1,-1,0,],[1,0,-1]])
sage: v = p.Vrepresentation(0); v
A vertex at (1, 0)
sage: [4,5] + v
[4, 5, 1, 0]
"""
if not isinstance(coordinate_list, list):
raise TypeError('Can only concatenate with a list of coordinates')
return coordinate_list + list(self)
def count(self, i):
"""
Count the number of occurrences of ``i`` in the coordinates.
INPUT:
- ``i`` -- Anything.
OUTPUT:
Integer. The number of occurrences of ``i`` in the coordinates.
EXAMPLES::
sage: p = Polyhedron(vertices=[(0,1,1,2,1)])
sage: v = p.Vrepresentation(0); v
A vertex at (0, 1, 1, 2, 1)
sage: v.count(1)
3
"""
return sum([1 for j in self if i==j])
class Hrepresentation(PolyhedronRepresentation):
"""
The internal base class for H-representation objects of
a polyhedron. Inherits from ``PolyhedronRepresentation``.
"""
def __init__(self, polyhedron_parent):
"""
Initializes the PolyhedronRepresentation object.
TESTS::
sage: from sage.geometry.polyhedron.representation import Hrepresentation
sage: pr = Hrepresentation(Polyhedron(vertices = [[1,2,3]]).parent())
sage: tuple(pr)
(0, 0, 0, 0)
sage: TestSuite(pr).run(skip='_test_pickling')
"""
self._polyhedron_parent = polyhedron_parent
self._base_ring = polyhedron_parent.base_ring()
self._vector = polyhedron_parent.Hrepresentation_space()(0)
self._A = polyhedron_parent.ambient_space()(0)
self._b = polyhedron_parent.base_ring()(0)
self._index = 0
def _set_data(self, polyhedron, data):
"""
Initialization function.
The H/V-representation objects are kept in a pool, and this
function is used to reassign new values to already existing
(but unused) objects. You must not call this function on
objects that are in normal use.
INPUT:
- ``polyhedron`` -- the new polyhedron.
- ``data`` -- the H-representation data.
TESTS::
sage: p = Polyhedron(ieqs = [[0,1,0],[0,0,1],[1,-1,0,],[1,0,-1]])
sage: pH = p.Hrepresentation(0) # indirect doctest
sage: TestSuite(pH).run(skip='_test_pickling')
"""
assert polyhedron.parent() is self._polyhedron_parent
if len(data) != self._vector.degree():
raise ValueError('H-representation data requires a list of length ambient_dim+1')
self._vector[:] = data
self._A[:] = data[1:]
self._b = self._base_ring(data[0])
self._index = len(polyhedron._Hrepresentation)
polyhedron._Hrepresentation.append(self)
self._polyhedron = polyhedron
def is_H(self):
"""
Returns True if the object is part of a H-representation
(inequality or equation).
EXAMPLES::
sage: p = Polyhedron(ieqs = [[0,1,0],[0,0,1],[1,-1,0,],[1,0,-1]])
sage: pH = p.Hrepresentation(0)
sage: pH.is_H()
True
"""
return True
def is_inequality(self):
"""
Returns True if the object is an inequality of the H-representation.
EXAMPLES::
sage: p = Polyhedron(ieqs = [[0,1,0],[0,0,1],[1,-1,0,],[1,0,-1]])
sage: pH = p.Hrepresentation(0)
sage: pH.is_inequality()
True
"""
return False
def is_equation(self):
"""
Returns True if the object is an equation of the H-representation.
EXAMPLES::
sage: p = Polyhedron(ieqs = [[0,1,0],[0,0,1],[1,-1,0,],[1,0,-1]], eqns = [[1,1,-1]])
sage: pH = p.Hrepresentation(0)
sage: pH.is_equation()
True
"""
return False
def A(self):
r"""
Returns the coefficient vector `A` in `A\vec{x}+b`.
EXAMPLES::
sage: p = Polyhedron(ieqs = [[0,1,0],[0,0,1],[1,-1,0,],[1,0,-1]])
sage: pH = p.Hrepresentation(2)
sage: pH.A()
(1, 0)
"""
return self._A
def b(self):
r"""
Returns the constant `b` in `A\vec{x}+b`.
EXAMPLES::
sage: p = Polyhedron(ieqs = [[0,1,0],[0,0,1],[1,-1,0,],[1,0,-1]])
sage: pH = p.Hrepresentation(2)
sage: pH.b()
0
"""
return self._b
def neighbors(self):
"""
Iterate over the adjacent facets (i.e. inequalities/equations)
EXAMPLES::
sage: p = Polyhedron(ieqs = [[0,0,0,1],[0,0,1,0,],[0,1,0,0],
... [1,-1,0,0],[1,0,-1,0,],[1,0,0,-1]])
sage: pH = p.Hrepresentation(0)
sage: a = list(pH.neighbors())
sage: a[0]
An inequality (0, -1, 0) x + 1 >= 0
sage: list(a[0])
[1, 0, -1, 0]
"""
adjacency_matrix = self.polyhedron().facet_adjacency_matrix()
for x in self.polyhedron().Hrep_generator():
if adjacency_matrix[self.index(), x.index()] == 1:
yield x
def adjacent(self):
"""
Alias for neighbors().
TESTS::
sage: p = Polyhedron(ieqs = [[0,0,0,2],[0,0,1,0,],[0,10,0,0],
... [1,-1,0,0],[1,0,-1,0,],[1,0,0,-1]])
sage: pH = p.Hrepresentation(0)
sage: a = list(pH.neighbors())
sage: b = list(pH.adjacent())
sage: a==b
True
"""
return self.neighbors()
def is_incident(self, Vobj):
"""
Returns whether the incidence matrix element (Vobj,self) == 1
EXAMPLES::
sage: p = Polyhedron(ieqs = [[0,0,0,1],[0,0,1,0,],[0,1,0,0],
... [1,-1,0,0],[1,0,-1,0,],[1,0,0,-1]])
sage: pH = p.Hrepresentation(0)
sage: pH.is_incident(p.Vrepresentation(1))
True
sage: pH.is_incident(p.Vrepresentation(5))
False
"""
return self.polyhedron().incidence_matrix()[Vobj.index(), self.index()] == 1
def __mul__(self, Vobj):
"""
Shorthand for ``self.eval(x)``
EXAMPLES::
sage: p = Polyhedron(ieqs = [[0,0,0,1],[0,0,1,0,],[0,1,0,0],
... [1,-1,0,0],[1,0,-1,0,],[1,0,0,-1]])
sage: pH = p.Hrepresentation(0)
sage: pH*p.Vrepresentation(5)
1
"""
return self.eval(Vobj)
def eval(self, Vobj):
r"""
Evaluates the left hand side `A\vec{x}+b` on the given
vertex/ray/line.
NOTES:
* Evaluating on a vertex returns `A\vec{x}+b`
* Evaluating on a ray returns `A\vec{r}`. Only the sign or
whether it is zero is meaningful.
* Evaluating on a line returns `A\vec{l}`. Only whether it
is zero or not is meaningful.
EXAMPLES::
sage: triangle = Polyhedron(vertices=[[1,0],[0,1],[-1,-1]])
sage: ineq = next(triangle.inequality_generator())
sage: ineq
An inequality (2, -1) x + 1 >= 0
sage: [ ineq.eval(v) for v in triangle.vertex_generator() ]
[0, 0, 3]
sage: [ ineq * v for v in triangle.vertex_generator() ]
[0, 0, 3]
If you pass a vector, it is assumed to be the coordinate vector of a point::
sage: ineq.eval( vector(ZZ, [3,2]) )
5
"""
if is_Vector(Vobj):
return self.A() * Vobj + self.b()
return Vobj.evaluated_on(self)
def incident(self):
"""
Returns a generator for the incident H-representation objects,
that is, the vertices/rays/lines satisfying the (in)equality.
EXAMPLES::
sage: triangle = Polyhedron(vertices=[[1,0],[0,1],[-1,-1]])
sage: ineq = next(triangle.inequality_generator())
sage: ineq
An inequality (2, -1) x + 1 >= 0
sage: [ v for v in ineq.incident()]
[A vertex at (-1, -1), A vertex at (0, 1)]
sage: p = Polyhedron(vertices=[[0,0,0],[0,1,0],[0,0,1]], rays=[[1,-1,-1]])
sage: ineq = p.Hrepresentation(2)
sage: ineq
An inequality (1, 0, 1) x + 0 >= 0
sage: [ x for x in ineq.incident() ]
[A vertex at (0, 0, 0),
A vertex at (0, 1, 0),
A ray in the direction (1, -1, -1)]
"""
incidence_matrix = self.polyhedron().incidence_matrix()
for V in self.polyhedron().Vrep_generator():
if incidence_matrix[V.index(), self.index()] == 1:
yield V
class Inequality(Hrepresentation):
"""
A linear inequality (supporting hyperplane) of the
polyhedron. Inherits from ``Hrepresentation``.
"""
def type(self):
r"""
Returns the type (equation/inequality/vertex/ray/line) as an
integer.
OUTPUT:
Integer. One of ``PolyhedronRepresentation.INEQUALITY``,
``.EQUATION``, ``.VERTEX``, ``.RAY``, or ``.LINE``.
EXAMPLES::
sage: p = Polyhedron(vertices = [[0,0,0],[1,1,0],[1,2,0]])
sage: repr_obj = next(p.inequality_generator())
sage: repr_obj.type()
0
sage: repr_obj.type() == repr_obj.INEQUALITY
True
sage: repr_obj.type() == repr_obj.EQUATION
False
sage: repr_obj.type() == repr_obj.VERTEX
False
sage: repr_obj.type() == repr_obj.RAY
False
sage: repr_obj.type() == repr_obj.LINE
False
"""
return self.INEQUALITY
def is_inequality(self):
"""
Returns True since this is, by construction, an inequality.
EXAMPLES::
sage: p = Polyhedron(vertices = [[0,0,0],[1,1,0],[1,2,0]])
sage: a = next(p.inequality_generator())
sage: a.is_inequality()
True
"""
return True
def _repr_(self):
"""
The string representation of the inequality.
EXAMPLES::
sage: p = Polyhedron(vertices = [[0,0,0],[1,1,0],[1,2,0]])
sage: a = next(p.inequality_generator())
sage: a._repr_()
'An inequality (-1, 1, 0) x + 0 >= 0'
sage: Polyhedron(ieqs=[(1,-1),(-1,2)]).Hrepresentation()
(An inequality (-1) x + 1 >= 0, An inequality (2) x - 1 >= 0)
sage: Polyhedron(eqns=[(1,0)]).Hrepresentation()
(An equation -1 == 0,)
sage: Polyhedron(eqns=[(-1,0)]).Hrepresentation()
(An equation -1 == 0,)
"""
s = 'An inequality '
have_A = not self.A().is_zero()
if have_A:
s += repr(self.A()) + ' x '
if self.b()>=0:
if have_A:
s += '+'
else:
s += '-'
if have_A:
s += ' '
s += repr(abs(self.b())) + ' >= 0'
return s
def contains(self, Vobj):
"""
Tests whether the halfspace (including its boundary) defined
by the inequality contains the given vertex/ray/line.
EXAMPLES::
sage: p = polytopes.cross_polytope(3)
sage: i1 = next(p.inequality_generator())
sage: [i1.contains(q) for q in p.vertex_generator()]
[True, True, True, True, True, True]
sage: p2 = 3*polytopes.hypercube(3)
sage: [i1.contains(q) for q in p2.vertex_generator()]
[True, False, False, False, True, True, True, False]
"""
try:
if Vobj.is_vector(): # assume we were passed a point
return self.polyhedron()._is_nonneg( self.eval(Vobj) )
except AttributeError:
pass
if Vobj.is_line():
return self.polyhedron()._is_zero( self.eval(Vobj) )
else:
return self.polyhedron()._is_nonneg( self.eval(Vobj) )
def interior_contains(self, Vobj):
"""
Tests whether the interior of the halfspace (excluding its
boundary) defined by the inequality contains the given
vertex/ray/line.
EXAMPLES::
sage: p = polytopes.cross_polytope(3)
sage: i1 = next(p.inequality_generator())
sage: [i1.interior_contains(q) for q in p.vertex_generator()]
[False, True, True, False, False, True]
sage: p2 = 3*polytopes.hypercube(3)
sage: [i1.interior_contains(q) for q in p2.vertex_generator()]
[True, False, False, False, True, True, True, False]
If you pass a vector, it is assumed to be the coordinate vector of a point::
sage: P = Polyhedron(vertices=[[1,1],[1,-1],[-1,1],[-1,-1]])
sage: p = vector(ZZ, [1,0] )
sage: [ ieq.interior_contains(p) for ieq in P.inequality_generator() ]
[True, True, False, True]
"""
try:
if Vobj.is_vector(): # assume we were passed a point
return self.polyhedron()._is_positive( self.eval(Vobj) )
except AttributeError:
pass
if Vobj.is_line():
return self.polyhedron()._is_zero( self.eval(Vobj) )
elif Vobj.is_vertex():
return self.polyhedron()._is_positive( self.eval(Vobj) )
else: # Vobj.is_ray()
return self.polyhedron()._is_nonneg( self.eval(Vobj) )
class Equation(Hrepresentation):
"""
A linear equation of the polyhedron. That is, the polyhedron is
strictly smaller-dimensional than the ambient space, and contained
in this hyperplane. Inherits from ``Hrepresentation``.
"""
def type(self):
r"""
Returns the type (equation/inequality/vertex/ray/line) as an
integer.
OUTPUT:
Integer. One of ``PolyhedronRepresentation.INEQUALITY``,
``.EQUATION``, ``.VERTEX``, ``.RAY``, or ``.LINE``.
EXAMPLES::
sage: p = Polyhedron(vertices = [[0,0,0],[1,1,0],[1,2,0]])
sage: repr_obj = next(p.equation_generator())
sage: repr_obj.type()
1
sage: repr_obj.type() == repr_obj.INEQUALITY
False
sage: repr_obj.type() == repr_obj.EQUATION
True
sage: repr_obj.type() == repr_obj.VERTEX
False
sage: repr_obj.type() == repr_obj.RAY
False
sage: repr_obj.type() == repr_obj.LINE
False
"""
return self.EQUATION
def is_equation(self):
"""
Tests if this object is an equation. By construction, it must be.
TESTS::
sage: p = Polyhedron(vertices = [[0,0,0],[1,1,0],[1,2,0]])
sage: a = next(p.equation_generator())
sage: a.is_equation()
True
"""
return True
def _repr_(self):
"""
A string representation of this object.
TESTS::
sage: p = Polyhedron(vertices = [[0,0,0],[1,1,0],[1,2,0]])
sage: a = next(p.equation_generator())
sage: a._repr_()
'An equation (0, 0, 1) x + 0 == 0'
sage: Polyhedron().Hrepresentation(0)
An equation -1 == 0
"""
s = 'An equation '
have_A = not self.A().is_zero()
if have_A:
s += repr(self.A()) + ' x '
if self.b()>=0:
if have_A:
s += '+'
else:
s += '-'
if have_A:
s += ' '
s += repr(abs(self.b())) + ' == 0'
return s
def contains(self, Vobj):
"""
Tests whether the hyperplane defined by the equation contains
the given vertex/ray/line.
EXAMPLES::
sage: p = Polyhedron(vertices = [[0,0,0],[1,1,0],[1,2,0]])
sage: v = next(p.vertex_generator())
sage: v
A vertex at (0, 0, 0)
sage: a = next(p.equation_generator())
sage: a
An equation (0, 0, 1) x + 0 == 0
sage: a.contains(v)
True
"""
return self.polyhedron()._is_zero( self.eval(Vobj) )
def interior_contains(self, Vobj):
"""
Tests whether the interior of the halfspace (excluding its
        boundary) defined by the equation contains the given
vertex/ray/line.
NOTE:
Returns False for any equation.
EXAMPLES::
sage: p = Polyhedron(vertices = [[0,0,0],[1,1,0],[1,2,0]])
sage: v = next(p.vertex_generator())
sage: v
A vertex at (0, 0, 0)
sage: a = next(p.equation_generator())
sage: a
An equation (0, 0, 1) x + 0 == 0
sage: a.interior_contains(v)
False
"""
return False
class Vrepresentation(PolyhedronRepresentation):
"""
The base class for V-representation objects of a
polyhedron. Inherits from ``PolyhedronRepresentation``.
"""
def __init__(self, polyhedron_parent):
"""
Initializes the PolyhedronRepresentation object.
TESTS::
sage: p = Polyhedron(vertices = [[0,0,0],[1,1,0],[1,2,0]])
sage: a = next(p.inequality_generator())
sage: a
An inequality (-1, 1, 0) x + 0 >= 0
sage: TestSuite(a).run(skip='_test_pickling')
"""
self._polyhedron_parent = polyhedron_parent
self._base_ring = polyhedron_parent.base_ring()
self._vector = polyhedron_parent.Vrepresentation_space()(0)
self._index = 0
def _set_data(self, polyhedron, data):
"""
Initialization function.
The H/V-representation objects are kept in a pool, and this
function is used to reassign new values to already existing
(but unused) objects. You must not call this function on
objects that are in normal use.
INPUT:
- ``polyhedron`` -- the new polyhedron.
- ``data`` -- the V-representation data.
TESTS::
sage: p = Polyhedron(ieqs = [[0,1,0],[0,0,1],[1,-1,0,],[1,0,-1]])
sage: pV = p.Vrepresentation(0) # indirect doctest
sage: TestSuite(pV).run(skip='_test_pickling')
"""
assert polyhedron.parent() is self._polyhedron_parent
if len(data) != self._vector.degree():
raise ValueError('V-representation data requires a list of length ambient_dim')
self._vector[:] = data
self._index = len(polyhedron._Vrepresentation)
polyhedron._Vrepresentation.append(self)
self._polyhedron = polyhedron
def is_V(self):
"""
Returns True if the object is part of a V-representation
(a vertex, ray, or line).
EXAMPLES::
sage: p = Polyhedron(vertices = [[0,0],[1,0],[0,3],[1,3]])
sage: v = next(p.vertex_generator())
sage: v.is_V()
True
"""
return True
def is_vertex(self):
"""
Returns True if the object is a vertex of the V-representation.
This method is over-ridden by the corresponding method in the
derived class Vertex.
EXAMPLES::
sage: p = Polyhedron(vertices = [[0,0],[1,0],[0,3],[1,3]])
sage: v = next(p.vertex_generator())
sage: v.is_vertex()
True
sage: p = Polyhedron(ieqs = [[1, 0, 0, 0, 1], [1, 1, 0, 0, 0], [1, 0, 1, 0, 0]])
sage: r1 = next(p.ray_generator())
sage: r1.is_vertex()
False
"""
return False
def is_ray(self):
"""
Returns True if the object is a ray of the V-representation.
This method is over-ridden by the corresponding method in the
derived class Ray.
EXAMPLES::
sage: p = Polyhedron(ieqs = [[1, 0, 0, 0, 1], [1, 1, 0, 0, 0], [1, 0, 1, 0, 0]])
sage: r1 = next(p.ray_generator())
sage: r1.is_ray()
True
sage: v1 = next(p.vertex_generator())
sage: v1
A vertex at (-1, -1, 0, -1)
sage: v1.is_ray()
False
"""
return False
def is_line(self):
"""
Returns True if the object is a line of the V-representation.
This method is over-ridden by the corresponding method in the
derived class Line.
EXAMPLES::
sage: p = Polyhedron(ieqs = [[1, 0, 0, 0, 1], [1, 1, 0, 0, 0], [1, 0, 1, 0, 0]])
sage: line1 = next(p.line_generator())
sage: line1.is_line()
True
sage: v1 = next(p.vertex_generator())
sage: v1.is_line()
False
"""
return False
def neighbors(self):
"""
Returns a generator for the adjacent vertices/rays/lines.
EXAMPLES::
sage: p = Polyhedron(vertices = [[0,0],[1,0],[0,3],[1,4]])
sage: v = next(p.vertex_generator())
sage: next(v.neighbors())
A vertex at (0, 3)
"""
adjacency_matrix = self.polyhedron().vertex_adjacency_matrix()
for x in self.polyhedron().Vrep_generator():
if adjacency_matrix[self.index(), x.index()] == 1:
yield x
def adjacent(self):
"""
Alias for neighbors().
TESTS::
sage: p = Polyhedron(vertices = [[0,0],[1,0],[0,3],[1,4]])
sage: v = next(p.vertex_generator())
sage: a = next(v.neighbors())
sage: b = next(v.adjacent())
sage: a==b
True
"""
return self.neighbors()
def is_incident(self, Hobj):
"""
Returns whether the incidence matrix element (self,Hobj) == 1
EXAMPLES::
sage: p = polytopes.hypercube(3)
sage: h1 = next(p.inequality_generator())
sage: h1
An inequality (0, 0, -1) x + 1 >= 0
sage: v1 = next(p.vertex_generator())
sage: v1
A vertex at (-1, -1, -1)
sage: v1.is_incident(h1)
False
"""
return self.polyhedron().incidence_matrix()[self.index(), Hobj.index()] == 1
def __mul__(self, Hobj):
"""
Shorthand for self.evaluated_on(Hobj)
TESTS::
sage: p = polytopes.hypercube(3)
sage: h1 = next(p.inequality_generator())
sage: v1 = next(p.vertex_generator())
sage: v1.__mul__(h1)
2
"""
return self.evaluated_on(Hobj)
def incident(self):
"""
Returns a generator for the equations/inequalities that are satisfied on the given
vertex/ray/line.
EXAMPLES::
sage: triangle = Polyhedron(vertices=[[1,0],[0,1],[-1,-1]])
sage: ineq = next(triangle.inequality_generator())
sage: ineq
An inequality (2, -1) x + 1 >= 0
sage: [ v for v in ineq.incident()]
[A vertex at (-1, -1), A vertex at (0, 1)]
sage: p = Polyhedron(vertices=[[0,0,0],[0,1,0],[0,0,1]], rays=[[1,-1,-1]])
sage: ineq = p.Hrepresentation(2)
sage: ineq
An inequality (1, 0, 1) x + 0 >= 0
sage: [ x for x in ineq.incident() ]
[A vertex at (0, 0, 0),
A vertex at (0, 1, 0),
A ray in the direction (1, -1, -1)]
"""
incidence_matrix = self.polyhedron().incidence_matrix()
for H in self.polyhedron().Hrep_generator():
if incidence_matrix[self.index(), H.index()] == 1:
yield H
class Vertex(Vrepresentation):
"""
A vertex of the polyhedron. Inherits from ``Vrepresentation``.
"""
def type(self):
r"""
Returns the type (equation/inequality/vertex/ray/line) as an
integer.
OUTPUT:
Integer. One of ``PolyhedronRepresentation.INEQUALITY``,
``.EQUATION``, ``.VERTEX``, ``.RAY``, or ``.LINE``.
EXAMPLES::
sage: p = Polyhedron(vertices = [[0,0,0],[1,1,0],[1,2,0]])
sage: repr_obj = next(p.vertex_generator())
sage: repr_obj.type()
2
sage: repr_obj.type() == repr_obj.INEQUALITY
False
sage: repr_obj.type() == repr_obj.EQUATION
False
sage: repr_obj.type() == repr_obj.VERTEX
True
sage: repr_obj.type() == repr_obj.RAY
False
sage: repr_obj.type() == repr_obj.LINE
False
"""
return self.VERTEX
def is_vertex(self):
"""
Tests if this object is a vertex. By construction it always is.
EXAMPLES::
sage: p = Polyhedron(ieqs = [[0,0,1],[0,1,0],[1,-1,0]])
sage: a = next(p.vertex_generator())
sage: a.is_vertex()
True
"""
return True
def _repr_(self):
"""
Returns a string representation of the vertex.
OUTPUT:
String.
TESTS::
sage: p = Polyhedron(ieqs = [[0,0,1],[0,1,0],[1,-1,0]])
sage: v = next(p.vertex_generator())
sage: v.__repr__()
'A vertex at (1, 0)'
"""
        return 'A vertex at ' + repr(self.vector())
def homogeneous_vector(self, base_ring=None):
"""
Return homogeneous coordinates for this vertex.
Since a vertex is given by an affine point, this is the vector
with a 1 appended.
INPUT:
- ``base_ring`` -- the base ring of the vector.
EXAMPLES::
sage: P = Polyhedron(vertices=[(2,0)], rays=[(1,0)], lines=[(3,2)])
sage: P.vertices()[0].homogeneous_vector()
(2, 0, 1)
sage: P.vertices()[0].homogeneous_vector(RDF)
(2.0, 0.0, 1.0)
"""
v = list(self._vector) + [1]
return vector(base_ring or self._base_ring, v)
def evaluated_on(self, Hobj):
r"""
Returns `A\vec{x}+b`
EXAMPLES::
sage: p = polytopes.hypercube(3)
sage: v = next(p.vertex_generator())
sage: h = next(p.inequality_generator())
sage: v
A vertex at (-1, -1, -1)
sage: h
An inequality (0, 0, -1) x + 1 >= 0
sage: v.evaluated_on(h)
2
"""
return Hobj.A() * self.vector() + Hobj.b()
def is_integral(self):
r"""
Return whether the coordinates of the vertex are all integral.
OUTPUT:
Boolean.
EXAMPLES::
sage: p = Polyhedron([(1/2,3,5), (0,0,0), (2,3,7)])
sage: [ v.is_integral() for v in p.vertex_generator() ]
[True, False, True]
"""
return (self._base_ring is ZZ) or all(x in ZZ for x in self)
class Ray(Vrepresentation):
"""
A ray of the polyhedron. Inherits from ``Vrepresentation``.
"""
def type(self):
r"""
Returns the type (equation/inequality/vertex/ray/line) as an
integer.
OUTPUT:
Integer. One of ``PolyhedronRepresentation.INEQUALITY``,
``.EQUATION``, ``.VERTEX``, ``.RAY``, or ``.LINE``.
EXAMPLES::
sage: p = Polyhedron(ieqs = [[0,0,1],[0,1,0],[1,-1,0]])
sage: repr_obj = next(p.ray_generator())
sage: repr_obj.type()
3
sage: repr_obj.type() == repr_obj.INEQUALITY
False
sage: repr_obj.type() == repr_obj.EQUATION
False
sage: repr_obj.type() == repr_obj.VERTEX
False
sage: repr_obj.type() == repr_obj.RAY
True
sage: repr_obj.type() == repr_obj.LINE
False
"""
return self.RAY
def is_ray(self):
"""
Tests if this object is a ray. Always True by construction.
EXAMPLES::
sage: p = Polyhedron(ieqs = [[0,0,1],[0,1,0],[1,-1,0]])
sage: a = next(p.ray_generator())
sage: a.is_ray()
True
"""
return True
def _repr_(self):
"""
A string representation of the ray.
TESTS::
sage: p = Polyhedron(ieqs = [[0,0,1],[0,1,0],[1,-1,0]])
sage: a = next(p.ray_generator())
sage: a._repr_()
'A ray in the direction (0, 1)'
"""
        return 'A ray in the direction ' + repr(self.vector())
def homogeneous_vector(self, base_ring=None):
"""
Return homogeneous coordinates for this ray.
Since a ray is given by a direction, this is the vector with a
0 appended.
INPUT:
- ``base_ring`` -- the base ring of the vector.
EXAMPLES::
sage: P = Polyhedron(vertices=[(2,0)], rays=[(1,0)], lines=[(3,2)])
sage: P.rays()[0].homogeneous_vector()
(1, 0, 0)
sage: P.rays()[0].homogeneous_vector(RDF)
(1.0, 0.0, 0.0)
"""
v = list(self._vector) + [0]
return vector(base_ring or self._base_ring, v)
def evaluated_on(self, Hobj):
r"""
Returns `A\vec{r}`
EXAMPLES::
sage: p = Polyhedron(ieqs = [[0,0,1],[0,1,0],[1,-1,0]])
sage: a = next(p.ray_generator())
sage: h = next(p.inequality_generator())
sage: a.evaluated_on(h)
0
"""
return Hobj.A() * self.vector()
class Line(Vrepresentation):
r"""
A line (Minkowski summand `\simeq\RR`) of the
polyhedron. Inherits from ``Vrepresentation``.
"""
def type(self):
r"""
Returns the type (equation/inequality/vertex/ray/line) as an
integer.
OUTPUT:
Integer. One of ``PolyhedronRepresentation.INEQUALITY``,
``.EQUATION``, ``.VERTEX``, ``.RAY``, or ``.LINE``.
EXAMPLES::
sage: p = Polyhedron(ieqs = [[1, 0, 0, 1],[1,1,0,0]])
sage: repr_obj = next(p.line_generator())
sage: repr_obj.type()
4
sage: repr_obj.type() == repr_obj.INEQUALITY
False
sage: repr_obj.type() == repr_obj.EQUATION
False
sage: repr_obj.type() == repr_obj.VERTEX
False
sage: repr_obj.type() == repr_obj.RAY
False
sage: repr_obj.type() == repr_obj.LINE
True
"""
return self.LINE
def is_line(self):
"""
Tests if the object is a line. By construction it must be.
TESTS::
sage: p = Polyhedron(ieqs = [[1, 0, 0, 1],[1,1,0,0]])
sage: a = next(p.line_generator())
sage: a.is_line()
True
"""
return True
def _repr_(self):
"""
A string representation of the line.
TESTS::
sage: p = Polyhedron(ieqs = [[1, 0, 0, 1],[1,1,0,0]])
sage: a = next(p.line_generator())
sage: a.__repr__()
'A line in the direction (0, 1, 0)'
"""
        return 'A line in the direction ' + repr(self.vector())
def homogeneous_vector(self, base_ring=None):
"""
Return homogeneous coordinates for this line.
Since a line is given by a direction, this is the vector with a
0 appended.
INPUT:
- ``base_ring`` -- the base ring of the vector.
EXAMPLES::
sage: P = Polyhedron(vertices=[(2,0)], rays=[(1,0)], lines=[(3,2)])
sage: P.lines()[0].homogeneous_vector()
(3, 2, 0)
sage: P.lines()[0].homogeneous_vector(RDF)
(3.0, 2.0, 0.0)
"""
v = list(self._vector) + [0]
return vector(base_ring or self._base_ring, v)
def evaluated_on(self, Hobj):
r"""
Returns `A\vec{\ell}`
EXAMPLES::
sage: p = Polyhedron(ieqs = [[1, 0, 0, 1],[1,1,0,0]])
sage: a = next(p.line_generator())
sage: h = next(p.inequality_generator())
sage: a.evaluated_on(h)
0
"""
return Hobj.A() * self.vector()
| 30.218935
| 96
| 0.513731
|
adf1932bdbe321ed06a432af9033777d5ce0f624
| 2,118
|
py
|
Python
|
test/integration_test/tester/node.py
|
heshu-by/likelib-ws
|
85987d328dc274622f4b758afa1b6af43d15564f
|
[
"Apache-2.0"
] | null | null | null |
test/integration_test/tester/node.py
|
heshu-by/likelib-ws
|
85987d328dc274622f4b758afa1b6af43d15564f
|
[
"Apache-2.0"
] | null | null | null |
test/integration_test/tester/node.py
|
heshu-by/likelib-ws
|
85987d328dc274622f4b758afa1b6af43d15564f
|
[
"Apache-2.0"
] | null | null | null |
import signal
import subprocess
import time
from .base import Logger, LogicException, BadResultException
class Node:
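    """Thin wrapper around a node process: launches the configured binary and shuts it down via SIGINT, escalating to SIGKILL on timeout."""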
def __init__(self, *, name: str, work_dir: str, config_file_path: str, node_file_path: str, logger: Logger):
self.name = name
self.work_dir = work_dir
self.config_file_path = config_file_path
self.node_file_path = node_file_path
self.logger = logger
self.process = None
self.is_running = False
self.pid = -1
def start(self, *, startup_time: int) -> None:
if self.is_running:
raise LogicException(f"{self.name} - Process already started")
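        # Launch the node binary with the given config file; the node's own stdout/stderr are discarded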
self.process = subprocess.Popen([self.node_file_path, "--config", self.config_file_path],
cwd=self.work_dir, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
if self.process.poll() is None:
self.is_running = True
self.pid = self.process.pid
self.logger.info(f"{self.name} - start node(pid:{self.pid}) with work directory: {self.work_dir}")
else:
self.is_running = False
self.process.kill()
self.logger.error(f"{self.name} - failed running node with work directory:{self.work_dir}")
raise BadResultException(f"{self.name} - process failed to start")
time.sleep(startup_time)
def stop(self, *, shutdown_timeout: int) -> None:
if self.is_running:
self.logger.info(f"{self.name} - try to close node with work_dir {self.work_dir}")
self.process.send_signal(signal.SIGINT)
try:
self.process.wait(timeout=shutdown_timeout)
except subprocess.TimeoutExpired:
self.process.kill()
self.logger.info(f"{self.name} - killed node with work_dir {self.work_dir}")
exit_code = self.process.poll()
self.logger.info(
f"{self.name} - closed node(exit code:{exit_code}, pid:{self.pid}, work_dir:{self.work_dir})")
self.is_running = False
self.pid = -1
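# Illustrative usage sketch, not part of the original module; the paths below are
# hypothetical placeholders and `logger` is any Logger instance from .base.
def _example_node_lifecycle(logger: Logger) -> None:
    node = Node(name="node_0", work_dir="/tmp/node_0",
                config_file_path="/tmp/node_0/config.json",
                node_file_path="/opt/likelib/node", logger=logger)
    node.start(startup_time=2)
    try:
        pass  # interact with the running node here (e.g. issue client requests)
    finally:
        node.stop(shutdown_timeout=5)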
| 40.730769
| 112
| 0.612842
|
3186a5575dc99357ef3a5f2f94da001734a49d44
| 45,674
|
py
|
Python
|
sdks/python/apache_beam/io/gcp/bigquery.py
|
chamikaramj/beam
|
7c710360868d784c5b6bf99b8341748807b08101
|
[
"Apache-2.0"
] | 1
|
2019-05-24T14:03:58.000Z
|
2019-05-24T14:03:58.000Z
|
sdks/python/apache_beam/io/gcp/bigquery.py
|
kavyasmj/beam0.6
|
d59dfeb339bd56feb7569531e5c421a297b0d3dc
|
[
"Apache-2.0"
] | 2
|
2017-04-24T20:32:25.000Z
|
2022-03-29T12:59:55.000Z
|
sdks/python/apache_beam/io/gcp/bigquery.py
|
kavyasmj/beam0.6
|
d59dfeb339bd56feb7569531e5c421a297b0d3dc
|
[
"Apache-2.0"
] | 2
|
2019-03-04T02:12:46.000Z
|
2021-08-10T20:29:37.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""BigQuery sources and sinks.
This module implements reading from and writing to BigQuery tables. It relies
on several classes exposed by the BigQuery API: TableSchema, TableFieldSchema,
TableRow, and TableCell. The default mode is to return table rows read from a
BigQuery source as dictionaries. Similarly a Write transform to a BigQuerySink
accepts PCollections of dictionaries. This is done for more convenient
programming. If desired, the native TableRow objects can be used throughout to
represent rows (use an instance of TableRowJsonCoder as a coder argument when
creating the sources or sinks respectively).
Also, for programming convenience, instances of TableReference and TableSchema
have a string representation that can be used for the corresponding arguments:
- TableReference can be a PROJECT:DATASET.TABLE or DATASET.TABLE string.
- TableSchema can be a NAME:TYPE{,NAME:TYPE}* string
(e.g. 'month:STRING,event_count:INTEGER').
The syntax supported is described here:
https://cloud.google.com/bigquery/bq-command-line-tool-quickstart
BigQuery sources can be used as main inputs or side inputs. A main input
(common case) is expected to be massive and will be split into manageable chunks
and processed in parallel. Side inputs are expected to be small and will be read
completely every time a ParDo DoFn gets executed. In the example below the
lambda function implementing the DoFn for the Map transform will get on each
call *one* row of the main table and *all* rows of the side table. The runner
may use some caching techniques to share the side inputs between calls in order
to avoid excessive reading::
main_table = pipeline | 'very_big' >> beam.io.Read(beam.io.BigQuerySource())
side_table = pipeline | 'not_big' >> beam.io.Read(beam.io.BigQuerySource())
results = (
main_table
| 'process data' >> beam.Map(
lambda element, side_input: ..., AsList(side_table)))
There is no difference in how main and side inputs are read. What makes the
side_table a 'side input' is the AsList wrapper used when passing the table
as a parameter to the Map transform. AsList signals to the execution framework
that its input should be made available whole.
The main and side inputs are implemented differently. Reading a BigQuery table
as main input entails exporting the table to a set of GCS files (currently in
JSON format) and then processing those files. Reading the same table as a side
input entails querying the table for all its rows. The coder argument on
BigQuerySource controls the reading of the lines in the export files (i.e.,
transform a JSON object into a PCollection element). The coder is not involved
when the same table is read as a side input since there is no intermediate
format involved. We get the table rows directly from the BigQuery service with
a query.
Users may provide a query to read from rather than reading all of a BigQuery
table. If specified, the result obtained by executing the specified query will
be used as the data of the input transform.::
query_results = pipeline | beam.io.Read(beam.io.BigQuerySource(
query='SELECT year, mean_temp FROM samples.weather_stations'))
When creating a BigQuery input transform, users should provide either a query
or a table. Pipeline construction will fail with a validation error if neither
or both are specified.
*** Short introduction to BigQuery concepts ***
Tables have rows (TableRow) and each row has cells (TableCell).
A table has a schema (TableSchema), which in turn describes the schema of each
cell (TableFieldSchema). The terms field and cell are used interchangeably.
TableSchema: Describes the schema (types and order) for values in each row.
Has one attribute, 'fields', which is a list of TableFieldSchema objects.
TableFieldSchema: Describes the schema (type, name) for one field.
Has several attributes, including 'name' and 'type'. Common values for
the type attribute are: 'STRING', 'INTEGER', 'FLOAT', 'BOOLEAN'. All possible
values are described at:
https://cloud.google.com/bigquery/preparing-data-for-bigquery#datatypes
TableRow: Holds all values in a table row. Has one attribute, 'f', which is a
list of TableCell instances.
TableCell: Holds the value for one cell (or field). Has one attribute,
'v', which is a JsonValue instance. This class is defined in
apitools.base.py.extra_types.py module.
"""
from __future__ import absolute_import
import collections
import datetime
import json
import logging
import re
import time
import uuid
from apache_beam import coders
from apache_beam.internal.gcp import auth
from apache_beam.internal.gcp.json_value import from_json_value
from apache_beam.internal.gcp.json_value import to_json_value
from apache_beam.runners.dataflow.native_io import iobase as dataflow_io
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.utils import retry
from apache_beam.utils.pipeline_options import GoogleCloudOptions
from apache_beam.io.gcp.internal.clients import bigquery
# Protect against environments where bigquery library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apitools.base.py.exceptions import HttpError
except ImportError:
pass
# pylint: enable=wrong-import-order, wrong-import-position
__all__ = [
'TableRowJsonCoder',
'BigQueryDisposition',
'BigQuerySource',
'BigQuerySink',
]
JSON_COMPLIANCE_ERROR = 'NAN, INF and -INF values are not JSON compliant.'
MAX_RETRIES = 3
class RowAsDictJsonCoder(coders.Coder):
"""A coder for a table row (represented as a dict) to/from a JSON string.
This is the default coder for sources and sinks if the coder argument is not
specified.
"""
def encode(self, table_row):
# The normal error when dumping NAN/INF values is:
# ValueError: Out of range float values are not JSON compliant
# This code will catch this error to emit an error that explains
# to the programmer that they have used NAN/INF values.
try:
return json.dumps(table_row, allow_nan=False)
except ValueError as e:
raise ValueError('%s. %s' % (e, JSON_COMPLIANCE_ERROR))
def decode(self, encoded_table_row):
return json.loads(encoded_table_row)
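# Illustrative sketch, not part of the original module: a round trip through
# RowAsDictJsonCoder and the NAN/INF guard described above. Wrapped in a helper so
# nothing executes at import time; the row contents are hypothetical.
def _example_row_as_dict_coder():
  coder = RowAsDictJsonCoder()
  encoded = coder.encode({'month': 'July', 'event_count': 42})
  assert coder.decode(encoded) == {'month': 'July', 'event_count': 42}
  try:
    coder.encode({'mean_temp': float('nan')})
  except ValueError:
    pass  # NAN/INF values are rejected with JSON_COMPLIANCE_ERROR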
class TableRowJsonCoder(coders.Coder):
"""A coder for a TableRow instance to/from a JSON string.
Note that the encoding operation (used when writing to sinks) requires the
table schema in order to obtain the ordered list of field names. Reading from
sources on the other hand does not need the table schema.
"""
def __init__(self, table_schema=None):
# The table schema is needed for encoding TableRows as JSON (writing to
# sinks) because the ordered list of field names is used in the JSON
# representation.
self.table_schema = table_schema
# Precompute field names since we need them for row encoding.
if self.table_schema:
self.field_names = tuple(fs.name for fs in self.table_schema.fields)
def encode(self, table_row):
if self.table_schema is None:
raise AttributeError(
'The TableRowJsonCoder requires a table schema for '
'encoding operations. Please specify a table_schema argument.')
try:
return json.dumps(
collections.OrderedDict(
zip(self.field_names,
[from_json_value(f.v) for f in table_row.f])),
allow_nan=False)
except ValueError as e:
raise ValueError('%s. %s' % (e, JSON_COMPLIANCE_ERROR))
def decode(self, encoded_table_row):
od = json.loads(
encoded_table_row, object_pairs_hook=collections.OrderedDict)
return bigquery.TableRow(
f=[bigquery.TableCell(v=to_json_value(e)) for e in od.itervalues()])
def parse_table_schema_from_json(schema_string):
"""Parse the Table Schema provided as string.
Args:
schema_string: String serialized table schema, should be a valid JSON.
Returns:
A TableSchema of the BigQuery export from either the Query or the Table.
"""
json_schema = json.loads(schema_string)
def _parse_schema_field(field):
"""Parse a single schema field from dictionary.
Args:
field: Dictionary object containing serialized schema.
Returns:
A TableFieldSchema for a single column in BigQuery.
"""
schema = bigquery.TableFieldSchema()
schema.name = field['name']
schema.type = field['type']
if 'mode' in field:
schema.mode = field['mode']
else:
schema.mode = 'NULLABLE'
if 'description' in field:
schema.description = field['description']
if 'fields' in field:
schema.fields = [_parse_schema_field(x) for x in field['fields']]
return schema
fields = [_parse_schema_field(f) for f in json_schema['fields']]
return bigquery.TableSchema(fields=fields)
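# Illustrative sketch, not part of the original module: the JSON layout accepted by
# parse_table_schema_from_json, including a nested RECORD field. Field names are
# hypothetical; nothing executes at import time.
def _example_parse_table_schema_from_json():
  schema_string = json.dumps({'fields': [
      {'name': 'month', 'type': 'STRING', 'mode': 'NULLABLE'},
      {'name': 'counts', 'type': 'RECORD',
       'fields': [{'name': 'event_count', 'type': 'INTEGER'}]}]})
  return parse_table_schema_from_json(schema_string)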
class BigQueryDisposition(object):
"""Class holding standard strings used for create and write dispositions."""
CREATE_NEVER = 'CREATE_NEVER'
CREATE_IF_NEEDED = 'CREATE_IF_NEEDED'
WRITE_TRUNCATE = 'WRITE_TRUNCATE'
WRITE_APPEND = 'WRITE_APPEND'
WRITE_EMPTY = 'WRITE_EMPTY'
@staticmethod
def validate_create(disposition):
values = (BigQueryDisposition.CREATE_NEVER,
BigQueryDisposition.CREATE_IF_NEEDED)
if disposition not in values:
raise ValueError(
'Invalid create disposition %s. Expecting %s' % (disposition, values))
return disposition
@staticmethod
def validate_write(disposition):
values = (BigQueryDisposition.WRITE_TRUNCATE,
BigQueryDisposition.WRITE_APPEND,
BigQueryDisposition.WRITE_EMPTY)
if disposition not in values:
raise ValueError(
'Invalid write disposition %s. Expecting %s' % (disposition, values))
return disposition
def _parse_table_reference(table, dataset=None, project=None):
"""Parses a table reference into a (project, dataset, table) tuple.
Args:
table: The ID of the table. The ID must contain only letters
(a-z, A-Z), numbers (0-9), or underscores (_). If dataset argument is None
then the table argument must contain the entire table reference:
'DATASET.TABLE' or 'PROJECT:DATASET.TABLE'. This argument can be a
bigquery.TableReference instance in which case dataset and project are
ignored and the reference is returned as a result. Additionally, for date
partitioned tables, appending '$YYYYmmdd' to the table name is supported,
e.g. 'DATASET.TABLE$YYYYmmdd'.
dataset: The ID of the dataset containing this table or null if the table
reference is specified entirely by the table argument.
project: The ID of the project containing this table or null if the table
reference is specified entirely by the table (and possibly dataset)
argument.
Returns:
A bigquery.TableReference object. The object has the following attributes:
projectId, datasetId, and tableId.
Raises:
ValueError: if the table reference as a string does not match the expected
format.
"""
if isinstance(table, bigquery.TableReference):
return table
table_reference = bigquery.TableReference()
# If dataset argument is not specified, the expectation is that the
# table argument will contain a full table reference instead of just a
# table name.
if dataset is None:
match = re.match(
r'^((?P<project>.+):)?(?P<dataset>\w+)\.(?P<table>[\w\$]+)$', table)
if not match:
raise ValueError(
'Expected a table reference (PROJECT:DATASET.TABLE or '
'DATASET.TABLE) instead of %s.' % table)
table_reference.projectId = match.group('project')
table_reference.datasetId = match.group('dataset')
table_reference.tableId = match.group('table')
else:
table_reference.projectId = project
table_reference.datasetId = dataset
table_reference.tableId = table
return table_reference
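# Illustrative sketch, not part of the original module: the accepted spellings of a
# table reference handled by _parse_table_reference above. Project, dataset and table
# names are hypothetical placeholders.
def _example_parse_table_reference():
  full = _parse_table_reference('my-project:samples.weather_stations')
  partitioned = _parse_table_reference('samples.weather_stations$20160101')
  explicit = _parse_table_reference(
      'weather_stations', dataset='samples', project='my-project')
  return full, partitioned, explicit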
# -----------------------------------------------------------------------------
# BigQuerySource, BigQuerySink.
class BigQuerySource(dataflow_io.NativeSource):
"""A source based on a BigQuery table."""
def __init__(self, table=None, dataset=None, project=None, query=None,
validate=False, coder=None, use_standard_sql=False,
flatten_results=True):
"""Initialize a BigQuerySource.
Args:
table: The ID of a BigQuery table. If specified all data of the table
will be used as input of the current source. The ID must contain only
letters (a-z, A-Z), numbers (0-9), or underscores (_). If dataset
and query arguments are None then the table argument must contain the
entire table reference specified as: 'DATASET.TABLE' or
'PROJECT:DATASET.TABLE'.
dataset: The ID of the dataset containing this table or null if the table
reference is specified entirely by the table argument or a query is
specified.
project: The ID of the project containing this table or null if the table
reference is specified entirely by the table argument or a query is
specified.
query: A query to be used instead of arguments table, dataset, and
project.
validate: If true, various checks will be done when source gets
initialized (e.g., is table present?). This should be True for most
scenarios in order to catch errors as early as possible (pipeline
construction instead of pipeline execution). It should be False if the
table is created during pipeline execution by a previous step.
coder: The coder for the table rows if serialized to disk. If None, then
the default coder is RowAsDictJsonCoder, which will interpret every line
in a file as a JSON serialized dictionary. This argument needs a value
only in special cases when returning table rows as dictionaries is not
desirable.
use_standard_sql: Specifies whether to use BigQuery's standard
SQL dialect for this query. The default value is False. If set to True,
the query will use BigQuery's updated SQL dialect with improved
standards compliance. This parameter is ignored for table inputs.
flatten_results: Flattens all nested and repeated fields in the
query results. The default value is true.
Raises:
ValueError: if any of the following is true
(1) the table reference as a string does not match the expected format
(2) neither a table nor a query is specified
(3) both a table and a query is specified.
"""
if table is not None and query is not None:
raise ValueError('Both a BigQuery table and a query were specified.'
' Please specify only one of these.')
elif table is None and query is None:
raise ValueError('A BigQuery table or a query must be specified')
elif table is not None:
self.table_reference = _parse_table_reference(table, dataset, project)
self.query = None
self.use_legacy_sql = True
else:
self.query = query
# TODO(BEAM-1082): Change the internal flag to be standard_sql
self.use_legacy_sql = not use_standard_sql
self.table_reference = None
self.validate = validate
self.flatten_results = flatten_results
self.coder = coder or RowAsDictJsonCoder()
def display_data(self):
if self.query is not None:
res = {'query': DisplayDataItem(self.query, label='Query')}
else:
if self.table_reference.projectId is not None:
tableSpec = '{}:{}.{}'.format(self.table_reference.projectId,
self.table_reference.datasetId,
self.table_reference.tableId)
else:
tableSpec = '{}.{}'.format(self.table_reference.datasetId,
self.table_reference.tableId)
res = {'table': DisplayDataItem(tableSpec, label='Table')}
res['validation'] = DisplayDataItem(self.validate,
label='Validation Enabled')
return res
@property
def format(self):
"""Source format name required for remote execution."""
return 'bigquery'
def reader(self, test_bigquery_client=None):
return BigQueryReader(
source=self,
test_bigquery_client=test_bigquery_client,
use_legacy_sql=self.use_legacy_sql,
flatten_results=self.flatten_results)
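# Illustrative sketch, not part of the original module: the two mutually exclusive ways
# of configuring a source documented above. The table spec and query are hypothetical;
# validate is left at its default so no service calls are made here.
def _example_bigquery_sources():
  table_source = BigQuerySource(table='my-project:my_dataset.weather_stations')
  query_source = BigQuerySource(
      query='SELECT year, mean_temp FROM samples.weather_stations')
  return table_source, query_source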
class BigQuerySink(dataflow_io.NativeSink):
"""A sink based on a BigQuery table."""
def __init__(self, table, dataset=None, project=None, schema=None,
create_disposition=BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=BigQueryDisposition.WRITE_EMPTY,
validate=False, coder=None):
"""Initialize a BigQuerySink.
Args:
table: The ID of the table. The ID must contain only letters
(a-z, A-Z), numbers (0-9), or underscores (_). If dataset argument is
None then the table argument must contain the entire table reference
specified as: 'DATASET.TABLE' or 'PROJECT:DATASET.TABLE'.
dataset: The ID of the dataset containing this table or null if the table
reference is specified entirely by the table argument.
project: The ID of the project containing this table or null if the table
reference is specified entirely by the table argument.
schema: The schema to be used if the BigQuery table to write has to be
created. This can be either specified as a 'bigquery.TableSchema' object
or a single string of the form 'field1:type1,field2:type2,field3:type3'
that defines a comma separated list of fields. Here 'type' should
specify the BigQuery type of the field. Single string based schemas do
not support nested fields, repeated fields, or specifying a BigQuery
mode for fields (mode will always be set to 'NULLABLE').
create_disposition: A string describing what happens if the table does not
exist. Possible values are:
- BigQueryDisposition.CREATE_IF_NEEDED: create if does not exist.
- BigQueryDisposition.CREATE_NEVER: fail the write if does not exist.
write_disposition: A string describing what happens if the table has
already some data. Possible values are:
- BigQueryDisposition.WRITE_TRUNCATE: delete existing rows.
- BigQueryDisposition.WRITE_APPEND: add to existing rows.
- BigQueryDisposition.WRITE_EMPTY: fail the write if table not empty.
validate: If true, various checks will be done when sink gets
initialized (e.g., is table present given the disposition arguments?).
This should be True for most scenarios in order to catch errors as early
as possible (pipeline construction instead of pipeline execution). It
should be False if the table is created during pipeline execution by a
previous step.
coder: The coder for the table rows if serialized to disk. If None, then
the default coder is RowAsDictJsonCoder, which will interpret every
element written to the sink as a dictionary that will be JSON serialized
as a line in a file. This argument needs a value only in special cases
when writing table rows as dictionaries is not desirable.
Raises:
TypeError: if the schema argument is not a string or a TableSchema object.
ValueError: if the table reference as a string does not match the expected
format.
"""
self.table_reference = _parse_table_reference(table, dataset, project)
# Transform the table schema into a bigquery.TableSchema instance.
if isinstance(schema, basestring):
# TODO(silviuc): Should add a regex-based validation of the format.
table_schema = bigquery.TableSchema()
schema_list = [s.strip(' ') for s in schema.split(',')]
for field_and_type in schema_list:
field_name, field_type = field_and_type.split(':')
field_schema = bigquery.TableFieldSchema()
field_schema.name = field_name
field_schema.type = field_type
field_schema.mode = 'NULLABLE'
table_schema.fields.append(field_schema)
self.table_schema = table_schema
elif schema is None:
# TODO(silviuc): Should check that table exists if no schema specified.
self.table_schema = schema
elif isinstance(schema, bigquery.TableSchema):
self.table_schema = schema
else:
raise TypeError('Unexpected schema argument: %s.' % schema)
self.create_disposition = BigQueryDisposition.validate_create(
create_disposition)
self.write_disposition = BigQueryDisposition.validate_write(
write_disposition)
self.validate = validate
self.coder = coder or RowAsDictJsonCoder()
def display_data(self):
res = {}
if self.table_reference is not None:
tableSpec = '{}.{}'.format(self.table_reference.datasetId,
self.table_reference.tableId)
if self.table_reference.projectId is not None:
tableSpec = '{}:{}'.format(self.table_reference.projectId,
tableSpec)
res['table'] = DisplayDataItem(tableSpec, label='Table')
res['validation'] = DisplayDataItem(self.validate,
label="Validation Enabled")
return res
def schema_as_json(self):
"""Returns the TableSchema associated with the sink as a JSON string."""
def schema_list_as_object(schema_list):
"""Returns a list of TableFieldSchema objects as a list of dicts."""
fields = []
for f in schema_list:
fs = {'name': f.name, 'type': f.type}
if f.description is not None:
fs['description'] = f.description
if f.mode is not None:
fs['mode'] = f.mode
if f.type.lower() == 'record':
fs['fields'] = schema_list_as_object(f.fields)
fields.append(fs)
return fields
return json.dumps(
{'fields': schema_list_as_object(self.table_schema.fields)})
@property
def format(self):
"""Sink format name required for remote execution."""
return 'bigquery'
def writer(self, test_bigquery_client=None, buffer_size=None):
return BigQueryWriter(
sink=self, test_bigquery_client=test_bigquery_client,
buffer_size=buffer_size)
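# Illustrative sketch, not part of the original module: constructing a sink from the
# single-string schema form documented above. The table spec and field names are
# hypothetical; validate is left at its default so no service calls are made here.
def _example_bigquery_sink():
  return BigQuerySink(
      'my-project:my_dataset.events',
      schema='month:STRING,event_count:INTEGER',
      create_disposition=BigQueryDisposition.CREATE_IF_NEEDED,
      write_disposition=BigQueryDisposition.WRITE_APPEND)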
# -----------------------------------------------------------------------------
# BigQueryReader, BigQueryWriter.
class BigQueryReader(dataflow_io.NativeSourceReader):
"""A reader for a BigQuery source."""
def __init__(self, source, test_bigquery_client=None, use_legacy_sql=True,
flatten_results=True):
self.source = source
self.test_bigquery_client = test_bigquery_client
if auth.is_running_in_gce:
self.executing_project = auth.executing_project
elif hasattr(source, 'pipeline_options'):
self.executing_project = (
source.pipeline_options.view_as(GoogleCloudOptions).project)
else:
self.executing_project = None
# TODO(silviuc): Try to automatically get it from gcloud config info.
if not self.executing_project and test_bigquery_client is None:
raise RuntimeError(
'Missing executing project information. Please use the --project '
'command line option to specify it.')
self.row_as_dict = isinstance(self.source.coder, RowAsDictJsonCoder)
# Schema for the rows being read by the reader. It is initialized the
# first time something gets read from the table. It is not required
# for reading the field values in each row but could be useful for
# getting additional details.
self.schema = None
self.use_legacy_sql = use_legacy_sql
self.flatten_results = flatten_results
if self.source.query is None:
# If table schema did not define a project we default to executing
# project.
project_id = self.source.table_reference.projectId
if not project_id:
project_id = self.executing_project
self.query = 'SELECT * FROM [%s:%s.%s];' % (
project_id,
self.source.table_reference.datasetId,
self.source.table_reference.tableId)
else:
self.query = self.source.query
def __enter__(self):
self.client = BigQueryWrapper(client=self.test_bigquery_client)
self.client.create_temporary_dataset(self.executing_project)
return self
def __exit__(self, exception_type, exception_value, traceback):
self.client.clean_up_temporary_dataset(self.executing_project)
def __iter__(self):
for rows, schema in self.client.run_query(
project_id=self.executing_project, query=self.query,
use_legacy_sql=self.use_legacy_sql,
flatten_results=self.flatten_results):
if self.schema is None:
self.schema = schema
for row in rows:
if self.row_as_dict:
yield self.client.convert_row_to_dict(row, schema)
else:
yield row
class BigQueryWriter(dataflow_io.NativeSinkWriter):
"""The sink writer for a BigQuerySink."""
def __init__(self, sink, test_bigquery_client=None, buffer_size=None):
self.sink = sink
self.test_bigquery_client = test_bigquery_client
self.row_as_dict = isinstance(self.sink.coder, RowAsDictJsonCoder)
# Buffer used to batch written rows so we reduce communication with the
# BigQuery service.
self.rows_buffer = []
self.rows_buffer_flush_threshold = buffer_size or 1000
# Figure out the project, dataset, and table used for the sink.
self.project_id = self.sink.table_reference.projectId
# If table schema did not define a project we default to executing project.
if self.project_id is None and hasattr(sink, 'pipeline_options'):
self.project_id = (
sink.pipeline_options.view_as(GoogleCloudOptions).project)
assert self.project_id is not None
self.dataset_id = self.sink.table_reference.datasetId
self.table_id = self.sink.table_reference.tableId
def _flush_rows_buffer(self):
if self.rows_buffer:
logging.info('Writing %d rows to %s:%s.%s table.', len(self.rows_buffer),
self.project_id, self.dataset_id, self.table_id)
passed, errors = self.client.insert_rows(
project_id=self.project_id, dataset_id=self.dataset_id,
table_id=self.table_id, rows=self.rows_buffer)
self.rows_buffer = []
if not passed:
raise RuntimeError('Could not successfully insert rows to BigQuery'
' table [%s:%s.%s]. Errors: %s'%
(self.project_id, self.dataset_id,
self.table_id, errors))
def __enter__(self):
self.client = BigQueryWrapper(client=self.test_bigquery_client)
self.client.get_or_create_table(
self.project_id, self.dataset_id, self.table_id, self.sink.table_schema,
self.sink.create_disposition, self.sink.write_disposition)
return self
def __exit__(self, exception_type, exception_value, traceback):
self._flush_rows_buffer()
def Write(self, row):
self.rows_buffer.append(row)
if len(self.rows_buffer) > self.rows_buffer_flush_threshold:
self._flush_rows_buffer()
# -----------------------------------------------------------------------------
# BigQueryWrapper.
class BigQueryWrapper(object):
"""BigQuery client wrapper with utilities for querying.
The wrapper is used to organize all the BigQuery integration points and
offer a common place where retry logic for failures can be controlled.
In addition it offers various functions used both in sources and sinks
(e.g., find and create tables, query a table, etc.).
"""
TEMP_TABLE = 'temp_table_'
TEMP_DATASET = 'temp_dataset_'
def __init__(self, client=None):
self.client = client or bigquery.BigqueryV2(
credentials=auth.get_service_credentials())
self._unique_row_id = 0
# For testing scenarios where we pass in a client we do not want a
# randomized prefix for row IDs.
self._row_id_prefix = '' if client else uuid.uuid4()
self._temporary_table_suffix = uuid.uuid4().hex
@property
def unique_row_id(self):
"""Returns a unique row ID (str) used to avoid multiple insertions.
If the row ID is provided, BigQuery will make a best effort to not insert
the same row multiple times for fail and retry scenarios in which the insert
request may be issued several times. This comes into play for sinks executed
in a local runner.
Returns:
a unique row ID string
"""
self._unique_row_id += 1
return '%s_%d' % (self._row_id_prefix, self._unique_row_id)
def _get_temp_table(self, project_id):
return _parse_table_reference(
table=BigQueryWrapper.TEMP_TABLE + self._temporary_table_suffix,
dataset=BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix,
project=project_id)
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _start_query_job(self, project_id, query, use_legacy_sql, flatten_results,
job_id, dry_run=False):
reference = bigquery.JobReference(jobId=job_id, projectId=project_id)
request = bigquery.BigqueryJobsInsertRequest(
projectId=project_id,
job=bigquery.Job(
configuration=bigquery.JobConfiguration(
dryRun=dry_run,
query=bigquery.JobConfigurationQuery(
query=query,
useLegacySql=use_legacy_sql,
allowLargeResults=True,
destinationTable=self._get_temp_table(project_id),
flattenResults=flatten_results)),
jobReference=reference))
response = self.client.jobs.Insert(request)
return response.jobReference.jobId
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _get_query_results(self, project_id, job_id,
page_token=None, max_results=10000):
request = bigquery.BigqueryJobsGetQueryResultsRequest(
jobId=job_id, pageToken=page_token, projectId=project_id,
maxResults=max_results)
response = self.client.jobs.GetQueryResults(request)
return response
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_filter)
def _insert_all_rows(self, project_id, dataset_id, table_id, rows):
# The rows argument is a list of
# bigquery.TableDataInsertAllRequest.RowsValueListEntry instances as
# required by the InsertAll() method.
request = bigquery.BigqueryTabledataInsertAllRequest(
projectId=project_id, datasetId=dataset_id, tableId=table_id,
tableDataInsertAllRequest=bigquery.TableDataInsertAllRequest(
# TODO(silviuc): Should have an option for skipInvalidRows?
# TODO(silviuc): Should have an option for ignoreUnknownValues?
rows=rows))
response = self.client.tabledata.InsertAll(request)
# response.insertErrors is not [] if errors encountered.
return not response.insertErrors, response.insertErrors
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _get_table(self, project_id, dataset_id, table_id):
request = bigquery.BigqueryTablesGetRequest(
projectId=project_id, datasetId=dataset_id, tableId=table_id)
response = self.client.tables.Get(request)
# The response is a bigquery.Table instance.
return response
def _create_table(self, project_id, dataset_id, table_id, schema):
table = bigquery.Table(
tableReference=bigquery.TableReference(
projectId=project_id, datasetId=dataset_id, tableId=table_id),
schema=schema)
request = bigquery.BigqueryTablesInsertRequest(
projectId=project_id, datasetId=dataset_id, table=table)
response = self.client.tables.Insert(request)
# The response is a bigquery.Table instance.
return response
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_or_create_dataset(self, project_id, dataset_id):
# Check if dataset already exists otherwise create it
try:
dataset = self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
projectId=project_id, datasetId=dataset_id))
return dataset
except HttpError as exn:
if exn.status_code == 404:
dataset = bigquery.Dataset(
datasetReference=bigquery.DatasetReference(
projectId=project_id, datasetId=dataset_id))
request = bigquery.BigqueryDatasetsInsertRequest(
projectId=project_id, dataset=dataset)
response = self.client.datasets.Insert(request)
# The response is a bigquery.Dataset instance.
return response
else:
raise
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _is_table_empty(self, project_id, dataset_id, table_id):
request = bigquery.BigqueryTabledataListRequest(
projectId=project_id, datasetId=dataset_id, tableId=table_id,
maxResults=1)
response = self.client.tabledata.List(request)
# The response is a bigquery.TableDataList instance.
return response.totalRows == 0
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _delete_table(self, project_id, dataset_id, table_id):
request = bigquery.BigqueryTablesDeleteRequest(
projectId=project_id, datasetId=dataset_id, tableId=table_id)
try:
self.client.tables.Delete(request)
except HttpError as exn:
if exn.status_code == 404:
logging.warning('Table %s:%s.%s does not exist', project_id,
dataset_id, table_id)
return
else:
raise
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _delete_dataset(self, project_id, dataset_id, delete_contents=True):
request = bigquery.BigqueryDatasetsDeleteRequest(
projectId=project_id, datasetId=dataset_id,
deleteContents=delete_contents)
try:
self.client.datasets.Delete(request)
except HttpError as exn:
if exn.status_code == 404:
logging.warning('Dataset %s:%s does not exist', project_id,
dataset_id)
return
else:
raise
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def create_temporary_dataset(self, project_id):
dataset_id = BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix
# Check if dataset exists to make sure that the temporary id is unique
try:
self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
projectId=project_id, datasetId=dataset_id))
if project_id is not None:
# Unittests don't pass projectIds so they can be run without error
raise RuntimeError(
'Dataset %s:%s already exists so cannot be used as temporary.'
% (project_id, dataset_id))
except HttpError as exn:
if exn.status_code == 404:
logging.warning('Dataset does not exist so we will create it')
self.get_or_create_dataset(project_id, dataset_id)
else:
raise
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def clean_up_temporary_dataset(self, project_id):
temp_table = self._get_temp_table(project_id)
try:
self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
projectId=project_id, datasetId=temp_table.datasetId))
except HttpError as exn:
if exn.status_code == 404:
logging.warning('Dataset %s:%s does not exist', project_id,
temp_table.datasetId)
return
else:
raise
self._delete_dataset(temp_table.projectId, temp_table.datasetId, True)
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_or_create_table(
self, project_id, dataset_id, table_id, schema,
create_disposition, write_disposition):
"""Gets or creates a table based on create and write dispositions.
The function mimics the behavior of BigQuery import jobs when using the
same create and write dispositions.
Args:
project_id: The project id owning the table.
dataset_id: The dataset id owning the table.
table_id: The table id.
schema: A bigquery.TableSchema instance or None.
create_disposition: CREATE_NEVER or CREATE_IF_NEEDED.
write_disposition: WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE.
Returns:
A bigquery.Table instance if table was found or created.
Raises:
RuntimeError: For various mismatches between the state of the table and
the create/write dispositions passed in. For example if the table is not
empty and WRITE_EMPTY was specified then an error will be raised since
the table was expected to be empty.
"""
found_table = None
try:
found_table = self._get_table(project_id, dataset_id, table_id)
except HttpError as exn:
if exn.status_code == 404:
if create_disposition == BigQueryDisposition.CREATE_NEVER:
raise RuntimeError(
'Table %s:%s.%s not found but create disposition is CREATE_NEVER.'
% (project_id, dataset_id, table_id))
else:
raise
# If table exists already then handle the semantics for WRITE_EMPTY and
# WRITE_TRUNCATE write dispositions.
if found_table:
table_empty = self._is_table_empty(project_id, dataset_id, table_id)
if (not table_empty and
write_disposition == BigQueryDisposition.WRITE_EMPTY):
raise RuntimeError(
'Table %s:%s.%s is not empty but write disposition is WRITE_EMPTY.'
% (project_id, dataset_id, table_id))
# Delete the table and recreate it (later) if WRITE_TRUNCATE was
# specified.
if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
self._delete_table(project_id, dataset_id, table_id)
# Create a new table potentially reusing the schema from a previously
# found table in case the schema was not specified.
if schema is None and found_table is None:
raise RuntimeError(
'Table %s:%s.%s requires a schema. None can be inferred because the '
'table does not exist.'
% (project_id, dataset_id, table_id))
if found_table and write_disposition != BigQueryDisposition.WRITE_TRUNCATE:
return found_table
else:
# if write_disposition == BigQueryDisposition.WRITE_TRUNCATE we delete
# the table before this point.
return self._create_table(project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
schema=schema or found_table.schema)
def run_query(self, project_id, query, use_legacy_sql, flatten_results,
dry_run=False):
job_id = self._start_query_job(project_id, query, use_legacy_sql,
flatten_results, job_id=uuid.uuid4().hex,
dry_run=dry_run)
if dry_run:
# If this was a dry run then the fact that we get here means the
# query has no errors. The start_query_job would raise an error otherwise.
return
page_token = None
while True:
response = self._get_query_results(project_id, job_id, page_token)
if not response.jobComplete:
# The jobComplete field can be False if the query request times out
# (default is 10 seconds). Note that this is a timeout for the query
# request not for the actual execution of the query in the service. If
# the request times out we keep trying. This situation is quite possible
# if the query will return a large number of rows.
logging.info('Waiting on response from query: %s ...', query)
time.sleep(1.0)
continue
# We got some results. The last page is signalled by a missing pageToken.
yield response.rows, response.schema
if not response.pageToken:
break
page_token = response.pageToken
def insert_rows(self, project_id, dataset_id, table_id, rows):
"""Inserts rows into the specified table.
Args:
project_id: The project id owning the table.
dataset_id: The dataset id owning the table.
table_id: The table id.
rows: A list of plain Python dictionaries. Each dictionary is a row and
each key in it is the name of a field.
Returns:
A tuple (bool, errors). If first element is False then the second element
will be a bigquery.InsertErrorsValueListEntry instance containing
specific errors.
"""
# Prepare rows for insertion. Of special note is the row ID that we add to
# each row in order to help BigQuery avoid inserting a row multiple times.
# BigQuery will make a best-effort attempt at de-duplication if unique IDs are
# provided; duplicate inserts can otherwise happen during retries on failures.
# TODO(silviuc): Must add support to writing TableRow's instead of dicts.
final_rows = []
for row in rows:
json_object = bigquery.JsonObject()
for k, v in row.iteritems():
json_object.additionalProperties.append(
bigquery.JsonObject.AdditionalProperty(
key=k, value=to_json_value(v)))
final_rows.append(
bigquery.TableDataInsertAllRequest.RowsValueListEntry(
insertId=str(self.unique_row_id),
json=json_object))
result, errors = self._insert_all_rows(
project_id, dataset_id, table_id, final_rows)
return result, errors
def _convert_cell_value_to_dict(self, value, field):
if field.type == 'STRING':
# Input: "XYZ" --> Output: "XYZ"
return value
elif field.type == 'BOOLEAN':
# Input: "true" --> Output: True
return value == 'true'
elif field.type == 'INTEGER':
# Input: "123" --> Output: 123
return int(value)
elif field.type == 'FLOAT':
# Input: "1.23" --> Output: 1.23
return float(value)
elif field.type == 'TIMESTAMP':
# The UTC should come from the timezone library but this is a known
# issue in python 2.7 so we'll just hardcode it as we're reading using
# utcfromtimestamp.
# Input: 1478134176.985864 --> Output: "2016-11-03 00:49:36.985864 UTC"
dt = datetime.datetime.utcfromtimestamp(float(value))
return dt.strftime('%Y-%m-%d %H:%M:%S.%f UTC')
elif field.type == 'BYTES':
# Input: "YmJi" --> Output: "YmJi"
return value
elif field.type == 'DATE':
# Input: "2016-11-03" --> Output: "2016-11-03"
return value
elif field.type == 'DATETIME':
# Input: "2016-11-03T00:49:36" --> Output: "2016-11-03T00:49:36"
return value
elif field.type == 'TIME':
# Input: "00:49:36" --> Output: "00:49:36"
return value
elif field.type == 'RECORD':
# Note that a schema field object supports also a RECORD type. However
# when querying, the repeated and/or record fields are flattened
# unless we pass the flatten_results flag as False to the source
return self.convert_row_to_dict(value, field)
else:
raise RuntimeError('Unexpected field type: %s' % field.type)
def convert_row_to_dict(self, row, schema):
"""Converts a TableRow instance using the schema to a Python dict."""
result = {}
for index, field in enumerate(schema.fields):
value = None
if isinstance(schema, bigquery.TableSchema):
cell = row.f[index]
value = from_json_value(cell.v) if cell.v is not None else None
elif isinstance(schema, bigquery.TableFieldSchema):
cell = row['f'][index]
value = cell['v'] if 'v' in cell else None
if field.mode == 'REPEATED':
result[field.name] = [self._convert_cell_value_to_dict(x['v'], field)
for x in value]
elif value is None:
if not field.mode == 'NULLABLE':
raise ValueError('Received \'None\' as the value for the field %s '
'but the field is not NULLABLE.', field.name)
result[field.name] = None
else:
result[field.name] = self._convert_cell_value_to_dict(value, field)
return result
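# Illustrative sketch, not part of the original module: the per-type conversion applied
# by _convert_cell_value_to_dict, shown on a hypothetical TIMESTAMP field. A dummy
# client object is passed so that no credentials are looked up and no RPCs are issued.
def _example_cell_value_conversion():
  wrapper = BigQueryWrapper(client=object())
  ts_field = bigquery.TableFieldSchema(name='ts', type='TIMESTAMP')
  # '1478134176.985864' -> '2016-11-03 00:49:36.985864 UTC'
  return wrapper._convert_cell_value_to_dict('1478134176.985864', ts_field)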
| 42.212569
| 80
| 0.699676
|
a7b655390835e53b5d0315507ec028f23f661289
| 4,774
|
py
|
Python
|
vulcanai/mnist_loader.py
|
priyatharsan/Vulcan
|
15a196f1b267ec1c9cc88304b93508ca68c94fc1
|
[
"Apache-2.0"
] | 1
|
2018-05-23T23:49:01.000Z
|
2018-05-23T23:49:01.000Z
|
vulcanai/mnist_loader.py
|
priyatharsan/Vulcan
|
15a196f1b267ec1c9cc88304b93508ca68c94fc1
|
[
"Apache-2.0"
] | null | null | null |
vulcanai/mnist_loader.py
|
priyatharsan/Vulcan
|
15a196f1b267ec1c9cc88304b93508ca68c94fc1
|
[
"Apache-2.0"
] | null | null | null |
import os
import urllib
import gzip
import numpy as np
def load_fashion_mnist():
"""
Get the fashion MNIST training data (downloading it if it is not already accessible),
and return it as NumPy arrays.
Extracted from https://github.com/zalandoresearch/fashion-mnist/blob/master/README.md
:return: (train_images, train_labels, test_images, test_labels)
"""
if os.path.exists("data/fashion"):
print("data folder already exists")
else:
print("Creating data/fashion folder")
os.makedirs("data/fashion")
if not os.path.exists("data/fashion/train-images-idx3-ubyte.gz"):
print("No fashion MNIST training images found--downloading")
_download_file("http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz", 'data/fashion')
print ('Loading training images...')
train_images = _load_image('data/fashion/train-images-idx3-ubyte.gz')
if not os.path.exists("data/fashion/train-labels-idx1-ubyte.gz"):
print("No fashion MNIST training labels found--downloading")
_download_file("http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz", 'data/fashion')
print ('Loading training labels...')
train_labels = _load_label('data/fashion/train-labels-idx1-ubyte.gz')
if not os.path.exists("data/fashion/t10k-images-idx3-ubyte.gz"):
print("No fashion MNIST test (10k) images found--downloading")
_download_file("http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz", 'data/fashion')
print ('Loading testing images...')
t10k_images = _load_image("data/fashion/t10k-images-idx3-ubyte.gz")
if not os.path.exists("data/fashion/t10k-labels-idx1-ubyte.gz"):
print("No fashion MNIST test (10k) labels found--downloading")
_download_file("http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz", 'data/fashion')
print ('Loading testing labels...')
t10k_labels = _load_label('data/fashion/t10k-labels-idx1-ubyte.gz')
return train_images, train_labels, t10k_images, t10k_labels
def load_mnist():
"""
Get the MNIST training data (downloading it if it is not already accessible),
and return it as NumPy arrays
:return: (train_images, train_labels, test_images, test_labels)
"""
if os.path.exists("data/"):
print("data folder already exists")
else:
print("Creating data folder")
os.makedirs("data/")
if not os.path.exists("data/train-images-idx3-ubyte.gz"):
print("No MNIST training images found--downloading")
_download_file("http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz")
print ('Loading training images...')
train_images = _load_image('data/train-images-idx3-ubyte.gz')
if not os.path.exists("data/train-labels-idx1-ubyte.gz"):
print("No MNIST training labels found--downloading")
_download_file("http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz")
print ('Loading training labels...')
train_labels = _load_label('data/train-labels-idx1-ubyte.gz')
if not os.path.exists("data/t10k-images-idx3-ubyte.gz"):
print("No MNIST test (10k) images found--downloading")
_download_file("http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz")
print ('Loading testing images...')
t10k_images = _load_image("data/t10k-images-idx3-ubyte.gz")
if not os.path.exists("data/t10k-labels-idx1-ubyte.gz"):
print("No MNIST test (10k) labels found--downloading")
_download_file("http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz")
print ('Loading testing labels...')
t10k_labels = _load_label('data/t10k-labels-idx1-ubyte.gz')
return train_images, train_labels, t10k_images, t10k_labels
def _download_file(file_path, folder='data'):
print("Downloading {}...".format(file_path))
test_file = urllib.URLopener()
file_name = file_path.split('/')[-1]
test_file.retrieve(file_path, '{}/{}'.format(folder, file_name))
def _load_image(filename):
# Read the inputs in Yann LeCun's binary format.
f = gzip.open(filename, 'rb')
data = np.frombuffer(f.read(), np.uint8, offset=16)
f.close()
data = data.reshape(-1, 784)
return data / np.float32(256)
def _load_label(filename):
"""Read the labels in Yann LeCun's binary format."""
f = gzip.open(filename, 'rb')
data = np.frombuffer(f.read(), np.uint8, offset=8)
# The labels are vectors of integers now, that's exactly what we want.
return data
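# Illustrative sketch, not part of the original module: callers typically one-hot encode
# the integer label vector returned by _load_label before training; n_classes=10 for MNIST.
def _labels_to_one_hot(labels, n_classes=10):
    one_hot = np.zeros((labels.shape[0], n_classes), dtype=np.float32)
    one_hot[np.arange(labels.shape[0]), labels] = 1.0
    return one_hot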
def main():
"""Totally useless main."""
(train_images, train_labels, t10k_images, t10k_labels) = load_mnist()
if __name__ == "__main__":
main()
| 39.131148
| 127
| 0.690406
|
d9f9799f5b13c067fc0a52fd03d7a201653cf327
| 7,473
|
py
|
Python
|
lib/JumpScale/lib/ssh/ufw/manager.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | null | null | null |
lib/JumpScale/lib/ssh/ufw/manager.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | 4
|
2016-08-25T12:08:39.000Z
|
2018-04-12T12:36:01.000Z
|
lib/JumpScale/lib/ssh/ufw/manager.py
|
rudecs/jumpscale_core7
|
30c03f26f1cdad3edbb9d79d50fbada8acc974f5
|
[
"Apache-2.0"
] | 3
|
2016-03-08T07:49:34.000Z
|
2018-10-19T13:56:43.000Z
|
import re
import collections
from fabric.api import settings
from JumpScale import j
STATUS_LINE = re.compile('^Status:\s*(.+)')
RULE_LINE = re.compile('^\[\s*(\d+)\] (.+?)\s{2,}(.+?)\s{2,}(.+)$')
ParsedDestination = collections.namedtuple('ParsedDestination',
'ip proto port dev')
class UFWError(Exception):
pass
class UFWManagerFactory(object):
def get(self, connection=None):
if connection==None:
connection=j.ssh.connection
return UFWManager(connection)
class UFWRule(object):
def __init__(self, action=None, source=None, destination=None, number=None):
self._number = number
self._source = source
self._action = action
self._destination = destination
@property
def number(self):
return self._number
@property
def source(self):
return self._source
@property
def destination(self):
return self._destination
@property
def action(self):
return self._action
def __str__(self):
return ('[%2s] %s to %s from %s' %
(self.number if self.number is not None else '',
self.action, self.destination, self.source))
def __repr__(self):
return str(self)
class UFWOperation(object):
def cmd(self):
raise NotImplementedError()
class StatusOp(UFWOperation):
def __init__(self, status=None):
self._status = status
def cmd(self):
return '--force enable' if self._status else 'disable'
class ResetOp(UFWOperation):
def cmd(self):
return '--force reset'
class RuleOp(UFWOperation):
def __init__(self, rule=None, add=True):
self._add = add
self._rule = rule
def _parser(self, src):
src = src.replace('(v6)', '').replace('(out)', '')
source = re.search('\d+\.\d+\.\d+.\d+[^\s]*', src)
ip = None
pos = 0
if source:
ip = source.group()
pos = source.end()
else:
ip = 'any'
port_proto_m = re.search('\\b(\d+)(/([^\s]+))?', src[pos:])
proto = None
port = None
if port_proto_m:
proto = port_proto_m.group(3)
port = port_proto_m.group(1)
pos = port_proto_m.end()
on_m = re.search('on \w+', src)
dev = None
if on_m:
dev = on_m.group()
return ParsedDestination(ip=ip, proto=proto, port=port, dev=dev)
def cmd(self):
rule = self._rule
cmd = []
if not self._add:
cmd.append('delete')
cmd.append(rule.action.lower())
def push(src):
cmd.append(src.ip)
if src.proto:
cmd.append('proto %s' % src.proto)
if src.port:
cmd.append('port %s' % src.port)
src = self._parser(rule.source)
dst = self._parser(rule.destination)
if src.dev and dst.dev:
raise UFWError('Both source and destination has devices')
if src.dev:
if 'out' not in rule.action.lower():
raise UFWError('Invalid source for %s' % rule.action)
cmd.append(src.dev)
elif dst.dev:
if 'in' not in rule.action.lower():
raise UFWError('Invalid destination for %s' % rule.action)
cmd.append(dst.dev)
cmd.append('from')
push(src)
cmd.append('to')
push(dst)
return ' '.join(cmd)
class UFWManager(object):
ACTION_ALLOW_IN = 'allow in'
ACTION_ALLOW_OUT = 'allow out'
ACTION_DENY_IN = 'deny in'
ACTION_DENY_OUT = 'deny out'
ACTION_REJECT_IN = 'reject in'
ACTION_REJECT_OUT = 'reject out'
def __init__(self, con=None):
self._con = con
self._rules = None
self._enabled = None
self._transactions = []
def _bool(self, status):
return status == 'active'
def _load(self):
status = self._con.run('ufw status numbered')
self._rules = []
for line in status.split('\n'):
line = line.strip()
if not line or '(v6)' in line:
continue
status = STATUS_LINE.match(line)
if status is not None:
self._enabled = self._bool(status.group(1))
continue
rule = RULE_LINE.match(line)
if rule is None:
continue
number, destination, action, source = rule.groups()
self._rules.append(UFWRule(action, source, destination, number))
@property
def rules(self):
"""
List of current rules.
"""
if self._rules is None:
self._load()
return self._rules
@property
def enabled(self):
"""
Get the current actual status of ufw. Setting enabled, on the
other hand, will not take effect until you call commit().
"""
if self._enabled is None:
self._load()
return self._enabled
@enabled.setter
def enabled(self, value):
"""
Set the enabled status. Note that this doesn't take effect
until you apply the change by calling commit().
"""
self._transactions.append(
StatusOp(value)
)
def addRule(self, action, source='any', destination='any'):
"""
Add a new UFW rule
:action: One of the actions defined
ACTION_ALLOW_IN
ACTION_ALLOW_OUT
ACTION_DENY_IN
ACTION_DENY_OUT
ACTION_REJECT_IN
ACTION_REJECT_OUT
:source: Source to match, default to 'any'. Examples of valid sources
'192.168.1.0/24 proto tcp'
'22/tcp'
'any'
'any on eth0'
:destination: Destination to match, default to 'any'.
"""
self._transactions.append(
RuleOp(UFWRule(action, source, destination))
)
def removeRule(self, rule):
"""
Remove the specified rule
:rule: rule to remove
"""
self._transactions.append(
RuleOp(rule, add=False)
)
def reset(self):
"""
Remove all rules.
"""
self._transactions.append(
ResetOp()
)
def portOpen(self, port):
"""
Shortcut to open a port.
"""
self.addRule(UFWManager.ACTION_ALLOW_IN, 'any', str(port))
def portClose(self, port):
"""
Shortcut to close a port (which was previously opened by portOpen).
"""
port = str(port)
for rule in self.rules:
if rule.destination == port:
self.removeRule(rule)
def commit(self):
"""
Apply all pending actions.
:warning: Since all management is done over SSH, make sure that you
have a rule to allow your access over SSH.
:example:
ufw.enabled = False
ufw.reset()
ufw.addRule(ufw.ACTION_ALLOW_IN, 'any', '22/tcp')
ufw.enabled = True
ufw.commit()
"""
with settings(abort_exception=UFWError):
while self._transactions:
op = self._transactions.pop(0)
self._con.run('ufw %s' % op.cmd())
# force reload on next access.
self._rules = None
self._status = None
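# Illustrative usage sketch, not part of the original module: queue a reset, an SSH
# allow rule and an enable, then apply them in one commit. The connection argument is a
# hypothetical placeholder for whatever j.ssh connection is in use.
def _example_open_ssh_port(connection):
    ufw = UFWManagerFactory().get(connection)
    ufw.reset()
    ufw.addRule(UFWManager.ACTION_ALLOW_IN, source='any', destination='22/tcp')
    ufw.enabled = True
    ufw.commit()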
| 25.332203
| 80
| 0.540479
|
fe92707ac951326db9bc91c54b5ba2d78cb380f5
| 320
|
py
|
Python
|
descuento.py
|
BrayanTorres2/algoritmos_programacion
|
21bfa47e50f8405fb683064b6cf5071158609f63
|
[
"MIT"
] | null | null | null |
descuento.py
|
BrayanTorres2/algoritmos_programacion
|
21bfa47e50f8405fb683064b6cf5071158609f63
|
[
"MIT"
] | null | null | null |
descuento.py
|
BrayanTorres2/algoritmos_programacion
|
21bfa47e50f8405fb683064b6cf5071158609f63
|
[
"MIT"
] | 1
|
2021-11-19T02:25:30.000Z
|
2021-11-19T02:25:30.000Z
|
"""
Entradas
total_pagar-->float-->total_ pagar
Salidas
descuento-->float-->descuento
"""
#entradas
total_pagar=float(input("Digite total a pagar"))
#Caja negra
descuento=total_pagar-total_pagar*0.15#float
#Salida
print("debe pagar: ", descuento)
print("debe pagar:"+str(descuento))
print(f"debe pagar: {descuento}")
| 17.777778
| 48
| 0.74375
|
8e038e354ec4626e8fb649c5decd2786272676f0
| 3,763
|
py
|
Python
|
archive/model_archive/LSTMNoWindows.py
|
Sensors-in-Paradise/OpportunityML
|
a123b4842de45f735d517be6bcd96ca35171db91
|
[
"MIT"
] | 1
|
2022-03-25T16:00:36.000Z
|
2022-03-25T16:00:36.000Z
|
archive/model_archive/LSTMNoWindows.py
|
Sensors-in-Paradise/OpportunityML
|
a123b4842de45f735d517be6bcd96ca35171db91
|
[
"MIT"
] | 1
|
2022-03-28T13:50:28.000Z
|
2022-03-28T13:50:28.000Z
|
archive/model_archive/LSTMNoWindows.py
|
Sensors-in-Paradise/OpportunityML
|
a123b4842de45f735d517be6bcd96ca35171db91
|
[
"MIT"
] | null | null | null |
from models.JensModel import LSTMModel
from tensorflow.keras.layers import Conv1D, Dense, Dropout # type: ignore
import tensorflow as tf # type: ignore
from tensorflow.keras.layers import Input, Dense, LSTM, concatenate, Activation, Masking # type: ignore
from tensorflow.keras.layers import Conv1D, BatchNormalization, GlobalAveragePooling1D, Permute, Dropout # type: ignore
from tensorflow.keras.models import Model # type: ignore
from utils.Window import Window
import numpy as np
class LSTMModelNoWindows(LSTMModel):
def __init__(self, **kwargs):
"""
LSTM
:param kwargs:
n_max_timesteps: int
stride_size: int
test_percentage: float
n_features: int
n_outputs: int
"""
# hyper params to instance vars
self.n_max_timesteps = kwargs["n_max_timesteps"]
self.test_percentage = kwargs["test_percentage"]
self.verbose = 1
self.epochs = 100
self.batch_size = 5
# create model
self.model = self.__create_model(kwargs["n_features"], kwargs["n_outputs"])
def __create_model(self, n_features, n_outputs):
# window_size, n_features, n_outputs = X.shape[1], X.shape[2], y.shape[1]
print(
f"Building model for {self.n_max_timesteps} timesteps (max timesteps) and {n_features} features"
)
ip = Input(shape=(n_features, self.n_max_timesteps))
x = Masking()(ip)
x = LSTM(8)(x)  # run the LSTM over the masked input so padded timesteps are skipped
x = Dropout(0.8)(x)
y = Permute((2, 1))(ip)
y = Conv1D(128, 8, padding="same", kernel_initializer="he_uniform")(y)
y = BatchNormalization()(y)
y = Activation("relu")(y)
y = super().squeeze_excite_block(y)
y = Conv1D(256, 5, padding="same", kernel_initializer="he_uniform")(y)
y = BatchNormalization()(y)
y = Activation("relu")(y)
y = super().squeeze_excite_block(y)
y = Conv1D(128, 3, padding="same", kernel_initializer="he_uniform")(y)
y = BatchNormalization()(y)
y = Activation("relu")(y)
y = GlobalAveragePooling1D()(y)
x = concatenate([x, y])
out = Dense(n_outputs, activation="softmax")(x)
model = Model(ip, out)
model.compile(
loss="categorical_crossentropy", optimizer="rmsprop", metrics=["accuracy"]
)
return model
def windowize(self, recordings):
windows = []
sensor_arrays = []
# convert DFs into numpy arrays for further operations
for recording in recordings:
sensor_array = recording.sensor_frame.to_numpy()
sensor_arrays.append(sensor_array)
# get max number of timesteps over all recordings
n_max_timesteps = max(
list(map(lambda sensor_array: sensor_array.shape[0], sensor_arrays))
)
# post-pad all sensor arrays with 0's so they all have timestep size of n_max_timesteps
for i in range(len(sensor_arrays)):
n_to_pad = n_max_timesteps - sensor_arrays[i].shape[0]
sensor_arrays[i] = np.pad(
sensor_arrays[i],
[(0, n_to_pad), (0, 0)],
mode="constant",
constant_values=0,
)
# swap timestep and feature axis
sensor_arrays = np.swapaxes(sensor_arrays, 1, 2)
# add padded arrays to list of Window objects
for i in range(len(recordings)):
recording = recordings[i]
padded_sensor_array = sensor_arrays[i]
recording_window = Window(
padded_sensor_array, recording.activity, recording.subject
)
windows.append(recording_window)
return windows
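# Illustrative sketch, not part of the original module: the post-padding and axis swap
# performed by windowize above, shown on two hypothetical recordings with 3 features.
def _example_padding():
    arrays = [np.ones((5, 3)), np.ones((8, 3))]
    n_max = max(a.shape[0] for a in arrays)
    padded = [np.pad(a, [(0, n_max - a.shape[0]), (0, 0)], mode="constant", constant_values=0)
              for a in arrays]
    # resulting shape: (recording, feature, timestep) == (2, 3, 8)
    return np.swapaxes(np.stack(padded), 1, 2)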
| 33.598214
| 120
| 0.612012
|
7452711eed4529cc0f4e3b0aae3b9eab16598d03
| 2,023
|
py
|
Python
|
ydkgen/printer/go/class_get_children_printer.py
|
geordish/ydk-gen
|
eb95b2a86f61180a773b18d0c65b6b906ed542ee
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
ydkgen/printer/go/class_get_children_printer.py
|
geordish/ydk-gen
|
eb95b2a86f61180a773b18d0c65b6b906ed542ee
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
ydkgen/printer/go/class_get_children_printer.py
|
geordish/ydk-gen
|
eb95b2a86f61180a773b18d0c65b6b906ed542ee
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""
source_printer.py
prints Go class method
"""
from .function_printer import FunctionPrinter
from ydkgen.common import get_qualified_yang_name
class ClassGetChildrenPrinter(FunctionPrinter):
def __init__(self, ctx, clazz, leafs, children):
super(ClassGetChildrenPrinter, self).__init__(ctx, clazz, leafs, children)
def print_function_header(self):
self.print_function_header_helper(
'GetChildren', return_type='map[string]types.Entity')
def print_function_body(self):
self.ctx.writeln('children := make(map[string]types.Entity)')
for child in self.children:
if child.is_many:
self._print_many(child)
else:
path = get_qualified_yang_name(child)
self.ctx.writeln('children["%s"] = &%s.%s' % (
path, self.class_alias, child.go_name()))
self.ctx.writeln('return children')
def _print_many(self, child):
child_stmt = '%s.%s' % (self.class_alias, child.go_name())
self.ctx.writeln('for i := range %s {' % (child_stmt))
self.ctx.lvl_inc()
child_stmt = '%s[i]' % child_stmt
self.ctx.writeln('children[{0}.GetSegmentPath()] = &{0}'.format(child_stmt))
self.ctx.lvl_dec()
self.ctx.writeln('}')
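# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Rough shape of the Go emitted by this printer for a class with one list child
# ("interface") and one scalar child ("config"); the names are hypothetical and
# the exact output also depends on FunctionPrinter's header/footer helpers.
#
#   func (x *X) GetChildren() map[string]types.Entity {
#       children := make(map[string]types.Entity)
#       for i := range x.Interface {
#           children[x.Interface[i].GetSegmentPath()] = &x.Interface[i]
#       }
#       children["config"] = &x.Config
#       return children
#   }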
| 37.462963
| 84
| 0.625803
|
cbdcc405dc18c27ab3e09d58ef27a9ba50785e32
| 2,457
|
py
|
Python
|
scripts/evaluation/config.py
|
taikusa/rcrs-server
|
29b530a8a0f2d3a604c6a71020d2e1dd4972c719
|
[
"BSD-3-Clause"
] | 1
|
2020-06-21T21:27:57.000Z
|
2020-06-21T21:27:57.000Z
|
scripts/evaluation/config.py
|
taikusa/rcrs-server
|
29b530a8a0f2d3a604c6a71020d2e1dd4972c719
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/evaluation/config.py
|
taikusa/rcrs-server
|
29b530a8a0f2d3a604c6a71020d2e1dd4972c719
|
[
"BSD-3-Clause"
] | null | null | null |
all_teams = ["ANC", "APO", "CSU", "GUC", "LTI", "MIN", "MRL", "NAI", "POS", "RI1", "RAK", "SOS", "ZJU"]
semi_teams = ["APO", "CSU", "GUC", "MIN", "MRL", "POS", "SOS", "ZJU"]
team_names = {
# "BAS" : "Baseline (no agents)",
"ANC" : "anct_rescue2013",
"APO" : "Apollo-Rescue",
"CSU" : "CSU-YUNLU",
"GUC" : "GUC_ArtSapience",
"LTI" : "LTI-Agent-Rescue",
"MIN" : "MinERS",
"MRL" : "MRL",
"NAI" : "NAITO-Rescue2013",
"POS" : "Poseidon",
"RI1" : "Ri-one",
"RAK" : "RoboAKUT",
"SOS" : "S.O.S.",
"ZJU" : "ZJUBase"
}
day1 = {'name' : "Day 1",
'shortname' : "Day1",
'maps' : ["Berlin1", "Eindhoven1", "Kobe1", "Paris1", "VC1"],
'teams' : all_teams}
day2 = {'name' : "Day 2",
'shortname' : "Day2",
'maps' : ["Mexico1", "Kobe2", "Eindhoven2", "Istanbul1", "Paris2"],
'teams' : all_teams,
'merge_with' : day1,
'highlight' : 8}
semi = {'name' : "Semifinals",
'shortname' : "Semifinals",
'maps' : ["VC2", "Berlin2", "Kobe3", "Istanbul2", "Mexico2", "Eindhoven3", "Paris3", "Eindhoven4"],
'teams' : semi_teams,
'highlight' : 4}
# final = {'name' : "Finals",
# 'shortname' : "final",
# 'maps' : ["Eindhoven1"],
# 'teams' : all_teams,
# 'merge_with' : day3,
# 'show_ranks' : 1}
rounds = [day1, day2, semi]
# semi_teams = ["RAK", "SBC", "POS", "IAM", "MRL", "RI1", "SEU", "RMA"]
# final_teams = ["POS", "IAM", "SEU", "RMA"]
# day1 = {'name' : "Preliminaries Day 1",
# 'shortname' : "Preliminary1",
# 'maps' : ["VC1", "Paris1", "Kobe1", "Berlin1", "Istanbul1"],
# 'teams' : all_teams}
# day2 = {'name' : "Preliminaries Day 2",
# 'shortname' : "Preliminary2",
# 'maps' : ["Kobe2", "Paris2", "Istanbul2", "Berlin2", "VC2"],
# 'teams' : all_teams
# 'merge_with' : day1
# 'highlight' : 8}
# semi = {'name' : "Semifinals",
# 'shortname' : "Semifinals",
# 'maps' : ["Kobe2", "Paris2", "Istanbul2", "Berlin2", "VC2"],
# 'teams' : semi_teams,
# 'highlight' : 4}
# final = {'name' : "Finals",
# 'shortname' : "Finals",
# 'maps' : ["Kobe2", "Paris2", "Istanbul2", "Berlin2", "VC2"],
# 'teams' : ["Paris5", "Berlin5", "Kobe4", "Istanbul5", "VC5"],
# 'show_ranks' : 3}
# rounds = [day1, day2, semi, final]
log_location = "logs/2013"
add_downloads = True
| 30.333333
| 107
| 0.490435
|
24bd2b35d4f8a1a9ef5e3323ebff8ed767132b3d
| 4,215
|
py
|
Python
|
dask/dataframe/optimize.py
|
srijan-deepsource/dask
|
0673d9084e02f985f3fdf5ba6ede80e8de5ac15c
|
[
"BSD-3-Clause"
] | 20
|
2015-01-19T14:04:10.000Z
|
2020-01-14T03:43:19.000Z
|
dask/dataframe/optimize.py
|
srijan-deepsource/dask
|
0673d9084e02f985f3fdf5ba6ede80e8de5ac15c
|
[
"BSD-3-Clause"
] | 12
|
2015-01-22T22:00:43.000Z
|
2020-07-28T19:22:16.000Z
|
dask/dataframe/optimize.py
|
srijan-deepsource/dask
|
0673d9084e02f985f3fdf5ba6ede80e8de5ac15c
|
[
"BSD-3-Clause"
] | 7
|
2015-01-04T18:50:00.000Z
|
2020-07-29T11:00:04.000Z
|
""" Dataframe optimizations """
import operator
from dask.base import tokenize
from ..optimization import cull, fuse
from .. import config, core
from ..highlevelgraph import HighLevelGraph
from ..utils import ensure_dict
from ..blockwise import optimize_blockwise, fuse_roots, Blockwise
def optimize(dsk, keys, **kwargs):
if isinstance(dsk, HighLevelGraph):
# Think about an API for this.
flat_keys = list(core.flatten(keys))
dsk = optimize_read_parquet_getitem(dsk, keys=flat_keys)
dsk = optimize_blockwise(dsk, keys=flat_keys)
dsk = fuse_roots(dsk, keys=flat_keys)
dsk = ensure_dict(dsk)
if isinstance(keys, list):
dsk, dependencies = cull(dsk, list(core.flatten(keys)))
else:
dsk, dependencies = cull(dsk, [keys])
fuse_subgraphs = config.get("optimization.fuse.subgraphs")
if fuse_subgraphs is None:
fuse_subgraphs = True
dsk, dependencies = fuse(
dsk, keys, dependencies=dependencies, fuse_subgraphs=fuse_subgraphs,
)
dsk, _ = cull(dsk, keys)
return dsk
def optimize_read_parquet_getitem(dsk, keys):
# find the keys to optimize
from .io.parquet.core import ParquetSubgraph
read_parquets = [k for k, v in dsk.layers.items() if isinstance(v, ParquetSubgraph)]
layers = dsk.layers.copy()
dependencies = dsk.dependencies.copy()
for k in read_parquets:
columns = set()
update_blocks = {}
for dep in dsk.dependents[k]:
block = dsk.layers[dep]
# Check if we're a read_parquet followed by a getitem
if not isinstance(block, Blockwise):
# getitem are Blockwise...
return dsk
if len(block.dsk) != 1:
# ... with a single item...
return dsk
if list(block.dsk.values())[0][0] != operator.getitem:
# ... where this value is __getitem__...
return dsk
if any(block.output == x[0] for x in keys if isinstance(x, tuple)):
# ... but bail on the optimization if the getitem is what's requested
# These keys are structured like [('getitem-<token>', 0), ...]
# so we check for the first item of the tuple.
# See https://github.com/dask/dask/issues/5893
return dsk
block_columns = block.indices[1][0]
if isinstance(block_columns, str):
block_columns = [block_columns]
columns |= set(block_columns)
update_blocks[dep] = block
old = layers[k]
if columns and columns < set(old.meta.columns):
columns = list(columns)
meta = old.meta[columns]
name = "read-parquet-" + tokenize(old.name, columns)
assert len(update_blocks)
for block_key, block in update_blocks.items():
# (('read-parquet-old', (.,)), ( ... )) ->
# (('read-parquet-new', (.,)), ( ... ))
new_indices = ((name, block.indices[0][1]), block.indices[1])
numblocks = {name: block.numblocks[old.name]}
new_block = Blockwise(
block.output,
block.output_indices,
block.dsk,
new_indices,
numblocks,
block.concatenate,
block.new_axes,
)
layers[block_key] = new_block
dependencies[block_key] = {name}
dependencies[name] = dependencies.pop(k)
else:
# Things like df[df.A == 'a'], where the argument to
# getitem is not a column name
name = old.name
meta = old.meta
columns = list(meta.columns)
new = ParquetSubgraph(
name, old.engine, old.fs, meta, columns, old.index, old.parts, old.kwargs
)
layers[name] = new
if name != old.name:
del layers[old.name]
new_hlg = HighLevelGraph(layers, dependencies)
return new_hlg
| 33.991935
| 88
| 0.563227
|
6cfb114e0a8693e73285d5aed9257ae0230d86ff
| 2,602
|
py
|
Python
|
nova/api/openstack/compute/views/flavors.py
|
NetApp/nova
|
ca490d48a762a423449c654d5a7caeadecf2f6ca
|
[
"Apache-2.0"
] | 2
|
2015-11-05T04:52:34.000Z
|
2016-03-07T03:00:06.000Z
|
nova/api/openstack/compute/views/flavors.py
|
NetApp/nova
|
ca490d48a762a423449c654d5a7caeadecf2f6ca
|
[
"Apache-2.0"
] | 1
|
2018-01-19T07:50:49.000Z
|
2018-01-19T07:50:49.000Z
|
nova/api/openstack/compute/views/flavors.py
|
NetApp/nova
|
ca490d48a762a423449c654d5a7caeadecf2f6ca
|
[
"Apache-2.0"
] | 1
|
2020-07-24T07:32:11.000Z
|
2020-07-24T07:32:11.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import common
class ViewBuilder(common.ViewBuilder):
_collection_name = "flavors"
def basic(self, request, flavor):
return {
"flavor": {
"id": flavor["flavorid"],
"name": flavor["name"],
"links": self._get_links(request,
flavor["flavorid"],
self._collection_name),
},
}
def show(self, request, flavor):
flavor_dict = {
"flavor": {
"id": flavor["flavorid"],
"name": flavor["name"],
"ram": flavor["memory_mb"],
"disk": flavor["root_gb"],
"vcpus": flavor.get("vcpus") or "",
"links": self._get_links(request,
flavor["flavorid"],
self._collection_name),
},
}
return flavor_dict
def index(self, request, flavors):
"""Return the 'index' view of flavors."""
return self._list_view(self.basic, request, flavors)
def detail(self, request, flavors):
"""Return the 'detail' view of flavors."""
return self._list_view(self.show, request, flavors)
def _list_view(self, func, request, flavors):
"""Provide a view for a list of flavors."""
flavor_list = [func(request, flavor)["flavor"] for flavor in flavors]
flavors_links = self._get_collection_links(request,
flavors,
self._collection_name,
"flavorid")
flavors_dict = dict(flavors=flavor_list)
if flavors_links:
flavors_dict["flavors_links"] = flavors_links
return flavors_dict
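# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Approximate payload shape produced by detail()/_list_view(); the values are
# made up and the "links" entries come from common.ViewBuilder._get_links():
#
#   {"flavors": [{"id": "1", "name": "m1.tiny", "ram": 512, "disk": 1,
#                 "vcpus": 1, "links": [...]}, ...]}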
| 35.643836
| 78
| 0.544197
|
653783fe3967b3c031395beb8ef4772406a2d441
| 4,209
|
py
|
Python
|
vmtkScripts/vmtkbranchpatching.py
|
michelebucelli/vmtk
|
738bd1d152e8836847ab4d75f7e8360bd574e724
|
[
"Apache-2.0"
] | 217
|
2015-01-05T19:08:30.000Z
|
2022-03-31T12:14:59.000Z
|
vmtkScripts/vmtkbranchpatching.py
|
mrp089/vmtk
|
64675f598e31bc6be3d4fba903fb59bf1394f492
|
[
"Apache-2.0"
] | 226
|
2015-03-31T07:16:06.000Z
|
2022-03-01T14:59:30.000Z
|
vmtkScripts/vmtkbranchpatching.py
|
mrp089/vmtk
|
64675f598e31bc6be3d4fba903fb59bf1394f492
|
[
"Apache-2.0"
] | 132
|
2015-02-16T11:38:34.000Z
|
2022-03-18T04:38:45.000Z
|
#!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtkbranchpatching.py,v $
## Language: Python
## Date: $Date: 2006/07/07 10:46:17 $
## Version: $Revision: 1.9 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY
import vtk
from vmtk import vtkvmtk
import sys
from vmtk import pypes
class vmtkBranchPatching(pypes.pypeScript):
def __init__(self):
pypes.pypeScript.__init__(self)
self.Surface = None
self.PatchedData = None
self.CircularPatching = 1
self.UseConnectivity = 1
self.LongitudinalPatchSize = 1.0
self.CircularNumberOfPatches = 1
self.PatchSize = [0.0, 0.0]
self.GroupIdsArrayName = 'GroupIds'
self.LongitudinalMappingArrayName = 'AbscissaMetric'
self.CircularMappingArrayName = 'AngularMetric'
self.LongitudinalPatchNumberArrayName = 'Slab'
self.CircularPatchNumberArrayName = 'Sector'
self.PatchAreaArrayName = 'PatchArea'
self.SetScriptName('vmtkbranchpatching')
self.SetScriptDoc('cut a set of contiguous rectangular regions on a surface that follow iso-contours in the StretchedMapping and AngularMetric arrays')
self.SetInputMembers([
['Surface','i','vtkPolyData',1,'','','vmtksurfacereader'],
['PatchSize','patchsize','float',2,'(0.0,)'],
['LongitudinalPatchSize','longitudinalpatchsize','float',1,'(0.0,)'],
['CircularNumberOfPatches','circularpatches','int',1,'(0,)'],
['CircularPatching','circularpatching','bool',1],
['UseConnectivity','connectivity','bool',1],
['GroupIdsArrayName','groupidsarray','str',1],
['LongitudinalMappingArrayName','longitudinalmappingarray','str',1],
['CircularMappingArrayName','circularmappingarray','str',1],
['LongitudinalPatchNumberArrayName','longitudinalpatchnumberarray','str',1],
['CircularPatchNumberArrayName','circularpatchnumberarray','str',1],
['PatchAreaArrayName','patchareaarray','str',1]
])
self.SetOutputMembers([
['Surface','o','vtkPolyData',1,'','','vmtksurfacewriter'],
['PatchedData','patcheddata','vtkImageData',1,'','','vmtkimagewriter'],
['PatchSize','patchsize','float',2],
['LongitudinalPatchNumberArrayName','longitudinalpatchnumberarray','str',1],
['CircularPatchNumberArrayName','circularpatchnumberarray','str',1],
['PatchAreaArrayName','patchareaarray','str',1]
])
def Execute(self):
        if self.Surface is None:
self.PrintError('Error: No input surface.')
self.PatchSize = [self.LongitudinalPatchSize, 1.0/float(self.CircularNumberOfPatches)]
patchingFilter = vtkvmtk.vtkvmtkPolyDataPatchingFilter()
patchingFilter.SetInputData(self.Surface)
patchingFilter.SetCircularPatching(self.CircularPatching)
patchingFilter.SetUseConnectivity(self.UseConnectivity)
patchingFilter.SetLongitudinalMappingArrayName(self.LongitudinalMappingArrayName)
patchingFilter.SetCircularMappingArrayName(self.CircularMappingArrayName)
patchingFilter.SetLongitudinalPatchNumberArrayName(self.LongitudinalPatchNumberArrayName)
patchingFilter.SetCircularPatchNumberArrayName(self.CircularPatchNumberArrayName)
patchingFilter.SetPatchAreaArrayName(self.PatchAreaArrayName)
patchingFilter.SetGroupIdsArrayName(self.GroupIdsArrayName)
patchingFilter.SetPatchSize(self.PatchSize)
patchingFilter.Update()
self.Surface = patchingFilter.GetOutput()
self.PatchedData = patchingFilter.GetPatchedData()
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
| 40.471154
| 159
| 0.68401
|
bbfbeef81a0bfc9b4ee6a61bbe3f8132b1dcc8db
| 1,897
|
py
|
Python
|
provision/management/commands/sync_clusterpaths_from_int_to_dev.py
|
NOAA-GSD/qrba_os
|
83d079e43a7fa026c5ced79d7bc65f62cd74b90b
|
[
"CC0-1.0"
] | 1
|
2019-11-14T03:43:32.000Z
|
2019-11-14T03:43:32.000Z
|
provision/management/commands/sync_clusterpaths_from_int_to_dev.py
|
NOAA-GSD/qrba_os
|
83d079e43a7fa026c5ced79d7bc65f62cd74b90b
|
[
"CC0-1.0"
] | null | null | null |
provision/management/commands/sync_clusterpaths_from_int_to_dev.py
|
NOAA-GSD/qrba_os
|
83d079e43a7fa026c5ced79d7bc65f62cd74b90b
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/python
from __future__ import unicode_literals
# https://stackoverflow.com/questions/19475955/using-django-models-in-external-python-script
from django.core.management.base import BaseCommand, CommandError
import datetime
from django.utils import timezone
from provision.models import Cluster
from qrba import settings
class Command(BaseCommand):
help = "synchronizes cluster path related objects from the testcluster to the integration cluster"
def handle(self, *args, **options):
qr = Cluster.objects.filter(name=settings.QUMULO_intcluster['name'])
if qr.count() == 0:
intserver = Cluster.objects.create(name=settings.QUMULO_intcluster['name'],
ipaddr=settings.QUMULO_intcluster['ipaddr'],
adminpassword=settings.QUMULO_intcluster['adminpassword'])
intserver.save()
else:
intserver = qr[0]
qr = Cluster.objects.filter(name=settings.QUMULO_devcluster['name'])
if qr.count() == 0:
testserver = Cluster.objects.create(name=settings.QUMULO_devcluster['name'],
ipaddr=settings.QUMULO_devcluster['ipaddr'],
adminpassword=settings.QUMULO_devcluster['adminpassword'])
testserver.save()
else:
testserver = qr[0]
now = timezone.now() + datetime.timedelta(days=30)
print("now: " + str(now))
print("intserver is: " + str(intserver))
print("testserver is: " + str(testserver))
print("calling " + str(testserver) + ".sync_clusterpaths_from_cluster( " + str(intserver) + " ) at " + str(now))
activity = testserver.sync_clusterpaths_from_cluster(intserver)
print(" activity is " + str(activity) + " at " + str(now))
| 44.116279
| 120
| 0.618872
|
e13e1f25be8830ac2e2c100cb6a7a6f71d674ced
| 509
|
py
|
Python
|
server/lib/Cgi.py
|
worldflat/Aceserver
|
fbc63f2128e17af55034297b3619004c06b2132f
|
[
"Apache-2.0"
] | null | null | null |
server/lib/Cgi.py
|
worldflat/Aceserver
|
fbc63f2128e17af55034297b3619004c06b2132f
|
[
"Apache-2.0"
] | null | null | null |
server/lib/Cgi.py
|
worldflat/Aceserver
|
fbc63f2128e17af55034297b3619004c06b2132f
|
[
"Apache-2.0"
] | null | null | null |
from http.server import HTTPServer, CGIHTTPRequestHandler
import socketserver
class CGIServer:
def __init__(self, port, address, dir):
self.port = port
self.addr = address
self.dir = dir
def runserver(self):
port = self.port
addr = self.addr
dir = self.dir
class Handler(CGIHTTPRequestHandler):
cgi_directories = ["cgi-bin"]
print(type(port))
httpd = HTTPServer((addr, port), Handler)
httpd.serve_forever()
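# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Hypothetical invocation; the port, address, and directory values are made up:
#
#   CGIServer(port=8000, address="localhost", dir=".").runserver()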
| 26.789474
| 57
| 0.616896
|
d00701b322548b7242292ce26f2c52be7e121e71
| 4,074
|
py
|
Python
|
textworld/envs/zmachine/jericho.py
|
bibidang/CS4033ShortRL
|
336a453efe6232008be05e0d7fb41665ec292271
|
[
"MIT"
] | 2
|
2021-04-25T21:27:55.000Z
|
2021-04-28T08:13:50.000Z
|
textworld/envs/zmachine/jericho.py
|
bibidang/CS4033ShortRL
|
336a453efe6232008be05e0d7fb41665ec292271
|
[
"MIT"
] | null | null | null |
textworld/envs/zmachine/jericho.py
|
bibidang/CS4033ShortRL
|
336a453efe6232008be05e0d7fb41665ec292271
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import os
import warnings
import jericho
import textworld
from textworld.core import GameState
from textworld.core import GameNotRunningError
class JerichoEnv(textworld.Environment):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._seed = -1
self._jericho = None
self.gamefile = None
self._reset = False
def load(self, z_file: str) -> None:
self.gamefile = os.path.abspath(z_file)
_, ext = os.path.splitext(os.path.basename(self.gamefile))
# Check if game is supported by Jericho.
if not ext.startswith(".z"):
raise ValueError("Only .z[1-8] files are supported!")
if not os.path.isfile(self.gamefile):
raise FileNotFoundError(self.gamefile)
if self._jericho is None:
# Start the game using Jericho.
self._jericho = jericho.FrotzEnv(self.gamefile, self._seed)
else:
self._jericho.load(self.gamefile)
def __del__(self) -> None:
self.close()
@property
def game_running(self) -> bool:
""" Determines if the game is still running. """
return self._jericho is not None
def seed(self, seed=None):
self._seed = seed
if self._jericho:
self._jericho.seed(self._seed)
return self._seed
def _gather_infos(self):
""" Adds additional information to the internal state. """
self.state.feedback = self.state.raw
if not self._jericho.is_fully_supported:
return # No more information can be gathered.
for attr in self.infos.basics:
self.state[attr] = getattr(self._jericho, "get_" + attr, lambda: self.state.get(attr))()
for attr in self.infos.extras:
self.state["extra.{}".format(attr)] = getattr(self._jericho, "get_" + attr, lambda: None)()
# Deal with information that has different method name in Jericho.
self.state["won"] = self._jericho.victory()
self.state["lost"] = self._jericho.game_over()
self.state["score"] = self._jericho.get_score()
self.state["location"] = self._jericho.get_player_location()
def reset(self):
if not self.game_running:
raise GameNotRunningError("Call env.load(gamefile) before env.reset().")
self.state = GameState()
self.state.raw, _ = self._jericho.reset()
self._gather_infos()
self._reset = True
return self.state
def _send(self, command: str) -> str:
""" Send a command directly to the interpreter.
This method will not affect the internal state variable.
"""
feedback, _, _, _ = self._jericho.step(command)
return feedback
def step(self, command):
if not self.game_running or not self._reset:
raise GameNotRunningError()
self.state = GameState()
self.state.last_command = command.strip()
res = self._jericho.step(self.state.last_command)
# As of Jericho >= 2.1.0, the reward is returned instead of the score.
self.state.raw, _, self.state.done, _ = res
self._gather_infos()
return self.state, self.state.score, self.state.done
def close(self):
if self.game_running:
self._jericho.close()
self._jericho = None
self._reset = False
def copy(self) -> "JerichoEnv":
""" Return a copy of this environment at the same state. """
env = JerichoEnv(self.infos)
env._seed = self._seed
if self.gamefile:
env.load(self.gamefile)
if self._jericho:
env._jericho = self._jericho.copy()
# Copy core Environment's attributes.
env.state = self.state.copy()
env.infos = self.infos.copy()
return env
# By default disable the warning about unsupported games.
warnings.simplefilter("ignore", jericho.UnsupportedGameWarning)
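# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Minimal happy path, assuming a locally available z-machine game file (the path
# and the `infos` request object below are hypothetical):
#
#   env = JerichoEnv(infos)              # infos: a textworld EnvInfos-like request
#   env.load("games/zork1.z5")
#   state = env.reset()
#   state, score, done = env.step("open mailbox")
#   env.close()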
| 31.581395
| 103
| 0.62273
|
12b9af2af54fab0d1dc7c4d663f91a5f4347361f
| 4,219
|
py
|
Python
|
generator/generate_routes.py
|
paulritzy/dropbox-sdk-js
|
cf7ea0fed6fb5cbc7e641cb70b2ab5024150177c
|
[
"MIT"
] | 1
|
2022-02-22T14:29:43.000Z
|
2022-02-22T14:29:43.000Z
|
generator/generate_routes.py
|
paulritzy/dropbox-sdk-js
|
cf7ea0fed6fb5cbc7e641cb70b2ab5024150177c
|
[
"MIT"
] | null | null | null |
generator/generate_routes.py
|
paulritzy/dropbox-sdk-js
|
cf7ea0fed6fb5cbc7e641cb70b2ab5024150177c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import glob
import json
import os
import subprocess
import sys
cmdline_desc = """\
Runs Stone to generate JS routes for the Dropbox client.
"""
_cmdline_parser = argparse.ArgumentParser(description=cmdline_desc)
_cmdline_parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='Print debugging statements.',
)
_cmdline_parser.add_argument(
'spec',
nargs='*',
type=str,
help='Path to API specifications. Each must have a .stone extension.',
)
_cmdline_parser.add_argument(
'-s',
'--stone',
type=str,
help='Path to clone of stone repository.',
)
def main():
"""The entry point for the program."""
args = _cmdline_parser.parse_args()
verbose = args.verbose
if args.spec:
specs = args.spec
else:
# If no specs were specified, default to the spec submodule.
        specs = glob.glob('dropbox-api-spec/*.stone')  # glob order is arbitrary; sorted below
specs.sort()
specs = [os.path.join(os.getcwd(), s) for s in specs]
stone_path = os.path.abspath('stone')
if args.stone:
stone_path = args.stone
dropbox_pkg_path = os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]), '../lib'))
if verbose:
print('Dropbox package path: %s' % dropbox_pkg_path)
typescript_template_path = os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]), 'typescript'))
if verbose:
print('TypeScript template path: %s' % typescript_template_path)
types_template_path = os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]), '../types'))
if verbose:
print('Types template path: %s' % types_template_path)
upload_arg = {
"match": ["style", "upload"],
"arg_name": "contents",
"arg_type": "Object",
"arg_docstring": "The file contents to be uploaded."
}
if verbose:
print('Generating JS types')
subprocess.check_output(
(['python3', '-m', 'stone.cli', 'js_types', dropbox_pkg_path] +
specs + ['-b', 'team'] + ['-a', 'host', '-a', 'style', '-a', 'auth'] +
['--', 'types.js', '-e', json.dumps(upload_arg)]),
cwd=stone_path)
if verbose:
print('Generating JS client routes for user routes')
o = subprocess.check_output(
(['python3', '-m', 'stone.cli', 'js_client', dropbox_pkg_path] +
specs + ['-a', 'host', '-a', 'style', '-a', 'auth', '-a', 'scope'] +
['--', 'routes.js', '-c', 'Dropbox', '--wrap-response-in', 'DropboxResponse', '--wrap-error-in', 'DropboxResponseError', '-a', 'scope']),
cwd=stone_path)
if verbose:
print(o)
if verbose:
print('Generating TSD types')
subprocess.check_output(
(['python3', '-m', 'stone.cli', 'tsd_types', typescript_template_path] +
specs + ['-b', 'team'] + ['-a', 'host', '-a', 'style'] +
['--', 'dropbox_types.d.tstemplate', 'dropbox_types.d.ts', '-e', json.dumps(upload_arg), '--export-namespaces']),
cwd=stone_path)
if verbose:
print('Generating TSD client routes for user routes')
subprocess.check_output(
(['python3', '-m', 'stone.cli', 'tsd_client', typescript_template_path] +
specs + ['-a', 'host', '-a', 'style', '-a', 'scope'] +
['--', 'index.d.tstemplate', 'index.d.ts', '--wrap-response-in', 'DropboxResponse', '--wrap-error-in', 'DropboxResponseError', '--import-namespaces', '--types-file', './dropbox_types', '-a', 'scope']),
cwd=stone_path)
typescript_generated_files = glob.glob('typescript/*.d.ts')
typescript_generated_files.sort()
typescript_generated_files = [os.path.join(os.getcwd(), f) for f in typescript_generated_files]
if verbose:
print('TypeScript generated files: %s' % typescript_generated_files)
if verbose:
print('Moving TSD routes and types to types/')
for file in typescript_generated_files:
subprocess.check_output(
(['mv', file , types_template_path]),
cwd=typescript_template_path
)
if __name__ == '__main__':
main()
| 33.220472
| 210
| 0.615312
|
09dbeca8c91cf3d2eedd37575120a47c32dc4add
| 1,611
|
py
|
Python
|
user_menu/menu.py
|
skjoshi267/ucd_20200200_shreyankjoshi
|
96f97950944458dd6b4c5081e88352b4deb425ed
|
[
"MIT"
] | null | null | null |
user_menu/menu.py
|
skjoshi267/ucd_20200200_shreyankjoshi
|
96f97950944458dd6b4c5081e88352b4deb425ed
|
[
"MIT"
] | null | null | null |
user_menu/menu.py
|
skjoshi267/ucd_20200200_shreyankjoshi
|
96f97950944458dd6b4c5081e88352b4deb425ed
|
[
"MIT"
] | null | null | null |
#Get all errors from Errors class
from errors import Errors
#Get all configurations from Configuration Class
from config import Configuration
#Import Stock Data and Operations
from stock_database import stock_data
import pandas as pd
#Select the mode of application : 1. Command Line 2. GUI
def user_menu():
operation_menu_txt = Configuration.tabulate_output("OPMENU")
try:
op_mode = int(input(operation_menu_txt+"\nSelect: "))
print("\n"+Errors.INVALID_CHOICE) if (op_mode != 1 and op_mode != 2) else run_app(op_mode)
except ValueError:
print("\n"+Errors.ONLY_NUMBERS)
#Run the application based on user choice
def run_app(app_mode):
    # Mode 1 is the command line, mode 2 the (unfinished) GUI, per the menu above
    if app_mode == 1:
        command_line()
    else:
        start_gui()
def command_line():
display_options()
def start_gui():
print("GUI is under construction. Exiting now.")
#Display all the operations for command line
def display_options():
option = 0
disp_menu_txt = Configuration.tabulate_output("DISPOPTMENU")
while (option != 4):
try:
option = int(input("\n"+disp_menu_txt+"\nSelect: "))
perform_operation(option)
except ValueError:
print("\n"+Errors.ONLY_NUMBERS)
#Perform CLI Operations
def perform_operation(op):
if op not in range(1,5):
print("\n"+Errors.INVALID_CHOICE)
elif op == 1:
stock_data.stock_main()
elif op == 2:
report()
elif op == 3:
meta()
else:
pass
def report():
print("\nFeature Rollback")
def meta():
print("\n'https://github.com/skjoshi267/ucd_20200200_shreyankjoshi'")
| 27.305085
| 100
| 0.671633
|
3f889ccdaa4b4e0b9c3fe9ea482ef42ae1b90269
| 92
|
py
|
Python
|
acmicpc/5554/5554.py
|
love-adela/algorithm
|
4ccd02173c96f8369962f1fd4e5166a221690fa2
|
[
"MIT"
] | 3
|
2019-03-09T05:19:23.000Z
|
2019-04-06T09:26:36.000Z
|
acmicpc/5554/5554.py
|
love-adela/algorithm
|
4ccd02173c96f8369962f1fd4e5166a221690fa2
|
[
"MIT"
] | 1
|
2020-02-23T10:38:04.000Z
|
2020-02-23T10:38:04.000Z
|
acmicpc/5554/5554.py
|
love-adela/algorithm
|
4ccd02173c96f8369962f1fd4e5166a221690fa2
|
[
"MIT"
] | 1
|
2019-05-22T13:47:53.000Z
|
2019-05-22T13:47:53.000Z
|
t = [int(input()) for _ in range(4)]
h = sum(t) // 60
print(h)
m = sum(t) - 60 * h
print(m)
| 15.333333
| 36
| 0.521739
|
9627f29bb71685a38f0493ba10b06d6258dfda85
| 1,510
|
py
|
Python
|
openslides/core/access_permissions.py
|
boehlke/OpenSlides
|
7a64fb83ebda2cb41706f62d7cfc5a63dbcab4a2
|
[
"MIT"
] | null | null | null |
openslides/core/access_permissions.py
|
boehlke/OpenSlides
|
7a64fb83ebda2cb41706f62d7cfc5a63dbcab4a2
|
[
"MIT"
] | null | null | null |
openslides/core/access_permissions.py
|
boehlke/OpenSlides
|
7a64fb83ebda2cb41706f62d7cfc5a63dbcab4a2
|
[
"MIT"
] | null | null | null |
from ..utils.access_permissions import BaseAccessPermissions
from ..utils.auth import GROUP_ADMIN_PK, async_in_some_groups
class ProjectorAccessPermissions(BaseAccessPermissions):
"""
Access permissions container for Projector and ProjectorViewSet.
"""
base_permission = "core.can_see_projector"
class TagAccessPermissions(BaseAccessPermissions):
"""
Access permissions container for Tag and TagViewSet.
"""
class ChatMessageAccessPermissions(BaseAccessPermissions):
"""
Access permissions container for ChatMessage and ChatMessageViewSet.
"""
base_permission = "core.can_use_chat"
class ProjectorMessageAccessPermissions(BaseAccessPermissions):
"""
Access permissions for ProjectorMessage.
"""
base_permission = "core.can_see_projector"
class CountdownAccessPermissions(BaseAccessPermissions):
"""
Access permissions for Countdown.
"""
base_permission = "core.can_see_projector"
class ConfigAccessPermissions(BaseAccessPermissions):
"""
Access permissions container for the config (ConfigStore and
ConfigViewSet).
"""
class HistoryAccessPermissions(BaseAccessPermissions):
"""
    Access permissions container for the History.
"""
async def async_check_permissions(self, user_id: int) -> bool:
"""
Returns True if the user is in admin group and has read access to
model instances.
"""
return await async_in_some_groups(user_id, [GROUP_ADMIN_PK])
| 24.754098
| 73
| 0.733113
|
717739f2ef171f81187b9199007d7d87c53e6218
| 13,405
|
py
|
Python
|
santoku/utils/url_handler.py
|
wiris/santoku
|
1243b68d3363a3b7205ac47e1772a32787f9ed35
|
[
"MIT"
] | 3
|
2020-07-08T19:33:11.000Z
|
2022-03-12T10:05:40.000Z
|
santoku/utils/url_handler.py
|
wiris/santoku
|
1243b68d3363a3b7205ac47e1772a32787f9ed35
|
[
"MIT"
] | 1
|
2021-04-21T09:13:51.000Z
|
2021-04-21T09:13:51.000Z
|
santoku/utils/url_handler.py
|
wiris/santoku
|
1243b68d3363a3b7205ac47e1772a32787f9ed35
|
[
"MIT"
] | 1
|
2020-11-11T18:09:34.000Z
|
2020-11-11T18:09:34.000Z
|
import ipaddress
from typing import List
from urllib.parse import urlparse
import tldextract
class InvalidURLError(Exception):
def __init__(self, message):
super().__init__(message)
class URLHandler:
@classmethod
def get_partial_domain(
cls, url: str, num_subdomains: int = 0, raise_exception_if_invalid_url: bool = True
) -> str:
"""
Given a URL, return the domain name up to a particular number of subdomains.
        When the given URL is invalid, an `InvalidURLError` exception is raised if
        `raise_exception_if_invalid_url` is set to `True`. Otherwise, whatever can be
        recovered is returned: the last `num_subdomains` subdomains plus the domain (or the
        domain alone, or the suffix alone, depending on what is present); if nothing usable
        is found, an empty string is returned when a scheme is detected, or the URL as-is
        when it is not.
Parameters
----------
url : str
The URL to get the partial domain from.
num_subdomains : int, Optional
Number of subdomains that are extracted. No subdomains are extracted by default.
raise_exception_if_invalid_url : bool, Optional
We consider as invalid those URLs in which some particles are missing in the fully
qualified domain name. Set to `True` by default.
Returns
-------
str
The partial domain of the `url` following the aforementioned criteria.
Raises
------
InvalidURLError
If `url` is invalid.
Notes
-----
Notice that with our definition of invalid URLs, URLs containing IP addresses will be
considered as invalid as they do not contain top level domains.
Examples
--------
https://sub2.sub1.example.com.es/path, 0 -> example.com.es
https://sub2.sub1.example.com.es/path, 1 -> sub1.example.com.es
https://sub2.sub1.example.com.es/path, 2 -> sub2.sub1.example.com.es
https://sub2.sub1.example.com.es/path, 3 -> sub2.sub1.example.com.es
"""
res = tldextract.extract(url)
# When URL is invalid
if not res.domain or not res.suffix:
            # If the URL contains a domain or a suffix, return what can be salvaged
            if not raise_exception_if_invalid_url:
                if res.domain:
                    if res.subdomain and num_subdomains > 0:
                        # URL contains a subdomain and a domain: return the last n subdomains + the domain
                        component = res.subdomain.split(".")[-num_subdomains:] + [res.domain]
                        return ".".join(component)
                    # URL only contains a domain: return just the domain
                    return res.domain
                elif res.suffix:
                    # URL only contains a suffix: return just the suffix
                    return res.suffix
# If URL doesn't contain anything identified as domain or suffix, check whether it
# contains scheme. If so, return an empty domain, otherwise, return the url as it is
# e.g.: for `http:///integration/...` an empty domain will be returned, while for
# `fakedomain`, the whole `fakedomain` will be returned.
if urlparse(url).scheme:
return ""
return url
raise InvalidURLError(f"The {url} URL does not contain top level domain")
components = []
# If the url contains subdomains and subdomains are needed
if res.subdomain and num_subdomains > 0:
# Split the subdomains and keep the last n
components += res.subdomain.split(".")[-num_subdomains:]
if res.domain:
components.append(res.domain)
if res.suffix:
components.append(res.suffix)
return ".".join(components)
@classmethod
def get_fully_qualified_domain(
cls, url: str, raise_exception_if_invalid_url: bool = True
) -> str:
"""
Given a URL, return its fully qualified domain name without the trailing dot.
The fully qualified domain name is defined as the domain name with all its subdomains.
When the given URL is invalid, if `raise_exception_if_invalid_url` is set to `True`,
`InvalidURLError` exception will be raised. Otherwise, if any particle of the fully
qualified domain is present, the URL without scheme, path, query and fragment will be
returned; else, if the URL contains a scheme, an empty string will be returned;
otherwise, the url will be returned as it is.
Parameters
----------
url : str
The URL to get the fully qualified domain from.
raise_exception_if_invalid_url : bool, Optional
We consider as invalid those URLs in which some particles are missing in the fully
qualified domain name. Set to `True` by default.
Returns
-------
str
The fully qualified domain of the `url`, following the aforementioned criteria.
Raises
------
InvalidURLError
If `url` is invalid.
Notes
-----
This method is more useful than get_partial_domain when you don't know how many subdomains
the URL contains.
More information on fully qualified domain name: [1]
References
----------
[1] :
https://en.wikipedia.org/wiki/Fully_qualified_domain_name
Example
-------
https://sub.example.com.es/path -> sub.example.com.es
"""
res = tldextract.extract(url)
# When URL is invalid
if not res.domain or not res.suffix:
            # If the URL contains a domain or a suffix, return what can be salvaged
            if not raise_exception_if_invalid_url:
                if res.domain:
                    # URL contains a subdomain and a domain: return subdomain + domain
                    if res.subdomain:
                        return f"{res.subdomain}.{res.domain}"
                    # URL only contains a domain: return just the domain
                    return res.domain
                elif res.suffix:
                    # URL only contains a suffix: return just the suffix
                    return res.suffix
# If URL doesn't contain anything identified as domain or suffix, check whether it
# contains scheme. If so, return an empty domain, otherwise, return the url as it is
# e.g.: for `http:///integration/...` an empty domain will be returned, while for
# `fakedomain`, the whole `fakedomain` will be returned.
if urlparse(url).scheme:
return ""
return url
raise InvalidURLError(f"The {url} URL does not contain domain or suffix")
return ".".join(part for part in res if part)
@classmethod
def contains_ip(cls, url: str) -> bool:
"""
Return true if the given string contains an IP address.
Parameters
----------
url : str
The URL to check.
Returns
-------
bool
True if the given `url` contains an IP address.
Notes
-----
The verification of IP has been done using the `ipaddress` module of python. More
information on the ipaddress module: [1]
References
----------
[1] :
https://docs.python.org/3/library/ipaddress.html
"""
domain = tldextract.extract(url=url).domain
# If it is a valid IP, the initialization of the IP class should be successful.
try:
ipaddress.ip_address(domain)
except ValueError:
return False
return True
@classmethod
def explode_domain(cls, url: str, raise_exception_if_invalid_url: bool = True) -> List[str]:
"""
Takes in a string with a URL and computes all possible levels of subdomain including the top
level domain, from less complete to more.
When the given URL is invalid, if `raise_exception_if_invalid_url` is set to `True`,
`InvalidURLError` exception will be raised, otherwise, a list containing exploded subdomains
of the invalid URL will be returned.
Parameters
----------
url : str
The URL to explode the domain from.
raise_exception_if_invalid_url : bool, Optional
We consider as invalid those URLs in which some particles are missing in the fully
qualified domain name. Set to `True` by default.
Returns
-------
List[str]
The exploded domains from less complete to more, following the aforementioned criteria.
Raises
------
InvalidURLError
If `url` is invalid.
Example
-------
'www.s1.s2.example.com' -> ['example.com', 's2.example.com', 's1.s2.example.com',
'www.s1.s2.example.com'].
"""
res = tldextract.extract(url)
if res.suffix:
if res.domain:
domain = f"{res.domain}.{res.suffix}"
exploded_subdomains = [domain]
if res.subdomain:
# Append splitted subdomains successively
for subdomain in reversed(res.subdomain.split(".")):
exploded_subdomains.append(f"{subdomain}.{exploded_subdomains[-1]}")
# If the URL doesn't contain subdomain, return only the domain
return exploded_subdomains
else:
if not raise_exception_if_invalid_url:
                # A URL can be identified as suffix when it contains only TLDs,
                # e.g. 'com' or 'co.uk'
return [res.suffix]
elif res.domain:
if not raise_exception_if_invalid_url:
exploded_subdomains = [res.domain]
if res.subdomain:
# Append splitted subdomains successively
for subdomain in reversed(res.subdomain.split(".")):
exploded_subdomains.append(f"{subdomain}.{exploded_subdomains[-1]}")
return exploded_subdomains
else:
if not raise_exception_if_invalid_url:
# If URL doesn't contain anything identified as domain or suffix, check whether it
# contains scheme. If so, return an empty domain, e.g.: for
# `http:///integration/...`, an empty domain will be returned. Otherwise
# return the url as it is: it's the case of: "fakedomain", " ", "//", ".", etc.
if urlparse(url).scheme:
return [""]
return [url]
raise InvalidURLError(f"The {url} URL does not contain domain or suffix")
# We comment this code block out until we are sure of what to do
# try:
# res = tld.get_tld(url, fix_protocol=True, as_object=True)
# except (tld.exceptions.TldDomainNotFound, tld.exceptions.TldBadUrl) as error:
# # get_tld raises an exception when the top level domain (tld) is unknown
# # For example, we might find an unknown tld if someone uses ".devel" during development
# # The code below is an attempt to "clean" the domain by doing
# # - Remove http:// and https://
# # - Split by "/" and return position 0 of the list
# # - Split by "?" and return position 0
# # - Split by "#" and return position 0
# parsed_url = url.replace("http://", "").replace("https://", "").replace("//", "")
# parsed_url = parsed_url.split("/")[0].split("?")[0].split("#")[0]
# return [parsed_url]
# exploded_subdomains = [res.fld]
@classmethod
def get_path(cls, url: str, raise_exception_if_invalid_url: bool = True) -> str:
"""
Given a URL, return the path.
When the given URL is invalid, if `raise_exception_if_invalid_url` is set to `True`,
`InvalidURLError`exception will be raised. If the url does not contain a scheme or a domain,
an empty string will be returned instead.
Parameters
----------
url : str
The URL to get the path from.
raise_exception_if_invalid_url : bool, Optional
We consider as invalid those URLs in which some particles are missing in the fully
qualified domain name. Set to `True` by default.
Returns
-------
str
The path of the URL or an empty string, following the aforementioned criteria.
Raises
------
InvalidURLError
If `url` is invalid.
Example
-------
'https://example.com/path/' -> '/path/'
"""
res = tldextract.extract(url)
# When URL is invalid
if not res.domain or not res.suffix:
if raise_exception_if_invalid_url:
raise InvalidURLError(f"The {url} URL does not contain domain or suffix")
parsed_url = urlparse(url)
if parsed_url.scheme or parsed_url.netloc:
return parsed_url.path
else:
return ""
| 36.826923
| 101
| 0.581052
|
702051c80d920784630738f16b1ada90d4e520ea
| 7,446
|
py
|
Python
|
amlb/utils/core.py
|
automl/automlbenchmark
|
59e796fe6632637233a7104dfffe65f210f9eef5
|
[
"MIT"
] | 2
|
2020-12-06T22:09:12.000Z
|
2022-02-09T00:02:29.000Z
|
amlb/utils/core.py
|
automl/automlbenchmark
|
59e796fe6632637233a7104dfffe65f210f9eef5
|
[
"MIT"
] | null | null | null |
amlb/utils/core.py
|
automl/automlbenchmark
|
59e796fe6632637233a7104dfffe65f210f9eef5
|
[
"MIT"
] | null | null | null |
from ast import literal_eval
from collections.abc import Iterable
from functools import reduce, wraps
import json
import logging
import pprint
log = logging.getLogger(__name__)
class Namespace:
printer = pprint.PrettyPrinter(indent=2)
@staticmethod
def parse(*args, **kwargs):
raw = dict(*args, **kwargs)
parsed = Namespace()
dots, nodots = partition(raw.keys(), lambda s: '.' in s)
for k in nodots:
v = raw[k]
try:
if isinstance(v, str):
v = literal_eval(v)
            except Exception:
                # value is not a Python literal; keep the raw string
                pass
parsed[k] = v
sublevel = {}
for k in dots:
k1, k2 = k.split('.', 1)
entry = [(k2, raw[k])]
if k1 in sublevel:
sublevel[k1].update(entry)
else:
sublevel[k1] = dict(entry)
for k, v in sublevel.items():
parsed[k] = Namespace.parse(v)
return parsed
@staticmethod
def merge(*namespaces, deep=False):
merged = Namespace()
for ns in namespaces:
if ns is None:
continue
if not deep:
merged + ns
else:
for k, v in ns:
if isinstance(v, Namespace):
merged[k] = Namespace.merge(merged[k], v, deep=True)
else:
merged[k] = v
return merged
@staticmethod
def dict(namespace, deep=True):
dic = dict(namespace)
if not deep:
return dic
for k, v in dic.items():
if isinstance(v, Namespace):
dic[k] = Namespace.dict(v)
return dic
@staticmethod
def from_dict(dic, deep=True):
ns = Namespace(dic)
if not deep:
return ns
for k, v in ns:
if isinstance(v, dict):
ns[k] = Namespace.from_dict(v)
return ns
@staticmethod
def walk(namespace, fn, inplace=False):
def _walk(namespace, fn, parents=None, inplace=inplace):
parents = [] if parents is None else parents
ns = namespace if inplace else Namespace()
for k, v in namespace:
nk, nv = fn(k, v, parents=parents)
if nk is not None:
if v is nv and isinstance(v, Namespace):
nv = _walk(nv, fn, parents=parents+[k], inplace=inplace)
ns[nk] = nv
return ns
return _walk(namespace, fn, inplace=inplace)
def __init__(self, *args, **kwargs):
self.__dict__.update(dict(*args, **kwargs))
def __add__(self, other):
"""extends self with other (always overrides)"""
if other is not None:
self.__dict__.update(other)
return self
def __mod__(self, other):
"""extends self with other (adds only missing keys)"""
if other is not None:
for k, v in other:
self.__dict__.setdefault(k, v)
return self
def __contains__(self, key):
return key in self.__dict__
def __len__(self):
return len(self.__dict__)
def __getitem__(self, item):
return self.__dict__.get(item)
def __setitem__(self, key, value):
self.__dict__[key] = value
def __delitem__(self, key):
self.__dict__.pop(key, None)
def __iter__(self):
return iter(self.__dict__.items())
def __copy__(self):
return Namespace(self.__dict__.copy())
def __dir__(self):
return list(self.__dict__.keys())
def __str__(self):
return Namespace.printer.pformat(Namespace.dict(self))
def __repr__(self):
return repr(self.__dict__)
def __json__(self):
return Namespace.dict(self)
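# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Namespace.parse turns flat dot-separated keys into nested namespaces, and
# Namespace.merge(..., deep=True) recursively overlays namespaces; made-up values:
#
#   ns = Namespace.parse({"a.b": "1", "a.c": "[2, 3]", "d": "x"})
#   ns.a.b == 1, ns.a.c == [2, 3], ns.d == "x"
#   merged = Namespace.merge(ns, Namespace.parse({"a.b": "10"}), deep=True)
#   merged.a.b == 10, merged.a.c == [2, 3]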
def repr_def(obj):
return "{clazz}({attributes})".format(clazz=type(obj).__name__, attributes=', '.join(("{}={}".format(k, repr(v)) for k, v in obj.__dict__.items())))
def noop():
pass
def as_list(*args):
if len(args) == 0:
return list()
elif len(args) == 1 and isinstance(args[0], Iterable) and not isinstance(args[0], str):
return list(args[0])
return list(args)
def flatten(iterable, flatten_tuple=False, flatten_dict=False):
return reduce(lambda l, r: (l.extend(r) if isinstance(r, (list, tuple) if flatten_tuple else list)
else l.extend(r.items()) if flatten_dict and isinstance(r, dict)
else l.append(r)) or l, iterable, [])
def partition(iterable, predicate=id):
truthy, falsy = [], []
for i in iterable:
if predicate(i):
truthy.append(i)
else:
falsy.append(i)
return truthy, falsy
def translate_dict(dic, translation_dict):
tr = dict()
for k, v in dic.items():
if k in translation_dict:
tr[translation_dict[k]] = v
else:
tr[k] = v
return tr
def str2bool(s):
if s.lower() in ('true', 't', 'yes', 'y', 'on', '1'):
return True
elif s.lower() in ('false', 'f', 'no', 'n', 'off', '0'):
return False
else:
raise ValueError(s+" can't be interpreted as a boolean.")
def str_def(s, if_none=''):
if s is None:
return if_none
return str(s)
def head(s, lines=10):
s_lines = s.splitlines() if s else []
return '\n'.join(s_lines[:lines])
def tail(s, lines=10, from_line=None, include_line=True):
if s is None:
        return None if from_line is None else (None, None)
s_lines = s.splitlines()
start = -lines
if isinstance(from_line, int):
start = from_line
if not include_line:
start += 1
elif isinstance(from_line, str):
try:
start = s_lines.index(from_line)
if not include_line:
start += 1
except ValueError:
start = 0
last_line = dict(index=len(s_lines) - 1,
line=s_lines[-1] if len(s_lines) > 0 else None)
t = '\n'.join(s_lines[start:])
return t if from_line is None else (t, last_line)
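# --- Illustrative sketch (added for clarity; not part of the original file) ---
# tail() returns a plain string when `from_line` is omitted and a (text, last_line)
# pair when it is given; made-up input:
#
#   tail("a\nb\nc\nd", lines=2)        -> "c\nd"
#   tail("a\nb\nc\nd", from_line="b")  -> ("b\nc\nd", {"index": 3, "line": "d"})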
def fn_name(fn):
return ".".join([fn.__module__, fn.__qualname__])
def json_load(file, as_namespace=False):
with open(file, 'r') as f:
return json_loads(f.read(), as_namespace=as_namespace)
def json_loads(s, as_namespace=False):
if as_namespace:
return json.loads(s, object_hook=lambda dic: Namespace(**dic))
else:
return json.loads(s)
def json_dump(o, file, style='default'):
with open(file, 'w') as f:
f.write(json_dumps(o, style=style))
def json_dumps(o, style='default'):
"""
:param o:
:param style: str among ('compact', 'default', 'pretty').
- `compact` removes all blanks (no space, no newline).
- `default` adds a space after each separator but prints on one line
- `pretty` adds a space after each separator and indents after opening brackets.
:return:
"""
separators = (',', ':') if style == 'compact' else None
indent = 4 if style == 'pretty' else None
def default_encode(o):
if hasattr(o, '__json__') and callable(o.__json__):
return o.__json__()
return json.encoder.JSONEncoder.default(None, o)
return json.dumps(o, indent=indent, separators=separators, default=default_encode)
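# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The three `style` values of json_dumps on a tiny made-up payload:
#
#   json_dumps({"a": 1, "b": [2, 3]}, style="compact")  -> '{"a":1,"b":[2,3]}'
#   json_dumps({"a": 1, "b": [2, 3]}, style="default")  -> '{"a": 1, "b": [2, 3]}'
#   json_dumps({"a": 1, "b": [2, 3]}, style="pretty")   -> same data, indented across lines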
| 27.88764
| 152
| 0.559226
|
519231d92ccd46474ab28c587b1e5c027823e25e
| 2,321
|
py
|
Python
|
PythonXMLTags/parser_excerpt.py
|
kevinburleigh75/NSFPatents
|
1f7f93dff4128793d43d177cff43d087e25b6c28
|
[
"Apache-2.0"
] | 1
|
2015-03-08T02:32:39.000Z
|
2015-03-08T02:32:39.000Z
|
PythonXMLTags/parser_excerpt.py
|
ahirschberg/NSFPatents
|
e70d7369ea05aff7edf2715545bd674c8c9293d5
|
[
"Apache-2.0"
] | null | null | null |
PythonXMLTags/parser_excerpt.py
|
ahirschberg/NSFPatents
|
e70d7369ea05aff7edf2715545bd674c8c9293d5
|
[
"Apache-2.0"
] | 2
|
2016-05-26T13:52:02.000Z
|
2020-04-26T02:31:35.000Z
|
def setTags(self, year):
print 'Year is %s' % year
if year >= 07:
# 2007 tagslist
self.ipa_enclosing = 'us-patent-application'
self.ipa_pubnum = 'publication-reference/document-id/doc-number'
self.ipa_pubdate = 'publication-reference/document-id/date' #Published patent document
self.ipa_invtitle = 'invention-title' #Title of invention
self.ipa_abstract = 'abstract/p' # Concise summary of disclosure
self.ipa_assignee = 'assignees/assignee'
self.ipa_inventors = 'applicants' # Applicants information
self.ipa_crossref = '<?cross-reference-to-related-applications description="Cross Reference To Related Applications" end="lead"?><?cross-reference-to-related-applications description="Cross Reference To Related Applications" end="tail"?>' # Xref, but there is also a 2nd option coded into the scrape method
self.ipa_appnum = 'application-reference/document-id/doc-number' # Patent ID
self.ipa_appdate = 'application-reference/document-id/date' # Filing Date
self.ipa_pct_371cdate = 'pct-or-regional-filing-data/us-371c124-date' # PCT filing date
self.ipa_pct_pubnum = 'pct-or-regional-publishing-data/document-id/doc-number' # PCT publishing date
self.ipa_priorpub = 'related-publication/document-id/doc-number' # Previously published document about same app
self.ipa_priorpubdate = 'related-publication/document-id/date' # Date for previously published document
self.ipa_govint = '<?federal-research-statement description="Federal Research Statement" end="lead"?><?federal-research-statement description="Federal Research Statement" end="tail"?>' #Govint
self.ipa_parentcase = 'us-related-documents/parent-doc/document-id/doc-number' # Parent Case
self.ipa_childcase = 'us-related-documents/child-doc/document-id/doc-number' # Child Case
self.ipg_enclosing = 'us-patent-grant'
self.ipg_govint = '<?GOVINT description="Government Interest" end="lead"?><?GOVINT description="Government Interest" end="tail"?>'
self.ipg_crossref = '<?RELAPP description="Other Patent Relations" end="lead"?><?RELAPP description="Other Patent Relations" end="tail"?>'
| 82.892857
| 318
| 0.69539
|
93f28c5796cc43ee8cf5802b50b3938dd9926674
| 3,333
|
py
|
Python
|
asreview/ascii.py
|
bmkramer/automated-systematic-review
|
f99079926f381bc7895ff6fefa9e6e729a2c26b8
|
[
"Apache-2.0"
] | 1
|
2021-01-22T15:18:33.000Z
|
2021-01-22T15:18:33.000Z
|
asreview/ascii.py
|
bmkramer/automated-systematic-review
|
f99079926f381bc7895ff6fefa9e6e729a2c26b8
|
[
"Apache-2.0"
] | null | null | null |
asreview/ascii.py
|
bmkramer/automated-systematic-review
|
f99079926f381bc7895ff6fefa9e6e729a2c26b8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The ASReview Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asreview.config import EMAIL_ADDRESS
from asreview.config import GITHUB_PAGE
ASCII_TEA = """
( ) ( ) )
) ( ) ( (
( ) ( ) )
_____________
<_____________> ___
| |/ _ \
| | | |
| |_| |
___| |\___/
/ \___________/ \
\_____________________/
"""
ASCII_LOGO = """
_____ _____ _
/\ / ____| __ \ (_)
/ \ | (___ | |__) |_____ ___ _____ __
/ /\ \ \___ \| _ // _ \ \ / / |/ _ \ \ /\ / /
/ ____ \ ____) | | \ \ __/\ V /| | __/\ V V /
/_/ \_\_____/|_| \_\___| \_/ |_|\___| \_/\_/
"""
ASCII_MSG_ORACLE = """
---------------------------------------------------------------------------------
| |
| Welcome to the ASReview Automated Systematic Review software. |
| In this mode the computer will assist you in creating your systematic review. |
| After giving it a few papers that are either included or excluded, |
| it will compute a model and show progressively more relevant papers. |
| |
| GitHub page: {0: <58}|
| Questions/remarks: {1: <58}|
| |
---------------------------------------------------------------------------------
""".format(GITHUB_PAGE, EMAIL_ADDRESS)
ASCII_MSG_SIMULATE = """
---------------------------------------------------------------------------------
| |
| Welcome to the ASReview Automated Systematic Review software. |
| In this mode the computer will simulate how well the ASReview software |
| could have accelerated the systematic review of your dataset.                 |
| You can sit back and relax while the computer runs this simulation. |
| |
| GitHub page: {0: <58}|
| Questions/remarks: {1: <58}|
| |
---------------------------------------------------------------------------------
""".format(GITHUB_PAGE, EMAIL_ADDRESS)
def welcome_message(mode="oracle"):
if mode == "oracle":
return ASCII_LOGO + ASCII_MSG_ORACLE
elif mode == "simulate":
return ASCII_LOGO + ASCII_MSG_SIMULATE
| 43.285714
| 82
| 0.428443
|
8a11e12381808cab9fb41b692575f66664a5bd6a
| 348
|
py
|
Python
|
invenio_requests/resolvers/__init__.py
|
mb-wali/invenio-requests
|
897084787bf3af51425043dad9183b4d5773938e
|
[
"MIT"
] | null | null | null |
invenio_requests/resolvers/__init__.py
|
mb-wali/invenio-requests
|
897084787bf3af51425043dad9183b4d5773938e
|
[
"MIT"
] | 87
|
2021-11-08T12:58:56.000Z
|
2022-03-29T08:52:48.000Z
|
invenio_requests/resolvers/__init__.py
|
mb-wali/invenio-requests
|
897084787bf3af51425043dad9183b4d5773938e
|
[
"MIT"
] | 10
|
2021-11-04T19:15:37.000Z
|
2022-02-21T09:27:07.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 - 2022 TU Wien.
#
# Invenio-Requests is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Module for entity resolvers."""
# NOTE: no imports made available here because that introduces circular imports
__all__ = ()
| 26.769231
| 79
| 0.715517
|
1a93f98335bdfadee18cd32475c67e00a5ae4018
| 276
|
py
|
Python
|
tests/artificial/transf_Integration/trend_MovingAverage/cycle_30/ar_/test_artificial_128_Integration_MovingAverage_30__20.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/artificial/transf_Integration/trend_MovingAverage/cycle_30/ar_/test_artificial_128_Integration_MovingAverage_30__20.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/artificial/transf_Integration/trend_MovingAverage/cycle_30/ar_/test_artificial_128_Integration_MovingAverage_30__20.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 30, transform = "Integration", sigma = 0.0, exog_count = 20, ar_order = 0);
| 39.428571
| 171
| 0.73913
|
a638ff6f9a1b41dd1794c511c8a242d89ce00dfe
| 16,099
|
py
|
Python
|
mmic_openmm/components/ff_component.py
|
MolSSI/mmic_openmm
|
a0fea2a3d743282f3ebe74056d170a8e435c4eb9
|
[
"BSD-3-Clause"
] | null | null | null |
mmic_openmm/components/ff_component.py
|
MolSSI/mmic_openmm
|
a0fea2a3d743282f3ebe74056d170a8e435c4eb9
|
[
"BSD-3-Clause"
] | null | null | null |
mmic_openmm/components/ff_component.py
|
MolSSI/mmic_openmm
|
a0fea2a3d743282f3ebe74056d170a8e435c4eb9
|
[
"BSD-3-Clause"
] | null | null | null |
from mmelemental.models import forcefield
from mmic_translator.models import (
TransInput,
TransOutput,
)
from mmic_translator.components import TransComponent
from mmic_openmm.mmic_openmm import units as openmm_units
from typing import List, Tuple, Optional
from collections.abc import Iterable
from mmelemental.util.units import convert
from simtk.openmm.app import forcefield as openmm_ff
import parmed
import simtk
__all__ = ["FFToOpenMMComponent", "OpenMMToFFComponent"]
class FFToOpenMMComponent(TransComponent):
"""A component for converting MMSchema to OpenMM ForceField object."""
def execute(
self,
inputs: TransInput,
extra_outfiles: Optional[List[str]] = None,
extra_commands: Optional[List[str]] = None,
scratch_name: Optional[str] = None,
timeout: Optional[int] = None,
) -> Tuple[bool, TransOutput]:
if isinstance(inputs, dict):
inputs = self.input()(**inputs)
empty_atom = parmed.topologyobjects.Atom()
mmff = inputs.schema_object
pff = parmed.structure.Structure()
masses = convert(
mmff.masses, mmff.masses_units, empty_atom.umass.unit.get_symbol()
)
charges = TransComponent.get(mmff, "charges")
charges = convert(
charges, mmff.charges_units, empty_atom.ucharge.unit.get_symbol()
)
atomic_numbers = TransComponent.get(mmff, "atomic_numbers")
atom_types = TransComponent.get(mmff, "defs")
rmin, epsilon = self._get_nonbonded(mmff, empty_atom)
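        # Convert each MMSchema atom into a parmed Atom, carrying over its type, mass, charge and per-atom LJ parameters.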
for index, symb in enumerate(mmff.symbols):
# Will likely lose FF-related info ... but then Molecule is not supposed to store any params specific to FFs
if atomic_numbers is not None:
atomic_number = atomic_numbers[index]
else:
atomic_number = None
if atom_types is not None:
atom_type = atom_types[index]
else:
atom_type = None
if masses is not None:
mass = masses[index]
else:
mass = None
if charges is not None:
charge = charges[index]
else:
charge = None
atom = parmed.topologyobjects.Atom(
list=None,
atomic_number=atomic_number,
name=symb,
type=atom_type,
mass=mass,
charge=charge,
nb_idx=0,
solvent_radius=0.0,
screen=0.0,
tree="BLA",
join=0.0,
irotat=0.0,
occupancy=1.0,
bfactor=0.0,
altloc="",
number=-1,
rmin=rmin[index],
epsilon=epsilon[index],
rmin14=None,
epsilon14=None,
# bonds=..., faster than connecting atoms one by one as done below?
# angles=...,
# dihedrals=...,
# impropers=...,
# polarizable=...,
)
residues = TransComponent.get(mmff, "substructs")
if residues:
resname, resnum = residues[index]
else:
raise NotImplementedError(
"Residues must be supplied for forcefields based on atom typing."
)
pff.add_atom(atom, resname, resnum, chain="", inscode="", segid="")
# Bonds
bonds = TransComponent.get(mmff, "bonds")
if bonds is not None:
assert (
mmff.bonds.form == "Harmonic"
), "Only Harmonic potential supported for now"
spring = convert(
bonds.params.spring, bonds.params.spring_units, "kcal/mol/angstroms**2"
)
req = convert(bonds.lengths, bonds.lengths_units, "angstroms")
for (
bi,
(
i,
j,
order,
),
) in enumerate(mmff.bonds.indices):
btype = parmed.topologyobjects.BondType(
k=spring[bi], req=req[bi], list=pff.bond_types
)
pff.bonds.append(
parmed.topologyobjects.Bond(
pff.atoms[i], pff.atoms[j], order=order, type=btype
)
)
pff.bond_types.append(btype)
# both implementations seem to perform almost the same:
# pff.atoms[i].bond_to(pff.atoms[j])
# Angles
angles = TransComponent.get(mmff, "angles")
if angles is not None:
assert (
mmff.angles.form == "Harmonic"
), "Only Harmonic potential supported for now"
spring = convert(
angles.params.spring, angles.params.spring_units, "kcal/mol/radians^2"
)
angles_eq = convert(angles.angles, angles.angles_units, "degrees")
for ai, (i, j, k) in enumerate(mmff.angles.indices):
atype = parmed.topologyobjects.AngleType(
k=spring[ai], theteq=angles_eq[ai], list=pff.angle_types
)
pff.angles.append(
parmed.topologyobjects.Angle(
pff.atoms[i], pff.atoms[j], pff.atoms[k], type=atype
)
)
pff.angle_types.append(atype)
# Dihedrals
dihedrals = TransComponent.get(mmff, "dihedrals")
if dihedrals is not None:
dihedrals = (
dihedrals.pop() if isinstance(dihedrals, list) else dihedrals
) # For now, keep track of only a single type
# Need to change this ASAP! Must take multiple types into account!
assert (
dihedrals.form == "Charmm" or dihedrals.form == "CharmmMulti"
), "Only Charmm-style potentials supported for now"
energy = convert(
dihedrals.params.energy, dihedrals.params.energy_units, "kcal/mol"
)
phase = convert(
dihedrals.params.phase, dihedrals.params.phase_units, "degrees"
)
periodicity = dihedrals.params.periodicity
for di, (i, j, k, l) in enumerate(dihedrals.indices):
if isinstance(energy[di], Iterable):
dtype = [
parmed.topologyobjects.DihedralType(
phi_k=energy[di][dj],
per=periodicity[di][dj],
phase=phase[di][dj],
# scee,
# scnb,
list=pff.dihedral_types,
)
for dj in range(len(energy[di]))
]
else:
dtype = parmed.topologyobjects.DihedralType(
phi_k=energy[di],
per=periodicity[di],
phase=phase[di],
# scee
# scnb
list=pff.dihedral_types,
)
# assert:
# dtype.funct = (
# 9 # hackish: assume all dihedrals are proper and charmm-style
# )
pff.dihedrals.append(
parmed.topologyobjects.Dihedral(
pff.atoms[i],
pff.atoms[j],
pff.atoms[k],
pff.atoms[l],
improper=False,
type=dtype,
)
)
pff.dihedral_types.append(dtype)
return True, TransOutput(proc_input=inputs, data_object=pff)
def _get_nonbonded(
self,
mmff: forcefield.ForceField,
empty_atom: "parmed.topologyobjects.Atom",
) -> Tuple["numpy.ndarray", "numpy.ndarray"]:
assert (
mmff.nonbonded.form == "LennardJones"
), "Only LJ potential supported for now"
lj_units = forcefield.nonbonded.potentials.lenjones.LennardJones.get_units()
scaling_factor = 2 ** (1.0 / 6.0) # rmin = 2^(1/6) sigma
rmin = mmff.nonbonded.params.sigma * scaling_factor
rmin = convert(rmin, lj_units["sigma_units"], empty_atom.urmin.unit.get_name())
# atom.rmin_14 * rmin_14_factor * scaling_factor,
epsilon = convert(
mmff.nonbonded.params.epsilon,
lj_units["epsilon_units"],
empty_atom.uepsilon.unit.get_name(),
)
# atom.epsilon_14 * epsilon_14_factor,
return rmin, epsilon
class OpenMMToFFComponent(TransComponent):
"""A component for converting OpenMM ForceField to MMSchema object."""
def execute(
self,
inputs: TransInput,
extra_outfiles: Optional[List[str]] = None,
extra_commands: Optional[List[str]] = None,
scratch_name: Optional[str] = None,
timeout: Optional[int] = None,
) -> Tuple[bool, TransOutput]:
if isinstance(inputs, dict):
inputs = self.input()(**inputs)
ff = inputs.data_object
mm_units = forcefield.ForceField.get_units()
atoms = ff._atomTypes
templates = ff._templates
templates_ = [
{resname: [atom.name for atom in residue.atoms]}
for resname, residue in templates.items()
]
# names-defs = [(atom.name, symbols[atom.type]) for atom in residue.atoms for residue in templates.items()]
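        # Dispatch on the generator type to pick up the nonbonded, bond, angle and torsion parameter blocks; any other generator is unsupported.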
for gen in ff.getGenerators():
if isinstance(gen, simtk.openmm.app.forcefield.NonbondedGenerator):
nonbond_ff = gen
elif isinstance(gen, simtk.openmm.app.forcefield.HarmonicBondGenerator):
bond_ff = gen
elif isinstance(gen, simtk.openmm.app.forcefield.HarmonicAngleGenerator):
angle_ff = gen
elif isinstance(gen, simtk.openmm.app.forcefield.PeriodicTorsionGenerator):
dihedral_ff = gen
else:
raise NotImplementedError
# Need to map these to potential types
lj_units = forcefield.nonbonded.potentials.lenjones.LennardJones.get_units()
data = [
(
atom.atomClass,
atom.element.symbol,
atom.element.atomic_number,
atom.mass,
)
for _, atom in ff._atomTypes.items()
]
types, symbols, atomic_numbers, masses = zip(*data)
masses_units = atoms["0"].element.mass.unit.get_name()
masses = convert(masses, masses_units, mm_units["masses_units"])
nonbonded, charges = self._get_nonbonded(nonbond_ff)
bonds = self._get_bonds(bond_ff)
angles = self._get_angles(angle_ff)
dihedrals = self._get_dihedrals_proper(dihedral_ff)
# charge_groups = None ... should support charge_groups?
exclusions = None
inclusions = None
input_dict = {
"masses": masses,
"charges": charges,
"bonds": bonds,
"angles": angles,
"dihedrals": dihedrals,
"nonbonded": nonbonded,
"exclusions": exclusions,
"inclusions": inclusions,
"defs": types, # or names?
"symbols": symbols,
# "substructs": residues,
"atomic_numbers": atomic_numbers,
}
ff = forcefield.ForceField(**input_dict)
success = True
return success, TransOutput(
proc_input=inputs,
schema_object=ff,
success=success,
schema_version=inputs.schema_version,
schema_name=inputs.schema_name,
)
def _get_nonbonded(self, nonbond):
lj_scale = nonbond.lj14scale
coul_scale = nonbond.coulomb14scale
params = {}
# How to deal with lj/coul 1-4 scale? relevant to sigma/epsilon scale in parmed?
for paramName in nonbond.params.paramNames:
params[paramName] = [
nonbond.params.paramsForType[str(i)][paramName]
for i in range(len(nonbond.params.paramsForType))
]
lj = forcefield.nonbonded.potentials.LennardJones(
sigma=params["sigma"], epsilon=params["epsilon"]
)
# Need to include sigma_14 and epsilon_14
nonbonded = forcefield.nonbonded.NonBonded(params=lj, form="LennardJones")
# How do we access units?
return nonbonded, params["charge"]
def _get_bonds(self, bonds):
ff = bonds.ff
bonds_units = forcefield.bonded.bonds.potentials.harmonic.Harmonic.get_units()
bonds_units.update(forcefield.bonded.Bonds.get_units())
bonds_lengths = bonds.length
bonds_k = bonds.k
ntypes = len(bonds_k)
connectivity = [
(
ff._atomTypes[next(iter(bonds.types1[i]))].atomClass,
ff._atomTypes[next(iter(bonds.types2[i]))].atomClass,
1,
)
for i in range(ntypes)
]
params = forcefield.bonded.bonds.potentials.Harmonic(
spring=bonds_k,
spring_units=f"{openmm_units['energy']} / {openmm_units['length']}**2",
)
return forcefield.bonded.Bonds(
params=params,
lengths=bonds_lengths,
lengths_units=openmm_units["length"],
connectivity=connectivity,
form="Harmonic",
)
def _get_angles(self, angles):
ff = angles.ff
angles_units = forcefield.bonded.angles.potentials.harmonic.Harmonic.get_units()
angles_units.update(forcefield.bonded.Angles.get_units())
angles_lengths = angles.angle
angles_k = angles.k
ntypes = len(angles_k)
connectivity = [
(
ff._atomTypes[next(iter(angles.types1[i]))].atomClass,
ff._atomTypes[next(iter(angles.types2[i]))].atomClass,
ff._atomTypes[next(iter(angles.types3[i]))].atomClass,
)
for i in range(ntypes)
]
params = forcefield.bonded.angles.potentials.Harmonic(
spring=angles_k,
spring_units=f"{openmm_units['energy']} / {openmm_units['angle']}**2",
)
return forcefield.bonded.Angles(
params=params,
angles=angles_lengths,
angles_units=openmm_units["angle"],
connectivity=connectivity,
form="Harmonic",
)
def _get_dihedrals_proper(self, dihedrals):
ff = dihedrals.ff
proper = dihedrals.proper
dihedrals_units = (
forcefield.bonded.dihedrals.potentials.harmonic.Harmonic.get_units()
)
dihedrals_units.update(forcefield.bonded.Dihedrals.get_units())
connectivity = [
(
ff._atomTypes[next(iter(dihedral.types1))].atomClass,
ff._atomTypes[next(iter(dihedral.types2))].atomClass,
ff._atomTypes[next(iter(dihedral.types3))].atomClass,
ff._atomTypes[next(iter(dihedral.types4))].atomClass,
)
for dihedral in proper
]
fields = [
(dihedral.k, dihedral.periodicity, dihedral.phase) for dihedral in proper
]
energy, periodicity, phase = zip(*fields)
params = forcefield.bonded.dihedrals.potentials.CharmmMulti(
energy=energy,
energy_units=openmm_units["energy"],
periodicity=periodicity,
phase=phase,
phase_units=openmm_units["angle"],
)
return forcefield.bonded.Dihedrals(
params=params,
connectivity=connectivity,
form="CharmmMulti",
)
| 34.921909
| 120
| 0.541152
|
0955f57121327f9e4edcd58c26e125db07ab800e
| 93
|
py
|
Python
|
blockcert/apps.py
|
amit502/digi_cert
|
a0d9669ed1470961509b69e9c9b18bf197079d3d
|
[
"MIT"
] | null | null | null |
blockcert/apps.py
|
amit502/digi_cert
|
a0d9669ed1470961509b69e9c9b18bf197079d3d
|
[
"MIT"
] | null | null | null |
blockcert/apps.py
|
amit502/digi_cert
|
a0d9669ed1470961509b69e9c9b18bf197079d3d
|
[
"MIT"
] | 2
|
2018-08-08T14:46:35.000Z
|
2021-07-13T12:08:16.000Z
|
from django.apps import AppConfig
class BlockcertConfig(AppConfig):
name = 'blockcert'
| 15.5
| 33
| 0.763441
|
ef2a751ba0a9dc7e3c77eeeb7c406bad32fe60f2
| 3,179
|
py
|
Python
|
scripts/utils/best_component_membership.py
|
mikeireland/chronostar
|
fcf37614e1d145f3a5e265e54512bf8cd98051a0
|
[
"MIT"
] | 4
|
2018-05-28T11:05:42.000Z
|
2021-05-14T01:13:11.000Z
|
scripts/utils/best_component_membership.py
|
mikeireland/chronostar
|
fcf37614e1d145f3a5e265e54512bf8cd98051a0
|
[
"MIT"
] | 13
|
2019-08-14T07:30:24.000Z
|
2021-11-08T23:44:29.000Z
|
scripts/utils/best_component_membership.py
|
mikeireland/chronostar
|
fcf37614e1d145f3a5e265e54512bf8cd98051a0
|
[
"MIT"
] | 4
|
2016-04-21T08:25:26.000Z
|
2021-02-25T06:53:52.000Z
|
"""
Add columns to the table with best component memberships
"""
import numpy as np
from astropy.table import Table, vstack, unique, join
import sys
sys.path.insert(0, '/Users/marusa/chronostar/chronostar/')
import coordinate
############################################
data_filename = '../gaia_200_pc_for_scocen_with_overlaps.fits'
comps_filename = '../final_comps_21.fits'
good_comps_with_ages = ['C', 'U', 'T', 'A', 'G'] # F doesn't have enough members
# Components that are definitely background (based on CMD)
bg_comps = ['K', 'L', 'M', 'N', 'O', 'P', 'R', 'S']
comps_multiple_pop = ['J', 'B', 'Q'] # Some background and very young stars
############################################
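# Reuse the table if it is already loaded (handy when running interactively); otherwise read it from disk.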
try:
    tab = tab0
except NameError:
    tab0 = Table.read(data_filename)
    tab = tab0
# Read components (WARNING: These are coordinates at time 0 in the past! You should traceforward these to the present!)
comps = Table.read(comps_filename)
comps.rename_column('age', 'Age')
# Crossing time. Only components with sigma<age have reliable ages.
crossing_time = comps['dX']/comps['dV'] * 0.977813106 # pc / (km/s) to Myr
comps['Crossing_time'] = crossing_time
mask = crossing_time < comps['Age'] # sigma < age
comps['Age_reliable'] = mask
membership_colnames = ['membership%s'%comp_ID for comp_ID in comps['comp_ID']]
membership_colnames.append('membership_bg')
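# For every star, pick the component (or background) with the highest membership probability.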
memberships = np.array([tab[col] for col in membership_colnames]).T
ind = np.argwhere(memberships==np.amax(memberships,1, keepdims=True))[:,1]
best_group = np.array([membership_colnames[i].replace('membership', '').replace('_', '') for i in ind])
best_group_membership = np.array([tab[membership_colnames[i]][x] for x, i in enumerate(ind)])
tab['best_component'] = best_group
tab['best_component_membership'] = best_group_membership
# Comments on components that need further splitting
comments = ['Component needs further split' if comp_ID in comps_multiple_pop else '' for comp_ID in tab['best_component']]
tab['best_component_comment'] = comments
tab['best_component_80'] = [' ']*len(tab)
mask = best_group_membership>0.8
tab['best_component_80'][mask] = best_group[mask]
tab['best_component_50'] = [' ']*len(tab)
mask = best_group_membership>0.5
tab['best_component_50'][mask] = best_group[mask]
tab['best_component_90'] = [' ']*len(tab)
mask = best_group_membership>0.9
tab['best_component_90'][mask] = best_group[mask]
compsd = dict(zip(comps['comp_ID'], comps))
# AGES
ages = [compsd[comp_ID]['Age'] if comp_ID!='bg' else np.nan for comp_ID in tab['best_component']]
tab['age_best_component'] = ages
crossing_time = [compsd[comp_ID]['Crossing_time'] if comp_ID!='bg' else np.nan for comp_ID in tab['best_component']]
tab['crossing_time_best_component'] = crossing_time
age_reliable = [compsd[comp_ID]['Age_reliable'] if comp_ID!='bg' else np.nan for comp_ID in tab['best_component']]
tab['age_reliable_best_component'] = age_reliable
# Exclude background stars
mask = (tab['best_component']!='bg') & (tab['best_component_membership']>0.5)
for c in bg_comps:
mask = mask & (tab['best_component']!=c)
tab_members = tab[mask]
print(tab_members)
tab_members.write('scocen_members.fits', overwrite=True)
| 32.773196
| 122
| 0.710915
|
e3ff21c0b452ee7c9e8219991912f9e1b4846576
| 5,974
|
py
|
Python
|
irods-rest.py
|
alanking/irods_client_rest_cpp
|
848775e109d9204836488879028109356938ce18
|
[
"BSD-3-Clause"
] | null | null | null |
irods-rest.py
|
alanking/irods_client_rest_cpp
|
848775e109d9204836488879028109356938ce18
|
[
"BSD-3-Clause"
] | null | null | null |
irods-rest.py
|
alanking/irods_client_rest_cpp
|
848775e109d9204836488879028109356938ce18
|
[
"BSD-3-Clause"
] | null | null | null |
import os, pycurl, getopt, sys, urllib
from functools import partial
from StringIO import StringIO
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
def base_url():
return "http://localhost/irods-rest/1.0.0/"
def authenticate(_user_name, _password, _auth_type):
buffer = StringIO()
c = pycurl.Curl()
c.setopt(c.CUSTOMREQUEST, 'POST')
url = base_url()+'auth?userName='+_user_name+'&password='+_password+'&authType='+_auth_type
c.setopt(c.URL, url)
c.setopt(c.WRITEDATA, buffer)
c.perform()
c.close()
body = buffer.getvalue()
return body
def access(_token, _logical_path):
buffer = StringIO()
c = pycurl.Curl()
c.setopt(pycurl.HTTPHEADER,['Accept: application/json'])
c.setopt(pycurl.HTTPHEADER,['Authorization: '+_token])
c.setopt(c.CUSTOMREQUEST, 'POST')
url = '{0}access?path={1}'.format(base_url(), _logical_path)
c.setopt(c.URL, url)
c.setopt(c.WRITEDATA, buffer)
c.perform()
c.close()
body = buffer.getvalue()
return body
def list(_token, _path, _stat, _permissions, _metadata, _offset, _limit):
buffer = StringIO()
c = pycurl.Curl()
c.setopt(pycurl.HTTPHEADER,['Accept: application/json'])
c.setopt(pycurl.HTTPHEADER,['Authorization: '+_token])
c.setopt(c.CUSTOMREQUEST, 'GET')
url = base_url()+'list?path={0}&stat={1}&permissions={2}&metadata={3}&offset={4}&limit={5}'.format(_path, _stat, _permissions, _metadata, _offset, _limit)
c.setopt(c.URL, url)
c.setopt(c.WRITEDATA, buffer)
c.perform()
c.close()
body = buffer.getvalue()
return body
def put(_token, _physical_path, _logical_path):
body = ""
offset = 0
file_size = 0
read_size = 1024 * 1024 * 4
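    # Stream the file to the server in 4 MiB chunks; each chunk is PUT to the stream endpoint at its byte offset.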
with open(_physical_path, 'r') as f:
for data in iter(partial(f.read, read_size), b''):
c = pycurl.Curl()
c.setopt(pycurl.HTTPHEADER,['Accept: application/json'])
c.setopt(pycurl.HTTPHEADER,['Authorization: '+_token])
c.setopt(c.CUSTOMREQUEST, 'PUT')
c.setopt(c.POSTFIELDSIZE, len(data))
data_buffer = BytesIO(data.encode('utf-8'))
c.setopt(c.READDATA, data_buffer)
c.setopt(c.UPLOAD, 1)
file_size = file_size + len(data)
c.setopt(c.URL, '{0}stream?path={1}&offset={2}&limit={3}'.format(base_url(), _logical_path, offset, file_size))
body_buffer = StringIO()
c.setopt(c.WRITEDATA, body_buffer)
c.perform()
offset = offset + len(data)
c.close()
body = body_buffer.getvalue()
return body
def get(_token, _physical_path, _logical_path):
offset = 0
read_size = 1024 * 1024 * 4
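    # Download the object in 4 MiB chunks; stop once the server returns an empty body.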
with open(_physical_path, 'w') as f:
while True:
c = pycurl.Curl()
c.setopt(pycurl.HTTPHEADER,['Accept: application/json'])
c.setopt(pycurl.HTTPHEADER,['Authorization: '+_token])
c.setopt(c.CUSTOMREQUEST, 'GET')
c.setopt(c.URL, '{0}stream?path={1}&offset={2}&limit={3}'.format(base_url(), _logical_path, offset, read_size))
body_buffer = StringIO()
c.setopt(c.WRITEDATA, body_buffer)
c.perform()
c.close()
body = body_buffer.getvalue()
if len(body) == 0:
break
f.write(body)
offset = offset + len(body)
return "Success"
def query(_token, _string, _limit, _offset, _type):
buffer = StringIO()
c = pycurl.Curl()
c.setopt(pycurl.HTTPHEADER,['Accept: application/json'])
c.setopt(pycurl.HTTPHEADER,['Authorization: '+_token])
c.setopt(c.CUSTOMREQUEST, 'GET')
params = { 'query_string' : _string,
'query_limit' : _limit,
'row_offset' : _offset,
'query_type' : _type }
url = base_url()+'query?'+urllib.urlencode(params)
c.setopt(c.URL, url)
c.setopt(c.WRITEDATA, buffer)
c.perform()
c.close()
body = buffer.getvalue()
return body
def get_arguments():
full_args = sys.argv
arg_list = full_args[1:]
options_list = [ 'user_name=', 'password=', 'command=',
'logical_path=', 'physical_path=', 'metadata',
'permissions', 'stat', 'offset=',
'limit=', 'type=', 'query=' ]
try:
arguments, values = getopt.getopt(arg_list, [], options_list)
return dict((arguments))
except getopt.error as err:
print (str(err))
sys.exit(2)
def get_value(_args, _key):
try:
return _args['--'+_key]
except:
return None
def get_flag(_args, _key):
try:
if None == _args['--'+_key]:
return False
else:
return True
except:
return False
args = get_arguments()
token = authenticate(get_value(args, 'user_name'), get_value(args, 'password'), 'STANDARD')
cmd = args['--command']
if('query' == cmd):
qstr = get_value(args, 'query')
qtype = get_value(args, 'type')
limit = get_value(args, 'limit')
offset = get_value(args, 'offset')
print query(token, qstr, limit, offset, qtype)
elif('get' == cmd):
print get(token, get_value(args,'physical_path'), get_value(args,'logical_path'))
elif('put' == cmd):
print put(token, get_value(args,'physical_path'), get_value(args,'logical_path'))
elif('list' == cmd):
path = get_value(args, 'logical_path')
limit = get_value(args, 'limit')
offset = get_value(args, 'offset')
stat = get_flag(args, 'stat')
mdata = get_flag(args, 'metadata')
perms = get_flag(args, 'permissions')
print list(token, path, stat, perms, mdata, offset, limit)
elif('access' == cmd):
path = get_value(args, 'logical_path')
print access(token, path)
else:
pass
| 24.383673
| 158
| 0.592735
|
9f694f4bc30236766d7f05711c104624508f72a7
| 2,745
|
py
|
Python
|
nova/tests/test_wsgi.py
|
781778304/nova
|
05aff1959c9f94dae095635133386418390efb37
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/test_wsgi.py
|
781778304/nova
|
05aff1959c9f94dae095635133386418390efb37
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/test_wsgi.py
|
781778304/nova
|
05aff1959c9f94dae095635133386418390efb37
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for `nova.wsgi`."""
import os.path
import tempfile
import unittest
import nova.exception
from nova import test
import nova.wsgi
class TestLoaderNothingExists(test.TestCase):
"""Loader tests where os.path.exists always returns False."""
def setUp(self):
super(TestLoaderNothingExists, self).setUp()
self.stubs.Set(os.path, 'exists', lambda _: False)
def test_config_not_found(self):
self.assertRaises(
nova.exception.ConfigNotFound,
nova.wsgi.Loader,
)
class TestLoaderNormalFilesystem(unittest.TestCase):
"""Loader tests with normal filesystem (unmodified os.path module)."""
_paste_config = """
[app:test_app]
use = egg:Paste#static
document_root = /tmp
"""
def setUp(self):
self.config = tempfile.NamedTemporaryFile(mode="w+t")
self.config.write(self._paste_config.lstrip())
self.config.seek(0)
self.config.flush()
self.loader = nova.wsgi.Loader(self.config.name)
def test_config_found(self):
self.assertEquals(self.config.name, self.loader.config_path)
def test_app_not_found(self):
self.assertRaises(
nova.exception.PasteAppNotFound,
self.loader.load_app,
"non-existant app",
)
def test_app_found(self):
url_parser = self.loader.load_app("test_app")
self.assertEquals("/tmp", url_parser.directory)
def tearDown(self):
self.config.close()
class TestWSGIServer(unittest.TestCase):
"""WSGI server tests."""
def test_no_app(self):
server = nova.wsgi.Server("test_app", None)
self.assertEquals("test_app", server.name)
def test_start_random_port(self):
server = nova.wsgi.Server("test_random_port", None,
host="127.0.0.1", port=0)
server.start()
self.assertNotEqual(0, server.port)
server.stop()
server.wait()
| 29.516129
| 78
| 0.671403
|
c25f3f19d59e52c6063b44d4ed9e3d4b93058ae3
| 6,146
|
py
|
Python
|
__init__.py
|
NeonGeckoCom/skill-support_helper
|
4164dfe712c7a21dd9803d0ef199c8e222aa9a34
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
NeonGeckoCom/skill-support_helper
|
4164dfe712c7a21dd9803d0ef199c8e222aa9a34
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
NeonGeckoCom/skill-support_helper
|
4164dfe712c7a21dd9803d0ef199c8e222aa9a34
|
[
"Apache-2.0"
] | null | null | null |
# NEON AI (TM) SOFTWARE, Software Development Kit & Application Framework
# All trademark and other rights reserved by their respective owners
# Copyright 2008-2022 Neongecko.com Inc.
# Contributors: Daniel McKnight, Guy Daniels, Elon Gasper, Richard Leeds,
# Regina Bloomstine, Casimiro Ferreira, Andrii Pernatii, Kirill Hrymailo
# BSD-3 License
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from copy import deepcopy
from datetime import datetime
from mycroft_bus_client import Message
from neon_utils.user_utils import get_user_prefs
from neon_utils.skills.neon_skill import NeonSkill, LOG
from neon_utils.net_utils import get_ip_address
from mycroft.skills import intent_file_handler
class SupportSkill(NeonSkill):
def __init__(self):
super(SupportSkill, self).__init__(name="SupportHelper")
@property
def support_email(self) -> str:
"""
Email to refer users to for support
"""
return self.settings.get("support_email") or "support@neon.ai"
@intent_file_handler('contact_support.intent')
def handle_contact_support(self, message: Message):
"""
Handle a user request to contact support
:param message: Message associated with request
"""
user_profile = get_user_prefs(message)
if not user_profile["user"]["email"]:
# TODO: Ask to send to support@neon.ai?
self.speak_dialog("no_email", private=True)
return
if self.ask_yesno("confirm_support",
{"email": user_profile["user"]["email"]}) == "yes":
if user_profile["response_mode"].get("hesitation"):
self.speak_dialog("one_moment", private=True)
diagnostic_info = self._get_support_info(message, user_profile)
user_description = self.get_response("ask_description",
num_retries=0)
diagnostic_info["user_description"] = user_description
self.send_email(self.translate("email_title"),
self._format_email_body(diagnostic_info),
message, user_profile["user"]["email"])
self.speak_dialog("complete",
{"email": user_profile["user"]["email"]},
private=True)
else:
self.speak_dialog("cancelled", private=True)
def _format_email_body(self, diagnostics: dict) -> str:
"""
Format the diagnostic data with email dialog and return a string body
:param diagnostics: diagnostic data to format into the email
:returns: email body to send
"""
json_str = json.dumps(diagnostics, indent=4)
return '\n\n'.join((self.translate("email_intro",
{"email": self.support_email}),
json_str,
self.translate("email_signature")))
def _get_support_info(self, message: Message,
profile: dict = None) -> dict:
"""
Collect relevant information to include in a support ticket
:param message: Message associated with support request
"""
user_profile = profile or get_user_prefs(message)
message_context = deepcopy(message.context)
speech_module = self.bus.wait_for_response(
Message("mycroft.speech.is_ready"))
speech_status = speech_module.data.get("status") if speech_module \
else None
audio_module = self.bus.wait_for_response(
Message("mycroft.audio.is_ready"))
audio_status = audio_module.data.get("status") if audio_module \
else None
skills_module = self.bus.wait_for_response(
Message("mycroft.skills.is_ready"))
skills_module = skills_module.data.get("status") if skills_module \
else None
loaded_skills = self.bus.wait_for_response(
Message("skillmanager.list"), "mycroft.skills.list"
)
loaded_skills = loaded_skills.data if loaded_skills else None
core_device_ip = get_ip_address()
return {
"user_profile": user_profile,
"message_context": message_context,
"module_status": {"speech": speech_status,
"audio": audio_status,
"skills": skills_module},
"loaded_skills": loaded_skills,
"host_device": {"ip": core_device_ip},
"generated_time_utc": datetime.utcnow().isoformat()
}
def stop(self):
pass
def create_skill():
return SupportSkill()
| 43.9
| 78
| 0.657013
|
b90be324b0b3c42c6471cefcae9c950041ae993f
| 3,362
|
py
|
Python
|
flights/views.py
|
solnsubuga/flightapp
|
2da79cb4edef51507152a1d27388292a15b67815
|
[
"Apache-2.0"
] | null | null | null |
flights/views.py
|
solnsubuga/flightapp
|
2da79cb4edef51507152a1d27388292a15b67815
|
[
"Apache-2.0"
] | 8
|
2020-02-12T00:24:07.000Z
|
2021-09-08T01:11:22.000Z
|
flights/views.py
|
solnsubuga/flightapp
|
2da79cb4edef51507152a1d27388292a15b67815
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=E1101
from flights.serializers import FlightSerializer, FlightReservationSerializer, QueryReservationSerializer
from flights.models import Flight, Reservation
from rest_framework import generics
from rest_framework import exceptions
from rest_framework.views import APIView, status
from rest_framework.permissions import AllowAny, IsAuthenticated, IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from drf_yasg.utils import swagger_auto_schema
class FlightsListAPIView(generics.ListAPIView):
'''List all flights'''
permission_classes = (IsAuthenticatedOrReadOnly, )
serializer_class = FlightSerializer
queryset = Flight.objects.all()
class ReserveFlightAPIView(APIView):
''' Flight reservation view'''
serializer_class = FlightReservationSerializer
permission_classes = (IsAuthenticated, )
queryset = Reservation.objects.all()
@swagger_auto_schema(
request_body=serializer_class,
responses={201: serializer_class, 400: 'Bad Request'})
def post(self, request):
'''Make a flight reservation '''
data = request.data
serializer = self.serializer_class(data=data)
serializer.is_valid(raise_exception=True)
serializer.save(user=request.user)
return Response(serializer.data, status=status.HTTP_201_CREATED)
def get(self, request, format=None):
''' Get all your flight reservation '''
reservations = Reservation.objects.filter(user=request.user).all()
serializer = self.serializer_class(reservations, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class QueryReservationAPIView(APIView):
'''Query reservation API '''
serializer_class = QueryReservationSerializer
permission_classes = (IsAuthenticated, )
@swagger_auto_schema(
request_body=serializer_class,
responses={200: 'Ok', 400: 'Bad Request'}
)
def post(self, request):
'''Queries reservations for a flight on a given day'''
data = request.data
serializer = self.serializer_class(data=data)
serializer.is_valid(raise_exception=True)
flight_number = request.data.get('flight_number')
date = request.data.get('date')
reservations_count = Reservation.objects.filter(
flight__number=flight_number, created__date=date).count()
return Response({
'reservations': reservations_count
})
class CheckFlightStatusAPIView(APIView):
permission_classes = (AllowAny,)
@swagger_auto_schema(
responses={200: 'Ok', 404: 'Flight not found'}
)
def get(self, request, flight_number):
flight = Flight.objects.filter(number=flight_number).first()
if not flight:
raise exceptions.NotFound(
'Flight with number {flight_number} is not found'.format(flight_number=flight_number))
return Response({
'status': flight.status,
'info': {
'flight_number': flight.number,
'origin': flight.origin,
'destination': flight.destination,
'departure_time': flight.departure_time,
'arrival_time': flight.arrival_time
}
}, status=status.HTTP_200_OK)
| 38.204545
| 105
| 0.696312
|
af2b8e71c3677774c33a67a3a976fd9468bf2c17
| 767
|
py
|
Python
|
train.py
|
igorperic17/object_detection_tf_example
|
4d79eb45f5cf05af51e1055f72e4226dfa0f3538
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
igorperic17/object_detection_tf_example
|
4d79eb45f5cf05af51e1055f72e4226dfa0f3538
|
[
"Apache-2.0"
] | null | null | null |
train.py
|
igorperic17/object_detection_tf_example
|
4d79eb45f5cf05af51e1055f72e4226dfa0f3538
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10)
])
predictions = model(x_train[:1]).numpy()
print(predictions)
tf.nn.softmax(predictions).numpy()
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
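# from_logits=True because the model's final Dense layer returns raw logits rather than probabilities.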
print(loss_fn(y_train[:1], predictions).numpy())
model.compile(optimizer='adam',
loss=loss_fn,
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=500)
model.evaluate(x_test, y_test, verbose=2)
| 25.566667
| 73
| 0.711864
|
0eda28056a7dd0b5844c348a56491adcc47830d2
| 1,871
|
py
|
Python
|
bin/build-results.py
|
adjspecies/furrypoll-munger
|
8dfe57e873ccad899fbb4c1395cf89e5a70d043a
|
[
"MIT"
] | 1
|
2019-01-24T01:57:17.000Z
|
2019-01-24T01:57:17.000Z
|
bin/build-results.py
|
adjspecies/furrypoll-munger
|
8dfe57e873ccad899fbb4c1395cf89e5a70d043a
|
[
"MIT"
] | 1
|
2016-01-10T23:59:39.000Z
|
2016-01-10T23:59:39.000Z
|
bin/build-results.py
|
adjspecies/furrypoll-munger
|
8dfe57e873ccad899fbb4c1395cf89e5a70d043a
|
[
"MIT"
] | 1
|
2015-04-05T18:43:12.000Z
|
2015-04-05T18:43:12.000Z
|
import csv
import logging
import sys
import tempfile
import subprocess
import shutil
from results.meta import FIELDNAMES
def main(out_dir):
# Modules for resetting
modules = sys.modules.copy()
def reset_imports():
logger.info("Resetting imports")
n = 0
for k, v in sys.modules.items():
if k not in modules:
del sys.modules[k]
n += 1
logger.info("Reset {} imports".format(n))
# Write the results out to the CSV file
with open(out_dir + '/results.csv', 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=FIELDNAMES.keys())
writer.writeheader()
logger.info('Beginning dump')
# This is garbage.
# MongoEngine appears to leak and won't let us grab both '15 and '16, so we
# have to run each year separately.
# Grump grump grump.
tempdir = tempfile.mkdtemp()
logger.debug("Working from temporary directory {}".format(tempdir))
for i in range(2009, 2017):
if i == 2014:
continue # We didn't run a survey this year
logger.info("Dumping {}".format(i))
subprocess.call([
'python',
'bin/results/results{}.py'.format(i),
'{}/{}.csv'.format(tempdir, i),
])
logger.debug("Done dumping {}, adding to total".format(i))
with open(out_dir + '/results.csv', 'a') as csvfile:
with open("{}/{}.csv".format(tempdir, i), 'r') as year:
csvfile.write(year.read())
logger.info("Done with {}".format(i))
logger.debug("Removing working directory")
shutil.rmtree(tempdir)
logger.info('Finished')
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
if __name__ == '__main__':
main(sys.argv[1])
| 31.183333
| 79
| 0.602352
|
852b4b1056c06613a7e7ca9f4139ce3620f45e02
| 2,828
|
py
|
Python
|
adbus/server/signal.py
|
wuhanck/adbus
|
a5d4840fa99828f6da58c8912633c2bfd30a997f
|
[
"MIT"
] | 31
|
2017-09-07T22:57:54.000Z
|
2021-08-15T01:45:42.000Z
|
adbus/server/signal.py
|
wuhanck/adbus
|
a5d4840fa99828f6da58c8912633c2bfd30a997f
|
[
"MIT"
] | 41
|
2017-08-23T17:44:02.000Z
|
2021-04-21T21:22:24.000Z
|
adbus/server/signal.py
|
wuhanck/adbus
|
a5d4840fa99828f6da58c8912633c2bfd30a997f
|
[
"MIT"
] | 10
|
2018-08-22T06:08:20.000Z
|
2020-07-06T11:05:04.000Z
|
# Copyright: 2017, CCX Technologies
"""D-Bus Signal"""
from .. import sdbus
class Signal:
"""Provides a method to emit a signal via D-Bus.
This class is to be used as a Decorator for signals in
an adbus.server.Object which will be exported via the D-Bus.
Args:
name (str): optional, signal name used in the D-Bus, if None the
signal's label will be used
deprecated (bool): optional, if true object is labelled
as deprecated in the introspect XML data
hidden (bool): optional, if true object won't be added
to the introspect XML data
camel_convert (bool): optional, D-Bus method and property
names are typically defined in Camel Case, but Python
methods and arguments are typically defined in Snake
Case, if this is set the cases will be automatically
converted between the two
"""
def __init__(
self,
name=None,
deprecated=False,
hidden=False,
camel_convert=True
):
self.dbus_name = name
self.deprecated = deprecated
self.hidden = hidden
self.camel_convert = camel_convert
def __get__(self, instance, owner):
raise RuntimeError("Can't read from a signal")
def __set__(self, instance, value):
signal = instance.__dict__[self.py_name]
if isinstance(self.dbus_signature, list):
signal.emit(*value)
else:
signal.emit(value)
def __set_name__(self, owner, name):
self.py_name = name
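        # Derive the D-Bus signature from the owner's annotation: an iterable annotation yields a multi-argument signal, a single type yields one argument, and no annotation falls back to a variant.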
try:
signature = iter(owner.__annotations__[name])
except TypeError:
self.dbus_signature = (
sdbus.dbus_signature(owner.__annotations__[name]),
)
except KeyError:
self.dbus_signature = sdbus.variant_signature()
else:
try:
self.dbus_signature = [
sdbus.dbus_signature(s) for s in signature
]
except (RuntimeError, TypeError):
# this is a work around for an issue with the
# typing library on Python version 3.7 creating an
# unusable iterator from typing.List[int]
self.dbus_signature = (
sdbus.dbus_signature(owner.__annotations__[name]),
)
if not self.dbus_name:
self.dbus_name = name
if self.camel_convert:
self.dbus_name = sdbus.snake_to_camel(self.dbus_name)
def vt(self, instance):
signal = sdbus.Signal(
self.dbus_name, self.dbus_signature, self.deprecated,
self.hidden
)
instance.__dict__[self.py_name] = signal
return signal
| 32.505747
| 74
| 0.582744
|
9b9f83f37ec0d88d71125f9e1f49e506c410ec44
| 2,400
|
py
|
Python
|
funtests/tests/test_mongodb.py
|
hudolejev/kombu
|
1bd9ac8f9725a4e11520e424f927b4a43f42a4a5
|
[
"BSD-3-Clause"
] | 2
|
2017-08-28T17:15:06.000Z
|
2018-02-09T08:04:39.000Z
|
funtests/tests/test_mongodb.py
|
hudolejev/kombu
|
1bd9ac8f9725a4e11520e424f927b4a43f42a4a5
|
[
"BSD-3-Clause"
] | null | null | null |
funtests/tests/test_mongodb.py
|
hudolejev/kombu
|
1bd9ac8f9725a4e11520e424f927b4a43f42a4a5
|
[
"BSD-3-Clause"
] | 1
|
2019-08-16T18:50:50.000Z
|
2019-08-16T18:50:50.000Z
|
from kombu import Consumer, Producer, Exchange, Queue
from kombu.five import range
from kombu.utils import nested
from kombu.tests.case import skip_if_not_module
from funtests import transport
class test_mongodb(transport.TransportCase):
transport = 'mongodb'
prefix = 'mongodb'
event_loop_max = 100
@skip_if_not_module('pymongo')
def setup(self):
pass
def after_connect(self, connection):
connection.channel().client # evaluate connection.
self.c = self.connection # shortcut
def test_fanout(self, name='test_mongodb_fanout'):
if not self.verify_alive():
return
c = self.connection
self.e = Exchange(name, type='fanout')
self.q = Queue(name, exchange=self.e, routing_key=name)
self.q2 = Queue(name + '2', exchange=self.e, routing_key=name + '2')
channel = c.default_channel
producer = Producer(channel, self.e)
consumer1 = Consumer(channel, self.q)
consumer2 = Consumer(channel, self.q2)
self.q2(channel).declare()
for i in range(10):
producer.publish({'foo': i}, routing_key=name)
for i in range(10):
producer.publish({'foo': i}, routing_key=name + '2')
_received1 = []
_received2 = []
def callback1(message_data, message):
_received1.append(message)
message.ack()
def callback2(message_data, message):
_received2.append(message)
message.ack()
consumer1.register_callback(callback1)
consumer2.register_callback(callback2)
with nested(consumer1, consumer2):
while 1:
if len(_received1) + len(_received2) == 20:
break
c.drain_events(timeout=60)
self.assertEqual(len(_received1) + len(_received2), 20)
# queue.delete
for i in range(10):
producer.publish({'foo': i}, routing_key=name)
self.assertTrue(self.q(channel).get())
self.q(channel).delete()
self.q(channel).declare()
self.assertIsNone(self.q(channel).get())
# queue.purge
for i in range(10):
producer.publish({'foo': i}, routing_key=name + '2')
self.assertTrue(self.q2(channel).get())
self.q2(channel).purge()
self.assertIsNone(self.q2(channel).get())
| 30.769231
| 76
| 0.605833
|
0c51a07c14a9af1a181cb39312082b42aabc3a39
| 4,863
|
py
|
Python
|
PDF.co Web API/PDF Merging API/Python/Merge PDF Documents From Uploaded Files Asynchronously/MergePDFDocumentsFromUploadedFileAsynchronously.py
|
atkins126/ByteScout-SDK-SourceCode
|
cc4bc9e779ad95f85be0a8630c17878006059684
|
[
"Apache-2.0"
] | null | null | null |
PDF.co Web API/PDF Merging API/Python/Merge PDF Documents From Uploaded Files Asynchronously/MergePDFDocumentsFromUploadedFileAsynchronously.py
|
atkins126/ByteScout-SDK-SourceCode
|
cc4bc9e779ad95f85be0a8630c17878006059684
|
[
"Apache-2.0"
] | null | null | null |
PDF.co Web API/PDF Merging API/Python/Merge PDF Documents From Uploaded Files Asynchronously/MergePDFDocumentsFromUploadedFileAsynchronously.py
|
atkins126/ByteScout-SDK-SourceCode
|
cc4bc9e779ad95f85be0a8630c17878006059684
|
[
"Apache-2.0"
] | null | null | null |
""" Cloud API asynchronous "PDF To Text" job example.
Allows to avoid timeout errors when processing huge or scanned PDF documents.
"""
import os
import requests # pip install requests
import time
import datetime
# The authentication key (API Key).
# Get your own by registering at https://app.pdf.co/documentation/api
API_KEY = "******************************************"
# Base URL for PDF.co Web API requests
BASE_URL = "https://api.pdf.co/v1"
# Source PDF files
SourceFile_1 = ".\\sample1.pdf"
SourceFile_2 = ".\\sample2.pdf"
# Destination PDF file name
DestinationFile = ".\\result.pdf"
# (!) Make asynchronous job
Async = True
def main(args = None):
UploadedFileUrl_1 = uploadFile(SourceFile_1)
UploadedFileUrl_2 = uploadFile(SourceFile_2)
if (UploadedFileUrl_1 != None and UploadedFileUrl_2!= None):
uploadedFileUrls = "{},{}".format(UploadedFileUrl_1, UploadedFileUrl_2)
mergeFiles(uploadedFileUrls, DestinationFile)
def mergeFiles(uploadedFileUrls, destinationFile):
"""Perform Merge using PDF.co Web API"""
# Prepare requests params as JSON
# See documentation: https://apidocs.pdf.co
parameters = {}
parameters["async"] = Async
parameters["name"] = os.path.basename(destinationFile)
parameters["url"] = uploadedFileUrls
# Prepare URL for 'Merge PDF' API request
url = "{}/pdf/merge".format(BASE_URL)
# Execute request and get response as JSON
response = requests.post(url, data=parameters, headers={ "x-api-key": API_KEY })
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# Asynchronous job ID
jobId = json["jobId"]
# URL of the result file
resultFileUrl = json["url"]
# Check the job status in a loop.
# If you don't want to pause the main thread you can rework the code
# to use a separate thread for the status checking and completion.
while True:
status = checkJobStatus(jobId) # Possible statuses: "working", "failed", "aborted", "success".
# Display timestamp and status (for demo purposes)
print(datetime.datetime.now().strftime("%H:%M.%S") + ": " + status)
if status == "success":
# Download result file
r = requests.get(resultFileUrl, stream=True)
if (r.status_code == 200):
with open(destinationFile, 'wb') as file:
for chunk in r:
file.write(chunk)
print(f"Result file saved as \"{destinationFile}\" file.")
else:
print(f"Request error: {response.status_code} {response.reason}")
break
elif status == "working":
# Pause for a few seconds
time.sleep(3)
else:
print(status)
break
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
def checkJobStatus(jobId):
"""Checks server job status"""
url = f"{BASE_URL}/job/check?jobid={jobId}"
response = requests.get(url, headers={ "x-api-key": API_KEY })
if (response.status_code == 200):
json = response.json()
return json["status"]
else:
print(f"Request error: {response.status_code} {response.reason}")
return None
def uploadFile(fileName):
"""Uploads file to the cloud"""
# 1. RETRIEVE PRESIGNED URL TO UPLOAD FILE.
# Prepare URL for 'Get Presigned URL' API request
url = "{}/file/upload/get-presigned-url?contenttype=application/octet-stream&name={}".format(
BASE_URL, os.path.basename(fileName))
# Execute request and get response as JSON
response = requests.get(url, headers={ "x-api-key": API_KEY })
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# URL to use for file upload
uploadUrl = json["presignedUrl"]
# URL for future reference
uploadedFileUrl = json["url"]
# 2. UPLOAD FILE TO CLOUD.
with open(fileName, 'rb') as file:
requests.put(uploadUrl, data=file, headers={ "x-api-key": API_KEY, "content-type": "application/octet-stream" })
return uploadedFileUrl
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
return None
if __name__ == '__main__':
main()
| 34.006993
| 128
| 0.581328
|
daab7482e6fcd4530ee66c906699733ce138c12d
| 324
|
py
|
Python
|
rxbp/multicast/impl/multicastsubscriberimpl.py
|
MichaelSchneeberger/rx_backpressure
|
16173827498bf1bbee3344933cb9efbfd19699f5
|
[
"Apache-2.0"
] | 24
|
2018-11-22T21:04:49.000Z
|
2021-11-08T11:18:09.000Z
|
rxbp/multicast/impl/multicastsubscriberimpl.py
|
MichaelSchneeberger/rx_backpressure
|
16173827498bf1bbee3344933cb9efbfd19699f5
|
[
"Apache-2.0"
] | 1
|
2019-02-06T15:58:46.000Z
|
2019-02-12T20:31:50.000Z
|
rxbp/multicast/impl/multicastsubscriberimpl.py
|
MichaelSchneeberger/rx_backpressure
|
16173827498bf1bbee3344933cb9efbfd19699f5
|
[
"Apache-2.0"
] | 1
|
2021-01-26T12:41:37.000Z
|
2021-01-26T12:41:37.000Z
|
from typing import Tuple
from dataclass_abc import dataclass_abc
from rxbp.multicast.multicastsubscriber import MultiCastSubscriber
from rxbp.schedulers.trampolinescheduler import TrampolineScheduler
@dataclass_abc
class MultiCastSubscriberImpl(MultiCastSubscriber):
subscribe_schedulers: Tuple[TrampolineScheduler]
| 27
| 67
| 0.876543
|
adadd945be16c596bc332905f941a9e083f8fdfe
| 4,388
|
py
|
Python
|
fluent_pages/management/commands/rebuild_page_tree.py
|
vinnyrose/django-fluent-pages
|
960b40dcf4e5cecd440f6414d28be6b51f31eb4e
|
[
"Apache-2.0"
] | null | null | null |
fluent_pages/management/commands/rebuild_page_tree.py
|
vinnyrose/django-fluent-pages
|
960b40dcf4e5cecd440f6414d28be6b51f31eb4e
|
[
"Apache-2.0"
] | null | null | null |
fluent_pages/management/commands/rebuild_page_tree.py
|
vinnyrose/django-fluent-pages
|
960b40dcf4e5cecd440f6414d28be6b51f31eb4e
|
[
"Apache-2.0"
] | null | null | null |
from django.core.management.base import NoArgsCommand
from django.utils.encoding import smart_text
from optparse import make_option
from fluent_pages import appsettings
from fluent_pages.models.db import UrlNode_Translation, UrlNode
class Command(NoArgsCommand):
"""
Update the tree, rebuild the translated URL nodes.
"""
help = "Update the cached_url for the translated URL node tree"
option_list = (
make_option(
'-p', '--dry-run', action='store_true', dest='dry-run', default=False,
help="Only list what will change, don't make the actual changes."
),
make_option(
'-m', '--mptt-only', action='store_true', dest='mptt-only', default=False,
help="Only fix the MPTT fields, leave URLs unchanged."
),
) + NoArgsCommand.option_list
def handle_noargs(self, **options):
is_dry_run = options.get('dry-run', False)
mptt_only = options.get('mptt-only', False)
slugs = {}
overrides = {}
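        # Map each node id to its parent id so translated URLs can be rebuilt by walking up the tree.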
parents = dict(UrlNode.objects.values_list('id', 'parent_id'))
self.stdout.write("Updated MPTT columns")
if is_dry_run and mptt_only:
# Can't really do anything
return
if not is_dry_run:
# Fix MPTT first, that is the basis for walking through all nodes.
UrlNode.objects.rebuild()
self.stdout.write("Updated MPTT columns")
if mptt_only:
return
self.stdout.write("Updating cached URLs")
self.stdout.write("Page tree nodes:\n\n")
col_style = u"| {0:6} | {1:6} | {2:6} | {3}"
header = col_style.format("Site", "Page", "Locale", "URL")
sep = '-' * (len(header) + 40)
self.stdout.write(sep)
self.stdout.write(header)
self.stdout.write(sep)
for translation in UrlNode_Translation.objects.select_related('master').order_by('master__parent_site__id', 'master__tree_id', 'master__lft', 'language_code'):
slugs.setdefault(translation.language_code, {})[translation.master_id] = translation.slug
overrides.setdefault(translation.language_code, {})[translation.master_id] = translation.override_url
old_url = translation._cached_url
try:
new_url = self._construct_url(translation.language_code, translation.master_id, parents, slugs, overrides)
except KeyError:
if is_dry_run:
# When the mptt tree is broken, some URLs can't be correctly generated yet.
self.stderr.write("Failed to determine new URL for {0}, please run with --mptt-only first.".format(old_url))
return
raise
if old_url != new_url:
translation._cached_url = new_url
if not is_dry_run:
translation.save()
if old_url != new_url:
self.stdout.write(smart_text(u"{0} {1} {2}\n".format(
col_style.format(translation.master.parent_site_id, translation.master_id, translation.language_code, translation._cached_url),
"WILL CHANGE from" if is_dry_run else "UPDATED from",
old_url
)))
else:
self.stdout.write(smart_text(col_style.format(
translation.master.parent_site_id, translation.master_id, translation.language_code, translation._cached_url
)))
def _construct_url(self, language_code, child_id, parents, slugs, overrides):
fallback = appsettings.FLUENT_PAGES_LANGUAGES.get_fallback_language(language_code)
breadcrumb = []
cur = child_id
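        # Walk up the parent chain to build the breadcrumb of node ids from the root down to this node.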
while cur is not None:
breadcrumb.insert(0, cur)
cur = parents[cur]
url_parts = ['']
for id in breadcrumb:
try:
# Resets url_parts
override = overrides[language_code][id]
if override:
url_parts = [override]
continue
except KeyError:
pass
try:
url_parts.append(slugs[language_code][id])
except KeyError:
url_parts.append(slugs[fallback][id])
return (u'/'.join(url_parts) + u'/').replace('//', '/')
| 40.256881
| 167
| 0.590018
|
b1462bb95b585e8b788163481dc21824a6b1e84c
| 7,562
|
py
|
Python
|
test/functional/wallet_dump.py
|
Manu-2009/Geranium
|
93c08aa10ea151f4efd8337c1d5599ee7e8d58ea
|
[
"MIT"
] | 2
|
2022-01-09T15:32:02.000Z
|
2022-01-09T15:32:22.000Z
|
test/functional/wallet_dump.py
|
Manu-2009/Geranium
|
93c08aa10ea151f4efd8337c1d5599ee7e8d58ea
|
[
"MIT"
] | 2
|
2021-12-23T01:04:41.000Z
|
2021-12-30T18:47:39.000Z
|
test/functional/wallet_dump.py
|
Manu-2009/Geranium
|
93c08aa10ea151f4efd8337c1d5599ee7e8d58ea
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Geranium Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the dumpwallet RPC."""
import os
from test_framework.test_framework import GeraniumTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
def read_dump(file_name, addrs, script_addrs, hd_master_addr_old):
"""
Read the given dump, count the addrs that match, count change and reserve.
Also check that the old hd_master is inactive
"""
with open(file_name, encoding='utf8') as inputfile:
found_legacy_addr = 0
found_p2sh_segwit_addr = 0
found_bech32_addr = 0
found_script_addr = 0
found_addr_chg = 0
found_addr_rsv = 0
hd_master_addr_ret = None
for line in inputfile:
# only read non comment lines
if line[0] != "#" and len(line) > 10:
# split out some data
key_date_label, comment = line.split("#")
key_date_label = key_date_label.split(" ")
# key = key_date_label[0]
date = key_date_label[1]
keytype = key_date_label[2]
imported_key = date == '1970-01-01T00:00:01Z'
if imported_key:
# Imported keys have multiple addresses, no label (keypath) and timestamp
# Skip them
continue
addr_keypath = comment.split(" addr=")[1]
addr = addr_keypath.split(" ")[0]
keypath = None
if keytype == "inactivehdseed=1":
# ensure the old master is still available
assert hd_master_addr_old == addr
elif keytype == "hdseed=1":
# ensure we have generated a new hd master key
assert hd_master_addr_old != addr
hd_master_addr_ret = addr
elif keytype == "script=1":
# scripts don't have keypaths
keypath = None
else:
keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
# count key types
for addrObj in addrs:
if addrObj['address'] == addr.split(",")[0] and addrObj['hdkeypath'] == keypath and keytype == "label=":
if addr.startswith('m') or addr.startswith('n'):
# P2PKH address
found_legacy_addr += 1
elif addr.startswith('2'):
# P2SH-segwit address
found_p2sh_segwit_addr += 1
elif addr.startswith('bcrt1'):
found_bech32_addr += 1
break
elif keytype == "change=1":
found_addr_chg += 1
break
elif keytype == "reserve=1":
found_addr_rsv += 1
break
# count scripts
for script_addr in script_addrs:
if script_addr == addr.rstrip() and keytype == "script=1":
found_script_addr += 1
break
return found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
class WalletDumpTest(GeraniumTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [["-keypool=90", "-addresstype=legacy"]]
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes()
def run_test(self):
wallet_unenc_dump = os.path.join(self.nodes[0].datadir, "wallet.unencrypted.dump")
wallet_enc_dump = os.path.join(self.nodes[0].datadir, "wallet.encrypted.dump")
# generate 30 addresses to compare against the dump
# - 10 legacy P2PKH
# - 10 P2SH-segwit
# - 10 bech32
test_addr_count = 10
addrs = []
for address_type in ['legacy', 'p2sh-segwit', 'bech32']:
for i in range(0, test_addr_count):
addr = self.nodes[0].getnewaddress(address_type=address_type)
vaddr = self.nodes[0].getaddressinfo(addr) # required to get hd keypath
addrs.append(vaddr)
# Test scripts dump by adding a 1-of-1 multisig address
multisig_addr = self.nodes[0].addmultisigaddress(1, [addrs[1]["address"]])["address"]
# Refill the keypool. getnewaddress() refills the keypool *before* taking a key from
# the keypool, so the final call to getnewaddress leaves the keypool with one key below
# its capacity
self.nodes[0].keypoolrefill()
# dump unencrypted wallet
result = self.nodes[0].dumpwallet(wallet_unenc_dump)
assert_equal(result['filename'], wallet_unenc_dump)
found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
read_dump(wallet_unenc_dump, addrs, [multisig_addr], None)
assert_equal(found_legacy_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_p2sh_segwit_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_bech32_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_script_addr, 1) # all scripts must be in the dump
assert_equal(found_addr_chg, 0) # 0 blocks where mined
assert_equal(found_addr_rsv, 90 * 2) # 90 keys plus 100% internal keys
# encrypt wallet, restart, unlock and dump
self.nodes[0].encryptwallet('test')
self.nodes[0].walletpassphrase('test', 100)
# Should be a no-op:
self.nodes[0].keypoolrefill()
self.nodes[0].dumpwallet(wallet_enc_dump)
found_legacy_addr, found_p2sh_segwit_addr, found_bech32_addr, found_script_addr, found_addr_chg, found_addr_rsv, _ = \
read_dump(wallet_enc_dump, addrs, [multisig_addr], hd_master_addr_unenc)
assert_equal(found_legacy_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_p2sh_segwit_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_bech32_addr, test_addr_count) # all keys must be in the dump
assert_equal(found_script_addr, 1)
assert_equal(found_addr_chg, 90 * 2) # old reserve keys are marked as change now
assert_equal(found_addr_rsv, 90 * 2)
# Overwriting should fail
assert_raises_rpc_error(-8, "already exists", lambda: self.nodes[0].dumpwallet(wallet_enc_dump))
# Restart node with new wallet, and test importwallet
self.stop_node(0)
self.start_node(0, ['-wallet=w2'])
# Make sure the address is not IsMine before import
result = self.nodes[0].getaddressinfo(multisig_addr)
assert not result['ismine']
self.nodes[0].importwallet(wallet_unenc_dump)
# Now check IsMine is true
result = self.nodes[0].getaddressinfo(multisig_addr)
assert result['ismine']
if __name__ == '__main__':
WalletDumpTest().main()
| 43.710983
| 146
| 0.605395
|
f23e6d75490dfb678c44672ba1d54f6a0737e5ce
| 43,338
|
py
|
Python
|
pyboto3/codestar.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 91
|
2016-12-31T11:38:37.000Z
|
2021-09-16T19:33:23.000Z
|
pyboto3/codestar.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 7
|
2017-01-02T18:54:23.000Z
|
2020-08-11T13:54:02.000Z
|
pyboto3/codestar.py
|
gehad-shaat/pyboto3
|
4a0c2851a8bc04fb1c71c36086f7bb257e48181d
|
[
"MIT"
] | 26
|
2016-12-31T13:11:00.000Z
|
2022-03-03T21:01:12.000Z
|
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def associate_team_member(projectId=None, clientRequestToken=None, userArn=None, projectRole=None, remoteAccessAllowed=None):
"""
Adds an IAM user to the team for an AWS CodeStar project.
See also: AWS API Documentation
Exceptions
:example: response = client.associate_team_member(
projectId='string',
clientRequestToken='string',
userArn='string',
projectRole='string',
remoteAccessAllowed=True|False
)
:type projectId: string
:param projectId: [REQUIRED]\nThe ID of the project to which you will add the IAM user.\n
:type clientRequestToken: string
:param clientRequestToken: A user- or system-generated token that identifies the entity that requested the team member association to the project. This token can be used to repeat the request.
:type userArn: string
:param userArn: [REQUIRED]\nThe Amazon Resource Name (ARN) for the IAM user you want to add to the AWS CodeStar project.\n
:type projectRole: string
:param projectRole: [REQUIRED]\nThe AWS CodeStar project role that will apply to this user. This role determines what actions a user can take in an AWS CodeStar project.\n
:type remoteAccessAllowed: boolean
:param remoteAccessAllowed: Whether the team member is allowed to use an SSH public/private key pair to remotely access project resources, for example Amazon EC2 instances.
:rtype: dict
ReturnsResponse Syntax
{
'clientRequestToken': 'string'
}
Response Structure
(dict) --
clientRequestToken (string) --
The user- or system-generated token from the initial request that can be used to repeat the request.
Exceptions
CodeStar.Client.exceptions.LimitExceededException
CodeStar.Client.exceptions.ProjectNotFoundException
CodeStar.Client.exceptions.TeamMemberAlreadyAssociatedException
CodeStar.Client.exceptions.ValidationException
CodeStar.Client.exceptions.InvalidServiceRoleException
CodeStar.Client.exceptions.ProjectConfigurationException
CodeStar.Client.exceptions.ConcurrentModificationException
:return: {
'clientRequestToken': 'string'
}
:returns:
CodeStar.Client.exceptions.LimitExceededException
CodeStar.Client.exceptions.ProjectNotFoundException
CodeStar.Client.exceptions.TeamMemberAlreadyAssociatedException
CodeStar.Client.exceptions.ValidationException
CodeStar.Client.exceptions.InvalidServiceRoleException
CodeStar.Client.exceptions.ProjectConfigurationException
CodeStar.Client.exceptions.ConcurrentModificationException
"""
pass
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
"""
pass
def create_project(name=None, id=None, description=None, clientRequestToken=None, sourceCode=None, toolchain=None, tags=None):
"""
Creates a project, including project resources. This action creates a project based on a submitted project request. A set of source code files and a toolchain template file can be included with the project request. If these are not provided, an empty project is created.
See also: AWS API Documentation
Exceptions
:example: response = client.create_project(
name='string',
id='string',
description='string',
clientRequestToken='string',
sourceCode=[
{
'source': {
's3': {
'bucketName': 'string',
'bucketKey': 'string'
}
},
'destination': {
'codeCommit': {
'name': 'string'
},
'gitHub': {
'name': 'string',
'description': 'string',
'type': 'string',
'owner': 'string',
'privateRepository': True|False,
'issuesEnabled': True|False,
'token': 'string'
}
}
},
],
toolchain={
'source': {
's3': {
'bucketName': 'string',
'bucketKey': 'string'
}
},
'roleArn': 'string',
'stackParameters': {
'string': 'string'
}
},
tags={
'string': 'string'
}
)
:type name: string
:param name: [REQUIRED]\nThe display name for the project to be created in AWS CodeStar.\n
:type id: string
:param id: [REQUIRED]\nThe ID of the project to be created in AWS CodeStar.\n
:type description: string
:param description: The description of the project, if any.
:type clientRequestToken: string
:param clientRequestToken: A user- or system-generated token that identifies the entity that requested project creation. This token can be used to repeat the request.
:type sourceCode: list
:param sourceCode: A list of the Code objects submitted with the project request. If this parameter is specified, the request must also include the toolchain parameter.\n\n(dict) --Location and destination information about the source code files provided with the project request. The source code is uploaded to the new project source repository after project creation.\n\nsource (dict) -- [REQUIRED]The location where the source code files provided with the project request are stored. AWS CodeStar retrieves the files during project creation.\n\ns3 (dict) -- [REQUIRED]Information about the Amazon S3 location where the source code files provided with the project request are stored.\n\nbucketName (string) --The Amazon S3 bucket name where the source code files provided with the project request are stored.\n\nbucketKey (string) --The Amazon S3 object key where the source code files provided with the project request are stored.\n\n\n\n\n\ndestination (dict) -- [REQUIRED]The repository to be created in AWS CodeStar. Valid values are AWS CodeCommit or GitHub. After AWS CodeStar provisions the new repository, the source code files provided with the project request are placed in the repository.\n\ncodeCommit (dict) --Information about the AWS CodeCommit repository to be created in AWS CodeStar. This is where the source code files provided with the project request will be uploaded after project creation.\n\nname (string) -- [REQUIRED]The name of the AWS CodeCommit repository to be created in AWS CodeStar.\n\n\n\ngitHub (dict) --Information about the GitHub repository to be created in AWS CodeStar. This is where the source code files provided with the project request will be uploaded after project creation.\n\nname (string) -- [REQUIRED]Name of the GitHub repository to be created in AWS CodeStar.\n\ndescription (string) --Description for the GitHub repository to be created in AWS CodeStar. This description displays in GitHub after the repository is created.\n\ntype (string) -- [REQUIRED]The type of GitHub repository to be created in AWS CodeStar. Valid values are User or Organization.\n\nowner (string) -- [REQUIRED]The GitHub username for the owner of the GitHub repository to be created in AWS CodeStar. If this repository should be owned by a GitHub organization, provide its name.\n\nprivateRepository (boolean) -- [REQUIRED]Whether the GitHub repository is to be a private repository.\n\nissuesEnabled (boolean) -- [REQUIRED]Whether to enable issues for the GitHub repository.\n\ntoken (string) -- [REQUIRED]The GitHub user\'s personal access token for the GitHub repository.\n\n\n\n\n\n\n\n\n
:type toolchain: dict
:param toolchain: The name of the toolchain template file submitted with the project request. If this parameter is specified, the request must also include the sourceCode parameter.\n\nsource (dict) -- [REQUIRED]The Amazon S3 location where the toolchain template file provided with the project request is stored. AWS CodeStar retrieves the file during project creation.\n\ns3 (dict) -- [REQUIRED]The Amazon S3 bucket where the toolchain template file provided with the project request is stored.\n\nbucketName (string) --The Amazon S3 bucket name where the source code files provided with the project request are stored.\n\nbucketKey (string) --The Amazon S3 object key where the source code files provided with the project request are stored.\n\n\n\n\n\nroleArn (string) --The service role ARN for AWS CodeStar to use for the toolchain template during stack provisioning.\n\nstackParameters (dict) --The list of parameter overrides to be passed into the toolchain template during stack provisioning, if any.\n\n(string) --\n(string) --\n\n\n\n\n\n
:type tags: dict
:param tags: The tags created for the project.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'id': 'string',
'arn': 'string',
'clientRequestToken': 'string',
'projectTemplateId': 'string'
}
Response Structure
(dict) --
id (string) --
The ID of the project.
arn (string) --
The Amazon Resource Name (ARN) of the created project.
clientRequestToken (string) --
A user- or system-generated token that identifies the entity that requested project creation.
projectTemplateId (string) --
Reserved for future use.
Exceptions
CodeStar.Client.exceptions.ProjectAlreadyExistsException
CodeStar.Client.exceptions.LimitExceededException
CodeStar.Client.exceptions.ValidationException
CodeStar.Client.exceptions.ProjectCreationFailedException
CodeStar.Client.exceptions.InvalidServiceRoleException
CodeStar.Client.exceptions.ProjectConfigurationException
CodeStar.Client.exceptions.ConcurrentModificationException
:return: {
'id': 'string',
'arn': 'string',
'clientRequestToken': 'string',
'projectTemplateId': 'string'
}
:returns:
CodeStar.Client.exceptions.ProjectAlreadyExistsException
CodeStar.Client.exceptions.LimitExceededException
CodeStar.Client.exceptions.ValidationException
CodeStar.Client.exceptions.ProjectCreationFailedException
CodeStar.Client.exceptions.InvalidServiceRoleException
CodeStar.Client.exceptions.ProjectConfigurationException
CodeStar.Client.exceptions.ConcurrentModificationException
"""
pass
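def _example_create_project_usage():
    """Illustrative sketch only, not part of the generated stubs: creates an empty
    project (no sourceCode/toolchain supplied) through a real boto3 client, as the
    create_project documentation above allows. The project name, id and token are
    placeholder values, and valid AWS credentials are assumed."""
    import boto3
    client = boto3.client('codestar')
    response = client.create_project(
        name='My Demo Project',
        id='my-demo-project',
        clientRequestToken='example-token-123',
    )
    return response['id'], response['arn']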
def create_user_profile(userArn=None, displayName=None, emailAddress=None, sshPublicKey=None):
"""
    Creates a profile for a user that includes user preferences, such as the display name and email address associated with the user, in AWS CodeStar. The user profile is not project-specific. Information in the user profile is displayed wherever the user\'s information appears to other users in AWS CodeStar.
See also: AWS API Documentation
Exceptions
:example: response = client.create_user_profile(
userArn='string',
displayName='string',
emailAddress='string',
sshPublicKey='string'
)
:type userArn: string
:param userArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the user in IAM.\n
:type displayName: string
:param displayName: [REQUIRED]\nThe name that will be displayed as the friendly name for the user in AWS CodeStar.\n
:type emailAddress: string
:param emailAddress: [REQUIRED]\nThe email address that will be displayed as part of the user\'s profile in AWS CodeStar.\n
:type sshPublicKey: string
:param sshPublicKey: The SSH public key associated with the user in AWS CodeStar. If a project owner allows the user remote access to project resources, this public key will be used along with the user\'s private key for SSH access.
:rtype: dict
ReturnsResponse Syntax
{
'userArn': 'string',
'displayName': 'string',
'emailAddress': 'string',
'sshPublicKey': 'string',
'createdTimestamp': datetime(2015, 1, 1),
'lastModifiedTimestamp': datetime(2015, 1, 1)
}
Response Structure
(dict) --
userArn (string) --
The Amazon Resource Name (ARN) of the user in IAM.
displayName (string) --
The name that is displayed as the friendly name for the user in AWS CodeStar.
emailAddress (string) --
The email address that is displayed as part of the user\'s profile in AWS CodeStar.
sshPublicKey (string) --
The SSH public key associated with the user in AWS CodeStar. This is the public portion of the public/private keypair the user can use to access project resources if a project owner allows the user remote access to those resources.
createdTimestamp (datetime) --
The date the user profile was created, in timestamp format.
lastModifiedTimestamp (datetime) --
The date the user profile was last modified, in timestamp format.
Exceptions
CodeStar.Client.exceptions.UserProfileAlreadyExistsException
CodeStar.Client.exceptions.ValidationException
:return: {
'userArn': 'string',
'displayName': 'string',
'emailAddress': 'string',
'sshPublicKey': 'string',
'createdTimestamp': datetime(2015, 1, 1),
'lastModifiedTimestamp': datetime(2015, 1, 1)
}
:returns:
CodeStar.Client.exceptions.UserProfileAlreadyExistsException
CodeStar.Client.exceptions.ValidationException
"""
pass
def delete_project(id=None, clientRequestToken=None, deleteStack=None):
"""
Deletes a project, including project resources. Does not delete users associated with the project, but does delete the IAM roles that allowed access to the project.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_project(
id='string',
clientRequestToken='string',
deleteStack=True|False
)
:type id: string
:param id: [REQUIRED]\nThe ID of the project to be deleted in AWS CodeStar.\n
:type clientRequestToken: string
:param clientRequestToken: A user- or system-generated token that identifies the entity that requested project deletion. This token can be used to repeat the request.
:type deleteStack: boolean
:param deleteStack: Whether to send a delete request for the primary stack in AWS CloudFormation originally used to generate the project and its resources. This option will delete all AWS resources for the project (except for any buckets in Amazon S3) as well as deleting the project itself. Recommended for most use cases.
:rtype: dict
ReturnsResponse Syntax
{
'stackId': 'string',
'projectArn': 'string'
}
Response Structure
(dict) --
stackId (string) --
The ID of the primary stack in AWS CloudFormation that will be deleted as part of deleting the project and its resources.
projectArn (string) --
The Amazon Resource Name (ARN) of the deleted project.
Exceptions
CodeStar.Client.exceptions.ConcurrentModificationException
CodeStar.Client.exceptions.ValidationException
CodeStar.Client.exceptions.InvalidServiceRoleException
:return: {
'stackId': 'string',
'projectArn': 'string'
}
:returns:
CodeStar.Client.exceptions.ConcurrentModificationException
CodeStar.Client.exceptions.ValidationException
CodeStar.Client.exceptions.InvalidServiceRoleException
"""
pass
def delete_user_profile(userArn=None):
"""
Deletes a user profile in AWS CodeStar, including all personal preference data associated with that profile, such as display name and email address. It does not delete the history of that user, for example the history of commits made by that user.
See also: AWS API Documentation
Exceptions
:example: response = client.delete_user_profile(
userArn='string'
)
:type userArn: string
:param userArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the user to delete from AWS CodeStar.\n
:rtype: dict
ReturnsResponse Syntax{
'userArn': 'string'
}
Response Structure
(dict) --
userArn (string) --The Amazon Resource Name (ARN) of the user deleted from AWS CodeStar.
Exceptions
CodeStar.Client.exceptions.ValidationException
:return: {
'userArn': 'string'
}
"""
pass
def describe_project(id=None):
"""
Describes a project and its resources.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_project(
id='string'
)
:type id: string
:param id: [REQUIRED]\nThe ID of the project.\n
:rtype: dict
ReturnsResponse Syntax{
'name': 'string',
'id': 'string',
'arn': 'string',
'description': 'string',
'clientRequestToken': 'string',
'createdTimeStamp': datetime(2015, 1, 1),
'stackId': 'string',
'projectTemplateId': 'string',
'status': {
'state': 'string',
'reason': 'string'
}
}
Response Structure
(dict) --
name (string) --The display name for the project.
id (string) --The ID of the project.
arn (string) --The Amazon Resource Name (ARN) for the project.
description (string) --The description of the project, if any.
clientRequestToken (string) --A user- or system-generated token that identifies the entity that requested project creation.
createdTimeStamp (datetime) --The date and time the project was created, in timestamp format.
stackId (string) --The ID of the primary stack in AWS CloudFormation used to generate resources for the project.
projectTemplateId (string) --The ID for the AWS CodeStar project template used to create the project.
status (dict) --The project creation or deletion status.
state (string) --The phase of completion for a project creation or deletion.
reason (string) --In the case of a project creation or deletion failure, a reason for the failure.
Exceptions
CodeStar.Client.exceptions.ProjectNotFoundException
CodeStar.Client.exceptions.ValidationException
CodeStar.Client.exceptions.InvalidServiceRoleException
CodeStar.Client.exceptions.ProjectConfigurationException
CodeStar.Client.exceptions.ConcurrentModificationException
:return: {
'name': 'string',
'id': 'string',
'arn': 'string',
'description': 'string',
'clientRequestToken': 'string',
'createdTimeStamp': datetime(2015, 1, 1),
'stackId': 'string',
'projectTemplateId': 'string',
'status': {
'state': 'string',
'reason': 'string'
}
}
"""
pass
def describe_user_profile(userArn=None):
"""
Describes a user in AWS CodeStar and the user attributes across all projects.
See also: AWS API Documentation
Exceptions
:example: response = client.describe_user_profile(
userArn='string'
)
:type userArn: string
:param userArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the user.\n
:rtype: dict
ReturnsResponse Syntax{
'userArn': 'string',
'displayName': 'string',
'emailAddress': 'string',
'sshPublicKey': 'string',
'createdTimestamp': datetime(2015, 1, 1),
'lastModifiedTimestamp': datetime(2015, 1, 1)
}
Response Structure
(dict) --
userArn (string) --The Amazon Resource Name (ARN) of the user.
displayName (string) --The display name shown for the user in AWS CodeStar projects. For example, this could be set to both first and last name ("Mary Major") or a single name ("Mary"). The display name is also used to generate the initial icon associated with the user in AWS CodeStar projects. If spaces are included in the display name, the first character that appears after the space will be used as the second character in the user initial icon. The initial icon displays a maximum of two characters, so a display name with more than one space (for example "Mary Jane Major") would generate an initial icon using the first character and the first character after the space ("MJ", not "MM").
emailAddress (string) --The email address for the user. Optional.
sshPublicKey (string) --The SSH public key associated with the user. This SSH public key is associated with the user profile, and can be used in conjunction with the associated private key for access to project resources, such as Amazon EC2 instances, if a project owner grants remote access to those resources.
createdTimestamp (datetime) --The date and time when the user profile was created in AWS CodeStar, in timestamp format.
lastModifiedTimestamp (datetime) --The date and time when the user profile was last modified, in timestamp format.
Exceptions
CodeStar.Client.exceptions.UserProfileNotFoundException
CodeStar.Client.exceptions.ValidationException
:return: {
'userArn': 'string',
'displayName': 'string',
'emailAddress': 'string',
'sshPublicKey': 'string',
'createdTimestamp': datetime(2015, 1, 1),
'lastModifiedTimestamp': datetime(2015, 1, 1)
}
"""
pass
def disassociate_team_member(projectId=None, userArn=None):
"""
Removes a user from a project. Removing a user from a project also removes the IAM policies from that user that allowed access to the project and its resources. Disassociating a team member does not remove that user\'s profile from AWS CodeStar. It does not remove the user from IAM.
See also: AWS API Documentation
Exceptions
:example: response = client.disassociate_team_member(
projectId='string',
userArn='string'
)
:type projectId: string
:param projectId: [REQUIRED]\nThe ID of the AWS CodeStar project from which you want to remove a team member.\n
:type userArn: string
:param userArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the IAM user or group whom you want to remove from the project.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
CodeStar.Client.exceptions.ProjectNotFoundException
CodeStar.Client.exceptions.ValidationException
CodeStar.Client.exceptions.InvalidServiceRoleException
CodeStar.Client.exceptions.ConcurrentModificationException
:return: {}
:returns:
(dict) --
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to\nClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid\nfor. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By\ndefault, the http method is whatever is used in the method\'s model.
"""
pass
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
ReturnsA paginator object.
"""
pass
def get_waiter(waiter_name=None):
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters\nsection of the service docs for a list of available waiters.
:rtype: botocore.waiter.Waiter
"""
pass
def list_projects(nextToken=None, maxResults=None):
"""
Lists all projects in AWS CodeStar associated with your AWS account.
See also: AWS API Documentation
Exceptions
:example: response = client.list_projects(
nextToken='string',
maxResults=123
)
:type nextToken: string
:param nextToken: The continuation token to be used to return the next set of results, if the results cannot be returned in one response.
:type maxResults: integer
:param maxResults: The maximum amount of data that can be contained in a single set of results.
:rtype: dict
ReturnsResponse Syntax
{
'projects': [
{
'projectId': 'string',
'projectArn': 'string'
},
],
'nextToken': 'string'
}
Response Structure
(dict) --
projects (list) --
A list of projects.
(dict) --
Information about the metadata for a project.
projectId (string) --
The ID of the project.
projectArn (string) --
The Amazon Resource Name (ARN) of the project.
nextToken (string) --
The continuation token to use when requesting the next set of results, if there are more results to be returned.
Exceptions
CodeStar.Client.exceptions.InvalidNextTokenException
CodeStar.Client.exceptions.ValidationException
:return: {
'projects': [
{
'projectId': 'string',
'projectArn': 'string'
},
],
'nextToken': 'string'
}
:returns:
CodeStar.Client.exceptions.InvalidNextTokenException
CodeStar.Client.exceptions.ValidationException
"""
pass
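def _example_list_all_projects():
    """Illustrative sketch only, not part of the generated stubs: drains list_projects
    by following nextToken, as described in the documentation above. A configured boto3
    codestar client and valid AWS credentials are assumed."""
    import boto3
    client = boto3.client('codestar')
    projects = []
    token = None
    while True:
        kwargs = {'maxResults': 50}
        if token:
            kwargs['nextToken'] = token
        page = client.list_projects(**kwargs)
        projects.extend(page.get('projects', []))
        token = page.get('nextToken')
        if not token:
            return projects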
def list_resources(projectId=None, nextToken=None, maxResults=None):
"""
Lists resources associated with a project in AWS CodeStar.
See also: AWS API Documentation
Exceptions
:example: response = client.list_resources(
projectId='string',
nextToken='string',
maxResults=123
)
:type projectId: string
:param projectId: [REQUIRED]\nThe ID of the project.\n
:type nextToken: string
:param nextToken: The continuation token for the next set of results, if the results cannot be returned in one response.
:type maxResults: integer
:param maxResults: The maximum amount of data that can be contained in a single set of results.
:rtype: dict
ReturnsResponse Syntax
{
'resources': [
{
'id': 'string'
},
],
'nextToken': 'string'
}
Response Structure
(dict) --
resources (list) --
An array of resources associated with the project.
(dict) --
Information about a resource for a project.
id (string) --
The Amazon Resource Name (ARN) of the resource.
nextToken (string) --
The continuation token to use when requesting the next set of results, if there are more results to be returned.
Exceptions
CodeStar.Client.exceptions.ProjectNotFoundException
CodeStar.Client.exceptions.InvalidNextTokenException
CodeStar.Client.exceptions.ValidationException
:return: {
'resources': [
{
'id': 'string'
},
],
'nextToken': 'string'
}
:returns:
CodeStar.Client.exceptions.ProjectNotFoundException
CodeStar.Client.exceptions.InvalidNextTokenException
CodeStar.Client.exceptions.ValidationException
"""
pass
def list_tags_for_project(id=None, nextToken=None, maxResults=None):
"""
Gets the tags for a project.
See also: AWS API Documentation
Exceptions
:example: response = client.list_tags_for_project(
id='string',
nextToken='string',
maxResults=123
)
:type id: string
:param id: [REQUIRED]\nThe ID of the project to get tags for.\n
:type nextToken: string
:param nextToken: Reserved for future use.
:type maxResults: integer
:param maxResults: Reserved for future use.
:rtype: dict
ReturnsResponse Syntax
{
'tags': {
'string': 'string'
},
'nextToken': 'string'
}
Response Structure
(dict) --
tags (dict) --
The tags for the project.
(string) --
(string) --
nextToken (string) --
Reserved for future use.
Exceptions
CodeStar.Client.exceptions.ProjectNotFoundException
CodeStar.Client.exceptions.ValidationException
CodeStar.Client.exceptions.InvalidNextTokenException
:return: {
'tags': {
'string': 'string'
},
'nextToken': 'string'
}
:returns:
(string) --
(string) --
"""
pass
def list_team_members(projectId=None, nextToken=None, maxResults=None):
"""
Lists all team members associated with a project.
See also: AWS API Documentation
Exceptions
:example: response = client.list_team_members(
projectId='string',
nextToken='string',
maxResults=123
)
:type projectId: string
:param projectId: [REQUIRED]\nThe ID of the project for which you want to list team members.\n
:type nextToken: string
:param nextToken: The continuation token for the next set of results, if the results cannot be returned in one response.
:type maxResults: integer
:param maxResults: The maximum number of team members you want returned in a response.
:rtype: dict
ReturnsResponse Syntax
{
'teamMembers': [
{
'userArn': 'string',
'projectRole': 'string',
'remoteAccessAllowed': True|False
},
],
'nextToken': 'string'
}
Response Structure
(dict) --
teamMembers (list) --
A list of team member objects for the project.
(dict) --
Information about a team member in a project.
userArn (string) --
The Amazon Resource Name (ARN) of the user in IAM.
projectRole (string) --
The role assigned to the user in the project. Project roles have different levels of access. For more information, see Working with Teams in the AWS CodeStar User Guide .
remoteAccessAllowed (boolean) --
Whether the user is allowed to remotely access project resources using an SSH public/private key pair.
nextToken (string) --
The continuation token to use when requesting the next set of results, if there are more results to be returned.
Exceptions
CodeStar.Client.exceptions.ProjectNotFoundException
CodeStar.Client.exceptions.InvalidNextTokenException
CodeStar.Client.exceptions.ValidationException
:return: {
'teamMembers': [
{
'userArn': 'string',
'projectRole': 'string',
'remoteAccessAllowed': True|False
},
],
'nextToken': 'string'
}
:returns:
CodeStar.Client.exceptions.ProjectNotFoundException
CodeStar.Client.exceptions.InvalidNextTokenException
CodeStar.Client.exceptions.ValidationException
"""
pass
def list_user_profiles(nextToken=None, maxResults=None):
"""
Lists all the user profiles configured for your AWS account in AWS CodeStar.
See also: AWS API Documentation
Exceptions
:example: response = client.list_user_profiles(
nextToken='string',
maxResults=123
)
:type nextToken: string
:param nextToken: The continuation token for the next set of results, if the results cannot be returned in one response.
:type maxResults: integer
:param maxResults: The maximum number of results to return in a response.
:rtype: dict
ReturnsResponse Syntax
{
'userProfiles': [
{
'userArn': 'string',
'displayName': 'string',
'emailAddress': 'string',
'sshPublicKey': 'string'
},
],
'nextToken': 'string'
}
Response Structure
(dict) --
userProfiles (list) --
All the user profiles configured in AWS CodeStar for an AWS account.
(dict) --
Information about a user\'s profile in AWS CodeStar.
userArn (string) --
The Amazon Resource Name (ARN) of the user in IAM.
displayName (string) --
The display name of a user in AWS CodeStar. For example, this could be set to both first and last name ("Mary Major") or a single name ("Mary"). The display name is also used to generate the initial icon associated with the user in AWS CodeStar projects. If spaces are included in the display name, the first character that appears after the space will be used as the second character in the user initial icon. The initial icon displays a maximum of two characters, so a display name with more than one space (for example "Mary Jane Major") would generate an initial icon using the first character and the first character after the space ("MJ", not "MM").
emailAddress (string) --
The email address associated with the user.
sshPublicKey (string) --
The SSH public key associated with the user in AWS CodeStar. If a project owner allows the user remote access to project resources, this public key will be used along with the user\'s private key for SSH access.
nextToken (string) --
The continuation token to use when requesting the next set of results, if there are more results to be returned.
Exceptions
CodeStar.Client.exceptions.InvalidNextTokenException
CodeStar.Client.exceptions.ValidationException
:return: {
'userProfiles': [
{
'userArn': 'string',
'displayName': 'string',
'emailAddress': 'string',
'sshPublicKey': 'string'
},
],
'nextToken': 'string'
}
:returns:
CodeStar.Client.exceptions.InvalidNextTokenException
CodeStar.Client.exceptions.ValidationException
"""
pass
def tag_project(id=None, tags=None):
"""
Adds tags to a project.
See also: AWS API Documentation
Exceptions
:example: response = client.tag_project(
id='string',
tags={
'string': 'string'
}
)
:type id: string
:param id: [REQUIRED]\nThe ID of the project you want to add a tag to.\n
:type tags: dict
:param tags: [REQUIRED]\nThe tags you want to add to the project.\n\n(string) --\n(string) --\n\n\n\n
:rtype: dict
ReturnsResponse Syntax
{
'tags': {
'string': 'string'
}
}
Response Structure
(dict) --
tags (dict) --
The tags for the project.
(string) --
(string) --
Exceptions
CodeStar.Client.exceptions.ProjectNotFoundException
CodeStar.Client.exceptions.ValidationException
CodeStar.Client.exceptions.LimitExceededException
CodeStar.Client.exceptions.ConcurrentModificationException
:return: {
'tags': {
'string': 'string'
}
}
:returns:
(string) --
(string) --
"""
pass
def untag_project(id=None, tags=None):
"""
Removes tags from a project.
See also: AWS API Documentation
Exceptions
:example: response = client.untag_project(
id='string',
tags=[
'string',
]
)
:type id: string
:param id: [REQUIRED]\nThe ID of the project to remove tags from.\n
:type tags: list
:param tags: [REQUIRED]\nThe tags to remove from the project.\n\n(string) --\n\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
CodeStar.Client.exceptions.ProjectNotFoundException
CodeStar.Client.exceptions.ValidationException
CodeStar.Client.exceptions.LimitExceededException
CodeStar.Client.exceptions.ConcurrentModificationException
:return: {}
:returns:
(dict) --
"""
pass
def update_project(id=None, name=None, description=None):
"""
Updates a project in AWS CodeStar.
See also: AWS API Documentation
Exceptions
:example: response = client.update_project(
id='string',
name='string',
description='string'
)
:type id: string
:param id: [REQUIRED]\nThe ID of the project you want to update.\n
:type name: string
:param name: The name of the project you want to update.
:type description: string
:param description: The description of the project, if any.
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
CodeStar.Client.exceptions.ProjectNotFoundException
CodeStar.Client.exceptions.ValidationException
:return: {}
:returns:
(dict) --
"""
pass
def update_team_member(projectId=None, userArn=None, projectRole=None, remoteAccessAllowed=None):
"""
Updates a team member\'s attributes in an AWS CodeStar project. For example, you can change a team member\'s role in the project, or change whether they have remote access to project resources.
See also: AWS API Documentation
Exceptions
:example: response = client.update_team_member(
projectId='string',
userArn='string',
projectRole='string',
remoteAccessAllowed=True|False
)
:type projectId: string
:param projectId: [REQUIRED]\nThe ID of the project.\n
:type userArn: string
:param userArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the user for whom you want to change team membership attributes.\n
:type projectRole: string
:param projectRole: The role assigned to the user in the project. Project roles have different levels of access. For more information, see Working with Teams in the AWS CodeStar User Guide .
:type remoteAccessAllowed: boolean
:param remoteAccessAllowed: Whether a team member is allowed to remotely access project resources using the SSH public key associated with the user\'s profile. Even if this is set to True, the user must associate a public key with their profile before the user can access resources.
:rtype: dict
ReturnsResponse Syntax
{
'userArn': 'string',
'projectRole': 'string',
'remoteAccessAllowed': True|False
}
Response Structure
(dict) --
userArn (string) --
The Amazon Resource Name (ARN) of the user whose team membership attributes were updated.
projectRole (string) --
The project role granted to the user.
remoteAccessAllowed (boolean) --
Whether a team member is allowed to remotely access project resources using the SSH public key associated with the user\'s profile.
Exceptions
CodeStar.Client.exceptions.LimitExceededException
CodeStar.Client.exceptions.ProjectNotFoundException
CodeStar.Client.exceptions.ValidationException
CodeStar.Client.exceptions.InvalidServiceRoleException
CodeStar.Client.exceptions.ProjectConfigurationException
CodeStar.Client.exceptions.ConcurrentModificationException
CodeStar.Client.exceptions.TeamMemberNotFoundException
:return: {
'userArn': 'string',
'projectRole': 'string',
'remoteAccessAllowed': True|False
}
:returns:
CodeStar.Client.exceptions.LimitExceededException
CodeStar.Client.exceptions.ProjectNotFoundException
CodeStar.Client.exceptions.ValidationException
CodeStar.Client.exceptions.InvalidServiceRoleException
CodeStar.Client.exceptions.ProjectConfigurationException
CodeStar.Client.exceptions.ConcurrentModificationException
CodeStar.Client.exceptions.TeamMemberNotFoundException
"""
pass
def update_user_profile(userArn=None, displayName=None, emailAddress=None, sshPublicKey=None):
"""
Updates a user\'s profile in AWS CodeStar. The user profile is not project-specific. Information in the user profile is displayed wherever the user\'s information appears to other users in AWS CodeStar.
See also: AWS API Documentation
Exceptions
:example: response = client.update_user_profile(
userArn='string',
displayName='string',
emailAddress='string',
sshPublicKey='string'
)
:type userArn: string
    :param userArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the user whose profile you want to update in AWS CodeStar.\n
:type displayName: string
:param displayName: The name that is displayed as the friendly name for the user in AWS CodeStar.
:type emailAddress: string
:param emailAddress: The email address that is displayed as part of the user\'s profile in AWS CodeStar.
:type sshPublicKey: string
:param sshPublicKey: The SSH public key associated with the user in AWS CodeStar. If a project owner allows the user remote access to project resources, this public key will be used along with the user\'s private key for SSH access.
:rtype: dict
ReturnsResponse Syntax
{
'userArn': 'string',
'displayName': 'string',
'emailAddress': 'string',
'sshPublicKey': 'string',
'createdTimestamp': datetime(2015, 1, 1),
'lastModifiedTimestamp': datetime(2015, 1, 1)
}
Response Structure
(dict) --
userArn (string) --
The Amazon Resource Name (ARN) of the user in IAM.
displayName (string) --
The name that is displayed as the friendly name for the user in AWS CodeStar.
emailAddress (string) --
The email address that is displayed as part of the user\'s profile in AWS CodeStar.
sshPublicKey (string) --
The SSH public key associated with the user in AWS CodeStar. This is the public portion of the public/private keypair the user can use to access project resources if a project owner allows the user remote access to those resources.
createdTimestamp (datetime) --
The date the user profile was created, in timestamp format.
lastModifiedTimestamp (datetime) --
The date the user profile was last modified, in timestamp format.
Exceptions
CodeStar.Client.exceptions.UserProfileNotFoundException
CodeStar.Client.exceptions.ValidationException
:return: {
'userArn': 'string',
'displayName': 'string',
'emailAddress': 'string',
'sshPublicKey': 'string',
'createdTimestamp': datetime(2015, 1, 1),
'lastModifiedTimestamp': datetime(2015, 1, 1)
}
:returns:
CodeStar.Client.exceptions.UserProfileNotFoundException
CodeStar.Client.exceptions.ValidationException
"""
pass
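def _example_manage_team_member(project_id, user_arn):
    """Illustrative sketch only, not part of the generated stubs: adds an IAM user to a
    project as a Contributor and then grants remote access, using the
    associate_team_member and update_team_member operations documented above.
    project_id and user_arn are caller-supplied placeholders; a configured boto3
    codestar client and valid AWS credentials are assumed."""
    import boto3
    client = boto3.client('codestar')
    client.associate_team_member(
        projectId=project_id,
        userArn=user_arn,
        projectRole='Contributor',
        remoteAccessAllowed=False,
    )
    return client.update_team_member(
        projectId=project_id,
        userArn=user_arn,
        remoteAccessAllowed=True,
    )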
| 28.681668
| 2,625
| 0.69844
|
fbadf1b6ba7f9ecf0e050a629ab13f47532601be
| 10,849
|
py
|
Python
|
src/fastly_bouncer/fastly_api.py
|
crowdsecurity/cs-fastly-bouncer
|
76db68c11c6dcd2174c25496009a11109368265c
|
[
"MIT"
] | null | null | null |
src/fastly_bouncer/fastly_api.py
|
crowdsecurity/cs-fastly-bouncer
|
76db68c11c6dcd2174c25496009a11109368265c
|
[
"MIT"
] | 1
|
2022-03-02T10:58:05.000Z
|
2022-03-09T09:44:55.000Z
|
src/fastly_bouncer/fastly_api.py
|
crowdsecurity/cs-fastly-bouncer
|
76db68c11c6dcd2174c25496009a11109368265c
|
[
"MIT"
] | null | null | null |
import datetime
import ipaddress
import logging
from dataclasses import asdict, dataclass, field
from functools import partial
from typing import Dict, List, Set
from urllib.parse import urljoin
import httpx
import trio
from dateutil.parser import parse as parse_date
from fastly_bouncer.utils import with_suffix
logger: logging.Logger = logging.getLogger("")
ACL_CAPACITY = 100
@dataclass
class ACL:
id: str
name: str
service_id: str
version: str
entries_to_add: Set[str] = field(default_factory=set)
entries_to_delete: Set[str] = field(default_factory=set)
entries: Dict[str, str] = field(default_factory=dict)
entry_count: int = 0
created: bool = False
def is_full(self) -> bool:
is_full = self.entry_count == ACL_CAPACITY
return is_full
def as_jsonable_dict(self) -> Dict:
return {
"id": self.id,
"name": self.name,
"service_id": self.service_id,
"version": self.version,
"entries_to_add": list(self.entries_to_add),
"entries_to_delete": list(self.entries_to_delete),
"entries": self.entries,
"entry_count": self.entry_count,
"created": self.created,
}
@dataclass
class VCL:
name: str
service_id: str
version: str
action: str
conditional: str = ""
type: str = "recv"
dynamic: str = "1"
id: str = ""
def as_jsonable_dict(self):
return asdict(self)
def to_dict(self):
if self.conditional:
content = f"{self.conditional} {{ {self.action} }}"
else:
content = self.action
return {
"name": self.name,
"service_id": self.service_id,
"version": self.version,
"type": self.type,
"content": content,
"dynamic": self.dynamic,
}
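def _example_vcl_snippet() -> Dict:
    """Illustrative sketch only, not used by the bouncer: builds a VCL snippet that wraps
    a deny action in a request condition, to show how to_dict() renders the conditional
    and action into the single "content" string sent to Fastly. The service id and ACL
    name are placeholder values."""
    vcl = VCL(
        name="crowdsec_deny_example",
        service_id="EXAMPLE_SERVICE_ID",
        version="1",
        action='error 403 "Forbidden";',
        conditional="if (client.ip ~ crowdsec_deny_example)",
    )
    # "content" becomes: 'if (client.ip ~ crowdsec_deny_example) { error 403 "Forbidden"; }'
    return vcl.to_dict()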
async def raise_on_4xx_5xx(response):
response.raise_for_status()
class FastlyAPI:
base_url = "https://api.fastly.com"
def __init__(self, token):
self._token = token
self._acl_count = 0
self.session = httpx.AsyncClient(
headers=httpx.Headers({"Fastly-Key": self._token}),
timeout=httpx.Timeout(connect=30, read=None, write=15, pool=None),
transport=httpx.AsyncHTTPTransport(retries=3),
event_hooks={"response": [raise_on_4xx_5xx]},
)
async def get_version_to_clone(self, service_id: str) -> str:
"""
        Gets the version to clone from. If the service has an active version, the active version will be cloned.
        Otherwise, the version which was last updated will be cloned.
"""
service_versions_resp = await self.session.get(
self.api_url(f"/service/{service_id}/version")
)
service_versions = service_versions_resp.json()
version_to_clone = None
last_updated = None
        for service_version in service_versions:
            updated_at = parse_date(service_version["updated_at"])
            # Keep the version with the most recent "updated_at" timestamp; last_updated
            # must be recorded here, otherwise later versions are never compared against it.
            if last_updated is None or last_updated < updated_at:
                last_updated = updated_at
                version_to_clone = service_version["number"]
return str(version_to_clone)
async def get_all_service_ids(self, with_name=False) -> List[str]:
current_page = 1
per_page = 50
all_service_ids = []
while True:
resp = await self.session.get(
self.api_url(f"/service?page={current_page}&per_page={per_page}")
)
services = resp.json()
for service in services:
if with_name:
all_service_ids.append((service["id"], service["name"]))
else:
all_service_ids.append(service["id"])
            if len(services) < per_page:
                return all_service_ids
            # Advance to the next page; without this the loop would request page 1 forever.
            current_page += 1
async def get_all_vcls(self, service_id, version) -> List[VCL]:
vcls = await self.session.get(
self.api_url(f"/service/{service_id}/version/{version}/snippet")
)
vcls = vcls.json()
return [
VCL(
name=vcl["name"],
service_id=vcl["service_id"],
dynamic=vcl["dynamic"],
id=vcl["id"],
version=vcl["version"],
action="",
)
for vcl in vcls
]
async def activate_service_version(self, service_id: str, version: str):
resp = await self.session.put(
self.api_url(f"/service/{service_id}/version/{version}/activate")
)
resp.json()
async def delete_vcl(self, vcl: VCL):
resp = await self.session.delete(
self.api_url(f"/service/{vcl.service_id}/version/{vcl.version}/snippet/{vcl.name}")
)
return resp.json()
async def get_all_acls(self, service_id, version) -> List[ACL]:
resp = await self.session.get(self.api_url(f"/service/{service_id}/version/{version}/acl"))
acls = resp.json()
return [
ACL(id=acl["id"], name=acl["name"], service_id=service_id, version=version)
for acl in acls
]
async def delete_acl(self, acl: ACL):
resp = await self.session.delete(
self.api_url(f"/service/{acl.service_id}/version/{acl.version}/acl/{acl.name}")
)
return resp
async def clear_crowdsec_resources(self, service_id, version):
"""
The version of the service provided must not be locked.
"""
all_acls = await self.get_all_acls(service_id, version)
all_acls = list(filter(lambda acl: acl.name.startswith("crowdsec"), all_acls))
all_vcls = await self.get_all_vcls(service_id, version)
all_vcls = list(filter(lambda vcl: vcl.name.startswith("crowdsec"), all_vcls))
if not all_vcls and not all_acls:
return
async with trio.open_nursery() as n:
for acl in all_acls:
n.start_soon(self.delete_acl, acl)
for vcl in all_vcls:
n.start_soon(self.delete_vcl, vcl)
async def clone_version_for_service_from_given_version(
self, service_id: str, version: str, comment=""
) -> str:
"""
Creates new version for service.
Returns the new version.
"""
if not comment:
comment = ""
resp = await self.session.put(
self.api_url(f"/service/{service_id}/version/{version}/clone")
)
resp = resp.json()
tmp = await self.session.put(
self.api_url(
f"/service/{service_id}/version/{resp['number']}",
),
json={
"comment": f"Created by CrowdSec. {comment} Cloned from version {version}. Created at {datetime.datetime.now().strftime('%Y-%m-%d %H:%M')}"
},
)
tmp.json()
return str(resp["number"])
async def create_acl_for_service(self, service_id, version, name=None) -> ACL:
"""
        Create an ACL resource for the given service_id and version. If the "name"
        parameter is not specified, a generated name ("acl_<n>") is used for the ACL.
        Returns the created ACL.
"""
if not name:
name = f"acl_{str(self._acl_count)}"
r = await self.session.post(
self.api_url(f"/service/{service_id}/version/{version}/acl"),
data=f"name={name}",
)
resp = r.json()
self._acl_count += 1
return ACL(
id=resp["id"],
service_id=service_id,
version=str(version),
name=name,
created=True,
)
async def create_or_update_vcl(self, vcl: VCL) -> VCL:
if not vcl.id:
vcl = await self.create_vcl(vcl)
else:
vcl = await self.update_dynamic_vcl(vcl)
return vcl
async def is_service_version_locked(self, service_id, version) -> bool:
resp = await self.session.get(self.api_url(f"/service/{service_id}/version/{version}"))
resp = resp.json()
return resp["locked"]
async def create_vcl(self, vcl: VCL):
if vcl.id:
return vcl
resp = await self.session.post(
self.api_url(f"/service/{vcl.service_id}/version/{vcl.version}/snippet"),
data=vcl.to_dict(),
)
resp = resp.json()
vcl.id = resp["id"]
return vcl
async def update_dynamic_vcl(self, vcl: VCL):
resp = await self.session.put(
self.api_url(f"/service/{vcl.service_id}/snippet/{vcl.id}"),
data=vcl.to_dict(),
)
resp.json()
return vcl
async def refresh_acl_entries(self, acl: ACL) -> Dict[str, str]:
resp = await self.session.get(
self.api_url(f"/service/{acl.service_id}/acl/{acl.id}/entries?per_page=100")
)
resp = resp.json()
acl.entries = {}
for entry in resp:
acl.entries[f"{entry['ip']}/{entry['subnet']}"] = entry["id"]
return acl
async def process_acl(self, acl: ACL):
logger.debug(with_suffix(f"entries to delete {acl.entries_to_delete}", acl_id=acl.id))
logger.debug(with_suffix(f"entries to add {acl.entries_to_add}", acl_id=acl.id))
update_entries = []
for entry_to_add in acl.entries_to_add:
if entry_to_add in acl.entries:
continue
network = ipaddress.ip_network(entry_to_add)
ip, subnet = str(network.network_address), network.prefixlen
update_entries.append({"op": "create", "ip": ip, "subnet": subnet})
for entry_to_delete in acl.entries_to_delete:
update_entries.append(
{
"op": "delete",
"id": acl.entries[entry_to_delete],
}
)
if not update_entries:
return
# Only 100 operations per request can be done on an acl.
async with trio.open_nursery() as n:
for i in range(0, len(update_entries), 100):
update_entries_batch = update_entries[i : i + 100]
request_body = {"entries": update_entries_batch}
f = partial(self.session.patch, json=request_body)
n.start_soon(
f,
self.api_url(f"/service/{acl.service_id}/acl/{acl.id}/entries"),
)
acl = await self.refresh_acl_entries(acl)
@staticmethod
def api_url(endpoint: str) -> str:
return urljoin(FastlyAPI.base_url, endpoint)
@staticmethod
def check_for_errors(resp, *args, **kwargs):
resp.raise_for_status()
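async def _example_ban_ip(token: str, service_id: str) -> None:
    """Illustrative sketch only, not part of the bouncer: clones the most recently
    updated service version, creates a crowdsec-prefixed ACL, queues one IP for
    addition and pushes it with process_acl(). The token, service id and IP below
    are placeholder values."""
    api = FastlyAPI(token)
    base_version = await api.get_version_to_clone(service_id)
    new_version = await api.clone_version_for_service_from_given_version(
        service_id, base_version, comment="example run."
    )
    acl = await api.create_acl_for_service(service_id, new_version, name="crowdsec_example_acl")
    acl.entries_to_add.add("192.0.2.1/32")
    await api.process_acl(acl)
    # One possible way to drive this sketch: trio.run(_example_ban_ip, "<token>", "<service id>")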
| 33.17737
| 155
| 0.578026
|
7e67dfd7a125ad975254d78a51124e355b37775e
| 448
|
py
|
Python
|
General Skills/mus1c (300 points)/flag.py
|
root-ji218at/2019game.picoctf.com
|
48ae89edbe06eef3f651cc535615ebfb067d57bc
|
[
"MIT"
] | null | null | null |
General Skills/mus1c (300 points)/flag.py
|
root-ji218at/2019game.picoctf.com
|
48ae89edbe06eef3f651cc535615ebfb067d57bc
|
[
"MIT"
] | null | null | null |
General Skills/mus1c (300 points)/flag.py
|
root-ji218at/2019game.picoctf.com
|
48ae89edbe06eef3f651cc535615ebfb067d57bc
|
[
"MIT"
] | null | null | null |
##############################################
####### By Arijit bhowmick #####
##############################################
import os
print("#"*27, "\nRockstar_encoded decoding as ASCII\n", "#"*27, "\n\n")
ascii_enc = open("rockstar_code_enc.txt", "r")
ascii_enc_list = ascii_enc.readlines()
ascii_enc.close()
print("Flag --> picoCTF{", end="")
for ascii_ in ascii_enc_list:
print(chr(int(ascii_.replace("\n", ""))), end="")
print("}")
| 22.4
| 71
| 0.502232
|
22ecec7550ef9e176988fc95054e77b12d87f4ca
| 3,485
|
py
|
Python
|
server/plato/api/domains.py
|
zhlooking/plato
|
9daf0dfd8b376603453eadf2d981c71d3adb2632
|
[
"MIT"
] | null | null | null |
server/plato/api/domains.py
|
zhlooking/plato
|
9daf0dfd8b376603453eadf2d981c71d3adb2632
|
[
"MIT"
] | null | null | null |
server/plato/api/domains.py
|
zhlooking/plato
|
9daf0dfd8b376603453eadf2d981c71d3adb2632
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, make_response, jsonify, request
from sqlalchemy import exc
from plato import db
from plato.api.utils import authenticate, is_admin
from plato.model.domain import Domain
domains_blueprint = Blueprint('domains', __name__, template_folder='./templates')
@domains_blueprint.route('/domains', methods=['POST'])
@authenticate
def add_domain(resp):
'''add domain info'''
    if not is_admin(resp):
response_object = {
'status': 'error',
'message': 'You have no permission to do that.'
}
return make_response(jsonify(response_object)), 403
post_data = request.get_json()
if not post_data:
response_object = {
'status': 'fail',
            'message': 'Invalid payload'
}
return make_response(jsonify(response_object)), 400
domain_val = post_data.get('domain')
ip = post_data.get('ip')
master = post_data.get('master')
try:
domain = Domain.query.filter_by(domain=domain_val).first()
if not domain:
db.session.add(Domain(domain=domain_val, ip=ip, master=master))
db.session.commit()
response_object = {
'status': 'success',
'message': f'{domain_val} was added!'
}
return make_response(jsonify(response_object)), 201
else:
response_object = {
'status': 'fail',
'message': 'Sorry, that domain already exists.'
}
return make_response(jsonify(response_object)), 400
    except (exc.IntegrityError, ValueError):
        db.session().rollback()
        response_object = {
            'status': 'fail',
            'message': 'Invalid payload'
        }
        return make_response(jsonify(response_object)), 400
@domains_blueprint.route('/domain/<domain_id>', methods=['GET'])
def get_domain(domain_id):
'''get domain info'''
response_object = {
'status': 'fail',
'message': 'Domain does not exist.'
}
try:
domain = Domain.query.filter_by(id=int(domain_id)).first()
if not domain:
return make_response(jsonify(response_object)), 404
else:
response_object = {
'status': 'success',
'data': {
'id': domain.id,
'ip': domain.ip,
'domain': domain.domain,
'master': domain.master
}
}
return make_response(jsonify(response_object)), 200
except ValueError:
return make_response(jsonify(response_object)), 404
@domains_blueprint.route('/domains', methods=['GET'])
def get_all_domains():
'''get all domain list'''
domains = Domain.query.order_by(Domain.ip.desc()).all()
domains_list = []
for domain in domains:
domain_object = {
'id': domain.id,
'ip': domain.ip,
'domain': domain.domain,
'master': domain.master
}
domains_list.append(domain_object)
response_object = {
'status': 'success',
'data': {
'domains': domains_list
}
}
return make_response(jsonify(response_object)), 200
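def _example_register_blueprint():
    """Illustrative sketch only: shows how this blueprint might be mounted on a Flask
    app. The url_prefix and the app-factory layout are assumptions and may differ from
    the real project wiring."""
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(domains_blueprint, url_prefix='/api')
    # GET  /api/domains       -> get_all_domains()
    # POST /api/domains       -> add_domain()  (requires an auth token)
    # GET  /api/domain/<id>   -> get_domain()
    return app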
| 31.681818
| 81
| 0.570732
|
9de63807cf645d34eb02489c55b50a339bb0fab1
| 16,024
|
py
|
Python
|
mindarmour/adv_robustness/attacks/deep_fool.py
|
hboshnak/mindarmour
|
0609a4eaea875a84667bed279add9305752880cc
|
[
"Apache-2.0"
] | null | null | null |
mindarmour/adv_robustness/attacks/deep_fool.py
|
hboshnak/mindarmour
|
0609a4eaea875a84667bed279add9305752880cc
|
[
"Apache-2.0"
] | null | null | null |
mindarmour/adv_robustness/attacks/deep_fool.py
|
hboshnak/mindarmour
|
0609a4eaea875a84667bed279add9305752880cc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
DeepFool Attack.
"""
import numpy as np
from mindspore import Tensor
from mindspore.nn import Cell
from mindarmour.utils.logger import LogUtil
from mindarmour.utils.util import GradWrap, jacobian_matrix, \
jacobian_matrix_for_detection, calculate_iou, to_tensor_tuple
from mindarmour.utils._check_param import check_pair_numpy_param, check_model, \
check_value_positive, check_int_positive, check_norm_level, \
check_param_multi_types, check_param_type, check_value_non_negative
from .attack import Attack
LOGGER = LogUtil.get_instance()
TAG = 'DeepFool'
class _GetLogits(Cell):
def __init__(self, network):
super(_GetLogits, self).__init__()
self._network = network
def construct(self, *inputs):
_, pre_logits = self._network(*inputs)
return pre_logits
def _deepfool_detection_scores(inputs, gt_boxes, gt_labels, network):
"""
Evaluate the detection result of inputs, specially for object detection models.
Args:
inputs (numpy.ndarray): Input samples.
gt_boxes (numpy.ndarray): Ground-truth boxes of inputs.
gt_labels (numpy.ndarray): Ground-truth labels of inputs.
model (BlackModel): Target model.
Returns:
- numpy.ndarray, detection scores of inputs.
- numpy.ndarray, the number of objects that are correctly detected.
"""
network = check_param_type('network', network, Cell)
inputs_tensor = to_tensor_tuple(inputs)
box_and_confi, pred_logits = network(*inputs_tensor)
box_and_confi, pred_logits = box_and_confi.asnumpy(), pred_logits.asnumpy()
pred_labels = np.argmax(pred_logits, axis=2)
det_scores = []
correct_labels_num = []
gt_boxes_num = gt_boxes.shape[1]
iou_thres = 0.5
for idx, (boxes, labels) in enumerate(zip(box_and_confi, pred_labels)):
score = 0
box_num = boxes.shape[0]
gt_boxes_idx = gt_boxes[idx]
gt_labels_idx = gt_labels[idx]
correct_label_flag = np.zeros(gt_labels_idx.shape)
for i in range(box_num):
pred_box = boxes[i]
max_iou_confi = 0
for j in range(gt_boxes_num):
iou = calculate_iou(pred_box[:4], gt_boxes_idx[j][:4])
if labels[i] == gt_labels_idx[j] and iou > iou_thres:
max_iou_confi = max(max_iou_confi, pred_box[-1] + iou)
correct_label_flag[j] = 1
score += max_iou_confi
det_scores.append(score)
correct_labels_num.append(np.sum(correct_label_flag))
return np.array(det_scores), np.array(correct_labels_num)
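# Illustrative example of the scoring above, with hypothetical numbers: for an image with a
# single ground-truth box, a predicted box carrying the matching label, a confidence (the
# last element of pred_box) of 0.9 and an IoU of 0.7 (above the 0.5 threshold) contributes
# pred_box[-1] + iou = 1.6 to det_scores and marks that ground-truth object as correctly
# detected; predictions whose label or IoU does not match contribute nothing.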
def _is_success(inputs, gt_boxes, gt_labels, network, gt_object_nums, reserve_ratio):
_, correct_nums_adv = _deepfool_detection_scores(inputs, gt_boxes, gt_labels, network)
return np.all(correct_nums_adv <= (gt_object_nums*reserve_ratio).astype(np.int32))
class DeepFool(Attack):
"""
DeepFool is an untargeted & iterative attack achieved by moving the benign
sample to the nearest classification boundary and crossing the boundary.
Reference: `DeepFool: a simple and accurate method to fool deep neural
networks <https://arxiv.org/abs/1511.04599>`_
Args:
network (Cell): Target model.
num_classes (int): Number of labels of model output, which should be
greater than zero.
model_type (str): Tye type of targeted model. 'classification' and 'detection' are supported now.
default: 'classification'.
        reserve_ratio (Union[int, float]): The percentage of objects that can be detected after attacks,
specifically for model_type='detection'. Reserve_ratio should be in the range of (0, 1). Default: 0.3.
max_iters (int): Max iterations, which should be
greater than zero. Default: 50.
overshoot (float): Overshoot parameter. Default: 0.02.
norm_level (Union[int, str]): Order of the vector norm. Possible values: np.inf
or 2. Default: 2.
bounds (Union[tuple, list]): Upper and lower bounds of data range. In form of (clip_min,
clip_max). Default: None.
sparse (bool): If True, input labels are sparse-coded. If False,
input labels are onehot-coded. Default: True.
Examples:
        >>> import numpy as np
        >>> import mindspore.ops.operations as P
>>> from mindspore.nn import Cell
>>> from mindspore import Tensor
>>> from mindarmour.adv_robustness.attacks import DeepFool
>>> class Net(Cell):
... def __init__(self):
... super(Net, self).__init__()
... self._softmax = P.Softmax()
... def construct(self, inputs):
... out = self._softmax(inputs)
... return out
>>> net = Net()
>>> input_shape = (1, 5)
>>> _, classes = input_shape
>>> attack = DeepFool(net, classes, max_iters=10, norm_level=2,
... bounds=(0.0, 1.0))
>>> input_np = np.array([[0.1, 0.2, 0.7, 0.5, 0.4]]).astype(np.float32)
>>> input_me = Tensor(input_np)
>>> true_labels = np.argmax(net(input_me).asnumpy(), axis=1)
>>> advs = attack.generate(input_np, true_labels)
"""
def __init__(self, network, num_classes, model_type='classification',
reserve_ratio=0.3, max_iters=50, overshoot=0.02, norm_level=2, bounds=None, sparse=True):
super(DeepFool, self).__init__()
self._network = check_model('network', network, Cell)
self._max_iters = check_int_positive('max_iters', max_iters)
self._overshoot = check_value_positive('overshoot', overshoot)
self._norm_level = check_norm_level(norm_level)
self._num_classes = check_int_positive('num_classes', num_classes)
self._net_grad = GradWrap(self._network)
self._bounds = bounds
if self._bounds is not None:
self._bounds = check_param_multi_types('bounds', bounds, [list, tuple])
for b in self._bounds:
_ = check_param_multi_types('bound', b, [int, float])
self._sparse = check_param_type('sparse', sparse, bool)
self._model_type = check_param_type('model_type', model_type, str)
if self._model_type not in ('classification', 'detection'):
msg = "Only 'classification' or 'detection' is supported now, but got {}.".format(self._model_type)
LOGGER.error(TAG, msg)
raise ValueError(msg)
self._reserve_ratio = check_value_non_negative('reserve_ratio', reserve_ratio)
if self._reserve_ratio > 1:
msg = 'reserve_ratio should be less than 1.0, but got {}.'.format(self._reserve_ratio)
LOGGER.error(TAG, msg)
            raise ValueError(msg)
def generate(self, inputs, labels):
"""
Generate adversarial examples based on input samples and original labels.
Args:
inputs (Union[numpy.ndarray, tuple]): Input samples. The format of inputs should be numpy.ndarray if
model_type='classification'. The format of inputs can be (input1, input2, ...) or only one array if
model_type='detection'.
labels (Union[numpy.ndarray, tuple]): Targeted labels or ground-truth labels. The format of labels should
be numpy.ndarray if model_type='classification'. The format of labels should be (gt_boxes, gt_labels)
if model_type='detection'.
Returns:
numpy.ndarray, adversarial examples.
Raises:
NotImplementedError: If norm_level is not in [2, np.inf, '2', 'inf'].
"""
if self._model_type == 'detection':
return self._generate_detection(inputs, labels)
if self._model_type == 'classification':
return self._generate_classification(inputs, labels)
return None
def _update_image(self, x_origin, r_tot):
"""update image based on bounds"""
if self._bounds is not None:
clip_min, clip_max = self._bounds
images = x_origin + (1 + self._overshoot)*r_tot*(clip_max-clip_min)
images = np.clip(images, clip_min, clip_max)
else:
images = x_origin + (1 + self._overshoot)*r_tot
return images
def _generate_detection(self, inputs, labels):
"""Generate adversarial examples in detection scenario"""
images, auxiliary_inputs = inputs[0], inputs[1:]
gt_boxes, gt_labels = labels
_, gt_object_nums = _deepfool_detection_scores(inputs, gt_boxes, gt_labels, self._network)
if not self._sparse:
gt_labels = np.argmax(gt_labels, axis=2)
origin_labels = np.zeros(gt_labels.shape[0])
for i in range(gt_labels.shape[0]):
origin_labels[i] = np.argmax(np.bincount(gt_labels[i]))
images_dtype = images.dtype
iteration = 0
num_boxes = gt_labels.shape[1]
merge_net = _GetLogits(self._network)
detection_net_grad = GradWrap(merge_net)
weight = np.squeeze(np.zeros(images.shape[1:]))
r_tot = np.zeros(images.shape)
x_origin = images
while not _is_success((images,) + auxiliary_inputs, gt_boxes, gt_labels, self._network, gt_object_nums, \
self._reserve_ratio) and iteration < self._max_iters:
preds_logits = merge_net(*to_tensor_tuple(images), *to_tensor_tuple(auxiliary_inputs)).asnumpy()
grads = jacobian_matrix_for_detection(detection_net_grad, (images,) + auxiliary_inputs,
num_boxes, self._num_classes)
for idx in range(images.shape[0]):
diff_w = np.inf
label = int(origin_labels[idx])
auxiliary_input_i = tuple()
for item in auxiliary_inputs:
auxiliary_input_i += (np.expand_dims(item[idx], axis=0),)
gt_boxes_i = np.expand_dims(gt_boxes[idx], axis=0)
gt_labels_i = np.expand_dims(gt_labels[idx], axis=0)
inputs_i = (np.expand_dims(images[idx], axis=0),) + auxiliary_input_i
if _is_success(inputs_i, gt_boxes_i, gt_labels_i,
self._network, gt_object_nums[idx], self._reserve_ratio):
continue
for k in range(self._num_classes):
if k == label:
continue
w_k = grads[k, idx, ...] - grads[label, idx, ...]
f_k = np.mean(np.abs(preds_logits[idx, :, k, ...] - preds_logits[idx, :, label, ...]))
if self._norm_level == 2 or self._norm_level == '2':
diff_w_k = abs(f_k) / (np.linalg.norm(w_k) + 1e-8)
elif self._norm_level == np.inf \
or self._norm_level == 'inf':
diff_w_k = abs(f_k) / (np.linalg.norm(w_k, ord=1) + 1e-8)
else:
msg = 'ord {} is not available.' \
.format(str(self._norm_level))
LOGGER.error(TAG, msg)
raise NotImplementedError(msg)
if diff_w_k < diff_w:
diff_w = diff_w_k
weight = w_k
if self._norm_level == 2 or self._norm_level == '2':
r_i = diff_w*weight / (np.linalg.norm(weight) + 1e-8)
elif self._norm_level == np.inf or self._norm_level == 'inf':
r_i = diff_w*np.sign(weight) \
/ (np.linalg.norm(weight, ord=1) + 1e-8)
else:
                msg = 'ord {} is not available in normalization.' \
.format(str(self._norm_level))
LOGGER.error(TAG, msg)
raise NotImplementedError(msg)
r_tot[idx, ...] = r_tot[idx, ...] + r_i
images = self._update_image(x_origin, r_tot)
iteration += 1
images = images.astype(images_dtype)
del preds_logits, grads
return images
def _generate_classification(self, inputs, labels):
"""Generate adversarial examples in classification scenario"""
inputs, labels = check_pair_numpy_param('inputs', inputs,
'labels', labels)
if not self._sparse:
labels = np.argmax(labels, axis=1)
inputs_dtype = inputs.dtype
iteration = 0
origin_labels = labels
cur_labels = origin_labels.copy()
weight = np.squeeze(np.zeros(inputs.shape[1:]))
r_tot = np.zeros(inputs.shape)
x_origin = inputs
while np.any(cur_labels == origin_labels) and iteration < self._max_iters:
preds = self._network(Tensor(inputs)).asnumpy()
grads = jacobian_matrix(self._net_grad, inputs, self._num_classes)
for idx in range(inputs.shape[0]):
diff_w = np.inf
label = origin_labels[idx]
if cur_labels[idx] != label:
continue
for k in range(self._num_classes):
if k == label:
continue
w_k = grads[k, idx, ...] - grads[label, idx, ...]
f_k = preds[idx, k] - preds[idx, label]
if self._norm_level == 2 or self._norm_level == '2':
diff_w_k = abs(f_k) / (np.linalg.norm(w_k) + 1e-8)
elif self._norm_level == np.inf \
or self._norm_level == 'inf':
diff_w_k = abs(f_k) / (np.linalg.norm(w_k, ord=1) + 1e-8)
else:
msg = 'ord {} is not available.' \
.format(str(self._norm_level))
LOGGER.error(TAG, msg)
raise NotImplementedError(msg)
if diff_w_k < diff_w:
diff_w = diff_w_k
weight = w_k
if self._norm_level == 2 or self._norm_level == '2':
r_i = diff_w*weight / (np.linalg.norm(weight) + 1e-8)
elif self._norm_level == np.inf or self._norm_level == 'inf':
r_i = diff_w*np.sign(weight) \
/ (np.linalg.norm(weight, ord=1) + 1e-8)
else:
msg = 'ord {} is not available in normalization.' \
.format(str(self._norm_level))
LOGGER.error(TAG, msg)
raise NotImplementedError(msg)
r_tot[idx, ...] = r_tot[idx, ...] + r_i
if self._bounds is not None:
clip_min, clip_max = self._bounds
inputs = x_origin + (1 + self._overshoot)*r_tot*(clip_max
- clip_min)
inputs = np.clip(inputs, clip_min, clip_max)
else:
inputs = x_origin + (1 + self._overshoot)*r_tot
cur_labels = np.argmax(
self._network(Tensor(inputs.astype(inputs_dtype))).asnumpy(),
axis=1)
iteration += 1
inputs = inputs.astype(inputs_dtype)
del preds, grads
return inputs
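# Illustrative sketch (not part of MindArmour): a single DeepFool step for a purely
# linear classifier f(x) = W @ x + b, mirroring the per-class projection used in
# _generate_classification above (L2-norm case). W, b and x below are made-up toy data.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    w_mat, bias = rng.normal(size=(3, 5)), rng.normal(size=3)
    x = rng.normal(size=5)
    logits = w_mat @ x + bias
    label = int(np.argmax(logits))
    best_ratio, best_w = np.inf, None
    for k in range(3):
        if k == label:
            continue
        w_k = w_mat[k] - w_mat[label]        # gradient of (f_k - f_label) w.r.t. x
        f_k = logits[k] - logits[label]      # current logit gap (negative)
        ratio = abs(f_k) / (np.linalg.norm(w_k) + 1e-8)
        if ratio < best_ratio:
            best_ratio, best_w = ratio, w_k
    # minimal L2 perturbation to reach the closest decision boundary, plus a small overshoot
    r = best_ratio * best_w / (np.linalg.norm(best_w) + 1e-8)
    x_adv = x + (1 + 0.02) * r
    print('label before:', label, 'after:', int(np.argmax(w_mat @ x_adv + bias)))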
| 47.129412
| 117
| 0.587119
|
8f7f58586717e6600de56063186ef3843cd9553c
| 4,579
|
py
|
Python
|
sumy/summarizers/text_rank.py
|
Mike-debug/sumy
|
c948bc72ebd07dc4c4f41d905bab0de38163779c
|
[
"Apache-2.0"
] | 2,880
|
2015-01-02T14:59:08.000Z
|
2022-03-30T08:49:11.000Z
|
sumy/summarizers/text_rank.py
|
Mike-debug/sumy
|
c948bc72ebd07dc4c4f41d905bab0de38163779c
|
[
"Apache-2.0"
] | 129
|
2015-02-09T20:42:19.000Z
|
2022-03-30T21:28:56.000Z
|
sumy/summarizers/text_rank.py
|
Mike-debug/sumy
|
c948bc72ebd07dc4c4f41d905bab0de38163779c
|
[
"Apache-2.0"
] | 545
|
2015-01-12T06:15:50.000Z
|
2022-03-30T21:11:58.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import math
try:
import numpy
except ImportError:
numpy = None
from ._summarizer import AbstractSummarizer
class TextRankSummarizer(AbstractSummarizer):
"""An implementation of TextRank algorithm for summarization.
Source: https://web.eecs.umich.edu/~mihalcea/papers/mihalcea.emnlp04.pdf
"""
epsilon = 1e-4
damping = 0.85
# small number to prevent zero-division error, see https://github.com/miso-belica/sumy/issues/112
_ZERO_DIVISION_PREVENTION = 1e-7
_stop_words = frozenset()
@property
def stop_words(self):
return self._stop_words
@stop_words.setter
def stop_words(self, words):
self._stop_words = frozenset(map(self.normalize_word, words))
def __call__(self, document, sentences_count):
self._ensure_dependencies_installed()
if not document.sentences:
return ()
ratings = self.rate_sentences(document)
return self._get_best_sentences(document.sentences, sentences_count, ratings)
@staticmethod
def _ensure_dependencies_installed():
if numpy is None:
raise ValueError("LexRank summarizer requires NumPy. Please, install it by command 'pip install numpy'.")
def rate_sentences(self, document):
matrix = self._create_matrix(document)
ranks = self.power_method(matrix, self.epsilon)
return {sent: rank for sent, rank in zip(document.sentences, ranks)}
def _create_matrix(self, document):
"""Create a stochastic matrix for TextRank.
        The element at row i and column j of the matrix corresponds to the similarity of sentences i
        and j, where the similarity is computed as the number of common words between them, divided
        by the sum of the logarithms of their lengths. After such a matrix is created, it is turned into
        a stochastic matrix by normalizing over rows, i.e. making each row sum to one (the transpose used
        by the power method is then column-stochastic). TextRank uses the PageRank algorithm with damping,
        so a damping factor is incorporated as explained in TextRank's paper. The resulting matrix is
        ready for the power method.
"""
sentences_as_words = [self._to_words_set(sent) for sent in document.sentences]
sentences_count = len(sentences_as_words)
weights = numpy.zeros((sentences_count, sentences_count))
for i, words_i in enumerate(sentences_as_words):
for j in range(i, sentences_count):
rating = self._rate_sentences_edge(words_i, sentences_as_words[j])
weights[i, j] = rating
weights[j, i] = rating
weights /= (weights.sum(axis=1)[:, numpy.newaxis] + self._ZERO_DIVISION_PREVENTION)
# In the original paper, the probability of randomly moving to any of the vertices
# is NOT divided by the number of vertices. Here we do divide it so that the power
# method works; without this division, the stationary probability blows up. This
# should not affect the ranking of the vertices so we can use the resulting stationary
# probability as is without any postprocessing.
return numpy.full((sentences_count, sentences_count), (1.-self.damping) / sentences_count) \
+ self.damping * weights
def _to_words_set(self, sentence):
words = map(self.normalize_word, sentence.words)
return [self.stem_word(w) for w in words if w not in self._stop_words]
@staticmethod
def _rate_sentences_edge(words1, words2):
rank = sum(words2.count(w) for w in words1)
if rank == 0:
return 0.0
assert len(words1) > 0 and len(words2) > 0
norm = math.log(len(words1)) + math.log(len(words2))
if numpy.isclose(norm, 0.):
# This should only happen when words1 and words2 only have a single word.
# Thus, rank can only be 0 or 1.
assert rank in (0, 1)
return float(rank)
else:
return rank / norm
@staticmethod
def power_method(matrix, epsilon):
transposed_matrix = matrix.T
sentences_count = len(matrix)
p_vector = numpy.array([1.0 / sentences_count] * sentences_count)
lambda_val = 1.0
while lambda_val > epsilon:
next_p = numpy.dot(transposed_matrix, p_vector)
lambda_val = numpy.linalg.norm(numpy.subtract(next_p, p_vector))
p_vector = next_p
return p_vector
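# Illustrative sketch (not part of sumy): power iteration on a tiny damped TextRank
# matrix, mirroring _create_matrix and power_method above. The 2x2 weights are toy data.
if __name__ == "__main__" and numpy is not None:
    toy_weights = numpy.array([[0.0, 1.0], [1.0, 0.0]])
    toy_weights /= toy_weights.sum(axis=1)[:, numpy.newaxis] + 1e-7
    damping, count = 0.85, 2
    toy_matrix = numpy.full((count, count), (1.0 - damping) / count) + damping * toy_weights
    # For this symmetric toy case both "sentences" end up with equal stationary rank.
    print(TextRankSummarizer.power_method(toy_matrix, epsilon=1e-4))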
| 39.474138
| 117
| 0.674383
|
fe6ee04c5b260e130e41e3813084eecf6a8ab779
| 5,822
|
py
|
Python
|
nablapps/events/models/mixins.py
|
pettaroni/nablaweb
|
5e610698a276884b9cd779a718dfdee641713636
|
[
"MIT"
] | null | null | null |
nablapps/events/models/mixins.py
|
pettaroni/nablaweb
|
5e610698a276884b9cd779a718dfdee641713636
|
[
"MIT"
] | null | null | null |
nablapps/events/models/mixins.py
|
pettaroni/nablaweb
|
5e610698a276884b9cd779a718dfdee641713636
|
[
"MIT"
] | null | null | null |
"""
Mixins to be inherited by AbstractEvent
They are split up in order make them easier to read,
and because there was (once upon a time) an idea to split up the information
about an event and the registration info into different models.
"""
from datetime import datetime, date
from django.db import models
from django.contrib.auth.models import Group
from six.moves.urllib.parse import urlparse
from nablapps.accounts.models import FysmatClass
from ..exceptions import (RegistrationNotRequiredException,
RegistrationNotAllowed, RegistrationNotOpen)
from .eventregistration import EventRegistration
class EventInfoMixin(models.Model):
"""Abstract model defining info about an event, excluding registration info"""
short_name = models.CharField(
verbose_name="kort navn",
max_length=20,
blank=True,
null=True,
help_text="Brukes på steder hvor det ikke er plass til å skrive hele overskriften, "
"for eksempel kalenderen.")
organizer = models.CharField(
verbose_name="organisert av",
max_length=100,
blank=True,
help_text="Den som står bak arrangementet")
location = models.CharField(
verbose_name="sted",
max_length=100,
blank=False)
event_start = models.DateTimeField(
verbose_name="start",
null=True,
blank=False)
event_end = models.DateTimeField(
verbose_name="slutt",
null=True,
blank=True)
facebook_url = models.CharField(
verbose_name="facebook-url",
blank=True,
max_length=100,
help_text="URL-en til det tilsvarende arrangementet på Facebook")
class Meta:
abstract = True
def has_started(self):
"""Has the event started?"""
return self.event_start < datetime.now()
def has_finished(self):
"""Is the event finished?"""
return self.event_end and self.event_end < datetime.now()
def clean(self):
self.clean_facebook_url()
super().clean()
def clean_facebook_url(self):
"""Verifiserer formen på facebook-urlen, og endrer den hvis den er feil."""
parsed = urlparse(self.facebook_url)
noscheme = parsed.netloc + parsed.path
self.facebook_url = 'http' + '://' + noscheme.replace("http://", "").replace("https://", "")
if self.facebook_url == "http://":
self.facebook_url = ""
class RegistrationInfoMixin(models.Model):
"""Abstract model containing info about the registration.
Most of these fields don't make any sense unless registration_required is set.
"""
registration_required = models.BooleanField(
verbose_name="påmelding",
default=False,
null=False,
blank=False)
registration_deadline = models.DateTimeField(
verbose_name="påmeldingsfrist",
null=True,
blank=True)
registration_start = models.DateTimeField(
verbose_name="påmelding åpner",
null=True,
blank=True)
deregistration_deadline = models.DateTimeField(
verbose_name="avmeldingsfrist",
null=True,
blank=True)
places = models.PositiveIntegerField(
verbose_name="antall plasser",
null=True,
blank=True)
has_queue = models.NullBooleanField(
verbose_name="har venteliste",
null=True,
blank=True,
help_text=("Om ventelisten er på, vil det være mulig å melde seg på "
"selv om arrangementet er fullt. "
"De som er i ventelisten vil automatisk bli påmeldt "
"etter hvert som plasser blir ledige.")
)
open_for = models.ManyToManyField(
FysmatClass,
verbose_name="Åpen for",
blank=True,
help_text=("Hvilke grupper som får lov til å melde seg på arrangementet. "
"Hvis ingen grupper er valgt er det åpent for alle.")
)
class Meta:
abstract = True
def allowed_to_attend(self, user):
"""Indikerer om en bruker har lov til å melde seg på arrangementet"""
return (not self.open_for.exists()) or self.open_for.filter(user=user).exists()
def registration_has_started(self):
"""Return whether registration has started"""
return self.registration_required and self.registration_start < datetime.now()
def registration_open(self):
"""Return whether it is possible to register for the event"""
return self.registration_has_started() and datetime.now() < self.registration_deadline
def deregistration_closed(self):
"""Return whether the event is closed for deregistration."""
return self.deregistration_deadline and (self.deregistration_deadline < datetime.now())
def user_penalty_limit(self, user):
"""Counts the users penalties this term, used in _asser_user_allowed_to_register"""
MAX_PENALTY = 4 # This is the limit at which one is not allowed to register
# user.get_penalties returns EventRegistrations where the user has penalties
penalty_count = sum([reg.penalty for reg in user.get_penalties()])
        return penalty_count < MAX_PENALTY
def _assert_user_allowed_to_register(self, user):
if not self.registration_required:
raise RegistrationNotRequiredException(event=self, user=user)
elif not self.registration_open():
raise RegistrationNotOpen(event=self, user=user)
elif not self.allowed_to_attend(user):
raise RegistrationNotAllowed("Arrangementet er ikke åpent for ditt kull.", event=self, user=user)
elif not self.user_penalty_limit(user):
raise RegistrationNotAllowed("Du har for mange prikker!", event=self, user=user)
| 37.56129
| 109
| 0.665922
|
79d6ac62bd6a35338e4f0fdcc47c6867830b2c8a
| 3,758
|
py
|
Python
|
py/hwid/service/appengine/test/e2e_test.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | 3
|
2022-01-06T16:52:52.000Z
|
2022-03-07T11:30:47.000Z
|
py/hwid/service/appengine/test/e2e_test.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | null | null | null |
py/hwid/service/appengine/test/e2e_test.py
|
arccode/factory
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
[
"BSD-3-Clause"
] | 1
|
2021-10-24T01:47:22.000Z
|
2021-10-24T01:47:22.000Z
|
#!/usr/bin/env python3
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for e2e test.
Before deploying HWID Service to prod environment, one should run this test
on the staging environment. The test loads a test config file from the
factory-private repository and executes the specified tests.
"""
import importlib
import json
import logging
import os
import unittest
from google.protobuf import json_format
from google.protobuf import text_format
from cros.factory.utils import config_utils
from cros.factory.utils import file_utils
from cros.factory.utils import process_utils
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
APPENGINE_DIR = os.path.dirname(TEST_DIR)
PROTO_DIR = os.path.join(APPENGINE_DIR, 'proto')
FACTORY_DIR = os.path.abspath(
os.path.join(APPENGINE_DIR, '../../../..'))
FACTORY_PRIVATE_DIR = os.path.abspath(
os.path.join(FACTORY_DIR, '../factory-private'))
TEST_DIR = os.path.join(FACTORY_PRIVATE_DIR,
'config/hwid/service/appengine/test/')
DEFAULT_CONFIG_PATH = os.path.join(TEST_DIR, 'e2e_test')
PROTO_PKG = 'cros.factory.hwid.service.appengine.proto'
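# Illustrative sketch (hypothetical values, not the real factory-private config): the
# shape of the e2e_test config this test reads, based only on the keys accessed below.
_EXAMPLE_E2E_CONFIG = {
    'host_name': 'https://hwid-staging.example.com',   # hypothetical endpoint
    'test_cmd': 'call_hwid_api.sh',                    # hypothetical helper script
    'check_env_cmd': ['true'],
    'setup_env_cmd': ['true'],
    'tests': [{
        'name': 'example-call',
        'api': 'ExampleMethod',                        # hypothetical RPC name
        'proto_filename': 'hwid_api_messages',         # hypothetical proto module stem
        'request_class': 'ExampleRequest',
        'response_class': 'ExampleResponse',
        'request': {},
        'expected_output': {},
    }],
}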
class E2ETest(unittest.TestCase):
"""e2e tests on staging environment."""
def setUp(self):
self.config = config_utils.LoadConfig(DEFAULT_CONFIG_PATH, 'e2e_test')
self.test_cmd = self.config['test_cmd']
self._SetupEnv(self.config['check_env_cmd'], self.config['setup_env_cmd'])
def _SetupEnv(self, check_env_cmd, setup_env_cmd):
check_ret = process_utils.Spawn(check_env_cmd, call=True).returncode
if check_ret:
logging.info("Setting up environment with command: %s", setup_env_cmd)
setup_ret = process_utils.Spawn(setup_env_cmd, call=True).returncode
if setup_ret:
self.fail('Environment is not ready')
def _GenerateCommand(self, test):
return [
os.path.join(TEST_DIR, self.test_cmd), test['proto_filename'],
test['api']
]
def testAll(self):
logging.info('Test endpoint: %s', self.config['host_name'])
for test in self.config['tests']:
with self.subTest(name=test['name']):
logging.info('Running test: %s', test['name'])
try:
pkg = importlib.import_module('.' + test['proto_filename'] + '_pb2',
PROTO_PKG)
response_class = getattr(pkg, test['response_class'])
request_class = getattr(pkg, test['request_class'])
expected_output = json.dumps(test['expected_output'])
request = json_format.Parse(
json.dumps(test['request']), request_class())
cmd = self._GenerateCommand(test)
p = process_utils.Spawn(cmd, stdin=process_utils.PIPE,
stdout=process_utils.PIPE,
stderr=process_utils.PIPE)
stdin = text_format.MessageToString(request)
stdout = p.communicate(stdin)[0]
out_msg = text_format.Parse(stdout, response_class())
expected_msg = json_format.Parse(expected_output, response_class())
except Exception as ex:
self.fail(str(ex))
if out_msg != expected_msg:
out_json = json_format.MessageToDict(out_msg)
err_msg = json.dumps({
'expect': test['expected_output'],
'got': out_json
})
with open(file_utils.CreateTemporaryFile(), 'w') as f:
f.write(err_msg)
self.fail('%s failed, see report at %s' % (test['name'], f.name))
if __name__ == '__main__':
logging.getLogger().setLevel(int(os.environ.get('LOG_LEVEL') or logging.INFO))
unittest.main()
| 37.58
| 80
| 0.668707
|
5cf1755f41112d2149ccb6aedb0cb52eedacdb7a
| 18,985
|
py
|
Python
|
paasta_tools/long_running_service_tools.py
|
eric-erki/An-open-distributed-platform-as-a-service
|
6769c5601685deb1017910ab8d09109e8e998892
|
[
"Apache-2.0"
] | null | null | null |
paasta_tools/long_running_service_tools.py
|
eric-erki/An-open-distributed-platform-as-a-service
|
6769c5601685deb1017910ab8d09109e8e998892
|
[
"Apache-2.0"
] | 4
|
2021-02-08T20:42:08.000Z
|
2021-06-02T00:51:04.000Z
|
paasta_tools/long_running_service_tools.py
|
eric-erki/An-open-distributed-platform-as-a-service
|
6769c5601685deb1017910ab8d09109e8e998892
|
[
"Apache-2.0"
] | null | null | null |
import logging
import socket
from typing import Dict
from typing import List
from typing import Mapping
from typing import Optional
from typing import Tuple
import service_configuration_lib
from kazoo.exceptions import NoNodeError
from mypy_extensions import TypedDict
from paasta_tools.utils import BranchDictV2
from paasta_tools.utils import compose_job_id
from paasta_tools.utils import decompose_job_id
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import DeployBlacklist
from paasta_tools.utils import DeployWhitelist
from paasta_tools.utils import InstanceConfig
from paasta_tools.utils import InstanceConfigDict
from paasta_tools.utils import InvalidInstanceConfig
from paasta_tools.utils import InvalidJobNameError
from paasta_tools.utils import ZookeeperPool
DEFAULT_CONTAINER_PORT = 8888
log = logging.getLogger(__name__)
logging.getLogger('marathon').setLevel(logging.WARNING)
AUTOSCALING_ZK_ROOT = '/autoscaling'
ZK_PAUSE_AUTOSCALE_PATH = '/autoscaling/paused'
class LongRunningServiceConfigDict(InstanceConfigDict, total=False):
drain_method: str
container_port: int
drain_method_params: Dict
healthcheck_cmd: str
healthcheck_grace_period_seconds: float
healthcheck_interval_seconds: float
healthcheck_max_consecutive_failures: int
healthcheck_mode: str
healthcheck_timeout_seconds: float
healthcheck_uri: str
instances: int
max_instances: int
min_instances: int
nerve_ns: str
registrations: List[str]
replication_threshold: int
bounce_priority: int
# Defined here to avoid import cycles -- this gets used in bounce_lib and subclassed in marathon_tools.
BounceMethodConfigDict = TypedDict('BounceMethodConfigDict', {"instances": int})
class ServiceNamespaceConfig(dict):
def get_healthcheck_mode(self) -> str:
"""Get the healthcheck mode for the service. In most cases, this will match the mode
of the service, but we do provide the opportunity for users to specify both. Default to the mode
if no healthcheck_mode is specified.
"""
healthcheck_mode = self.get('healthcheck_mode', None)
if not healthcheck_mode:
return self.get_mode()
else:
return healthcheck_mode
def get_mode(self) -> str:
"""Get the mode that the service runs in and check that we support it.
If the mode is not specified, we check whether the service uses smartstack
in order to determine the appropriate default value. If proxy_port is specified
in the config, the service uses smartstack, and we can thus safely assume its mode is http.
If the mode is not defined and the service does not use smartstack, we set the mode to None.
"""
mode = self.get('mode', None)
if mode is None:
if not self.is_in_smartstack():
return None
else:
return 'http'
elif mode in ['http', 'tcp', 'https']:
return mode
else:
raise InvalidSmartstackMode("Unknown mode: %s" % mode)
def get_healthcheck_uri(self) -> str:
return self.get('healthcheck_uri', '/status')
def get_discover(self) -> str:
return self.get('discover', 'region')
def is_in_smartstack(self) -> bool:
if self.get('proxy_port') is not None:
return True
else:
return False
class LongRunningServiceConfig(InstanceConfig):
config_dict: LongRunningServiceConfigDict
def __init__(
self, service: str, cluster: str, instance: str, config_dict: LongRunningServiceConfigDict,
branch_dict: Optional[BranchDictV2], soa_dir: str=DEFAULT_SOA_DIR,
) -> None:
super().__init__(
cluster=cluster,
instance=instance,
service=service,
config_dict=config_dict,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
def get_drain_method(self, service_namespace_config: ServiceNamespaceConfig) -> str:
"""Get the drain method specified in the service's marathon configuration.
:param service_config: The service instance's configuration dictionary
:returns: The drain method specified in the config, or 'noop' if not specified"""
default = 'noop'
# Default to hacheck draining if the service is in smartstack
if service_namespace_config.is_in_smartstack():
default = 'hacheck'
return self.config_dict.get('drain_method', default)
def get_drain_method_params(self, service_namespace_config: ServiceNamespaceConfig) -> Dict:
"""Get the drain method parameters specified in the service's marathon configuration.
:param service_config: The service instance's configuration dictionary
:returns: The drain_method_params dictionary specified in the config, or {} if not specified"""
default: Dict = {}
if service_namespace_config.is_in_smartstack():
default = {'delay': 60}
return self.config_dict.get('drain_method_params', default)
# FIXME(jlynch|2016-08-02, PAASTA-4964): DEPRECATE nerve_ns and remove it
def get_nerve_namespace(self) -> str:
return decompose_job_id(self.get_registrations()[0])[1]
def get_registrations(self) -> List[str]:
registrations = self.config_dict.get('registrations', [])
for registration in registrations:
try:
decompose_job_id(registration)
except InvalidJobNameError:
log.error(
'Provided registration {} for service '
'{} is invalid'.format(registration, self.service),
)
# Backwards compatibility with nerve_ns
# FIXME(jlynch|2016-08-02, PAASTA-4964): DEPRECATE nerve_ns and remove it
if not registrations and 'nerve_ns' in self.config_dict:
registrations.append(
compose_job_id(self.service, self.config_dict['nerve_ns']),
)
return registrations or [compose_job_id(self.service, self.instance)]
def get_replication_crit_percentage(self) -> int:
return self.config_dict.get('replication_threshold', 50)
def get_healthcheck_uri(self, service_namespace_config: ServiceNamespaceConfig) -> str:
return self.config_dict.get('healthcheck_uri', service_namespace_config.get_healthcheck_uri())
def get_healthcheck_cmd(self) -> str:
cmd = self.config_dict.get('healthcheck_cmd', None)
if cmd is None:
raise InvalidInstanceConfig("healthcheck mode 'cmd' requires a healthcheck_cmd to run")
else:
return cmd
def get_healthcheck_grace_period_seconds(self) -> float:
"""How long Marathon should give a service to come up before counting failed healthchecks."""
return self.config_dict.get('healthcheck_grace_period_seconds', 60)
def get_healthcheck_interval_seconds(self) -> float:
return self.config_dict.get('healthcheck_interval_seconds', 10)
def get_healthcheck_timeout_seconds(self) -> float:
return self.config_dict.get('healthcheck_timeout_seconds', 10)
def get_healthcheck_max_consecutive_failures(self) -> int:
return self.config_dict.get('healthcheck_max_consecutive_failures', 30)
def get_healthcheck_mode(self, service_namespace_config: ServiceNamespaceConfig) -> str:
mode = self.config_dict.get('healthcheck_mode', None)
if mode is None:
mode = service_namespace_config.get_healthcheck_mode()
elif mode not in ['http', 'https', 'tcp', 'cmd', None]:
raise InvalidHealthcheckMode("Unknown mode: %s" % mode)
return mode
def get_bounce_priority(self) -> int:
"""Gives a priority to each service instance which deployd will use to prioritise services.
Higher numbers are higher priority. This affects the order in which deployd workers pick
instances from the bounce queue.
NB: we multiply by -1 here because *internally* lower numbers are higher priority.
"""
return self.config_dict.get('bounce_priority', 0) * -1
def get_instances(self, with_limit: bool=True) -> int:
"""Gets the number of instances for a service, ignoring whether the user has requested
the service to be started or stopped"""
if self.get_max_instances() is not None:
try:
zk_instances = get_instances_from_zookeeper(
service=self.service,
instance=self.instance,
)
log.debug("Got %d instances out of zookeeper" % zk_instances)
except NoNodeError:
log.debug("No zookeeper data, returning max_instances (%d)" % self.get_max_instances())
return self.get_max_instances()
else:
limited_instances = self.limit_instance_count(zk_instances) if with_limit else zk_instances
return limited_instances
else:
instances = self.config_dict.get('instances', 1)
log.debug("Autoscaling not enabled, returning %d instances" % instances)
return instances
def get_min_instances(self) -> int:
return self.config_dict.get('min_instances', 1)
def get_max_instances(self) -> int:
return self.config_dict.get('max_instances', None)
def get_desired_instances(self) -> int:
"""Get the number of instances specified in zookeeper or the service's marathon configuration.
If the number of instances in zookeeper is less than min_instances, returns min_instances.
If the number of instances in zookeeper is greater than max_instances, returns max_instances.
Defaults to 0 if not specified in the config.
:returns: The number of instances specified in the config, 0 if not
specified or if desired_state is not 'start'.
"""
if self.get_desired_state() == 'start':
return self.get_instances()
else:
log.debug("Instance is set to stop. Returning '0' instances")
return 0
def limit_instance_count(self, instances: int) -> int:
"""
Returns param instances if it is between min_instances and max_instances.
Returns max_instances if instances > max_instances
Returns min_instances if instances < min_instances
"""
return max(
self.get_min_instances(),
min(self.get_max_instances(), instances),
)
def get_container_port(self) -> int:
return self.config_dict.get('container_port', DEFAULT_CONTAINER_PORT)
class InvalidHealthcheckMode(Exception):
pass
def get_healthcheck_for_instance(
service: str,
instance: str,
service_manifest: LongRunningServiceConfig,
random_port: int,
soa_dir: str=DEFAULT_SOA_DIR,
) -> Tuple[Optional[str], Optional[str]]:
"""
Returns healthcheck for a given service instance in the form of a tuple (mode, healthcheck_command)
or (None, None) if no healthcheck
"""
namespace = service_manifest.get_nerve_namespace()
smartstack_config = load_service_namespace_config(
service=service,
namespace=namespace,
soa_dir=soa_dir,
)
mode = service_manifest.get_healthcheck_mode(smartstack_config)
hostname = socket.getfqdn()
if mode == "http" or mode == "https":
path = service_manifest.get_healthcheck_uri(smartstack_config)
healthcheck_command = '%s://%s:%d%s' % (mode, hostname, random_port, path)
elif mode == "tcp":
healthcheck_command = '%s://%s:%d' % (mode, hostname, random_port)
elif mode == 'cmd':
healthcheck_command = service_manifest.get_healthcheck_cmd()
else:
mode = None
healthcheck_command = None
return (mode, healthcheck_command)
def load_service_namespace_config(service: str, namespace: str, soa_dir: str=DEFAULT_SOA_DIR) -> ServiceNamespaceConfig:
"""Attempt to read the configuration for a service's namespace in a more strict fashion.
Retrieves the following keys:
- proxy_port: the proxy port defined for the given namespace
- healthcheck_mode: the mode for the healthcheck (http or tcp)
- healthcheck_port: An alternate port to use for health checking
- healthcheck_uri: URI target for healthchecking
- healthcheck_timeout_s: healthcheck timeout in seconds
- healthcheck_body_expect: an expected string in healthcheck response body
- updown_timeout_s: updown_service timeout in seconds
- timeout_connect_ms: proxy frontend timeout in milliseconds
- timeout_server_ms: proxy server backend timeout in milliseconds
- timeout_client_ms: proxy server client timeout in milliseconds
- retries: the number of retries on a proxy backend
- mode: the mode the service is run in (http or tcp)
- routes: a list of tuples of (source, destination)
- discover: the scope at which to discover services e.g. 'habitat'
- advertise: a list of scopes to advertise services at e.g. ['habitat', 'region']
- extra_advertise: a list of tuples of (source, destination)
e.g. [('region:dc6-prod', 'region:useast1-prod')]
- extra_healthcheck_headers: a dict of HTTP headers that must
be supplied when health checking. E.g. { 'Host': 'example.com' }
:param service: The service name
:param namespace: The namespace to read
:param soa_dir: The SOA config directory to read from
:returns: A dict of the above keys, if they were defined
"""
service_config = service_configuration_lib.read_service_configuration(
service_name=service, soa_dir=soa_dir,
)
smartstack_config = service_config.get('smartstack', {})
namespace_config_from_file = smartstack_config.get(namespace, {})
service_namespace_config = ServiceNamespaceConfig()
# We can't really use .get, as we don't want the key to be in the returned
# dict at all if it doesn't exist in the config file.
# We also can't just copy the whole dict, as we only care about some keys
# and there's other things that appear in the smartstack section in
# several cases.
key_whitelist = {
'healthcheck_mode',
'healthcheck_uri',
'healthcheck_port',
'healthcheck_timeout_s',
'healthcheck_body_expect',
'updown_timeout_s',
'proxy_port',
'timeout_connect_ms',
'timeout_server_ms',
'timeout_client_ms',
'retries',
'mode',
'discover',
'advertise',
'extra_healthcheck_headers',
}
for key, value in namespace_config_from_file.items():
if key in key_whitelist:
service_namespace_config[key] = value
# Other code in paasta_tools checks 'mode' after the config file
# is loaded, so this ensures that it is set to the appropriate default
# if not otherwise specified, even if appropriate default is None.
service_namespace_config['mode'] = service_namespace_config.get_mode()
if 'routes' in namespace_config_from_file:
service_namespace_config['routes'] = [(route['source'], dest)
for route in namespace_config_from_file['routes']
for dest in route['destinations']]
if 'extra_advertise' in namespace_config_from_file:
service_namespace_config['extra_advertise'] = [
(src, dst)
for src in namespace_config_from_file['extra_advertise']
for dst in namespace_config_from_file['extra_advertise'][src]
]
return service_namespace_config
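# Illustrative sketch (hypothetical values): a smartstack namespace block of the kind
# load_service_namespace_config reads, using only keys documented above. Anything
# outside key_whitelist / 'routes' / 'extra_advertise' would be dropped, and 'mode'
# is always filled in via get_mode().
_EXAMPLE_NAMESPACE_CONFIG = {
    'proxy_port': 20973,
    'mode': 'http',
    'healthcheck_uri': '/status',
    'timeout_server_ms': 1000,
    'retries': 1,
    'discover': 'region',
    'advertise': ['region'],
    'extra_advertise': {'region:dc6-prod': ['region:useast1-prod']},
    'routes': [{'source': 'region:dc6-prod', 'destinations': ['region:useast1-prod']}],
}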
class InvalidSmartstackMode(Exception):
pass
def get_instances_from_zookeeper(service: str, instance: str) -> int:
with ZookeeperPool() as zookeeper_client:
(instances, _) = zookeeper_client.get('%s/instances' % compose_autoscaling_zookeeper_root(service, instance))
return int(instances)
def compose_autoscaling_zookeeper_root(service: str, instance: str) -> str:
return f'{AUTOSCALING_ZK_ROOT}/{service}/{instance}'
def set_instances_for_marathon_service(
service: str,
instance: str,
instance_count: int,
soa_dir: str=DEFAULT_SOA_DIR,
) -> None:
zookeeper_path = '%s/instances' % compose_autoscaling_zookeeper_root(service, instance)
with ZookeeperPool() as zookeeper_client:
zookeeper_client.ensure_path(zookeeper_path)
zookeeper_client.set(zookeeper_path, str(instance_count).encode('utf8'))
def get_proxy_port_for_instance(
service_config: LongRunningServiceConfig,
) -> Optional[int]:
"""Get the proxy_port defined in the first namespace configuration for a
service instance.
This means that the namespace first has to be loaded from the service instance's
configuration, and then the proxy_port has to loaded from the smartstack configuration
for that namespace.
:param service_config: The instance of the services LongRunningServiceConfig
:returns: The proxy_port for the service instance, or None if not defined"""
registration = service_config.get_registrations()[0]
service, namespace, _, __ = decompose_job_id(registration)
nerve_dict = load_service_namespace_config(
service=service, namespace=namespace, soa_dir=service_config.soa_dir,
)
return nerve_dict.get('proxy_port')
def host_passes_blacklist(host_attributes: Mapping[str, str], blacklist: DeployBlacklist) -> bool:
"""
:param host: A single host attributes dict
:param blacklist: A list of lists like [["location_type", "location"], ["foo", "bar"]]
    :returns: boolean, True if the host gets past the blacklist
"""
try:
for location_type, location in blacklist:
if host_attributes.get(location_type) == location:
return False
except ValueError as e:
log.error(f"Errors processing the following blacklist: {blacklist}")
log.error("I will assume the host does not pass\nError was: %s" % e)
return False
return True
def host_passes_whitelist(host_attributes: Mapping[str, str], whitelist: DeployWhitelist) -> bool:
"""
:param host: A single host attributes dict.
:param whitelist: A 2 item list like ["location_type", ["location1", 'location2']]
:returns: boolean, True if the host gets past the whitelist
"""
# No whitelist, so disable whitelisting behavior.
if whitelist is None or len(whitelist) == 0:
return True
try:
(location_type, locations) = whitelist
if host_attributes.get(location_type) in locations:
return True
except ValueError as e:
log.error(f"Errors processing the following whitelist: {whitelist}")
log.error("I will assume the host does not pass\nError was: %s" % e)
return False
return False
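# Illustrative usage sketch (not part of the original module):
if __name__ == "__main__":
    assert host_passes_blacklist({'region': 'uswest1-prod'}, [['region', 'uswest1-prod']]) is False
    assert host_passes_blacklist({'region': 'uswest1-prod'}, [['region', 'useast1-prod']]) is True
    assert host_passes_whitelist({'region': 'uswest1-prod'}, ['region', ['uswest1-prod']]) is True
    assert host_passes_whitelist({'region': 'uswest1-prod'}, None) is True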
| 40.653105
| 120
| 0.689755
|
369fc6c76d890a703c63eda67287919ff59e6c4b
| 1,322
|
py
|
Python
|
notifications/tests.py
|
GilbertTan19/Empire_of_Movies-deploy
|
e9e05530a25e76523e624591c966dccf84898ace
|
[
"MIT"
] | null | null | null |
notifications/tests.py
|
GilbertTan19/Empire_of_Movies-deploy
|
e9e05530a25e76523e624591c966dccf84898ace
|
[
"MIT"
] | 2
|
2021-03-30T14:31:18.000Z
|
2021-04-08T21:22:09.000Z
|
notifications/tests.py
|
GilbertTan19/Empire_of_Movies-deploy
|
e9e05530a25e76523e624591c966dccf84898ace
|
[
"MIT"
] | 5
|
2020-07-13T03:17:07.000Z
|
2020-07-22T03:15:57.000Z
|
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.test import Client, TestCase
from django.urls import reverse
from .models import Notification
from articles.models import Movie, Discussion
# Create your tests here.
class NotificationsTests(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username='user',
email='user@email.com',
password='testpass123'
)
self.movie = Movie.objects.create(
title='Harry Potter',
synopsis='Once upon a time',
year='2002'
)
self.user.follows.add(self.movie)
def test_notification_list_view_for_logged_in_user(self):
self.client.login(username='user', password='testpass123')
self.client.post(reverse('discussion_new', args=[self.movie.pk]), {'author':self.user, 'title': "Discussion title", 'body':"Discussion body", 'movie':self.movie})
response = self.client.get(reverse('notification_list'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Discussion title')
self.assertTemplateUsed(response, 'notifications/notification_list.html')
| 37.771429
| 170
| 0.695159
|
b95ace5e7a153e3d7754c8f2137ad7aeb9d3cb15
| 3,582
|
py
|
Python
|
haareyfac.py
|
cyberdrk/haarcascades
|
42e037299f8fba9e337cfb7c2e5d2999e06a9666
|
[
"BSD-3-Clause"
] | 2
|
2020-10-30T06:27:38.000Z
|
2021-05-07T14:33:09.000Z
|
haareyfac.py
|
cyberdrk/haarcascades
|
42e037299f8fba9e337cfb7c2e5d2999e06a9666
|
[
"BSD-3-Clause"
] | null | null | null |
haareyfac.py
|
cyberdrk/haarcascades
|
42e037299f8fba9e337cfb7c2e5d2999e06a9666
|
[
"BSD-3-Clause"
] | 1
|
2020-10-30T06:27:39.000Z
|
2020-10-30T06:27:39.000Z
|
'''
IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
By downloading, copying, installing or using the software you agree to this license.
If you do not agree to this license, do not download, install,
copy or use the software.
Intel License Agreement
For Open Source Computer Vision Library
Copyright (C) 2000, Intel Corporation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistribution's of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistribution's in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* The name of Intel Corporation may not be used to endorse or promote products
derived from this software without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are disclaimed.
In no event shall the Intel Corporation or contributors be liable for any direct,
indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
'''
import numpy as np
import cv2
#Written with help from the tutorial on haar cascades on pythonprogramming.net by sentdex
#multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades
#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml
#eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
cap = cv2.VideoCapture(0)  # 0 selects the default camera, 1 the next attached camera, and so on
flag = 100
while True:
ret, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # 1.3 is the scaleFactor (image pyramid step), 5 is minNeighbors; tune both to the image size and how likely a face is
for(x, y, w, h) in faces: #x, y Cartesian Co-ordinates, width and height
cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2) #Drawing a rectangle
#roi_gray = gray[y:y+h, x:x+w] #Cropping the Gray Region Of Interest, always Y:X
#roi_color = img[y:y+h, x:x+w]
#eyes = eye_cascade.detectMultiScale(roi_gray) #Relying on default values
#for(ex, ey, ew, eh) in eyes:
#cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
#ser.write("5")
cv2.imshow('img', img)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
| 49.068493
| 165
| 0.706309
|
b64bf2492cf5fd30a220d07fa7b292511f335184
| 5,516
|
py
|
Python
|
contrib/seeds/makeseeds.py
|
dmitriy79/dystem-core
|
22222e08c4ac5f29dbed99f087436dcbc65019c5
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
dmitriy79/dystem-core
|
22222e08c4ac5f29dbed99f087436dcbc65019c5
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
dmitriy79/dystem-core
|
22222e08c4ac5f29dbed99f087436dcbc65019c5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/DYSTEMCore:2.2.(0|1|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
    if sline[1] == '0':  # fields are strings; comparing to int 0 would never match
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
# Skip entries with valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| 32.069767
| 186
| 0.567078
|
1abce05106555daf87913576bf9ddaa90a5600f4
| 1,488
|
py
|
Python
|
google/ads/googleads/v4/errors/types/ad_group_ad_error.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v4/errors/types/ad_group_ad_error.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v4/errors/types/ad_group_ad_error.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v4.errors",
marshal="google.ads.googleads.v4",
manifest={"AdGroupAdErrorEnum",},
)
class AdGroupAdErrorEnum(proto.Message):
r"""Container for enum describing possible ad group ad errors."""
class AdGroupAdError(proto.Enum):
r"""Enum describing possible ad group ad errors."""
UNSPECIFIED = 0
UNKNOWN = 1
AD_GROUP_AD_LABEL_DOES_NOT_EXIST = 2
AD_GROUP_AD_LABEL_ALREADY_EXISTS = 3
AD_NOT_UNDER_ADGROUP = 4
CANNOT_OPERATE_ON_REMOVED_ADGROUPAD = 5
CANNOT_CREATE_DEPRECATED_ADS = 6
CANNOT_CREATE_TEXT_ADS = 7
EMPTY_FIELD = 8
RESOURCE_REFERENCED_IN_MULTIPLE_OPS = 9
AD_TYPE_CANNOT_BE_PAUSED = 10
AD_TYPE_CANNOT_BE_REMOVED = 11
__all__ = tuple(sorted(__protobuf__.manifest))
| 31
| 74
| 0.714382
|
00b3bf43ff356fed555d8e18fb9396b5d0884149
| 3,277
|
py
|
Python
|
examples/datacamp-blogpost/05-it-aint-what-yo-do-it-s-the-time-that-you-do-it.py
|
sathiscode/trumania
|
bcf21c4f9e1ff0fe03fd9cbe2dc367f0df033fbc
|
[
"Apache-2.0"
] | 97
|
2018-01-15T19:29:31.000Z
|
2022-03-11T00:27:34.000Z
|
examples/datacamp-blogpost/05-it-aint-what-yo-do-it-s-the-time-that-you-do-it.py
|
sathiscode/trumania
|
bcf21c4f9e1ff0fe03fd9cbe2dc367f0df033fbc
|
[
"Apache-2.0"
] | 10
|
2018-01-15T22:44:55.000Z
|
2022-02-18T09:44:10.000Z
|
examples/datacamp-blogpost/05-it-aint-what-yo-do-it-s-the-time-that-you-do-it.py
|
sathiscode/trumania
|
bcf21c4f9e1ff0fe03fd9cbe2dc367f0df033fbc
|
[
"Apache-2.0"
] | 33
|
2018-01-15T19:34:23.000Z
|
2022-03-05T22:39:33.000Z
|
import logging
import pandas as pd
from tabulate import tabulate
from trumania.core import circus, operations
from trumania.core.random_generators import SequencialGenerator, FakerGenerator, NumpyRandomGenerator, ConstantDependentGenerator, ConstantGenerator
import trumania.core.util_functions as util_functions
from trumania.components.time_patterns.profilers import DefaultDailyTimerGenerator
util_functions.setup_logging()
example_circus = circus.Circus(name="example",
master_seed=12345,
start=pd.Timestamp("1 Jan 2017 00:00"),
step_duration=pd.Timedelta("1h"))
# person population
id_gen = SequencialGenerator(prefix="PERSON_")
age_gen = NumpyRandomGenerator(method="normal", loc=3, scale=5,
seed=next(example_circus.seeder))
name_gen = FakerGenerator(method="name", seed=next(example_circus.seeder))
person = example_circus.create_population(name="person", size=1000, ids_gen=id_gen)
person.create_attribute("NAME", init_gen=name_gen)
person.create_attribute("AGE", init_gen=age_gen)
# basic relationship to store people's quotes
quote_generator = FakerGenerator(method="sentence", nb_words=6, variable_nb_words=True,
seed=next(example_circus.seeder))
quotes_rel = example_circus.populations["person"].create_relationship("quotes")
for w in range(4):
quotes_rel.add_relations(
from_ids=person.ids,
to_ids=quote_generator.generate(size=person.size),
weights=w
)
# message story
story_timer_gen = DefaultDailyTimerGenerator(
clock=example_circus.clock,
seed=next(example_circus.seeder))
low_activity = story_timer_gen.activity(n=3, per=pd.Timedelta("1 day"))
med_activity = story_timer_gen.activity(n=10, per=pd.Timedelta("1 day"))
high_activity = story_timer_gen.activity(n=20, per=pd.Timedelta("1 day"))
activity_gen = NumpyRandomGenerator(
method="choice",
a=[low_activity, med_activity, high_activity],
p=[.2, .7, .1],
seed=next(example_circus.seeder))
hello_world = example_circus.create_story(
name="hello_world",
initiating_population=example_circus.populations["person"],
member_id_field="PERSON_ID",
timer_gen=story_timer_gen,
activity_gen=activity_gen
)
hello_world.set_operations(
example_circus.clock.ops.timestamp(named_as="TIME"),
example_circus.populations["person"].get_relationship("quotes")
.ops.select_one(from_field="PERSON_ID",named_as="MESSAGE"),
example_circus.populations["person"].ops.select_one(named_as="OTHER_PERSON"),
example_circus.populations["person"]
.ops.lookup(id_field="PERSON_ID", select={"NAME": "EMITTER_NAME"}),
example_circus.populations["person"]
.ops.lookup(id_field="OTHER_PERSON", select={"NAME": "RECEIVER_NAME"}),
operations.FieldLogger(log_id="hello_5")
)
# run the circus for 72 simulated hours
example_circus.run(
duration=pd.Timedelta("72h"),
log_output_folder="output/example_scenario",
delete_existing_logs=True
)
# -- DEBUG output printout
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
df = pd.read_csv("output/example_scenario/hello_5.csv")
print(df.head(10))
print(df.tail(10))
| 33.438776
| 148
| 0.728715
|
87389f8edad48ed85f43e0285b46e4a24c7eb144
| 14,921
|
py
|
Python
|
src/deeponto/onto/text/thesaurus.py
|
KRR-Oxford/DeepOnto
|
9262e372e2e404c907fe47e4964c75c492a278d9
|
[
"Apache-2.0"
] | 6
|
2021-12-19T18:00:34.000Z
|
2022-03-28T22:53:44.000Z
|
src/deeponto/onto/text/thesaurus.py
|
KRR-Oxford/Onto-PLM
|
e2e0bb8a5b6d2aa320222b4f829eb741c67c6774
|
[
"Apache-2.0"
] | null | null | null |
src/deeponto/onto/text/thesaurus.py
|
KRR-Oxford/Onto-PLM
|
e2e0bb8a5b6d2aa320222b4f829eb741c67c6774
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Yuan He (KRR-Oxford). All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for processing thesarus of "synonyms" and "non-synonyms" extracted from ontologies
Synonym pairs (labels for the same class) are guaranteed to have:
1. Reflexivity (identity synonyms): for class label1 => ADD (label1, label1);
2. Symmetricality: ADD (label1, label2) => ADD (label2, label1);
3. Transitivity (Optional, not used in the original BERTMap): ADD (label1, label2), (label2, label3) => ADD (label1, label3)
Non-synonyms are of huge sampling space, and we do not care about any of the above properties but there are two types:
1. Soft: labels from two different (atomic) classes;
2. Hard: labels from two logically disjoint (atomic) classes (sibling classes in BERTMap).
For cross-ontology level, given a small set of mappings:
1. Synonyms are extracted from label pairs of aligned classes;
2. Non-synonyms are extracted from label pairs of mismatched classes (w.r.t the given mappings).
Thoughts for future:
1. Should we consider using equivalence axioms, e.g., (Apple, Fruit & RedColor)? Or leave it for prompts?
2.
"""
from __future__ import annotations
import networkx as nx
import itertools
import random
from typing import List, Set, Tuple, Optional, TYPE_CHECKING
from pyats.datastructures import AttrDict
from deeponto import SavedObj
from deeponto.utils import uniqify
from deeponto.utils.logging import banner_msg
# to avoid circular imports
if TYPE_CHECKING:
from deeponto.onto import Ontology
from deeponto.onto.mapping import OntoMappings
class Thesaurus(SavedObj):
def __init__(self, apply_transitivity: bool = False):
# note that reflexivity and symmetry are assumed by construction,
# e.g., if a class C has labels {x, y}, we include (x, x), (y, y)
# for reflexivity; and (x, y), (y, x) for symmetry.
self.sections = []
self.merged_section = dict()
self.apply_transitivity = apply_transitivity
super().__init__("thesaurus")
def __call__(self, *ontos: Ontology):
"""Create a new Thesaurus for given ontologies
"""
self.__init__(self.apply_transitivity)
self.add_synonyms_from_ontos(*ontos)
print(self)
def __str__(self) -> str:
self.info = AttrDict(
{
"onto_names": [section.onto_name for section in self.sections],
"section_sizes": [section.num_synonym_groups for section in self.sections],
}
)
self.info.total_size = sum(self.info.section_sizes)
if self.merged_section:
self.info.merged_total_size = self.merged_section.num_synonym_groups
self.info.reduced = self.info.total_size - self.info.merged_total_size
return super().report(**self.info)
def create_merged_section(self):
all_synonym_groups = [section.synonym_groups for section in self.sections]
merged_synonym_groups = self.merge_synonyms_by_transitivity(*all_synonym_groups)
self.merged_section = AttrDict(
{
"num_synonym_groups": len(merged_synonym_groups),
"synonym_groups": merged_synonym_groups,
}
)
##################################################################################
### extract synonyms from an ontology ###
##################################################################################
def add_synonyms_from_ontos(self, *ontos: Ontology) -> List[Set[str]]:
"""Add synonyms from each input ontology into a "section",
return the resulting synonym groups
"""
for onto in ontos:
synonym_groups = [set(v) for v in onto.idx2labs.values()]
if self.apply_transitivity:
synonym_groups = self.merge_synonyms_by_transitivity(synonym_groups)
new_section = AttrDict(
{
"onto_name": "[intra-onto]: " + onto.owl.name,
"onto_info": str(onto),
"num_synonym_groups": len(synonym_groups),
"synonym_groups": synonym_groups,
}
)
self.sections.append(new_section)
banner_msg("Updating Thesaurus (from Ontos)")
print(f"Add {new_section.num_synonym_groups} synonym groups from the following ontology:\n")
print(f"{new_section.onto_info}")
self.create_merged_section()
return synonym_groups
def add_matched_synonyms_from_mappings(
self, src_onto: Ontology, tgt_onto: Ontology, known_mappings: OntoMappings,
) -> List[Tuple[Set[str], Set[str]]]:
"""Add aligned synonym groups from given mappings. The merged synonyms are included as a section
and returning aligned but not merged src and tgt synonym groups with: (src_synonyms, tgt_synonyms)
for cross-ontology negative sampling.
"""
synonym_group_pairs = []
synonym_groups = []
for src_ent, tgt_ent_dict in known_mappings.ranked.items():
src_ent_labels = src_onto.idx2labs[src_onto.class2idx[src_ent]]
for tgt_ent in tgt_ent_dict.keys():
tgt_ent_labels = tgt_onto.idx2labs[tgt_onto.class2idx[tgt_ent]]
# merged cross-onto synonym group where labels of aligned classes are synonymous
synonym_group_pairs.append(
(set(src_ent_labels), set(tgt_ent_labels))
) # form a synonym group pair with known ontology source
synonym_groups.append(
set(src_ent_labels + tgt_ent_labels)
) # form a synonym group without distinguishing ontology source
if self.apply_transitivity:
synonym_groups = self.merge_synonyms_by_transitivity(synonym_groups)
new_section = AttrDict(
{
"onto_name": f"[cross-onto]: ({src_onto.owl.name}, {tgt_onto.owl.name})",
"onto_info": f"{src_onto}\n{tgt_onto}",
"num_synonym_groups": len(synonym_groups),
"synonym_groups": synonym_groups,
}
)
self.sections.append(new_section)
banner_msg("Updating Thesaurus (from Mappings)")
print(
f"Add {new_section.num_synonym_groups} synonym groups from the mappings of following ontologies:\n"
)
print(f"{new_section.onto_info}")
self.create_merged_section()
return synonym_group_pairs
##################################################################################
### auxiliary functions for transitivity ###
##################################################################################
@classmethod
def merge_synonyms_by_transitivity(cls, *synonym_group_seq: List[Set[str]]):
"""With transitivity assumed, to merge different synonym groups
"""
label_pairs = []
for synonym_group in synonym_group_seq:
for synonym_set in synonym_group:
label_pairs += itertools.product(synonym_set, synonym_set)
merged_grouped_synonyms = cls.connected_labels(label_pairs)
return merged_grouped_synonyms
@staticmethod
def connected_labels(label_pairs: List[Tuple[str, str]]) -> List[Set[str]]:
"""Build a graph for adjacency among the class labels such that
the transitivity of synonyms is ensured
Args:
label_pairs (List[Tuple[str, str]]): label pairs that are synonymous
Returns:
List[Set[str]]: a collection of synonym sets
"""
graph = nx.Graph()
graph.add_edges_from(label_pairs)
# nx.draw(G, with_labels = True)
connected = list(nx.connected_components(graph))
return connected
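# Illustrative sketch (hypothetical labels): connected components of the label
# graph merge overlapping pairs, e.g.
#   Thesaurus.connected_labels([("a", "b"), ("b", "c"), ("x", "y")])
# would yield something like [{"a", "b", "c"}, {"x", "y"}] -- ("a", "b") and
# ("b", "c") are merged through the shared label "b".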
##################################################################################
### +ve and -ve sampling ###
##################################################################################
@staticmethod
def positive_sampling(synonym_groups: List[Set[str]], pos_num: Optional[int] = None):
"""Generate synonym pairs from each independent synonym group
(has been validated to give the same results as in the BERTMap repo)
"""
pos_sample_pool = []
for synonym_set in synonym_groups:
synonym_pairs = list(itertools.product(synonym_set, synonym_set))
pos_sample_pool += synonym_pairs
pos_sample_pool = uniqify(pos_sample_pool)
if (not pos_num) or (pos_num >= len(pos_sample_pool)):
print("required number of positives >= maximum; return all retrieved samples instead ...")
return pos_sample_pool
else:
return random.sample(pos_sample_pool, pos_num)
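# Sketch of the expected behaviour (made-up labels): a synonym group {"x", "y"}
# contributes the reflexive and symmetric pairs ("x", "x"), ("x", "y"),
# ("y", "x"), ("y", "y"); with pos_num=2 a random subset of two pairs is returned.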
@staticmethod
def random_negative_sampling(synonym_groups: List[Set[str]], neg_num: int, max_iter: int = 10):
"""Soft (random) non-synonyms are defined as label pairs from two different synonym groups
that are randomly selected
"""
neg_sample_pool = []
# randomly select disjoint synonym group pairs from all
for _ in range(neg_num):
left, right = tuple(random.sample(synonym_groups, 2))
# randomly choose one label from a synonym group
left_label = random.choice(list(left))
right_label = random.choice(list(right))
neg_sample_pool.append((left_label, right_label))
# uniqify is slow, so avoid calling it too often
neg_sample_pool = uniqify(neg_sample_pool)
while len(neg_sample_pool) < neg_num and max_iter > 0:
max_iter = max_iter - 1 # reduce the iteration to prevent exhausting loop
neg_sample_pool += Thesaurus.random_negative_sampling(
synonym_groups, neg_num - len(neg_sample_pool), max_iter
)
neg_sample_pool = uniqify(neg_sample_pool)
return neg_sample_pool
@staticmethod
def disjointness_negative_sampling(
list_of_disjoint_synonym_groups: List[List[List[str]]], neg_num: int
):
"""Hard (disjoint) non-synonyms are defined as label pairs from two different synonym groups
that are logically disjoint; since these pairs are often limited in number, we adopt the same
approach as in positive sampling, where we first retrieve all the possible pairs.
Each entry of the input is a group of different disjoint synonym groups
"""
# flatten the disjointness groups into all pairs of hard negatives
neg_sample_pool = []
for disjoint_synonym_groups in list_of_disjoint_synonym_groups:
# compute the cartesian product for all possible combinations of disjoint synonym groups
catersian_product = list(itertools.product(disjoint_synonym_groups, disjoint_synonym_groups))
# filter out pairs mapped to themselves
product_excluding_selfs = [pair for pair in catersian_product if pair[0] != pair[1]]
# for each pair of disjoint synonym groups, compute the product of their corresponding labels
product_of_disjoint_labels = [list(itertools.product(left_syn, right_syn)) for left_syn, right_syn in product_excluding_selfs]
product_of_disjoint_labels = list(itertools.chain.from_iterable(product_of_disjoint_labels))
neg_sample_pool += product_of_disjoint_labels
neg_sample_pool = uniqify(neg_sample_pool)
if neg_num > len(neg_sample_pool):
print("required number of negatives >= maximum; return all retrieved samples instead ...")
return neg_sample_pool
else:
return random.sample(neg_sample_pool, neg_num)
@staticmethod
def positive_sampling_from_paired_groups(
matched_synonym_groups: List[Tuple[Set[str], Set[str]]], pos_num: Optional[int] = None
):
"""Generate synonym pairs from each paired synonym group where identity synonyms are removed
"""
pos_sample_pool = []
for left_synonym_set, right_synonym_set in matched_synonym_groups:
# sample cross-onto synonyms while removing identity synonyms
synonym_pairs = [
(l, r) for l, r in itertools.product(left_synonym_set, right_synonym_set) if l != r
]
# append the reversed synonyms (preserve symmetry)
backward_synonym_pairs = [(r, l) for l, r in synonym_pairs]
pos_sample_pool += synonym_pairs + backward_synonym_pairs
pos_sample_pool = uniqify(pos_sample_pool)
if (not pos_num) or (pos_num >= len(pos_sample_pool)):
# return all the possible synonyms if no maximum limit
return pos_sample_pool
else:
return random.sample(pos_sample_pool, pos_num)
@staticmethod
def random_negative_sampling_from_paired_groups(
matched_synonym_groups: List[Tuple[Set[str], Set[str]]], neg_num: int, max_iter: int = 10
):
"""Soft (random) non-synonyms are defined as label pairs from two different synonym groups
that are randomly selected from opposite ontologies
"""
neg_sample_pool = []
# randomly select disjoint synonym group pairs from all
for _ in range(neg_num):
left_class_pairs, right_class_pairs = tuple(random.sample(matched_synonym_groups, 2))
# randomly choose one label from a synonym group
left_label = random.choice(list(left_class_pairs[0])) # choosing the src side
right_label = random.choice(list(right_class_pairs[1])) # choosing the tgt side
neg_sample_pool.append((left_label, right_label))
# uniqify is slow, so avoid calling it too often
neg_sample_pool = uniqify(neg_sample_pool)
while len(neg_sample_pool) < neg_num and max_iter > 0:
max_iter = max_iter - 1 # reduce the iteration to prevent exhausting loop
neg_sample_pool += Thesaurus.random_negative_sampling_from_paired_groups(
matched_synonym_groups, neg_num - len(neg_sample_pool), max_iter
)
neg_sample_pool = uniqify(neg_sample_pool)
return neg_sample_pool
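# Hedged end-to-end usage sketch (assumes `src_onto`, `tgt_onto` and `mappings`
# objects have been loaded elsewhere; only methods defined in this module are used):
#   thesaurus = Thesaurus(apply_transitivity=False)
#   thesaurus(src_onto, tgt_onto)  # build intra-ontology synonym sections
#   paired = thesaurus.add_matched_synonyms_from_mappings(src_onto, tgt_onto, mappings)
#   groups = thesaurus.merged_section.synonym_groups
#   positives = Thesaurus.positive_sampling(groups, pos_num=1000)
#   negatives = Thesaurus.random_negative_sampling(groups, neg_num=1000)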
| 48.602606
| 138
| 0.640574
|
13d6f3033237d069eceb963f9510ab365a3d50a5
| 31,462
|
py
|
Python
|
Bio/Graphics/BasicChromosome.py
|
rwst/biopython
|
d8280b25e3fefdf7aebb7700a7080879a4146200
|
[
"BSD-3-Clause"
] | 2
|
2020-08-25T13:55:00.000Z
|
2020-08-25T16:36:03.000Z
|
Bio/Graphics/BasicChromosome.py
|
rwst/biopython
|
d8280b25e3fefdf7aebb7700a7080879a4146200
|
[
"BSD-3-Clause"
] | 1
|
2020-04-25T20:36:07.000Z
|
2020-04-25T20:36:07.000Z
|
site-packages/Bio/Graphics/BasicChromosome.py
|
Wristlebane/Pyto
|
901ac307b68486d8289105c159ca702318bea5b0
|
[
"MIT"
] | 1
|
2019-04-12T20:52:12.000Z
|
2019-04-12T20:52:12.000Z
|
# Copyright 2001, 2003 by Brad Chapman. All rights reserved.
# Revisions copyright 2011 by Peter Cock. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Draw representations of organism chromosomes with added information.
These classes are meant to model the drawing of pictures of chromosomes.
This can be useful for lots of things, including displaying markers on
a chromosome (ie. for genetic mapping) and showing synteny between two
chromosomes.
The structure of these classes is intended to be a Composite, so that
it will be easy to plug in and switch different parts without
breaking the general drawing capabilities of the system. The
relationship between classes is that everything derives from
_ChromosomeComponent, which specifies the overall interface. The parts
then are related so that an Organism contains Chromosomes, and these
Chromosomes contain ChromosomeSegments. This representation differs
from the canonical composite structure in that we don't really have
'leaf' nodes here -- all components can potentially hold sub-components.
Most of the time the ChromosomeSegment class is what you'll want to
customize for specific drawing tasks.
For providing drawing capabilities, these classes use reportlab:
http://www.reportlab.com
This provides nice output in PDF, SVG and postscript. If you have
reportlab's renderPM module installed you can also use PNG etc.
"""
# reportlab
from reportlab.lib.pagesizes import letter
from reportlab.lib.units import inch
from reportlab.lib import colors
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.graphics.shapes import Drawing, String, Line, Rect, Wedge, ArcPath
from reportlab.graphics.widgetbase import Widget
from Bio.Graphics import _write
from Bio.Graphics.GenomeDiagram._Colors import ColorTranslator as _ColorTranslator
_color_trans = _ColorTranslator()
class _ChromosomeComponent(Widget):
"""Base class specifying the interface for a component of the system.
This class should not be instantiated directly, but should be used
from derived classes.
"""
def __init__(self):
"""Initialize a chromosome component.
Attributes:
- _sub_components -- Any components which are contained under
this parent component. This attribute should be accessed through
the add() and remove() functions.
"""
self._sub_components = []
def add(self, component):
"""Add a sub_component to the list of components under this item."""
if not isinstance(component, _ChromosomeComponent):
raise TypeError("Expected a _ChromosomeComponent "
"object, got %s" % component)
self._sub_components.append(component)
def remove(self, component):
"""Remove the specified component from the subcomponents.
Raises a ValueError if the component is not registered as a
sub_component.
"""
try:
self._sub_components.remove(component)
except ValueError:
raise ValueError("Component %s not found in sub_components." %
component)
def draw(self):
"""Draw the specified component."""
raise AssertionError("Subclasses must implement.")
class Organism(_ChromosomeComponent):
"""Top level class for drawing chromosomes.
This class holds information about an organism and all of its
chromosomes, and provides the top level object which could be used
for drawing a chromosome representation of an organism.
Chromosomes should be added and removed from the Organism via the
add and remove functions.
"""
def __init__(self, output_format='pdf'):
"""Initialize."""
_ChromosomeComponent.__init__(self)
# customizable attributes
self.page_size = letter
self.title_size = 20
# Do we need this given we don't draw a legend?
# If so, should be a public API...
self._legend_height = 0 # 2 * inch
self.output_format = output_format
def draw(self, output_file, title):
"""Draw out the information for the Organism.
Arguments:
- output_file -- The name of a file specifying where the
document should be saved, or a handle to be written to.
The output format is set when creating the Organism object.
Alternatively, output_file=None will return the drawing using
the low-level ReportLab objects (for further processing, such
as adding additional graphics, before writing).
- title -- The output title of the produced document.
"""
width, height = self.page_size
cur_drawing = Drawing(width, height)
self._draw_title(cur_drawing, title, width, height)
cur_x_pos = inch * .5
if len(self._sub_components) > 0:
x_pos_change = (width - inch) / len(self._sub_components)
# no sub_components
else:
pass
for sub_component in self._sub_components:
# set the drawing location of the chromosome
sub_component.start_x_position = cur_x_pos + 0.05 * x_pos_change
sub_component.end_x_position = cur_x_pos + 0.95 * x_pos_change
sub_component.start_y_position = height - 1.5 * inch
sub_component.end_y_position = self._legend_height + 1 * inch
# do the drawing
sub_component.draw(cur_drawing)
# update the locations for the next chromosome
cur_x_pos += x_pos_change
self._draw_legend(cur_drawing, self._legend_height + 0.5 * inch, width)
if output_file is None:
# Let the user take care of writing to the file...
return cur_drawing
return _write(cur_drawing, output_file, self.output_format)
def _draw_title(self, cur_drawing, title, width, height):
"""Write out the title of the organism figure (PRIVATE)."""
title_string = String(width / 2, height - inch, title)
title_string.fontName = 'Helvetica-Bold'
title_string.fontSize = self.title_size
title_string.textAnchor = "middle"
cur_drawing.add(title_string)
def _draw_legend(self, cur_drawing, start_y, width):
"""Draw a legend for the figure (PRIVATE).
Subclasses should implement this (see also self._legend_height) to
provide specialized legends.
"""
pass
class Chromosome(_ChromosomeComponent):
"""Class for drawing a chromosome of an organism.
This organizes the drawing of a single organism's chromosome. This
class can be instantiated directly, but the draw method makes the
most sense to be called in the context of an organism.
"""
def __init__(self, chromosome_name):
"""Initialize a Chromosome for drawing.
Arguments:
- chromosome_name - The label for the chromosome.
Attributes:
- start_x_position, end_x_position - The x positions on the page
where the chromosome should be drawn. This allows multiple
chromosomes to be drawn on a single page.
- start_y_position, end_y_position - The y positions on the page
where the chromosome should be contained.
Configuration Attributes:
- title_size - The size of the chromosome title.
- scale_num - A number to scale the drawing by. This is useful if
you want to draw multiple chromosomes of different sizes at the
same scale. If this is not set, then the chromosome drawing will
be scaled by the number of segments in the chromosome (so each
chromosome will be the exact same final size).
"""
_ChromosomeComponent.__init__(self)
self._name = chromosome_name
self.start_x_position = -1
self.end_x_position = -1
self.start_y_position = -1
self.end_y_position = -1
self.title_size = 20
self.scale_num = None
self.label_size = 6
self.chr_percent = 0.25
self.label_sep_percent = self.chr_percent * 0.5
self._color_labels = False
def subcomponent_size(self):
"""Return the scaled size of all subcomponents of this component."""
total_sub = 0
for sub_component in self._sub_components:
total_sub += sub_component.scale
return total_sub
def draw(self, cur_drawing):
"""Draw a chromosome on the specified template.
Ideally, the x_position and y_*_position attributes should be
set prior to drawing -- otherwise we're going to have some problems.
"""
for position in (self.start_x_position, self.end_x_position,
self.start_y_position, self.end_y_position):
assert position != -1, "Need to set drawing coordinates."
# first draw all of the sub-sections of the chromosome -- this
# will actually be the picture of the chromosome
cur_y_pos = self.start_y_position
if self.scale_num:
y_pos_change = ((self.start_y_position * .95 - self.end_y_position) /
self.scale_num)
elif len(self._sub_components) > 0:
y_pos_change = ((self.start_y_position * .95 - self.end_y_position) /
self.subcomponent_size())
# no sub_components to draw
else:
pass
left_labels = []
right_labels = []
for sub_component in self._sub_components:
this_y_pos_change = sub_component.scale * y_pos_change
# set the location of the component to draw
sub_component.start_x_position = self.start_x_position
sub_component.end_x_position = self.end_x_position
sub_component.start_y_position = cur_y_pos
sub_component.end_y_position = cur_y_pos - this_y_pos_change
# draw the sub component
sub_component._left_labels = []
sub_component._right_labels = []
sub_component.draw(cur_drawing)
left_labels += sub_component._left_labels
right_labels += sub_component._right_labels
# update the position for the next component
cur_y_pos -= this_y_pos_change
self._draw_labels(cur_drawing, left_labels, right_labels)
self._draw_label(cur_drawing, self._name)
def _draw_label(self, cur_drawing, label_name):
"""Draw a label for the chromosome (PRIVATE)."""
x_position = 0.5 * (self.start_x_position + self.end_x_position)
y_position = self.end_y_position
label_string = String(x_position, y_position, label_name)
label_string.fontName = 'Times-BoldItalic'
label_string.fontSize = self.title_size
label_string.textAnchor = 'middle'
cur_drawing.add(label_string)
def _draw_labels(self, cur_drawing, left_labels, right_labels):
"""Layout and draw sub-feature labels for the chromosome (PRIVATE).
Tries to place each label at the same vertical position as the
feature it applies to, but will adjust the positions to avoid or
at least reduce label overlap.
Draws the label text and a coloured line linking it to the
location (i.e. feature) it applies to.
"""
if not self._sub_components:
return
color_label = self._color_labels
segment_width = (self.end_x_position - self.start_x_position) \
* self.chr_percent
label_sep = (self.end_x_position - self.start_x_position) \
* self.label_sep_percent
segment_x = self.start_x_position \
+ 0.5 * (self.end_x_position - self.start_x_position - segment_width)
y_limits = []
for sub_component in self._sub_components:
y_limits.extend((sub_component.start_y_position,
sub_component.end_y_position))
y_min = min(y_limits)
y_max = max(y_limits)
del y_limits
# Now do some label placement magic...
# from reportlab.pdfbase import pdfmetrics
# font = pdfmetrics.getFont('Helvetica')
# h = (font.face.ascent + font.face.descent) * 0.90
h = self.label_size
for x1, x2, labels, anchor in [
(segment_x,
segment_x - label_sep,
_place_labels(left_labels, y_min, y_max, h),
"end"),
(segment_x + segment_width,
segment_x + segment_width + label_sep,
_place_labels(right_labels, y_min, y_max, h),
"start"),
]:
for (y1, y2, color, back_color, name) in labels:
cur_drawing.add(Line(x1, y1, x2, y2,
strokeColor=color,
strokeWidth=0.25))
label_string = String(x2, y2, name,
textAnchor=anchor)
label_string.fontName = 'Helvetica'
label_string.fontSize = h
if color_label:
label_string.fillColor = color
if back_color:
w = stringWidth(name,
label_string.fontName,
label_string.fontSize)
if x1 > x2:
w = w * -1.0
cur_drawing.add(Rect(x2, y2 - 0.1 * h, w, h,
strokeColor=back_color,
fillColor=back_color))
cur_drawing.add(label_string)
class ChromosomeSegment(_ChromosomeComponent):
"""Draw a segment of a chromosome.
This class provides the important configurable functionality of drawing
a Chromosome. Each segment has some customization available here, or can
be subclassed to define additional functionality. Most of the interesting
drawing stuff is likely to happen at the ChromosomeSegment level.
"""
def __init__(self):
"""Initialize a ChromosomeSegment.
Attributes:
- start_x_position, end_x_position - Defines the x range we have
to draw things in.
- start_y_position, end_y_position - Defines the y range we have
to draw things in.
Configuration Attributes:
- scale - A scaling value for the component. By default this is
set at 1 (ie -- has the same scale as everything else). Higher
values give more size to the component, smaller values give less.
- fill_color - A color to fill in the segment with. Colors are
available in reportlab.lib.colors
- label - A label to place on the chromosome segment. This should
be a text string specifying what is to be included in the label.
- label_size - The size of the label.
- chr_percent - The percentage of area that the chromosome
segment takes up.
"""
_ChromosomeComponent.__init__(self)
self.start_x_position = -1
self.end_x_position = -1
self.start_y_position = -1
self.end_y_position = -1
# --- attributes for configuration
self.scale = 1
self.fill_color = None
self.label = None
self.label_size = 6
self.chr_percent = .25
def draw(self, cur_drawing):
"""Draw a chromosome segment.
Before drawing, the range we are drawing in needs to be set.
"""
for position in (self.start_x_position, self.end_x_position,
self.start_y_position, self.end_y_position):
assert position != -1, "Need to set drawing coordinates."
self._draw_subcomponents(cur_drawing) # Anything behind
self._draw_segment(cur_drawing)
self._overdraw_subcomponents(cur_drawing) # Anything on top
self._draw_label(cur_drawing)
def _draw_subcomponents(self, cur_drawing):
"""Draw any subcomponents of the chromosome segment (PRIVATE).
This should be overridden in derived classes if there are
subcomponents to be drawn.
"""
pass
def _draw_segment(self, cur_drawing):
"""Draw the current chromosome segment (PRIVATE)."""
# set the coordinates of the segment -- it'll take up the MIDDLE part
# of the space we have.
segment_y = self.end_y_position
segment_width = (self.end_x_position - self.start_x_position) \
* self.chr_percent
segment_height = self.start_y_position - self.end_y_position
segment_x = self.start_x_position \
+ 0.5 * (self.end_x_position - self.start_x_position - segment_width)
# first draw the sides of the segment
right_line = Line(segment_x, segment_y,
segment_x, segment_y + segment_height)
left_line = Line(segment_x + segment_width, segment_y,
segment_x + segment_width, segment_y + segment_height)
cur_drawing.add(right_line)
cur_drawing.add(left_line)
# now draw the box, if it is filled in
if self.fill_color is not None:
fill_rectangle = Rect(segment_x, segment_y,
segment_width, segment_height)
fill_rectangle.fillColor = self.fill_color
fill_rectangle.strokeColor = None
cur_drawing.add(fill_rectangle)
def _overdraw_subcomponents(self, cur_drawing):
"""Draw any subcomponents of the chromosome segment over the main part (PRIVATE).
This should be overridden in derived classes if there are
subcomponents to be drawn.
"""
pass
def _draw_label(self, cur_drawing):
"""Add a label to the chromosome segment (PRIVATE).
The label will be applied to the right of the segment.
This may be overlapped by any sub-feature labels on other segments!
"""
if self.label is not None:
label_x = 0.5 * (self.start_x_position + self.end_x_position) + \
(self.chr_percent + 0.05) * (self.end_x_position -
self.start_x_position)
label_y = ((self.start_y_position - self.end_y_position) / 2 +
self.end_y_position)
label_string = String(label_x, label_y, self.label)
label_string.fontName = 'Helvetica'
label_string.fontSize = self.label_size
cur_drawing.add(label_string)
def _spring_layout(desired, minimum, maximum, gap=0):
"""Try to layout label co-ordinates or other floats (PRIVATE).
Originally written for the y-axis vertical positioning of labels on a
chromosome diagram (where the minimum gap between y-axis co-ordinates is
the label height), it could also potentially be used for x-axis placement,
or indeed radial placement for circular chromosomes within GenomeDiagram.
In essence this is an optimisation problem, balancing the desire to have
each label as close as possible to its data point, but also to spread out
the labels to avoid overlaps. This could be described with a cost function
(modelling the label distance from the desired placement, and the inter-
label separations as springs) and solved as a multi-variable minimization
problem - perhaps with NumPy or SciPy.
For now however, the implementation is a somewhat crude ad hoc algorithm.
NOTE - This expects the input data to have been sorted!
"""
count = len(desired)
if count <= 1:
return desired # Easy!
if minimum >= maximum:
raise ValueError("Bad min/max %f and %f" % (minimum, maximum))
if min(desired) < minimum or max(desired) > maximum:
raise ValueError("Data %f to %f out of bounds (%f to %f)"
% (min(desired), max(desired), minimum, maximum))
equal_step = float(maximum - minimum) / (count - 1)
if equal_step < gap:
import warnings
from Bio import BiopythonWarning
warnings.warn("Too many labels to avoid overlap", BiopythonWarning)
# Crudest solution
return [minimum + i * equal_step for i in range(count)]
good = True
if gap:
prev = desired[0]
for next in desired[1:]:
if prev - next < gap:
good = False
break
if good:
return desired
span = maximum - minimum
for split in [0.5 * span,
span / 3.0,
2 * span / 3.0,
0.25 * span,
0.75 * span]:
midpoint = minimum + split
low = [x for x in desired if x <= midpoint - 0.5 * gap]
high = [x for x in desired if x > midpoint + 0.5 * gap]
if len(low) + len(high) < count:
# Bad split point, points right on boundary
continue
elif not low and len(high) * gap <= (span - split) + 0.5 * gap:
# Give a little of the unused low space to the high points
return _spring_layout(high, midpoint + 0.5 * gap, maximum, gap)
elif not high and len(low) * gap <= split + 0.5 * gap:
# Give a little of the unused highspace to the low points
return _spring_layout(low, minimum, midpoint - 0.5 * gap, gap)
elif (len(low) * gap <= split - 0.5 * gap and
len(high) * gap <= (span - split) - 0.5 * gap):
return _spring_layout(low, minimum, midpoint - 0.5 * gap, gap) + \
_spring_layout(high, midpoint + 0.5 * gap, maximum, gap)
# This can be counter-productive now we can split out into the telomere or
# spacer-segment's vertical space...
# Try not to spread out as far as the min/max unless needed
low = min(desired)
high = max(desired)
if (high - low) / (count - 1) >= gap:
# Good, we don't need the full range, and can position the
# min and max exactly as well :)
equal_step = (high - low) / (count - 1)
return [low + i * equal_step for i in range(count)]
low = 0.5 * (minimum + min(desired))
high = 0.5 * (max(desired) + maximum)
if (high - low) / (count - 1) >= gap:
# Good, we don't need the full range
equal_step = (high - low) / (count - 1)
return [low + i * equal_step for i in range(count)]
# Crudest solution
return [minimum + i * equal_step for i in range(count)]
# assert False, _spring_layout([0.10,0.12,0.13,0.14,0.5,0.75, 1.0], 0, 1, 0.1)
# assert _spring_layout([0.10,0.12,0.13,0.14,0.5,0.75, 1.0], 0, 1, 0.1) == \
# [0.0, 0.125, 0.25, 0.375, 0.5, 0.75, 1.0]
# assert _spring_layout([0.10,0.12,0.13,0.14,0.5,0.75, 1.0], 0, 1, 0.1) == \
# [0.0, 0.16666666666666666, 0.33333333333333331, 0.5,
# 0.66666666666666663, 0.83333333333333326, 1.0]
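# Illustrative sketch (hypothetical values): labels bunched together are spread
# apart so adjacent co-ordinates end up at least `gap` apart, while already
# well-separated labels stay roughly where requested, e.g.
#   _spring_layout([0.10, 0.12, 0.14, 0.90], 0.0, 1.0, gap=0.1)
# spreads the first three values out while keeping 0.90 in place.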
def _place_labels(desired_etc, minimum, maximum, gap=0):
# Want a list of lists/tuples for desired_etc
desired_etc.sort()
placed = _spring_layout([row[0] for row in desired_etc],
minimum, maximum, gap)
for old, y2 in zip(desired_etc, placed):
# (y1, a, b, c, ..., z) --> (y1, y2, a, b, c, ..., z)
yield (old[0], y2) + tuple(old[1:])
class AnnotatedChromosomeSegment(ChromosomeSegment):
"""Annotated chromosome segment.
This is like the ChromosomeSegment, but accepts a list of features.
"""
def __init__(self, bp_length, features,
default_feature_color=colors.blue,
name_qualifiers=('gene', 'label', 'name', 'locus_tag', 'product')):
"""Initialize.
The features can either be SeqFeature objects, or tuples of values:
start (int), end (int), strand (+1, -1, 0 or None), label (string),
ReportLab color (string or object), and optional ReportLab fill color.
Note we require 0 <= start <= end <= bp_length, and within the vertical
space allocated to this segment, lines will be placed according to the
start/end coordinates (starting from the top).
Positive strand features are drawn on the right, negative on the left,
otherwise all the way across.
We recommend using consistent units for all the segment's scale values
(e.g. their length in base pairs).
When providing features as SeqFeature objects, the default color
is used, unless the feature's qualifiers include an Artemis colour
string (functionality also in GenomeDiagram). The caption also follows
the GenomeDiagram approach and takes the first qualifier from the list
or tuple specified in name_qualifiers.
Note additional attribute label_sep_percent controls the percentage of
area that the chromosome segment takes up, by default half of the
chr_percent attribute (half of 25%, thus 12.5%)
"""
ChromosomeSegment.__init__(self)
self.bp_length = bp_length
self.features = features
self.default_feature_color = default_feature_color
self.name_qualifiers = name_qualifiers
self.label_sep_percent = self.chr_percent * 0.5
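# Illustrative sketch of the tuple-based feature format described in the
# docstring above (coordinates, names and colours are made up):
#   features = [
#       (100, 500, +1, "geneA", colors.red),                # right-hand side
#       (700, 900, -1, "geneB", "blue", colors.lightblue),  # left side, with fill colour
#   ]
#   segment = AnnotatedChromosomeSegment(bp_length=1000, features=features)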
def _overdraw_subcomponents(self, cur_drawing):
"""Draw any annotated features on the chromosome segment (PRIVATE).
Assumes _draw_segment already called to fill out the basic shape,
and assumes that it uses the same boundaries.
"""
# set the coordinates of the segment -- it'll take up the MIDDLE part
# of the space we have.
segment_y = self.end_y_position
segment_width = (self.end_x_position - self.start_x_position) \
* self.chr_percent
label_sep = (self.end_x_position - self.start_x_position) \
* self.label_sep_percent
segment_height = self.start_y_position - self.end_y_position
segment_x = self.start_x_position \
+ 0.5 * (self.end_x_position - self.start_x_position - segment_width)
left_labels = []
right_labels = []
for f in self.features:
try:
# Assume SeqFeature objects
start = f.location.start
end = f.location.end
strand = f.strand
try:
# Handles Artemis colour integers, HTML colors, etc
color = _color_trans.translate(f.qualifiers['color'][0])
except Exception: # TODO: ValueError?
color = self.default_feature_color
fill_color = color
name = ""
for qualifier in self.name_qualifiers:
if qualifier in f.qualifiers:
name = f.qualifiers[qualifier][0]
break
except AttributeError:
# Assume tuple of ints, string, and color
start, end, strand, name, color = f[:5]
color = _color_trans.translate(color)
if len(f) > 5:
fill_color = _color_trans.translate(f[5])
else:
fill_color = color
assert 0 <= start <= end <= self.bp_length
if strand == +1:
# Right side only
x = segment_x + segment_width * 0.6
w = segment_width * 0.4
elif strand == -1:
# Left side only
x = segment_x
w = segment_width * 0.4
else:
# Both or neither - full width
x = segment_x
w = segment_width
local_scale = segment_height / self.bp_length
fill_rectangle = Rect(x,
segment_y + segment_height - local_scale * start,
w,
local_scale * (start - end))
fill_rectangle.fillColor = fill_color
fill_rectangle.strokeColor = color
cur_drawing.add(fill_rectangle)
if name:
if fill_color == color:
back_color = None
else:
back_color = fill_color
value = (segment_y + segment_height - local_scale * start,
color, back_color, name)
if strand == -1:
self._left_labels.append(value)
else:
self._right_labels.append(value)
class TelomereSegment(ChromosomeSegment):
"""A segment that is located at the end of a linear chromosome.
This is just like a regular segment, but it draws the end of a chromosome
which is represented by a half circle. This just overrides the
_draw_segment class of ChromosomeSegment to provide that specialized
drawing.
"""
def __init__(self, inverted=0):
"""Initialize a segment at the end of a chromosome.
See ChromosomeSegment for all of the attributes that can be
customized in a TelomereSegment.
Arguments:
- inverted -- Whether or not the telomere should be inverted
(ie. drawn on the bottom of a chromosome)
"""
ChromosomeSegment.__init__(self)
self._inverted = inverted
def _draw_segment(self, cur_drawing):
"""Draw a half circle representing the end of a linear chromosome (PRIVATE)."""
# set the coordinates of the segment -- it'll take up the MIDDLE part
# of the space we have.
width = (self.end_x_position - self.start_x_position) \
* self.chr_percent
height = self.start_y_position - self.end_y_position
center_x = 0.5 * (self.end_x_position + self.start_x_position)
start_x = center_x - 0.5 * width
if self._inverted:
center_y = self.start_y_position
start_angle = 180
end_angle = 360
else:
center_y = self.end_y_position
start_angle = 0
end_angle = 180
cap_wedge = Wedge(center_x, center_y, width / 2,
start_angle, end_angle, height)
cap_wedge.strokeColor = None
cap_wedge.fillColor = self.fill_color
cur_drawing.add(cap_wedge)
# Now draw an arc for the curved edge of the wedge,
# omitting the flat end.
cap_arc = ArcPath()
cap_arc.addArc(center_x, center_y, width / 2,
start_angle, end_angle, height)
cur_drawing.add(cap_arc)
class SpacerSegment(ChromosomeSegment):
"""A segment that is located at the end of a linear chromosome.
Doesn't draw anything, just empty space which can be helpful
for layout purposes (e.g. making room for feature labels).
"""
def draw(self, cur_diagram):
"""Draw nothing to the current diagram (dummy method).
The segment spacer has no actual image in the diagram,
so this method therefore does nothing, but is defined
to match the expected API of the other segment objects.
"""
pass
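# Hedged usage sketch of the composite structure described above (the file name,
# title and colours are made up; the classes and calls are the ones defined in
# this module):
#   organism = Organism()
#   chromosome = Chromosome("Chr I")
#   body = ChromosomeSegment()
#   body.label = "region A"
#   body.fill_color = colors.lightgrey
#   chromosome.add(TelomereSegment())
#   chromosome.add(body)
#   chromosome.add(TelomereSegment(inverted=1))
#   organism.add(chromosome)
#   organism.draw("example_chromosome.pdf", "Example organism")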
| 39.875792
| 89
| 0.622274
|
ba2afca5df7abc62ecc372282d367e6212f72e93
| 601
|
py
|
Python
|
RaspberryPi/Utility/MySqlHelper.py
|
sevenTiny/SevenTiny.SmartHome
|
59d1704a3faef114664dfbbb1d74b0ed41eb7ddf
|
[
"Apache-2.0"
] | 2
|
2019-10-09T04:17:17.000Z
|
2019-11-12T11:43:51.000Z
|
RaspberryPi/Utility/MySqlHelper.py
|
sevenTiny/SevenTiny.SmartHome
|
59d1704a3faef114664dfbbb1d74b0ed41eb7ddf
|
[
"Apache-2.0"
] | null | null | null |
RaspberryPi/Utility/MySqlHelper.py
|
sevenTiny/SevenTiny.SmartHome
|
59d1704a3faef114664dfbbb1d74b0ed41eb7ddf
|
[
"Apache-2.0"
] | 1
|
2019-11-12T11:43:50.000Z
|
2019-11-12T11:43:50.000Z
|
# coding=utf-8
import pymysql
from Utility.Configs import Cfg_MySql
class MySqlHelper:
conn = None
def __init__(self, db):
cfg_mysql = Cfg_MySql()
self.conn = pymysql.connect(host=cfg_mysql.get('host'), port=int(cfg_mysql.get('port')), user=cfg_mysql.get('user'), passwd=cfg_mysql.get('passwd'), db=db)
def getConnAndCur(self):
return self.conn,self.conn.cursor()
def executeSql(self,sql):
conn,cur = self.getConnAndCur()
cur.execute(sql)
conn.commit()
cur.close()
conn.close()
# Remember to release them when finished
# cur.close()
# conn.close()
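# Hedged usage sketch (the database and table names are made up; connection
# settings come from Cfg_MySql as in __init__ above):
#   helper = MySqlHelper("smart_home")
#   helper.executeSql("INSERT INTO sensor_log (value) VALUES (42)")
#   conn, cur = MySqlHelper("smart_home").getConnAndCur()
#   cur.execute("SELECT * FROM sensor_log")
#   rows = cur.fetchall()
#   cur.close()   # remember to release when finished
#   conn.close()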
| 24.04
| 163
| 0.637271
|
89b1804e0226bc83c86d43d234ce404f0c2ecacc
| 1,871
|
py
|
Python
|
sqlite4dummy-0.0.6/build/lib/sqlite4dummy/tests/sqlite3_in_python/func/test_quote.py
|
link-money-dev/link-api-web-service
|
3da226c7115ee4267f8346620029b710b9987e74
|
[
"BSD-3-Clause"
] | null | null | null |
sqlite4dummy-0.0.6/build/lib/sqlite4dummy/tests/sqlite3_in_python/func/test_quote.py
|
link-money-dev/link-api-web-service
|
3da226c7115ee4267f8346620029b710b9987e74
|
[
"BSD-3-Clause"
] | 1
|
2021-06-01T22:32:25.000Z
|
2021-06-01T22:32:25.000Z
|
sqlite4dummy-0.0.6/sqlite4dummy/tests/sqlite3_in_python/func/test_quote.py
|
link-money-dev/link-api-web-service
|
3da226c7115ee4267f8346620029b710b9987e74
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The QUOTE function returns the raw SQL literal corresponding to a column value.
Ref: https://www.sqlite.org/lang_corefunc.html
"""
from sqlite4dummy.tests.basetest import BaseUnittest
from datetime import datetime, date
import unittest
class Unittest(BaseUnittest):
def setUp(self):
self.connect_database()
def test_all(self):
cursor = self.cursor
# insert some data
create_sql = \
"""
CREATE TABLE test
(
_id INTEGER PRIMARY KEY,
_str TEXT,
_bytes BLOB,
_date DATE,
_datetime TIMESTAMP
)
"""
insert_sql = "INSERT INTO test VALUES (?,?,?,?,?)"
cursor.execute(create_sql)
cursor.execute(insert_sql,
(
1,
r"""abc`~!@#$%^&*()_+-={}[]|\:;'"<>,.?/""",
"Hello World".encode("utf-8"),
date.today(),
datetime.now(),
)
)
select_sql = \
"""
SELECT
quote(_str), quote(_bytes), quote(_date), quote(_datetime)
FROM
test
"""
print(cursor.execute(select_sql).fetchone())
def test_usage(self):
"""QUOTE可以用来获得原生SQL表达式。
"""
cursor = self.cursor
print(cursor.execute("SELECT QUOTE(?)",
(r"""abc`~!@#$%^&*()_+-={}[]|\:;'"<>,.?/""", )).fetchone())
print(cursor.execute("SELECT QUOTE(?)",
("Hello World".encode("utf-8"), )).fetchone())
print(cursor.execute("SELECT QUOTE(?)",
(date.today(), )).fetchone())
print(cursor.execute("SELECT QUOTE(?)",
(datetime.now(), )).fetchone())
if __name__ == "__main__":
unittest.main()
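# Illustrative sketch of typical QUOTE output (exact results depend on the
# sqlite3 adapters in use):
#   SELECT QUOTE('it''s')        -> 'it''s'          (text is single-quoted, embedded quotes doubled)
#   SELECT QUOTE(X'48656C6C6F')  -> X'48656C6C6F'    (blobs become hex literals)
#   SELECT QUOTE(3.14)           -> 3.14             (numbers are returned as-is)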
| 27.514706
| 72
| 0.463923
|
27e064d6d4f2a90b99850bfc9d533cb04d657bc5
| 9,469
|
py
|
Python
|
elasticapm/utils/encoding.py
|
RyanKung/apm-agent-python
|
125a3f0001011f07e74f879967880ef5dbf06130
|
[
"BSD-3-Clause"
] | null | null | null |
elasticapm/utils/encoding.py
|
RyanKung/apm-agent-python
|
125a3f0001011f07e74f879967880ef5dbf06130
|
[
"BSD-3-Clause"
] | null | null | null |
elasticapm/utils/encoding.py
|
RyanKung/apm-agent-python
|
125a3f0001011f07e74f879967880ef5dbf06130
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# BSD 3-Clause License
#
# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import itertools
import uuid
from decimal import Decimal
from elasticapm.conf.constants import KEYWORD_MAX_LENGTH, LABEL_RE, LABEL_TYPES
from elasticapm.utils import compat
PROTECTED_TYPES = compat.integer_types + (type(None), float, Decimal, datetime.datetime, datetime.date, datetime.time)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_text(strings_only=True).
"""
return isinstance(obj, PROTECTED_TYPES)
def force_text(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first, saves 30-40% when s is an instance of
# compat.text_type. This function gets called often in that setting.
#
# Adapted from Django
if isinstance(s, compat.text_type):
return s
if strings_only and is_protected_type(s):
return s
try:
if not isinstance(s, compat.string_types):
if hasattr(s, "__unicode__"):
s = s.__unicode__()
else:
if compat.PY3:
if isinstance(s, bytes):
s = compat.text_type(s, encoding, errors)
else:
s = compat.text_type(s)
else:
s = compat.text_type(bytes(s), encoding, errors)
else:
# Note: We use .decode() here, instead of compat.text_type(s, encoding,
# errors), so that if s is a SafeBytes, it ends up being a
# SafeText at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if not isinstance(s, Exception):
raise UnicodeDecodeError(*e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = " ".join([force_text(arg, encoding, strings_only, errors) for arg in s])
return s
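# Sketch of the expected behaviour (illustrative values):
#   force_text(b"caf\xc3\xa9")         -> "café"
#   force_text(42)                     -> "42"
#   force_text(42, strings_only=True)  -> 42   (protected types pass through unchanged)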
def _has_elasticapm_metadata(value):
try:
return callable(value.__getattribute__("__elasticapm__"))
except Exception:
return False
def transform(value, stack=None, context=None):
# TODO: make this extendable
if context is None:
context = {}
if stack is None:
stack = []
objid = id(value)
if objid in context:
return "<...>"
context[objid] = 1
transform_rec = lambda o: transform(o, stack + [value], context)
if any(value is s for s in stack):
ret = "cycle"
elif isinstance(value, (tuple, list, set, frozenset)):
try:
ret = type(value)(transform_rec(o) for o in value)
except Exception:
# We may be dealing with a namedtuple
class value_type(list):
__name__ = type(value).__name__
ret = value_type(transform_rec(o) for o in value)
elif isinstance(value, uuid.UUID):
ret = repr(value)
elif isinstance(value, dict):
ret = dict((to_unicode(k), transform_rec(v)) for k, v in compat.iteritems(value))
elif isinstance(value, compat.text_type):
ret = to_unicode(value)
elif isinstance(value, compat.binary_type):
ret = to_string(value)
elif not isinstance(value, compat.class_types) and _has_elasticapm_metadata(value):
ret = transform_rec(value.__elasticapm__())
elif isinstance(value, bool):
ret = bool(value)
elif isinstance(value, float):
ret = float(value)
elif isinstance(value, int):
ret = int(value)
elif compat.PY2 and isinstance(value, long): # noqa F821
ret = long(value) # noqa F821
elif value is not None:
try:
ret = transform(repr(value))
except Exception:
# It's a common case that a model's __unicode__ definition may try to query the database,
# which, if it was not cleaned up correctly, would hit a transaction aborted exception
ret = u"<BadRepr: %s>" % type(value)
else:
ret = None
del context[objid]
return ret
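# Sketch (illustrative values): transform() recursively converts arbitrary
# objects into JSON-friendly primitives, e.g.
#   transform({"id": uuid.UUID(int=0), "ok": True, "n": 1.5})
#   -> {"id": "UUID('00000000-0000-0000-0000-000000000000')", "ok": True, "n": 1.5}
# and self-referential containers are replaced with "<...>"/"cycle" markers.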
def to_unicode(value):
try:
value = compat.text_type(force_text(value))
except (UnicodeEncodeError, UnicodeDecodeError):
value = "(Error decoding value)"
except Exception: # in some cases we get a different exception
try:
value = compat.binary_type(repr(type(value)))
except Exception:
value = "(Error decoding value)"
return value
def to_string(value):
try:
return compat.binary_type(value.decode("utf-8").encode("utf-8"))
except Exception:
return to_unicode(value).encode("utf-8")
def shorten(var, list_length=50, string_length=200, dict_length=50):
"""
Shorten a given variable based on configurable maximum lengths, leaving
breadcrumbs in the object to show that it was shortened.
For strings, truncate the string to the max length, and append "..." so
the user knows data was lost.
For lists, truncate the list to the max length, and append two new strings
to the list: "..." and "(<x> more elements)" where <x> is the number of
elements removed.
For dicts, truncate the dict to the max length (based on number of key/value
pairs) and add a new (key, value) pair to the dict:
("...", "(<x> more elements)") where <x> is the number of key/value pairs
removed.
:param var: Variable to be shortened
:param list_length: Max length (in items) of lists
:param string_length: Max length (in characters) of strings
:param dict_length: Max length (in key/value pairs) of dicts
:return: Shortened variable
"""
var = transform(var)
if isinstance(var, compat.string_types) and len(var) > string_length:
var = var[: string_length - 3] + "..."
elif isinstance(var, (list, tuple, set, frozenset)) and len(var) > list_length:
# TODO: we should write a real API for storing some metadata with vars when
# we get around to doing ref storage
var = list(var)[:list_length] + ["...", "(%d more elements)" % (len(var) - list_length,)]
elif isinstance(var, dict) and len(var) > dict_length:
trimmed_tuples = [(k, v) for (k, v) in itertools.islice(compat.iteritems(var), dict_length)]
if "<truncated>" not in var:
trimmed_tuples += [("<truncated>", "(%d more elements)" % (len(var) - dict_length))]
var = dict(trimmed_tuples)
return var
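# Sketch of the truncation behaviour with the default limits (illustrative values):
#   shorten("x" * 300)                 -> 197 "x" characters followed by "..."
#   shorten(list(range(100)))          -> first 50 ints + ["...", "(50 more elements)"]
#   shorten({i: i for i in range(60)}) -> 50 key/value pairs + {"<truncated>": "(10 more elements)"}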
def keyword_field(string):
"""
If the given string is longer than KEYWORD_MAX_LENGTH, truncate it to
KEYWORD_MAX_LENGTH-1, adding the "…" character at the end.
"""
if not isinstance(string, compat.string_types) or len(string) <= KEYWORD_MAX_LENGTH:
return string
return string[: KEYWORD_MAX_LENGTH - 1] + u"…"
def enforce_label_format(labels):
"""
Enforces label format:
* dots, double quotes or stars in keys are replaced by underscores
* string values are limited to a length of 1024 characters
* values can only be of a limited set of types
:param labels: a dictionary of labels
:return: a new dictionary with sanitized keys/values
"""
new = {}
for key, value in compat.iteritems(labels):
if not isinstance(value, LABEL_TYPES):
value = keyword_field(compat.text_type(value))
new[LABEL_RE.sub("_", compat.text_type(key))] = value
return new
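# Sketch (illustrative values): dots, double quotes and stars in keys become
# underscores, and values outside LABEL_TYPES are converted to (truncated) strings:
#   enforce_label_format({"user.id": 7, 'a"b*c': object()})
#   -> {"user_id": 7, "a_b_c": "<object object at 0x...>"}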
| 38.49187
| 118
| 0.660154
|
3ec5aba417b3d439193513f14ba0d2a91a54b686
| 22,552
|
py
|
Python
|
mediaplatform_jwp/sync.py
|
jbrownrs/issue-376-GDS-link
|
e8cce1b79f46b98a7d24b2da5eca48430fd904a3
|
[
"MIT"
] | 5
|
2019-01-07T17:22:34.000Z
|
2020-10-08T15:03:12.000Z
|
mediaplatform_jwp/sync.py
|
jbrownrs/issue-376-GDS-link
|
e8cce1b79f46b98a7d24b2da5eca48430fd904a3
|
[
"MIT"
] | 203
|
2017-12-14T09:51:56.000Z
|
2018-08-28T14:04:08.000Z
|
mediaplatform_jwp/sync.py
|
jbrownrs/issue-376-GDS-link
|
e8cce1b79f46b98a7d24b2da5eca48430fd904a3
|
[
"MIT"
] | 5
|
2018-10-22T11:36:01.000Z
|
2020-07-20T05:47:49.000Z
|
import datetime
import dateutil.parser
from django.db import models, transaction
from django.db.models import expressions, functions
from django.utils import timezone
import pytz
import mediaplatform.models as mpmodels
import mediaplatform_jwp.models as jwpmodels
import legacysms.models as legacymodels
import mediaplatform_jwp.models as mediajwpmodels
from mediaplatform_jwp.api import delivery as jwp
from .signalhandlers import setting_sync_items
@transaction.atomic
def update_related_models_from_cache(update_all_videos=False):
"""
Atomically update the database to reflect the current state of the CachedResource table. If a
video is deleted from JWP, the corresponding MediaItem is marked as deleted. Similarly, if it
is deleted from the SMS (but is still in JWP for some reason), the legacysms.MediaItem model
associated with the MediaItem is deleted.
For video resources whose updated timestamp has increased, the JWP and SMS metadata is
synchronised to mediaplatform.MediaItem or an associated legacysms.MediaItem as appropriate.
The update_all_videos flag may be set to True in which case a synchronisation of *all*
MediaItems with the associated CachedResource is performed irrespective of the
updated_at timestamp. Come what may, all channels are synchronised since there is no equivalent
of the updated timestamp for JWP channels.
TODO: no attempt is yet made to synchronise the edit permission with that of the containing
collection for media items. This needs a bit more thought about how the SMS permission model
maps into the new world.
"""
# 1) Delete mediaplatform_jwp.{Video,Channel} objects which are no-longer hosted by JWP and
# mark the corresponding media items/channels as "deleted".
#
# After this stage, there will be no mediaplatform_jwp.Video, mediaplatform.MediaItem,
# legacysms.MediaItem, mediaplatform_jwp.Channel, mediaplatform.Channel or legacysms.Collection
# objects in the database which are reachable from a JWP video which is no-longer hosted on
# JWP.
# A query for JWP videos/channels in our DB which are no-longer in JWPlatform
deleted_jwp_videos = jwpmodels.Video.objects.exclude(
key__in=mediajwpmodels.CachedResource.videos.values_list('key', flat=True))
deleted_jwp_channels = jwpmodels.Channel.objects.exclude(
key__in=mediajwpmodels.CachedResource.channels.values_list('key', flat=True))
# A query for media items which are to be deleted because they relate to a JWP video which was
# deleted
deleted_media_items = (
mpmodels.MediaItem.objects.filter(jwp__key__in=deleted_jwp_videos))
# A query for channels which are to be deleted because they relate to a JWP video which was
# deleted
deleted_channels = (
mpmodels.Channel.objects.filter(jwp__key__in=deleted_jwp_channels))
# A query for legacysms media items which are to be deleted because they relate to a media item
# which is to be deleted
deleted_sms_media_items = (
legacymodels.MediaItem.objects.filter(item__in=deleted_media_items))
# A query for legacysms collections which are to be deleted because they relate to a channel
# which is to be deleted
deleted_sms_collections = (
legacymodels.Collection.objects.filter(channel__in=deleted_channels))
# Mark 'shadow' playlists associated with deleted collections as deleted.
mpmodels.Playlist.objects.filter(sms__in=deleted_sms_collections).update(
deleted_at=timezone.now()
)
# Mark matching MediaItem models as deleted and delete corresponding SMS and JWP objects. The
# order here is important since the queries are not actually run until the corresponding
# update()/delete() calls.
deleted_sms_media_items.delete()
deleted_media_items.update(deleted_at=timezone.now())
deleted_jwp_videos.delete()
# Move media items which are in deleted channels to have no channel, mark the original
# channel as deleted and delete SMS/JWP objects
mpmodels.MediaItem.objects.filter(channel__in=deleted_channels).update(channel=None)
deleted_sms_collections.delete()
deleted_channels.update(deleted_at=timezone.now())
deleted_jwp_channels.delete()
# 2) Update/create JWP video resources
#
# After this stage all mediaplatform_jwp.Video objects in the database should have the same
# "updated" timestamp as the cached JWP resources and any newly appearing JWP videos should
# have associated mediaplatform_jwp.Video objects.
updated_jwp_video_keys = _ensure_resources(
jwpmodels.Video, mediajwpmodels.CachedResource.videos)
_ensure_resources(
jwpmodels.Channel, mediajwpmodels.CachedResource.channels)
# 3) Insert missing mediaplatform.MediaItem and mediaplatform.Channel objects
#
# After this stage, all mediaplatform_jwp.Video objects which lack a mediaplatform.MediaItem
# and mediaplatform_jwp.Channel object which lack a mediaplatform.Channel will have one. The
# newly created mediaplatform.MediaItem objects will be blank but have an updated_at timestamp
# well before the corresponding mediaplatform_jwp.Video object.
# A queryset of all JWP Video objects which lack a mediaplatform.MediaItem annotated with the
# data from the corresponding CachedResource
videos_needing_items = (
jwpmodels.Video.objects
.filter(item__isnull=True)
.annotate(data=models.Subquery(
mediajwpmodels.CachedResource.videos
.filter(key=models.OuterRef('key'))
.values_list('data')[:1]
))
)
# For all videos needing a mediaplatform.MediaItem, create a blank one for videos arising from
# the SMS.
jwp_keys_and_items = [
(
video.key,
mpmodels.MediaItem(),
)
for video in videos_needing_items
if getattr(video, 'data', {}).get('custom', {}).get('sms_media_id') is not None
]
# Insert all the media items in an efficient manner.
mpmodels.MediaItem.objects.bulk_create([
item for _, item in jwp_keys_and_items
])
# Since the bulk_create() call does not call any signal handlers, we need to manually create
# all of the permissions for the new items.
mpmodels.Permission.objects.bulk_create([
mpmodels.Permission(allows_view_item=item) for _, item in jwp_keys_and_items
])
# Add the corresponding media item link to the JWP videos.
for key, item in jwp_keys_and_items:
jwpmodels.Video.objects.filter(key=key).update(item=item)
# A queryset of all JWP Channel objects which lack a mediaplatform.Channel annotated with the
# data from the corresponding CachedResource
jw_channels_needing_channels = (
jwpmodels.Channel.objects
.filter(channel__isnull=True)
.annotate(data=models.Subquery(
mediajwpmodels.CachedResource.channels
.filter(key=models.OuterRef('key'))
.values_list('data')[:1]
))
)
# For all channels needing a mediaplatform.Channel, create a blank one.
jwp_keys_and_channels = [
(
jw_channel.key,
mpmodels.Channel(billing_account=_ensure_billing_account(
jwp.parse_custom_field(
'instid',
jw_channel.data.get('custom', {}).get('sms_instid', 'instid::')
)
)),
)
for jw_channel in jw_channels_needing_channels
]
# Insert all the channels in an efficient manner.
mpmodels.Channel.objects.bulk_create([
channel for _, channel in jwp_keys_and_channels
])
# Since the bulk_create() call does not call any signal handlers, we need to manually create
# all of the permissions for the new channels.
mpmodels.Permission.objects.bulk_create([
mpmodels.Permission(allows_edit_channel=channel) for _, channel in jwp_keys_and_channels
])
# Add the corresponding channel link to the JWP channels.
for key, channel in jwp_keys_and_channels:
jwpmodels.Channel.objects.filter(key=key).update(channel=channel)
# 4) Update metadata for changed videos
#
# After this stage, all mediaplatform.MediaItem objects whose associated JWP video is one of
# those in updated_jwp_video_keys will have their metadata updated from the JWP video's custom
# props. Note that legacysms.MediaItem objects associated with updated mediaplatform.MediaItem
# objects will also be updated/created/deleted as necessary.
# The media items which need updating. We defer fetching all the metadata since we're going to
# reset it anyway.
updated_media_items = (
mpmodels.MediaItem.objects.all()
.select_related('view_permission')
# updated_at is included because, without it, the field does not get updated on save() for
# some reason
.only('view_permission', 'jwp', 'sms', 'updated_at')
.annotate(data=models.Subquery(
mediajwpmodels.CachedResource.videos
.filter(key=models.OuterRef('jwp__key'))
.values_list('data')[:1]
))
)
# Unless we were asked to update the metadata in all objects, only update those which were last
# updated before the corresponding JWP video resource OR were created by us.
if not update_all_videos:
updated_media_items = (
updated_media_items
.filter(
models.Q(jwp__key__in=updated_jwp_video_keys) |
models.Q(id__in=[item.id for _, item in jwp_keys_and_items])
)
)
# Iterate over all updated media items and set the metadata
max_tag_length = mpmodels.MediaItem._meta.get_field('tags').base_field.max_length
type_map = {
'video': mpmodels.MediaItem.VIDEO,
'audio': mpmodels.MediaItem.AUDIO,
'unknown': mpmodels.MediaItem.UNKNOWN,
}
# We'll be modifying the MediaItem objects to be consistent with the JWP videos. We *don't*
# want the signal handlers then trying to modify the JWPlatform videos again so disable
# MediaItem -> JWP syncing if it is enabled.
with setting_sync_items(False):
for item in updated_media_items:
# Skip items with no associated JWP video
if item.data is None:
continue
video = jwp.Video(item.data)
custom = video.get('custom', {})
item.title = _default_if_none(video.get('title'), '')
item.description = _default_if_none(video.get('description'), '')
item.type = type_map[_default_if_none(video.get('mediatype'), 'unknown')]
item.downloadable = 'True' == jwp.parse_custom_field(
'downloadable', custom.get('sms_downloadable', 'downloadable:False:'))
published_timestamp = video.get('date')
if published_timestamp is not None:
item.published_at = datetime.datetime.fromtimestamp(
published_timestamp, pytz.utc)
item.duration = _default_if_none(video.get('duration'), 0.)
# The language should be a three letter code. Use [:3] to make sure that it always is
# even if the JWP custom prop is somehow messed up.
item.language = jwp.parse_custom_field(
'language', custom.get('sms_language', 'language::'))[:3]
item.copyright = jwp.parse_custom_field(
'copyright', custom.get('sms_copyright', 'copyright::'))
# Since tags have database enforced maximum lengths, make sure to truncate them if
# they're too long. We also strip leading or trailing whitespace.
item.tags = [
tag.strip().lower()[:max_tag_length]
for tag in jwp.parse_custom_field(
'keywords', custom.get('sms_keywords', 'keywords::')
).split('|')
if tag.strip() != ''
]
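# Illustrative sketch of the parsing above (hedged: assumes the "name:value:" custom prop
# encoding implied by the defaults used in this module, and a made-up prop value):
#   jwp.parse_custom_field('keywords', 'keywords:Maths | physics:')  # -> 'Maths | physics'
#   # splitting on '|', stripping and lower-casing then yields ['maths', 'physics']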
# Update view permission
item.view_permission.reset()
_set_permission_from_acl(item.view_permission, video.acl)
item.view_permission.save()
# Update associated SMS media item (if any)
sms_media_id = video.media_id
if sms_media_id is not None:
# Get or create associated SMS media item. Note that hasattr is recommended in the
# Django docs as a way to determine if a related object exists.
# https://docs.djangoproject.com/en/dev/topics/db/examples/one_to_one/
if hasattr(item, 'sms') and item.sms is not None:
sms_media_item = item.sms
else:
sms_media_item = legacymodels.MediaItem(id=sms_media_id)
# Extract last updated timestamp. It should be an ISO 8601 date string.
last_updated = jwp.parse_custom_field(
'last_updated_at', custom.get('sms_last_updated_at', 'last_updated_at::'))
# Update SMS media item
sms_media_item.item = item
if last_updated == '':
sms_media_item.last_updated_at = None
else:
sms_media_item.last_updated_at = dateutil.parser.parse(last_updated)
sms_media_item.save()
else:
# If there is no associated SMS media item, make sure that this item doesn't have
# one pointing to it.
if hasattr(item, 'sms') and item.sms is not None:
item.sms.delete()
item.save()
# 5) Update metadata for changed channels
#
# After this stage, all mediaplatform.Channel objects whose associated JWP channel is one of
# those in updated_jwp_channel_keys will have their metadata updated from the JWP channel's
# custom props. Note that legacysms.Channel objects associated with updated
# mediaplatform.Channel objects will also be updated/created/deleted as necessary.
# The channels which need updating. We defer fetching all the metadata since we're going to
# reset it anyway.
updated_channels = (
mpmodels.Channel.objects.all()
.select_related('edit_permission')
# updated_at is included because, without it, the field does not get updated on save() for
# some reason
.only('edit_permission', 'jwp', 'sms', 'updated_at')
.annotate(data=models.Subquery(
mediajwpmodels.CachedResource.channels
.filter(key=models.OuterRef('jwp__key'))
.values_list('data')[:1]
))
)
for channel in updated_channels:
# Skip channels with no associated JWP channel
if channel.data is None:
continue
channel_data = jwp.Channel(channel.data)
custom = channel_data.get('custom', {})
# NB: The channel billing account is immutable and so we need not examine sms_instid here.
channel.title = _default_if_none(channel_data.get('title'), '')
channel.description = _default_if_none(channel_data.get('description'), '')
# Update edit permission
channel.edit_permission.reset()
try:
creator = jwp.parse_custom_field(
'created_by', custom.get('sms_created_by', 'created_by::'))
except ValueError:
creator = jwp.parse_custom_field(
'creator', custom.get('sms_created_by', 'creator::'))
if creator != '' and creator not in channel.edit_permission.crsids:
channel.edit_permission.crsids.append(creator)
group_id = jwp.parse_custom_field(
'groupid', custom.get('sms_groupid', 'groupid::'))
if group_id != '' and group_id not in channel.edit_permission.lookup_groups:
channel.edit_permission.lookup_groups.append(group_id)
channel.edit_permission.save()
# Update contents. We use the "sms_collection_media_ids" custom prop as that is always set
# to the media ids which "should" be in the collection unlike sms_{,failed_}media_ids which
# is used as part of the playlist synchronisation process.
sms_collection_media_ids = [
int(media_id.strip())
for media_id in jwp.parse_custom_field(
'collection_media_ids',
custom.get('sms_collection_media_ids', 'collection_media_ids::')
).split(',') if media_id.strip() != ''
]
collection_media_ids = (
mpmodels.MediaItem.objects.filter(sms__id__in=sms_collection_media_ids)
.only('id', 'sms__id')
)
channel.items.set(collection_media_ids)
# Form a list of media item keys which is in the same order as sms_collection_media_ids.
item_map = {item.sms.id: item.id for item in collection_media_ids}
item_ids = [
item_id for item_id in (
item_map.get(media_id) for media_id in sms_collection_media_ids
) if item_id is not None
]
# Update associated SMS collection (if any)
sms_collection_id = channel_data.collection_id
if sms_collection_id is not None:
# Get or create associated SMS collection. Note that hasattr is recommended in the
# Django docs as a way to determine if a related object exists.
# https://docs.djangoproject.com/en/dev/topics/db/examples/one_to_one/
if hasattr(channel, 'sms') and channel.sms is not None:
sms_channel = channel.sms
else:
sms_channel = legacymodels.Collection(id=sms_collection_id)
# Extract last updated timestamp. It should be an ISO 8601 date string.
last_updated = jwp.parse_custom_field(
'last_updated_at', custom.get('sms_last_updated_at', 'last_updated_at::'))
# Update SMS collection
sms_channel.channel = channel
if last_updated == '':
sms_channel.last_updated_at = None
else:
sms_channel.last_updated_at = dateutil.parser.parse(last_updated)
if sms_channel.playlist is None:
# If the 'shadow' playlist doesn't exist, create it.
sms_channel.playlist = mpmodels.Playlist(channel=channel)
sms_channel.save()
# Update the Playlist
sms_channel.playlist.title = channel.title
sms_channel.playlist.description = channel.description
sms_channel.playlist.media_items = item_ids
sms_channel.playlist.save()
else:
# If there is no associated SMS collection, make sure that this channel doesn't have
# one pointing to it.
if hasattr(channel, 'sms') and channel.sms is not None:
channel.sms.delete()
channel.save()
def _ensure_billing_account(lookup_instid):
"""
Return a billing account associated with the specified institution id if one exists or create
one if not.
"""
return mpmodels.BillingAccount.objects.get_or_create(
defaults={'description': f'Lookup institution {lookup_instid}'},
lookup_instid=lookup_instid)[0]
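# Illustrative usage sketch (the institution id is hypothetical): repeated calls with the
# same id should reuse one row rather than creating duplicates, courtesy of get_or_create().
#   account = _ensure_billing_account('ENG')
#   assert _ensure_billing_account('ENG') == account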
def _default_if_none(value, default):
return value if value is not None else default
def _set_permission_from_acl(permission, acl):
"""
Given an ACL, update the passed permission to reflect it. The permission is not reset, nor is
it save()-ed after the update.
"""
for ace in acl:
if ace == 'WORLD':
permission.is_public = True
elif ace == 'CAM':
permission.is_signed_in = True
elif ace.startswith('INST_'):
permission.lookup_insts.append(ace[5:])
elif ace.startswith('GROUP_'):
permission.lookup_groups.append(ace[6:])
elif ace.startswith('USER_'):
permission.crsids.append(ace[5:])
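# Illustrative sketch of the ACE prefixes handled above (hypothetical ACL; assumes the
# permission's list fields default to empty lists):
#   perm = mpmodels.Permission()
#   _set_permission_from_acl(perm, ['CAM', 'INST_ENG', 'GROUP_101128', 'USER_spqr1'])
#   # -> perm.is_signed_in == True, perm.lookup_insts == ['ENG'],
#   #    perm.lookup_groups == ['101128'], perm.crsids == ['spqr1']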
def _ensure_resources(jwp_model, resource_queryset):
"""
Given a model from mediaplatform_jwp and a queryset of CachedResource object corresponding to
that model, make sure that objects of the appropriate model exist for each CachedResource
object and that their updated timestamps are correct.
Returns a list of all JWP resource keys for resources which were updated/created.
"""
jwp_queryset = jwp_model.objects.all()
# A query which returns all the cached video resources which do not correspond to an existing
# JWP video.
new_resources = (
resource_queryset
.exclude(key__in=jwp_queryset.values_list('key', flat=True))
)
# Start creating a list of all JWP video object which were touched in this update
updated_jwp_keys = [v.key for v in new_resources.only('key')]
# Bulk insert objects for all new resources.
jwp_queryset.bulk_create([
jwp_model(key=resource.key, updated=resource.data.get('updated', 0), resource=resource)
for resource in new_resources
])
# A subquery which returns the corresponding CachedResource's updated timestamp for a JWP
# Video. We cannot simply use "data__updated" here because Django by design
# (https://code.djangoproject.com/ticket/14104) does not support joined fields with update()
# but the checks incorrectly interpret "data__updated" as a join and not a transform. Until
# Django is fixed, we use a horrible workaround using RawSQL. See
# https://www.postgresql.org/docs/current/static/functions-json.html for the Postgres JSON
# operators.
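# For example (hedged sketch): if a cached resource row has data = {'updated': 1533214800, ...},
# then "data ->> 'updated'" yields the text '1533214800', which the Cast(..., BigIntegerField())
# below turns back into the integer 1533214800 for comparison against the object's updated field.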
matching_resource_updated = models.Subquery(
resource_queryset
.filter(key=models.OuterRef('key'))
.values_list(
functions.Cast(expressions.RawSQL("data ->> 'updated'", []), models.BigIntegerField())
)[:1]
)
# Add to our list of updated JWP videos
updated_jwp_keys.extend([
v.key
for v in jwp_queryset
.filter(updated__lt=matching_resource_updated)
.only('key')
])
# For all objects whose corresponding CachedResource's updated field is later than the object's
# updated field, update the object.
(
jwp_queryset
.annotate(resource_updated=matching_resource_updated)
.filter(updated__lt=models.F('resource_updated'))
.update(updated=models.F('resource_updated'))
)
return updated_jwp_keys
| 42.874525
| 99
| 0.67147
|
03c5f8872aa26991997879c9084e20553f4fef32
| 46,774
|
py
|
Python
|
katdal/ms_extra.py
|
adriaanph/katdal
|
a3d01a08c5e74b6143caef02e499f2073056acd7
|
[
"BSD-3-Clause"
] | null | null | null |
katdal/ms_extra.py
|
adriaanph/katdal
|
a3d01a08c5e74b6143caef02e499f2073056acd7
|
[
"BSD-3-Clause"
] | null | null | null |
katdal/ms_extra.py
|
adriaanph/katdal
|
a3d01a08c5e74b6143caef02e499f2073056acd7
|
[
"BSD-3-Clause"
] | null | null | null |
################################################################################
# Copyright (c) 2011-2019, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Create MS compatible data and write this data into a template MeasurementSet."""
#
# Ludwig Schwardt
# 25 March 2008
#
import os
import os.path
from copy import deepcopy
import numpy as np
from pkg_resources import parse_version
import casacore
from casacore import tables
# Perform python-casacore version checks
pyc_ver = parse_version(casacore.__version__)
req_ver = parse_version("2.2.1")
if not pyc_ver >= req_ver:
raise ImportError(f"python-casacore {req_ver} is required, but the current version is {pyc_ver}. "
f"Note that python-casacore {req_ver} requires at least casacore 2.3.0.")
def open_table(name, readonly=False, verbose=False, **kwargs):
"""Open casacore Table."""
return tables.table(name, readonly=readonly, ack=verbose, **kwargs)
def create_ms(filename, table_desc=None, dm_info=None):
"""Create an empty MS with the default expected sub-tables and columns."""
with tables.default_ms(filename, table_desc, dm_info) as main_table:
# Add the optional SOURCE subtable
source_path = os.path.join(os.getcwd(), filename, 'SOURCE')
with tables.default_ms_subtable('SOURCE', source_path) as source_table:
# Add the optional REST_FREQUENCY column to appease exportuvfits
# (it only seems to need the column keywords)
rest_freq_desc = tables.makearrcoldesc(
'REST_FREQUENCY', 0, valuetype='DOUBLE', ndim=1,
keywords={'MEASINFO': {'Ref': 'LSRK', 'type': 'frequency'},
'QuantumUnits': 'Hz'})
source_table.addcols(rest_freq_desc)
main_table.putkeyword('SOURCE', 'Table: ' + source_path)
def std_scalar(comment, valueType='integer', option=0, **kwargs):
"""Description for standard scalar column."""
return dict(comment=comment, valueType=valueType, dataManagerType='StandardStMan',
dataManagerGroup='StandardStMan', option=option, maxlen=0, **kwargs)
def std_array(comment, valueType, ndim, **kwargs):
"""Description for standard array column with variable shape (used for smaller arrays)."""
return dict(comment=comment, valueType=valueType, ndim=ndim, dataManagerType='StandardStMan',
dataManagerGroup='StandardStMan', _c_order=True, option=0, maxlen=0, **kwargs)
def fixed_array(comment, valueType, shape, **kwargs):
"""Description for direct array column with fixed shape (used for smaller arrays)."""
return dict(comment=comment, valueType=valueType, shape=np.asarray(shape, dtype=np.int32), ndim=len(shape),
dataManagerType='StandardStMan', dataManagerGroup='StandardStMan',
_c_order=True, option=5, maxlen=0, **kwargs)
def tiled_array(comment, valueType, ndim, dataManagerGroup, **kwargs):
"""Description for array column with tiled storage manager (used for bigger arrays)."""
return dict(comment=comment, valueType=valueType, ndim=ndim, dataManagerType='TiledShapeStMan',
dataManagerGroup=dataManagerGroup, _c_order=True, option=0, maxlen=0, **kwargs)
def define_hypercolumn(desc):
"""Add hypercolumn definitions to table description."""
desc['_define_hypercolumn_'] = {v['dataManagerGroup']: dict(HCdatanames=[k], HCndim=v['ndim'] + 1)
for k, v in desc.items() if v['dataManagerType'] == 'TiledShapeStMan'}
# Map MeasurementSet string types to numpy types
MS_TO_NP_TYPE_MAP = {
'INT': np.int32,
'FLOAT': np.float32,
'DOUBLE': np.float64,
'BOOLEAN': np.bool,
'COMPLEX': np.complex64,
'DCOMPLEX': np.complex128
}
def kat_ms_desc_and_dminfo(nbl, nchan, ncorr, model_data=False):
"""
Creates Table Description and Data Manager Information objects that
describe a MeasurementSet suitable for holding MeerKAT data.
Creates additional DATA, IMAGING_WEIGHT and possibly
MODEL_DATA and CORRECTED_DATA columns.
Columns are given fixed shapes defined by the arguments to this function.
:param nbl: Number of baselines.
:param nchan: Number of channels.
:param ncorr: Number of correlation products.
:param model_data: Boolean indicating whether MODEL_DATA and CORRECTED_DATA
should be added to the Measurement Set.
:return: Returns a tuple containing a table description describing
the extra columns and hypercolumns, as well as a Data Manager
description.
"""
# Columns that will be modified. We want to keep things like their
# keywords, dims and shapes.
modify_columns = {"WEIGHT", "SIGMA", "FLAG", "FLAG_CATEGORY",
"UVW", "ANTENNA1", "ANTENNA2"}
# Get the required table descriptor for an MS
table_desc = tables.required_ms_desc("MAIN")
# Take columns we wish to modify
extra_table_desc = {c: d for c, d in table_desc.items() if c in modify_columns}
# Used to set the SPEC for each Data Manager Group
dmgroup_spec = {}
def dmspec(coldesc, tile_mem_limit=None):
"""
Create data manager spec for a given column description,
mostly by adding a DEFAULTTILESHAPE that fits into the
supplied memory limit.
"""
# Choose 4MB if none given
if tile_mem_limit is None:
tile_mem_limit = 4*1024*1024
# Get the reversed column shape. DEFAULTTILESHAPE is handled deep in
# casacore and it's necessary to specify the ordering here:
# ntilerows is the dim that will change least quickly
rev_shape = list(reversed(coldesc["shape"]))
ntilerows = 1
np_dtype = MS_TO_NP_TYPE_MAP[coldesc["valueType"].upper()]
nbytes = np.dtype(np_dtype).itemsize
# Try to bump up the number of rows in our tiles while they're
# below the memory limit for the tile
while np.product(rev_shape + [2*ntilerows])*nbytes < tile_mem_limit:
ntilerows *= 2
return {"DEFAULTTILESHAPE": np.int32(rev_shape + [ntilerows])}
# Update existing columns with shape and data manager information
dm_group = 'UVW'
shape = [3]
extra_table_desc["UVW"].update(options=0, shape=shape, ndim=len(shape),
dataManagerGroup=dm_group,
dataManagerType='TiledColumnStMan')
dmgroup_spec[dm_group] = dmspec(extra_table_desc["UVW"])
dm_group = 'Weight'
shape = [ncorr]
extra_table_desc["WEIGHT"].update(options=4, shape=shape, ndim=len(shape),
dataManagerGroup=dm_group,
dataManagerType='TiledColumnStMan')
dmgroup_spec[dm_group] = dmspec(extra_table_desc["WEIGHT"])
dm_group = 'Sigma'
shape = [ncorr]
extra_table_desc["SIGMA"].update(options=4, shape=shape, ndim=len(shape),
dataManagerGroup=dm_group,
dataManagerType='TiledColumnStMan')
dmgroup_spec[dm_group] = dmspec(extra_table_desc["SIGMA"])
dm_group = 'Flag'
shape = [nchan, ncorr]
extra_table_desc["FLAG"].update(options=4, shape=shape, ndim=len(shape),
dataManagerGroup=dm_group,
dataManagerType='TiledColumnStMan')
dmgroup_spec[dm_group] = dmspec(extra_table_desc["FLAG"])
dm_group = 'FlagCategory'
shape = [1, nchan, ncorr]
extra_table_desc["FLAG_CATEGORY"].update(options=4, keywords={},
shape=shape, ndim=len(shape),
dataManagerGroup=dm_group,
dataManagerType='TiledColumnStMan')
dmgroup_spec[dm_group] = dmspec(extra_table_desc["FLAG_CATEGORY"])
# Create new columns for integration into the MS
additional_columns = []
dm_group = 'Data'
shape = [nchan, ncorr]
desc = tables.tablecreatearraycoldesc(
"DATA", 0+0j, comment="The Visibility DATA Column",
options=4, valuetype='complex', keywords={"UNIT": "Jy"},
shape=shape, ndim=len(shape), datamanagergroup=dm_group,
datamanagertype='TiledColumnStMan')
dmgroup_spec[dm_group] = dmspec(desc["desc"])
additional_columns.append(desc)
dm_group = 'WeightSpectrum'
shape = [nchan, ncorr]
desc = tables.tablecreatearraycoldesc(
"WEIGHT_SPECTRUM", 1.0, comment="Per-channel weights",
options=4, valuetype='float', shape=shape, ndim=len(shape),
datamanagergroup=dm_group, datamanagertype='TiledColumnStMan')
dmgroup_spec[dm_group] = dmspec(desc["desc"])
additional_columns.append(desc)
dm_group = 'ImagingWeight'
shape = [nchan]
desc = tables.tablecreatearraycoldesc(
"IMAGING_WEIGHT", 0,
comment="Weight set by imaging task (e.g. uniform weighting)",
options=4, valuetype='float', shape=shape, ndim=len(shape),
datamanagergroup=dm_group, datamanagertype='TiledColumnStMan')
dmgroup_spec[dm_group] = dmspec(desc["desc"])
additional_columns.append(desc)
# Add MODEL_DATA and CORRECTED_DATA if requested
if model_data:
dm_group = 'ModelData'
shape = [nchan, ncorr]
desc = tables.tablecreatearraycoldesc(
"MODEL_DATA", 0+0j, comment="The Visibility MODEL_DATA Column",
options=4, valuetype='complex', keywords={"UNIT": "Jy"},
shape=shape, ndim=len(shape), datamanagergroup=dm_group,
datamanagertype='TiledColumnStMan')
dmgroup_spec[dm_group] = dmspec(desc["desc"])
additional_columns.append(desc)
dm_group = 'CorrectedData'
shape = [nchan, ncorr]
desc = tables.tablecreatearraycoldesc(
"CORRECTED_DATA", 0+0j,
comment="The Visibility CORRECTED_DATA Column",
options=4, valuetype='complex', keywords={"UNIT": "Jy"},
shape=shape, ndim=len(shape), datamanagergroup=dm_group,
datamanagertype='TiledColumnStMan')
dmgroup_spec[dm_group] = dmspec(desc["desc"])
additional_columns.append(desc)
# Update extra table description with additional columns
extra_table_desc.update(tables.maketabdesc(additional_columns))
# Update the original table descriptor with modifications/additions
# Need this to construct a complete Data Manager specification
# that includes the original columns
table_desc.update(extra_table_desc)
# Construct DataManager Specification
dminfo = tables.makedminfo(table_desc, dmgroup_spec)
return extra_table_desc, dminfo
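# Plausible usage sketch (hedged; the argument values are hypothetical): the description and
# data manager info returned here are meant to be passed on to create_ms() defined above.
#   table_desc, dminfo = kat_ms_desc_and_dminfo(nbl=28, nchan=4096, ncorr=4, model_data=False)
#   create_ms('example.ms', table_desc, dminfo)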
caltable_desc = {}
caltable_desc['TIME'] = std_scalar('Timestamp of solution', 'double', option=5)
caltable_desc['FIELD_ID'] = std_scalar('Unique id for this pointing', 'integer', option=5)
caltable_desc['SPECTRAL_WINDOW_ID'] = std_scalar('Spectral window', 'integer', option=5)
caltable_desc['ANTENNA1'] = std_scalar('ID of first antenna in interferometer', 'integer', option=5)
caltable_desc['ANTENNA2'] = std_scalar('ID of second antenna in interferometer', 'integer', option=5)
caltable_desc['INTERVAL'] = std_scalar('The effective integration time', 'double', option=5)
caltable_desc['SCAN_NUMBER'] = std_scalar('Scan number', 'integer', option=5)
caltable_desc['OBSERVATION_ID'] = std_scalar('Observation id (index in OBSERVATION table)', 'integer', option=5)
caltable_desc['PARAMERR'] = std_array('Parameter error', 'float', -1)
caltable_desc['FLAG'] = std_array('Solution flags', 'boolean', -1)
caltable_desc['SNR'] = std_array('Signal to noise ratio', 'float', -1)
caltable_desc['WEIGHT'] = std_array('Weight', 'float', -1)
# float version of caltable
caltable_desc_float = deepcopy(caltable_desc)
caltable_desc_float['FPARAM'] = std_array('Solution values', 'float', -1)
define_hypercolumn(caltable_desc_float)
# complex version of caltable
caltable_desc_complex = deepcopy(caltable_desc)
caltable_desc_complex['CPARAM'] = std_array('Solution values', 'complex', -1)
define_hypercolumn(caltable_desc_complex)
# -------- Routines that create MS data structures in dictionaries -----------
def populate_main_dict(uvw_coordinates, vis_data, flag_data, weight_data, timestamps, antenna1_index,
antenna2_index, integrate_length, field_id=0, state_id=1,
scan_number=0, model_data=None, corrected_data=None):
"""Construct a dictionary containing the columns of the MAIN table.
The MAIN table contains the visibility data itself. The vis data has shape
(num_vis_samples, num_channels, num_pols). The table has one row per
visibility sample, which is one row per baseline per snapshot (time sample).
Parameters
----------
uvw_coordinates : array of float, shape (num_vis_samples, 3)
Array containing (u,v,w) coordinates in metres
vis_data : array of complex, shape (num_vis_samples, num_channels, num_pols)
Array containing complex visibility data in Janskys
flag_data : array of boolean, shape same as vis_data
weight_data : array of float, shape same as vis_data
timestamps : array of float, shape (num_vis_samples,)
Array of timestamps as Modified Julian Dates in seconds
(may contain duplicate times for multiple baselines)
antenna1_index : int or array of int, shape (num_vis_samples,)
Array containing the index of the first antenna of each vis sample
antenna2_index : int or array of int, shape (num_vis_samples,)
Array containing the index of the second antenna of each vis sample
integrate_length : float
The integration time (one over dump rate), in seconds
field_id : int or array of int, shape (num_vis_samples,), optional
The field ID (pointing) associated with this data
state_id : int or array of int, shape (num_vis_samples,), optional
The state ID (observation intent) associated with this data
scan_number : int or array of int, shape (num_vis_samples,), optional
The scan index (compound scan index in the case of KAT-7)
model_data : array of complex, shape (num_vis_samples, num_channels, num_pols)
Array containing complex visibility data in Janskys
corrected_data : array of complex, shape (num_vis_samples, num_channels, num_pols)
Array containing complex visibility data in Janskys
Returns
-------
main_dict : dict
Dictionary containing columns of MAIN table
Raises
------
ValueError
If there is a shape mismatch between some input arrays
"""
num_vis_samples, num_channels, num_pols = vis_data.shape
timestamps = np.atleast_1d(np.asarray(timestamps, dtype=np.float64))
main_dict = {}
# ID of first antenna in interferometer (integer)
main_dict['ANTENNA1'] = antenna1_index
# ID of second antenna in interferometer (integer)
main_dict['ANTENNA2'] = antenna2_index
# ID of array or subarray (integer)
main_dict['ARRAY_ID'] = np.zeros(num_vis_samples, dtype=np.int32)
# The corrected data column (complex, 3-dim)
if corrected_data is not None:
main_dict['CORRECTED_DATA'] = corrected_data
# The data column (complex, 3-dim)
main_dict['DATA'] = vis_data
# The data description table index (integer)
main_dict['DATA_DESC_ID'] = np.zeros(num_vis_samples, dtype=np.int32)
# The effective integration time (double)
main_dict['EXPOSURE'] = integrate_length * np.ones(num_vis_samples)
# The feed index for ANTENNA1 (integer)
main_dict['FEED1'] = np.zeros(num_vis_samples, dtype=np.int32)
# The feed index for ANTENNA1 (integer)
main_dict['FEED2'] = np.zeros(num_vis_samples, dtype=np.int32)
# Unique id for this pointing (integer)
main_dict['FIELD_ID'] = field_id
# The data flags, array of bools with same shape as data
main_dict['FLAG'] = flag_data
# The flag category, NUM_CAT flags for each datum; NUM_CAT is 1 here (boolean, 4-dim)
main_dict['FLAG_CATEGORY'] = flag_data.reshape((num_vis_samples, 1, num_channels, num_pols))
# Row flag - flag all data in this row if True (boolean)
main_dict['FLAG_ROW'] = np.zeros(num_vis_samples, dtype=np.uint8)
# The visibility weights
main_dict['WEIGHT_SPECTRUM'] = weight_data
# Weight set by imaging task (e.g. uniform weighting) (float, 1-dim)
# main_dict['IMAGING_WEIGHT'] = np.ones((num_vis_samples, 1), dtype=np.float32)
# The sampling interval (double)
main_dict['INTERVAL'] = integrate_length * np.ones(num_vis_samples)
# The model data column (complex, 3-dim)
if model_data is not None:
main_dict['MODEL_DATA'] = model_data
# ID for this observation, index in OBSERVATION table (integer)
main_dict['OBSERVATION_ID'] = np.zeros(num_vis_samples, dtype=np.int32)
# Id for backend processor, index in PROCESSOR table (integer)
main_dict['PROCESSOR_ID'] = - np.ones(num_vis_samples, dtype=np.int32)
# Sequential scan number from on-line system (integer)
main_dict['SCAN_NUMBER'] = scan_number
# Estimated rms noise for channel with unity bandpass response (float, 1-dim)
main_dict['SIGMA'] = np.ones((num_vis_samples, num_pols), dtype=np.float32)
# ID for this observing state (integer)
main_dict['STATE_ID'] = state_id
# Modified Julian Dates in seconds (double)
main_dict['TIME'] = timestamps
# Modified Julian Dates in seconds (double)
main_dict['TIME_CENTROID'] = timestamps
# Vector with uvw coordinates (in metres) (double, 1-dim, shape=(3,))
main_dict['UVW'] = np.asarray(uvw_coordinates)
# Weight for each polarisation spectrum (float, 1-dim). This is just
# filled with 1's, because the real weights are in WEIGHT_SPECTRUM.
main_dict['WEIGHT'] = np.ones((num_vis_samples, num_pols), dtype=np.float32)
return main_dict
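# Minimal shape sketch (illustrative values only): for 2 visibility samples, 4 channels and
# 2 polarisations, the inputs below satisfy the shape requirements documented above.
#   vis = np.zeros((2, 4, 2), dtype=np.complex64)
#   main = populate_main_dict(uvw_coordinates=np.zeros((2, 3)), vis_data=vis,
#                             flag_data=np.zeros(vis.shape, dtype=bool),
#                             weight_data=np.ones(vis.shape, dtype=np.float32),
#                             timestamps=np.array([5.0e9, 5.0e9]),
#                             antenna1_index=np.array([0, 0]),
#                             antenna2_index=np.array([1, 2]),
#                             integrate_length=8.0)
#   # main['DATA'].shape == (2, 4, 2) and main['SIGMA'].shape == (2, 2)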
def populate_caltable_main_dict(solution_times, solution_values, antennas, scans):
"""Construct a dictionary containing the columns of the MAIN table.
The MAIN table contains the gain solution data itself. The shape of the data
depends on the nature of the solution: (npol, 1) for gains and delays and
(npol, nchan) for bandpasses.
The table has one row per antenna per time.
Parameters
----------
solution_times : array of float, shape (num_solutions,)
Calibration solution times
solution_values : array of float, shape (num_solutions,)
Calibration solution values
antennas: array of float, shape (num_solutions,)
Antenna corresponding to each solution value
scans: array of float, shape (num_solutions,)
Scan number corresponding to each solution value
Returns
-------
calibration_main_dict : dict
Dictionary containing columns of the caltable MAIN table
"""
num_rows = len(solution_times)
calibration_main_dict = {}
calibration_main_dict['TIME'] = solution_times
calibration_main_dict['FIELD_ID'] = np.zeros(num_rows, dtype=np.int32)
calibration_main_dict['SPECTRAL_WINDOW_ID'] = np.zeros(num_rows, dtype=np.int32)
calibration_main_dict['ANTENNA1'] = antennas
calibration_main_dict['ANTENNA2'] = np.zeros(num_rows, dtype=np.int32)
calibration_main_dict['INTERVAL'] = np.zeros(num_rows, dtype=np.int32)
calibration_main_dict['SCAN_NUMBER'] = scans
calibration_main_dict['OBSERVATION_ID'] = np.zeros(num_rows, dtype=np.int32)
if np.iscomplexobj(solution_values):
calibration_main_dict['CPARAM'] = solution_values
else:
calibration_main_dict['FPARAM'] = solution_values
calibration_main_dict['PARAMERR'] = np.zeros_like(solution_values, dtype=np.float32)
calibration_main_dict['FLAG'] = np.zeros_like(solution_values, dtype=np.int32)
calibration_main_dict['SNR'] = np.ones_like(solution_values, dtype=np.float32)
return calibration_main_dict
def populate_antenna_dict(antenna_names, antenna_positions, antenna_diameters):
"""Construct a dictionary containing the columns of the ANTENNA subtable.
The ANTENNA subtable contains info about each antenna, such as its name,
position, mount type and diameter. It has one row per antenna.
Parameters
----------
antenna_names : array of string, shape (num_antennas,)
Array of antenna names, one per antenna
antenna_positions : array of float, shape (num_antennas, 3)
Array of antenna positions in ECEF (aka XYZ) coordinates, in metres
antenna_diameters : array of float, shape (num_antennas,)
Array of antenna diameters, in metres
Returns
-------
antenna_dict : dict
Dictionary containing columns of ANTENNA subtable
"""
num_antennas = len(antenna_names)
antenna_dict = {}
# Physical diameter of dish (double)
antenna_dict['DISH_DIAMETER'] = np.asarray(antenna_diameters, np.float64)
# Flag for this row (boolean)
antenna_dict['FLAG_ROW'] = np.zeros(num_antennas, np.uint8)
# Mount type e.g. alt-az, equatorial, etc. (string)
antenna_dict['MOUNT'] = np.tile('ALT-AZ', num_antennas)
# Antenna name, e.g. VLA22, CA03 (string)
antenna_dict['NAME'] = np.asarray(antenna_names)
# Axes offset of mount to FEED REFERENCE point (double, 1-dim, shape=(3,))
antenna_dict['OFFSET'] = np.zeros((num_antennas, 3), np.float64)
# Antenna X,Y,Z phase reference position (double, 1-dim, shape=(3,))
antenna_dict['POSITION'] = np.asarray(antenna_positions, dtype=np.float64)
# Station (antenna pad) name (string)
antenna_dict['STATION'] = np.asarray(antenna_names)
# Antenna type (e.g. SPACE-BASED) (string)
antenna_dict['TYPE'] = np.tile('GROUND-BASED', num_antennas)
return antenna_dict
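# Illustrative usage sketch (antenna names, ECEF positions and diameters below are made up):
#   ants = populate_antenna_dict(['m000', 'm001'],
#                                np.array([[5109224.0, 2006790.0, -3239100.0],
#                                          [5109143.0, 2006880.0, -3239050.0]]),
#                                [13.5, 13.5])
#   # ants['MOUNT'] -> ['ALT-AZ', 'ALT-AZ'], ants['DISH_DIAMETER'] -> [13.5, 13.5]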
def populate_feed_dict(num_feeds, num_receptors_per_feed=2):
"""Construct a dictionary containing the columns of the FEED subtable.
The FEED subtable specifies feed characteristics such as polarisation and
beam offsets. It has one row per feed (typically one feed per antenna).
Each feed has a number of receptors (typically one per polarisation type).
Parameters
----------
num_feeds : integer
Number of feeds in telescope (typically equal to number of antennas)
num_receptors_per_feed : integer, optional
Number of receptors per feed (usually one per polarisation type)
Returns
-------
feed_dict : dict
Dictionary containing columns of FEED subtable
"""
feed_dict = {}
# ID of antenna in this array (integer)
feed_dict['ANTENNA_ID'] = np.arange(num_feeds, dtype=np.int32)
# Id for BEAM model (integer)
feed_dict['BEAM_ID'] = np.ones(num_feeds, dtype=np.int32)
# Beam position offset (on sky but in antenna reference frame): (double, 2-dim)
feed_dict['BEAM_OFFSET'] = np.zeros((num_feeds, 2, 2), dtype=np.float64)
# Feed id (integer)
feed_dict['FEED_ID'] = np.zeros(num_feeds, dtype=np.int32)
# Interval for which this set of parameters is accurate (double)
feed_dict['INTERVAL'] = np.zeros(num_feeds, dtype=np.float64)
# Number of receptors on this feed (probably 1 or 2) (integer)
feed_dict['NUM_RECEPTORS'] = np.tile(np.int32(num_receptors_per_feed), num_feeds)
# Type of polarisation to which a given RECEPTOR responds (string, 1-dim)
feed_dict['POLARIZATION_TYPE'] = np.tile(['X', 'Y'], (num_feeds, 1))
# D-matrix i.e. leakage between two receptors (complex, 2-dim)
feed_dict['POL_RESPONSE'] = np.dstack([np.eye(2, dtype=np.complex64) for n in range(num_feeds)]).transpose()
# Position of feed relative to feed reference position (double, 1-dim, shape=(3,))
feed_dict['POSITION'] = np.zeros((num_feeds, 3), np.float64)
# The reference angle for polarisation (double, 1-dim). A parallactic angle of
# 0 means that V is aligned to x (celestial North), but we are mapping H to x
# so we have to correct with a -90 degree rotation.
feed_dict['RECEPTOR_ANGLE'] = np.full((num_feeds, num_receptors_per_feed), -np.pi / 2, dtype=np.float64)
# ID for this spectral window setup (integer)
feed_dict['SPECTRAL_WINDOW_ID'] = - np.ones(num_feeds, dtype=np.int32)
# Midpoint of time for which this set of parameters is accurate (double)
feed_dict['TIME'] = np.zeros(num_feeds, dtype=np.float64)
return feed_dict
def populate_data_description_dict():
"""Construct a dictionary containing the columns of the DATA_DESCRIPTION subtable.
The DATA_DESCRIPTION subtable groups together a set of polarisation and
frequency parameters, which may differ for various experiments done on the
same data set. It has one row per data setting.
Returns
-------
data_description_dict : dict
Dictionary containing columns of DATA_DESCRIPTION subtable
"""
data_description_dict = {}
# Flag this row (boolean)
data_description_dict['FLAG_ROW'] = np.zeros(1, dtype=np.uint8)
# Pointer to polarisation table (integer)
data_description_dict['POLARIZATION_ID'] = np.zeros(1, dtype=np.int32)
# Pointer to spectralwindow table (integer)
data_description_dict['SPECTRAL_WINDOW_ID'] = np.zeros(1, dtype=np.int32)
return data_description_dict
def populate_polarization_dict(ms_pols=['HH', 'VV'], stokes_i=False, circular=False):
"""Construct a dictionary containing the columns of the POLARIZATION subtable.
The POLARIZATION subtable describes how the various receptors are correlated
to create the Stokes terms. It has one row per polarisation setting.
Parameters
----------
ms_pols : ['HH'] | ['VV'] | ['HH','VV'] | ['HH','VV','HV','VH']
The polarisations used in this dataset
stokes_i : False
Mark single pol as Stokes I
circular : False
Label the linear pols with circular (for fun and/or profit)
Returns
-------
polarization_dict : dict
Dictionary containing columns of POLARIZATION subtable
"""
pol_num = {'H': 0, 'V': 1}
# lookups for converting to CASA speak...
pol_types = {'I': 1, 'Q': 2, 'U': 3, 'V': 4, 'RR': 5, 'RL': 6, 'LR': 7, 'LL': 8,
'HH': 9, 'VV': 12, 'HV': 10, 'VH': 11}
if len(ms_pols) > 1 and stokes_i:
print("Warning: Polarisation to be marked as stokes, but more than 1 polarisation "
f"product specified. Using first specified pol ({ms_pols[0]})")
ms_pols = [ms_pols[0]]
# Indices describing receptors of feed going into correlation (integer, 2-dim)
polarization_dict = {}
# The polarisation type for each correlation product, as a Stokes enum (4 integer, 1-dim)
# Stokes enum (starting at 1) = {I, Q, U, V, RR, RL, LR, LL, XX, XY, YX, YY, ...}
# The native correlator data are in XX, YY, XY, YX for HV pol, XX for H pol and YY for V pol
polarization_dict['CORR_PRODUCT'] = np.array([[pol_num[p[0]], pol_num[p[1]]]
for p in ms_pols], dtype=np.int32)[np.newaxis, :, :]
polarization_dict['CORR_TYPE'] = np.array([pol_types[p] - (4 if circular else 0)
for p in (['I'] if stokes_i else ms_pols)])[np.newaxis, :]
# Number of correlation products (integer)
polarization_dict['FLAG_ROW'] = np.zeros(1, dtype=np.uint8)
polarization_dict['NUM_CORR'] = np.array([len(ms_pols)], dtype=np.int32)
return polarization_dict
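# Worked example of the Stokes enum mapping above (illustrative):
#   pol = populate_polarization_dict(ms_pols=['HH', 'VV'])
#   # pol['CORR_TYPE'] -> [[9, 12]] (XX, YY), pol['CORR_PRODUCT'] -> [[[0, 0], [1, 1]]],
#   # pol['NUM_CORR'] -> [2]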
def populate_observation_dict(start_time, end_time, telescope_name='unknown',
observer_name='unknown', project_name='unknown'):
"""Construct a dictionary containing the columns of the OBSERVATION subtable.
The OBSERVATION subtable describes the overall project and the people doing
the observing. It has one row per observation project/schedule?
Parameters
----------
start_time : float
Start time of project, as a Modified Julian Date in seconds
end_time : float
End time of project, as a Modified Julian Date in seconds
telescope_name : string, optional
Telescope name
observer_name : string, optional
Name of observer
project_name : string, optional
Description of project
Returns
-------
observation_dict : dict
Dictionary containing columns of OBSERVATION subtable
"""
observation_dict = {}
# Row flag (boolean)
observation_dict['FLAG_ROW'] = np.zeros(1, dtype=np.uint8)
# Observing log (string, 1-dim)
observation_dict['LOG'] = np.array(['unavailable']).reshape((1, 1))
# Name of observer(s) (string)
observation_dict['OBSERVER'] = np.array([observer_name])
# Project identification string
observation_dict['PROJECT'] = np.array([project_name])
# Release date when data becomes public (double)
observation_dict['RELEASE_DATE'] = np.array([end_time])
# Observing schedule (string, 1-dim)
observation_dict['SCHEDULE'] = np.array(['unavailable']).reshape((1, 1))
# Observing schedule type (string)
observation_dict['SCHEDULE_TYPE'] = np.array(['unknown'])
# Telescope Name (e.g. WSRT, VLBA) (string)
observation_dict['TELESCOPE_NAME'] = np.array([telescope_name])
# Start and end of observation (double, 1-dim, shape=(2,))
observation_dict['TIME_RANGE'] = np.array([[start_time, end_time]])
return observation_dict
def populate_spectral_window_dict(center_frequencies, channel_bandwidths, ref_freq=None):
"""Construct a dictionary containing the columns of the SPECTRAL_WINDOW subtable.
The SPECTRAL_WINDOW subtable describes groupings of frequency channels into
spectral windows. It has one row per spectral window. At the moment, only a
single spectral window is considered. The reference frequency is chosen to
be the center frequency of the middle channel.
Parameters
----------
center_frequencies : array of float, shape (num_channels,)
Observation center frequencies for each channel, in Hz
channel_bandwidths : array of float, shape (num_channels,)
Bandwidth for each channel, in Hz
ref_freq : float, optional
Override for the reference frequency, in Hz (defaults to the centre frequency of the middle channel)
Returns
-------
spectral_window_dict : dict
Dictionary containing columns of SPECTRAL_WINDOW subtable
"""
num_channels = len(center_frequencies)
if len(channel_bandwidths) != num_channels:
raise ValueError('Lengths of center_frequencies and channel_bandwidths differ (%d vs %d)' %
(len(center_frequencies), len(channel_bandwidths)))
spectral_window_dict = {}
# Center frequencies for each channel in the data matrix (double, 1-dim)
spectral_window_dict['CHAN_FREQ'] = np.array([center_frequencies], dtype=np.float64)
# Channel width for each channel (double, 1-dim)
spectral_window_dict['CHAN_WIDTH'] = np.array([channel_bandwidths], dtype=np.float64)
# Effective noise bandwidth of each channel (double, 1-dim)
spectral_window_dict['EFFECTIVE_BW'] = np.array([channel_bandwidths], dtype=np.float64)
# Row flag (boolean)
spectral_window_dict['FLAG_ROW'] = np.zeros(1, dtype=np.uint8)
# Frequency group (integer)
spectral_window_dict['FREQ_GROUP'] = np.zeros(1, dtype=np.int32)
# Frequency group name (string)
spectral_window_dict['FREQ_GROUP_NAME'] = np.array(['none'])
# The IF conversion chain number (integer)
spectral_window_dict['IF_CONV_CHAIN'] = np.zeros(1, dtype=np.int32)
# Frequency Measure reference (integer) (5=Topocentric)
spectral_window_dict['MEAS_FREQ_REF'] = np.array([5], dtype=np.int32)
# Spectral window name (string)
spectral_window_dict['NAME'] = np.array(['none'])
# Net sideband (integer)
spectral_window_dict['NET_SIDEBAND'] = np.ones(1, dtype=np.int32)
# Number of spectral channels (integer)
spectral_window_dict['NUM_CHAN'] = np.array([num_channels], dtype=np.int32)
# The reference frequency (double) - pick the frequency of the middle channel
if ref_freq is None:
spectral_window_dict['REF_FREQUENCY'] = np.array([center_frequencies[num_channels // 2]], dtype=np.float64)
else:
spectral_window_dict['REF_FREQUENCY'] = np.array([ref_freq], dtype=np.float64)
# The effective noise bandwidth for each channel (double, 1-dim)
spectral_window_dict['RESOLUTION'] = np.array([channel_bandwidths], dtype=np.float64)
# The total bandwidth for this window (double)
spectral_window_dict['TOTAL_BANDWIDTH'] = np.array([channel_bandwidths.sum()], dtype=np.float64)
return spectral_window_dict
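# Illustrative sketch (made-up 4-channel window): with no ref_freq given, the reference
# frequency defaults to the centre frequency of the middle channel (index num_channels // 2).
#   freqs = np.array([1.0e9, 1.1e9, 1.2e9, 1.3e9])
#   widths = np.full(4, 1.0e8)
#   spw = populate_spectral_window_dict(freqs, widths)
#   # spw['REF_FREQUENCY'] -> [1.2e9], spw['TOTAL_BANDWIDTH'] -> [4.0e8]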
def populate_source_dict(phase_centers, time_origins, field_names=None):
"""Construct a dictionary containing the columns of the SOURCE subtable.
The SOURCE subtable describes time-variable source information, that may
be associated with a given FIELD_ID. It appears to be optional, but for
completeness it is included here (with no time varying terms). Some RARG
tasks and CASA's exportuvfits do require it, though.
Parameters
----------
phase_centers : array of float, shape (M, 2)
Direction of *M* phase centers as (ra, dec) coordinates in radians
time_origins : array of float, shape (M,)
Time origins where the *M* phase centers are correct, as Modified Julian
Dates in seconds
field_names : array of string, shape (M,), optional
Names of fields/pointings (typically some source names)
Returns
-------
source_dict : dict
Dictionary containing columns of SOURCE subtable
"""
phase_centers = np.atleast_2d(np.asarray(phase_centers, np.float64))
num_fields = len(phase_centers)
if field_names is None:
field_names = [f'Source{field}' for field in range(num_fields)]
source_dict = {}
# Source identifier as specified in the FIELD sub-table (integer)
source_dict['SOURCE_ID'] = np.arange(num_fields, dtype=np.int32)
# Source proper motion in radians per second (double, 1-dim, shape=(2,))
source_dict['PROPER_MOTION'] = np.zeros((num_fields, 2), dtype=np.float32)
# Source direction (e.g. RA, DEC) in radians (double, 1-dim, shape=(2,))
source_dict['DIRECTION'] = phase_centers
# Calibration group number to which this source belongs (integer)
source_dict['CALIBRATION_GROUP'] = np.full(num_fields, -1, dtype=np.int32)
# Name of source as given during observations (string)
source_dict['NAME'] = np.atleast_1d(field_names)
# Number of spectral line transitions associated with this source
# and spectral window id combination (integer)
source_dict['NUM_LINES'] = np.zeros(num_fields, dtype=np.int32)
# Midpoint of time for which this set of parameters is accurate (double)
source_dict['TIME'] = np.atleast_1d(np.asarray(time_origins, dtype=np.float64))
# Rest frequencies for the transitions in Hz (double, 1-dim, shape=(NUM_LINES,))
# This column is optional but expected by exportuvfits and even though
# NUM_LINES is 0, put something sensible here in case it is read.
source_dict['REST_FREQUENCY'] = np.zeros((num_fields, 0), dtype=np.float64)
return source_dict
def populate_field_dict(phase_centers, time_origins, field_names=None):
"""Construct a dictionary containing the columns of the FIELD subtable.
The FIELD subtable describes each field (or pointing) by its sky coordinates.
It has one row per field/pointing.
Parameters
----------
phase_centers : array of float, shape (M, 2)
Direction of *M* phase centers as (ra, dec) coordinates in radians
time_origins : array of float, shape (M,)
Time origins where the *M* phase centers are correct, as Modified Julian
Dates in seconds
field_names : array of string, shape (M,), optional
Names of fields/pointings (typically some source names)
Returns
-------
field_dict : dict
Dictionary containing columns of FIELD subtable
"""
phase_centers = np.atleast_2d(np.asarray(phase_centers, np.float64))[:, np.newaxis, :]
num_fields = len(phase_centers)
if field_names is None:
field_names = [f'Field{field}' for field in range(num_fields)]
field_dict = {}
# Special characteristics of field, e.g. position code (string)
field_dict['CODE'] = np.tile('T', num_fields)
# Direction of delay center (e.g. RA, DEC) as polynomial in time (double, 2-dim)
field_dict['DELAY_DIR'] = phase_centers
# Row flag (boolean)
field_dict['FLAG_ROW'] = np.zeros(num_fields, dtype=np.uint8)
# Name of this field (string)
field_dict['NAME'] = np.atleast_1d(field_names)
# Polynomial order of *_DIR columns (integer)
field_dict['NUM_POLY'] = np.zeros(num_fields, dtype=np.int32)
# Direction of phase center (e.g. RA, DEC) (double, 2-dim)
field_dict['PHASE_DIR'] = phase_centers
# Direction of REFERENCE center (e.g. RA, DEC) as polynomial in time (double, 2-dim)
field_dict['REFERENCE_DIR'] = phase_centers
# Source id (integer), or a value of -1 indicates there is no corresponding source defined
field_dict['SOURCE_ID'] = np.arange(num_fields, dtype=np.int32) # the same as source id
# Time origin for direction and rate (double)
field_dict['TIME'] = np.atleast_1d(np.asarray(time_origins, dtype=np.float64))
return field_dict
def populate_state_dict(obs_modes=['UNKNOWN']):
"""Construct a dictionary containing the columns of the STATE subtable.
The STATE subtable describes observing modes.
It has one row per observing mode.
Parameters
----------
obs_modes : array of string
Observing modes, used to define the schedule strategy.
Returns
-------
state_dict : dict
Dictionary containing columns of STATE subtable
"""
num_states = len(obs_modes)
state_dict = {}
# Signal (boolean)
state_dict['SIG'] = np.ones(num_states, dtype=np.uint8)
# Reference (boolean)
state_dict['REF'] = np.zeros(num_states, dtype=np.uint8)
# Noise calibration temperature (double)
state_dict['CAL'] = np.zeros(num_states, dtype=np.float64)
# Load temperature (double)
state_dict['LOAD'] = np.zeros(num_states, dtype=np.float64)
# Sub-scan number (int)
state_dict['SUB_SCAN'] = np.zeros(num_states, dtype=np.int32)
# Observing mode (string)
state_dict['OBS_MODE'] = np.atleast_1d(obs_modes)
# Row flag (boolean)
state_dict['FLAG_ROW'] = np.zeros(num_states, dtype=np.uint8)
return state_dict
def populate_pointing_dict(num_antennas, observation_duration, start_time, phase_center, pointing_name='default'):
"""Construct a dictionary containing the columns of the POINTING subtable.
The POINTING subtable contains data on individual antennas tracking a target.
It has one row per pointing/antenna?
Parameters
----------
num_antennas : integer
Number of antennas
observation_duration : float
Length of observation, in seconds
start_time : float
Start time of observation, as a Modified Julian Date in seconds
phase_center : array of float, shape (2,)
Direction of phase center, in ra-dec coordinates as 2-element array
pointing_name : string, optional
Name for pointing
Returns
-------
pointing_dict : dict
Dictionary containing columns of POINTING subtable
"""
phase_center = phase_center.reshape((2, 1, 1))
pointing_dict = {}
# Antenna Id (integer)
pointing_dict['ANTENNA_ID'] = np.arange(num_antennas, dtype=np.int32)
# Antenna pointing direction as polynomial in time (double, 2-dim)
pointing_dict['DIRECTION'] = np.repeat(phase_center, num_antennas)
# Time interval (double)
pointing_dict['INTERVAL'] = np.tile(np.float64(observation_duration), num_antennas)
# Pointing position name (string)
pointing_dict['NAME'] = np.array([pointing_name] * num_antennas)
# Series order (integer)
pointing_dict['NUM_POLY'] = np.zeros(num_antennas, dtype=np.int32)
# Target direction as polynomial in time (double, -1-dim)
pointing_dict['TARGET'] = np.repeat(phase_center, num_antennas)
# Time interval midpoint (double)
pointing_dict['TIME'] = np.tile(np.float64(start_time), num_antennas)
# Time origin for direction (double)
pointing_dict['TIME_ORIGIN'] = np.tile(np.float64(start_time), num_antennas)
# Tracking flag - True if on position (boolean)
pointing_dict['TRACKING'] = np.ones(num_antennas, dtype=np.uint8)
return pointing_dict
def populate_ms_dict(uvw_coordinates, vis_data, timestamps, antenna1_index, antenna2_index,
integrate_length, center_frequencies, channel_bandwidths,
antenna_names, antenna_positions, antenna_diameter,
num_receptors_per_feed, start_time, end_time,
telescope_name, observer_name, project_name, phase_center, obs_modes):
"""Construct a dictionary containing all the tables in a MeasurementSet.
Parameters
----------
uvw_coordinates : array of float, shape (num_vis_samples, 3)
Array containing (u,v,w) coordinates in metres
vis_data : array of complex, shape (num_vis_samples, num_channels, num_pols)
Array containing complex visibility data in Janskys
timestamps : array of float, shape (num_vis_samples,)
Array of timestamps as Modified Julian Dates in seconds
antenna1_index : int or array of int, shape (num_vis_samples,)
Array containing the index of the first antenna of each uv sample
antenna2_index : int or array of int, shape (num_vis_samples,)
Array containing the index of the second antenna of each uv sample
integrate_length : float
The integration time (one over dump rate), in seconds
center_frequencies : array of float, shape (num_channels,)
Observation center frequencies for each channel, in Hz
channel_bandwidths : array of float, shape (num_channels,)
Bandwidth for each channel, in Hz
antenna_names : array of string, shape (num_antennas,)
Array of antenna names, one per antenna
antenna_positions : array of float, shape (num_antennas, 3)
Array of antenna positions in ECEF (aka XYZ) coordinates, in metres
antenna_diameter : array of float, shape (num_antennas,)
Array of antenna diameters, in metres
num_receptors_per_feed : integer
Number of receptors per feed (usually one per polarisation type)
start_time : float
Start time of project, as a Modified Julian Date in seconds
end_time : float
End time of project, as a Modified Julian Date in seconds
telescope_name : string
Telescope name
observer_name : string
Observer name
project_name : string
Description of project
phase_center : array of float, shape (2,)
Direction of phase center, in ra-dec coordinates as 2-element array
obs_modes: array of strings
Observing modes
Returns
-------
ms_dict : dict
Dictionary containing all tables and subtables of a measurement set
"""
ms_dict = {}
ms_dict['MAIN'] = populate_main_dict(uvw_coordinates, vis_data, timestamps,
antenna1_index, antenna2_index, integrate_length)
ms_dict['ANTENNA'] = populate_antenna_dict(antenna_names, antenna_positions, antenna_diameter)
ms_dict['FEED'] = populate_feed_dict(len(antenna_positions), num_receptors_per_feed)
ms_dict['DATA_DESCRIPTION'] = populate_data_description_dict()
ms_dict['POLARIZATION'] = populate_polarization_dict()
ms_dict['OBSERVATION'] = populate_observation_dict(start_time, end_time,
telescope_name, observer_name, project_name)
ms_dict['SPECTRAL_WINDOW'] = populate_spectral_window_dict(center_frequencies, channel_bandwidths)
ms_dict['FIELD'] = populate_field_dict(phase_center, start_time)
ms_dict['STATE'] = populate_state_dict(obs_modes)
ms_dict['SOURCE'] = populate_source_dict(phase_center, start_time)
return ms_dict
# ----------------- Write completed dictionary to MS file --------------------
def write_rows(t, row_dict, verbose=True):
num_rows = list(row_dict.values())[0].shape[0]
# Append rows to the table by starting after the last row in table
startrow = t.nrows()
# Add the space required for this group of rows
t.addrows(num_rows)
if verbose:
print(f" added {num_rows} rows")
for col_name, col_data in row_dict.items():
if col_name not in t.colnames():
if verbose:
print(f" column '{col_name}' not in table")
continue
if col_data.dtype.kind == 'U':
col_data = np.char.encode(col_data, encoding='utf-8')
try:
t.putcol(col_name, col_data, startrow)
except RuntimeError as err:
print(" error writing column '%s' with shape %s (%s)" %
(col_name, col_data.shape, err))
else:
if verbose:
print(" wrote column '%s' with shape %s" %
(col_name, col_data.shape))
def write_dict(ms_dict, ms_name, verbose=True):
# Iterate through subtables
for sub_table_name, sub_dict in ms_dict.items():
# Allow parsing of single dict and array of dicts in the same fashion
if isinstance(sub_dict, dict):
sub_dict = [sub_dict]
# Iterate through row groups that are separate dicts within the sub_dict array
for row_dict in sub_dict:
if verbose:
print(f"Table {sub_table_name}:")
# Open main table or sub-table
if sub_table_name == 'MAIN':
t = open_table(ms_name, verbose=verbose)
else:
t = open_table('::'.join((ms_name, sub_table_name)))
if verbose:
print(" opened successfully")
write_rows(t, row_dict, verbose)
t.close()
if verbose:
print(" closed successfully")
| 45.946955
| 115
| 0.684975
|
88ae1288fefc73aa88f1f89de0c0855a8d6e4fa3
| 14,748
|
py
|
Python
|
rocketmq/client.py
|
francisoliverlee/rocketmq-client-python
|
2b683d9f80aebb7a1ee505c5cbf32ca7973b9fe3
|
[
"Apache-2.0"
] | 212
|
2018-10-11T06:59:15.000Z
|
2022-03-30T09:45:10.000Z
|
rocketmq/client.py
|
francisoliverlee/rocketmq-client-python
|
2b683d9f80aebb7a1ee505c5cbf32ca7973b9fe3
|
[
"Apache-2.0"
] | 89
|
2018-11-02T07:50:43.000Z
|
2022-03-16T00:44:18.000Z
|
rocketmq/client.py
|
francisoliverlee/rocketmq-client-python
|
2b683d9f80aebb7a1ee505c5cbf32ca7973b9fe3
|
[
"Apache-2.0"
] | 97
|
2018-10-17T11:57:16.000Z
|
2022-03-08T05:12:08.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import ctypes
from enum import IntEnum
from collections import namedtuple
from .ffi import (
dll, _CSendResult, MSG_CALLBACK_FUNC, MessageModel, TRANSACTION_CHECK_CALLBACK,
LOCAL_TRANSACTION_EXECUTE_CALLBACK
)
from .exceptions import (
ffi_check, NullPointerException,
)
from .consts import MessageProperty
__all__ = ['SendStatus', 'Message', 'ReceivedMessage', 'Producer', 'PushConsumer', 'TransactionMQProducer',
'TransactionStatus', 'ConsumeStatus']
PY2 = sys.version_info[0] == 2
if PY2:
text_type = unicode
binary_type = str
else:
text_type = str
binary_type = bytes
SendResult = namedtuple('SendResult', ['status', 'msg_id', 'offset'])
class SendStatus(IntEnum):
OK = 0
FLUSH_DISK_TIMEOUT = 1
FLUSH_SLAVE_TIMEOUT = 2
SLAVE_NOT_AVAILABLE = 3
class TransactionStatus(IntEnum):
COMMIT = 0
ROLLBACK = 1
UNKNOWN = 2
class ConsumeStatus(IntEnum):
CONSUME_SUCCESS = 0
RECONSUME_LATER = 1
def _to_bytes(s):
if isinstance(s, text_type):
return s.encode('utf-8')
return s
class Message(object):
def __init__(self, topic):
self._handle = dll.CreateMessage(_to_bytes(topic))
def __del__(self):
dll.DestroyMessage(self._handle)
def set_keys(self, keys):
ffi_check(dll.SetMessageKeys(self._handle, _to_bytes(keys)))
def set_tags(self, tags):
ffi_check(dll.SetMessageTags(self._handle, _to_bytes(tags)))
def set_body(self, body):
ffi_check(dll.SetMessageBody(self._handle, _to_bytes(body)))
def set_property(self, key, value):
ffi_check(dll.SetMessageProperty(self._handle, _to_bytes(key), _to_bytes(value)))
def set_delay_time_level(self, delay_time_level):
ffi_check(dll.SetDelayTimeLevel(self._handle, delay_time_level))
@property
def _as_parameter_(self):
return self._handle
def maybe_decode(val):
if isinstance(val, binary_type):
return val.decode('utf-8')
elif isinstance(val, text_type):
return val
    raise TypeError('Expects string types, but got %s' % type(val))
class ReceivedMessage(object):
def __init__(self, handle):
self._handle = handle
@property
def topic(self):
return maybe_decode(dll.GetMessageTopic(self._handle))
@property
def tags(self):
return dll.GetMessageTags(self._handle)
@property
def keys(self):
return dll.GetMessageKeys(self._handle)
@property
def body(self):
return dll.GetMessageBody(self._handle)
@property
def id(self):
return maybe_decode(dll.GetMessageId(self._handle))
@property
def delay_time_level(self):
return dll.GetMessageDelayTimeLevel(self._handle)
@property
def queue_id(self):
return dll.GetMessageQueueId(self._handle)
@property
def reconsume_times(self):
return dll.GetMessageReconsumeTimes(self._handle)
@property
def store_size(self):
return dll.GetMessageStoreSize(self._handle)
@property
def born_timestamp(self):
return dll.GetMessageBornTimestamp(self._handle)
@property
def store_timestamp(self):
return dll.GetMessageStoreTimestamp(self._handle)
@property
def queue_offset(self):
return dll.GetMessageQueueOffset(self._handle)
@property
def commit_log_offset(self):
return dll.GetMessageCommitLogOffset(self._handle)
@property
def prepared_transaction_offset(self):
return dll.GetMessagePreparedTransactionOffset(self._handle)
def get_property(self, prop):
if isinstance(prop, MessageProperty):
prop = prop.value
val = dll.GetMessageProperty(self._handle, _to_bytes(prop))
return val
def __getitem__(self, key):
return self.get_property(key)
def __str__(self):
return self.body.decode('utf-8')
def __bytes__(self):
return self.body
def __repr__(self):
return '<ReceivedMessage topic={} id={} body={}>'.format(
repr(self.topic),
repr(self.id),
repr(self.body),
)
class Producer(object):
def __init__(self, group_id, orderly=False, timeout=None, compress_level=None, max_message_size=None):
if orderly:
self._handle = dll.CreateOrderlyProducer(_to_bytes(group_id))
else:
self._handle = dll.CreateProducer(_to_bytes(group_id))
if self._handle is None:
            raise NullPointerException('Returned null pointer when creating Producer')
if timeout is not None:
self.set_timeout(timeout)
if compress_level is not None:
self.set_compress_level(compress_level)
if max_message_size is not None:
self.set_max_message_size(max_message_size)
self._callback_refs = []
def __enter__(self):
self.start()
def __exit__(self, exec_type, value, traceback):
self.shutdown()
def send_sync(self, msg):
c_result = _CSendResult()
ffi_check(dll.SendMessageSync(self._handle, msg, ctypes.pointer(c_result)))
return SendResult(
SendStatus(c_result.sendStatus),
c_result.msgId.decode('utf-8'),
c_result.offset
)
def send_oneway(self, msg):
ffi_check(dll.SendMessageOneway(self._handle, msg))
def send_orderly_with_sharding_key(self, msg, sharding_key):
c_result = _CSendResult()
ffi_check(
dll.SendMessageOrderlyByShardingKey(self._handle, msg, _to_bytes(sharding_key), ctypes.pointer(c_result)))
return SendResult(
SendStatus(c_result.sendStatus),
c_result.msgId.decode('utf-8'),
c_result.offset
)
def set_group(self, group_name):
ffi_check(dll.SetProducerGroupName(self._handle, _to_bytes(group_name)))
def set_instance_name(self, name):
ffi_check(dll.SetProducerInstanceName(self._handle, _to_bytes(name)))
def set_name_server_address(self, addr):
ffi_check(dll.SetProducerNameServerAddress(self._handle, _to_bytes(addr)))
def set_name_server_domain(self, domain):
ffi_check(dll.SetProducerNameServerDomain(self._handle, _to_bytes(domain)))
def set_session_credentials(self, access_key, access_secret, channel):
ffi_check(dll.SetProducerSessionCredentials(
self._handle,
_to_bytes(access_key),
_to_bytes(access_secret),
_to_bytes(channel)
))
def set_timeout(self, timeout):
ffi_check(dll.SetProducerSendMsgTimeout(self._handle, timeout))
def set_compress_level(self, level):
ffi_check(dll.SetProducerCompressLevel(self._handle, level))
def set_max_message_size(self, max_size):
ffi_check(dll.SetProducerMaxMessageSize(self._handle, max_size))
def start(self):
ffi_check(dll.StartProducer(self._handle))
def shutdown(self):
ffi_check(dll.ShutdownProducer(self._handle))
class TransactionMQProducer(Producer):
def __init__(self, group_id, checker_callback, user_args=None, timeout=None, compress_level=None,
max_message_size=None):
        super(TransactionMQProducer, self).__init__(group_id, timeout=timeout, compress_level=compress_level,
                                                    max_message_size=max_message_size)
self._callback_refs = []
def _on_check(producer, c_message, user_data):
exc = None
try:
py_message = ReceivedMessage(c_message)
check_result = checker_callback(py_message)
if check_result != TransactionStatus.UNKNOWN and check_result != TransactionStatus.COMMIT \
and check_result != TransactionStatus.ROLLBACK:
raise ValueError(
'Check transaction status error, please use enum \'TransactionStatus\' as response')
return check_result
except BaseException as e:
exc = e
return TransactionStatus.UNKNOWN
finally:
if exc:
raise exc
transaction_checker_callback = TRANSACTION_CHECK_CALLBACK(_on_check)
self._callback_refs.append(transaction_checker_callback)
self._handle = dll.CreateTransactionProducer(_to_bytes(group_id), transaction_checker_callback, user_args)
if self._handle is None:
            raise NullPointerException('Returned null pointer when creating transaction producer')
if timeout is not None:
self.set_timeout(timeout)
if compress_level is not None:
self.set_compress_level(compress_level)
if max_message_size is not None:
self.set_max_message_size(max_message_size)
def __enter__(self):
self.start()
def __exit__(self, exec_type, value, traceback):
self.shutdown()
def set_name_server_address(self, addr):
ffi_check(dll.SetProducerNameServerAddress(self._handle, _to_bytes(addr)))
def start(self):
ffi_check(dll.StartProducer(self._handle))
def send_message_in_transaction(self, message, local_execute, user_args=None):
def _on_local_execute(producer, c_message, usr_args):
exc = None
try:
py_message = ReceivedMessage(c_message)
local_result = local_execute(py_message, usr_args)
if local_result != TransactionStatus.UNKNOWN and local_result != TransactionStatus.COMMIT \
and local_result != TransactionStatus.ROLLBACK:
raise ValueError(
'Local transaction status error, please use enum \'TransactionStatus\' as response')
return local_result
except BaseException as e:
exc = e
return TransactionStatus.UNKNOWN
finally:
if exc:
raise exc
local_execute_callback = LOCAL_TRANSACTION_EXECUTE_CALLBACK(_on_local_execute)
self._callback_refs.append(local_execute_callback)
result = _CSendResult()
try:
ffi_check(
dll.SendMessageTransaction(self._handle,
message,
local_execute_callback,
user_args,
ctypes.pointer(result)))
finally:
self._callback_refs.remove(local_execute_callback)
return SendResult(
SendStatus(result.sendStatus),
result.msgId.decode('utf-8'),
result.offset
)
class PushConsumer(object):
def __init__(self, group_id, orderly=False, message_model=MessageModel.CLUSTERING):
self._handle = dll.CreatePushConsumer(_to_bytes(group_id))
if self._handle is None:
            raise NullPointerException('Returned null pointer when creating PushConsumer')
self._orderly = orderly
self.set_message_model(message_model)
self._callback_refs = []
def __enter__(self):
self.start()
def __exit__(self, exec_type, value, traceback):
self.shutdown()
def set_message_model(self, model):
ffi_check(dll.SetPushConsumerMessageModel(self._handle, model))
def start(self):
ffi_check(dll.StartPushConsumer(self._handle))
def shutdown(self):
ffi_check(dll.ShutdownPushConsumer(self._handle))
def set_group(self, group_id):
ffi_check(dll.SetPushConsumerGroupID(self._handle, _to_bytes(group_id)))
def set_name_server_address(self, addr):
ffi_check(dll.SetPushConsumerNameServerAddress(self._handle, _to_bytes(addr)))
def set_name_server_domain(self, domain):
ffi_check(dll.SetPushConsumerNameServerDomain(self._handle, _to_bytes(domain)))
def set_session_credentials(self, access_key, access_secret, channel):
ffi_check(dll.SetPushConsumerSessionCredentials(
self._handle,
_to_bytes(access_key),
_to_bytes(access_secret),
_to_bytes(channel)
))
def subscribe(self, topic, callback, expression='*'):
def _on_message(consumer, msg):
exc = None
try:
consume_result = callback(ReceivedMessage(msg))
if consume_result != ConsumeStatus.CONSUME_SUCCESS and consume_result != ConsumeStatus.RECONSUME_LATER:
raise ValueError('Consume status error, please use enum \'ConsumeStatus\' as response')
return consume_result
except BaseException as e:
exc = e
return ConsumeStatus.RECONSUME_LATER
finally:
if exc:
raise exc
ffi_check(dll.Subscribe(self._handle, _to_bytes(topic), _to_bytes(expression)))
self._register_callback(_on_message)
def _register_callback(self, callback):
if self._orderly:
register_func = dll.RegisterMessageCallbackOrderly
else:
register_func = dll.RegisterMessageCallback
func = MSG_CALLBACK_FUNC(callback)
self._callback_refs.append(func)
ffi_check(register_func(self._handle, func))
def _unregister_callback(self):
if self._orderly:
ffi_check(dll.UnregisterMessageCallbackOrderly(self._handle))
ffi_check(dll.UnregisterMessageCallback(self._handle))
self._callback_refs = []
def set_thread_count(self, thread_count):
ffi_check(dll.SetPushConsumerThreadCount(self._handle, thread_count))
def set_message_batch_max_size(self, max_size):
ffi_check(dll.SetPushConsumerMessageBatchMaxSize(self._handle, max_size))
def set_instance_name(self, name):
ffi_check(dll.SetPushConsumerInstanceName(self._handle, _to_bytes(name)))
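As a quick orientation, here is a minimal, hedged usage sketch of the Producer and PushConsumer wrappers defined above; the group IDs, topic, and name-server address are illustrative assumptions, and a running RocketMQ name server and broker are required for this to actually send or receive anything.

import time
from rocketmq.client import Producer, PushConsumer, Message, ConsumeStatus

# Send one message synchronously (names and address below are assumptions).
producer = Producer('example_producer_group')
producer.set_name_server_address('127.0.0.1:9876')
producer.start()
msg = Message('example_topic')
msg.set_keys('example-key')
msg.set_tags('example-tag')
msg.set_body('hello from the sketch')
result = producer.send_sync(msg)
print(result.status, result.msg_id, result.offset)
producer.shutdown()

# Consume messages from the same topic with a push consumer.
def on_message(received):
    print(received.topic, received.body)
    return ConsumeStatus.CONSUME_SUCCESS

consumer = PushConsumer('example_consumer_group')
consumer.set_name_server_address('127.0.0.1:9876')
consumer.subscribe('example_topic', on_message)
consumer.start()
time.sleep(30)   # keep the process alive long enough to receive messages
consumer.shutdown()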
avg_line_length: 33.518182 | max_line_length: 119 | alphanum_fraction: 0.662395

hexsha: 07000e84a07cd073afcf1b3f46f582afd8035244 | size: 940 | ext: py | lang: Python
max_stars_repo: sveetch/sveedocuments @ c829ec78da79f8735935548ca881574cb8a8dfd2 | path: sveedocuments/urls.py | licenses: ["MIT"] | stars: 1 (2015-03-28T19:19:34.000Z to 2015-03-28T19:19:34.000Z)
max_issues_repo: sveetch/sveedocuments @ c829ec78da79f8735935548ca881574cb8a8dfd2 | path: sveedocuments/urls.py | licenses: ["MIT"] | issues: 10 (2015-01-10T01:03:45.000Z to 2016-01-16T11:04:42.000Z)
max_forks_repo: sveetch/sveedocuments @ c829ec78da79f8735935548ca881574cb8a8dfd2 | path: sveedocuments/urls.py | licenses: ["MIT"] | forks: null (dates: null)
content:
# -*- coding: utf-8 -*-
"""
Root URL map for the application.
"""
from django.conf.urls import *
from sveedocuments.models import ATTACHMENTS_WITH_SENDFILE
from sveedocuments.views.page import (
HelpPageView, PageIndexView, PageDetailsView,
PageSourceView
)
urlpatterns = patterns('',
url(r'^$', PageIndexView.as_view(), name='index'),
(r'^board/', include('sveedocuments.urls_board')),
url(r'^help/$', HelpPageView.as_view(), name='help'),
url(r'^(?P<slug>[-\w]+)/$', PageDetailsView.as_view(), name='page-details'),
url(r'^(?P<slug>[-\w]+)/source/$', PageSourceView.as_view(), name='page-source'),
)
if ATTACHMENTS_WITH_SENDFILE:
from sveedocuments.views.attachment import AttachmentProtectedDownloadView
urlpatterns += patterns('',
url(r'^(?P<slug>[-\w]+)/attachment/(?P<attachment_id>\d+)/$', AttachmentProtectedDownloadView.as_view(), name='page-attachment-download'),
)
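For illustration, these named routes would typically be resolved elsewhere in the project roughly as follows; the slug value is an assumption, the example assumes the app's URLs are included without a namespace, and on Django versions old enough to still ship patterns() the reverse helper lives in django.core.urlresolvers (django.urls on newer releases).

from django.core.urlresolvers import reverse  # from django.urls import reverse on newer Django

index_url = reverse('index')
help_url = reverse('help')
details_url = reverse('page-details', kwargs={'slug': 'example-page'})
source_url = reverse('page-source', kwargs={'slug': 'example-page'})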
avg_line_length: 32.413793 | max_line_length: 146 | alphanum_fraction: 0.674468

hexsha: 2314e361eed31093646693abc2eab49b6f2260e4 | size: 310 | ext: py | lang: Python
max_stars_repo: Arsen-chechen/vkwave @ 71a450c50fd23b4f80f6e69e27a8d78d0ab2da95 | path: vkwave/bots/core/dispatching/filters/__init__.py | licenses: ["MIT"] | stars: null (dates: null)
max_issues_repo: Arsen-chechen/vkwave @ 71a450c50fd23b4f80f6e69e27a8d78d0ab2da95 | path: vkwave/bots/core/dispatching/filters/__init__.py | licenses: ["MIT"] | issues: null (dates: null)
max_forks_repo: Arsen-chechen/vkwave @ 71a450c50fd23b4f80f6e69e27a8d78d0ab2da95 | path: vkwave/bots/core/dispatching/filters/__init__.py | licenses: ["MIT"] | forks: null (dates: null)
content:
from .base import BaseFilter # noqa: F401
from .builtin import (
EventTypeFilter,
TextFilter,
RegexFilter,
FromMeFilter,
PayloadFilter,
ChatActionFilter,
CommandsFilter,
MessageFromConversationTypeFilter,
) # noqa: F401
from .cast import caster as filter_caster # noqa: F401
avg_line_length: 23.846154 | max_line_length: 55 | alphanum_fraction: 0.722581

hexsha: 73021bec10038f0adacd686e341a51187cae4f65 | size: 1,890 | ext: py | lang: Python
max_stars_repo: fpetitzon/effective_dimension @ 5d9a9b638967bee5ff848c9564f4c90849afc5ca | path: Eigenvalue_distribution_plots/appendix_figure/generate_data/fisher_easy_qnn.py | licenses: ["Apache-2.0"] | stars: 19 (2021-05-02T11:42:19.000Z to 2022-03-24T16:26:06.000Z)
max_issues_repo: yuguoshao/QDNN_eDim @ 1f6384422cfc9e4d05be9c678fd6e7cf65b3bd4f | path: Eigenvalue_distribution_plots/appendix_figure/generate_data/fisher_easy_qnn.py | licenses: ["Apache-2.0"] | issues: null (dates: null)
max_forks_repo: yuguoshao/QDNN_eDim @ 1f6384422cfc9e4d05be9c678fd6e7cf65b3bd4f | path: Eigenvalue_distribution_plots/appendix_figure/generate_data/fisher_easy_qnn.py | licenses: ["Apache-2.0"] | forks: 12 (2021-06-24T16:53:17.000Z to 2022-03-24T16:26:09.000Z)
content:
from effective_dimension import Model, EffectiveDimension, QuantumNeuralNetwork
from qiskit.circuit.library import ZFeatureMap, RealAmplitudes
import numpy as np
# This code generates the data for the easy quantum models' Fisher information
# eigenvalue distribution plot in the Supplementary Information figure.
# Global variables
n = [1000, 2000, 8000, 10000, 40000, 60000, 100000, 150000, 200000, 500000, 1000000, 10000000, 10000000000, 10000000000000]
blocks = 9
num_inputs = 100
num_thetas = 100
###################################################################################
num_qubits = 6
fm = ZFeatureMap(feature_dimension=num_qubits, reps=1)
circ = RealAmplitudes(num_qubits, reps=blocks)
qnet = QuantumNeuralNetwork(var_form=circ, feature_map=fm)
ed = EffectiveDimension(qnet, num_thetas=num_thetas, num_inputs=num_inputs)
f, trace = ed.get_fhat()
effdim = ed.eff_dim(f, n)
np.save("6qubits_9layer_f_hats_easy.npy", f)
####################################################################################
num_qubits = 8
fm = ZFeatureMap(feature_dimension=num_qubits, reps=1)
circ = RealAmplitudes(num_qubits, reps=blocks)
qnet = QuantumNeuralNetwork(var_form=circ, feature_map=fm)
ed = EffectiveDimension(qnet, num_thetas=num_thetas, num_inputs=num_inputs)
f, trace = ed.get_fhat()
effdim = ed.eff_dim(f, n)
np.save("8qubits_9layer_f_hats_easy.npy", f)
####################################################################################
num_qubits = 10
fm = ZFeatureMap(feature_dimension=num_qubits, reps=1)
circ = RealAmplitudes(num_qubits, reps=blocks)
qnet = QuantumNeuralNetwork(var_form=circ, feature_map=fm)
ed = EffectiveDimension(qnet, num_thetas=num_thetas, num_inputs=num_inputs)
f, trace = ed.get_fhat()
effdim = ed.eff_dim(f, n)
np.save("10qubits_9layer_f_hats_easy.npy", f)
####################################################################################
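Since the three blocks above differ only in num_qubits, an equivalent and more compact form is a loop over the qubit counts; this is just a sketch reusing the imports and module-level variables already defined in the script above, not part of the original code.

for num_qubits in (6, 8, 10):
    fm = ZFeatureMap(feature_dimension=num_qubits, reps=1)
    circ = RealAmplitudes(num_qubits, reps=blocks)
    qnet = QuantumNeuralNetwork(var_form=circ, feature_map=fm)
    ed = EffectiveDimension(qnet, num_thetas=num_thetas, num_inputs=num_inputs)
    f, trace = ed.get_fhat()
    effdim = ed.eff_dim(f, n)
    np.save("%dqubits_9layer_f_hats_easy.npy" % num_qubits, f)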
avg_line_length: 45 | max_line_length: 123 | alphanum_fraction: 0.654497

hexsha: 39b4407bbf9ea35defa33fd9822c57b3f6bb88bc | size: 5,951 | ext: py | lang: Python
max_stars_repo: nayslaa/naysla @ 0352971c5cd6389d0eafaf398cdc83025a46f730 | path: userbot/modules/whois.py | licenses: ["Naumen", "Condor-1.1", "MS-PL"] | stars: 1 (2022-02-21T18:40:44.000Z to 2022-02-21T18:40:44.000Z)
max_issues_repo: nayslaa/naysla @ 0352971c5cd6389d0eafaf398cdc83025a46f730 | path: userbot/modules/whois.py | licenses: ["Naumen", "Condor-1.1", "MS-PL"] | issues: null (dates: null)
max_forks_repo: nayslaa/naysla @ 0352971c5cd6389d0eafaf398cdc83025a46f730 | path: userbot/modules/whois.py | licenses: ["Naumen", "Condor-1.1", "MS-PL"] | forks: null (dates: null)
content:
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
# The entire source code is OSSRPL except 'whois' which is MPL
# License: MPL and OSSRPL
""" Userbot module for getiing info about any user on Telegram(including you!). """
import os
from telethon.tl.functions.photos import GetUserPhotosRequest
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import MessageEntityMentionName
from telethon.utils import get_input_location
from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY
from userbot.events import register
@register(pattern=".whois(?: |$)(.*)", outgoing=True)
async def who(event):
    # Prevent the whois command from being used in channels
    if event.is_channel and not event.is_group:
        await event.edit("`whois command isn't permitted on channels`")
return
await event.edit(
"`Sit tight while I steal some data from *Global Network Zone*...`")
if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
replied_user = await get_user(event)
try:
photo, caption = await fetch_info(replied_user, event)
except AttributeError:
        await event.edit("`Could not fetch info of that user.`")
return
message_id_to_reply = event.message.reply_to_msg_id
if not message_id_to_reply:
message_id_to_reply = None
try:
await event.client.send_file(event.chat_id,
photo,
caption=caption,
link_preview=False,
force_document=False,
reply_to=message_id_to_reply,
parse_mode="html")
if not photo.startswith("http"):
os.remove(photo)
await event.delete()
except TypeError:
await event.edit(caption, parse_mode="html")
async def get_user(event):
""" Get the user from argument or replied message. """
if event.reply_to_msg_id and not event.pattern_match.group(1):
previous_message = await event.get_reply_message()
replied_user = await event.client(
GetFullUserRequest(previous_message.from_id))
else:
user = event.pattern_match.group(1)
if user.isnumeric():
user = int(user)
if not user:
self_user = await event.client.get_me()
user = self_user.id
if event.message.entities is not None:
probable_user_mention_entity = event.message.entities[0]
if isinstance(probable_user_mention_entity,
MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
replied_user = await event.client(GetFullUserRequest(user_id))
return replied_user
try:
user_object = await event.client.get_entity(user)
replied_user = await event.client(
GetFullUserRequest(user_object.id))
except (TypeError, ValueError) as err:
await event.edit(str(err))
return None
return replied_user
async def fetch_info(replied_user, event):
""" Get details from the User object. """
replied_user_profile_photos = await event.client(
GetUserPhotosRequest(user_id=replied_user.user.id,
offset=42,
max_id=0,
limit=80))
replied_user_profile_photos_count = "Person needs help with uploading profile picture."
try:
replied_user_profile_photos_count = replied_user_profile_photos.count
except AttributeError as e:
pass
user_id = replied_user.user.id
first_name = replied_user.user.first_name
last_name = replied_user.user.last_name
try:
dc_id, _ = get_input_location(replied_user.profile_photo)
except Exception as e:
dc_id = "Couldn't fetch DC ID!"
location = str(e)
common_chat = replied_user.common_chats_count
username = replied_user.user.username
user_bio = replied_user.about
is_bot = replied_user.user.bot
restricted = replied_user.user.restricted
verified = replied_user.user.verified
photo = await event.client.download_profile_photo(user_id,
TEMP_DOWNLOAD_DIRECTORY +
str(user_id) + ".jpg",
download_big=True)
first_name = first_name.replace(
"\u2060", "") if first_name else ("This User has no First Name")
last_name = last_name.replace(
"\u2060", "") if last_name else ("This User has no Last Name")
username = "@{}".format(username) if username else (
"This User has no Username")
user_bio = "This User has no About" if not user_bio else user_bio
caption = "<b>USER INFO:</b>\n\n"
caption += f"First Name: {first_name}\n"
caption += f"Last Name: {last_name}\n"
caption += f"Username: {username}\n"
caption += f"Data Centre ID: {dc_id}\n"
caption += f"Number of Profile Pics: {replied_user_profile_photos_count}\n"
caption += f"Is Bot: {is_bot}\n"
caption += f"Is Restricted: {restricted}\n"
caption += f"Is Verified by Telegram: {verified}\n"
caption += f"ID: <code>{user_id}</code>\n\n"
caption += f"Bio: \n<code>{user_bio}</code>\n\n"
caption += f"Common Chats with this user: {common_chat}\n"
caption += "Permanent Link To Profile: "
caption += f"<a href=\"tg://user?id={user_id}\">{first_name}</a>"
return photo, caption
CMD_HELP.update({
"whois":
".whois <username> or reply to someones text with .whois\
\nUsage: Gets info of an user."
})
avg_line_length: 37.664557 | max_line_length: 91 | alphanum_fraction: 0.62729