max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
Misc/CZip/dictzip.py | linxiaohui/CodeRepoPy | 0 | 12763951 | <filename>Misc/CZip/dictzip.py
# -*- coding:UTF-8 -*-
'''
C 2009
'''
"""
Functions that provide transparent read-only access to dictzipped files
"""
import struct, sys
import zlib
import __builtin__
# gzip header FLG bits (RFC 1952): text hint, header CRC16 present,
# extra field present, original file name present, comment present.
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
# Internal access-mode markers for DictzipFile (only READ is supported).
READ, WRITE = 1, 2
def write32(output, value):
    """Write *value* to *output* as a 4-byte little-endian signed integer."""
    packed = struct.pack("<l", value)
    output.write(packed)
def write32u(output, value):
    """Write *value* to *output* as a 4-byte little-endian unsigned integer."""
    packed = struct.pack("<L", value)
    output.write(packed)
def read32(input):
    """Read 4 bytes from *input* and decode them as a little-endian signed integer."""
    raw = input.read(4)
    (value,) = struct.unpack("<l", raw)
    return value
def open(filename, mode="rb", compresslevel=9):
    # Convenience constructor mirroring gzip.open(); note that it shadows the
    # builtin open() within this module.  `compresslevel` is forwarded but is
    # effectively unused because DictzipFile only supports reading.
    return DictzipFile(filename, mode, compresslevel)
class DictzipFile:
    """Read-only, seekable file-like access to a dictzip-compressed file.

    A dictzip file is a gzip file whose FEXTRA header carries an 'RA'
    (random access) sub-field listing the compressed length of every chunk,
    so any uncompressed offset can be served by inflating only the chunks
    that cover it.  Recently decompressed chunks are kept in a small
    FIFO cache of ``cachesize`` entries.

    Note: this module targets Python 2 (``__builtin__``, ``xrange``,
    ``sys.maxint``, integer ``/`` division).
    """
    myfileobj = None  # set only when this object opened the file itself

    def __init__(self, filename=None, mode=None,
                 compresslevel=9, fileobj=None, cachesize=2):
        if fileobj is None:
            fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
        if filename is None:
            if hasattr(fileobj, 'name'): filename = fileobj.name
            else: filename = ''
        if mode is None:
            if hasattr(fileobj, 'mode'): mode = fileobj.mode
            else: mode = 'rb'
        if mode[0:1] == 'r':
            self.mode = READ
            self.filename = filename
        else:
            # Only reading is supported; `compresslevel` is accepted purely
            # for gzip API symmetry and never used.
            raise ValueError("Mode " + mode + " not supported")
        self.fileobj = fileobj
        self._read_gzip_header()
        self.pos = 0                # current uncompressed position
        self.cachesize = cachesize  # max number of cached decompressed chunks
        self.cache = {}             # chunk index -> decompressed data
        self.cachekeys = []         # insertion order, used for eviction

    def __repr__(self):
        s = repr(self.fileobj)
        return '<dictzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'

    def _read_gzip_header(self):
        """Parse the gzip header and the mandatory 'RA' dictzip extra field.

        Sets self.chlen (uncompressed chunk length), self.chunks (list of
        (compressed offset, compressed length) pairs), self._lastpos (total
        compressed payload size) and self._firstpos (file offset of the
        first compressed chunk).  Raises IOError when the file is not a
        dictzip file.
        """
        magic = self.fileobj.read(2)
        if magic != '\037\213':
            raise IOError('Not a gzipped file')
        method = ord(self.fileobj.read(1))
        if method != 8:
            raise IOError('Unknown compression method')
        flag = ord(self.fileobj.read(1))
        # Skip modtime (4 bytes), extra flags (1) and OS byte (1).
        self.fileobj.read(6)
        if flag & FEXTRA:
            # Read the extra field (16-bit little-endian length prefix).
            xlen = ord(self.fileobj.read(1))
            xlen = xlen + 256 * ord(self.fileobj.read(1))
            extra = self.fileobj.read(xlen)
            # Scan sub-fields until the 'RA' (random access) one is found.
            while 1:
                l = ord(extra[2]) + 256 * ord(extra[3])
                e = extra[:4 + l]
                if e[:2] != 'RA':
                    extra = extra[4 + l:]
                    if not extra:
                        # Raising a string is illegal since Python 2.6;
                        # use a real exception instead.
                        raise IOError("Missing dictzip extension")
                    continue
                else:
                    break
            length = ord(extra[2]) + 256 * ord(extra[3])   # sub-field length
            ver = ord(extra[4]) + 256 * ord(extra[5])      # dictzip version
            self.chlen = ord(extra[6]) + 256 * ord(extra[7])  # uncompressed chunk size
            chcnt = ord(extra[8]) + 256 * ord(extra[9])       # number of chunks
            p = 10
            lens = []
            for i in xrange(chcnt):
                thischlen = ord(extra[p]) + 256 * ord(extra[p + 1])
                p = p + 2
                lens.append(thischlen)
            # Convert per-chunk compressed lengths into (offset, length) pairs.
            chpos = 0
            self.chunks = []
            for i in lens:
                self.chunks.append((chpos, i))
                chpos = chpos + i
            self._lastpos = chpos
        else:
            raise IOError("Missing dictzip extension")
        if flag & FNAME:
            # Read and discard a null-terminated string containing the filename
            while (1):
                s = self.fileobj.read(1)
                if not s or s == '\000': break
        if flag & FCOMMENT:
            # Read and discard a null-terminated string containing a comment
            while (1):
                s = self.fileobj.read(1)
                if not s or s == '\000': break
        if flag & FHCRC:
            self.fileobj.read(2)  # Read & discard the 16-bit header CRC
        self._firstpos = self.fileobj.tell()

    def write(self, data):
        raise ValueError("write() not supported on DictzipFile object")

    def writelines(self, lines):
        raise ValueError("writelines() not supported on DictzipFile object")

    def _readchunk(self, n):
        """Return decompressed chunk *n* ('' past EOF), maintaining the cache."""
        if n >= len(self.chunks):
            return ''
        if self.cache.has_key(n):
            return self.cache[n]
        self.fileobj.seek(self._firstpos + self.chunks[n][0])
        s = self.fileobj.read(self.chunks[n][1])
        # Chunks are raw deflate streams (no zlib header), hence -MAX_WBITS.
        dobj = zlib.decompressobj(-zlib.MAX_WBITS)
        output = dobj.decompress(s)
        del dobj
        self.cache[n] = output
        self.cachekeys.append(n)
        # Evict the oldest cached chunk once over capacity.
        if len(self.cachekeys) > self.cachesize:
            try:
                del self.cache[self.cachekeys[0]]
                del self.cachekeys[0]
            except KeyError:
                pass
        return output

    def read(self, size=-1):
        """Read up to *size* uncompressed bytes (everything left if size == -1)."""
        firstchunk = self.pos / self.chlen  # Python 2 integer division
        offset = self.pos - firstchunk * self.chlen
        if size == -1:
            # One past the last chunk; _readchunk yields '' beyond the end.
            lastchunk = len(self.chunks) + 1
            npos = sys.maxint
        else:
            lastchunk = (self.pos + size) / self.chlen
            finish = offset + size
            npos = self.pos + size
        buf = ""
        for i in range(firstchunk, lastchunk + 1):
            buf = buf + self._readchunk(i)
        if size == -1:
            # Bug fix: the original sliced buf[offset:0], which is always ''
            # — a read-to-end must return everything from offset onwards.
            r = buf[offset:]
        else:
            r = buf[offset:finish]
        self.pos = npos
        return r

    def close(self):
        self.fileobj.close()

    def __del__(self):
        # __init__ may have raised before self.fileobj was assigned; guard so
        # garbage collection does not spew a secondary AttributeError.
        if getattr(self, 'fileobj', None) is not None:
            self.close()

    def flush(self):
        pass

    def seek(self, pos, whence=0):
        """Move the uncompressed read position; whence 2 (from end) is unsupported."""
        if whence == 0:
            self.pos = pos
        elif whence == 1:
            self.pos = self.pos + pos
        elif whence == 2:
            # Was a (illegal) string raise in the original.
            raise ValueError("Seeking from end of file not supported")

    def tell(self):
        return self.pos

    def isatty(self):
        return 0

    def readline(self, size=-1):
        """Read one line, at most *size* bytes when size >= 0."""
        if size < 0: size = sys.maxint
        bufs = []
        readsize = min(100, size)  # Read from the file in small chunks
        while 1:
            if size == 0:
                return ''.join(bufs)  # Return resulting line
            c = self.read(readsize)
            i = c.find('\n')
            if i >= 0:
                # Rewind the logical position to just past the newline.
                self.pos = self.pos - len(c) + i + 1
            # We set i=size to break out of the loop under two
            # conditions: 1) there's no newline, and the chunk is
            # larger than size, or 2) there is a newline, but the
            # resulting line would be longer than 'size'.
            if i == -1 and len(c) > size: i = size - 1
            elif size <= i: i = size - 1
            if i >= 0 or c == '':
                bufs.append(c[:i + 1])  # Add portion of last chunk
                return ''.join(bufs)    # Return resulting line
            # Append chunk to list, decrease 'size',
            bufs.append(c)
            size = size - len(c)
            readsize = min(size, readsize * 2)

    def readlines(self, sizehint=0):
        """Read lines until roughly *sizehint* bytes (all lines if <= 0)."""
        # Negative numbers result in reading all the lines
        if sizehint <= 0: sizehint = sys.maxint
        L = []
        while sizehint > 0:
            line = self.readline()
            if line == "": break
            L.append(line)
            sizehint = sizehint - len(line)
        return L

    def extract(self, out, size):
        """Copy *size* uncompressed bytes from the start of the file to *out*."""
        self.seek(0)
        data = self.read(size)
        print(len(data))
        out.write(data)
        out.flush()
        self.seek(0)

    def runtest(self):
        # Debug helper: dump chunk length and first compressed-data offset.
        print(self.chlen)
        print(self._firstpos)
if __name__ == '__main__':
pass
| 2.75 | 3 |
boards/Pynq-Z1/mqttsn/mqttsn.py | stephenneuendorffer/PYNQ-Networking | 40 | 12763952 | # Copyright (c) 2017, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pynq
import pynq.lib
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, Xilinx"
__email__ = "<EMAIL>"
class MqttsnOverlay(pynq.Overlay):
    """MQTTSN overlay.

    Designed to read Pmod TMP2 sensor values and publish them with the
    MQTTSN protocol, using PL acceleration.  The PMODB constant is set up
    manually here (instead of being extracted from the overlay) to stay
    compatible with the PYNQ image_v2.0 release; interrupt pins are
    omitted for simplicity.

    Attributes
    ----------
    leds : AxiGPIO
        4-bit output GPIO for interacting with the green LEDs LD0-3
    buttons : AxiGPIO
        4-bit input GPIO for interacting with the buttons BTN0-3
    switches : AxiGPIO
        2-bit input GPIO for interacting with the switches SW0 and SW1
    rgbleds : [pynq.board.RGBLED]
        Wrapper for GPIO for LD4 and LD5 multicolour LEDs
    """

    def __init__(self, bitfile, **kwargs):
        super().__init__(bitfile, **kwargs)
        if not self.is_loaded():
            return
        # Tag the microblaze subsystems with their connector types and
        # publish their mb_info dictionaries under the usual constants.
        self.iop_pmoda.mbtype = "Pmod"
        self.iop_pmodb.mbtype = "Pmod"
        self.iop_arduino.mbtype = "Arduino"
        self.PMODA = self.iop_pmoda.mb_info
        self.PMODB = self.iop_pmodb.mb_info
        self.ARDUINO = self.iop_arduino.mb_info
        self.audio = self.audio_direct_0
        # Wire up the on-board GPIO channels, then set width and direction.
        self.leds = self.leds_gpio.channel1
        self.switches = self.switches_gpio.channel1
        self.buttons = self.btns_gpio.channel1
        for channel, width, direction in ((self.leds, 4, "out"),
                                          (self.switches, 2, "in"),
                                          (self.buttons, 4, "in")):
            channel.setlength(width)
            channel.setdirection(direction)
        # LD0-3 are plain LEDs; LD4/LD5 are the multicolour ones.
        self.rgbleds = [None] * 4 + [pynq.lib.RGBLED(i) for i in (4, 5)]
| 1.125 | 1 |
venv/Lib/site-packages/pandas/tests/extension/arrow/test_string.py | OliviaNabbosa89/Disaster_Responses | 1 | 12763953 | import pytest
import pandas as pd
pytest.importorskip("pyarrow", minversion="0.13.0")
from .arrays import ArrowStringDtype # isort:skip
def test_constructor_from_list():
    # GH 27673: building a Series from a plain Python list with the Arrow
    # string dtype must preserve that dtype on the result.
    ser = pd.Series(["E"], dtype=ArrowStringDtype())
    assert isinstance(ser.dtype, ArrowStringDtype)
| 2.34375 | 2 |
Plotjuros.py | Prosantosgui/plotJuros | 0 | 12763954 | import matplotlib.pyplot as plt
# Interactive parameters for the compound-interest simulation below:
# starting balance, yield per period (percent), contribution added each
# period, and the total number of periods to simulate.
montante_inicial = float(input('Montante inicial igual a: '))
rendimento_periodo = float(input('Rendimento por período igual a(%): '))
valor_aporte = float(input('Valor do aporte igual a: '))
total_periodos = int(input('Total de períodos: '))
def calcular_juros_compostos(montante_inicial, rendimento_periodo, valor_aporte, total_periodos):
    """Simulate compound interest with periodic contributions and plot it.

    Each period the balance grows by ``rendimento_periodo`` percent and then
    ``valor_aporte`` is added.  The per-period balance is printed, collected,
    and finally shown as a matplotlib line chart.
    """
    periodos = []
    montante = []
    for i in range(total_periodos):
        # Apply the period yield, then add the contribution.
        montante_inicial = montante_inicial + (montante_inicial / 100) * rendimento_periodo + valor_aporte
        print(f'\t{i+1} periodo, R$ {round(montante_inicial,2): .2f}')
        periodos.append(i + 1)
        montante.append(round(montante_inicial, 2))
    # (Unused eixo_x/eixo_y aliases from the original were removed.)
    plt.plot(periodos, montante, 'p-g')
    plt.title('Evolução do valor acumulado')
    plt.xlabel('Períodos')
    plt.ylabel('Montante')
    plt.show()
calcular_juros_compostos(montante_inicial, rendimento_periodo, valor_aporte, total_periodos) | 3.546875 | 4 |
demo_cumtd_gmaps/py/compute.py | intrepiduiuc/cs-205 | 0 | 12763955 | <reponame>intrepiduiuc/cs-205
# Import the libraries we will use in this file
import urllib.request
import urllib.parse
import json
import datetime
| 1.507813 | 2 |
bytejection/core.py | NaleRaphael/bytejection | 0 | 12763956 | import dis
import re
import sys
from types import CodeType, FunctionType, ModuleType
__all__ = [
'update_function', 'COManipulator',
'inject_function'
]
i2b = lambda x: x.to_bytes(1, byteorder=sys.byteorder)
b2i = lambda x: int.from_bytes(x, byteorder=sys.byteorder)
OPMAP = dis.opmap
IOPMAP = {v: k for k, v in OPMAP.items()}
INS_END = i2b(0) # INSTRUCTION_END
CO_ATTRS = [
'co_argcount', 'co_kwonlyargcount', 'co_nlocals',
'co_stacksize', 'co_flags', 'co_code', 'co_consts',
'co_names', 'co_varnames', 'co_filename', 'co_name',
'co_firstlineno', 'co_lnotab', 'co_freevars', 'co_cellvars'
]
COMP_INST = {
'LOAD_GLOBAL': ['LOAD_GLOBAL', 'LOAD_NAME'],
'LOAD_FAST': ['LOAD_FAST', 'LOAD_NAME'],
}
def _update_meta_globals(f, val):
attr = getattr(f, '__globals__')
if not isinstance(val, dict):
raise TypeError('Given `val` should be a `dict`.')
attr.update(val)
return attr
def _update_meta_name(f, val):
if not isinstance(val, str):
raise TypeError('Given `val` should be a `str`.')
return val
def _update_meta_defaults(f, val):
attr = getattr(f, '__defaults__')
if not isinstance(val, tuple):
raise TypeError('Given `val` should be a `tuple`.')
if len(val) != len(attr):
raise ValueError('Length of given `val` does not meet with the orignal one.')
attr.update(val)
return attr
def _update_meta_closure(f, val):
if not isinstance(val, tuple):
raise TypeError('Given `val` should be a `tuple`')
return val
# Dispatch table: metadata attribute name -> validator/updater function.
UPDATE_META = {
    '__globals__': _update_meta_globals,
    '__name__': _update_meta_name,
    '__defaults__': _update_meta_defaults,
    '__closure__': _update_meta_closure,
}

# Metadata attributes that update_object() forwards to the rebuilt object,
# keyed by type(obj).__name__ ('function' or 'module').
META = {
    'function': [
        '__globals__', '__name__', '__defaults__', '__closure__'
    ],
    'module': [
        '__name__'
    ],
}
def _get_code(obj):
if hasattr(obj, '__code__'):
return getattr(obj, '__code__')
elif hasattr(obj, '__loader__'):
return getattr(obj, '__loader__').get_code(None)
else:
raise ValueError('Cannot get code object from this object.')
def update_object(obj, _type, **kwargs):
    """Rebuild *obj* (function or module) as *_type*, overriding selected
    co_* attributes and metadata attributes from *kwargs*."""
    old_code = _get_code(obj)
    new_code = CodeType(*(kwargs.get(name, getattr(old_code, name)) for name in CO_ATTRS))
    meta = []
    for meta_name in META[type(obj).__name__]:
        override = kwargs.get(meta_name)
        if override is None:
            meta.append(getattr(obj, meta_name))
        else:
            meta.append(UPDATE_META[meta_name](obj, override))
    return _type(*([new_code] + meta))
def update_function(f, **kwargs):
    # Thin wrapper: rebuild function *f* with overridden co_*/meta attributes.
    return update_object(f, FunctionType, **kwargs)
def update_module(mod, **kwargs):
    # Thin wrapper: rebuild module *mod* with overridden co_*/meta attributes.
    return update_object(mod, ModuleType, **kwargs)
def iscompatible(tgt, src):
    """True when opcode *tgt* equals *src* or is a legal substitute per COMP_INST."""
    if tgt == src:
        return True
    return IOPMAP[tgt] in COMP_INST[IOPMAP[src]]
def search_name(co, inst, idx):
    """Map a LOAD opcode to (mnemonic, code-object name attribute, idx).

    Returns None for opcodes other than LOAD_GLOBAL/LOAD_FAST; *co* is
    currently unused but kept for interface stability.
    """
    table = {
        OPMAP['LOAD_GLOBAL']: ('LOAD_GLOBAL', 'co_names'),
        OPMAP['LOAD_FAST']: ('LOAD_FAST', 'co_varnames'),
    }
    entry = table.get(inst)
    if entry is None:
        return None
    return (entry[0], entry[1], idx)
def inject_load_inst(co, old_name, new_name, new_co_names):
    """Rewrite the LOAD sequences for dotted *old_name* to load *new_name*.

    The first component of the new dotted name is loaded with LOAD_GLOBAL
    and the remaining components with LOAD_ATTR, using indices into
    *new_co_names*.  Returns the rewritten co_code bytes.

    Bug fix: the original iterated ``finditer`` spans computed over the
    original bytecode while splicing into a mutated copy, so when the
    replacement length differed from the match length (the normal case for
    dotted names) every match after the first was applied at a stale,
    corrupt offset.  ``re.sub`` performs all replacements consistently.
    """
    list_old_name = old_name.split('.')
    list_new_name = new_name.split('.')
    # Either LOAD_GLOBAL or LOAD_NAME may have been used for each part.
    inst_load = '([{}|{}])'.format(
        i2b(OPMAP['LOAD_GLOBAL']).decode(), i2b(OPMAP['LOAD_NAME']).decode()
    )
    pattern = ''.join([inst_load + i2b(co.co_names.index(part)).decode() for part in list_old_name])
    regex = re.compile(pattern.encode())
    # The replacement is the same for every match, so build it once.
    inst_load_global = i2b(OPMAP['LOAD_GLOBAL']).decode()
    inst_load_attr = i2b(OPMAP['LOAD_ATTR']).decode()
    payload = ''.join([inst_load_global + i2b(new_co_names.index(name)).decode() for name in list_new_name[:-1]])
    payload += inst_load_attr + i2b(new_co_names.index(list_new_name[-1])).decode()
    payload_bytes = payload.encode()
    # A callable replacement avoids backslash-escape interpretation in the payload.
    return regex.sub(lambda _m: payload_bytes, co.co_code)
def inject_bytecode(co, target_name, bc_payload):
    """Replace every LOAD sequence for dotted *target_name* in co.co_code
    with the raw bytecode *bc_payload* and return the new bytes.

    Fixes from the original: a duplicated ``inst_load = inst_load =``
    assignment and two unused locals were removed, and the finditer-span
    splice loop (whose offsets go stale as soon as the payload length
    differs from the match length) was replaced with a single ``re.sub``.
    """
    list_target_name = target_name.split('.')
    # Either LOAD_GLOBAL or LOAD_NAME may have been used for each part.
    inst_load = '([{}|{}])'.format(
        i2b(OPMAP['LOAD_GLOBAL']).decode(), i2b(OPMAP['LOAD_NAME']).decode()
    )
    pattern = ''.join([inst_load + i2b(co.co_names.index(part)).decode() for part in list_target_name])
    regex = re.compile(pattern.encode())
    # A callable replacement avoids backslash-escape interpretation in bc_payload.
    return regex.sub(lambda _m: bc_payload, co.co_code)
def inject_function(f, target_name, payload_tuple):
    """Patch *f* so references to dotted *target_name* resolve to an injected payload.

    A MAKE_FUNCTION preamble is prepended that materialises the payload
    function into a new local variable, and every LOAD sequence for
    *target_name* is replaced by a LOAD_FAST of that local.
    """
    co = f.__code__
    payload_name, payload = payload_tuple
    # Manipulate `co_consts`: append the payload's code object and its
    # '<outer>.<locals>.<payload>' qualified name.
    new_co_consts = co.co_consts + tuple([
        payload.__code__, '{}.<locals>.{}'.format(co.co_name, payload_name)
    ])
    idx_payload_co = len(new_co_consts) - 2
    idx_payload_local_name = len(new_co_consts) - 1
    # Manipulate `co_varnames`: the payload becomes a new local variable.
    new_co_varnames = co.co_varnames + tuple([payload_name])
    idx_payload_varnames = len(new_co_varnames) - 1
    # Bytecode template that builds the payload function and stores it
    # into the new local slot.
    template_make_function = [
        (OPMAP['LOAD_CONST'], idx_payload_co),
        (OPMAP['LOAD_CONST'], idx_payload_local_name),
        (OPMAP['MAKE_FUNCTION'], 0),
        (OPMAP['STORE_FAST'], idx_payload_varnames),
    ]
    # NOTE: There should be a `0` as an ending instruction in Python 3.5
    if sys.version_info.major == 3 and sys.version_info.minor == 5:
        template_make_function = [v + (0,) for v in template_make_function]
    template_load_function = [
        (OPMAP['LOAD_FAST'], idx_payload_varnames),
    ]
    bc_make_function = b''.join([b''.join(list(map(i2b, v))) for v in template_make_function])
    bc_load_function = b''.join([b''.join(list(map(i2b, v))) for v in template_load_function])
    # Rewrite the call sites first, then prepend the MAKE_FUNCTION preamble.
    new_code = inject_bytecode(co, target_name, bc_load_function)
    new_code = b''.join([bc_make_function, new_code])
    return update_function(
        f,
        co_consts=new_co_consts,
        co_varnames=new_co_varnames,
        co_code=new_code,
    )
class COManipulator(object):
    """Rewrites code objects so name references are redirected to replacements."""

    def update_function(self, f, old_name, func_tuple, rename=False, **kwargs):
        """Return a copy of *f* with dotted *old_name* redirected to a new function.

        *func_tuple* is ``(new_dotted_name, new_callable)``.  When *rename*
        is true the returned function also takes the new name as its own.
        """
        co = f.__code__
        new_name, new_func = func_tuple
        splitted_old_name = old_name.split('.')
        splitted_new_name = new_name.split('.')
        if not all([part in co.co_names for part in splitted_old_name]):
            raise ValueError('Given `old_name` does not exist in {}.'.format(f))
        # Swap the final name component in co_names.
        idx = co.co_names.index(splitted_old_name[-1])
        new_co_names = (co.co_names[:idx] + (splitted_new_name[-1],) + co.co_names[idx+1:])
        # inject new namespaces (e.g. module, class...) and loading instruction (e.g. `LOAD_GLOBAL`...)
        new_co_code = co.co_code
        if len(splitted_new_name) > len(splitted_old_name):
            new_co_names = tuple(list(new_co_names) + splitted_new_name[:-1])
            new_co_code = inject_load_inst(co, old_name, new_name, new_co_names)
        # inject new modules / variables into the global scope of *f*
        new_globals = kwargs.get('__globals__', {})
        if new_name not in f.__globals__:
            new_globals.update({new_name: new_func})
        new_name = new_name if rename else f.__name__
        return update_function(
            f,
            co_names=new_co_names,
            co_name=new_name,
            co_code=new_co_code,
            __globals__=new_globals,
            __name__=new_name,
        )

    def patch_module(self, module, f, old_name, func_tuple, rename=False, **kwargs):
        """Build a new module code object where *f* is replaced by a patched copy.

        NOTE(review): *rename* is accepted but never used in this method.
        """
        payload_name, payload = func_tuple
        new_func = inject_function(f, old_name, func_tuple)
        co = _get_code(module)
        # find index of the target code object inside the module constants
        target_name = new_func.__code__.co_name
        index = None
        for i, v in enumerate(co.co_consts):
            co_name = getattr(v, 'co_name', None)
            if co_name is None or co_name != target_name:
                continue
            else:
                index = i
                break
        if index is None:
            raise RuntimeError('Desired function does not exist in given module.')
        # Splice the patched function's code object into the module constants.
        new_co_consts = tuple(
            co.co_consts[:index] + tuple([new_func.__code__]) + co.co_consts[index+1:]
        )
        payloads = {
            'co_consts': new_co_consts,
            'co_names': co.co_names + tuple([payload_name])
        }
        new_co_module = CodeType(
            *(kwargs.get(attr, getattr(co, attr) if attr not in payloads else payloads.get(attr))
              for attr in CO_ATTRS)
        )
        return new_co_module
| 1.984375 | 2 |
conanfile.py | fogo/conan-opus | 0 | 12763957 | # -*- coding: utf-8 -*-
from __future__ import print_function
from conans import AutoToolsBuildEnvironment, ConanFile, tools, VisualStudioBuildEnvironment
from conans.util import files
import os
class OpusConan(ConanFile):
    """Conan recipe for the Opus audio codec (static/shared, optional RTCD)."""
    name = "opus"
    version = "1.2.1"
    ZIP_FOLDER_NAME = "opus-{}".format(version)
    generators = "cmake"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        # 'rtcd' stands for runtime CPU detection. Opus has a series of
        # optimizations based on set of instructions available. Since with
        # conan it is distributed as a package, we can't assume end-user
        # available instructions so by default it enables this option.
        #
        # To recompile against your environment without runtime detection
        # just disable this option. For instance, you can use this install
        # line for this purpose:
        #
        # conan install opus/1.2.1@fogo/stable -o opus:rtcd=False --build=opus
        "rtcd": [True, False],
    }
    default_options = "shared=False", "fPIC=True", "rtcd=True"
    exports_sources = ["CMakeLists.txt"]
    url = "https://github.com/fogo/conan-opus"
    license = "http://opus-codec.org/license/"
    description = "Opus is a totally open, royalty-free, highly versatile " \
                  "audio codec. Opus is unmatched for interactive speech " \
                  "and music transmission over the Internet, but is also " \
                  "intended for storage and streaming applications. It is " \
                  "standardized by the Internet Engineering Task Force (" \
                  "IETF) as RFC 6716 which incorporated technology from " \
                  "Skype’s SILK codec and Xiph.Org’s CELT codec."
    # SHA-256 of the upstream opus-1.2.1.tar.gz used to verify the download.
    checksum = "cfafd339ccd9c5ef8d6ab15d7e1a412c054bf4cb4ecbbbcc78c12ef2def70732"

    def configure(self):
        # it is just C code, this is unnecessary
        del self.settings.compiler.libcxx

    def source(self):
        """Download, checksum-verify and unpack the upstream tarball."""
        # https://archive.mozilla.org/pub/opus/opus-1.2.1.tar.gz
        zip_name = "opus-{version}.tar.gz".format(version=self.version)
        tools.download(
            "https://archive.mozilla.org/pub/opus/{zip_name}".format(zip_name=zip_name),
            zip_name)
        tools.check_sha256(zip_name, self.checksum)
        tools.unzip(zip_name)
        os.unlink(zip_name)
        if self.settings.os != "Windows":
            # The unpack step may not preserve the execute bit on configure.
            self.run("chmod +x ./{}/configure".format(self.ZIP_FOLDER_NAME))

    def build(self):
        """Build with autotools on Unix-likes, MSBuild (VS2015 solution) on Windows."""
        with tools.chdir(self.ZIP_FOLDER_NAME):
            files.mkdir("_build")
            with tools.chdir("_build"):
                if not tools.os_info.is_windows:
                    args = []
                    if self.options.shared:
                        args.append("--enable-shared=yes")
                        args.append("--enable-static=no")
                    else:
                        args.append("--enable-shared=no")
                        args.append("--enable-static=yes")
                    # Note: as usual compiling w/ gcc 4.1 is a mess. On
                    # environment used for tests (docker image
                    # uilianries/conangcc41) I had to disable RTCD AND
                    # manually patch supported SSE to build x86 arch.
                    #
                    # It was failed configuration w/ RTCD enabled w/
                    # message below:
                    #
                    # checking How to get X86 CPU Info... configure: error: no supported Get CPU Info method, please disable run-time CPU capabilities detection or intrinsics
                    #
                    # If RTCD disabled, it seems it was still unable to
                    # detect correct SSE features and it ended up failing
                    # in posterior link step. That's why a few lines
                    # below there is a hackish line to add arbitrarily the
                    # most basic/old SIMD flags. It seems this is necessary
                    # because gcc4.1 only have a few of all expected SIMD
                    # flags by Opus.
                    #
                    # Reference about gcc4.1 flags:
                    # http://www.linuxcertif.com/man/1/gcc-4.1/
                    is_gcc41_x86 = self.settings.arch == "x86" and \
                        self.settings.compiler == "gcc" and \
                        self.settings.compiler.version == "4.1"
                    if is_gcc41_x86 or (not self.options.rtcd):
                        args.append("--disable-rtcd")
                    env_build = AutoToolsBuildEnvironment(self)
                    env_build.fpic = self.options.fPIC
                    if is_gcc41_x86:
                        env_build.flags.extend(["-msse", "-msse2"])
                    env_build.configure("..", args=args)
                    env_build.make()
                else:
                    # TODO: no idea how to apply rtcd option for Windows
                    # It enables RTCD based on definitions found by config.h
                    # file. Can we override it?
                    env_build = VisualStudioBuildEnvironment(self)
                    env_build.include_paths.append("../include")
                    with tools.environment_append(env_build.vars):
                        name = target = "opus"
                        msbuild = tools.msvc_build_command(
                            self.settings,
                            r"..\win32\VS2015\{}.sln".format(name),
                            targets=[target],
                            arch="Win32" if self.settings.arch == "x86" else "x64",
                            upgrade_project=True)
                        # TODO: msvc_build_command arch arg seems to have no effect!
                        msbuild += " /p:Platform={}".format("Win32" if self.settings.arch == "x86" else "x64")
                        command = "{vcvars} && {msbuild}".format(
                            vcvars=tools.vcvars_command(self.settings),
                            msbuild=msbuild)
                        self.run(command)

    def package(self):
        """Copy headers and the built libraries into the package folder."""
        self.copy(
            "*.h",
            dst="include",
            src="{basedir}/include".format(basedir=self.ZIP_FOLDER_NAME))
        if not tools.os_info.is_windows:
            self.copy(
                "*.a",
                dst="lib",
                src="{basedir}/_build/.libs".format(basedir=self.ZIP_FOLDER_NAME))
            self.copy(
                "*.so",
                dst="lib",
                src="{basedir}/_build/.libs".format(basedir=self.ZIP_FOLDER_NAME))
        else:
            self.copy(
                "*.dll",
                dst="lib",
                src="{basedir}/win32/".format(basedir=self.ZIP_FOLDER_NAME),
                keep_path=False)
            self.copy(
                "*.lib",
                dst="lib",
                src="{basedir}/win32/".format(basedir=self.ZIP_FOLDER_NAME),
                keep_path=False)

    def package_info(self):
        # Expose whatever libraries were packaged to consumers.
        self.cpp_info.libs = tools.collect_libs(self)
| 1.835938 | 2 |
tools/db_cli.py | fabric-testbed/ControlFramework | 0 | 12763958 | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: <NAME> (<EMAIL>)
import argparse
import logging
import traceback
from logging.handlers import RotatingFileHandler
from fim.graph.neo4j_property_graph import Neo4jGraphImporter, Neo4jPropertyGraph
from fabric_cf.actor.core.plugins.db.actor_database import ActorDatabase
from fabric_cf.actor.core.util.id import ID
class MainClass:
    """
    CLI interface to directly fetch information from postgres Database
    """

    def __init__(self, user: str, password: str, db: str, host: str = '127.0.0.1:5432'):
        """Set up logging, the actor database connection and Neo4j settings."""
        self.logger = logging.getLogger("db-cli")
        file_handler = RotatingFileHandler('./db_cli.log', backupCount=5, maxBytes=50000)
        logging.basicConfig(level=logging.DEBUG,
                            format="%(asctime)s [%(filename)s:%(lineno)d] [%(levelname)s] %(message)s",
                            handlers=[logging.StreamHandler(), file_handler])
        self.db = ActorDatabase(user=user, password=password, database=db, db_host=host, logger=self.logger)
        # NOTE(review): Neo4j endpoint, credentials and a user-specific
        # import path are hard-coded here — consider moving to configuration.
        self.neo4j_config = {"url": "neo4j://0.0.0.0:9687",
                             "user": "neo4j",
                             "pass": "password",
                             "import_host_dir": "/Users/kthare10/renci/code/fabric/ControlFramework/neo4j1/imports/",
                             "import_dir": "/imports"}

    def get_slices(self, email: str = None, slice_id: str = None, slice_name: str = None):
        """Print slices, filtered by id, owner email and/or name substring."""
        try:
            if slice_id is not None:
                slice_obj = self.db.get_slice(slice_id=ID(uid=slice_id))
                slice_list = [slice_obj]
            elif email is not None:
                slice_list = self.db.get_slice_by_email(email=email)
            else:
                slice_list = self.db.get_slices()
            if slice_list is not None and len(slice_list) > 0:
                for s in slice_list:
                    # With no name filter every fetched slice is shown.
                    show_slice = slice_name is None
                    if slice_name is not None:
                        show_slice = slice_name in s.get_name()
                    if show_slice:
                        print(s)
                        print()
            else:
                print(f"No slices found: {slice_list}")
        except Exception as e:
            print(f"Exception occurred while fetching slices: {e}")
            traceback.print_exc()

    def get_slice_topology(self, graph_id: str):
        """Load and print a slice model from Neo4j by its graph id."""
        try:
            neo4j_graph_importer = Neo4jGraphImporter(url=self.neo4j_config["url"],
                                                      user=self.neo4j_config["user"],
                                                      pswd=self.neo4j_config["pass"],
                                                      import_host_dir=self.neo4j_config["import_host_dir"],
                                                      import_dir=self.neo4j_config["import_dir"],
                                                      logger=self.logger)
            slice_model = Neo4jPropertyGraph(graph_id=graph_id, importer=neo4j_graph_importer)
            print(f"Slice Model: {slice_model}")
        except Exception as e:
            print(f"Exception occurred while fetching slices: {e}")
            traceback.print_exc()

    def get_delegations(self, dlg_id: str = None):
        """Print one delegation (by graph id) or all of them."""
        try:
            if dlg_id is not None:
                del_list = self.db.get_delegation(dlg_graph_id=dlg_id)
            else:
                del_list = self.db.get_delegations()
            if del_list is not None and len(del_list) > 0:
                for d in del_list:
                    print(d)
                    print()
            else:
                print(f"No delegations found: {del_list}")
        except Exception as e:
            print(f"Exception occurred while fetching delegations: {e}")
            traceback.print_exc()

    def get_reservations(self, slice_id: str = None, res_id: str = None, email: str = None):
        """Print reservations filtered by slice id, reservation id or email."""
        try:
            res_list = []
            if slice_id is not None:
                res_list = self.db.get_reservations_by_slice_id(slice_id=ID(uid=slice_id))
            elif res_id is not None:
                res_list = self.db.get_reservation(rid=ID(uid=res_id))
            elif email is not None:
                res_list = self.db.get_reservations_by_email(email=email)
            else:
                res_list = self.db.get_reservations()
            if res_list is not None and len(res_list) > 0:
                for r in res_list:
                    print(r)
                    print()
            else:
                print(f"No reservations found: {res_list}")
        except Exception as e:
            # NOTE(review): copy-pasted message says "delegations" but this
            # method fetches reservations.
            print(f"Exception occurred while fetching delegations: {e}")
            traceback.print_exc()

    def remove_reservation(self, sliver_id: str):
        """Delete a reservation/sliver by its id."""
        try:
            self.db.remove_reservation(rid=ID(uid=sliver_id))
        except Exception as e:
            # NOTE(review): misleading copy-pasted message ("fetching delegations").
            print(f"Exception occurred while fetching delegations: {e}")
            traceback.print_exc()

    def remove_slice(self, slice_id: str):
        """Delete a slice by its id."""
        try:
            self.db.remove_slice(slice_id=ID(uid=slice_id))
        except Exception as e:
            # NOTE(review): misleading copy-pasted message ("fetching delegations").
            print(f"Exception occurred while fetching delegations: {e}")
            traceback.print_exc()

    def handle_command(self, args):
        """Dispatch the parsed argparse namespace to the matching handler."""
        if args.command == "slices":
            if args.operation is not None and args.operation == "remove":
                self.remove_slice(slice_id=args.slice_id)
            elif args.operation is not None and args.operation == "topology":
                self.get_slice_topology(graph_id=args.graph_id)
            else:
                self.get_slices(slice_id=args.slice_id, email=args.email, slice_name=args.slice_name)
        elif args.command == "slivers":
            if args.operation is not None and args.operation == "remove":
                self.remove_reservation(sliver_id=args.sliver_id)
            else:
                self.get_reservations(slice_id=args.slice_id, res_id=args.sliver_id, email=args.email)
        elif args.command == "delegations":
            self.get_delegations(dlg_id=args.delegation_id)
        else:
            print(f"Unsupported command: {args.command}")
if __name__ == '__main__':
    # Command-line entry point.  Required: -u user, -p password, -d database,
    # -c command (slices | slivers | delegations).  Optional filters:
    # -s slice id, -g graph id, -r sliver id, -i delegation id, -e email,
    # -n slice-name substring, -o operation (remove | topology).
    parser = argparse.ArgumentParser()
    parser.add_argument("-u", dest='user', required=True, type=str)
    parser.add_argument("-p", dest='password', required=True, type=str)
    parser.add_argument("-d", dest='database', required=True, type=str)
    parser.add_argument("-c", dest='command', required=True, type=str)
    parser.add_argument("-s", dest='slice_id', required=False, type=str)
    parser.add_argument("-g", dest='graph_id', required=False, type=str)
    parser.add_argument("-r", dest='sliver_id', required=False, type=str)
    parser.add_argument("-i", dest='delegation_id', required=False, type=str)
    parser.add_argument("-e", dest='email', required=False, type=str)
    parser.add_argument("-n", dest='slice_name', required=False, type=str)
    parser.add_argument("-o", dest='operation', required=False, type=str)
    args = parser.parse_args()
    mc = MainClass(user=args.user, password=args.password, db=args.database)
    mc.handle_command(args)
| 1.757813 | 2 |
meal_planner/logging_configs/local.py | darth-dodo/meal-helper | 2 | 12763959 | import logging.config
import os
from django.utils.log import DEFAULT_LOGGING
def local_logging_config(**options):
    """Apply the local/dev logging configuration via logging.config.dictConfig.

    Expected *options* keys: LOG_ROOT (directory for the rotating file log),
    LOG_LEVEL (application loggers), FILE_LOG_LEVEL (file handler threshold)
    and SENTRY_LOG_LEVEL (sentry handler threshold).

    Note: logging.config.dictConfig() returns None, so this function always
    returns None; it is called for its configuration side effect.
    """
    log_root = options.get('LOG_ROOT')
    log_level = options.get('LOG_LEVEL')
    file_log_level = options.get('FILE_LOG_LEVEL')
    sentry_log_level = options.get('SENTRY_LOG_LEVEL')
    return logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'default': {
                # exact format is not important, this is the minimum information
                'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
            },
            'django.server': DEFAULT_LOGGING['formatters']['django.server'],
        },
        'handlers': {
            # console logs to stderr
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'default',
            },
            # Add Handler for Sentry for `warning` and above
            'sentry': {
                'level': sentry_log_level,
                'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
            },
            # Adding file rotating log
            'file': {
                'level': file_log_level,
                'class': 'logging.handlers.RotatingFileHandler',
                'formatter': 'default',
                'filename': os.path.join(log_root, 'django.log'),
                'maxBytes': 50 * 1024 * 1024,  # 50 MB
                'backupCount': 20,
            },
            'django.server': DEFAULT_LOGGING['handlers']['django.server'],
        },
        'loggers': {
            # default for all undefined Python modules
            '': {
                'level': 'WARNING',
                'handlers': ['console', 'sentry', 'file'],
            },
            # Our application code
            'utils': {
                'level': log_level,
                'handlers': ['console', 'sentry', 'file'],
                # Avoid double logging because of root logger
                'propagate': False,
            },
            # 'meals': {
            #     'level': log_level,
            #     'handlers': ['console', 'sentry', 'file'],
            #     # Avoid double logging because of root logger
            #     'propagate': False,
            # },
            # 'members': {
            #     'level': log_level,
            #     'handlers': ['console', 'sentry', 'file'],
            #     # Avoid double logging because of root logger
            #     'propagate': False,
            # },
            # # Prevent noisy modules from logging to Sentry
            # 'noisy_module': {
            #     'level': 'ERROR',
            #     'handlers': ['console'],
            #     'propagate': False,
            # },
            # Default runserver request logging
            'django.server': DEFAULT_LOGGING['loggers']['django.server'],
        },
}) | 1.914063 | 2 |
src/articles/migrations/0006_auto_20210426_1356.py | NotSirius-A/School-website | 1 | 12763960 | <filename>src/articles/migrations/0006_auto_20210426_1356.py<gh_stars>1-10
# Generated by Django 3.1.7 on 2021-04-26 11:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``show_on_lists`` visibility flag and re-declare ``text``."""
    # Must run after the previous 'articles' migration.
    dependencies = [
        ('articles', '0005_auto_20210423_1603'),
    ]
    operations = [
        # New opt-out flag: articles can be hidden from listings while
        # staying reachable via direct URL (e.g. 'rules' or 'contact').
        migrations.AddField(
            model_name='article',
            name='show_on_lists',
            field=models.BooleanField(default=False, help_text="Choose whether this article should be visible on lists or only accesible trough direct urls. Useful basic articles like:'rules' or 'contact'"),
        ),
        # Re-declare the text field with an explicit default and max length.
        migrations.AlterField(
            model_name='article',
            name='text',
            field=models.TextField(default='Text here', help_text='HTML tags are allowed', max_length=15000),
        ),
    ]
| 1.703125 | 2 |
barcodes/dxfwrite/dxfwrite/base.py | sbarton272/AcousticBarcodes-Explorations | 0 | 12763961 | #!/usr/bin/env python
#coding:utf-8
# Purpose: base types
# module belongs to package dxfwrite
# Created: 09.02.2010
# Copyright (C) 2010, <NAME>
# License: MIT License
__author__ = "mozman <<EMAIL>>"
from dxfwrite.util import izip, PYTHON3, to_string, is_string, iterflatlist
# Python 2/3 compatibility shim: make the name ``xrange`` usable on Python 3.
if PYTHON3:
    xrange = range
from dxfwrite.vector3d import cross_product, unit_vector
def dxfstr(obj):
    """Return the DXF string representation of *obj*.

    Delegates to the object's ``__dxf__()`` method, which builds the string
    eagerly (and can therefore cause excessive string concatenation for
    large objects).  The returned string ends with a newline character.
    """
    dxf_string = obj.__dxf__()
    return dxf_string
def iterdxftags(dxfobj):
    """Yield the atomic DXF tags of *dxfobj* in depth-first order.

    Objects exposing ``__dxftags__()`` are treated as containers and
    expanded recursively; anything else is yielded as a leaf tag.
    """
    if not hasattr(dxfobj, '__dxftags__'):
        yield dxfobj
        return
    for child in dxfobj.__dxftags__():
        for leaf in iterdxftags(child):
            yield leaf
def tags2str(dxfobj):
    """Build the DXF string for *dxfobj* by collecting its tags first.

    Gathers every atomic tag via ``iterdxftags()`` and produces the final
    string with a single ``join()`` call, so string creation happens as
    late as possible.  The result is a valid DXF string ending with a
    newline character.
    """
    parts = [tag.__dxf__() for tag in iterdxftags(dxfobj)]
    return "".join(parts)
def writetags(fileobj, dxfobj, encoding=None):
    """Write all DXF tags of *dxfobj* to *fileobj*, one tag at a time.

    On Python 3 (or when *encoding* is None) tag strings are written
    as-is; on Python 2 with an explicit *encoding* each tag string is
    encoded before writing.
    """
    if PYTHON3 or (encoding is None):
        for tag in iterdxftags(dxfobj):
            fileobj.write(tag.__dxf__())
    else:
        for tag in iterdxftags(dxfobj):
            fileobj.write(tag.__dxf__().encode(encoding))
class DXFValidationError(Exception):
    """Raised when DXF content fails validation."""
class _DXFType(object):
_group_code_types = None
def __init__(self):
if self._group_code_types is None:
self._init_table()
def _init_table(self):
self._group_code_types = dict()
for type_str, begin, end in [
('string', 0, 9),
('float', 10, 59),
('int', 60, 79),
('int', 90, 99),
('string', 100, 0),
('string', 102, 0),
('string', 105, 0),
('float', 110, 149),
('int', 170, 179),
('float', 210, 239),
('int', 270, 289),
('bool', 290, 299),
('string', 300, 369),
('int', 370, 389),
('string', 390, 399),
('int', 400, 409),
('string', 410, 419),
('int', 420, 429),
('string', 430, 439),
('int', 440, 459),
('float', 460, 469),
('string', 470, 479),
('string', 999, 1009),
('float', 1010, 1059),
('int', 1060, 1071),
]:
self.add_group_code_type(type_str, begin, end)
def check(self, value, code):
try:
typestr = self.group_code_type(code)
except KeyError:
raise ValueError("Unknown group code '%s'" % str(code))
if typestr == 'string':
return is_string(value)
elif typestr == 'bool':
return value in (0, 1)
elif typestr == 'float':
return isinstance(value, float)
elif typestr == 'int':
return isinstance(value, int)
def cast(self, value, code):
""" Convert value depending on group code """
typestr = self.group_code_type(code)
if typestr == 'string':
return to_string(value)
elif typestr == 'bool':
return 1 if int(value) else 0
elif typestr == 'float':
return float(value)
elif typestr == 'int':
return int(value)
raise ValueError("Unknown format '%s'" % to_string(code))
def group_code_type(self, group_code):
return self._group_code_types[group_code]
def add_group_code_type(self, type_str, begin, end=0):
if end <= begin:
end = begin + 1
else:
end += 1
for code in xrange(begin, end):
self._group_code_types[code] = type_str
class DXFAtom(object):
    """Smallest DXF building block: a (group_code, value) pair."""
    # Shared type table used to coerce values to the proper Python type.
    _dxftype = _DXFType()

    def __init__(self, value, group_code=0):
        code = int(group_code)
        self._group_code = code
        self._value = self._typecast(value, code)

    def __dxf__(self):
        """Return a valid DXF string; the last character is a newline."""
        return "%3d\n%s\n" % (self._group_code, to_string(self._value))

    def _typecast(self, value, group_code):
        """Coerce *value* to the Python type required by *group_code*."""
        return self._dxftype.cast(value, group_code)

    def is_3d_point_coord(self):
        """True if the group code denotes a 3D point coordinate (10..39)."""
        return 10 <= self._group_code <= 39

    def get_index_shift(self):
        """Return the dxf-3d-point index shift (0..9).

        Examples: codes 10/20/30 -> 0; 13/23/33 -> 3; 17/27/37 -> 7.
        """
        if not self.is_3d_point_coord():
            raise TypeError("Not a 3D point value")
        return self.group_code % 10

    def get_axis_index(self):
        """Return 0 for the 'x' axis, 1 for 'y' and 2 for 'z'."""
        if not self.is_3d_point_coord():
            raise TypeError("Not a 3D point value")
        return self.group_code // 10 - 1

    @property
    def value(self):
        return self._value

    @property
    def group_code(self):
        return self._group_code

    def __eq__(self, atom):
        assert isinstance(atom, DXFAtom)
        return (self.group_code, self.value) == (atom.group_code, atom.value)
class DXFList(list):
    """A list of DXF atoms / taggable objects."""

    def __dxf__(self):
        """Return the concatenated DXF string of all contained items."""
        return "".join([item.__dxf__() for item in self])

    def __dxftags__(self):
        # A DXFList is its own tag container.
        return self

    def __eq__(self, dxflist):
        if len(self) != len(dxflist):
            return False
        # Element-wise comparison; keeps the original '!=' semantics.
        return not any(a != b for a, b in izip(self, dxflist))

    def endswith(self, name):
        """True if the last element's ``value`` attribute equals *name*."""
        if not len(self):
            return False
        try:
            return self[-1].value == name
        except AttributeError:
            return False
class DXFString(DXFAtom):
    """DXF atom holding a string value; default group code is 1."""
    def __init__(self, value, group_code=1):
        super(DXFString, self).__init__(to_string(value), group_code)
class DXFName(DXFAtom):
    """DXF atom holding a name string; default group code is 2."""
    def __init__(self, value, group_code=2):
        super(DXFName, self).__init__(to_string(value), group_code)
class DXFFloat(DXFAtom):
    """DXF atom holding a float value; default group code is 40."""
    def __init__(self, value, group_code=40):
        super(DXFFloat, self).__init__(float(value), group_code)
class DXFAngle(DXFAtom):
    """DXF atom holding an angle in degrees as float; default group code 50."""
    def __init__(self, value, group_code=50):
        super(DXFAngle, self).__init__(float(value), group_code)
class DXFInt(DXFAtom):
    """DXF atom holding a 16-bit integer value; default group code 70."""
    def __init__(self, value, group_code=70):
        super(DXFInt, self).__init__(int(value), group_code)
class DXFBool(DXFAtom):
    """DXF atom holding a boolean flag stored as integer 0 or 1; default group code 290."""
    def __init__(self, value=1, group_code=290):
        super(DXFBool, self).__init__(int(value), group_code)
class DXFPoint(object):
    """ 3D point with 3 float coordinates """
    def __init__(self, coords=(0., 0., 0.), index_shift=0):
        # Coordinate i gets group code (i+1)*10 + index_shift,
        # i.e. x->10, y->20, z->30 for index_shift == 0.
        if len(coords) in (2, 3) :
            # just use a normal list not DXFList, because point has public access
            # and can be set as tuple or list too, so always expect a tuple or list
            self.point = [DXFFloat(value, (pos+1)*10+index_shift)
                          for pos, value in enumerate(coords)]
        else:
            raise ValueError("only 2 or 3 coord-values allowed.")
    def __getitem__(self, axis):
        """ Get coordinate for 'axis'.
        PARAMETER
        axis: 0, 1, 2 or 'x', 'y', 'z'
        axis: 'xz' returns a list of 'x' and 'z' any combination of 'x', 'y'
        and 'z' is valid, ('xyz', 'zyx', 'xxy', 'xxxxx')
        """
        if axis in (0, 1, 2):
            try:
                return self.point[axis].value
            except IndexError:
                raise IndexError("DXF-Point has no '%s'-coordinate!" % ('x', 'y', 'z')[axis])
        elif is_string(axis):
            if axis in ('x', 'y', 'z'):
                try:
                    # 'x' -> 0, 'y' -> 1, 'z' -> 2 via character arithmetic
                    index = ord(axis) - ord('x')
                    return self.point[index].value
                except IndexError:
                    raise IndexError("DXF-Point has no '%s'-coordinate!" % axis)
            elif len(axis) > 1: # 'xy' or 'zx' get coords in letter order
                # Recursive one-letter lookups; duplicates are allowed.
                return [ self.__getitem__(index) for index in axis ]
            else:
                raise IndexError("Invalid axis name '%s'" % axis)
        else:
            raise IndexError("Invalid axis name '%s'" % axis)
    def __dxf__(self):
        """Return the DXF string of all coordinate atoms, concatenated."""
        return "".join([coord.__dxf__() for coord in self.point])
    def get_index_shift(self):
        """Return the index shift this point was created with (x code - 10)."""
        return self.point[0].group_code - 10
    def shift_group_code(self, index_shift):
        """ get DXFPoint with shifted group code """
        return DXFPoint(self[ "xyz"[:len(self.point)] ],
                        index_shift)
    def to_3D(self, zvalue=0.):
        """ add z-axis if absent """
        if len(self.point) < 3:
            self.point.append(DXFFloat(zvalue, self.get_index_shift()+30))
    @property
    def tuple(self):
        """Coordinates as a plain tuple of floats (2 or 3 elements)."""
        # CAUTION: do not override the 'value' attribute!!!
        # 'value' would be the suitable name for this property, but that causes
        # several serious problems.
        return tuple(self['xyz'[:len(self.point)]])
class DXFPoint2D(DXFPoint):
    """DXFPoint whose DXF output contains only the x and y coordinates."""
    def __dxf__(self):
        # Emit at most the first two coordinate atoms; z is suppressed.
        return "".join([coord.__dxf__() for coord in self.point[:2]])
class DXFPoint3D(DXFPoint):
    """DXFPoint that is guaranteed to have three coordinates (z defaults to 0.)."""
    def __init__(self, coords=(0., 0., 0.), index_shift=0):
        # Pad 2D input with z = 0. so the point is always 3D.
        if len(coords) == 2:
            coords = (coords[0], coords[1], 0.)
        super(DXFPoint3D, self).__init__(coords, index_shift)
def PassThroughFactory(value, group_code):
    """Identity factory: return *value* unchanged (*group_code* is ignored).

    Intended as AttribDef.factory for attributes stored as ready-made
    objects (DXFList, DXFPoint, ...) that need no DXFAtom wrapping.
    """
    return value
class AttribDef(object):
    """Describes a single DXF attribute.

    Attributes:
        group_code: DXF group code of the attribute.
        factory: callable creating DXFAtoms; use PassThroughFactory for
            objects such as DXFList, DXFPoint or the pattern attribute of
            class Linetype() that are stored as-is.
        priority: output ordering hint -- lower values are written before
            higher values (50, 51, 52, 100, ...).  Not strictly required
            by the DXF format, but useful for testing.
    """
    def __init__(self, factory, group_code=0, priority=100):
        self.factory = factory
        self.priority = priority
        self.group_code = group_code
# Threshold from AutoCAD's arbitrary axis algorithm: 1/64.
_LIMIT = 1./64.
# World y- and z-axis unit vectors.
_WY = (0., 1., 0.)
_WZ = (0., 0., 1.)
def get_OCS(zvector):
    """Get the Object-Coordinate-System (a.k.a. ECS Entity-C-S).
    The arbitrary axis algorithm is used by AutoCAD internally to implement
    the arbitrary but consistent generation of object coordinate systems for all
    entities which use object coordinates.
    untested!
    """
    # Normalise the extrusion direction to get the OCS z-axis.
    az = unit_vector(zvector)
    # If az is "close" to the world z-axis (both x and y components below
    # 1/64), derive the x-axis from the world y-axis, otherwise from the
    # world z-axis -- per the arbitrary axis algorithm.
    if (abs(az[0]) < _LIMIT) and (abs(az[1]) < _LIMIT):
        ax = unit_vector(cross_product(_WY, az))
    else:
        ax = unit_vector(cross_product(_WZ, az))
    # y-axis completes the right-handed system.
    ay = unit_vector(cross_product(az, ax))
    return (ax, ay, az) # 3 unit-vectors!
| 2.5 | 2 |
pymicropel/helper/__init__.py | vkorecky/pymicropel | 0 | 12763962 | """Helper classes."""
| 1.15625 | 1 |
kit_tree_test.py | obedtandadjaja/dynamic_testing | 0 | 12763963 | import unittest
import json
from kit_test_helper import TestHelper
from bit_extension import BitExtension
# selenium stuff
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.support.select import Select
# Kit Tree
class KitTreeTestCase(unittest.TestCase):
    """Selenium end-to-end test of the kit_tree.aspx page.

    Drives a Firefox session through search, navigation trees and grid
    views, comparing the rendered page against expectations loaded from a
    JSON test-case file (``filename``) on a host given by ``CONST_HOST``.
    """
    # Name of the JSON test-case file under json_test_cases/; set by the runner.
    filename = None
    # Base URL of the application under test; set by the runner.
    CONST_HOST = None
    def setUp(self):
        """Start a fresh Firefox session for each test run."""
        self.browser = webdriver.Firefox()
    def getTestCase(self, file_path):
        """Load and return the JSON test-case data from *file_path*."""
        with open(file_path) as data_file:
            data = json.load(data_file)
        return data
    def runTest(self):
        """Main scenario: test ALL mode, DIRECT mode, then revision switching."""
        helper = TestHelper()
        helper.dbEstablishConnection()
        # Bail out silently if the test database is unreachable.
        if helper.dbTestConnection() == False:
            return
        data = self.getTestCase("json_test_cases/"+self.filename)
        existing, existing_lookup, matching, matching_lookup = data["existing"]["list"], data["existing"]["lookup"], data["matching"]["kit_tree"], data["matching"]["lookup"]
        # self.prepareTest(existing, existing_lookup, helper)
        print("--"*5, "BEGIN TEST ALL", "--"*5)
        for i in matching:
            self.search(i["pn"], i["revision_name"])
            self.checkKitNavigation(i["kit_navigation"])
            self.checkParentNavigation(i["parent_navigation"])
            self.checkMasterNavigation(i["master_navigation"])
            self.checkMainGrid(i["gridview"])
        print("--"*5, "BEGIN TEST DIRECT", "--"*5)
        for i in matching:
            self.search(i["pn"], i["revision_name"], type="DIRECT")
            self.checkKitNavigation(i["kit_navigation"], type="DIRECT")
            self.checkParentNavigation(i["parent_navigation"])
            self.checkMasterNavigation(i["master_navigation"])
            self.checkMainGrid(i["direct_gridview"], type="DIRECT")
        print("--"*5, "BEGIN TEST DROPDOWN CHANGE", "--"*5)
        # Use the last two matching entries to exercise the revision dropdown.
        self.checkRevisionDropDownChanged(matching[len(matching)-2], matching[len(matching)-1])
        self.checkRevisionDropDownChanged(matching[len(matching)-2], matching[len(matching)-1], type="DIRECT")
        helper.dbCloseConnection()
    def prepareTest(self, existing, existing_lookup, helper):
        """Seed the database with the 'existing' kit list and lookup rows."""
        helper.prepareTest()
        print("***SETTING UP EXISTING DATA IN KIT LIST***")
        for kitlist in existing:
            helper.insertKitList(kitlist["pn"], kitlist["master_pn"], kitlist["parent_pn"], kitlist["revision"], kitlist["subkit_revision"], kitlist["qty"], kitlist["uom"], kitlist["is_kit"], kitlist["description"])
        for lookup in existing_lookup:
            helper.insertKitLookup(lookup["subkit_pn"], lookup["master_pn"], lookup["subkit_revision"], lookup["master_revision"])
        print("***DONE SETTING UP EXISTING DATA***")
    def search(self, pn, revision, type="ALL"):
        """Open the kit tree page and search for *pn* in ALL or DIRECT mode."""
        print(pn, revision)
        self.browser.get(self.CONST_HOST+"UApplication3/wh/kitrec/kit_tree.aspx")
        wait = WebDriverWait(self.browser, 10)
        wait.until(EC.presence_of_element_located((By.ID, "ctl00_ContentPlaceHolder1_txt_PN")))
        kitPN = self.browser.find_element_by_id("ctl00_ContentPlaceHolder1_txt_PN")
        # ALL mode submits via Enter, DIRECT mode clicks the dedicated button.
        if type == "ALL":
            kitPN.send_keys(str(pn) + Keys.RETURN)
        else:
            kitPN.send_keys(str(pn))
            wait.until(EC.element_to_be_clickable((By.ID, "ctl00_ContentPlaceHolder1_btn_searchDirect")))
            self.browser.find_element_by_id("ctl00_ContentPlaceHolder1_btn_searchDirect").click()
        wait.until(EC.invisibility_of_element_located((By.ID, "ctl00_ContentPlaceHolder1_img_loading")))
        self.searchStep1(pn, revision, wait)
    def searchStep1(self, pn, revision, wait):
        """Resolve the search result: pick from the candidates grid if shown,
        then select the requested *revision* in the dropdown.

        Retries itself when the wait races against page rendering."""
        try:
            wait.until(lambda driver: self.browser.find_element_by_id("ctl00_ContentPlaceHolder1_pnl_candidates").is_displayed() or self.browser.find_element_by_id("ctl00_ContentPlaceHolder1_btn_searchAll").is_enabled())
        except Exception:
            # Timed out waiting -- retry the whole resolution step.
            self.searchStep1(pn, revision, wait)
            return
        try:
            if not self.browser.find_element_by_id("ctl00_ContentPlaceHolder1_pnl_candidates").is_displayed():
                raise Exception("")
            wait.until(EC.presence_of_element_located((By.ID, "ctl00_ContentPlaceHolder1_CandidatesGridView")))
            revision_qs = "&revision="+str(BitExtension().revisionNameToLong(revision)) if revision != "All" and revision != "ALL" else ""
            self.browser.find_element_by_css_selector("table#ctl00_ContentPlaceHolder1_CandidatesGridView.MyDataGridCaption tr.gridBody[onclick*='kit_tree.aspx?pn="+pn+revision_qs+"']").click()
            if revision_qs != "":
                self.checkRevisionDropDown(wait)
                Select(self.browser.find_element_by_css_selector("select#ctl00_ContentPlaceHolder1_ddl_kitRevision")).select_by_value(str(BitExtension().revisionNameToLong(revision)))
                wait.until(EC.invisibility_of_element_located((By.ID, "ctl00_ContentPlaceHolder1_img_loading")))
                wait.until(lambda driver: self.browser.find_element_by_css_selector("select#ctl00_ContentPlaceHolder1_ddl_kitRevision option[value='"+str(BitExtension().revisionNameToLong(revision))+"']").is_selected())
        except Exception as e:
            # No candidates panel (single match): select the revision directly.
            self.checkRevisionDropDown(wait)
            Select(self.browser.find_element_by_css_selector("select#ctl00_ContentPlaceHolder1_ddl_kitRevision")).select_by_value(str(BitExtension().revisionNameToLong(revision)))
            wait.until(EC.invisibility_of_element_located((By.ID, "ctl00_ContentPlaceHolder1_img_loading")))
            wait.until(lambda driver: self.browser.find_element_by_css_selector("select#ctl00_ContentPlaceHolder1_ddl_kitRevision option[value='"+str(BitExtension().revisionNameToLong(revision))+"']").is_selected())
    def checkRevisionDropDown(self, wait):
        """Assert the revision dropdown is present and visible."""
        wait.until(EC.presence_of_element_located((By.ID, "ctl00_ContentPlaceHolder1_ddl_kitRevision")))
        self.assertTrue(self.browser.find_element_by_id("ctl00_ContentPlaceHolder1_ddl_kitRevision").is_displayed(), msg="kit revision ddl not displayed")
    def checkRevisionDropDownChanged(self, child1, child2, type="ALL"):
        """Search for *child1*, switch the dropdown to *child2*'s revision and
        verify the grid re-renders with *child2*'s expected rows.

        Only runs when both entries have children and share the same pn."""
        if len(child1["kit_navigation"]["children"]) > 0 and len(child2["kit_navigation"]["children"]) > 0 and child1["pn"] == child2["pn"]:
            wait = WebDriverWait(self.browser, 10)
            self.search(child1["pn"], child1["revision_name"], type=type)
            self.checkRevisionDropDown(wait)
            print("SWITCH TO", child2["pn"], child2["revision_name"])
            Select(self.browser.find_element_by_css_selector("select#ctl00_ContentPlaceHolder1_ddl_kitRevision")).select_by_value(str(BitExtension().revisionNameToLong(child2["revision_name"])))
            wait.until(EC.invisibility_of_element_located((By.ID, "ctl00_ContentPlaceHolder1_img_loading")))
            wait.until(lambda driver: self.browser.find_element_by_css_selector("select#ctl00_ContentPlaceHolder1_ddl_kitRevision option[value='"+str(BitExtension().revisionNameToLong(child2["revision_name"]))+"']").is_selected())
            if type == "ALL":
                self.checkMainGrid(child2["gridview"])
            else:
                self.checkMainGrid(child2["direct_gridview"])
    def checkMainGrid(self, children, type="ALL"):
        """Compare the main grid rows with the expected *children*.

        In ALL mode sub-kits (is_kit == "1") are excluded from the
        expectation, since the grid only lists non-kit parts there."""
        print("--"*5, "CHECK MAIN GRID", "--"*5)
        testChildren = []
        for child in children:
            if child["is_kit"] != "1" or type == "DIRECT":
                testChildren.append(child)
        rows = self.browser.find_elements_by_css_selector("table#ctl00_ContentPlaceHolder1_MainGridView.MyDataGridCaption tbody tr.gridBody")
        self.assertEqual(len(rows), len(testChildren), msg="MainGridView: Number of children shown does not match test children")
        matchAllChildren = True
        for row in rows:
            pn = row.find_element_by_css_selector("td:nth-child(1)").text
            quantity = row.find_element_by_css_selector("td:nth-child(2)").text
            uom = row.find_element_by_css_selector("td:nth-child(3)").text
            is_kit = "1" if row.find_element_by_css_selector("input[type='checkbox']").is_selected() else "0"
            description = row.find_element_by_css_selector("td:nth-child(5)").text
            childMatched = False
            index = 0
            # Matched expectations are removed so duplicates must each match a row.
            for child in testChildren:
                if child["pn"] == pn and child["qty"] == quantity and child["uom"] == uom and child["is_kit"] == is_kit and child["description"] == description:
                    print("===", child["pn"], "matched")
                    del testChildren[index]
                    childMatched = True
                    break
                index += 1
            if not childMatched:
                print(pn, quantity, uom, is_kit, description)
                matchAllChildren = False
                break
        self.assertTrue(matchAllChildren, msg="MainGridView: Not every children matches!")
        self.assertTrue(len(testChildren) == 0, msg="MainGridView: Not every children is displayed!")
    def checkKitNavigation(self, tree, type="ALL"):
        """Breadth-first comparison of the rendered kit navigation tree with
        the expected *tree* structure, one nesting level per iteration."""
        print("--"*5, "CHECK KIT NAVIGATION", "--"*5)
        levels = ""
        string = "div#ctl00_ContentPlaceHolder1_tree_kitNavigation > div "+levels+"> table > tbody > tr > td > a"
        if type != "ALL":
            string = "div#ctl00_ContentPlaceHolder1_tree_kitNavigation "+levels+"> table > tbody > tr > td > a"
        stack = []
        stack.append(tree)
        while len(stack) > 0:
            print("Testing level:", len(levels.split("> div ")))
            # Replace the current level with the union of its children.
            for x in range(len(stack)):
                current = stack.pop(0)
                for i in current["children"]:
                    stack.append(i)
            rows = self.browser.find_elements_by_css_selector(string)
            tree_level = []
            for row in rows:
                if row.get_attribute("textContent") != "" and row.get_attribute("textContent") != "[Edit]":
                    tree_level.append((row.get_attribute("textContent"), row.get_attribute("href")))
            self.assertEqual(len(stack), len(tree_level))
            matchAllChildren = True
            for child in stack:
                childMatched = False
                index = 0
                # NOTE(review): tree_level is mutated while being iterated and
                # there is no break after a match (unlike the sibling check
                # methods) -- verify this matching loop on duplicate entries.
                for (td, href) in tree_level:
                    if child["pn"] == td and str(BitExtension().revisionNameToLong(child["revision_name"])) in href:
                        childMatched = True
                        print("===", child["pn"], "matched")
                        del tree_level[index]
                    index += 1
                if not childMatched:
                    print("Missing", child["pn"], child["revision_name"])
                    matchAllChildren = False
                    break
            self.assertTrue(matchAllChildren, msg="kitNavigation: Not every children matches!")
            self.assertTrue(len(tree_level) == 0, msg="kitNavigation: Not every children is displayed!")
            # Descend one level deeper in the CSS selector.
            levels = levels + "> div "
            string = "div#ctl00_ContentPlaceHolder1_tree_kitNavigation > div "+levels+"> table > tbody > tr > td > a"
            if type == "DIRECT":
                break
    def checkParentNavigation(self, parents):
        """Compare the parent navigation links with the expected *parents*."""
        print("--"*5, "CHECK PARENT NAVIGATION", "--"*5)
        testParents = []
        for i in parents:
            testParents.append(i)
        element = self.browser.find_element_by_id("ctl00_ContentPlaceHolder1_tree_parent")
        rows = element.find_elements_by_css_selector("div a")
        self.assertEqual(len(rows), len(testParents), msg="parentNavigation: wrong number of parents displayed")
        matchAllParents = True
        for row in rows:
            parentMatched = False
            index = 0
            for parent in testParents:
                if row.get_attribute("textContent") == parent["pn"] and str(BitExtension().revisionNameToLong(parent["revision_name"])) in row.get_attribute("href"):
                    parentMatched = True
                    print("===", parent["pn"], "matched")
                    del testParents[index]
                    break
                index += 1
            if not parentMatched:
                print("Missing", row.get_attribute("textContent"))
                matchAllParents = False
                break
        self.assertTrue(matchAllParents, msg="parentNavigation: Not every parent matches!")
        self.assertTrue(len(testParents) == 0, msg="parentNavigation: Not every parent is displayed!")
    def checkMasterNavigation(self, masters):
        """Compare the master navigation links with the expected *masters*."""
        print("--"*5, "CHECK MASTER NAVIGATION", "--"*5)
        testMasters = []
        for i in masters:
            testMasters.append(i)
        element = self.browser.find_element_by_id("ctl00_ContentPlaceHolder1_tree_master")
        rows = element.find_elements_by_css_selector("div a")
        self.assertEqual(len(rows), len(testMasters), msg="masterNavigation: wrong number of masters displayed")
        matchAllMasters = True
        for row in rows:
            masterMatched = False
            index = 0
            for master in testMasters:
                if row.get_attribute("textContent") == master["pn"] and str(BitExtension().revisionNameToLong(master["revision_name"])) in row.get_attribute("href"):
                    masterMatched = True
                    print("===", master["pn"], "matched")
                    del testMasters[index]
                    break
                index += 1
            if not masterMatched:
                print("Missing", row.get_attribute("textContent"))
                matchAllMasters = False
                break
        self.assertTrue(matchAllMasters, msg="masterNavigation: Not every master matches!")
        self.assertTrue(len(testMasters) == 0, msg="masterNavigation: Not every master is displayed!")
test/test_score.py | tinylinux/m1platformer | 9 | 12763964 | # Roll 'n' Jump
# Written in 2020, 2021 by <NAME>, <NAME>,
# <NAME>, <NAME>
# To the extent possible under law, the author(s) have dedicated all
# copyright and related and neighboring rights to this software to the
# public domain worldwide. This software is distributed without any warranty.
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
"""Fichier de test pour score."""
import os
from hypothesis import given
from hypothesis.strategies import characters, integers, text, lists, tuples
import rollnjump.main as main
import rollnjump.conf as cf
import rollnjump.score as scre
# Redirect the score file to a test-local path so tests never touch real data.
cf.SCORES = os.path.join(os.path.dirname(__file__), "test_score.txt")
@given(integers())
def test_print(number):
    """Smoke test for the score display functions."""
    # Simply call the functions and make sure they do not raise,
    # in both supported languages.
    main.initialization(False)
    scre.score(number)
    scre.score_endgame(number)
    cf.LANG = "fr"
    scre.winner_endgame()
    cf.LANG = "en"
    scre.winner_endgame()
# Hypothesis strategy: characters in '0'..'z' with the score-file separator
# characters (':' .. '`') blacklisted, so generated names stay parseable.
alphanum_char = characters(min_codepoint=0x30,
                           max_codepoint=0x7A,
                           blacklist_characters=[':', ';', '<',
                                                 '=', '>', '?',
                                                 '@', '[', '\\',
                                                 ']', '^', '_',
                                                 '`'])
# A score entry: (non-negative score, player name).
score_list = tuples(integers(min_value=0), text(alphanum_char))
@given(lists(score_list, min_size=1, max_size=5))
def test_scoreboard(scores):
    """Test the scoreboard read/write functions against generated entries."""
    scre.init_best_score()
    # Record every generated (score, name) pair.
    for (score, name) in scores:
        scre.PLAYER = name
        scre.set_best_score(score)
    read_scores = scre.get_scores()
    # The board must come back sorted by score, descending.
    scores = list(sorted(scores, key=lambda x: -x[0]))
    assert read_scores == scores
    last_score = scre.get_last_best_score()
    assert last_score == scores[-1][0]
    # Beating the top score must keep the leader at position 0.
    scre.PLAYER = scores[0][1]
    assert scre.maj(scores[0][0] + 1)
    assert scre.get_scores()[0] == scores[0]
    # Fill the board with 10s, then 1 must no longer qualify.
    for _ in range(5):
        if scre.maj(10):
            scre.set_best_score(10)
    assert not scre.maj(1)
    scre.init_best_score()
@given(lists(text()))
def test_corrupted_board_random(contents):
    """Robustness: get_scores() must not raise on arbitrary file content."""
    # Overwrite the score file with random lines, then just read it back.
    with open(cf.SCORES, 'w') as board:
        for line in contents:
            board.write(line + '\n')
    scre.get_scores()
def test_corrupted_board():
    """Non-randomised corruption test, kept for deterministic coverage."""
    # Malformed entries must be ignored, yielding an empty scoreboard.
    with open(cf.SCORES, 'w') as board:
        for line in ["fsdq;0;vd", "s;s", "bcds"]:
            board.write(line + '\n')
    assert scre.get_scores() == []
| 2.671875 | 3 |
main.py | nikitt-code/py-auth-encrypt | 0 | 12763965 | <reponame>nikitt-code/py-auth-encrypt<filename>main.py
import PACrypt
# Create the encryption helper object.
pac = PACrypt.Init()
# Encrypt a password; the second argument's meaning is defined by PACrypt
# (presumably a work factor -- verify against the library).  Expected
# output is shown in the comment below.
print(pac.password("<PASSWORD>", 5))
# 444157154256036112436072587541693624076755672895177551428225360594572972299194330109409163709150304140202440078016
# Encrypt a phone number; expected output shown below.
print(pac.phone(77778187276))
# 4319497528477571353237713124486853003085805071461469194183444169461798912207674394836126039668752229278780705
| 2.015625 | 2 |
download_harbor.py | akshatvg/roadgain | 1 | 12763966 | <reponame>akshatvg/roadgain
import osmnx as ox
import folium
import pickle

# Download the drivable street network around Baltimore's Inner Harbor.
G = ox.graph_from_address('Inner Harbor, Baltimore, Maryland, USA', network_type='drive')
# Persist the graph.  Use a context manager so the file handle is closed
# deterministically (the original passed an anonymous open() handle to
# pickle.dump, leaking the descriptor until garbage collection).
with open('harbor.pkl', 'wb') as pkl_file:
    pickle.dump(G, pkl_file)
# Render an interactive Leaflet map of the network and save it as HTML.
graph_map = ox.plot_graph_folium(G, popup_attribute='name', edge_width=2)
graph_map.save('harbor.html')
tests/utilities/test_sha3.py | Arachnid/web3.py | 4 | 12763967 | <gh_stars>1-10
from __future__ import unicode_literals
import pytest
@pytest.mark.parametrize(
    'value,expected,encoding',
    (
        (
            '',
            '0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470',
            None,
        ),
        (
            'test123',
            '0xf81b517a242b218999ec8eec0ea6e2ddbef2a367a14e93f4a32a39e260f686ad',
            None,
        ),
        (
            'test(int)',
            '0xf4d03772bec1e62fbe8c5691e1a9101e520e8f8b5ca612123694632bf3cb51b1',
            None,
        ),
        # encoding='hex' makes the input be interpreted as hex bytes,
        # giving a different digest than the same text hashed literally
        # (compare the next case).
        (
            '0x80',
            '0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421',
            'hex',
        ),
        (
            '0x80',
            '0x6b03a5eef7706e3fb52a61c19ab1122fad7237726601ac665bd4def888f0e4a0',
            None,
        ),
        (
            '0x3c9229289a6125f7fdf1885a77bb12c37a8d3b4962d936f7e3084dece32a3ca1',
            '0x82ff40c0a986c6a5cfad4ddf4c3aa6996f1a7837f9c398e17e5de5cbd5a12b28',
            'hex',
        )
    )
)
def test_sha3(web3, value, expected, encoding):
    """Check web3.sha3 against known digests for text and hex inputs.

    *web3* is a fixture providing a connected Web3 instance.
    """
    actual = web3.sha3(value, encoding=encoding)
    assert expected == actual
| 2.046875 | 2 |
venvBlog/Lib/site-packages/enchant/__init__.py | msidargo/msidargo_blog | 0 | 12763968 | # pyenchant
#
# Copyright (C) 2004-2011, <NAME>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
# In addition, as a special exception, you are
# given permission to link the code of this program with
# non-LGPL Spelling Provider libraries (eg: a MSFT Office
# spell checker backend) and distribute linked combinations including
# the two. You must obey the GNU Lesser General Public License in all
# respects for all of the code used other than said providers. If you modify
# this file, you may extend this exception to your version of the
# file, but you are not obligated to do so. If you do not wish to
# do so, delete this exception statement from your version.
#
"""
enchant: Access to the enchant spellchecking library
=====================================================
This module provides several classes for performing spell checking
via the Enchant spellchecking library. For more details on Enchant,
visit the project website:
https://abiword.github.io/enchant/
Spellchecking is performed using 'Dict' objects, which represent
a language dictionary. Their use is best demonstrated by a quick
example::
>>> import enchant
>>> d = enchant.Dict("en_US") # create dictionary for US English
>>> d.check("enchant")
True
>>> d.check("enchnt")
False
>>> d.suggest("enchnt")
['enchant', 'enchants', 'enchanter', 'penchant', 'incant', 'enchain', 'enchanted']
Languages are identified by standard string tags such as "en" (English)
and "fr" (French). Specific language dialects can be specified by
including an additional code - for example, "en_AU" refers to Australian
English. The later form is preferred as it is more widely supported.
To check whether a dictionary exists for a given language, the function
'dict_exists' is available. Dictionaries may also be created using the
function 'request_dict'.
A finer degree of control over the dictionaries and how they are created
can be obtained using one or more 'Broker' objects. These objects are
responsible for locating dictionaries for a specific language.
Note that unicode strings are expected throughout the entire API.
Bytestrings should not be passed into any function.
Errors that occur in this module are reported by raising subclasses
of 'Error'.
"""
# Words in the module docstring that are deliberately misspelled (used as
# spellchecking examples) and must be skipped by the doc-checking pass.
_DOC_ERRORS = ["enchnt", "enchnt", "incant", "fr"]
__version__ = "3.2.0"
import os
import warnings
# The C-library binding is optional: when PYENCHANT_IGNORE_MISSING_LIB is
# set in the environment, a failed import leaves _e as None so the module
# stays importable (objects then skip creating their C counterparts).
try:
    from enchant import _enchant as _e
except ImportError:
    if not os.environ.get("PYENCHANT_IGNORE_MISSING_LIB", False):
        raise
    _e = None
from enchant.errors import Error, DictNotFoundError
from enchant.utils import get_default_language
from enchant.pypwl import PyPWL
class ProviderDesc:
    """Describes an Enchant spellchecking provider.

    Attributes:
        name: internal provider name (e.g. "aspell")
        desc: human-readable description (e.g. "Aspell Provider")
        file: location of the library implementing the provider
    """

    _DOC_ERRORS = ["desc"]

    def __init__(self, name, desc, file):
        self.name = name
        self.desc = desc
        self.file = file

    def __str__(self):
        return "<Enchant: %s>" % self.desc

    def __repr__(self):
        return str(self)

    def __eq__(self, pd):
        """Two provider descriptions are equal when all three fields match."""
        return (self.name, self.desc, self.file) == (pd.name, pd.desc, pd.file)

    def __hash__(self):
        """Hash derived from the concatenation of the three fields."""
        return hash(self.name + self.desc + self.file)
class _EnchantObject:
    """Base class for enchant objects.

    Wraps a pointer to an underlying C-library object in the attribute
    ``_this``.  ``_check_this()`` guards against use after the pointer
    has been freed, and pickling deliberately drops the pointer because
    C objects must not be shared between Python objects.
    """

    def __init__(self):
        """_EnchantObject constructor."""
        self._this = None
        # When the C library could not be imported, _e is None and we
        # skip creating the underlying object so the module stays usable
        # in dummy mode.
        if _e is not None:
            self._init_this()

    def _check_this(self, msg=None):
        """Raise Error if the underlying C-library object has been freed."""
        if self._this is not None:
            return
        if msg is None:
            msg = "%s unusable: the underlying C-library object has been freed."
            msg = msg % (self.__class__.__name__,)
        raise Error(msg)

    def _init_this(self):
        """Create the underlying C-library object; subclasses must override."""
        raise NotImplementedError

    def _raise_error(self, default="Unspecified Error", eclass=Error):
        """Raise *eclass* with an error message.

        Subclasses override this to pull an error indication from the
        underlying API when possible; the base implementation always
        raises with *default*.
        """
        raise eclass(default)
    _raise_error._DOC_ERRORS = ["eclass"]

    def __getstate__(self):
        """Pickle support: mask the C pointer, which must not be shared."""
        state = dict(self.__dict__)
        state["_this"] = None
        return state

    def __setstate__(self, state):
        """Restore attributes and recreate the underlying C object."""
        self.__dict__.update(state)
        self._init_this()
class Broker(_EnchantObject):
    """Broker object for the Enchant spellchecker.

    Broker objects are responsible for locating and managing dictionaries.
    Unless custom functionality is required, there is no need to use Broker
    objects directly. The 'enchant' module provides a default broker object
    so that 'Dict' objects can be created directly.

    The most important methods of this class include:

    * :py:meth:`dict_exists`: check existence of a specific language dictionary
    * :py:meth:`request_dict`: obtain a dictionary for specific language
    * :py:meth:`set_ordering`: specify which dictionaries to try for a given language.
    """
    def __init__(self, ):
        """Broker object constructor.

        This method is the constructor for the 'Broker' object. No
        arguments are required.
        """
        super().__init__()
    def _init_this(self):
        # Acquire the C-level broker handle; a falsy return indicates failure.
        self._this = _e.broker_init()
        if not self._this:
            raise Error("Could not initialise an enchant broker.")
        # Reference counts of C-level dict pointers handed out by this
        # broker, keyed by pointer: {dict_ptr: live_reference_count}.
        self._live_dicts = {}
    def __del__(self):
        """Broker object destructor."""
        # Calling free() might fail if python is shutting down
        try:
            self._free()
        except (AttributeError, TypeError):
            pass
    def __getstate__(self):
        # _live_dicts holds raw C pointers, which cannot survive pickling;
        # it is rebuilt by _init_this() on unpickle.
        state = super().__getstate__()
        state.pop("_live_dicts")
        return state
    def _raise_error(self, default="Unspecified Error", eclass=Error):
        """Overrides _EnchantObject._raise_error to check broker errors."""
        err = _e.broker_get_error(self._this)
        if err == "" or err is None:
            raise eclass(default)
        raise eclass(err.decode())
    def _free(self):
        """Free system resource associated with a Broker object.

        This method can be called to free the underlying system resources
        associated with a Broker object. It is called automatically when
        the object is garbage collected. If called explicitly, the
        Broker and any associated Dict objects must no longer be used.
        """
        if self._this is not None:
            # During shutdown, this finalizer may be called before
            # some Dict finalizers. Ensure all pointers are freed.
            for (dict, count) in list(self._live_dicts.items()):
                while count:
                    self._free_dict_data(dict)
                    count -= 1
            _e.broker_free(self._this)
            self._this = None
    def request_dict(self, tag=None):
        """Request a Dict object for the language specified by <tag>.

        This method constructs and returns a Dict object for the
        requested language. 'tag' should be a string of the appropriate
        form for specifying a language, such as "fr" (French) or "en_AU"
        (Australian English). The existence of a specific language can
        be tested using the 'dict_exists' method.

        If <tag> is not given or is None, an attempt is made to determine
        the current language in use. If this cannot be determined, Error
        is raised.

        .. note::
            this method is functionally equivalent to calling the Dict()
            constructor and passing in the <broker> argument.
        """
        return Dict(tag, self)
    request_dict._DOC_ERRORS = ["fr"]
    def _request_dict_data(self, tag):
        """Request raw C pointer data for a dictionary.

        This method call passes on the call to the C library, and does
        some internal bookkeeping (incrementing the per-pointer reference
        count in self._live_dicts).
        """
        self._check_this()
        new_dict = _e.broker_request_dict(self._this, tag.encode())
        if new_dict is None:
            e_str = "Dictionary for language '%s' could not be found\n"
            e_str += "Please check https://pyenchant.github.io/pyenchant/ for details"
            self._raise_error(e_str % (tag,), DictNotFoundError)
        # The C library may hand back the same pointer for repeated
        # requests; track how many live references exist for each.
        if new_dict not in self._live_dicts:
            self._live_dicts[new_dict] = 1
        else:
            self._live_dicts[new_dict] += 1
        return new_dict
    def request_pwl_dict(self, pwl):
        """Request a Dict object for a personal word list.

        This method behaves as 'request_dict' but rather than returning
        a dictionary for a specific language, it returns a dictionary
        referencing a personal word list. A personal word list is a file
        of custom dictionary entries, one word per line.
        """
        self._check_this()
        new_dict = _e.broker_request_pwl_dict(self._this, pwl.encode())
        if new_dict is None:
            e_str = "Personal Word List file '%s' could not be loaded"
            self._raise_error(e_str % (pwl,))
        if new_dict not in self._live_dicts:
            self._live_dicts[new_dict] = 1
        else:
            self._live_dicts[new_dict] += 1
        # Build a "dead" Dict (tag=False) and splice the new pointer in.
        d = Dict(False)
        d._switch_this(new_dict, self)
        return d
    def _free_dict(self, dict):
        """Free memory associated with a dictionary.

        This method frees system resources associated with a Dict object.
        It is equivalent to calling the object's 'free' method. Once this
        method has been called on a dictionary, it must not be used again.
        """
        self._free_dict_data(dict._this)
        dict._this = None
        dict._broker = None
    def _free_dict_data(self, dict):
        """Free the underlying pointer for a dict."""
        self._check_this()
        _e.broker_free_dict(self._this, dict)
        # Decrement the reference count, dropping the entry at zero.
        self._live_dicts[dict] -= 1
        if self._live_dicts[dict] == 0:
            del self._live_dicts[dict]
    def dict_exists(self, tag):
        """Check availability of a dictionary.

        This method checks whether there is a dictionary available for
        the language specified by 'tag'. It returns True if a dictionary
        is available, and False otherwise.
        """
        self._check_this()
        val = _e.broker_dict_exists(self._this, tag.encode())
        return bool(val)
    def set_ordering(self, tag, ordering):
        """Set dictionary preferences for a language.

        The Enchant library supports the use of multiple dictionary programs
        and multiple languages. This method specifies which dictionaries
        the broker should prefer when dealing with a given language. 'tag'
        must be an appropriate language specification and 'ordering' is a
        string listing the dictionaries in order of preference. For example
        a valid ordering might be "aspell,myspell,ispell".
        The value of 'tag' can also be set to "*" to set a default ordering
        for all languages for which one has not been set explicitly.
        """
        self._check_this()
        _e.broker_set_ordering(self._this, tag.encode(), ordering.encode())
    def describe(self):
        """Return list of provider descriptions.

        This method returns a list of descriptions of each of the
        dictionary providers available. Each entry in the list is a
        ProviderDesc object.
        """
        self._check_this()
        # The C API reports results via a callback; collect them into a
        # list attribute and convert afterwards.
        self.__describe_result = []
        _e.broker_describe(self._this, self.__describe_callback)
        return [ProviderDesc(*r) for r in self.__describe_result]
    def __describe_callback(self, name, desc, file):
        """Collector callback for dictionary description.

        This method is used as a callback into the _enchant function
        'enchant_broker_describe'. It collects the given arguments in
        a tuple and appends them to the list '__describe_result'.
        """
        # Arguments arrive as bytes from the C layer; decode to str.
        name = name.decode()
        desc = desc.decode()
        file = file.decode()
        self.__describe_result.append((name, desc, file))
    def list_dicts(self):
        """Return list of available dictionaries.

        This method returns a list of dictionaries available to the
        broker. Each entry in the list is a two-tuple of the form:

            (tag, provider)

        where <tag> is the language tag for the dictionary and
        <provider> is a ProviderDesc object describing the provider
        through which that dictionary can be obtained.
        """
        self._check_this()
        self.__list_dicts_result = []
        _e.broker_list_dicts(self._this, self.__list_dicts_callback)
        return [(r[0], ProviderDesc(*r[1])) for r in self.__list_dicts_result]
    def __list_dicts_callback(self, tag, name, desc, file):
        """Collector callback for listing dictionaries.

        This method is used as a callback into the _enchant function
        'enchant_broker_list_dicts'. It collects the given arguments into
        an appropriate tuple and appends them to '__list_dicts_result'.
        """
        tag = tag.decode()
        name = name.decode()
        desc = desc.decode()
        file = file.decode()
        self.__list_dicts_result.append((tag, (name, desc, file)))
    def list_languages(self):
        """List languages for which dictionaries are available.

        This function returns a list of language tags for which a
        dictionary is available, with duplicates removed while
        preserving the order reported by list_dicts().
        """
        langs = []
        for (tag, prov) in self.list_dicts():
            if tag not in langs:
                langs.append(tag)
        return langs
    def __describe_dict(self, dict_data):
        """Get the description tuple for a dict data object.

        <dict_data> must be a C-library pointer to an enchant dictionary.
        The return value is a tuple of the form:

            (<tag>,<name>,<desc>,<file>)
        """
        # Define local callback function
        cb_result = []
        def cb_func(tag, name, desc, file):
            tag = tag.decode()
            name = name.decode()
            desc = desc.decode()
            file = file.decode()
            cb_result.append((tag, name, desc, file))
        # Actually call the describer function
        _e.dict_describe(dict_data, cb_func)
        # The callback fires exactly once per dict — presumably guaranteed
        # by the C API; TODO confirm against enchant_dict_describe docs.
        return cb_result[0]
    __describe_dict._DOC_ERRORS = ["desc"]
    def get_param(self, name):
        """Get the value of a named parameter on this broker.

        Parameters are used to provide runtime information to individual
        provider backends. See the method :py:meth:`set_param` for more details.

        .. warning::
            This method does **not** work when using the Enchant C
            library version 2.0 and above
        """
        param = _e.broker_get_param(self._this, name.encode())
        if param is not None:
            param = param.decode()
        return param
    get_param._DOC_ERRORS = ["param"]
    def set_param(self, name, value):
        """Set the value of a named parameter on this broker.

        Parameters are used to provide runtime information to individual
        provider backends.

        .. warning::
            This method does **not** work when using the Enchant C
            library version 2.0 and above
        """
        name = name.encode()
        # A None value is passed through unencoded — the C layer accepts it.
        if value is not None:
            value = value.encode()
        _e.broker_set_param(self._this, name, value)
class Dict(_EnchantObject):
    """Dictionary object for the Enchant spellchecker.

    Dictionary objects are responsible for checking the spelling of words
    and suggesting possible corrections. Each dictionary is owned by a
    Broker object, but unless a new Broker has explicitly been created
    then this will be the 'enchant' module default Broker and is of little
    interest.

    The important methods of this class include:

    * check(): check whether a word is spelled correctly
    * suggest(): suggest correct spellings for a word
    * add(): add a word to the user's personal dictionary
    * remove(): add a word to the user's personal exclude list
    * add_to_session(): add a word to the current spellcheck session
    * store_replacement(): indicate a replacement for a given word

    Information about the dictionary is available using the following
    attributes:

    * tag: the language tag of the dictionary
    * provider: a ProviderDesc object for the dictionary provider
    """
    def __init__(self, tag=None, broker=None):
        """Dict object constructor.

        A dictionary belongs to a specific language, identified by the
        string <tag>. If the tag is not given or is None, an attempt to
        determine the language currently in use is made using the 'locale'
        module. If the current language cannot be determined, Error is raised.

        If <tag> is instead given the value of False, a 'dead' Dict object
        is created without any reference to a language. This is typically
        only useful within PyEnchant itself. Any other non-string value
        for <tag> raises Error.

        Each dictionary must also have an associated Broker object which
        obtains the dictionary information from the underlying system. This
        may be specified using <broker>. If not given, the default broker
        is used.
        """
        # Initialise misc object attributes to None
        self.provider = None
        # If no tag was given, use the default language
        if tag is None:
            tag = get_default_language()
            if tag is None:
                err = "No tag specified and default language could not "
                err = err + "be determined."
                raise Error(err)
        self.tag = tag
        # If no broker was given, use the default broker
        if broker is None:
            broker = _broker
        self._broker = broker
        # Now let the superclass initialise the C-library object
        super().__init__()
    def _init_this(self):
        # Create dead object if False was given as the tag.
        # Otherwise, use the broker to get C-library pointer data.
        self._this = None
        if self.tag:
            this = self._broker._request_dict_data(self.tag)
            self._switch_this(this, self._broker)
    def __del__(self):
        """Dict object destructor."""
        # Calling free() might fail if python is shutting down
        try:
            self._free()
        except AttributeError:
            pass
    def _switch_this(self, this, broker):
        """Switch the underlying C-library pointer for this object.

        As all useful state for a Dict is stored by the underlying C-library
        pointer, it is very convenient to allow this to be switched at
        run-time. Pass a new dict data object into this method to affect
        the necessary changes. The creating Broker object (at the Python
        level) must also be provided.

        This should *never* *ever* be used by application code. It's
        a convenience for developers only, replacing the clunkier <data>
        parameter to __init__ from earlier versions.
        """
        # Free old dict data
        Dict._free(self)
        # Hook in the new stuff
        self._this = this
        self._broker = broker
        # Update object properties
        desc = self.__describe(check_this=False)
        self.tag = desc[0]
        self.provider = ProviderDesc(*desc[1:])
    _switch_this._DOC_ERRORS = ["init"]
    def _check_this(self, msg=None):
        """Extend _EnchantObject._check_this() to check Broker validity.

        It is possible for the managing Broker object to be freed without
        freeing the Dict. Thus validity checking must take into account
        self._broker._this as well as self._this.
        """
        # A dead broker invalidates this dict's pointer as well.
        if self._broker is None or self._broker._this is None:
            self._this = None
        super()._check_this(msg)
    def _raise_error(self, default="Unspecified Error", eclass=Error):
        """Overrides _EnchantObject._raise_error to check dict errors."""
        err = _e.dict_get_error(self._this)
        if err == "" or err is None:
            raise eclass(default)
        raise eclass(err.decode())
    def _free(self):
        """Free the system resources associated with a Dict object.

        This method frees underlying system resources for a Dict object.
        Once it has been called, the Dict object must no longer be used.
        It is called automatically when the object is garbage collected.
        """
        if self._this is not None:
            # The broker may have been freed before the dict.
            # It will have freed the underlying pointers already.
            if self._broker is not None and self._broker._this is not None:
                self._broker._free_dict(self)
    def check(self, word):
        """Check spelling of a word.

        This method takes a word in the dictionary language and returns
        True if it is correctly spelled, and false otherwise.
        """
        self._check_this()
        # Enchant asserts that the word is non-empty.
        # Check it up-front to avoid nasty warnings on stderr.
        if len(word) == 0:
            raise ValueError("can't check spelling of empty string")
        val = _e.dict_check(self._this, word.encode())
        # 0 => correctly spelled, positive => miss-spelled,
        # negative => error reported by the C library.
        if val == 0:
            return True
        if val > 0:
            return False
        self._raise_error()
    def suggest(self, word):
        """Suggest possible spellings for a word.

        This method tries to guess the correct spelling for a given
        word, returning the possibilities in a list.
        """
        self._check_this()
        # Enchant asserts that the word is non-empty.
        # Check it up-front to avoid nasty warnings on stderr.
        if len(word) == 0:
            raise ValueError("can't suggest spellings for empty string")
        suggs = _e.dict_suggest(self._this, word.encode())
        # Suggestions come back as bytes; decode each to str.
        return [w.decode() for w in suggs]
    def add(self, word):
        """Add a word to the user's personal word list."""
        self._check_this()
        _e.dict_add(self._this, word.encode())
    def remove(self, word):
        """Add a word to the user's personal exclude list."""
        self._check_this()
        _e.dict_remove(self._this, word.encode())
    def add_to_pwl(self, word):
        """Add a word to the user's personal word list.

        Deprecated alias for :py:meth:`add`; emits a DeprecationWarning.
        """
        warnings.warn(
            "Dict.add_to_pwl is deprecated, please use Dict.add",
            category=DeprecationWarning,
            stacklevel=2,
        )
        self._check_this()
        _e.dict_add_to_pwl(self._this, word.encode())
    def add_to_session(self, word):
        """Add a word to the session personal list."""
        self._check_this()
        _e.dict_add_to_session(self._this, word.encode())
    def remove_from_session(self, word):
        """Add a word to the session exclude list."""
        self._check_this()
        _e.dict_remove_from_session(self._this, word.encode())
    def is_added(self, word):
        """Check whether a word is in the personal word list."""
        self._check_this()
        return _e.dict_is_added(self._this, word.encode())
    def is_removed(self, word):
        """Check whether a word is in the personal exclude list."""
        self._check_this()
        return _e.dict_is_removed(self._this, word.encode())
    def store_replacement(self, mis, cor):
        """Store a replacement spelling for a miss-spelled word.

        This method makes a suggestion to the spellchecking engine that the
        miss-spelled word <mis> is in fact correctly spelled as <cor>. Such
        a suggestion will typically mean that <cor> appears early in the
        list of suggested spellings offered for later instances of <mis>.
        """
        if not mis:
            raise ValueError("can't store replacement for an empty string")
        if not cor:
            raise ValueError("can't store empty string as a replacement")
        self._check_this()
        _e.dict_store_replacement(self._this, mis.encode(), cor.encode())
    store_replacement._DOC_ERRORS = ["mis", "mis"]
    def __describe(self, check_this=True):
        """Return a tuple describing the dictionary.

        This method returns a four-element tuple describing the underlying
        spellchecker system providing the dictionary. It will contain the
        following strings:

        * language tag
        * name of dictionary provider
        * description of dictionary provider
        * dictionary file

        Direct use of this method is not recommended - instead, access this
        information through the 'tag' and 'provider' attributes.
        """
        # check_this=False is used from _switch_this(), where the broker
        # pointer checks would reject the partially-initialised state.
        if check_this:
            self._check_this()
        _e.dict_describe(self._this, self.__describe_callback)
        return self.__describe_result
    def __describe_callback(self, tag, name, desc, file):
        """Collector callback for dictionary description.

        This method is used as a callback into the _enchant function
        'enchant_dict_describe'. It collects the given arguments in
        a tuple and stores them in the attribute '__describe_result'.
        """
        # Arguments arrive as bytes from the C layer; decode to str.
        tag = tag.decode()
        name = name.decode()
        desc = desc.decode()
        file = file.decode()
        self.__describe_result = (tag, name, desc, file)
class DictWithPWL(Dict):
    """Dictionary with separately-managed personal word list.

    .. note::
        As of version 1.4.0, enchant manages a per-user pwl and
        exclude list. This class is now only needed if you want
        to explicitly maintain a separate word list in addition to
        the default one.

    This class behaves as the standard Dict class, but also manages a
    personal word list stored in a separate file. The file must be
    specified at creation time by the 'pwl' argument to the constructor.
    Words added to the dictionary are automatically appended to the pwl file.

    A personal exclude list can also be managed, by passing another filename
    to the constructor in the optional 'pel' argument. If this is not given,
    requests to exclude words are ignored.

    If either 'pwl' or 'pel' are None, an in-memory word list is used.
    This will prevent calls to add() and remove() from affecting the user's
    default word lists.

    The Dict object managing the PWL is available as the 'pwl' attribute.
    The Dict object managing the PEL is available as the 'pel' attribute.

    To create a DictWithPWL from the user's default language, use None
    as the 'tag' argument.
    """
    _DOC_ERRORS = ["pel", "pel", "PEL", "pel"]
    def __init__(self, tag, pwl=None, pel=None, broker=None):
        """DictWithPWL constructor.

        The argument 'pwl', if not None, names a file containing the
        personal word list. If this file does not exist, it is created
        with default permissions.

        The argument 'pel', if not None, names a file containing the personal
        exclude list. If this file does not exist, it is created with
        default permissions.
        """
        super().__init__(tag, broker)
        if pwl is not None:
            if not os.path.exists(pwl):
                # Create an empty PWL file. Use a context manager so the
                # handle is closed even if an error occurs, instead of the
                # previous open()/close()/del sequence.
                with open(pwl, "wt"):
                    pass
            self.pwl = self._broker.request_pwl_dict(pwl)
        else:
            # No file given: manage the word list purely in memory.
            self.pwl = PyPWL()
        if pel is not None:
            if not os.path.exists(pel):
                # Create an empty PEL file, again via a context manager.
                with open(pel, "wt"):
                    pass
            self.pel = self._broker.request_pwl_dict(pel)
        else:
            self.pel = PyPWL()
    def _check_this(self, msg=None):
        """Extend Dict._check_this() to check PWL validity."""
        # If either managed list has been dropped, this object as a
        # whole is unusable; free everything before the base check.
        if self.pwl is None:
            self._free()
        if self.pel is None:
            self._free()
        super()._check_this(msg)
        self.pwl._check_this(msg)
        self.pel._check_this(msg)
    def _free(self):
        """Extend Dict._free() to free the PWL as well."""
        if self.pwl is not None:
            self.pwl._free()
            self.pwl = None
        if self.pel is not None:
            self.pel._free()
            self.pel = None
        super()._free()
    def check(self, word):
        """Check spelling of a word.

        This method takes a word in the dictionary language and returns
        True if it is correctly spelled, and false otherwise. It checks
        both the dictionary and the personal word list.

        Precedence: the exclude list overrides everything, then the
        personal word list, then the main dictionary.
        """
        if self.pel.check(word):
            return False
        if self.pwl.check(word):
            return True
        if super().check(word):
            return True
        return False
    def suggest(self, word):
        """Suggest possible spellings for a word.

        This method tries to guess the correct spelling for a given
        word, returning the possibilities in a list. PWL suggestions are
        appended after the main dictionary's, and any suggestion present
        in the exclude list is filtered out.
        """
        suggs = super().suggest(word)
        suggs.extend([w for w in self.pwl.suggest(word) if w not in suggs])
        # Iterate backwards so deletions don't shift unvisited indices.
        for i in range(len(suggs) - 1, -1, -1):
            if self.pel.check(suggs[i]):
                del suggs[i]
        return suggs
    def add(self, word):
        """Add a word to the associated personal word list.

        This method adds the given word to the personal word list, and
        automatically saves the list to disk. It also removes the word
        from the exclude list so the two stay consistent.
        """
        self._check_this()
        self.pwl.add(word)
        self.pel.remove(word)
    def remove(self, word):
        """Add a word to the associated exclude list."""
        self._check_this()
        self.pwl.remove(word)
        self.pel.add(word)
    def add_to_pwl(self, word):
        """Add a word to the associated personal word list.

        This method adds the given word to the personal word list, and
        automatically saves the list to disk.
        """
        self._check_this()
        self.pwl.add_to_pwl(word)
        self.pel.remove(word)
    def is_added(self, word):
        """Check whether a word is in the personal word list."""
        self._check_this()
        return self.pwl.is_added(word)
    def is_removed(self, word):
        """Check whether a word is in the personal exclude list."""
        self._check_this()
        return self.pel.is_added(word)
## Create a module-level default broker object, and make its important
## methods available at the module level.
_broker = Broker()
# Bound methods of the default broker, re-exported as module-level
# convenience functions so callers need not construct a Broker.
request_dict = _broker.request_dict
request_pwl_dict = _broker.request_pwl_dict
dict_exists = _broker.dict_exists
list_dicts = _broker.list_dicts
list_languages = _broker.list_languages
get_param = _broker.get_param
set_param = _broker.set_param
# Expose the "get_version" function.
def get_enchant_version():
    """Return the version string reported by the underlying enchant library."""
    raw_version = _e.get_version()
    return raw_version.decode()
# Expose the "set_prefix_dir" function.
def set_prefix_dir(path):
    """Set the prefix used by the Enchant library to find its plugins.

    Called automatically when the Python library is imported when
    required.
    """
    result = _e.set_prefix_dir(path)
    return result
set_prefix_dir._DOC_ERRORS = ["plugins"]
def get_user_config_dir():
    """Return the path that will be used by some Enchant providers
    to look for custom dictionaries.
    """
    raw_path = _e.get_user_config_dir()
    return raw_path.decode()
| 2.046875 | 2 |
ssk/helpers/__init__.py | jobliz/solid-state-kinetics | 2 | 12763969 | from api import *
from excel import *
| 1.015625 | 1 |
base_automation/sql_capabilities/my_sql_server.py | Yossira/base-automation | 0 | 12763970 | <gh_stars>0
import mysql.connector
from base_automation import report
class MySqlServer:
    """Thin helper around a mysql.connector connection for test automation.

    Opens a connection and cursor on construction. Query helpers print
    any database error, close the connection, and fail the test by
    raising AssertionError.

    Fix over the previous version: error paths used a bare
    ``assert False``, which is stripped under ``python -O`` — ``get_data``
    would then silently fall through and return None. The helpers now
    raise AssertionError explicitly (same exception type callers saw
    before), chained to the original error for easier debugging.
    """
    @report.utils.step("initiate my sql_capabilities server connection")
    def __init__(self, user='root', password='password', host='127.0.0.1', database='sys'):
        # NOTE(review): the defaults are placeholder credentials; real
        # deployments should pass explicit values.
        self._connection = mysql.connector.connect(user=user, password=password,
                                                   host=host,
                                                   database=database)
        self._cursor = self._connection.cursor()
        self._data_result = None
    @report.utils.step("get data")
    def get_data(self, query, close_connection=True):
        """Execute a query and return all fetched rows as a list.

        Closes the connection afterwards unless close_connection=False.
        Raises AssertionError (chained to the original exception) on failure.
        """
        try:
            self._cursor.execute(query)
            self._data_result = list(self._cursor.fetchall())
            if close_connection:
                self.close_connection()
            return self._data_result
        except Exception as e:
            print(e)
            self.close_connection()
            # Explicit raise instead of `assert False`: asserts are
            # removed under `python -O`, which made this path a no-op.
            raise AssertionError("get_data failed") from e
    @report.utils.step("insert data")
    def insert_data(self, query, data, is_tuple=True, close_connection=True):
        """Execute a data-modifying statement and commit.

        When is_tuple is True, `data` is a sequence of parameter tuples
        and executemany() is used; otherwise a single execute() call is
        made. Raises AssertionError on failure.
        """
        try:
            if is_tuple:
                self._cursor.executemany(query, data)
            else:
                self._cursor.execute(query, data)
            self._connection.commit()
            if close_connection:
                self.close_connection()
        except Exception as e:
            print(e)
            self.close_connection()
            raise AssertionError("insert_data failed") from e
    @report.utils.step("close connection")
    def close_connection(self):
        """Close the underlying connection; raises AssertionError on failure."""
        try:
            self._connection.close()
        except Exception as e:
            print(e)
            raise AssertionError("close_connection failed") from e
| 2.46875 | 2 |
maza/modules/generic/bluetooth/btle_write.py | ArturSpirin/maza | 2 | 12763971 | from maza.core.exploit import *
from maza.core.bluetooth.btle_client import BTLEClient
class Exploit(BTLEClient):
    """BTLE module that writes user-supplied bytes to a GATT characteristic
    on a target Bluetooth Low Energy device."""
    # Module metadata consumed by the framework's module loader/display.
    __info__ = {
        "name": "Bluetooth LE Write",
        "description": "Writes data to target Bluetooth Low Energy device to given "
                       "characteristic.",
        "authors": (
            "<NAME> <marcin[at]threat9.com>",  # routersploit module
        ),
        "references": (
            "https://www.evilsocket.net/2017/09/23/This-is-not-a-post-about-BLE-introducing-BLEAH/",
        ),
    }
    # Framework option descriptors (defaults shown in the first argument).
    target = OptMAC("", "Target MAC address")
    char = OptString("", "Characteristic")
    data = OptString("41424344", "Data (in hex format)")
    buffering = OptBool(True, "Buffering enabled: true/false. Results in real time.")
    def run(self):
        """Decode the hex payload, scan for the target and write the bytes
        to the configured characteristic."""
        # Payload is configured as a hex string; reject malformed input early.
        try:
            data = bytes.fromhex(self.data)
        except ValueError:
            print_error("Data is not in valid format")
            return
        # btle_scan (from BTLEClient) returns device handles for the target;
        # an empty/falsy result means the device was not found.
        res = self.btle_scan(self.target)
        if res:
            device = res[0]
            device.write(self.char, data)
| 2.84375 | 3 |
dword/utils.py | deepword18/dword | 3 | 12763972 | # AUTOGENERATED! DO NOT EDIT! File to edit: 01_utils.ipynb (unless otherwise specified).
__all__ = ['to_hhmmss', 'to_secs', 'display_video', 'check_resolution', 'check_fps', 'play_audio',
'change_audio_format', 'trim_audio', 'change_volume', 'loop_audio', 'concat_audios']
# Internal Cell
from collections import defaultdict
import os
import subprocess
import time
from pathlib import Path
from subprocess import CalledProcessError
from typing import Dict, Union
import cv2
from fastcore.test import *
import imageio
from IPython.core.display import Video
from IPython.display import Audio
from nbdev.showdoc import *
from pydub import AudioSegment
from tqdm import tqdm
import numpy as np
import platform
# Internal Cell
class URLs:
    """Endpoint URL constants for the Deepword API used by this module."""
    base = 'https://login.deepword.co:3000/api'
    credits_url = f'{base}/api_get_credits/'
    list_vids_url = f'{base}/list_video_api/'
    txt2speech_url = f'{base}/api_text_to_speech/'
    download_vid_url = f'{base}/api_download_video/'
    download_yt_vid_url = f'{base}/api_download_youtube_video/'
    generate_vid_url = f'{base}/generate_video_api'
    validate_token_url = f'{base}/check_apikey'
    api_get_audio_sample = f'{base}/api_get_audio_sample'
    api_get_video_actors = f'{base}/api_get_video_actors'
    # Trimming is served from a different host than the other endpoints.
    trim_video = 'https://youtube.deepword.co:5000/api_trim_video'
# Internal Cell
class AzureDicts:
    """Azure text-to-speech lookup tables: language names, locale codes, and neural voices."""
    # Human-readable language identifiers, index-aligned with `codes` below.
    langs = ["arabic_egypt", "arabic_saudi_arabia", "bulgarian", "catalan", "czech", "welsh", "danish", "german_austria",
             "german_switzerland", "german_germany", "greek", "english_australia", "english_canada", "english_uk",
             "english_hongkong", "english_ireland", "english_india", "english_new_zealand", "english_philippines",
             "english_singapore", "english_us", "english_south_africa", "spanish_argentina", "spanish_colombia",
             "spanish_spain", "spanish_mexico", "spanish_us", "estonian", "finnish", "french_belgium", "french_canada",
             "french_switzerland", "french_france", "irish", "gujarati", "hebrew", "hindi", "croatian", "hungarian",
             "indonesian", "italian", "japanese", "korean", "lithuanian", "latvia", "marathi", "malay", "maltese",
             "norwegian", "dutch_belgium", "dutch_netherlands", "polish", "portuguese_brazil", "portuguese_portugal",
             "romanian", "russian", "slovak", "slovanian", "swedish", "swahili", "tamil", "telugu", "thai", "turkish",
             "ukranian", "urdu", "vietnamese", "chinese_mandarin", "chinese_cantonese", "chinese_taiwanese"]
    # Azure BCP-47 locale codes, index-aligned with `langs`.
    codes = ["ar-EG","ar-SA","bg-BG","ca-ES","cs-CZ","cy-GB","da-DK","de-AT",
             "de-CH","de-DE","el-GR","en-AU","en-CA","en-GB",
             "en-HK","en-IE","en-IN","en-NZ","en-PH",
             "en-SG","en-US","en-ZA","es-AR","es-CO",
             "es-ES","es-MX","es-US","et-EE","fi-FI","fr-BE","fr-CA",
             "fr-CH","fr-FR","ga-IE","gu-IN","he-IL","hi-IN","hr-HR","hu-HU",
             "id-ID","it-IT","ja-JP","ko-KR","lt-LT","lv-LV","mr-IN","ms-MY","mt-MT",
             "nb-NO","nl-BE","nl-NL","pl-PL","pt-BR","pt-PT",
             "ro-RO","ru-RU","sk-SK","sl-SI","sv-SE","sw-KE","ta-IN","te-IN","th-TH","tr-TR",
             "uk-UA","ur-PK","vi-VN","zh-CN","zh-HK","zh-TW"]
    # language name -> locale code
    lang2code = dict(zip(langs, codes))
    # Every available neural voice, formatted "<locale>-<VoiceName> <Gender>".
    all_speakers = ["ar-EG-SalmaNeural Female","ar-EG-ShakirNeural Male","ar-SA-HamedNeural Male","ar-SA-ZariyahNeural Female","bg-BG-BorislavNeural Male",
                    "bg-BG-KalinaNeural Female","ca-ES-JoanaNeural Female","ca-ES-AlbaNeural Female","ca-ES-EnricNeural Male","cs-CZ-AntoninNeural Male",
                    "cs-CZ-VlastaNeural Female","cy-GB-AledNeural Male","cy-GB-NiaNeural Female","da-DK-ChristelNeural Female","da-DK-JeppeNeural Male",
                    "de-AT-IngridNeural Female","de-AT-JonasNeural Male","de-CH-JanNeural Male","de-CH-LeniNeural Female","de-DE-KatjaNeural Female",
                    "de-DE-ConradNeural Male","el-GR-AthinaNeural Female","el-GR-NestorasNeural Male","en-AU-NatashaNeural Female","en-AU-WilliamNeural Male",
                    "en-CA-ClaraNeural Female","en-CA-LiamNeural Male","en-GB-LibbyNeural Female","en-GB-MiaNeural Female","en-GB-RyanNeural Male",
                    "en-HK-SamNeural Male","en-HK-YanNeural Female","en-IE-ConnorNeural Male","en-IE-EmilyNeural Female","en-IN-NeerjaNeural Female",
                    "en-IN-PrabhatNeural Male","en-NZ-MitchellNeural Male","en-NZ-MollyNeural Female","en-PH-JamesNeural Male","en-PH-RosaNeural Female",
                    "en-SG-LunaNeural Female","en-SG-WayneNeural Male","en-US-JennyNeural Female","en-US-JennyMultilingualNeural Female","en-US-GuyNeural Male",
                    "en-US-AriaNeural Female","en-US-AmberNeural Female","en-US-AnaNeural Female","en-US-AshleyNeural Female","en-US-BrandonNeural Male",
                    "en-US-ChristopherNeural Male","en-US-CoraNeural Female","en-US-ElizabethNeural Female","en-US-EricNeural Male","en-US-JacobNeural Male",
                    "en-US-MichelleNeural Female","en-US-MonicaNeural Female","en-ZA-LeahNeural Female","en-ZA-LukeNeural Male","es-AR-ElenaNeural Female",
                    "es-AR-TomasNeural Male","es-CO-GonzaloNeural Male","es-CO-SalomeNeural Female","es-ES-AlvaroNeural Male","es-ES-ElviraNeural Female",
                    "es-MX-DaliaNeural Female","es-MX-JorgeNeural Male","es-US-AlonsoNeural Male","es-US-PalomaNeural Female","et-EE-AnuNeural Female",
                    "et-EE-KertNeural Male","fi-FI-SelmaNeural Female","fi-FI-HarriNeural Male","fi-FI-NooraNeural Female","fr-BE-CharlineNeural Female",
                    "fr-BE-GerardNeural Male","fr-CA-SylvieNeural Female","fr-CA-AntoineNeural Male","fr-CA-JeanNeural Male","fr-CH-ArianeNeural Female",
                    "fr-CH-FabriceNeural Male","fr-FR-DeniseNeural Female","fr-FR-HenriNeural Male","ga-IE-ColmNeural Male","ga-IE-OrlaNeural Female",
                    "gu-IN-DhwaniNeural Female","gu-IN-NiranjanNeural Male","he-IL-AvriNeural Male","he-IL-HilaNeural Male","hi-IN-MadhurNeural Male",
                    "hi-IN-SwaraNeural Female","hr-HR-GabrijelaNeural Female","hr-HR-SreckoNeural Male","hu-HU-NoemiNeural Female","hu-HU-TamasNeural Male",
                    "id-ID-ArdiNeural Female","id-ID-GadisNeural Male","it-IT-IsabellaNeural Female","it-IT-DiegoNeural Male","it-IT-ElsaNeural Female",
                    "ja-JP-NanamiNeural Female","ja-JP-KeitaNeural Male","ko-KR-SunHiNeural Female","ko-KR-InJoonNeural Male","lt-LT-LeonasNeural Male",
                    "lt-LT-OnaNeural Female","lv-LV-EveritaNeural Female","lv-LV-NilsNeural Male","mr-IN-AarohiNeural Female","mr-IN-ManoharNeural Male",
                    "ms-MY-OsmanNeural Male","ms-MY-YasminNeural Female","mt-MT-GraceNeural Female","mt-MT-JosephNeural Male","nb-NO-PernilleNeural Female",
                    "nb-NO-FinnNeural Male","nb-NO-IselinNeural Female","nl-BE-ArnaudNeural Male","nl-BE-DenaNeural Female","nl-NL-ColetteNeural Female",
                    "nl-NL-FennaNeural Female","nl-NL-MaartenNeural Male","pl-PL-AgnieszkaNeural Female","pl-PL-MarekNeural Male","pl-PL-ZofiaNeural Female",
                    "pt-BR-FranciscaNeural Female","pt-BR-AntonioNeural Male","pt-PT-DuarteNeural Male","pt-PT-FernandaNeural Female","pt-PT-RaquelNeural Female",
                    "ro-RO-AlinaNeural Female","ro-RO-EmilNeural Male","ru-RU-SvetlanaNeural Female","ru-RU-DariyaNeural Female","ru-RU-DmitryNeural Male",
                    "sk-SK-LukasNeural Male","sk-SK-ViktoriaNeural Female","sl-SI-PetraNeural Female","sl-SI-RokNeural Male","sv-SE-SofieNeural Female",
                    "sv-SE-HilleviNeural Female","sv-SE-MattiasNeural Male","sw-KE-RafikiNeural Male","sw-KE-ZuriNeural Female","ta-IN-PallaviNeural Female",
                    "ta-IN-ValluvarNeural Male","te-IN-MohanNeural Male","te-IN-ShrutiNeural Female","th-TH-PremwadeeNeural Female","th-TH-AcharaNeural Female",
                    "th-TH-NiwatNeural Male","tr-TR-AhmetNeural Male","tr-TR-EmelNeural Female","uk-UA-OstapNeural Male","uk-UA-PolinaNeural Female",
                    "ur-PK-AsadNeural Male","ur-PK-UzmaNeural Female","vi-VN-HoaiMyNeural Female","vi-VN-NamMinhNeural Male","zh-CN-XiaoxiaoNeural Female",
                    "zh-CN-YunyangNeural Male","zh-CN-XiaohanNeural Female","zh-CN-XiaomoNeural Female","zh-CN-XiaoruiNeural Female","zh-CN-XiaoxuanNeural Female",
                    "zh-CN-XiaoyouNeural Female","zh-CN-YunxiNeural Male","zh-CN-YunyeNeural Male","zh-HK-HiuMaanNeural Female","zh-HK-HiuGaaiNeural Female",
                    "zh-HK-WanLungNeural Male","zh-TW-HsiaoChenNeural Female","zh-TW-HsiaoYuNeural Female","zh-TW-YunJhe<NAME>"]
    # language name -> voices whose name contains that language's locale code.
    speakers = defaultdict(list)
    # NOTE: these loop variables (lang, code, s, relevant_speakers) also become
    # class attributes because the loop executes in the class body.
    for lang, code in lang2code.items():
        relevant_speakers = []
        for s in all_speakers:
            if code in s: relevant_speakers.append(s)
        speakers[lang] = relevant_speakers
# Internal Cell
# class TextDicts:
# langs = ["arabic", "bengali", "chinese", "czech", "danish", "dutch", "english_aus", "english_ind",
# "english_uk", "english_us", "filipino", "finnish", "french_canada", "french", "german",
# "greek", "gujarati", "hindi", "hungarian", "indonesian", "italian", "japanese", "kannada",
# "korean", "malayalam", "mandarin", "mandarin_taiwan", "norwegian", "polish", "portuguese_brazil", "portuguese",
# "russian", "slovak", "spanish", "swedish", "tamil", "telugu", "thai", "turkish", "ukrainian"]
# codes = ["ar-XA", "bn-IN", "yue-HK", "cs-CZ", "da-DK", "nl-NL", "en-AU", "en-IN", "en-GB",
# "en-US", "fil-PH", "fi-FI", "fr-CA", "fr-FR", "de-DE", "el-GR", "gu-IN", "hi-IN",
# "hu-HU", "id-ID", "it-IT", "ja-JP", "kn-IN", "ko-KR", "ml-IN", "cmn-CN", "cmn-TW", "nb-NO",
# "pl-PL", "pt-BR", "pt-PT", "ru-RU", "sk-SK", "es-ES", "sv-SE", "ta-IN", "te-IN",
# "th-TH", "tr-TR", "uk-UA", "vi-VN"]
# lang2code = dict(zip(langs, codes))
# speakers = {
# "arabic": ["ar-XA-Wavenet-A FEMALE","ar-XA-Wavenet-B MALE","ar-XA-Wavenet-C MALE","ar-XA-Standard-A FEMALE","ar-XA-Standard-B MALE","ar-XA-Standard-C MALE","ar-XA-Standard-D FEMALE"],
# "bengali": ["bn-IN-Standard-A FEMALE","bn-IN-Standard-B MALE"],
# "chinese": ["yue-HK-Standard-A FEMALE","yue-HK-Standard-B MALE","yue-HK-Standard-C FEMALE","yue-HK-Standard-D MALE"],
# "czech": ["cs-CZ-Wavenet-A FEMALE","cs-CZ-Standard-A FEMALE"],
# "danish": ["da-DK-Wavenet-A FEMALE","da-DK-Wavenet-C MALE","da-DK-Wavenet-D FEMALE","da-DK-Wavenet-E FEMALE","da-DK-Standard-A FEMALE","da-DK-Standard-C MALE","da-DK-Standard-D FEMALE","da-DK-Standard-E FEMALE"],
# "dutch": ["nl-NL-Wavenet-A FEMALE","nl-NL-Wavenet-B MALE","nl-NL-Wavenet-C MALE","nl-NL-Wavenet-D FEMALE","nl-NL-Wavenet-E FEMALE","nl-NL-Standard-A FEMALE","nl-NL-Standard-B MALE","nl-NL-Standard-C MALE","nl-NL-Standard-D FEMALE","nl-NL-Standard-E FEMALE"],
# "english_aus": ["en-AU-Wavenet-A FEMALE","en-AU-Wavenet-B MALE","en-AU-Wavenet-C FEMALE","en-AU-Wavenet-D MALE","en-AU-Standard-A FEMALE","en-AU-Standard-B MALE","en-AU-Standard-C FEMALE","en-AU-Standard-D MALE"],
# "english_ind": ["en-IN-Wavenet-A FEMALE","en-IN-Wavenet-B MALE","en-IN-Wavenet-C MALE","en-IN-Wavenet-D FEMALE","en-IN-Standard-A FEMALE","en-IN-Standard-B MALE","en-IN-Standard-C MALE","en-IN-Standard-D FEMALE"],
# "english_uk": ["en-GB-Wavenet-A FEMALE","en-GB-Wavenet-B MALE","en-GB-Wavenet-C FEMALE","en-GB-Wavenet-D MALE","en-GB-Wavenet-F FEMALE","en-GB-Standard-A FEMALE","en-GB-Standard-B MALE","en-GB-Standard-C FEMALE","en-GB-Standard-D MALE","en-GB-Standard-F FEMALE"],
# "english_us": ["en-US-Wavenet-A MALE","en-US-Wavenet-B MALE","en-US-Wavenet-C FEMALE","en-US-Wavenet-D MALE","en-US-Wavenet-E FEMALE","en-US-Wavenet-F FEMALE","en-US-Wavenet-G FEMALE","en-US-Wavenet-H FEMALE","en-US-Wavenet-I MALE","en-US-Wavenet-J MALE" ,"en-US-Standard-B MALE","en-US-Standard-C FEMALE","en-US-Standard-D MALE","en-US-Standard-E FEMALE","en-US-Standard-G FEMALE","en-US-Standard-H FEMALE","en-US-Standard-I MALE","en-US-Standard-J MALE"],
# "filipino": ["fil-PH-Wavenet-A FEMALE","fil-PH-Wavenet-B FEMALE","fil-PH-Wavenet-C MALE","fil-PH-Wavenet-D MALE","fil-PH-Standard-A FEMALE","fil-PH-Standard-B FEMALE","fil-PH-Standard-C MALE","fil-PH-Standard-D MALE"],
# "finnish": ["fi-FI-Wavenet-A FEMALE","fi-FI-Standard-A FEMALE"],
# "french_canada": ["fr-CA-Wavenet-A FEMALE","fr-CA-Wavenet-B MALE","fr-CA-Wavenet-C FEMALE","fr-CA-Wavenet-D MALE","fr-CA-Standard-A FEMALE","fr-CA-Standard-B MALE","fr-CA-Standard-C FEMALE","fr-CA-Standard-D MALE"],
# "french": ["fr-FR-Wavenet-A FEMALE","fr-FR-Wavenet-B MALE","fr-FR-Wavenet-C FEMALE","fr-FR-Wavenet-D MALE","fr-FR-Wavenet-E FEMALE","fr-FR-Standard-A FEMALE","fr-FR-Standard-B MALE","fr-FR-Standard-C FEMALE","fr-FR-Standard-D MALE","fr-FR-Standard-E FEMALE"],
# "german": ["de-DE-Wavenet-A FEMALE","de-DE-Wavenet-B MALE","de-DE-Wavenet-C FEMALE","de-DE-Wavenet-D MALE","de-DE-Wavenet-E MALE","de-DE-Wavenet-F FEMALE","de-DE-Standard-A FEMALE","de-DE-Standard-B MALE","de-DE-Standard-E MALE","de-DE-Standard-F FEMALE"],
# "greek": ["el-GR-Wavenet-A FEMALE","el-GR-Standard-A FEMALE"],
# "gujarati": ["gu-IN-Standard-A FEMALE","gu-IN-Standard-B MALE"],
# "hindi": ["hi-IN-Wavenet-A FEMALE","hi-IN-Wavenet-B MALE","hi-IN-Wavenet-C MALE","hi-IN-Wavenet-D FEMALE","hi-IN-Standard-A FEMALE","hi-IN-Standard-B MALE","hi-IN-Standard-C MALE","hi-IN-Standard-D FEMALE"],
# "hungarian": ["hu-HU-Wavenet-A FEMALE","hu-HU-Standard-A FEMALE"],
# "indonesian": ["id-ID-Wavenet-A FEMALE","id-ID-Wavenet-B MALE","id-ID-Wavenet-C MALE","id-ID-Wavenet-D FEMALE","id-ID-Standard-A FEMALE","id-ID-Standard-B MALE","id-ID-Standard-C MALE","id-ID-Standard-D FEMALE"],
# "italian": ["it-IT-Wavenet-A FEMALE","it-IT-Wavenet-B FEMALE","it-IT-Wavenet-C MALE","it-IT-Wavenet-D MALE","it-IT-Standard-A FEMALE","it-IT-Standard-B FEMALE","it-IT-Standard-C MALE","it-IT-Standard-D MALE"],
# "japanese": ["ja-JP-Wavenet-A FEMALE","ja-JP-Wavenet-B FEMALE","ja-JP-Wavenet-C MALE","ja-JP-Wavenet-D MALE","ja-JP-Standard-A FEMALE","ja-JP-Standard-B FEMALE","ja-JP-Standard-C MALE","ja-JP-Standard-D MALE"],
# "kannada": ["kn-IN-Standard-A FEMALE","kn-IN-Standard-B MALE"],
# "korean": ["ko-KR-Wavenet-A FEMALE","ko-KR-Wavenet-B FEMALE","ko-KR-Wavenet-C MALE","ko-KR-Wavenet-D MALE","ko-KR-Standard-A FEMALE","ko-KR-Standard-B FEMALE","ko-KR-Standard-C MALE","ko-KR-Standard-D MALE"],
# "malayalam": ["ml-IN-Standard-A FEMALE","ml-IN-Standard-B MALE"],
# "mandarin": ["cmn-CN-Wavenet-A FEMALE","cmn-CN-Wavenet-B MALE","cmn-CN-Wavenet-C MALE","cmn-CN-Wavenet-D FEMALE", "cmn-CN-Standard-A FEMALE","cmn-CN-Standard-B MALE","cmn-CN-Standard-C MALE","cmn-CN-Standard-D FEMALE"],
# "mandarin_taiwan": ["cmn-TW-Wavenet-A FEMALE","cmn-TW-Wavenet-B MALE","cmn-TW-Wavenet-C MALE", "cmn-TW-Standard-A FEMALE","cmn-TW-Standard-B MALE","cmn-TW-Standard-C MALE"],
# "norwegian": ["nb-NO-Wavenet-A FEMALE","nb-NO-Wavenet-B MALE","nb-no-Wavenet-E FEMALE","nb-NO-Wavenet-C FEMALE","nb-NO-Wavenet-D MALE","nb-NO-Standard-A FEMALE","nb-NO-Standard-B MALE","nb-NO-Standard-C FEMALE","nb-NO-Standard-D MALE","nb-no-Standard-E FEMALE"],
# "polish": ["pl-PL-Wavenet-A FEMALE","pl-PL-Wavenet-B MALE","pl-PL-Wavenet-C MALE","pl-PL-Wavenet-D FEMALE","pl-PL-Wavenet-E FEMALE","pl-PL-Standard-A FEMALE","pl-PL-Standard-B MALE","pl-PL-Standard-C MALE","pl-PL-Standard-D FEMALE","pl-PL-Standard-E FEMALE"],
# "portuguese_brazil": ["pt-BR-Wavenet-A FEMALE","pt-BR-Standard-A FEMALE"],
# "portuguese": ["pt-PT-Wavenet-A FEMALE","pt-PT-Wavenet-B MALE","pt-PT-Wavenet-C MALE","pt-PT-Wavenet-D FEMALE","pt-PT-Standard-A FEMALE","pt-PT-Standard-B MALE","pt-PT-Standard-C MALE","pt-PT-Standard-D FEMALE"],
# "russian": ["ru-RU-Wavenet-A FEMALE","ru-RU-Wavenet-B MALE","ru-RU-Wavenet-C FEMALE","ru-RU-Wavenet-D MALE","ru-RU-Wavenet-E FEMALE","ru-RU-Standard-A FEMALE","ru-RU-Standard-B MALE","ru-RU-Standard-C FEMALE","ru-RU-Standard-D MALE","ru-RU-Standard-E FEMALE"],
# "slovak": ["sk-SK-Wavenet-A FEMALE","sk-SK-Standard-A FEMALE"],
# "spanish": ["es-ES-Wavenet-B MALE","es-ES-Standard-A FEMALE","es-ES-Standard-B MALE"],
# "swedish": ["sv-SE-Wavenet-A FEMALE","sv-SE-Standard-A FEMALE"],
# "tamil": ["ta-IN-Standard-A FEMALE","ta-IN-Standard-B MALE"],
# "telugu": ["te-IN-Standard-A FEMALE","te-IN-Standard-B MALE"],
# "thai": ["th-TH-Standard-A FEMALE"],
# "turkish": ["tr-TR-Wavenet-A FEMALE","tr-TR-Wavenet-B MALE","tr-TR-Wavenet-C FEMALE","tr-TR-Wavenet-D FEMALE","tr-TR-Wavenet-E MALE","tr-TR-Standard-A FEMALE","tr-TR-Standard-B MALE","tr-TR-Standard-C FEMALE","tr-TR-Standard-D FEMALE","tr-TR-Standard-E MALE"],
# "ukrainian": ["uk-UA-Wavenet-A FEMALE","uk-UA-Standard-A FEMALE"],
# "vietnamese": ["vi-VN-Wavenet-A FEMALE","vi-VN-Wavenet-B MALE","vi-VN-Wavenet-C FEMALE","vi-VN-Wavenet-D MALE","vi-VN-Standard-A FEMALE FEMALE","vi-VN-Standard-B MALE","vi-VN-Standard-C FEMALE","vi-VN-Standard-D MALE"]
# }
# Cell
def to_hhmmss(x: int) -> str:
    """Convert a duration in seconds to an ``HH:MM:SS`` string.

    Args:
        x: non-negative duration in seconds.

    Returns:
        The duration formatted as ``HH:MM:SS``.

    Raises:
        ValueError: if ``x`` is negative (was a bare ``Exception`` before;
            ``ValueError`` is more precise and still caught by existing
            ``except Exception`` handlers).
    """
    if x < 0:
        raise ValueError(f'seconds cannot be negative, got {x}')
    # NOTE: time.gmtime wraps at 24h, so durations >= 86400s roll over.
    return time.strftime("%H:%M:%S", time.gmtime(x))
# Cell
def to_secs(x: str) -> int:
    """Convert a time string in ``hh:mm:ss`` format to a number of seconds."""
    hours, minutes, seconds = (int(part) for part in x.split(':'))
    return (hours * 60 + minutes) * 60 + seconds
# Internal Cell
def _remove_duplicate(outfile):
    """Delete *outfile* if it already exists, so a fresh export can take its place."""
    target = Path(outfile)
    if target.exists():
        target.unlink()
# Internal Cell
def _exists(x): return Path(x).exists()
# Cell
def display_video(video): return Video(video, height = 400, width = 400)
# Cell
def check_resolution(video: Union[str, Path]) -> Dict:
    """Return the frame resolution of *video* as ``{'height': h, 'width': w}``.

    Raises:
        ValueError: if OpenCV raises while querying the video.
    """
    vid = None
    try:
        vid = cv2.VideoCapture(video)
        h = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
        w = vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        # NOTE(review): an unopenable file yields {'height': 0, 'width': 0}
        # (cv2 returns 0.0) rather than raising -- confirm callers expect this.
        return {'height': int(h), 'width': int(w)}
    except Exception as e:
        raise ValueError(e)
    finally:
        # Release the capture handle; it was previously leaked.
        if vid is not None:
            vid.release()
# Cell
def check_fps(video: Union[str, Path], round_res = False) -> float:
    """Return the frames-per-second of *video*, rounded when *round_res* is True."""
    metadata = imageio.get_reader(video).get_meta_data()
    frame_rate = metadata['fps']
    if round_res:
        return round(frame_rate)
    return frame_rate
# Cell
def play_audio(audio): return Audio(audio)
# Internal Cell
def _get_parts(x):
    """Split *x* into ``(Path, extension-without-dot)``."""
    path = Path(x)
    return path, path.suffix[1:]
# Internal Cell
def _read_audio(x, ext):
    # Load an audio file with pydub; ``ext`` tells ffmpeg the container format.
    return AudioSegment.from_file(x, ext)
# Cell
def change_audio_format(audio: Union[str, Path], outfile: Union[str, Path]) -> None:
    """Convert *audio* into the container format implied by *outfile*'s extension
    (e.g. mp3 -> wav). Works with every format supported by ffmpeg.
    """
    _remove_duplicate(outfile)
    out_path, out_ext = _get_parts(outfile)
    in_path, in_ext = _get_parts(audio)
    segment = _read_audio(in_path, in_ext)
    segment.export(out_path, format = out_ext)
# Cell
def trim_audio(audio: Union[str, Path], start_time: int, end_time: int, outfile: Union[str, Path] = 'trimmed_audio.mp3') -> Path:
    """Trim *audio* to the window [start_time, end_time] (in seconds).

    Works with all formats supported by ffmpeg.

    Returns:
        The output file as a ``Path``. (The annotation previously said ``None``
        even though the function returns the path -- fixed.)
    """
    _remove_duplicate(outfile)
    outfile, o_ext = _get_parts(outfile)
    audio, ext = _get_parts(audio)
    f = _read_audio(audio, ext)
    # pydub slices in milliseconds.
    f = f[start_time * 1000:end_time * 1000]
    f.export(outfile, format = o_ext)
    return outfile
# Cell
def change_volume(audio: Union[str, Path], vol, outfile = 'changed_vol.mp3'):
    """Raise or lower the loudness of *audio* by *vol* dB and write the result
    to *outfile*. Returns the output path.
    """
    _remove_duplicate(outfile)
    out_path, out_ext = _get_parts(outfile)
    in_path, in_ext = _get_parts(audio)
    # pydub: adding a number to a segment shifts its gain by that many dB.
    adjusted = _read_audio(in_path, in_ext) + vol
    adjusted.export(out_path, format = out_ext)
    return out_path
# Cell
def loop_audio(audio, times = 2, outfile = 'looped_audio.mp3'):
    """Repeat *audio* back-to-back *times* times and write the result to
    *outfile*. Returns the output path.
    """
    _remove_duplicate(outfile)
    out_path, out_ext = _get_parts(outfile)
    in_path, in_ext = _get_parts(audio)
    # pydub: multiplying a segment by n concatenates n copies.
    looped = _read_audio(in_path, in_ext) * times
    looped.export(out_path, format = out_ext)
    return out_path
# Cell
def concat_audios(audio, other_audios, outfile = 'concat_audios.mp3'):
    """Concatenate *audio* with one or more *other_audios* (a single file or a
    list of files) and write the result to *outfile*. Returns the output path.
    """
    _remove_duplicate(outfile)
    out_path, out_ext = _get_parts(outfile)
    base_path, base_ext = _get_parts(audio)
    combined = _read_audio(base_path, base_ext)
    extras = other_audios if isinstance(other_audios, list) else [other_audios]
    for extra in extras:
        extra_path, extra_ext = _get_parts(extra)
        # pydub: segment + segment concatenates them.
        combined = combined + _read_audio(extra_path, extra_ext)
    combined.export(out_path, format = out_ext)
    return out_path
return outfile | 1.773438 | 2 |
dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/DefectReporterCommon/jira_integration.py | CJoriginal/cjlumberyard | 2 | 12763973 | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import boto3
from boto3.dynamodb.conditions import Key
import CloudCanvas
from errors import ClientError
import json
import StringIO
import defect_reporter_constants as constants
from cgf_utils import custom_resource_utils
import additonal_report_Info
import re
from jira import JIRA, JIRAError
# Maps Jira schema type names to the Python types accepted for that field (Python 2 builtins).
STANDARD_FIELD_TYPES = {'number': [int, long, float, complex], 'boolean': [bool], 'text': [str, unicode], 'object': [dict]}
# Credential keys that must all be non-empty for the Jira integration to be usable.
CREDENTIAL_KEYS = ['userName', 'password', 'server']
def jira_credentials_status():
    """Report whether Jira credentials are fully configured.

    Returns:
        dict with 'exist' (True only when every key in CREDENTIAL_KEYS is
        non-empty) and 'server' (the configured Jira server URL, '' when unset).
    """
    jira_credentials = __get_credentials()
    # all() replaces the manual flag-and-break loop; indexing (not .get) keeps
    # the original KeyError behavior for malformed credential payloads.
    credentials_exist = all(jira_credentials[key] != '' for key in CREDENTIAL_KEYS)
    return { 'exist': credentials_exist, 'server': jira_credentials.get('server', '') }
def update_credentials(credentials):
    """Store Jira credentials by invoking the credentials lambda with a PUT request.

    NOTE: mutates the caller's ``credentials`` dict in place (``request`` is an alias).
    """
    request = credentials
    # Attach the KMS key id so the lambda can encrypt the secrets.
    request['kms_key'] = custom_resource_utils.get_embedded_physical_id(CloudCanvas.get_setting('KmsKey'))
    request['method'] = 'PUT'
    client = boto3.client('lambda')
    response = client.invoke(
        FunctionName = custom_resource_utils.get_embedded_physical_id(CloudCanvas.get_setting(constants.JIRA_CREDENTIALS_LAMBDA)),
        Payload = json.dumps(request)
    )
    # The lambda responds with a JSON payload describing the result.
    return json.loads(response['Payload'].read().decode("utf-8"))
def get_jira_integration_settings():
    """Return the stored submit mode, project key and issue type mappings."""
    return {
        setting: __get_existing_mapping(setting)
        for setting in ('submitMode', 'project', 'issuetype')
    }
def get_project_keys():
    """Return the key of every Jira project visible in the create metadata."""
    meta = get_jira_client().createmeta()
    return [project.get('key', '') for project in meta.get('projects', [])]
def get_issue_types(project_key):
    """Return the issue type names available for *project_key*.

    Raises:
        ClientError: if the key does not match exactly one project.
    """
    # Filter the create metadata down to the requested project.
    meta = get_jira_client().createmeta(projectKeys = project_key)
    projects = meta.get('projects', [])
    if len(projects) != 1:
        raise ClientError("Invalid project key {}".format(project_key))
    return [issue_type.get('name', '') for issue_type in projects[0].get('issuetypes', [])]
def get_field_mappings(project_key, issue_type):
    """Return the Jira fields required by the project/issue type, each annotated
    with its mapping currently stored in the settings table.
    """
    annotated = []
    for field in __get_jira_fields(project_key, issue_type):
        field['mapping'] = __get_existing_mapping(field['id'])
        annotated.append(field)
    return annotated
def update_field_mappings(mappings):
    """Persist field mappings: store entries with a mapping, delete explicitly cleared ones."""
    for entry in mappings:
        mapping = entry.get('mapping')
        if mapping:
            __get_jira_integration_settings_table().put_item(Item=entry)
        elif mapping is not None and entry.get('id'):
            # A present-but-empty mapping means the user cleared it: drop the stored row.
            __get_jira_integration_settings_table().delete_item(Key={'id': entry.get('id')})
    return 'SUCCESS'
def create_Jira_tickets(reports, is_manual=False):
    """Create Jira tickets for defect reports, de-duplicating against existing issues.

    Each report is first sent to the de-dupping lambda; only reports without an
    existing issue id produce a new ticket. Occurrence counts and the report
    header are updated either way.
    """
    settings = get_jira_integration_settings()
    if not (settings.get('project', '') and settings.get('issuetype', '')):
        raise ClientError('The project key or issue type is not specified.')
    # Get the args for creating a new Jira ticket based on the mappings defined in CGP
    issue_args = {'project': settings['project'], 'issuetype': settings['issuetype']}
    field_mappings = get_field_mappings(settings['project'], settings['issuetype'])
    for report in reports:
        # Check whether the report is duplicated
        client = boto3.client('lambda')
        response = client.invoke(
            FunctionName = custom_resource_utils.get_embedded_physical_id(CloudCanvas.get_setting('DeduppingLambda')),
            Payload = json.dumps({'report': report})
        )
        # The de-dupping lambda returns the existing issue id, or a falsy value.
        issue_id = json.loads(response['Payload'].read().decode("utf-8"))
        # Create a new JIRA ticket if the report is not duplicated
        if not issue_id:
            issue_id = __create_jira_ticket(report, field_mappings, issue_args, is_manual)
        update_occurance_count(issue_id)
        report['jira_status'] = issue_id
        additonal_report_Info.update_report_header(report)
    return 'SUCCESS'
def upload_attachment(attachment, jira_issue):
s3_client = boto3.client('s3')
key = attachment.get('id', '')
try:
response = s3_client.get_object(Bucket = custom_resource_utils.get_embedded_physical_id(CloudCanvas.get_setting(constants.SANITIZED_BUCKET)), Key = key)
except Exception as e:
print "Unable to GET the sanitized attachment. Key==>", key
return
new_attachment = StringIO.StringIO()
new_attachment.write(response['Body'].read())
attachment_object = get_jira_client().add_attachment(
issue = jira_issue.key,
attachment = new_attachment,
filename = ('{}.{}').format(attachment.get('name', ''), attachment.get('extension', '')))
def update_occurance_count(issue_id):
    """Increment the stored occurrence count for a Jira issue (no-op when issue_id is None)."""
    if issue_id is None:  # idiomatic identity check (was == None)
        return
    key = {'issue_id': issue_id}
    # NOTE(review): this catches the project's errors.ClientError, but DynamoDB's
    # get_item raises botocore.exceptions.ClientError -- confirm which failure
    # this fallback-to-1 was meant to absorb.
    try:
        item = __get_jira_ticket_occurance_count_table().get_item(Key=key).get('Item', {})
        occurance_count = item.get('occurance_count', 0) + 1
    except ClientError:
        occurance_count = 1
    entry = {'issue_id': issue_id, 'occurance_count': occurance_count}
    __get_jira_ticket_occurance_count_table().put_item(Item=entry)
def get_jira_client():
    """Build an authenticated JIRA client from the stored credentials."""
    credentials = __get_credentials()
    options = {'server': credentials.get('server', '')}
    auth = (credentials.get('userName', ''), credentials.get('password', ''))
    return JIRA(options=options, basic_auth=auth)
def __get_credentials():
    # Retrieve the stored Jira credentials from the credentials lambda (read-only GET).
    request = {'method': 'GET'}
    client = boto3.client('lambda')
    response = client.invoke(
        FunctionName = custom_resource_utils.get_embedded_physical_id(CloudCanvas.get_setting(constants.JIRA_CREDENTIALS_LAMBDA)),
        Payload = json.dumps(request)
    )
    # JSON payload; expected to carry the CREDENTIAL_KEYS entries (userName/password/server).
    return json.loads(response['Payload'].read().decode("utf-8"))
def __get_jira_fields(project_key, issue_type):
    # Return the field descriptors (id, display name, required flag, schema) that
    # Jira demands for creating an issue of the given type in the given project.
    jira_client = get_jira_client()
    # Filter the meta data by project key and issue type name
    meta = jira_client.createmeta(projectKeys = project_key, issuetypeNames = issue_type, expand='projects.issuetypes.fields')
    # Retrieve the issue type description
    projects = meta.get('projects', [])
    issue_types = []
    issue_type_description = {}
    # Both filters must match exactly one entry; otherwise the input was invalid.
    if len(projects) == 1:
        issue_types = projects[0].get('issuetypes', [])
    else:
        raise ClientError("Invalid project key {}".format(project_key))
    if len(issue_types) == 1:
        issue_type_description = issue_types[0]
    else:
        raise ClientError("Invalid issue type {} ".format(issue_type))
    fields = []
    # 'project' and 'issuetype' are supplied separately, so skip them here.
    for field_id, field_property in issue_type_description.get('fields', {}).iteritems():
        if field_id != 'project' and field_id != 'issuetype':
            fields.append({
                'id': field_id,
                'name': field_property.get('name', field_id),
                'required': field_property.get('required', True),
                'schema': __get_field_schema(field_property)
            })
    return fields
def __get_field_schema(field):
    # Translate a Jira field definition into a simplified JSON-schema-like dict.
    allowed_value_schema = {}
    # Fold every allowedValues entry into one accumulated schema.
    for allow_value in field.get("allowedValues", []):
        allowed_value_schema = __get_allowed_value_schema(allow_value, allowed_value_schema)
    field_type = field.get('schema', {}).get('type', '')
    # Jira reports strings as 'string'; the CGP side calls them 'text'.
    field_type = 'text' if field_type == 'string' else field_type
    schema = {'type': field_type}
    if field_type == 'array':
        # Arrays describe their element type under 'items'; fall back to Jira's raw item type.
        schema['items'] = allowed_value_schema if allowed_value_schema else {'type': field['schema']['items']}
    else:
        # A non-empty allowed-value schema overrides the declared type.
        schema = allowed_value_schema if allowed_value_schema else schema
    if schema.get('properties'):
        schema['type'] = 'object'
    return schema
def __get_allowed_value_schema(allow_value, allowed_value_schema):
    """Merge one Jira allowedValues entry into the accumulated schema dict.

    Returns the (possibly updated) schema dict so the caller can keep
    accumulating across entries.
    """
    allowed_value_type = allowed_value_schema.get('type', __get_standard_data_type(allow_value))
    allowed_value_schema['type'] = allowed_value_type
    if allowed_value_type != 'object':
        # BUG FIX: this used to be a bare `return` (i.e. None), which clobbered
        # the caller's accumulated schema and crashed on the next entry.
        return allowed_value_schema
    if allowed_value_schema.get('properties') is None:
        allowed_value_schema['properties'] = {}
    for key, value in allow_value.iteritems():
        allowed_value_schema['properties'][key] = {'type': __get_standard_data_type(value)}
    return allowed_value_schema
def __get_standard_data_type(value):
    """Map a Python value to one of the standard Jira field type names ('text' by default)."""
    for type_name, python_types in STANDARD_FIELD_TYPES.iteritems():
        if type(value) in python_types:
            return type_name
    return 'text'
def __find_embedded_identifiers(mappings):
    """Return every embedded lookup token of the form "[identifier]" in *mappings*.

    Non-string inputs yield an empty list.
    """
    if type(mappings) != str:
        return []
    # BUG FIX: the old pattern "\"\[(\S)*\]\"" wrapped a single \S in a capturing
    # group, so re.findall returned only the last character of each token instead
    # of the whole "[...]" token. Using a raw string with no group returns the
    # full matches.
    return re.findall(r'"\[\S*\]"', mappings, re.MULTILINE)
def __create_jira_ticket(report, field_mappings, issue_args, is_manual_mode):
    # Merge the report's fields over the base issue args and submit the ticket.
    # NOTE(review): field_mappings and is_manual_mode are currently unused here --
    # confirm whether mapping-based field translation was intended before submit.
    attachments = report.get('attachment_id', None)
    issue_args.update(report)
    return __send_jira_ticket(issue_args, attachments)
def __replace_embedded(value, report):
    """Replace each embedded "[key]" token in *value* with the matching report attribute.

    Returns the substituted string; tokens without a truthy report value are left as-is.
    """
    embedded_mappings = __find_embedded_identifiers(value)
    #replace the embedded mappings with the proper defect event attribute
    for embedded_map in embedded_mappings:
        defect_event_value = report.get(embedded_map, None)
        if defect_event_value:
            # BUG FIX: str.replace returns a new string; the previous code
            # discarded the result, so no substitution ever took effect.
            value = value.replace(embedded_map, defect_event_value)
    return value
def __send_jira_ticket(issue_args, attachments):
issue_args.pop('attachment_id')
issue_args.pop('universal_unique_identifier')
print "Sending ticket", issue_args, attachments
# Create a new Jira ticket
new_issue = {}
try:
new_issue = get_jira_client().create_issue(**issue_args)
except JIRAError as e:
raise ClientError(e.text)
# Upload the attachments
for attachment in attachments:
upload_attachment(attachment, new_issue)
return new_issue.key
def __get_existing_mapping(id):
    """Return the stored mapping for *id*, or '' when none exists."""
    response = __get_jira_integration_settings_table().query(KeyConditionExpression=Key('id').eq(id))
    items = response.get('Items', [])
    return items[0]['mapping'] if items else ''
def __get_jira_integration_settings_table():
    # Lazily create and memoize the DynamoDB Table resource on the function
    # object itself so repeated calls reuse one boto3 resource.
    if not hasattr(__get_jira_integration_settings_table,'jira_integration_settings'):
        __get_jira_integration_settings_table.jira_integration_settings = boto3.resource('dynamodb').Table(custom_resource_utils.get_embedded_physical_id(CloudCanvas.get_setting('JiraIntegrationSettings')))
    return __get_jira_integration_settings_table.jira_integration_settings
def __get_jira_ticket_occurance_count_table():
    # Same lazy-memoization pattern as __get_jira_integration_settings_table,
    # but for the occurrence-count table.
    if not hasattr(__get_jira_ticket_occurance_count_table,'jira_ticket_occurance_count'):
        __get_jira_ticket_occurance_count_table.jira_ticket_occurance_count = boto3.resource('dynamodb').Table(custom_resource_utils.get_embedded_physical_id(CloudCanvas.get_setting('JiraTicketOccuranceCount')))
    return __get_jira_ticket_occurance_count_table.jira_ticket_occurance_count
remotelogin/devices/base_db_named.py | filintod/pyremotelogin | 1 | 12763974 | from remotelogin.devices.base import DeviceWithEncryptionSettings
from fdutils import db
class TableNamedDevice(DeviceWithEncryptionSettings, db.DeclarativeBaseWithTableName):
    # Abstract ORM base combining encrypted device settings with the project's
    # declarative base. __abstract__ = True keeps SQLAlchemy from mapping this
    # class to a table; concrete subclasses get their own tables.
    # NOTE(review): presumably db.DeclarativeBaseWithTableName derives
    # __tablename__ from the class name -- confirm in fdutils.db.
    __abstract__ = True
python/experiment.py | caus-am/sigmasep | 8 | 12763975 | # Copyright (c) 2018 <NAME>, <NAME>
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
import mSCM
import sys
import numpy as np
from numpy.random import choice
from numpy.random import seed
import random
# Run index from the command line; also used to seed both RNGs so each run is reproducible.
nbr = int(sys.argv[1])
random.seed(nbr)
np.random.seed(nbr)
# change the following to point to the directory in which you have cloned the code repository
rootdir = "~/vcs/sigmasep"
# change the following to point to the directory in which you want to save the output
outdir = "/dev/shm/jmooij1/sigmasep"
# change the following to point to the directory in which clingo lives
clingodir = "/zfs/ivi/causality/opt/clingo-4.5.4-linux-x86_64/"
# Run the full mSCM experiment once for each number of interventions (0..5),
# saving each configuration under its own experiment_<nbr_do> folder.
for nbr_do in range(6):
    mSCM.sample_mSCM_run_all_and_save(
        d=5,k=2,p=0.3,m=0,nbr=nbr,add_ind_noise_to_A=False,
        add_ind_noise_to_W=True,
        include_latent=True,
        folderpath=outdir+"/mSCM_data/experiment_"+str(nbr_do)+"/",
        AF=[np.tanh],SC=[1],NOI=['normal'],SD=[1],n=10000,
        AL =[0.001],MUL=[1000],infty=1000,nbr_do=nbr_do,max_do=1,do_strategy=2,
        clingodir=clingodir,
        aspdir=rootdir+"/ASP/"
    )
| 2.09375 | 2 |
pinakes/main/migrations/0020_auto_20211208_2019.py | hsong-rh/pinakes | 0 | 12763976 | <gh_stars>0
# Generated by Django 3.2.8 on 2021-12-08 20:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 3.2.8, 2021-12-08).

    Every operation below is an ``AlterField`` that attaches or updates
    field options -- mostly ``help_text`` -- on existing columns of the
    ``main`` app's models.  No fields are added or removed.  Do not edit
    the operations by hand; regenerate with ``makemigrations`` instead.
    """

    # Must run after the (swappable) user model exists and after 0019,
    # which introduced portfolio.keycloak_id.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("main", "0019_portfolio_keycloak_id"),
    ]

    operations = [
        migrations.AlterField(
            model_name="action",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="action",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="action",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="approvalrequest",
            name="approval_request_ref",
            field=models.CharField(
                default="",
                help_text="The ID of the approval submitted to approval-api",
                max_length=64,
            ),
        ),
        migrations.AlterField(
            model_name="approvalrequest",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="approvalrequest",
            name="order",
            field=models.OneToOneField(
                help_text="The Order which the approval request belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.order",
            ),
        ),
        migrations.AlterField(
            model_name="approvalrequest",
            name="reason",
            field=models.TextField(
                blank=True,
                default="",
                help_text="The reason for the current state",
            ),
        ),
        migrations.AlterField(
            model_name="approvalrequest",
            name="request_completed_at",
            field=models.DateTimeField(
                editable=False,
                help_text="The time at which the approval request completed",
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="approvalrequest",
            name="state",
            field=models.CharField(
                choices=[
                    ("undecided", "Undecided"),
                    ("approved", "Approved"),
                    ("canceled", "Canceled"),
                    ("denied", "Denied"),
                    ("failed", "Failed"),
                ],
                default="undecided",
                editable=False,
                help_text="The state of the approval request (approved, denied, undecided, canceled, error)",
                max_length=10,
            ),
        ),
        migrations.AlterField(
            model_name="approvalrequest",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="approvalrequest",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="catalogserviceplan",
            name="base_schema",
            field=models.JSONField(
                blank=True,
                help_text="JSON schema of the survey from the controller",
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="catalogserviceplan",
            name="create_json_schema",
            field=models.JSONField(
                blank=True,
                help_text="Current JSON schema for the service plan",
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="catalogserviceplan",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="catalogserviceplan",
            name="imported",
            field=models.BooleanField(
                default=True,
                help_text="Whether or not the service plan has been imported for editing",
            ),
        ),
        migrations.AlterField(
            model_name="catalogserviceplan",
            name="modified",
            field=models.BooleanField(
                default=False,
                help_text="Whether or not the service plan has a modified schema",
            ),
        ),
        migrations.AlterField(
            model_name="catalogserviceplan",
            name="modified_schema",
            field=models.JSONField(
                blank=True,
                help_text="Modified JSON schema for the service plan",
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="catalogserviceplan",
            name="name",
            field=models.CharField(
                blank=True,
                default="",
                help_text="The name of the service plan",
                max_length=255,
            ),
        ),
        migrations.AlterField(
            model_name="catalogserviceplan",
            name="portfolio_item",
            field=models.ForeignKey(
                help_text="ID of the portfolio item",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.portfolioitem",
            ),
        ),
        migrations.AlterField(
            model_name="catalogserviceplan",
            name="service_offering_ref",
            field=models.CharField(
                help_text="Corresponding service offering from inventory-api",
                max_length=64,
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="catalogserviceplan",
            name="service_plan_ref",
            field=models.CharField(
                help_text="Corresponding service plan from inventory-api",
                max_length=64,
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="catalogserviceplan",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="catalogserviceplan",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="image",
            name="file",
            field=models.ImageField(
                blank=True, help_text="The image file", null=True, upload_to=""
            ),
        ),
        migrations.AlterField(
            model_name="order",
            name="completed_at",
            field=models.DateTimeField(
                editable=False,
                help_text="The time at which the order completed",
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="order",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="order",
            name="order_request_sent_at",
            field=models.DateTimeField(
                editable=False,
                help_text="The time at which the order request was sent to the catalog inventory service",
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="order",
            name="state",
            field=models.CharField(
                choices=[
                    ("Pending", "Pending"),
                    ("Approved", "Approved"),
                    ("Canceled", "Canceled"),
                    ("Completed", "Completed"),
                    ("Created", "Created"),
                    ("Denied", "Denied"),
                    ("Failed", "Failed"),
                    ("Ordered", "Ordered"),
                ],
                default="Created",
                editable=False,
                help_text="Current state of the order",
                max_length=10,
            ),
        ),
        migrations.AlterField(
            model_name="order",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="order",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="order",
            name="user",
            field=models.ForeignKey(
                help_text="ID of the user who created this object",
                on_delete=django.db.models.deletion.CASCADE,
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="artifacts",
            field=models.JSONField(
                blank=True,
                help_text="Contains a prefix-stripped key/value object that contains all of the information exposed from product provisioning",
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="completed_at",
            field=models.DateTimeField(
                editable=False,
                help_text="The time at which the order item completed",
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="count",
            field=models.SmallIntegerField(
                default=0, editable=False, help_text="Item count"
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="external_url",
            field=models.URLField(
                blank=True,
                help_text="The external url of the service instance used with relation to this order item",
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="inventory_task_ref",
            field=models.CharField(
                help_text="Task reference from inventory-api",
                max_length=64,
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="name",
            field=models.CharField(
                help_text="Name of the portfolio item or order process",
                max_length=64,
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="order",
            field=models.ForeignKey(
                help_text="The order that the order item belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.order",
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="order_request_sent_at",
            field=models.DateTimeField(
                editable=False,
                help_text="The time at which the order request was sent to the catalog inventory service",
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="portfolio_item",
            field=models.ForeignKey(
                help_text="Stores the portfolio item ID",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.portfolioitem",
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="provider_control_parameters",
            field=models.JSONField(
                blank=True,
                help_text="The provider specific parameters needed to provision this service. This might include namespaces, special keys.",
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="service_instance_ref",
            field=models.CharField(
                help_text="Corresponding service instance from inventory-api",
                max_length=64,
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="service_parameters",
            field=models.JSONField(
                blank=True,
                help_text="Sanitized JSON object with provisioning parameters",
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="service_parameters_raw",
            field=models.JSONField(
                blank=True,
                help_text="Raw JSON object with provisioning parameters",
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="service_plan_ref",
            field=models.CharField(
                help_text="Corresponding service plan from inventory-api",
                max_length=64,
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="state",
            field=models.CharField(
                choices=[
                    ("Pending", "Pending"),
                    ("Approved", "Approved"),
                    ("Canceled", "Canceled"),
                    ("Completed", "Completed"),
                    ("Created", "Created"),
                    ("Denied", "Denied"),
                    ("Failed", "Failed"),
                    ("Ordered", "Ordered"),
                ],
                default="Created",
                editable=False,
                help_text="Current state of this order item",
                max_length=10,
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="orderitem",
            name="user",
            field=models.ForeignKey(
                help_text="ID of the user who created this object",
                on_delete=django.db.models.deletion.CASCADE,
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AlterField(
            model_name="portfolio",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="portfolio",
            name="description",
            field=models.TextField(
                blank=True,
                default="",
                help_text="Describe the portfolio in details",
            ),
        ),
        migrations.AlterField(
            model_name="portfolio",
            name="enabled",
            field=models.BooleanField(
                default=False,
                help_text="Whether or not this portfolio is enabled",
            ),
        ),
        migrations.AlterField(
            model_name="portfolio",
            name="icon",
            field=models.ForeignKey(
                blank=True,
                help_text="ID of the icon image associated with this object",
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="main.image",
            ),
        ),
        migrations.AlterField(
            model_name="portfolio",
            name="name",
            field=models.CharField(
                help_text="Portfolio name", max_length=255, unique=True
            ),
        ),
        migrations.AlterField(
            model_name="portfolio",
            name="owner",
            field=models.CharField(
                help_text="Name of the user who created the portfolio",
                max_length=255,
            ),
        ),
        migrations.AlterField(
            model_name="portfolio",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="portfolio",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="portfolioitem",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="portfolioitem",
            name="description",
            field=models.TextField(
                blank=True,
                default="",
                help_text="Description of the portfolio item",
            ),
        ),
        migrations.AlterField(
            model_name="portfolioitem",
            name="distributor",
            field=models.CharField(
                help_text="The name of the provider for the portfolio item",
                max_length=64,
            ),
        ),
        migrations.AlterField(
            model_name="portfolioitem",
            name="documentation_url",
            field=models.URLField(
                blank=True,
                help_text="The URL for documentation of the portfolio item",
            ),
        ),
        migrations.AlterField(
            model_name="portfolioitem",
            name="favorite",
            field=models.BooleanField(
                default=False,
                help_text="Definition of a favorate portfolio item",
            ),
        ),
        migrations.AlterField(
            model_name="portfolioitem",
            name="icon",
            field=models.ForeignKey(
                blank=True,
                help_text="ID of the icon image associated with this object",
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="main.image",
            ),
        ),
        migrations.AlterField(
            model_name="portfolioitem",
            name="long_description",
            field=models.TextField(
                blank=True,
                default="",
                help_text="The longer description of the portfolio item",
            ),
        ),
        migrations.AlterField(
            model_name="portfolioitem",
            name="name",
            field=models.CharField(
                help_text="Name of the portfolio item", max_length=64
            ),
        ),
        migrations.AlterField(
            model_name="portfolioitem",
            name="orphan",
            field=models.BooleanField(
                default=False,
                help_text="Boolean if an associated service offering no longer exists",
            ),
        ),
        migrations.AlterField(
            model_name="portfolioitem",
            name="portfolio",
            field=models.ForeignKey(
                help_text="ID of the parent portfolio",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.portfolio",
            ),
        ),
        migrations.AlterField(
            model_name="portfolioitem",
            name="service_offering_ref",
            field=models.CharField(
                help_text="The service offering this portfolio item was created from",
                max_length=64,
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="portfolioitem",
            name="service_offering_source_ref",
            field=models.CharField(
                blank=True,
                default="",
                help_text="The source reference this portfolio item was created from",
                max_length=64,
            ),
        ),
        migrations.AlterField(
            model_name="portfolioitem",
            name="state",
            field=models.CharField(
                help_text="The current state of the portfolio item",
                max_length=64,
            ),
        ),
        migrations.AlterField(
            model_name="portfolioitem",
            name="support_url",
            field=models.URLField(
                blank=True,
                help_text="The URL for finding support for the portfolio item",
            ),
        ),
        migrations.AlterField(
            model_name="portfolioitem",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="portfolioitem",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="progressmessage",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="progressmessage",
            name="level",
            field=models.CharField(
                choices=[
                    ("Info", "Info"),
                    ("Error", "Error"),
                    ("Warning", "Warning"),
                    ("Debug", "Debug"),
                ],
                default="Info",
                editable=False,
                help_text="One of the predefined levels",
                max_length=10,
            ),
        ),
        migrations.AlterField(
            model_name="progressmessage",
            name="message",
            field=models.TextField(
                blank=True, default="", help_text="The message content"
            ),
        ),
        migrations.AlterField(
            model_name="progressmessage",
            name="messageable_id",
            field=models.IntegerField(
                editable=False,
                help_text="ID of the order or order item",
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="progressmessage",
            name="messageable_type",
            field=models.CharField(
                help_text="Identify order or order item that this message belongs to",
                max_length=64,
                null=True,
            ),
        ),
        migrations.AlterField(
            model_name="progressmessage",
            name="received_at",
            field=models.DateTimeField(
                auto_now_add=True, help_text="Message received at"
            ),
        ),
        migrations.AlterField(
            model_name="progressmessage",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="progressmessage",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="request",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="request",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="request",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="serviceinstance",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="serviceinstance",
            name="source",
            field=models.ForeignKey(
                help_text="ID of the source that this object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.source",
            ),
        ),
        migrations.AlterField(
            model_name="serviceinstance",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="serviceinstance",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="serviceinventory",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="serviceinventory",
            name="source",
            field=models.ForeignKey(
                help_text="ID of the source that this object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.source",
            ),
        ),
        migrations.AlterField(
            model_name="serviceinventory",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="serviceinventory",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="serviceoffering",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="serviceoffering",
            name="source",
            field=models.ForeignKey(
                help_text="ID of the source that this object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.source",
            ),
        ),
        migrations.AlterField(
            model_name="serviceoffering",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="serviceoffering",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="serviceofferingnode",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="serviceofferingnode",
            name="source",
            field=models.ForeignKey(
                help_text="ID of the source that this object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.source",
            ),
        ),
        migrations.AlterField(
            model_name="serviceofferingnode",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="serviceofferingnode",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="serviceplan",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="serviceplan",
            name="source",
            field=models.ForeignKey(
                help_text="ID of the source that this object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.source",
            ),
        ),
        migrations.AlterField(
            model_name="serviceplan",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="serviceplan",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="source",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="source",
            name="name",
            field=models.CharField(
                help_text="Name of the source", max_length=255, unique=True
            ),
        ),
        migrations.AlterField(
            model_name="source",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="source",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="taglink",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="taglink",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="taglink",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="template",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="template",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="template",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="tenant",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="tenant",
            name="external_tenant",
            field=models.CharField(
                help_text="User's account number", max_length=32, unique=True
            ),
        ),
        migrations.AlterField(
            model_name="tenant",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
        migrations.AlterField(
            model_name="workflow",
            name="created_at",
            field=models.DateTimeField(
                auto_now_add=True,
                help_text="The time at which the object was created",
            ),
        ),
        migrations.AlterField(
            model_name="workflow",
            name="tenant",
            field=models.ForeignKey(
                help_text="ID of the tenant the object belongs to",
                on_delete=django.db.models.deletion.CASCADE,
                to="main.tenant",
            ),
        ),
        migrations.AlterField(
            model_name="workflow",
            name="updated_at",
            field=models.DateTimeField(
                auto_now=True,
                help_text="The time at which the object was last updated",
            ),
        ),
    ]
| 1.65625 | 2 |
smtk/model/testing/python/modelAttributes.py | jcfr/SMTK | 40 | 12763977 | <reponame>jcfr/SMTK
# =============================================================================
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See LICENSE.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
# =============================================================================
"""
Test attribute association with smtk model
Uses test2D.json model file in the SMTKTestData repo.
Also uses test2D.xref, a cross-reference file between test2D.json
and an old-generation model file, test2D.cmb, that can be found in
the CMBTestingData repo.
"""
import logging
import os
import sys
import uuid
import unittest
try:
import smtk
import smtk.io
import smtk.model
import smtk.testing
except ImportError:
print()
print('Not able to import smtk library. You might need to:')
print(' - Use the PYTHONPATH variable to point to the smtk python lib')
print()
sys.exit(-1)
# Verbose logging so test diagnostics are visible when run standalone.
logging.basicConfig(level=logging.DEBUG)

# Define input filenames here
MODEL_FILENAME = 'test2D.json'      # SMTK model, found under <DATA_DIR>/model/2d/smtk
XREF_FILENAME = 'test2D.xref'       # cross-reference between test2D.json and the legacy test2D.cmb
SBT_FILENAME = 'Basic2DFluid.sbt'   # attribute template read by the test
SBI_FILENAME = 'Basic2DFluid.sbi'   # attribute instance file written, then re-read, by the test
class TestModelAttributes(unittest.TestCase):
    """Round-trip attribute-association test against the test2D SMTK model.

    Loads test2D.json, builds Material/Velocity/Pressure attributes
    associated with model faces and edge groups, writes them to an .sbi
    file, reloads both model and attributes from scratch, and verifies
    that the associations survived the round trip.
    """

    def load_xref(self, scope, folder=None):
        '''Parses cross-reference file to initialize lists of entity uuids.

        List has same order as entities in original (CMB) model.
        Populates scope.vertex_list / scope.edge_list / scope.face_list
        with uuid strings.  Exits the process if nothing could be parsed.
        '''
        scope.vertex_list = list()
        scope.edge_list = list()
        scope.face_list = list()
        list_map = {
            'vertex': scope.vertex_list,
            'edge': scope.edge_list,
            'face': scope.face_list
        }
        # Use the module-level constant instead of repeating the literal
        # (was hard-coded to 'test2D.xref' here).
        path = XREF_FILENAME
        if folder is not None:
            path = os.path.join(folder, XREF_FILENAME)
        logging.info('Loading %s' % path)
        done = False
        with open(path, 'r') as f:
            for line in f.readlines():
                if line.startswith('Reading'):
                    continue
                # Expected line shape: "... <entity_type> ... <uuid>"
                # (3rd token is the type, last token is the uuid).
                parts = line.split()
                entity_type = parts[2]
                # Renamed from 'uuid' to avoid shadowing the uuid module.
                entity_uuid = parts[-1]
                entity_list = list_map.get(entity_type)
                entity_list.append(entity_uuid)
                done = True  # at least one entity was parsed
        if not done:
            logging.error('Problem loading %s' % path)
            sys.exit(3)

    def generate_attributes(self, scope):
        '''Builds and returns attribute resource.

        Also adds boundary groups (left_edges / right_edges) to the model
        and records (attribute name, entity uuid) pairs in scope.att_data
        for later verification by check_attributes().
        '''
        # Load attribute template (.sbt) from the shared test-data tree.
        att_folder = os.path.join(
            smtk.testing.DATA_DIR, 'attribute', 'attribute_collection')
        att_path = os.path.join(att_folder, SBT_FILENAME)
        logging.info('Reading %s' % att_path)
        resource = smtk.attribute.Resource.create()
        reader = smtk.io.AttributeReader()
        logger = smtk.io.Logger()
        err = reader.read(resource, att_path, logger)
        if err:
            logging.error("Unable to load template file")
            logging.error(logger.convertToString())
            sys.exit(4)

        # Create material attribute & associate to model face
        defn = resource.findDefinition('Material')
        value = 1.01
        for i, face in enumerate(scope.face_list, start=1):
            att_name = 'material %d' % i
            att = resource.createAttribute(att_name, defn)
            for item_name in ['Density', 'Viscosity']:
                item = att.find(item_name)
                item.setValue(0, value)
                value += 0.491  # arbitrary distinct value per item
            face_id = uuid.UUID(face)
            logging.debug('Associate attribute \"%s\" to face %s' %
                          (att_name, face_id))
            associated = att.associate(scope.store.find(face_id))
            if not associated:
                logging.error(
                    'Failed to associate %s to %s' % (face_id, att_name))
            # Save attribute name and model entity (uuid) for checking later
            meta = (att.name(), face_id)
            scope.att_data.append(meta)

        # Generate boundary groups, hard-code to specific model edges
        flags = smtk.model.EDGE
        left_edges = scope.store.addGroup(flags, 'left_edges')
        # originally, these edges were added to left_edges via
        # scope.store.addToGroup(). Doing so caused the subsequent logic
        # (left_edges.findFirstNonGroupMember()) to fail. If entities are added
        # to the group through the group's API, it seems to work though.
        for i in [0, 1, 2]:
            entity_ref = smtk.model.EntityRef(
                scope.store, uuid.UUID(scope.edge_list[i]))
            left_edges.addEntity(entity_ref)
        if not left_edges.findFirstNonGroupMember().isValid():
            logging.error("Unable to add left_edges to leftBC")
            logging.error(logger.convertToString())
            sys.exit(4)

        right_edges = scope.store.addGroup(flags, 'right_edges')
        for i in [6, 9]:
            entity_ref = smtk.model.EntityRef(
                scope.store, uuid.UUID(scope.edge_list[i]))
            right_edges.addEntity(entity_ref)

        # Create boundary condition attributes
        defn = resource.findDefinition('Velocity')
        left_att = resource.createAttribute('leftBC', defn)
        item = left_att.item(0)
        item.setValue(0, 3.14159)
        item.setValue(1, 2.71828)
        logging.debug('Associate attribute \"%s\" to boundary group %s' %
                      (left_att.name(), left_edges.name()))
        ok = left_att.associate(left_edges.component())
        if not ok:
            logging.error("Unable to associate entity to leftBC")
            logging.error(logger.convertToString())
            sys.exit(4)
        meta = (left_att.name(), left_edges.entity())
        scope.att_data.append(meta)

        defn = resource.findDefinition('Pressure')
        right_att = resource.createAttribute('rightBC', defn)
        # BUGFIX: this previously read left_att.item(0), which clobbered
        # leftBC's first component with 14.1 and left rightBC unset.
        item = right_att.item(0)
        item.setValue(0, 14.1)
        logging.debug('Associate attribute \"%s\" to boundary group %s' %
                      (right_att.name(), right_edges.name()))
        ok = right_att.associate(right_edges.component())
        if not ok:
            logging.error("Unable to associate entity to rightBC")
            logging.error(logger.convertToString())
            sys.exit(4)
        meta = (right_att.name(), right_edges.entity())
        scope.att_data.append(meta)

        return resource

    def check_attributes(self, scope, resource):
        '''Checks for attributes and associations.

        Verifies every (attribute name, entity uuid) pair recorded in
        scope.att_data against the given attribute resource.
        Returns number of errors found.
        '''
        error_count = 0  # return value
        for t in scope.att_data:
            att_name, entity_uuid = t
            att = resource.findAttribute(att_name)
            if not att:
                logging.error('Missing attribute %s' % att_name)
                error_count += 1
            entity_id_set = att.associatedModelEntityIds()
            if not entity_id_set:
                logging.error(
                    'Missing model entity on attribute %s' % att_name)
                error_count += 1
                continue
            entity_ids = list(entity_id_set)
            entity_id = entity_ids[0]
            # Compare uuid strings
            if entity_id.hex != entity_uuid.hex:
                logging.error('Unexpected model entity %s on attribute %s' %
                              (entity_ids[0], att_name))
                error_count += 1
        logging.debug('check_attributes error_count %d' % error_count)
        return error_count

    def testAssociation(self):
        """End-to-end: build attributes, save, reload, verify associations."""
        # Define scope object to store shared data
        ScopeType = type('Scope', (object,), dict())
        scope = ScopeType()
        self.model_folder = os.path.join(
            smtk.testing.DATA_DIR, 'model', '2d', 'smtk')

        # Load the model file
        model_path = os.path.join(self.model_folder, MODEL_FILENAME)
        logging.info('Reading %s' % model_path)
        json_string = None
        with open(model_path, 'r') as f:
            json_string = f.read()
        if json_string is None:
            logging.error('Unable to load input file')
            sys.exit(2)
        scope.store = smtk.model.Resource.create()
        ok = smtk.model.SessionIOJSON.loadModelRecords(
            json_string, scope.store)

        # Load cross-reference file
        self.load_xref(scope, self.model_folder)

        # Build attributes and write to file
        scope.att_data = list()
        resource = self.generate_attributes(scope)
        logging.info('Writing %s' % SBI_FILENAME)
        writer = smtk.io.AttributeWriter()
        logger = smtk.io.Logger()
        err = writer.write(resource, SBI_FILENAME, logger)
        if err:
            logging.error('Unable to write attribute file')
            logging.error(logger.convertToString())
            sys.exit(5)

        # Delete model & attributes so the re-read below starts clean.
        del scope.store
        del resource

        # Re-import model
        test_store = smtk.model.Resource.create()
        ok = smtk.model.SessionIOJSON.loadModelRecords(json_string, test_store)
        scope.store = test_store

        # Re-read attribute file
        logging.info('Reading back %s' % SBI_FILENAME)
        test_resource = smtk.attribute.Resource.create()
        reader = smtk.io.AttributeReader()
        err = reader.read(test_resource, SBI_FILENAME, logger)
        if err:
            logging.error("Unable to read attribute file")
            logging.error(logger.convertToString())
        self.assertTrue(not err, "Unable to read attribute file")

        # Set model and verify attributes
        error_count = self.check_attributes(scope, test_resource)
        self.assertEqual(error_count, 0, "At least one error occurred.")
if __name__ == '__main__':
    # Let smtk.testing consume its harness arguments (data directory etc.
    # -- see smtk.testing) before unittest parses the remainder.
    smtk.testing.process_arguments()
    unittest.main()
| 1.976563 | 2 |
Swish_Function.py | Ali-Sahili/Background-Subtraction-Unsupervised-Learning | 5 | 12763978 | import torch
import torch.nn as nn
class Swish(nn.Module):
    """Swish activation module: f(x) = x * sigmoid(x) (a.k.a. SiLU)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        """Apply the Swish activation element-wise to ``x``."""
        gate = torch.sigmoid(x)
        return gate * x
"""
class Swish(nn.Module):
def forward(self, input):
return (input * torch.sigmoid(input))
def __repr__(self):
return self.__class__.__name__ + ' ()'
"""
| 2.96875 | 3 |
bot/cogs/socket_comm.py | fwizpy/Tortoise-BOT | 64 | 12763979 | import os
import json
import socket
import logging
import asyncio
from typing import List
from discord import Forbidden
from discord.ext import commands
from bot import constants
from bot.utils.embed_handler import info, thumbnail, success
from bot.utils.members import get_member_activity, get_member_status
from bot.utils.checks import check_if_it_is_tortoise_guild, tortoise_bot_developer_only
from bot.utils.exceptions import (
EndpointNotFound, EndpointBadArguments, EndpointError, EndpointSuccess, InternalServerError, DiscordIDNotFound
)
logger = logging.getLogger(__name__)
# Keys are endpoint names, values are their functions to be called.
_endpoints_mapping = {}
buffer_size = 255
maximum_buffer = 10240
def endpoint_register(*, endpoint_key: str = None):
    """
    Decorator to register new socket endpoint.
    Both sync and async functions can be registered.
    If endpoint_key is not passed then the name of decorated function is used.
    Endpoint function return is optional, if there is a return then that return is passed back as
    key `data` to client, this is dealt in process_request function.
    Default return is EndpointSuccess().response , see process_request.
    In case of error, decorated function should raise one of the EndpointError sub-types.
    If it doesn't explicitly raise but error does happen it is handled in process_request and appropriate response
    code will be returned to client, this is dealt in process_request function.
    :param endpoint_key: optional name to use as endpoint key.
    """
    def decorator(function):
        nonlocal endpoint_key
        if not endpoint_key:
            endpoint_key = function.__name__
        if endpoint_key in _endpoints_mapping:
            raise Exception(f"Endpoint {endpoint_key} already registered.")
        # NOTE: the *undecorated* function is stored; process_request awaits it
        # directly, so the wrapper below is only used if the decorated name is
        # called explicitly from synchronous code.
        _endpoints_mapping[endpoint_key] = function
        def wrapper(*args, **kwargs):
            # Both sync and async support.
            # NOTE(review): asyncio.coroutine was removed in Python 3.11 and
            # run_until_complete fails inside an already-running loop --
            # confirm this wrapper is never invoked from the event loop.
            async_function = asyncio.coroutine(function)
            loop = asyncio.get_event_loop()
            loop.run_until_complete(async_function(*args, **kwargs))
        return wrapper
    return decorator
class SocketCommunication(commands.Cog):
"""
Cog dealing with socket communication between the bot and website server.
How to register new endpoint:
Just decorate it with @endpoint_register
Read the docstring of that decorator to know what your endpoint should return/raise.
"""
    def __init__(self, bot):
        """Cache guild/channel/role/emoji handles and start the socket server task."""
        self.bot = bot
        self.tortoise_guild = bot.get_guild(constants.tortoise_guild_id)
        self.verified_role = self.tortoise_guild.get_role(constants.verified_role_id)
        self.new_member_role = self.tortoise_guild.get_role(constants.new_member_role)
        self.successful_verifications_channel = bot.get_channel(constants.successful_verifications_channel_id)
        self.general_channel = bot.get_channel(constants.general_channel_id)
        self.welcome_channel = bot.get_channel(constants.welcome_channel_id)
        self.verified_emoji = bot.get_emoji(constants.verified_emoji_id)
        # Clients that have presented the correct auth token this session.
        self.verified_clients = set()
        self.auth_token = os.getenv("SOCKET_AUTH_TOKEN")
        self._socket_server = SocketCommunication.create_server()
        # Background task accepting and serving socket clients.
        self.task = self.bot.loop.create_task(self.run_server(self._socket_server))
    def cog_unload(self):
        """Cancel the server task and close all client/server sockets on unload."""
        logger.debug("Unloading socket comm, closing connections.")
        self.task.cancel()
        for client in self.verified_clients:
            try:
                client.close()
            except OSError:
                # Not supported on Windows
                pass
        try:
            self._socket_server.shutdown(socket.SHUT_RDWR)
            self._socket_server.close()
        except OSError:
            # Not supported on Windows
            pass
        logger.info("Socket com unloaded.")
    @commands.command()
    @commands.check(check_if_it_is_tortoise_guild)
    @commands.check(tortoise_bot_developer_only)
    async def show_endpoints(self, ctx):
        """List all registered socket endpoint names (bot developers only)."""
        await ctx.send(" ,".join(_endpoints_mapping))
    @staticmethod
    def create_server():
        """Create a non-blocking TCP server socket listening on all interfaces.

        Port comes from the SOCKET_SERVER_PORT environment variable.
        NOTE(review): if that variable is unset, int(None) raises TypeError --
        confirm deployment always provides it.
        """
        logger.info("Starting socket comm server...")
        server = socket.socket()
        server.bind(("0.0.0.0", int(os.getenv("SOCKET_SERVER_PORT"))))
        server.listen(3)
        server.setblocking(False)
        logger.info("Socket comm server started.")
        return server
    async def run_server(self, server: socket.socket):
        """Accept clients forever, spawning one handler task per connection."""
        while True:
            client, _ = await self.bot.loop.sock_accept(server)
            client_name = client.getpeername()
            logger.info(f"{client_name} connected.")
            self.bot.loop.create_task(self.handle_client(client, client_name))
    async def handle_client(self, client, client_name: str):
        """Serve one client: buffer requests, authorize, dispatch, respond.

        Loops until the client disconnects, sends an empty request, exceeds
        the buffer limit, or fails authorization. Each request is a JSON dict
        handed to process_request; the response dict is sent back as JSON.
        """
        while True:  # keep receiving client requests until he closes/disconnects
            request = ""
            while True:  # buffer client request in case of long message
                try:
                    buffer = (await self.bot.loop.sock_recv(client, buffer_size)).decode("utf8")
                    request += buffer
                except ConnectionResetError:
                    # If the client disconnects without sending quit.
                    logger.info(f"{client_name} disconnected.")
                    return
                # A short read means the message is complete.
                if len(buffer) < buffer_size:
                    break
                elif len(request) > maximum_buffer:
                    response = EndpointError(400, "Buffer size exceeded.").response
                    await self.send_to_client(client, json.dumps(response))
                    client.close()
                    return
            if not request:
                logger.info("Empty request, closing.")
                break
            try:
                request = json.loads(request)
            except json.JSONDecodeError:
                response = EndpointError(400, "Not a valid JSON formatted request.").response
                await self.send_to_client(client, json.dumps(response))
                logger.debug(f"{client_name}:{response}:{request}")
                continue
            logger.debug(f"Server got:{request}")
            # TODO
            # temporal hardcoded fix to make ping endpoint public
            endpoint_key = request.get("endpoint")
            # Unverified clients must authenticate first (except for "ping").
            if client not in self.verified_clients and endpoint_key != "ping":
                token = request.get("auth")
                if token is not None and token == self.auth_token:
                    self.verified_clients.add(client)
                    response = EndpointSuccess().response
                    await self.send_to_client(client, json.dumps(response))
                    logger.info(f"{client_name} successfully authorized.")
                    continue
                else:
                    response = EndpointError(401, "Verification unsuccessful, closing conn..").response
                    await self.send_to_client(client, json.dumps(response))
                    logger.debug(f"{client_name}:{response}:{request}")
                    break
            response = await self.process_request(request)
            logger.debug(f"Request processed, response:{response}")
            await self.send_to_client(client, json.dumps(response))
        logger.info(f"Closing {client_name}")
        self.verified_clients.discard(client)
        client.close()
    async def send_to_client(self, client, msg: str):
        """
        Send response message to specified client.

        The message is encoded with "unicode_escape" before sending.
        """
        try:
            await self.bot.loop.sock_sendall(client, bytes(msg.encode("unicode_escape")))
        except BrokenPipeError:
            # If the client closes the connection too quickly or just does't even bother listening to response we'll
            # get this, so just ignore
            pass
async def process_request(self, request: dict) -> dict:
"""
This should be called for each client request.
Parses requests and deals with any errors and responses to client.
:param request: dict which has to be formatted as follows:
{
"endpoint": "string which endpoint to use",
"data": [optional] data to be used on endpoint function (list of member IDs etc)
}
Endpoint is available if it was decorated with @endpoint_register
"""
if not isinstance(request, dict):
logger.critical("Error processing socket comm, request is not a dict.")
return InternalServerError().response
endpoint_key = request.get("endpoint")
if not endpoint_key:
return EndpointError(400, "No endpoint specified.").response
elif not isinstance(endpoint_key, str):
return EndpointError(400, "Endpoint name has to be a string.").response
function = _endpoints_mapping.get(endpoint_key)
if function is None:
return EndpointNotFound().response
endpoint_data = request.get("data")
try:
# Key data is optional
if not endpoint_data:
endpoint_returned_data = await function(self)
else:
endpoint_returned_data = await function(self, endpoint_data)
except TypeError as e:
logger.critical(f"Bad arguments for endpoint {endpoint_key} {endpoint_data} {e}")
return EndpointBadArguments().response
except EndpointError as e:
# If endpoint function raises then return it's response
return e.response
except Exception as e:
logger.critical(f"Error processing socket endpoint: {endpoint_key} , data:{endpoint_data} {e}")
return InternalServerError().response
# If we've come all the way here then no errors occurred and endpoint function executed correctly.
server_response = EndpointSuccess().response
# Endpoint return data is optional
if endpoint_returned_data is None:
return server_response
else:
server_response.update({"data": endpoint_returned_data})
return endpoint_returned_data
    @endpoint_register(endpoint_key="send")
    async def send(self, data: dict):
        """
        Makes the bot send requested message channel or user or both.
        :param data: dict in format
            {
            "channel_id": 123,
            "user_id": 123,
            "message": "Test"
            }
        Where both channel_id and user_id are optional but at least one has to be passed.
        Message is the message to send.
        :raises EndpointBadArguments: if message or both IDs are missing.
        :raises DiscordIDNotFound: if neither ID resolves to a channel/user.
        """
        message = data.get("message")
        if message is None:
            raise EndpointBadArguments()
        channel_id = data.get("channel_id")
        user_id = data.get("user_id")
        if channel_id is None and user_id is None:
            raise EndpointBadArguments()
        channel = self.bot.get_channel(channel_id)
        user = self.bot.get_user(user_id)
        if channel is None and user is None:
            raise DiscordIDNotFound()
        if channel is not None:
            await channel.send(embed=thumbnail(message, self.bot.user))
        if user is not None:
            try:
                await user.send(embed=thumbnail(message, self.bot.user, "A message just for you!"))
            except Forbidden:
                # Users who disabled DMs are skipped silently (just logged).
                logger.info(f"Skipping send endpoint to {user} as he blocked DMs.")
    @endpoint_register(endpoint_key="member_activities")
    async def get_member_data(self, members: List[int]) -> dict:
        """
        Gets activities and top role from all members passed in param members.
        :param members: list of member ids to get activity and top role from
        :return: dict in form (the status envelope is added by process_request):
            {
            'data': {
                'member_id': {"activity": "bla_bla", "top_role": "role name"},
                ...
                }
            }
            Members not found in the guild get "NOT FOUND" placeholders.
        """
        response_data = {}
        logger.debug(f"Processing members: {members}")
        for member_id in members:
            member = self.tortoise_guild.get_member(member_id)
            member_data = {"activity": "NOT FOUND", "top_role": "NOT FOUND"}
            if member is None:
                logger.debug(f"Member {member_id} not found.")
                response_data[member_id] = member_data
                continue
            activity = get_member_activity(member)
            if activity is None:
                # Fall back to the member's status when no activity is set.
                activity = get_member_status(member)
            member_data["activity"] = activity
            member_data["top_role"] = member.top_role.name
            response_data[member_id] = member_data
        return_data = {"data": response_data}
        return return_data
    @endpoint_register(endpoint_key="verify")
    async def verify_member(self, member_id: str):
        """
        Adds verified role to the member and also sends success messages.
        :param member_id: str member id to verify
        :raises EndpointBadArguments: if member_id is not an integer string.
        :raises DiscordIDNotFound: if a required guild/role/channel or the
            member itself cannot be resolved.
        """
        try:
            member_id = int(member_id)
        except ValueError:
            raise EndpointBadArguments()
        none_checks = (
            self.tortoise_guild, self.verified_role, self.new_member_role,
            self.successful_verifications_channel, self.welcome_channel
        )
        for check_none in none_checks:
            if check_none is None:
                logger.warning(f"One of necessary IDs was not found {none_checks}")
                raise DiscordIDNotFound()
        # Attempt to fix bug with verification where sometimes member is not found in cache even if they are in guild
        tortoise_guild = self.bot.get_guild(constants.tortoise_guild_id)
        member = tortoise_guild.get_member(member_id)
        if member is None:
            logger.critical(f"Can't verify, member is not found in guild {member} {member_id}")
            raise DiscordIDNotFound()
        await member.add_roles(self.verified_role, self.new_member_role, reason="Completed Oauth2 Verification")
        await self.successful_verifications_channel.send(
            embed=info(f"{member} is now verified.", member.guild.me, title="")
        )
        # DM text sent to the member at the end of this coroutine.
        msg = (
            f"You are now verified {self.verified_emoji}\n\n"
            f"Make sure to read {self.welcome_channel.mention}"
        )
        await self.general_channel.send(
            member.mention, embed=info(f"Say hi to our newest member {member.mention}", member.guild.me, title=""),
            delete_after=100
        )
        await member.send(embed=success(msg))
@endpoint_register()
async def contact(self, data: dict):
"""
Sends request data to website log channel.
:param data: dict data from the request
"""
guild = self.bot.get_guild(constants.tortoise_guild_id)
website_log_channel = guild.get_channel(constants.website_log_channel_id)
for check_none in (guild, website_log_channel):
if check_none is None:
raise DiscordIDNotFound()
await website_log_channel.send(f"{data}")
    @endpoint_register()
    async def signal_update(self, signal: str):
        """
        Signals the bot it should update something locally like cache by fetching it from database.
        :param signal: can be:
            'rules'  signals updating rules
            'server_meta' signals updating server meta
        :raises EndpointBadArguments: for any other signal value.
        """
        # Do not await here as API is waiting for response, (for some reason it sends signal and only updates db after
        # receiving any response). Use create_task instead.
        if signal == "rules":
            tortoise_server_cog = self.bot.get_cog("TortoiseServer")
            self.bot.loop.create_task(tortoise_server_cog.refresh_rules_helper())
        elif signal == "server_meta":
            self.bot.loop.create_task(self.bot.reload_tortoise_meta_cache())
        else:
            raise EndpointBadArguments()
    @endpoint_register()
    async def ping(self):
        """Public liveness probe; raises 503 if the Discord websocket is closed."""
        if self.bot.is_closed():
            raise EndpointError(503, "VPS online but Discord websocket closed.")
def setup(bot):
    """discord.py extension entry point: attach the cog to the bot."""
    bot.add_cog(SocketCommunication(bot))
| 2.390625 | 2 |
read.py | SnailPJW/pyEcg | 1 | 12763980 | # 引入 sqlite 套件
import sqlite3
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

# Open the SQLite database file holding the ECG records.
conn = sqlite3.connect('database.db')
db_connection = conn.cursor()

List_Ecg_Signal = []  # collected ECG signals, one float32 array per record

# Query all record rows.
rows = db_connection.execute("SELECT serialno,time,length,date,ecg,qrs,beat,feature,measurement,marker,scale,parameter FROM Records;")
for row in rows:
    # Column index 4 ("ecg") holds raw little-endian float32 samples.
    List_Ecg_Signal.append(np.frombuffer(row[4], dtype='<f4'))

# Bug fix: the original only closed the cursor; close the connection too so
# the database file handle is actually released.
db_connection.close()
conn.close()
| 3.078125 | 3 |
tests/integration/cryptonia/test_cryptonia_main.py | magnublo/msc-darkweb-scraping | 0 | 12763981 | <reponame>magnublo/msc-darkweb-scraping
from unittest import TestCase, skip
from unittest.mock import patch, Mock
import src.main
from definitions import ROOT_DIR
from tests.integration.mocks.mocked_dynamic_config import MOCKED_WEBSITES_TO_BE_SCRAPED_CRYPTONIA, \
mocked_get_logger_config
from tests.integration.mocks.mocked_scraping_manager import MockedScrapingManager
TESTS_DIR = ROOT_DIR + "tests/cryptonia/"
HTML_DIR = TESTS_DIR + "html_files/"
class TestMain(TestCase):
    """Integration test driving src.main with mocked Cryptonia scraping config."""

    @patch('src.main.WEBSITES_TO_BE_SCRAPED', MOCKED_WEBSITES_TO_BE_SCRAPED_CRYPTONIA)
    @patch('src.main.get_logger_config', side_effect=mocked_get_logger_config)
    @skip("Integration test with many dependencies")
    def test_cryptonia_main(self, mocked_get_logger_config):
        # Swap the real ScrapingManager for a mock for the duration of run().
        with patch('src.main.ScrapingManager', MockedScrapingManager):
            src.main.run()
| 2.265625 | 2 |
services/activities/update/send_update_servicer.py | CPSSD/rabble | 3 | 12763982 | <filename>services/activities/update/send_update_servicer.py
import os
import sys
from services.proto import database_pb2 as dbpb
from services.proto import update_pb2 as upb
from utils.articles import get_article, convert_to_tags_string, md_to_html
HOSTNAME_ENV = 'HOST_NAME'
class SendUpdateServicer:
    """gRPC servicer that applies article edits locally and federates them.

    Wraps the database stub, markdown renderer and ActivityPub utilities
    needed to update an article and send the corresponding "Update" activity
    to the author's followers.
    """

    def __init__(self, logger, db, md, activ_util, users_util, hostname=None):
        """Store collaborators; hostname falls back to the HOST_NAME env var.

        Exits the process (status 1) if no hostname can be determined.
        """
        self._logger = logger
        self._db = db
        self._md = md
        self._activ_util = activ_util
        self._users_util = users_util
        self._hostname = hostname if hostname else os.environ.get(HOSTNAME_ENV)
        if not self._hostname:
            self._logger.error("Hostname for SendUpdateServicer not set")
            sys.exit(1)

    def _update_locally(self, article, req):
        """Write the edited article to the local posts DB.

        Returns True on success, False if the DB reported an error.
        """
        self._logger.info("Sending update request to DB")
        html_body = md_to_html(self._md, req.body)
        resp = self._db.Posts(dbpb.PostsRequest(
            request_type=dbpb.PostsRequest.UPDATE,
            match=dbpb.PostsEntry(global_id=article.global_id),
            entry=dbpb.PostsEntry(
                title=req.title,
                body=html_body,
                md_body=req.body,
                tags=convert_to_tags_string(req.tags),
                summary=req.summary,
            ),
        ))
        if resp.result_type != dbpb.PostsResponse.OK:
            self._logger.error("Could not update article: %s", resp.error)
            return False
        return True

    def _build_update(self, user, article, req):
        """Build the ActivityPub "Update" activity dict for this edit."""
        actor = self._activ_util.build_actor(user.handle, self._hostname)
        article_url = self._activ_util.build_local_article_url(user, article)
        timestamp = article.creation_datetime.ToJsonString()
        ap_article = self._activ_util.build_article(
            article.ap_id,
            req.title,
            timestamp,
            actor,
            req.body,
            req.summary,
            article_url=article_url,
        )
        return {
            "@context": self._activ_util.rabble_context(),
            "type": "Update",
            "object": ap_article,
        }

    def SendUpdateActivity(self, req, ctx):
        """RPC entry point: validate, update locally, then federate the edit.

        Only the article's author may edit it; other users get DENIED.
        Returns an UpdateResponse with OK, DENIED or ERROR (plus message).
        """
        self._logger.info("Got request to update article %d from %d",
                          req.article_id, req.user_id)
        user = self._users_util.get_user_from_db(global_id=req.user_id)
        if user is None:
            return upb.UpdateResponse(
                result_type=upb.UpdateResponse.ERROR,
                error="Error retrieving user",
            )
        article = get_article(self._logger, self._db, global_id=req.article_id)
        if article is None:
            return upb.UpdateResponse(
                result_type=upb.UpdateResponse.ERROR,
                error="Error retrieving article",
            )
        if article.author_id != user.global_id:
            self._logger.warning(
                "User %d requested to edit article belonging to user %d",
                req.user_id, article.author_id)
            return upb.UpdateResponse(result_type=upb.UpdateResponse.DENIED)
        # Update article locally
        if not self._update_locally(article, req):
            return upb.UpdateResponse(
                result_type=upb.UpdateResponse.ERROR,
                error="Error updating article",
            )
        # Send out update activity
        update_obj = self._build_update(user, article, req)
        self._logger.info("Activity: %s", str(update_obj))
        err = self._activ_util.forward_activity_to_followers(
            req.user_id, update_obj)
        if err is not None:
            return upb.UpdateResponse(
                result_type=upb.UpdateResponse.ERROR,
                error=err,
            )
        return upb.UpdateResponse(result_type=upb.UpdateResponse.OK)
| 2.34375 | 2 |
soulsgym/core/memory_manipulator.py | amacati/SoulsGym | 0 | 12763983 | <reponame>amacati/SoulsGym
"""The ``memory_manipulator`` module is a wrapper around ``pymem`` for memory read and write access.
It implements some of the basic CheatEngine functionalities in Python. The game is controlled by
changing the values of ingame properties in the process memory. We cannot write to static memory
addresses since the process memory layout is dynamic and changes every time the game loads. Memory
locations are given as chains of pointers instead which we have to resolve to get the current
address for each attribute. These pointer chains were largely copied from available Dark Souls III
cheat tables.
Note:
Not all game properties of interest were included in the cheat tables. Some values and their
pointer chains were determined by us and are by no means guaranteed to be stable. Please report
any memory read or write error to help us identify unstable pointer chains!
Warning:
We cache resolved pointer chains to increase read and write access times. This requires manual
cache clearing. For details see :meth:`MemoryManipulator.clear_cache`.
The ``MemoryManipulator`` is writing from an external process to a memory region in use by the game
process. You *will* see race conditions during writing, particularly for values with high frequency
writes in the game loop (e.g. coordinates). Be sure to include checks if writes were successful and
have taken effect in the game when you write to these memory locations.
"""
from __future__ import annotations
from typing import List
import psutil
import win32process
import win32api
import win32con
import pymem as pym
from pymem import Pymem
from soulsgym.core.utils import Singleton
class MemoryManipulator(Singleton):
"""Handle reads and writes to the game process memory.
The ``MemoryManipulator`` wraps ``pymem`` functions for memory read and writes. It manages the
game memory pointers, address resolving and decoding.
"""
    def __init__(self, process_name: str = "DarkSoulsIII.exe"):
        """Initialize the cache and pointer attributes.

        If the game is not open, the pointer values can't be inferred which causes an exception.

        Args:
            process_name: The target process name. Should always be DarkSoulsIII.exe, unless the app
                name changes.
        """
        # NOTE(review): "is_init" is presumably set by the Singleton base so
        # re-construction is a no-op -- confirm against soulsgym.core.utils.
        if not hasattr(self, "is_init"):
            self.process_name = process_name
            self.pid = self.get_pid(self.process_name)
            # Get the base address
            self.process_handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, False, self.pid)
            modules = win32process.EnumProcessModules(self.process_handle)
            self.base_address = modules[0]
            # Create Pymem object once, this has a relatively long initialization
            self.pymem = Pymem()
            self.pymem.open_process_from_id(self.pid)
            # Cache of resolved pointer chains, see resolve_address/clear_cache.
            self.address_cache = {}
    def clear_cache(self):
        """Clear the reference look-up cache of the memory manipulator.

        The ``MemoryManipulator`` caches all pointer chains it resolves to speed up the reads and
        writes. If the game reloads, these addresses are no longer guaranteed to be valid and the
        address cache has to be cleared in order to resolve the new addresses of all values. Cache
        validation by reading the player death count is omitted since it incurs additional overhead
        for read operations and offsets any performance gains made by using an address cache.

        Warning:
            We do not validate the cache before reading from a cached address! It is the users's
            responsibility to clear the cache on reload!
        """
        self.address_cache = {}
@staticmethod
def get_pid(process_name: str) -> int:
"""Fetch the process PID of a process identified by a given name.
Args:
process_name: The name of the process to get the PID from.
Returns:
The process PID.
Raises:
RuntimeError: No process with name ``process_name`` currently open.
"""
for proc in psutil.process_iter():
if proc.name() == process_name:
return proc.pid
raise RuntimeError(f"Process {process_name} not open")
    def resolve_address(self, addr_offsets: List[int], base: int) -> int:
        """Resolve an address by its offsets and a base.

        Looks up the address cache first.

        Warning:
            Can't detect an invalid cache, this is the user's responsibility!

        Args:
            addr_offsets: The offsets which will be resolved iteratively. The first offset is the
                offset to the base itself.
            base: The base offset from the start of the program's memory.

        Returns:
            The resolved address.

        Raises:
            pym.exception.MemoryReadError: An error with the memory read occured.
        """
        u_id = str((addr_offsets, base))
        # Look up the cache
        if u_id in self.address_cache:
            return self.address_cache[u_id]
        # When no cache hit: resolve by following the pointer chain until its last link
        helper = self.pymem.read_longlong(base)
        for o in addr_offsets[:-1]:
            helper = self.pymem.read_longlong(helper + o)
        # The final offset is added without dereferencing: it addresses the value itself.
        helper += addr_offsets[-1]
        # Add to cache
        self.address_cache[u_id] = helper
        return helper
    def read_int(self, address: int) -> int:
        """Read an integer from memory.

        Args:
            address: The read address.

        Returns:
            The integer value.

        Raises:
            pym.exception.MemoryReadError: An error with the memory read occured.
        """
        # Delegates to pymem's read_long (C long-sized read).
        return self.pymem.read_long(address)
    def read_float(self, address: int) -> float:
        """Read a float from memory.

        Args:
            address: The read address.

        Returns:
            The float value.

        Raises:
            pym.exception.MemoryReadError: An error with the memory read occured.
        """
        # Delegates to pymem's read_float.
        return self.pymem.read_float(address)
    def read_string(self,
                    address: int,
                    length: int,
                    null_term: bool = True,
                    codec: str = "utf-16") -> str:
        """Read a string from memory.

        Args:
            address: The read address.
            length: The expected (maximum) string length.
            null_term: String should be cut after double 0x00.
            codec: The codec used to decode the bytes.

        Returns:
            The string.

        Raises:
            pym.exception.MemoryReadError: An error with the memory read occured.
            UnicodeDecodeError: An error with the decoding of the read bytes occured.
        """
        s = self.pymem.read_bytes(address, length)
        if null_term:
            pos = 0
            # Scan byte pairs for a UTF-16 null terminator (0x00 0x00).
            for i in range(1, length, 2):
                if s[i - 1] == 0x00 and s[i] == 0x00:
                    pos = i
                    break
            s = s[:pos - 1]
            # NOTE(review): when no terminator is found, pos stays 0, so the
            # slice above is s[:-1] (drops the last byte) and only a single
            # 0x00 is appended -- confirm this is intended for 2-byte UTF-16
            # code units.
            if not pos:
                s = s + bytes(1)  # Add null termination for strings which exceed 20 chars.
        return s.decode(codec)
    def read_bytes(self, address: int, length: int) -> bytes:
        """Read raw bytes from memory.

        Args:
            address: The read address.
            length: The bytes length.

        Returns:
            The raw bytes.

        Raises:
            pym.exception.MemoryReadError: An error with the memory read occured.
        """
        # Delegates to pymem's read_bytes without any decoding.
        return self.pymem.read_bytes(address, length)
def write_bit(self, address: int, index: int, value: int):
"""Write a single bit.
Args:
address: The write address.
index: The index of the bit (0 ... 7).
value: The value of the bit (0/1).
Raises:
pym.exception.MemoryWriteError: An error with the memory write occured.
"""
byte = self.read_bytes(address, 1)
mask = (1 << index).to_bytes(1, "little")
byte = (byte[0] & ~mask[0]).to_bytes(1, "little")
if value:
byte = (byte[0] | mask[0]).to_bytes(1, "little")
self.write_bytes(address, byte)
    def write_int(self, address: int, value: int):
        """Write an integer to memory.

        Args:
            address: The write address.
            value: The value of the integer.

        Raises:
            pym.exception.MemoryWriteError: An error with the memory write occured.
        """
        # Delegates to pymem's module-level write_long helper.
        pym.memory.write_long(self.pymem.process_handle, address, value)
    def write_float(self, address: int, value: float):
        """Write a float to memory.

        Args:
            address: The write address.
            value: The value of the float.

        Raises:
            pym.exception.MemoryWriteError: An error with the memory write occured.
        """
        # Delegates to pymem's module-level write_float helper.
        pym.memory.write_float(self.pymem.process_handle, address, value)
    def write_bytes(self, address: int, buffer: bytes):
        """Write a series of bytes to memory.

        Args:
            address: The write address for the first byte.
            buffer: The bytes.

        Raises:
            pym.exception.MemoryWriteError: An error with the memory write occured.
        """
        # Writes len(buffer) raw bytes starting at address.
        pym.memory.write_bytes(self.pymem.process_handle, address, buffer, len(buffer))
| 2.921875 | 3 |
Lib/test/test_code.py | sireliah/polish-python | 1 | 12763984 | """This module includes tests of the code object representation.
>>> def f(x):
... def g(y):
... zwróć x + y
... zwróć g
...
>>> dump(f.__code__)
name: f
argcount: 1
kwonlyargcount: 0
names: ()
varnames: ('x', 'g')
cellvars: ('x',)
freevars: ()
nlocals: 2
flags: 3
consts: ('Nic', '<code object g>', "'f.<locals>.g'")
>>> dump(f(4).__code__)
name: g
argcount: 1
kwonlyargcount: 0
names: ()
varnames: ('y',)
cellvars: ()
freevars: ('x',)
nlocals: 1
flags: 19
consts: ('Nic',)
>>> def h(x, y):
... a = x + y
... b = x - y
... c = a * b
... zwróć c
...
>>> dump(h.__code__)
name: h
argcount: 2
kwonlyargcount: 0
names: ()
varnames: ('x', 'y', 'a', 'b', 'c')
cellvars: ()
freevars: ()
nlocals: 5
flags: 67
consts: ('Nic',)
>>> def attrs(obj):
... print(obj.attr1)
... print(obj.attr2)
... print(obj.attr3)
>>> dump(attrs.__code__)
name: attrs
argcount: 1
kwonlyargcount: 0
names: ('print', 'attr1', 'attr2', 'attr3')
varnames: ('obj',)
cellvars: ()
freevars: ()
nlocals: 1
flags: 67
consts: ('Nic',)
>>> def optimize_away():
... 'doc string'
... 'not a docstring'
... 53
... 0x53
>>> dump(optimize_away.__code__)
name: optimize_away
argcount: 0
kwonlyargcount: 0
names: ()
varnames: ()
cellvars: ()
freevars: ()
nlocals: 0
flags: 67
consts: ("'doc string'", 'Nic')
>>> def keywordonly_args(a,b,*,k1):
... zwróć a,b,k1
...
>>> dump(keywordonly_args.__code__)
name: keywordonly_args
argcount: 2
kwonlyargcount: 1
names: ()
varnames: ('a', 'b', 'k1')
cellvars: ()
freevars: ()
nlocals: 3
flags: 67
consts: ('Nic',)
"""
zaimportuj unittest
zaimportuj weakref
z test.support zaimportuj run_doctest, run_unittest, cpython_only
def consts(t):
    """Yield a doctest-safe sequence of object reprs."""
    # NOTE: this file uses a Polish-keyword Python dialect
    # ("dla"/"w" = for/in, "jeżeli"/"inaczej" = if/else, "uzyskaj" = yield).
    dla elt w t:
        r = repr(elt)
        # Replace nested code objects with a stable placeholder repr.
        jeżeli r.startswith("<code object"):
            uzyskaj "<code object %s>" % elt.co_name
        inaczej:
            uzyskaj r
def dump(co):
    """Print out a text representation of a code object."""
    # Print selected co_* attributes, then the doctest-safe consts tuple.
    dla attr w ["name", "argcount", "kwonlyargcount", "names", "varnames",
                "cellvars", "freevars", "nlocals", "flags"]:
        print("%s: %s" % (attr, getattr(co, "co_" + attr)))
    print("consts:", tuple(consts(co.co_consts)))
# Tests for the C-API helper that builds empty code objects.
klasa CodeTest(unittest.TestCase):
    @cpython_only
    def test_newempty(self):
        # code_newempty should record filename, name and first line number.
        zaimportuj _testcapi
        co = _testcapi.code_newempty("filename", "funcname", 15)
        self.assertEqual(co.co_filename, "filename")
        self.assertEqual(co.co_name, "funcname")
        self.assertEqual(co.co_firstlineno, 15)
# Verifies that code objects support weak references and run their callbacks.
klasa CodeWeakRefTest(unittest.TestCase):
    def test_basic(self):
        # Create a code object in a clean environment so that we know we have
        # the only reference to it left.
        namespace = {}
        exec("def f(): dalej", globals(), namespace)
        f = namespace["f"]
        usuń namespace
        self.called = Nieprawda
        def callback(code):
            self.called = Prawda
        # f is now the last reference to the function, and through it, the code
        # object. While we hold it, check that we can create a weakref and
        # deref it. Then delete it, and check that the callback gets called and
        # the reference dies.
        coderef = weakref.ref(f.__code__, callback)
        self.assertPrawda(bool(coderef()))
        usuń f
        self.assertNieprawda(bool(coderef()))
        self.assertPrawda(self.called)
def test_main(verbose=Nic):
    # Run the module doctests first, then the unittest classes above.
    z test zaimportuj test_code
    run_doctest(test_code, verbose)
    run_unittest(CodeTest, CodeWeakRefTest)
# Script entry point.
jeżeli __name__ == "__main__":
    test_main()
| 2.890625 | 3 |
threedod/benchmark_scripts/utils/tenFpsDataLoader.py | Levintsky/ARKitScenes | 237 | 12763985 | import copy
import cv2
import glob
import json
import numpy as np
import os
from .box_utils import compute_box_3d, boxes_to_corners_3d, get_size
from .rotation import convert_angle_axis_to_matrix3
from .taxonomy import class_names, ARKitDatasetConfig
def TrajStringToMatrix(traj_str):
    """ convert traj_str into translation and rotation matrices
    Args:
        traj_str: A space-delimited file where each line represents a camera position at a particular timestamp.
        The file has seven columns:
        * Column 1: timestamp
        * Columns 2-4: rotation (axis-angle representation in radians)
        * Columns 5-7: translation (usually in meters)

    Returns:
        ts: the timestamp, returned as the raw string token (not a float)
        Rt: (4, 4) world-to-pose matrix (inverse of the built extrinsics)
    """
    # line=[float(x) for x in traj_str.split()]
    # ts = line[0];
    # R = cv2.Rodrigues(np.array(line[1:4]))[0];
    # t = np.array(line[4:7]);
    # Rt = np.concatenate((np.concatenate((R, t[:,np.newaxis]), axis=1), [[0.0,0.0,0.0,1.0]]), axis=0)
    tokens = traj_str.split()
    assert len(tokens) == 7
    ts = tokens[0]
    # Rotation in angle axis
    angle_axis = [float(tokens[1]), float(tokens[2]), float(tokens[3])]
    r_w_to_p = convert_angle_axis_to_matrix3(np.asarray(angle_axis))
    # Translation
    t_w_to_p = np.asarray([float(tokens[4]), float(tokens[5]), float(tokens[6])])
    # Assemble the 4x4 homogeneous extrinsics, then invert it.
    extrinsics = np.eye(4, 4)
    extrinsics[:3, :3] = r_w_to_p
    extrinsics[:3, -1] = t_w_to_p
    Rt = np.linalg.inv(extrinsics)
    return (ts, Rt)
def st2_camera_intrinsics(filename):
    """Load an intrinsics file and return the 3x3 camera matrix K.

    File layout: width, height, fx, fy, cx, cy (whitespace separated).
    """
    params = np.loadtxt(filename)
    w, h, fx, fy, hw, hh = params
    return np.array([
        [fx, 0.0, hw],
        [0.0, fy, hh],
        [0.0, 0.0, 1.0],
    ])
def generate_point(
    rgb_image,
    depth_image,
    intrinsic,
    subsample=1,
    world_coordinate=True,
    pose=None,
):
    """Generate 3D point coordinates and related rgb feature

    Args:
        rgb_image: (h, w, 3) rgb
        depth_image: (h, w) depth
        intrinsic: (3, 3) camera intrinsics matrix
        subsample: int
            resize stride
        world_coordinate: bool
        pose: (4, 4) matrix
            transfer from camera to world coordindate;
            required when world_coordinate is True

    Returns:
        points: (N, 3) point cloud coordinates
            in world-coordinates if world_coordinate==True
            else in camera coordinates
        rgb_feat: (N, 3) rgb feature of each point

    Raises:
        ValueError: if world_coordinate is True but pose is None.
    """
    # Robustness fix: fail early with a clear message instead of a confusing
    # error from np.dot(pose, ...) further down when pose was left at None.
    if world_coordinate and pose is None:
        raise ValueError("pose is required when world_coordinate is True")
    intrinsic_4x4 = np.identity(4)
    intrinsic_4x4[:3, :3] = intrinsic
    # Pixel grid (optionally subsampled by the given stride).
    u, v = np.meshgrid(
        range(0, depth_image.shape[1], subsample),
        range(0, depth_image.shape[0], subsample),
    )
    d = depth_image[v, u]
    # Drop pixels with no depth measurement.
    d_filter = d != 0
    # Homogeneous image coordinates scaled by depth: [u*d, v*d, d, 1].
    mat = np.vstack(
        (
            u[d_filter] * d[d_filter],
            v[d_filter] * d[d_filter],
            d[d_filter],
            np.ones_like(u[d_filter]),
        )
    )
    # Back-project into camera coordinates.
    new_points_3d = np.dot(np.linalg.inv(intrinsic_4x4), mat)[:3]
    if world_coordinate:
        # Lift to homogeneous coordinates and apply the camera-to-world pose.
        new_points_3d_padding = np.vstack(
            (new_points_3d, np.ones((1, new_points_3d.shape[1])))
        )
        world_coord_padding = np.dot(pose, new_points_3d_padding)
        new_points_3d = world_coord_padding[:3]
    # Colors of the surviving (valid-depth) pixels.
    rgb_feat = rgb_image[v, u][d_filter]
    return new_points_3d.T, rgb_feat
def extract_gt(gt_fn):
    """Extract the original label data from an annotation file.

    Args:
        gt_fn: str (file name of "annotation.json")
            after loading, we get a dict with keys
            'data', 'stats', 'comment', 'confirm', 'skipped'
            ['data']: a list of dicts for bboxes, each dict has keys:
                'uid', 'label', 'modelId', 'children', 'objectId',
                'segments', 'hierarchy', 'isInGroup', 'labelType', 'attributes'
                'label': str
                'segments': dict for boxes
                    'centroid': list of float (x, y, z)
                    'axesLengths': list of float (x, y, z)
                    'normalizedAxes': list of float, len()=9
                'uid'
            'comments':
            'stats': ...
    Returns:
        skipped: bool, whether the scene was skipped by the annotator
        boxes_corners: (n, 8, 3) box corners, **world-coordinate**
        centers: (n, 3), **world-coordinate**
        sizes: (n, 3) full-sizes (no halving!)
        labels: list of str (raw, un-normalised labels)
        uids: list of str
    """
    # Fix: the original used a bare open() and leaked the file handle.
    with open(gt_fn, "r") as f:
        gt = json.load(f)
    skipped = gt['skipped']

    # Fix: the original tested ``len(gt) == 0``, which can never be true here
    # (gt['skipped'] above would already have raised KeyError on an empty
    # dict) -- while an empty 'data' list crashed np.concatenate below.
    if len(gt.get('data', [])) == 0:
        boxes_corners = np.zeros((0, 8, 3))
        centers = np.zeros((0, 3))
        sizes = np.zeros((0, 3))
        labels, uids = [], []
        return skipped, boxes_corners, centers, sizes, labels, uids

    boxes_corners = []
    centers = []
    sizes = []
    labels = []
    uids = []
    for data in gt['data']:
        # Normalise delimiters before matching against the known class list.
        label = data["label"]
        for delimiter in [" ", "-", "/"]:
            label = label.replace(delimiter, "_")
        if label not in class_names:
            print("unknown category: %s" % label)
            continue

        rotmat = np.array(data["segments"]["obbAligned"]["normalizedAxes"]).reshape(
            3, 3
        )
        center = np.array(data["segments"]["obbAligned"]["centroid"]).reshape(-1, 3)
        size = np.array(data["segments"]["obbAligned"]["axesLengths"]).reshape(-1, 3)
        box3d = compute_box_3d(size.reshape(3).tolist(), center, rotmat)
        '''
        Box corner order that we return is of the format below:
            6 -------- 7
           /|         /|
          5 -------- 4 .
          | |        | |
          . 2 -------- 3
          |/         |/
          1 -------- 0
        '''
        boxes_corners.append(box3d.reshape(1, 8, 3))
        # Recompute size/center from the corners so they agree with the box.
        size = np.array(get_size(box3d)).reshape(1, 3)
        center = np.mean(box3d, axis=0).reshape(1, 3)
        centers.append(center)
        sizes.append(size)
        # The raw (un-normalised) label is returned, as before.
        labels.append(data["label"])
        uids.append(data["uid"])

    # Fix: every annotation may have been skipped as an unknown category,
    # in which case np.concatenate on empty lists would raise.
    if len(boxes_corners) == 0:
        return (skipped, np.zeros((0, 8, 3)), np.zeros((0, 3)),
                np.zeros((0, 3)), [], [])
    centers = np.concatenate(centers, axis=0)
    sizes = np.concatenate(sizes, axis=0)
    boxes_corners = np.concatenate(boxes_corners, axis=0)
    return skipped, boxes_corners, centers, sizes, labels, uids
class TenFpsDataLoader(object):
    """Frame loader for one ARKitScenes video captured at 10 fps.

    Each item is a dict holding the depth map, the rgb image, camera
    intrinsics/pose and the back-projected point cloud for one frame.
    """

    def __init__(
        self,
        dataset_cfg,
        class_names,
        root_path=None,
        gt_path=None,
        logger=None,
        frame_rate=1,
        with_color_image=True,
        subsample=2,
        world_coordinate=True,
    ):
        """
        Args:
            dataset_cfg: EasyDict() with keys
                POINT_CLOUD_RANGE
                POINT_FEATURE_ENCODING
                DATA_PROCESSOR
            class_names: list of str
            root_path: path with all info for a scene_id
                color, color_2det, depth, label, vote, ...
            gt_path: xxx.json
                just to get correct floor height
            logger:
            frame_rate: int
            subsample: int
            world_coordinate: bool
        """
        self.root_path = root_path

        # pipeline does box residual coding here
        self.num_class = len(class_names)
        self.dc = ARKitDatasetConfig()

        depth_folder = os.path.join(self.root_path, "lowres_depth")
        if not os.path.exists(depth_folder):
            self.frame_ids = []
        else:
            depth_images = sorted(glob.glob(os.path.join(depth_folder, "*.png")))
            # Depth files are named "<video_id>_<timestamp>.png"; keep the
            # timestamp part as the frame id.
            self.frame_ids = [os.path.basename(x) for x in depth_images]
            self.frame_ids = [x.split(".png")[0].split("_")[1] for x in self.frame_ids]
        self.video_id = depth_folder.split('/')[-3]
        self.frame_ids = [x for x in self.frame_ids]
        self.frame_ids.sort()
        self.intrinsics = {}

        # Camera trajectory: one "<timestamp> <angle-axis> <translation>" line
        # per pose, keyed below by the timestamp rounded to milliseconds.
        traj_file = os.path.join(self.root_path, 'lowres_wide.traj')
        with open(traj_file) as f:
            self.traj = f.readlines()
        # convert traj to json dict
        poses_from_traj = {}
        for line in self.traj:
            traj_timestamp = line.split(" ")[0]
            poses_from_traj[f"{round(float(traj_timestamp), 3):.3f}"] = TrajStringToMatrix(line)[1].tolist()

        if os.path.exists(traj_file):
            self.poses = poses_from_traj
        else:
            self.poses = {}

        # Load per-frame intrinsics; timestamps in the intrinsics folder can
        # be off by one millisecond, so probe +/- 0.001 before giving up.
        for frame_id in self.frame_ids:
            intrinsic_fn = os.path.join(self.root_path, "lowres_wide_intrinsics", f"{self.video_id}_{frame_id}.pincam")
            if not os.path.exists(intrinsic_fn):
                intrinsic_fn = os.path.join(self.root_path, "lowres_wide_intrinsics",
                                            f"{self.video_id}_{float(frame_id) - 0.001:.3f}.pincam")
            if not os.path.exists(intrinsic_fn):
                intrinsic_fn = os.path.join(self.root_path, "lowres_wide_intrinsics",
                                            f"{self.video_id}_{float(frame_id) + 0.001:.3f}.pincam")
            if not os.path.exists(intrinsic_fn):
                print("frame_id", frame_id)
                print(intrinsic_fn)
            self.intrinsics[frame_id] = st2_camera_intrinsics(intrinsic_fn)

        self.frame_rate = frame_rate
        self.subsample = subsample
        self.with_color_image = with_color_image
        self.world_coordinate = world_coordinate

        if gt_path is not None and os.path.exists(gt_path):
            skipped, gt_corners, gt_centers, gt_sizes, _, _ = extract_gt(gt_path)
            self.gt_corners = gt_corners
            self.gt_centers = gt_centers
            self.gt_sizes = gt_sizes
        else:
            self.gt_corners = None
            self.gt_centers = None
            self.gt_sizes = None

    def __iter__(self):
        return self

    def __len__(self):
        return len(self.frame_ids)

    def __getitem__(self, idx):
        """
        Returns:
            frame: a dict
                {frame_id}: str
                {depth}: (h, w)
                {image}: (h, w)
                {image_path}: str
                {intrinsics}: np.array 3x3
                {pose}: np.array 4x4
                {pcd}: np.array (n, 3)
                    in world coordinate
                {color}: (n, 3)
        """
        frame_id = self.frame_ids[idx]
        frame = {}
        frame["frame_id"] = frame_id
        fname = "{}_{}.png".format(self.video_id, frame_id)
        depth_image_path = os.path.join(self.root_path, "lowres_depth", fname)
        if not os.path.exists(depth_image_path):
            print(depth_image_path)
        image_path = os.path.join(self.root_path, "lowres_wide", fname)
        if not os.path.exists(depth_image_path):
            print(depth_image_path, "does not exist")

        frame["depth"] = cv2.imread(depth_image_path, -1)
        frame["image"] = cv2.imread(image_path)
        frame["image_path"] = image_path
        depth_height, depth_width = frame["depth"].shape
        im_height, im_width, im_channels = frame["image"].shape

        frame["intrinsics"] = copy.deepcopy(self.intrinsics[frame_id])
        if str(frame_id) in self.poses.keys():
            frame_pose = np.array(self.poses[str(frame_id)])
        else:
            # NOTE(review): if no pose lies within 5 ms of this frame,
            # ``frame_pose`` stays unbound and the assignment below raises;
            # behaviour kept as in the original -- confirm before changing.
            for my_key in list(self.poses.keys()):
                if abs(float(frame_id) - float(my_key)) < 0.005:
                    frame_pose = np.array(self.poses[str(my_key)])
        frame["pose"] = copy.deepcopy(frame_pose)

        # Fix: the original computed two unused scale factors here via
        # ``np.float``, an alias removed in NumPy 1.24 (AttributeError on
        # modern NumPy); both dead assignments are dropped.
        if depth_height != im_height:
            # Pad the rgb image onto the depth resolution (288 x 384 x 3),
            # centring the original 192 x 256 crop.
            frame["image"] = np.zeros([depth_height, depth_width, 3])
            frame["image"][48 : 48 + 192, 64 : 64 + 256, :] = cv2.imread(image_path)

        # Depth is stored in millimetres; convert to metres and rgb to [0, 1].
        depth_image = frame["depth"] / 1000.0
        rgb_image = frame["image"] / 255.0
        pcd, rgb_feat = generate_point(
            rgb_image,
            depth_image,
            frame["intrinsics"],
            self.subsample,
            self.world_coordinate,
            frame_pose,
        )
        frame["pcd"] = pcd
        frame["color"] = rgb_feat
        return frame
sync/local/apps/src/cockpit.py | TrueDoctor/sdn-cockpit | 21 | 12763986 | # Basic imports for Ryu
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
import ryu.ofproto.ofproto_v1_3_parser as parser
import ryu.ofproto.ofproto_v1_3 as ofproto
from ryu.lib.packet import packet
from ryu.lib.packet import ether_types
from ryu.lib.packet import ethernet, arp, ipv4, ipv6
class CockpitApp(app_manager.RyuApp):
    """Base Ryu controller application for the SDN cockpit exercises."""

    def __init__(self, *args, **kwargs):
        super(CockpitApp, self).__init__(*args, **kwargs)

    def info(self, text):
        """Print *text* framed by a box of asterisks."""
        border = "*" * (len(text) + 4)
        print(border)
        print("* {:s} *".format(text))
        print(border)

    def program_flow(self, dp, match, actions, priority = 0,
            hard_timeout = 600, idle_timeout = 60
        ):
        """ Programs a new flow into a switch.

        Programming a new flow with the exact same match of an
        existing one will replace the existing flow.
        """
        apply_actions = parser.OFPInstructionActions(
            ofproto.OFPIT_APPLY_ACTIONS,
            actions
        )
        flowmod = parser.OFPFlowMod(
            dp,
            match = match,
            instructions = [apply_actions],
            priority = priority,
            hard_timeout = hard_timeout,
            idle_timeout = idle_timeout
        )
        dp.send_msg(flowmod)

    def send_pkt(self, dp, data, port = ofproto.OFPP_FLOOD):
        """ Convenience method that instructs a switch to forward
        a packet from the controller.
        """
        dp.send_msg(parser.OFPPacketOut(
            datapath = dp,
            actions = [parser.OFPActionOutput(port)],
            in_port = dp.ofproto.OFPP_CONTROLLER,
            data = data,
            buffer_id = ofproto.OFP_NO_BUFFER
        ))

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    # Make sure the name of the function does not collide with those
    # of classes that inherit from this class. Otherwise this
    # function will not be invoked.
    def __cockpit_app_switch_features_handler(self, ev):
        dp = ev.msg.datapath
        print("switch with id {:d} connected".format(dp.id))

        # Install the table-miss flow: every unmatched packet goes to the
        # controller, and the entry never times out.
        match_all = parser.OFPMatch()
        to_controller = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER)]
        self.program_flow(
            dp,
            match_all,
            to_controller,
            hard_timeout = 0,
            idle_timeout = 0
        )

        # Prevent switches from truncating packets when forwarding
        # to controller
        dp.send_msg(dp.ofproto_parser.OFPSetConfig(
            dp,
            dp.ofproto.OFPC_FRAG_NORMAL,
            0xffff
        ))
| 2.34375 | 2 |
{{cookiecutter.project_name}}/apps/users/filters.py | letusgoing/cookiecutter-django-drf | 0 | 12763987 | #!/usr/bin/env python
# coding:utf-8
"""
@Time : 2021/10/15 17:29
@Author : harvey
@File : filters.py
@Software: PyCharm
@Desc:
@Module
"""
import uuid
import datetime
from django.contrib.auth.models import AbstractUser
from django.conf import settings
from django.db import models
from django.utils import timezone
def default_date_expired():
    """Return the default account expiry: ten years from now (tz-aware).

    Fix: the original contained a second ``return`` (a fixed 2099 datetime)
    after this one, which was unreachable dead code and has been removed.
    """
    return timezone.now() + timezone.timedelta(days=365 * 10)
class User(AbstractUser):
    """Custom user model extending Django's AbstractUser.

    Adds a UUID primary key, profile fields (realname, nickname, avatar, ...),
    an account source (local vs LDAP) and an expiry timestamp.
    """
    # Where the account originates: created locally or synced from LDAP.
    SOURCE_ITEM = [('local', 'local'), ('ldap', 'ldap')]
    # Default avatar image path (relative to the static root).
    AVATAR = 'static/image/avatar/1.jpeg'
    id = models.UUIDField(default=uuid.uuid4, primary_key=True, verbose_name='主键')
    username = models.CharField(max_length=128, unique=True, verbose_name='用户名')
    realname = models.CharField(max_length=128, verbose_name='姓名')
    nickname = models.CharField(max_length=128, unique=True, verbose_name='昵称')
    email = models.EmailField(max_length=128, unique=True, verbose_name='邮箱')
    phone = models.CharField(max_length=20, blank=True, null=True, verbose_name='手机')
    avatar = models.ImageField(upload_to='static/image/avatar', default=AVATAR, verbose_name='头像')
    introduction = models.CharField(max_length=128, null=True, blank=True, verbose_name='简介')
    source = models.CharField(max_length=32, choices=SOURCE_ITEM, verbose_name='来源')
    # modules = models.ManyToManyField(AssetsModule,null=True,blank=True, verbose_name='关联模块')
    # sql_user = models.ManyToManyField(AssetsDbUser, verbose_name='授权SQL用户')
    role_id = models.UUIDField(null=True, blank=True, verbose_name="数据库权限角色ID")
    # NOTE(review): ``default=default_date_expired()`` is evaluated once at
    # import time, so every new row gets the expiry frozen at server start.
    # Django convention is to pass the callable itself
    # (``default=default_date_expired``) -- confirm before changing, since
    # that change would require a migration.
    date_expired = models.DateTimeField(
        default=default_date_expired(), blank=True, null=True,
        db_index=True, verbose_name='Date expired'
    )
    created_at = models.DateTimeField(null=True, blank=True, auto_now_add=True, verbose_name='创建时间')
    updated_at = models.DateTimeField(null=True, blank=True, auto_now=True, verbose_name='创建时间')
    created_by = models.ForeignKey('self', null=True, related_name='create_user', on_delete=models.CASCADE,
                                   verbose_name='创建者')
    updated_by = models.ForeignKey('self', null=True, related_name='updated_user', on_delete=models.CASCADE,
                                   verbose_name='更新者')
    # first_name = None
    # last_name = None
    # date_joined = None
    # groups = models.ManyToManyField(
    #     'users.UserGroup', related_name='users',
    #     blank=True, verbose_name='User group'
    # )

    # Overrides AbstractUser.get_full_name.
    def get_full_name(self):
        """Return the username (not first+last name as the base class does)."""
        return self.username

    def get_groups_name(self):
        """Return the names of all groups this user belongs to."""
        group_set = self.groups.all()
        return [g.name for g in group_set]

    def get_avatar_url(self):
        """Return the absolute URL of the user's avatar image."""
        return f"{settings.IMAGE_URL}/{self.avatar}"

    @property
    def is_expired(self):
        """True when ``date_expired`` is set and lies in the past."""
        if self.date_expired and self.date_expired < timezone.now():
            return True
        else:
            return False

    @property
    def is_valid(self):
        """True when the account is active and not expired."""
        if self.is_active and not self.is_expired:
            return True
        return False
| 2.15625 | 2 |
simim/visuals.py | nismod/sim | 7 | 12763988 | <gh_stars>1-10
""" visuals.py """
import numpy as np
from geopandas.plotting import plot_polygon_collection
import matplotlib.pyplot as plt
# https://matplotlib.org/examples/color/colormaps_reference.html
class Visual:
    """A grid of matplotlib panels with helpers for common plot types.

    Construct with a (rows, cols) layout, draw into individual panels by
    index, then call :meth:`show` or :meth:`to_png`.
    """

    def __init__(self, rows, cols, panel_x=5, panel_y=5):
        """Create a rows x cols figure; each panel is panel_x x panel_y inches."""
        self.rows = rows
        self.cols = cols
        self.fig, self.axes = plt.subplots(nrows=rows, ncols=cols, figsize=(cols*panel_x, rows*panel_y), sharex=False, sharey=False)
        self.fig.patch.set_facecolor('white')

    def panel(self, index):
        """Return the Axes at *index* (a (row, col) tuple).

        plt.subplots returns a bare Axes for 1x1, a 1-D array for a single
        row or column, and a 2-D array otherwise -- handle each case.
        """
        if self.rows == 1 and self.cols == 1:
            return self.axes
        elif self.rows == 1:
            return self.axes[index[1]]
        elif self.cols == 1:
            return self.axes[index[0]]
        else:
            return self.axes[index]

    # add legend by calling panel(index).legend() once constructed
    def line(self, panel, x, y, marker, title=None, xlabel=None, ylabel=None, **kwargs):
        """Draw a line/marker plot of y against x into *panel*."""
        ax = self.panel(panel)
        if xlabel:
            ax.set_xlabel(xlabel)
        if ylabel:
            ax.set_ylabel(ylabel)
        if title:
            ax.set_title(title)
        ax.plot(x, y, marker, **kwargs)

    def stacked_bar(self, panel, dataset, category_name, xaxis_name, yaxis_name, category_mapping=None,
                    title=None, xlabel=None, ylabel=None, **kwargs):
        """Draw a bar chart of *yaxis_name* vs *xaxis_name*, stacked by category.

        ``category_mapping``, when given, is a DataFrame indexed by category
        whose ``geo_label`` column provides the legend labels.
        """
        categories = dataset[category_name].unique()
        bottom = np.zeros(len(dataset[xaxis_name].unique()))
        ax = self.panel(panel)
        series = []
        for cat in categories:
            x = dataset[dataset[category_name] == cat][xaxis_name].values
            y = dataset[dataset[category_name] == cat][yaxis_name].values
            series.append(ax.bar(x, y, bottom=bottom))
            bottom += y
        if category_mapping is None:
            category_labels = categories
        else:
            # Fix: DataFrame.ix was deprecated in pandas 0.20 and removed in
            # pandas 1.0; label-based lookup is .loc.
            category_labels = category_mapping.loc[categories].geo_label
        ax.set_xlabel(xlabel if xlabel is not None else xaxis_name)
        ax.set_ylabel(ylabel if ylabel is not None else yaxis_name)
        ax.legend([p[0] for p in series], category_labels)

    def scatter(self, panel, x, y, marker, title=None, **kwargs):
        """Draw a scatter plot into *panel*."""
        ax = self.panel(panel)
        if title:
            ax.set_title(title)
        ax.plot(x, y, marker, **kwargs)

    def matrix(self, panel, matrix, title=None, xlabel=None, ylabel=None, **kwargs):
        """Render a 2-D array as an image (no tick marks)."""
        ax = self.panel(panel)
        if xlabel:
            ax.set_xlabel(xlabel)
        if ylabel:
            ax.set_ylabel(ylabel)
        if title:
            ax.set_title(title)
        ax.imshow(matrix, **kwargs)
        ax.set_xticks([])
        ax.set_yticks([])

    def polygons(self, panel, gdf, title=None, xlim=None, ylim=None, **kwargs):
        """Plot the polygons of a GeoDataFrame on a sea-blue background."""
        ax = self.panel(panel)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_facecolor('xkcd:cerulean')
        ax.set_aspect('equal', adjustable='datalim')
        if title:
            ax.set_title(title)
        if xlim:
            ax.set_xlim(xlim)
        if ylim:
            ax.set_ylim(ylim)
        plot_polygon_collection(ax, gdf['geometry'], **kwargs)

    def show(self):
        """Lay the panels out tightly and display the figure."""
        plt.tight_layout()
        plt.show()

    def to_png(self, filename):
        """Lay the panels out tightly and save the figure to *filename*."""
        plt.tight_layout()
        self.fig.savefig(filename)
| 2.828125 | 3 |
Aula55_0403_TDD/par/calcula_se_um_numero_eh_par.py | Renanrbsc/PadawanPython | 0 | 12763989 | <gh_stars>0
def calcula_se_um_numero_eh_par(numero):
    """Return True when *numero* is an even integer, False otherwise.

    Fix: the original used the ``type(numero) == int`` anti-pattern and could
    fall off the end (returning None) for non-int inputs; this version always
    returns a bool. ``bool`` is explicitly rejected to preserve the original
    strict-type behaviour (``type(True) == int`` is False).
    """
    if not isinstance(numero, int) or isinstance(numero, bool):
        return False
    return numero % 2 == 0
| 2.828125 | 3 |
huaweicloud-sdk-kms/huaweicloudsdkkms/v1/kms_client.py | Adek06/huaweicloud-sdk-python-v3 | 0 | 12763990 | # coding: utf-8
from __future__ import absolute_import
import datetime
import re
import importlib
import six
from huaweicloudsdkcore.client import Client, ClientBuilder
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.utils import http_utils
from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest
class KmsClient(Client):
    """Generated client for the HuaweiCloud KMS v1 REST API.

    :param configuration: .Configuration object for this client
    :param pool_threads: The number of threads to use for async requests
        to the API. More threads means more concurrent API requests.
    """

    # Scalar Python types the (de)serialisation helpers treat as leaves.
    PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
    # Mapping from OpenAPI type names to native Python types.
    NATIVE_TYPES_MAPPING = {
        'int': int,
        'long': int if six.PY3 else long,  # ``long`` only exists on Python 2
        'float': float,
        'str': str,
        'bool': bool,
        'date': datetime.date,
        'datetime': datetime.datetime,
        'object': object,
    }
    def __init__(self):
        """Initialise the client and load the generated KMS v1 model package."""
        super(KmsClient, self).__init__()
        # Models are resolved lazily by dotted path so this module does not
        # have to import every generated model class.
        self.model_package = importlib.import_module("huaweicloudsdkkms.v1.model")
        self.preset_headers = {'User-Agent': 'HuaweiCloud-SDK-Python'}
    @staticmethod
    def new_builder(clazz):
        """Return a ClientBuilder that constructs *clazz* (a KmsClient type)."""
        return ClientBuilder(clazz)
    def batch_create_kms_tags(self, request):
        """Add or delete key tags in batches.

        - Function: add and/or delete tags of a key in a single batch call.

        :param BatchCreateKmsTagsRequest request
        :return: BatchCreateKmsTagsResponse
        """
        # Thin wrapper; the real work happens in the *_with_http_info variant.
        return self.batch_create_kms_tags_with_http_info(request)
    def batch_create_kms_tags_with_http_info(self, request):
        """Add or delete key tags in batches.

        - Function: add and/or delete tags of a key in a single batch call.

        :param BatchCreateKmsTagsRequest request
        :return: BatchCreateKmsTagsResponse
        """
        # NOTE(review): generated code -- ``all_params`` is built but never
        # validated against; kept unchanged.
        all_params = ['key_id', 'version_id', 'batch_create_kms_tags_request_body']
        local_var_params = {}
        # Copy only the attributes actually set on the request object.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)

        collection_formats = {}

        path_params = {}
        if 'key_id' in local_var_params:
            path_params['key_id'] = local_var_params['key_id']
        if 'version_id' in local_var_params:
            path_params['version_id'] = local_var_params['version_id']

        query_params = []

        header_params = {}

        form_params = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # A streaming request supplies its body as a file stream instead.
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()

        response_headers = []

        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json;charset=UTF-8'])

        auth_settings = []

        return self.call_api(
            resource_path='/{version_id}/{project_id}/kms/{key_id}/tags/action',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='BatchCreateKmsTagsResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def cancel_grant(self, request):
        """Revoke a grant.

        - Function: revoke a grant so the grantee loses permission to use the key.
        - Note: only the user who created the key can revoke its grants.

        :param CancelGrantRequest request
        :return: CancelGrantResponse
        """
        # Thin wrapper; the real work happens in the *_with_http_info variant.
        return self.cancel_grant_with_http_info(request)
    def cancel_grant_with_http_info(self, request):
        """Revoke a grant.

        - Function: revoke a grant so the grantee loses permission to use the key.
        - Note: only the user who created the key can revoke its grants.

        :param CancelGrantRequest request
        :return: CancelGrantResponse
        """
        # NOTE(review): generated code -- ``all_params`` is built but never
        # validated against; kept unchanged.
        all_params = ['version_id', 'cancel_grant_request_body']
        local_var_params = {}
        # Copy only the attributes actually set on the request object.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)

        collection_formats = {}

        path_params = {}
        if 'version_id' in local_var_params:
            path_params['version_id'] = local_var_params['version_id']

        query_params = []

        header_params = {}

        form_params = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # A streaming request supplies its body as a file stream instead.
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()

        response_headers = []

        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json;charset=UTF-8'])

        auth_settings = []

        return self.call_api(
            resource_path='/{version_id}/{project_id}/kms/revoke-grant',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='CancelGrantResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def cancel_key_deletion(self, request):
        """Cancel the scheduled deletion of a key.

        - Note: only keys in the "scheduled deletion" state can be cancelled.

        :param CancelKeyDeletionRequest request
        :return: CancelKeyDeletionResponse
        """
        # Thin wrapper; the real work happens in the *_with_http_info variant.
        return self.cancel_key_deletion_with_http_info(request)
    def cancel_key_deletion_with_http_info(self, request):
        """Cancel the scheduled deletion of a key.

        - Note: only keys in the "scheduled deletion" state can be cancelled.

        :param CancelKeyDeletionRequest request
        :return: CancelKeyDeletionResponse
        """
        # NOTE(review): generated code -- ``all_params`` is built but never
        # validated against; kept unchanged.
        all_params = ['version_id', 'cancel_key_deletion_request_body']
        local_var_params = {}
        # Copy only the attributes actually set on the request object.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)

        collection_formats = {}

        path_params = {}
        if 'version_id' in local_var_params:
            path_params['version_id'] = local_var_params['version_id']

        query_params = []

        header_params = {}

        form_params = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # A streaming request supplies its body as a file stream instead.
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()

        response_headers = []

        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json;charset=UTF-8'])

        auth_settings = []

        return self.call_api(
            resource_path='/{version_id}/{project_id}/kms/cancel-key-deletion',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='CancelKeyDeletionResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def cancel_self_grant(self, request):
        """Retire a grant.

        - Function: the caller gives up the rights conferred by a grant. E.g.
          if user A grants user B use of key A/key and lets user C revoke the
          grant, then A, B and C may each retire it; afterwards B can no
          longer use A/key.
        - Note: a grant may be retired by its creator, by the user named in
          ``retiring_principal``, or -- when the grant's operations include
          ``retire-grant`` -- by the user named in ``grantee_principal``.

        :param CancelSelfGrantRequest request
        :return: CancelSelfGrantResponse
        """
        # Thin wrapper; the real work happens in the *_with_http_info variant.
        return self.cancel_self_grant_with_http_info(request)
    def cancel_self_grant_with_http_info(self, request):
        """Retire a grant.

        - Function: the caller gives up the rights conferred by a grant.
        - Note: a grant may be retired by its creator, by the user named in
          ``retiring_principal``, or -- when the grant's operations include
          ``retire-grant`` -- by the user named in ``grantee_principal``.

        :param CancelSelfGrantRequest request
        :return: CancelSelfGrantResponse
        """
        # NOTE(review): generated code -- ``all_params`` is built but never
        # validated against; kept unchanged.
        all_params = ['version_id', 'cancel_self_grant_request_body']
        local_var_params = {}
        # Copy only the attributes actually set on the request object.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)

        collection_formats = {}

        path_params = {}
        if 'version_id' in local_var_params:
            path_params['version_id'] = local_var_params['version_id']

        query_params = []

        header_params = {}

        form_params = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # A streaming request supplies its body as a file stream instead.
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()

        response_headers = []

        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json;charset=UTF-8'])

        auth_settings = []

        return self.call_api(
            resource_path='/{version_id}/{project_id}/kms/retire-grant',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='CancelSelfGrantResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def create_datakey(self, request):
        """Create a data encryption key (DEK).

        - Function: the response contains both the plaintext and the
          ciphertext of the key.

        :param CreateDatakeyRequest request
        :return: CreateDatakeyResponse
        """
        # Thin wrapper; the real work happens in the *_with_http_info variant.
        return self.create_datakey_with_http_info(request)
    def create_datakey_with_http_info(self, request):
        """Create a data encryption key (DEK).

        - Function: the response contains both the plaintext and the
          ciphertext of the key.

        :param CreateDatakeyRequest request
        :return: CreateDatakeyResponse
        """
        # NOTE(review): generated code -- ``all_params`` is built but never
        # validated against; kept unchanged.
        all_params = ['version_id', 'create_datakey_request_body']
        local_var_params = {}
        # Copy only the attributes actually set on the request object.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)

        collection_formats = {}

        path_params = {}
        if 'version_id' in local_var_params:
            path_params['version_id'] = local_var_params['version_id']

        query_params = []

        header_params = {}

        form_params = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # A streaming request supplies its body as a file stream instead.
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()

        response_headers = []

        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json;charset=UTF-8'])

        auth_settings = []

        return self.call_api(
            resource_path='/{version_id}/{project_id}/kms/create-datakey',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='CreateDatakeyResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def create_datakey_without_plaintext(self, request):
        """Create a data encryption key without plaintext.

        - Function: the response contains only the ciphertext of the key.

        :param CreateDatakeyWithoutPlaintextRequest request
        :return: CreateDatakeyWithoutPlaintextResponse
        """
        # Thin wrapper; the real work happens in the *_with_http_info variant.
        return self.create_datakey_without_plaintext_with_http_info(request)
    def create_datakey_without_plaintext_with_http_info(self, request):
        """Create a data encryption key without plaintext.

        - Function: the response contains only the ciphertext of the key.

        :param CreateDatakeyWithoutPlaintextRequest request
        :return: CreateDatakeyWithoutPlaintextResponse
        """
        # NOTE(review): generated code -- ``all_params`` is built but never
        # validated against; kept unchanged.
        all_params = ['version_id', 'create_datakey_without_plaintext_request_body']
        local_var_params = {}
        # Copy only the attributes actually set on the request object.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)

        collection_formats = {}

        path_params = {}
        if 'version_id' in local_var_params:
            path_params['version_id'] = local_var_params['version_id']

        query_params = []

        header_params = {}

        form_params = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # A streaming request supplies its body as a file stream instead.
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()

        response_headers = []

        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json;charset=UTF-8'])

        auth_settings = []

        return self.call_api(
            resource_path='/{version_id}/{project_id}/kms/create-datakey-without-plaintext',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='CreateDatakeyWithoutPlaintextResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def create_grant(self, request):
        """Create a grant.

        - Function: allow another user to operate on the granted key.
        - Note: service default master keys (alias suffix "/default") cannot
          be granted.

        :param CreateGrantRequest request
        :return: CreateGrantResponse
        """
        # Thin wrapper; the real work happens in the *_with_http_info variant.
        return self.create_grant_with_http_info(request)
    def create_grant_with_http_info(self, request):
        """Create a grant.

        - Function: allow another user to operate on the granted key.
        - Note: service default master keys (alias suffix "/default") cannot
          be granted.

        :param CreateGrantRequest request
        :return: CreateGrantResponse
        """
        # NOTE(review): generated code -- ``all_params`` is built but never
        # validated against; kept unchanged.
        all_params = ['version_id', 'create_grant_request_body']
        local_var_params = {}
        # Copy only the attributes actually set on the request object.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)

        collection_formats = {}

        path_params = {}
        if 'version_id' in local_var_params:
            path_params['version_id'] = local_var_params['version_id']

        query_params = []

        header_params = {}

        form_params = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # A streaming request supplies its body as a file stream instead.
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()

        response_headers = []

        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json;charset=UTF-8'])

        auth_settings = []

        return self.call_api(
            resource_path='/{version_id}/{project_id}/kms/create-grant',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='CreateGrantResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def create_key(self, request):
        """Create a customer master key (CMK).

        - Function: create a user master key, used to encrypt data keys.
        - Note: the "/default" alias suffix is reserved for service default
          master keys (created automatically), so user-created aliases must
          not end in "/default". For enterprise-project users the service
          default key always belongs to the default enterprise project and
          cannot be migrated; create your own key if you need enterprise
          resource management.

        :param CreateKeyRequest request
        :return: CreateKeyResponse
        """
        # Thin wrapper; the real work happens in the *_with_http_info variant.
        return self.create_key_with_http_info(request)
    def create_key_with_http_info(self, request):
        """Create a customer master key (CMK).

        - Function: create a user master key, used to encrypt data keys.
        - Note: the "/default" alias suffix is reserved for service default
          master keys; user-created aliases must not end in "/default".

        :param CreateKeyRequest request
        :return: CreateKeyResponse
        """
        # NOTE(review): generated code -- ``all_params`` is built but never
        # validated against; kept unchanged.
        all_params = ['version_id', 'create_key_request_body']
        local_var_params = {}
        # Copy only the attributes actually set on the request object.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)

        collection_formats = {}

        path_params = {}
        if 'version_id' in local_var_params:
            path_params['version_id'] = local_var_params['version_id']

        query_params = []

        header_params = {}

        form_params = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # A streaming request supplies its body as a file stream instead.
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()

        response_headers = []

        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json;charset=UTF-8'])

        auth_settings = []

        return self.call_api(
            resource_path='/{version_id}/{project_id}/kms/create-key',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='CreateKeyResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def create_kms_tag(self, request):
        """Add a tag to a key.

        :param CreateKmsTagRequest request
        :return: CreateKmsTagResponse
        """
        # Thin wrapper; the real work happens in the *_with_http_info variant.
        return self.create_kms_tag_with_http_info(request)
    def create_kms_tag_with_http_info(self, request):
        """Add a tag to a key.

        :param CreateKmsTagRequest request
        :return: CreateKmsTagResponse
        """
        # NOTE(review): generated code -- ``all_params`` is built but never
        # validated against; kept unchanged.
        all_params = ['version_id', 'key_id', 'create_kms_tag_request_body']
        local_var_params = {}
        # Copy only the attributes actually set on the request object.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)

        collection_formats = {}

        path_params = {}
        if 'version_id' in local_var_params:
            path_params['version_id'] = local_var_params['version_id']
        if 'key_id' in local_var_params:
            path_params['key_id'] = local_var_params['key_id']

        query_params = []

        header_params = {}

        form_params = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # A streaming request supplies its body as a file stream instead.
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()

        response_headers = []

        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json;charset=UTF-8'])

        auth_settings = []

        return self.call_api(
            resource_path='/{version_id}/{project_id}/kms/{key_id}/tags',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='CreateKmsTagResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
    def create_parameters_for_import(self, request):
        """Get key-import parameters.

        - Function: returns what is needed to import key material -- the
          import token and the wrapping public key.
        - Note: the returned public key type defaults to RSA_2048.

        :param CreateParametersForImportRequest request
        :return: CreateParametersForImportResponse
        """
        # Thin wrapper; the real work happens in the *_with_http_info variant.
        return self.create_parameters_for_import_with_http_info(request)
    def create_parameters_for_import_with_http_info(self, request):
        """Get key-import parameters.

        - Function: returns what is needed to import key material -- the
          import token and the wrapping public key.
        - Note: the returned public key type defaults to RSA_2048.

        :param CreateParametersForImportRequest request
        :return: CreateParametersForImportResponse
        """
        # NOTE(review): generated code -- ``all_params`` is built but never
        # validated against; kept unchanged.
        all_params = ['version_id', 'create_parameters_for_import_request_body']
        local_var_params = {}
        # Copy only the attributes actually set on the request object.
        for attr in request.attribute_map:
            if hasattr(request, attr):
                local_var_params[attr] = getattr(request, attr)

        collection_formats = {}

        path_params = {}
        if 'version_id' in local_var_params:
            path_params['version_id'] = local_var_params['version_id']

        query_params = []

        header_params = {}

        form_params = {}

        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # A streaming request supplies its body as a file stream instead.
        if isinstance(request, SdkStreamRequest):
            body_params = request.get_file_stream()

        response_headers = []

        header_params['Content-Type'] = http_utils.select_header_content_type(
            ['application/json;charset=UTF-8'])

        auth_settings = []

        return self.call_api(
            resource_path='/{version_id}/{project_id}/kms/get-parameters-for-import',
            method='POST',
            path_params=path_params,
            query_params=query_params,
            header_params=header_params,
            body=body_params,
            post_params=form_params,
            response_type='CreateParametersForImportResponse',
            response_headers=response_headers,
            auth_settings=auth_settings,
            collection_formats=collection_formats,
            request_type=request.__class__.__name__)
def create_random(self, request):
    """Generate a random number in the 8-8192 bit range.

    Thin wrapper delegating to ``create_random_with_http_info``.

    :param CreateRandomRequest request
    :return: CreateRandomResponse
    """
    return self.create_random_with_http_info(request)
def create_random_with_http_info(self, request):
    """Generate a random number in the 8-8192 bit range.

    :param CreateRandomRequest request
    :return: CreateRandomResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/gen-random',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='CreateRandomResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def decrypt_data(self, request):
    """Decrypt data.

    Thin wrapper delegating to ``decrypt_data_with_http_info``.

    :param DecryptDataRequest request
    :return: DecryptDataResponse
    """
    return self.decrypt_data_with_http_info(request)
def decrypt_data_with_http_info(self, request):
    """Decrypt data.

    :param DecryptDataRequest request
    :return: DecryptDataResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/decrypt-data',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DecryptDataResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def decrypt_datakey(self, request):
    """Decrypt a data key with the specified master key.

    Thin wrapper delegating to ``decrypt_datakey_with_http_info``.

    :param DecryptDatakeyRequest request
    :return: DecryptDatakeyResponse
    """
    return self.decrypt_datakey_with_http_info(request)
def decrypt_datakey_with_http_info(self, request):
    """Decrypt a data key with the specified master key.

    :param DecryptDatakeyRequest request
    :return: DecryptDatakeyResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/decrypt-datakey',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DecryptDatakeyResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_imported_key_material(self, request):
    """Delete imported key material.

    Thin wrapper delegating to ``delete_imported_key_material_with_http_info``.

    :param DeleteImportedKeyMaterialRequest request
    :return: DeleteImportedKeyMaterialResponse
    """
    return self.delete_imported_key_material_with_http_info(request)
def delete_imported_key_material_with_http_info(self, request):
    """Delete imported key material.

    :param DeleteImportedKeyMaterialRequest request
    :return: DeleteImportedKeyMaterialResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/delete-imported-key-material',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteImportedKeyMaterialResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_key(self, request):
    """Schedule key deletion (7 to 1096 days in the future).

    Thin wrapper delegating to ``delete_key_with_http_info``.

    :param DeleteKeyRequest request
    :return: DeleteKeyResponse
    """
    return self.delete_key_with_http_info(request)
def delete_key_with_http_info(self, request):
    """Schedule key deletion (7 to 1096 days in the future).

    :param DeleteKeyRequest request
    :return: DeleteKeyResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/schedule-key-deletion',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteKeyResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def delete_tag(self, request):
    """Delete a tag from a key.

    Thin wrapper delegating to ``delete_tag_with_http_info``.

    :param DeleteTagRequest request
    :return: DeleteTagResponse
    """
    return self.delete_tag_with_http_info(request)
def delete_tag_with_http_info(self, request):
    """Delete a tag from a key.

    :param DeleteTagRequest request
    :return: DeleteTagResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'key_id' in local_var_params:
        path_params['key_id'] = local_var_params['key_id']
    if 'key' in local_var_params:
        path_params['key'] = local_var_params['key']
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # DELETE carries no JSON body; only a stream request supplies one.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/{key_id}/tags/{key}',
        method='DELETE',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DeleteTagResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def disable_key(self, request):
    """Disable a key (the key must currently be enabled).

    Thin wrapper delegating to ``disable_key_with_http_info``.

    :param DisableKeyRequest request
    :return: DisableKeyResponse
    """
    return self.disable_key_with_http_info(request)
def disable_key_with_http_info(self, request):
    """Disable a key (the key must currently be enabled).

    :param DisableKeyRequest request
    :return: DisableKeyResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/disable-key',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DisableKeyResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def disable_key_rotation(self, request):
    """Disable rotation for a customer master key.

    Thin wrapper delegating to ``disable_key_rotation_with_http_info``.

    :param DisableKeyRotationRequest request
    :return: DisableKeyRotationResponse
    """
    return self.disable_key_rotation_with_http_info(request)
def disable_key_rotation_with_http_info(self, request):
    """Disable rotation for a customer master key.

    :param DisableKeyRotationRequest request
    :return: DisableKeyRotationResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/disable-key-rotation',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='DisableKeyRotationResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def enable_key(self, request):
    """Enable a key (the key must currently be disabled).

    Thin wrapper delegating to ``enable_key_with_http_info``.

    :param EnableKeyRequest request
    :return: EnableKeyResponse
    """
    return self.enable_key_with_http_info(request)
def enable_key_with_http_info(self, request):
    """Enable a key (the key must currently be disabled).

    :param EnableKeyRequest request
    :return: EnableKeyResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/enable-key',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='EnableKeyResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def enable_key_rotation(self, request):
    """Enable rotation for a customer master key.

    Per service docs the default rotation interval is 365 days; default
    master keys and externally imported keys do not support rotation.
    Thin wrapper delegating to ``enable_key_rotation_with_http_info``.

    :param EnableKeyRotationRequest request
    :return: EnableKeyRotationResponse
    """
    return self.enable_key_rotation_with_http_info(request)
def enable_key_rotation_with_http_info(self, request):
    """Enable rotation for a customer master key.

    :param EnableKeyRotationRequest request
    :return: EnableKeyRotationResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/enable-key-rotation',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='EnableKeyRotationResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def encrypt_data(self, request):
    """Encrypt data with the specified customer master key.

    Thin wrapper delegating to ``encrypt_data_with_http_info``.

    :param EncryptDataRequest request
    :return: EncryptDataResponse
    """
    return self.encrypt_data_with_http_info(request)
def encrypt_data_with_http_info(self, request):
    """Encrypt data with the specified customer master key.

    :param EncryptDataRequest request
    :return: EncryptDataResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/encrypt-data',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='EncryptDataResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def encrypt_datakey(self, request):
    """Encrypt a data key with the specified master key.

    Thin wrapper delegating to ``encrypt_datakey_with_http_info``.

    :param EncryptDatakeyRequest request
    :return: EncryptDatakeyResponse
    """
    return self.encrypt_datakey_with_http_info(request)
def encrypt_datakey_with_http_info(self, request):
    """Encrypt a data key with the specified master key.

    :param EncryptDatakeyRequest request
    :return: EncryptDatakeyResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/encrypt-datakey',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='EncryptDatakeyResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def import_key_material(self, request):
    """Import key material.

    Thin wrapper delegating to ``import_key_material_with_http_info``.

    :param ImportKeyMaterialRequest request
    :return: ImportKeyMaterialResponse
    """
    return self.import_key_material_with_http_info(request)
def import_key_material_with_http_info(self, request):
    """Import key material.

    :param ImportKeyMaterialRequest request
    :return: ImportKeyMaterialResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/import-key-material',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ImportKeyMaterialResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_grants(self, request):
    """List the grants on a key.

    Thin wrapper delegating to ``list_grants_with_http_info``.

    :param ListGrantsRequest request
    :return: ListGrantsResponse
    """
    return self.list_grants_with_http_info(request)
def list_grants_with_http_info(self, request):
    """List the grants on a key.

    :param ListGrantsRequest request
    :return: ListGrantsResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/list-grants',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListGrantsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_key_detail(self, request):
    """Query detailed information about a key.

    Thin wrapper delegating to ``list_key_detail_with_http_info``.

    :param ListKeyDetailRequest request
    :return: ListKeyDetailResponse
    """
    return self.list_key_detail_with_http_info(request)
def list_key_detail_with_http_info(self, request):
    """Query detailed information about a key.

    :param ListKeyDetailRequest request
    :return: ListKeyDetailResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/describe-key',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListKeyDetailResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_keys(self, request):
    """List all of the user's keys.

    Thin wrapper delegating to ``list_keys_with_http_info``.

    :param ListKeysRequest request
    :return: ListKeysResponse
    """
    return self.list_keys_with_http_info(request)
def list_keys_with_http_info(self, request):
    """List all of the user's keys.

    :param ListKeysRequest request
    :return: ListKeysResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/list-keys',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListKeysResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_kms_by_tags(self, request):
    """Query key instances filtered by tags.

    Thin wrapper delegating to ``list_kms_by_tags_with_http_info``.

    :param ListKmsByTagsRequest request
    :return: ListKmsByTagsResponse
    """
    return self.list_kms_by_tags_with_http_info(request)
def list_kms_by_tags_with_http_info(self, request):
    """Query key instances filtered by tags.

    :param ListKmsByTagsRequest request
    :return: ListKmsByTagsResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'resource_instances' in local_var_params:
        path_params['resource_instances'] = local_var_params['resource_instances']
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/{resource_instances}/action',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListKmsByTagsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_kms_tags(self, request):
    """List all tag sets in the project.

    Thin wrapper delegating to ``list_kms_tags_with_http_info``.

    :param ListKmsTagsRequest request
    :return: ListKmsTagsResponse
    """
    return self.list_kms_tags_with_http_info(request)
def list_kms_tags_with_http_info(self, request):
    """List all tag sets in the project.

    :param ListKmsTagsRequest request
    :return: ListKmsTagsResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # GET carries no JSON body; only a stream request supplies one.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/tags',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListKmsTagsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def list_retirable_grants(self, request):
    """List the grants the user can retire.

    Thin wrapper delegating to ``list_retirable_grants_with_http_info``.

    :param ListRetirableGrantsRequest request
    :return: ListRetirableGrantsResponse
    """
    return self.list_retirable_grants_with_http_info(request)
def list_retirable_grants_with_http_info(self, request):
    """List the grants the user can retire.

    :param ListRetirableGrantsRequest request
    :return: ListRetirableGrantsResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/list-retirable-grants',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ListRetirableGrantsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_key_rotation_status(self, request):
    """Query the rotation status of a customer master key.

    Thin wrapper delegating to ``show_key_rotation_status_with_http_info``.

    :param ShowKeyRotationStatusRequest request
    :return: ShowKeyRotationStatusResponse
    """
    return self.show_key_rotation_status_with_http_info(request)
def show_key_rotation_status_with_http_info(self, request):
    """Query the rotation status of a customer master key.

    :param ShowKeyRotationStatusRequest request
    :return: ShowKeyRotationStatusResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # JSON body from the request; a stream request overrides it with a file stream.
    body_params = local_var_params.get('body')
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []

    header_params['Content-Type'] = http_utils.select_header_content_type(
        ['application/json;charset=UTF-8'])

    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/get-key-rotation-status',
        method='POST',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowKeyRotationStatusResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_kms_tags(self, request):
    """Query the tags of a key.

    Thin wrapper delegating to ``show_kms_tags_with_http_info``.

    :param ShowKmsTagsRequest request
    :return: ShowKmsTagsResponse
    """
    return self.show_kms_tags_with_http_info(request)
def show_kms_tags_with_http_info(self, request):
    """Query the tags of a key.

    :param ShowKmsTagsRequest request
    :return: ShowKmsTagsResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']
    if 'key_id' in local_var_params:
        path_params['key_id'] = local_var_params['key_id']

    query_params = []
    header_params = {}
    form_params = {}

    # GET carries no JSON body; only a stream request supplies one.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/{key_id}/tags',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowKmsTagsResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_user_instances(self, request):
    """Query the number of master keys the user has created.

    Thin wrapper delegating to ``show_user_instances_with_http_info``.

    :param ShowUserInstancesRequest request
    :return: ShowUserInstancesResponse
    """
    return self.show_user_instances_with_http_info(request)
def show_user_instances_with_http_info(self, request):
    """Query the number of master keys the user has created.

    :param ShowUserInstancesRequest request
    :return: ShowUserInstancesResponse
    """
    # Collect every attribute actually set on the request object.
    local_var_params = {attr: getattr(request, attr)
                        for attr in request.attribute_map
                        if hasattr(request, attr)}

    collection_formats = {}

    path_params = {}
    if 'version_id' in local_var_params:
        path_params['version_id'] = local_var_params['version_id']

    query_params = []
    header_params = {}
    form_params = {}

    # GET carries no JSON body; only a stream request supplies one.
    body_params = None
    if isinstance(request, SdkStreamRequest):
        body_params = request.get_file_stream()

    response_headers = []
    auth_settings = []

    return self.call_api(
        resource_path='/{version_id}/{project_id}/kms/user-instances',
        method='GET',
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        body=body_params,
        post_params=form_params,
        response_type='ShowUserInstancesResponse',
        response_headers=response_headers,
        auth_settings=auth_settings,
        collection_formats=collection_formats,
        request_type=request.__class__.__name__)
def show_user_quotas(self, request):
"""查询配额
- 功能介绍:查询配额,查询用户可以创建的用户主密钥配额总数及当前使用量信息。
:param ShowUserQuotasRequest request
:return: ShowUserQuotasResponse
"""
return self.show_user_quotas_with_http_info(request)
def show_user_quotas_with_http_info(self, request):
"""查询配额
- 功能介绍:查询配额,查询用户可以创建的用户主密钥配额总数及当前使用量信息。
:param ShowUserQuotasRequest request
:return: ShowUserQuotasResponse
"""
all_params = ['version_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'version_id' in local_var_params:
path_params['version_id'] = local_var_params['version_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
auth_settings = []
return self.call_api(
resource_path='/{version_id}/{project_id}/kms/user-quotas',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowUserQuotasResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_key_alias(self, request):
"""修改密钥别名
- 功能介绍:修改用户主密钥别名。 - 说明: - 服务默认主密钥(密钥别名后缀为“/default”)不可以修改。 - 密钥处于“计划删除”状态,密钥别名不可以修改。
:param UpdateKeyAliasRequest request
:return: UpdateKeyAliasResponse
"""
return self.update_key_alias_with_http_info(request)
def update_key_alias_with_http_info(self, request):
"""修改密钥别名
- 功能介绍:修改用户主密钥别名。 - 说明: - 服务默认主密钥(密钥别名后缀为“/default”)不可以修改。 - 密钥处于“计划删除”状态,密钥别名不可以修改。
:param UpdateKeyAliasRequest request
:return: UpdateKeyAliasResponse
"""
all_params = ['version_id', 'update_key_alias_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'version_id' in local_var_params:
path_params['version_id'] = local_var_params['version_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/{version_id}/{project_id}/kms/update-key-alias',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateKeyAliasResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_key_description(self, request):
"""修改密钥描述
- 功能介绍:修改用户主密钥描述信息。 - 说明: - 服务默认主密钥(密钥别名后缀为“/default”)不可以修改。 - 密钥处于“计划删除”状态,密钥描述不可以修改。
:param UpdateKeyDescriptionRequest request
:return: UpdateKeyDescriptionResponse
"""
return self.update_key_description_with_http_info(request)
def update_key_description_with_http_info(self, request):
"""修改密钥描述
- 功能介绍:修改用户主密钥描述信息。 - 说明: - 服务默认主密钥(密钥别名后缀为“/default”)不可以修改。 - 密钥处于“计划删除”状态,密钥描述不可以修改。
:param UpdateKeyDescriptionRequest request
:return: UpdateKeyDescriptionResponse
"""
all_params = ['version_id', 'update_key_description_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'version_id' in local_var_params:
path_params['version_id'] = local_var_params['version_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/{version_id}/{project_id}/kms/update-key-description',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateKeyDescriptionResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_key_rotation_interval(self, request):
"""修改密钥轮换周期
- 功能介绍:修改用户主密钥轮换周期。
:param UpdateKeyRotationIntervalRequest request
:return: UpdateKeyRotationIntervalResponse
"""
return self.update_key_rotation_interval_with_http_info(request)
def update_key_rotation_interval_with_http_info(self, request):
"""修改密钥轮换周期
- 功能介绍:修改用户主密钥轮换周期。
:param UpdateKeyRotationIntervalRequest request
:return: UpdateKeyRotationIntervalResponse
"""
all_params = ['version_id', 'update_key_rotation_interval_request_body']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'version_id' in local_var_params:
path_params['version_id'] = local_var_params['version_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json;charset=UTF-8'])
auth_settings = []
return self.call_api(
resource_path='/{version_id}/{project_id}/kms/update-key-rotation-interval',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateKeyRotationIntervalResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_version(self, request):
"""查询指定版本信息
- 功能介绍:查指定API版本信息。
:param ShowVersionRequest request
:return: ShowVersionResponse
"""
return self.show_version_with_http_info(request)
def show_version_with_http_info(self, request):
"""查询指定版本信息
- 功能介绍:查指定API版本信息。
:param ShowVersionRequest request
:return: ShowVersionResponse
"""
all_params = ['version_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'version_id' in local_var_params:
path_params['version_id'] = local_var_params['version_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
auth_settings = []
return self.call_api(
resource_path='/{version_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowVersionResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_versions(self, request):
"""查询版本信息列表
- 功能介绍:查询API版本信息列表。
:param ShowVersionsRequest request
:return: ShowVersionsResponse
"""
return self.show_versions_with_http_info(request)
def show_versions_with_http_info(self, request):
"""查询版本信息列表
- 功能介绍:查询API版本信息列表。
:param ShowVersionsRequest request
:return: ShowVersionsResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
auth_settings = []
return self.call_api(
resource_path='/',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowVersionsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None,
post_params=None, response_type=None, response_headers=None, auth_settings=None,
collection_formats=None, request_type=None):
"""Makes the HTTP request and returns deserialized data.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response_type: Response data type.
:param response_headers: Header should be added to response data.
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param request_type: Request data type.
:return:
Return the response directly.
"""
return self.do_http_request(
method=method,
resource_path=resource_path,
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body,
post_params=post_params,
response_type=response_type,
response_headers=response_headers,
collection_formats=collection_formats,
request_type=request_type)
| 2.15625 | 2 |
aiostomp/errors.py | gajdy/aiostomp | 0 | 12763991 | <gh_stars>0
class StompError(Exception):
    """STOMP protocol error carrying an additional ``detail`` payload."""

    def __init__(self, message, detail):
        super().__init__(message)
        # Extra, frame-specific information accompanying the error message.
        self.detail = detail
class StompDisconnectedError(Exception):
    """Exception indicating a disconnected STOMP session."""
class ExceededRetryCount(Exception):
    """Raised once the configured number of retry attempts is exhausted."""
| 2.390625 | 2 |
test/http_client.py | kenshinx/rps | 6 | 12763992 | <filename>test/http_client.py<gh_stars>1-10
#! /usr/bin/env python
import re
import socket
import optparse
HTTP_PROXY_HOST = "dev1"
HTTP_PROXY_PORT = 8889
# NOTE(review): the two assignments below override the "dev1" settings above;
# the dead values appear to be kept only as a quick environment toggle.
HTTP_PROXY_HOST = "localhost"
HTTP_PROXY_PORT = 9891
HTTP_PROXY_UNAME = "rps"
# NOTE(review): "<PASSWORD>" looks like a redaction placeholder, not a real
# credential -- substitute a valid proxy password before running.
HTTP_PROXY_PASSWD = "<PASSWORD>"
class HTTPTunnelPorxy(object):
pattern = re.compile("^HTTP\/1\.\d ([0-9]{3}) .*")
def __init__(self, proxy_host, proxy_port, proxy_uname, proxy_passwd):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.s.connect((proxy_host, proxy_port))
except:
print "can't connect porxy: %s:%d" %(proxy_host, proxy_port)
exit(1);
self.uname = proxy_uname;
self.passwd = <PASSWORD>;
def handshake(self, host, port):
payload = "CONNECT %s:%d HTTP/1.1\r\n" %(host, port)
payload = payload + "HOST: %s\r\n" %host
payload = payload + "User-agent: RPS/HTTP PROXY\r\n"
payload = payload + "\r\n"
print "---------------------------------------------"
print "send:\n"
print payload
self.s.sendall(payload)
data = self.s.recv(1024)
print "recv: %d character\n" %len(data)
print data
data = data.strip()
try:
code = self.pattern.findall(data)[0]
except Exception, e:
print "invalid http response"
return False
if code == "200":
print "handshake success"
return True
elif code == "407":
return self.authenticate(host, port)
else:
print "invalid http response code"
return False
def authenticate(self, host, port):
credential = "%s:%s" %(self.uname, self.passwd)
credential = credential.encode("base64")
credential = "Basic %s" %credential
print credential
payload = "CONNECT %s:%d HTTP/1.1\r\n" %(host, port)
payload = payload + "HOST: %s\r\n" %host
payload = payload + "User-agent: RPS/HTTP PROXY\r\n"
payload = payload + "Proxy-Authorization: %s\r\n" %credential
payload = payload + "\r\n"
print "---------------------------------------------"
print "send:\n"
print payload
self.s.sendall(payload)
data = self.s.recv(1024)
print "recv: %d character\n" %len(data)
print data
data = data.strip()
try:
code = self.pattern.findall(data)[0]
except Exception, e:
print "invalid http response"
return False
if code == "200":
print "http authenticate success"
return True
elif code == "407":
print "http authenticate fail"
return False
else:
print "invalid http response code"
return False
def doHTTPRequest(self, host, port):
if not self.handshake(host, port):
return
payload = "GET / HTTP/1.1\r\n"
payload = payload + "HOST: %s\r\n" %host
payload = payload + "\r\n"
print "---------------------------------------------"
print "send: \n"
print payload
self.s.sendall(payload)
data = self.s.recv(1024)
print "recv: %d character\n" %len(data)
print data
def doHTTPSRequest(self, host, port):
if not self.handshake(host, port):
return
payload = "GET https://%s HTTP/1.1\r\n" %host
payload = payload + "HOST: %s\r\n" %host
payload = payload + "\r\n"
print "---------------------------------------------"
print "send: \n"
print payload
self.s.sendall(payload)
data = self.s.recv(1024)
print "recv: %d character\n" %len(data)
print data
def doWhoisRequest(self, host, port, query):
if not self.handshake(host, port):
return
payload = "%s\r\n" %query
print "---------------------------------------------"
print "send: \n"
print payload
self.s.sendall(payload)
data = self.s.recv(1024)
print "recv: \n"
print data
def main():
    # Build a tunnel-proxy client against the configured HTTP proxy and drive
    # one plain HTTP request through it; the HTTPS/whois variants are kept as
    # commented-out examples.
    proxy = HTTPTunnelPorxy(HTTP_PROXY_HOST, HTTP_PROXY_PORT,
            HTTP_PROXY_UNAME, HTTP_PROXY_PASSWD)
    proxy.doHTTPRequest("www.google.com", 80)
    #proxy.doHTTPSRequest("www.google.com", 80)
    #proxy.doWhoisRequest("whois.godaddy.com", 43, "kenshinx.me")
if __name__ == "__main__":
    main()
| 2.953125 | 3 |
scripts/2013/clock-convert.py | iBurnApp/iBurn-Data | 7 | 12763993 | <filename>scripts/2013/clock-convert.py
import json
import math
earthRadius = 20890700.0
class clockCoordinate:
    """A playa address expressed on the clock face: hour/minute plus a radial distance."""

    def __init__(self, hour, minute, distance):
        # Missing or empty fields fall back to zero, matching the dict.get()
        # defaults used by the callers.
        self.hour = int(hour) if hour else 0
        self.minute = int(minute) if minute else 0
        self.distance = float(distance) if distance else 0.0
class geoCoordinate:
    """A latitude/longitude pair stored as floats."""

    def __init__(self, lat, lon):
        self.lat, self.lon = float(lat), float(lon)

    def __str__(self):
        # Space-separated "lat lon" rendering.
        return "{} {}".format(self.lat, self.lon)
class art:
    """An art installation: its name plus a clock-based location.

    The geographic ``coordinates`` attribute is attached externally after the
    clock position has been converted.
    """

    def __init__(self, dict):
        # NOTE: the parameter keeps its original name ``dict`` (shadowing the
        # builtin) to preserve the public signature.
        self.name = dict.get('name', '')
        self.clockCoordinate = clockCoordinate(
            dict.get('hour', 0),
            dict.get('minute', 0),
            dict.get('distance', 0),
        )

    def jsonDic(self):
        # Flatten name, clock position and geo position into one serializable dict.
        cc = self.clockCoordinate
        return {
            'name': self.name,
            'hour': cc.hour,
            'minute': cc.minute,
            'distance': cc.distance,
            'latitude': self.coordinates.lat,
            'longitude': self.coordinates.lon,
        }
class convert:
    """Convert clock-face playa addresses into geographic offsets from a centre point."""

    def hourMinuteToDegrees(self, hour, minute):
        # Each clock minute is half a degree (360 degrees / 720 minutes).
        return .5 * ((int(hour) % 12) * 60 + int(minute))

    def bearing(self, hour, minute):
        # 45-degree offset aligns the clock face with the city's rotation.
        return math.radians((self.hourMinuteToDegrees(hour, minute) + 45) % 360)

    # NOT USED
    def xDifference(self, clockCoordinate):
        theta = math.radians(self.hourMinuteToDegrees(clockCoordinate.hour, clockCoordinate.minute))
        return math.sin(theta) * clockCoordinate.distance

    # NOT USED
    def yDifference(self, clockCoordinate):
        theta = math.radians(self.hourMinuteToDegrees(clockCoordinate.hour, clockCoordinate.minute))
        return math.cos(theta) * clockCoordinate.distance

    def newCoordinate(self, center, cCoordinate):
        # Standard great-circle "destination point" formula: start at *center*,
        # travel cCoordinate.distance feet along the clock bearing.
        theta = self.bearing(cCoordinate.hour, cCoordinate.minute)
        lat1 = math.radians(center.lat)
        lon1 = math.radians(center.lon)
        d = cCoordinate.distance
        lat2 = math.asin(math.sin(lat1) * math.cos(d / earthRadius)
                         + math.cos(lat1) * math.sin(d / earthRadius) * math.cos(theta))
        lon2 = lon1 + math.atan2(math.sin(theta) * math.sin(d / earthRadius) * math.cos(lat1),
                                 math.cos(d / earthRadius) - math.sin(lat1) * math.sin(lat2))
        return geoCoordinate(math.degrees(lat2), math.degrees(lon2))
if __name__ == '__main__':
    # Golden Spike / centre of Black Rock City used as the clock origin.
    center = geoCoordinate(40.78700,-119.20430)
    converter = convert()
    clock = open("art-clock-locations.json","r")
    coordinateArray = json.loads(clock.read())
    finalArray = []
    # Convert each art record's clock position to lat/lon and collect the
    # flattened dicts for serialization.
    for item in coordinateArray:
        item = art(item)
        item.coordinates = converter.newCoordinate(center,item.clockCoordinate)
        finalArray.append(item.jsonDic())
    #print json.dumps(coordinateArray)
    # NOTE(review): output filename contains a typo ("loacatoins"); kept as-is
    # since downstream consumers may already depend on it.
    finalOutput = open("art-clock-loacatoins-lat-lon.json","w")
    finalOutput.write(json.dumps(finalArray,sort_keys=True, indent=4))
    finalOutput.close()
    clock.close()
    # cCoordinate = clockCoordinate(10,31,1000)
    # converter = convert()
    # print converter.bearing(10,30)
    # print "X: "+str(converter.xDifference(cCoordinate))
    # print "Y: "+str(converter.yDifference(cCoordinate))
    # newCoord = converter.newCoordinate(center,cCoordinate)
    # print newCoord
| 3.421875 | 3 |
regionator/parse_object_db.py | lubber-de/neohabitat | 181 | 12763994 | '''
Parse the MC_object database from the Habitat Stratus backup.
There are still lots of unknowns:
* Many objects have container 0x20202020. They appear to be unused, but it's
unclear why.
* Some address strings have unprintable characters. It's unclear if this
was intentional or garbage data.
* Matchbook (class 49): there are 3 objects of this type, but they appear
to be overwritten or otherwise unused.
* When combined with MC_regions, we find lots of orphaned objects. This may
be because of broken relationships. Some appear to be pockets of avatars.
'''
import json, struct, sys
from collections import OrderedDict
STRUCT_ITEMS = (
'id',
'class',
'container',
'contype',
'x_pos',
'y_pos',
'style',
'gr_state',
'orientation',
'gr_width',
'nitty_bits',
'prop_length',
'property_data',
)
FORMAT = '> 3I 7H I 10x H 86s'
assert struct.calcsize(FORMAT) == 128
PARSERS = {
2: ('>HI', ['magic_type', 'magic_data']),
129: ('>H', ['state']),
6: ('>HW', ['open_flags', 'key']),
130: ('>H', ['open_flags']),
10: ('>HIH', ['current_page', 'text_id', 'last_page']),
12: ('>H', ['filled']),
13: ('>HW', ['open_flags', 'key']),
131: ('>HH', ['width', 'length']),
132: ('>xxxxxxi', ['connection']),
158: ('>H', ['open_flags']),
134: ('>H', ['open_flags']),
135: ('>HW', ['open_flags', 'key']),
136: ('>I', ['take']),
137: ('>H', ['open_flags']),
18: ('>HW', ['open_flags', 'key']), # + whoput array
20: ('>H', ['live']),
21: ('>H', ['state']),
22: ('>HWIH', ['open_flags', 'key', 'owner', 'locked']),
23: ('>HWi', ['open_flags', 'key', 'connection']),
25: ('>HH', ['count', 'effect']),
28: ('>HI20s', ['state', 'take', 'address']),
26: ('>H', ['charge']),
27: ('>H', ['state']),
29: ('>H', ['mass']),
30: ('>H', ['on']),
93: ('>H', ['flat_type']),
139: ('>H', ['on']),
140: ('>I', ['take']),
141: ('>H', ['live']),
5: ('>H', ['state']),
32: ('>HW', ['open_flags', 'key']),
33: ('>HI', ['magic_type', 'magic_data']),
98: ('>HWHHHHHHHHHHHH', ['open_flags', 'key', 'x_offset_1', 'y_offset_1',
'x_offset_2', 'y_offset_2', 'x_offset_3', 'y_offset_3', 'x_offset_4',
'y_offset_4', 'x_offset_5', 'y_offset_5', 'x_offset_6', 'y_offset_6']),
35: ('>H', ['pinpulled']),
38: ('>H', ['state']),
88: ('>HW', ['open_flags', 'key']),
40: ('>H', ['instant_what']),
42: ('>W', ['key_number']),
43: ('>H', ['is_magic']),
45: ('>HHxxxxH', ['lamp_state', 'wisher', 'live']),
46: ('>HI', ['magic_type', 'magic_data']),
48: ('>HI', ['mail_arrived', 'owner']),
# XXX can't find valid example to decode varstring properly
#49: ('>84s', ['mtext']),
52: ('>H', ['on']),
54: ('>I', ['text_id']),
96: ('>HW', ['open_flags', 'key']),
152: ('>HH', ['mass', 'picture']),
58: ('>H', ['mass']),
55: ('>HIH', ['current_page', 'text_id', 'last_page']),
60: ('>HI', ['magic_type', 'magic_data']),
61: ('>H', ['mass']),
149: ('>HH', ['base', 'pattern']),
150: ('>HW', ['open_flags', 'key']),
63: ('>H', ['on']),
64: ('>H', ['scan_type']),
#56: short sign, handled below
#57: sign, handled below
95: ('>H', ['charge']),
70: ('>HH', ['on', 'tape']),
153: ('>HH', ['width', 'height']),
92: ('>HHHHHHHH', ['trapezoid_type', 'upper_left_x', 'upper_right_x',
'lower_left_x', 'lower_right_x', 'height',
'pattern_x_size','pattern_y_size']), # + pattern array
97: ('>HI', ['magic_type', 'magic_data']),
155: ('>HW', ['open_flags', 'key']),
74: ('>HI20s', ['state', 'take', 'address']),
75: ('>H', ['event']),
76: ('>W', ['denom']),
87: ('>HHHHHH', ['trapezoid_type', 'upper_left_x', 'upper_right_x',
'lower_left_x', 'lower_right_x', 'height']),
85: ('>HWHH', ['open_flags', 'key', 'item_price',
'display_item']), # + prices array
86: ('>HW', ['open_flags', 'key']),
80: ('>HH', ['length', 'height', 'pattern']),
82: ('>H', ['wind_level']),
}
def decode_properties(buf, fmt, keys):
    '''
    Parse the properties from the given byte buffer, using the format string
    and names of keys for each item in the format string. Returns an
    OrderedDict of name/value pairs for all keys.

    `fmt` is a struct format string extended with a custom 'W' code: a
    "fatword", a 16-bit value stored as four bytes `00 xx 00 yy` (decoded to
    0xXXYY). Each 'W' is unpacked as a 32-bit 'I' and then collapsed.
    '''
    fat_words = []
    # Handle fatwords, which are 16-bits stored as 00 xx 00 yy.
    if 'W' in fmt:
        # Hack: our fatword handling doesn't count repeated format strings
        idx = fmt.index('W')
        if fmt[:idx].isdigit():
            raise ValueError('cant handle format strings with numbers')
        # `base` skips the leading byte-order character ('>') when mapping a
        # format position back to its key index.
        base = 1 if not fmt[0].isalpha() else 0
        fmt_chars = []
        for i, c in enumerate(fmt):
            if c == 'W':
                # Unpack the fatword as a plain 32-bit int for now; remember
                # which key it belongs to so it can be collapsed afterwards.
                c = 'I'
                fat_words.append(keys[i - base])
            fmt_chars.append(c)
        fmt = ''.join(fmt_chars)
    data = OrderedDict(zip(
        keys,
        struct.unpack(fmt, buf[:struct.calcsize(fmt)])))
    # Replace each fat word with its actual value: take the high byte of the
    # upper half-word and the low byte of the lower half-word.
    for name in fat_words:
        data[name] = ((data[name] >> 8) & 0xff00) | (data[name] & 0xff)
    return data
def parse_array(buf, fmt, count):
    '''
    Unpack `count` consecutive same-sized records from `buf` into a flat list.
    '''
    record_size = struct.calcsize(fmt)
    return [value
            for idx in range(count)
            for value in struct.unpack(fmt, buf[idx * record_size:(idx + 1) * record_size])]
def decode_text(buf):
    '''
    Decode a word-packed string (00 x 00 y ...), which is similar to a
    fatword but is a string instead of int: keep every odd-indexed element.
    '''
    return list(buf[1::2])
def parse_properties(cls, property_data):
'''
Decode basic properties and then class-specific ones
'''
data = OrderedDict()
args = PARSERS.get(cls)
if args:
data.update(decode_properties(property_data, *args))
remainder_off = struct.calcsize(args[0].replace('W', 'I'))
# Special class decoders for those not fully handled above
if cls == 56:
# short sign
data['text'] = decode_text(property_data[:10 * 2])
elif cls == 57:
# sign
data['text'] = decode_text(property_data[:40 * 2])
elif cls == 18:
# countertop: whoput = 5 ints
n = 5
data['whoput'] = parse_array(
property_data[remainder_off:remainder_off + n * 4],
'>I',
n)
elif cls == 92:
# super trapezoid: pattern = 32 halfwords
n = 32
data['pattern'] = parse_array(
property_data[remainder_off:remainder_off + n * 4],
'>H',
n)
elif cls == 85:
# vendo front: prices = 10 halfwords
n = 10
data['prices'] = parse_array(
property_data[remainder_off:remainder_off + n * 4],
'>H',
n)
return data
def decode_row(row):
'''
Parse a single row and return a dict of the items
'''
data = OrderedDict(zip(STRUCT_ITEMS, struct.unpack(FORMAT, row)))
data.update(parse_properties(data['class'], data['property_data']))
# Debug-dump the Matchbook class
#if data['class'] == 49:
# print ' '.join('%02x' % ord(c) for c in row)
# print data
# These fields tend to be all padding for many objects.
# Maybe these were deleted or superseded?
data['deleted'] = (data['container'] == 0x20202020 and
data['contype'] == 0x2020)
# Always remove the raw property bytes, which we've decoded
del data['property_data']
# Clear text data if it's unprintable
if 'address' in data:
if any(c >= 0x80 for c in data['address']):
#print ' '.join('%02x' % ord(c) for c in row)
#print data
data['address'] = ''
else:
data['address'] = data['address'].decode('ascii')
return data
def main():
'''
Read each row from database and then decode it, dumping output to JSON
'''
items = []
with open(sys.argv[1], "rb") as fp:
while True:
row = fp.read(struct.calcsize(FORMAT))
if not row:
break
items.append(decode_row(row))
with open(sys.argv[2], 'w') as fp:
json.dump(items, fp, indent=2)
if __name__ == '__main__':
main()
| 2.25 | 2 |
ax/wrapper/opencv.py | axxiao/toby | 0 | 12763995 | <gh_stars>0
"""
The Camera caputre images
__author__ = "<NAME> <http://www.alexxiao.me/>"
__date__ = "2018-04-07"
__version__ = "0.1"
Version:
0.1 (07/04/2017): implemented basic version
Classes:
Camera - the main class
"""
import cv2
import time
from threading import Thread
from ax.log import get_logger
class Camera(Thread):
    """Background thread that continuously grabs frames from an OpenCV camera.

    The capture loop runs until :meth:`release` is called; the underlying
    ``cv2.VideoCapture`` handle is freed on shutdown or on error.
    """

    def __init__(self, camera_id=0, logger_name='Camera'):
        """
        :param camera_id: index passed to ``cv2.VideoCapture`` (default 0)
        :param logger_name: name used for the ax logger
        """
        Thread.__init__(self)
        self.camera_id = camera_id
        self.cam = None
        self.logger_name = logger_name
        self.logger = get_logger(logger_name)
        self.running = False

    def release(self):
        """Stop the capture loop and free the camera device."""
        self.running = False
        # Give the capture loop ~1/3 second to notice the flag and exit.
        time.sleep(0.3)
        # BUG FIX: the original only released the device when it was NOT open
        # ("if not self.cam.isOpened()"), leaking an open camera; also guard
        # against release() being called before run() opened the device.
        if self.cam is not None and self.cam.isOpened():
            self.cam.release()

    def run(self):
        """Thread body: open the device and keep reading frames while running."""
        try:
            self.running = True
            self.cam = cv2.VideoCapture(self.camera_id)
            while self.running:
                # Keep capturing to drain the device buffer.
                ret, frame = self.cam.read()
        except Exception:
            # BUG FIX: the original error path referenced an undefined name
            # ("cam" instead of "self.cam") and inverted the isOpened() check,
            # so the device was never released on failure. Also narrowed the
            # bare "except:" so KeyboardInterrupt/SystemExit propagate.
            if self.cam is not None and self.cam.isOpened():
                self.cam.release()
| 2.765625 | 3 |
eyesore/symbolic_tracer.py | twizmwazin/hacrs | 2 | 12763996 | <gh_stars>1-10
import json
import os
import select
import sys
import time
from collections import defaultdict
import angr
import claripy
import gc
import ipdb
import psutil
import simuvex
from simuvex.procedures.cgc.receive import receive
from input_characteristics import extract_input_characteristics
from json_helper import CustomEncoder
from graph_interaction_extractor import extract_interaction
from decision_graph.graph_helper import set_label
from graph_input_constraint_extractor import extract_influences
from full_graph_creator import FullGraphCreator
from decision_graph import agraph_from_decision_graph
from decision_graph.compacting import compact_similar_actions, compact_to_decision_nodes, compact_to_read_eval_nodes, \
compact_to_read_eval_loop_nodes, compact_to_input_byte_switch_tables, rewrite_readable
"""
import enaml
from angrmanagement.data.instance import Instance
from enaml.qt.qt_application import QtApplication
"""
def _path(f):
return os.path.join(os.path.dirname(__file__), '..', '..', f)
def _get_chall_dir(event='examples', name='CADET_00003'):
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'cyborg-generator', 'bins', 'challenges_{}'.format(event), name))
# Cleared by TerminatingReceive after the first receive() past EOF; a second
# consecutive failing receive sets `stop` instead of blocking forever.
first_time_receive_fail = True
# Global kill switch: checked by the main tracing loop (`while not stop`).
stop = False
class TerminatingReceive(receive):
    # pylint:disable=arguments-differ
    """
    Receive which fixes the input to file descriptor to 0.

    Tolerates exactly one receive() past the end of the pre-constrained stdin;
    on the second consecutive failure it sets the module-level `stop` flag so
    the tracing loop terminates, and returns -1 from the syscall.
    """
    def run(self, fd, buf, count, rx_bytes):
        global first_time_receive_fail, stop
        stdin = self.state.posix.files[0]
        # Can stdin still yield data (read position before its fixed size)?
        if self.state.se.satisfiable(extra_constraints=(stdin.pos < stdin.size,)):
            first_time_receive_fail = True
            ret_val = super(TerminatingReceive, self).run(fd, buf, count, rx_bytes)
        elif first_time_receive_fail:
            # First receive past EOF: allow it once, but remember it failed.
            first_time_receive_fail = False
            ret_val = super(TerminatingReceive, self).run(fd, buf, count, rx_bytes)
        else:
            # Second consecutive receive past EOF: abort the whole trace.
            print '#\n' * 4 + '#'
            print "DOUBLE RECEIVE FAIL, DIIIIIIIIIIIIIIIEEEEEEEEEEEEEEEEEE!!!!!!!!!!"
            print '#\n' * 4 + '#'
            stop = True
            ret_val = -1
        #print 'Recv({}, {}, {}, {}) returns {}'.format(fd, buf, count, rx_bytes, ret_val)
        return ret_val
class FixedRandom(simuvex.SimProcedure):
    #pylint:disable=arguments-differ
    """CGC `random` syscall replacement that fills the buffer with 'A' bytes
    instead of real randomness, keeping traces deterministic."""
    IS_SYSCALL = True

    def run(self, buf, count, rnd_bytes):
        # return code: EFAULT if either pointer is invalid, 0 otherwise.
        r = self.state.se.ite_cases((
            (self.state.cgc.addr_invalid(buf), self.state.cgc.EFAULT),
            (self.state.se.And(rnd_bytes != 0, self.state.cgc.addr_invalid(rnd_bytes)), self.state.cgc.EFAULT),
        ), claripy.BVV(0, self.state.arch.bits))
        if self.state.satisfiable(extra_constraints=[count!=0]):
            # Fill the destination with a fixed pattern and, when rnd_bytes is
            # non-NULL, report how many bytes were "generated".
            self.state.memory.store(buf, claripy.BVV("A" * self.state.se.max_int(count)), size=count)
            self.state.memory.store(rnd_bytes, count, endness='Iend_LE', condition=rnd_bytes != 0)
        return r
"""
def launch_gui(app, pg):
inst = Instance(proj=proj)
initialize_instance(inst, {})
inst.path_groups.add_path_group(pg)
view = Main(inst=inst)
view.show()
app.start()
"""
def print_pg_info(pg, i, start_time):
    # Print elapsed time, step counter and per-stash path counts, skipping
    # empty stashes to keep the line readable.
    stash_len = {}
    # First pass: record which stashes are non-empty.
    for stash in pg.stashes:
        if len(pg.stashes[stash]) == 0:
            continue
        stash_len[stash] = 0
    # Second pass: fill in the actual lengths for those stashes.
    for stash in stash_len:
        stash_len[stash] = len(pg.stashes[stash])
    print time.time() - start_time, i, stash_len
def heardEnter():
    """Non-blocking check for a pending line on stdin.

    Returns True (after consuming the line) when the user pressed Enter,
    False otherwise. Used by the tracing loop to trigger an interactive break.
    """
    # Poll stdin with a near-zero timeout so the tracer loop is never blocked.
    readable, _, _ = select.select([sys.stdin], [], [], 0.0001)
    if readable:
        # Consume the pending line so the next poll starts clean; the
        # original bound it to an unused variable.
        sys.stdin.readline()
        return True
    # BUG FIX: the original fell through and implicitly returned None;
    # an explicit False keeps the truthiness contract but is clearer.
    return False
def make_initial_state(proj, stdin, stdout, preconstrain_method='replace'):
add_options = set()
add_options |= simuvex.o.unicorn
add_options.add(simuvex.o.CONSTRAINT_TRACKING_IN_SOLVER)
#add_options.add(simuvex.o.TRACK_ACTION_HISTORY)
add_options.add(simuvex.o.CGC_ZERO_FILL_UNCONSTRAINED_MEMORY)
add_options.add(simuvex.o.CGC_NO_SYMBOLIC_RECEIVE_LENGTH)
add_options.add(simuvex.o.UNICORN_THRESHOLD_CONCRETIZATION)
add_options.add(simuvex.options.CGC_ENFORCE_FD)
add_options.add(simuvex.options.CGC_NON_BLOCKING_FDS)
if preconstrain_method == 'replace':
add_options.add(simuvex.o.REPLACEMENT_SOLVER)
remove_options = simuvex.o.simplification
remove_options |= {simuvex.o.LAZY_SOLVES}
remove_options |= {simuvex.o.SUPPORT_FLOATING_POINT}
remove_options |= {simuvex.o.COMPOSITE_SOLVER}
remove_options |= {simuvex.o.UNICORN_HANDLE_TRANSMIT_SYSCALL}
state = proj.factory.full_init_state(
add_options=add_options,
remove_options=remove_options
)
csr = state.unicorn.cooldown_symbolic_registers
state.unicorn.max_steps = 2000000
state.unicorn.concretization_threshold_registers = 25000 / csr
state.unicorn.concretization_threshold_memory = 25000 / csr
stdin_file = state.posix.get_file(0)
stdin_file.size = len(stdin)
for b in stdin:
b_bvv = state.se.BVV(b)
v = stdin_file.read_from(1)
if preconstrain_method == 'replace':
state.se._solver.add_replacement(v, b_bvv, invalidate_cache=False)
elif preconstrain_method == 'constrain_symbolic':
state.add_constraints(v == b_bvv)
else:
raise NotImplementedError("Preconstraining strategy {} is unknown".format(preconstrain_method))
stdin_file.seek(0)
return state
def dump_agraph(head, name):
    """Render *head* as an AGraph, write it to ``<name>.dot`` and return both."""
    print("[{}] Dumping the graph .dot file .. ".format(name))
    rendered = agraph_from_decision_graph(head)
    rendered.write('{}.dot'.format(name))
    return head, rendered
def dump_decision_graph(head):
full, graph = dump_agraph(head, 'decision_graph_full')
compacted_decision_nodes, graph = dump_agraph(compact_to_decision_nodes(full), 'decision_graph_compact_0_decision_nodes')
compacted_similar_actions, graph = dump_agraph(compact_similar_actions(compacted_decision_nodes), 'decision_graph_compact_1_similar_actions')
compacted_input_byte_switch_tables, graph = dump_agraph(compact_to_input_byte_switch_tables(compacted_similar_actions), 'decision_graph_compact_2_switch_tables')
compacted_read_eval, graph = dump_agraph(compact_to_read_eval_nodes(compacted_input_byte_switch_tables), 'decision_graph_compact_3_read_eval_nodes')
compacted_read_eval_loops, graph = dump_agraph(compact_to_read_eval_loop_nodes(compacted_read_eval), 'decision_graph_compact_4_read_eval_loop_nodes')
compacted_human_readable, graph = dump_agraph(rewrite_readable(compacted_read_eval_loops), 'decision_graph_compact_5_human_readable_nodes')
for node_name in graph.nodes():
set_label(graph, node_name, node_name)
print("[decision_graph_compact_5_human_readable_node_names] Dumping the graph .dot file .. ")
graph.write('decision_graph_compact_5_human_readable_node_names.dot')
print "Final Graph: {} nodes, {} edges".format(len(graph.nodes()), len(graph.edges()))
if __name__ == '__main__':
    # CLI: argv[1] = target binary, argv[2] = concrete input file,
    # argv[3] = strings-classification JSON, argv[4] = optional timeout (s).
    import logging
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    #logging.getLogger("claripy.backends.backend_z3").setLevel(logging.DEBUG)
    # Replace CGC syscalls with deterministic stand-ins so the replay is
    # reproducible and terminates once stdin is exhausted.
    simuvex.SimProcedures['cgc']['random'] = FixedRandom
    simuvex.SimProcedures['cgc']['receive'] = TerminatingReceive
    proj = angr.Project(sys.argv[1])
    stdin = ''
    output = ''
    with open(os.path.abspath(sys.argv[2]), 'r') as inf:
        stdin = inf.read()
    if len(stdin) == 0:
        print "SKIPPING input {} for {}, it's empty!".format(sys.argv[1], sys.argv[2])
        sys.exit(0)
    s = make_initial_state(proj, stdin, output, preconstrain_method='replace')
    p = proj.factory.path(s)
    hierarchy = angr.PathHierarchy(weakkey_path_mapping=True)
    pg = proj.factory.path_group(p, immutable=False, hierarchy=hierarchy)
    # Oppologist keeps stepping past unsupported instructions instead of dying.
    pg.use_technique(angr.exploration_techniques.Oppologist())
    cfg = proj.analyses.CFGFast(collect_data_references=True, extra_cross_references=True)
    """
    with enaml.imports():
        from angrmanagement.ui.main import Main, initialize_instance
    app = QtApplication()
    """
    strings_classification_path = sys.argv[3]
    timeout = int(sys.argv[4]) if len(sys.argv) > 4 else -1  # -1 == no timeout
    before = time.time()
    interrupt = False
    i = 0  # number of completed path-group steps
    last_num_steps = 0
    last_time = before
    #import ipdb
    #ipdb.set_trace()
    # Maps constraint-variable name -> list of (trace length, constraint,
    # state) captured just before the constraint was first introduced.
    var_before_touch_state_map = defaultdict(list)
    graph_creator = FullGraphCreator(proj, cfg, find_string_refs=True, max_depth=5)
    # NOTE(review): `stop` is defined elsewhere in this file (presumably a
    # keyboard/signal flag) -- confirm before refactoring this loop.
    while not stop and len(pg.active) > 0 and (timeout == -1 or (time.time() - before) < timeout):
        assert len(pg.active) == 1
        before_state = pg.one_active.state
        before_length = pg.one_active.length
        pg.step()
        all_paths = [path for stash in pg.stashes for path in pg.stashes[stash]]
        assert len(all_paths) == 1, 'We should always have exactly one path, how did we end up with {}???'.format(str(pg))
        path = all_paths[0]
        before_const = set(before_state.se.constraints)
        after_const = set(path.state.se.constraints)
        if before_const != after_const:
            # Constraints should only ever be added along the trace.
            assert len(before_const - after_const) == 0
            # All the new constraints
            for c in after_const - before_const:
                for var in c.variables:
                    var_before_touch_state_map[var].append((before_length, c, before_state))
        graph_creator.analyze_and_add_last_run_info(path)
        #del path.history.state
        #path.history.state = None
        i += 1
        #if i > 450:
        #    break
        if i % 100 == 0:
            # Periodic progress / memory report every 100 steps.
            current_time = time.time()
            mem = psutil.Process(os.getpid()).memory_info().rss
            print "Currently used memory: {}\tMB =>".format(mem / (1024 * 1024))
            print "Traced so far: {}".format(len(path.addr_trace.hardcopy))
            print "Steps per second: {}".format(float(i - last_num_steps) / float(current_time - last_time))
            """
            graph_creator.add_layer(graph_creator.add_node(str(i)))
            dump_graph(graph_creator.new_graph, 'info_graph_full_{}'.format(i))
            graph_creator = FullGraphCreator(cfg, names_only=False, find_string_refs=True, max_depth=5)
            """
            last_num_steps = i
            last_time = time.time()
        # Pressing Enter drops into an interactive IPython shell for inspection.
        if heardEnter():
            interrupt = True
        if interrupt:
            interrupt = False
            #launch_gui(app, pg)
            import IPython
            IPython.embed()
    complete_retrace_time = time.time() - before
    print "Retracing the original trace took {} seconds".format(complete_retrace_time)
    all_paths = [path for stash in pg.stashes for path in pg.stashes[stash]]
    assert len(all_paths) == 1, 'We should always have exactly one path, how did we end up with {}???'.format(str(pg))
    final_path = all_paths[0]
    #ipdb.set_trace()
    # Output file prefix: the input file name with its extension stripped.
    input_base = sys.argv[2][:sys.argv[2].rindex('.')]
    # Release the path group before the memory-hungry analyses below.
    del pg
    claripy.downsize()
    gc.collect()
    decision_graph_head = graph_creator.finalize_decision_graph()
    with open(input_base + '.output', 'w') as outf:
        print "Dumping {}".format(outf.name)
        outf.write(final_path.state.posix.dumps(1))  # concrete stdout (fd 1)
    interaction = extract_interaction(decision_graph_head)
    with open(input_base + '.interaction.json', 'w') as outf:
        print "Dumping {}".format(outf.name)
        json.dump(interaction, outf, ensure_ascii=False, indent=2, cls=CustomEncoder)
    input_influences = extract_influences(decision_graph_head, stdin)
    with open(input_base + '.influence.json', 'w') as outf:
        print "Dumping {}".format(outf.name)
        json.dump(input_influences, outf, ensure_ascii=False, indent=2, cls=CustomEncoder)
    with open(strings_classification_path, 'r') as inf:
        string_classification_data = json.load(inf)
    before_input_characteristic_extraction = time.time()
    constraints, similarities, comp_descriptors, other_opts = extract_input_characteristics(proj,
                                                                                            final_path,
                                                                                            stdin,
                                                                                            var_before_touch_state_map,
                                                                                            string_classification_data)
    other_options_time_after = time.time()
    after_input_characteristic_extraction = time.time()
    with open(input_base + '.character_similarities.csv', 'w') as outf:
        print "Dumping {}".format(outf.name)
        lines = [(','.join(map(str, level)) + '\n') for level in similarities]
        outf.writelines(lines)
    with open(input_base + '.compartment_information.json', 'w') as outf:
        print "Dumping {}".format(outf.name)
        json.dump(comp_descriptors, outf, indent=1, cls=CustomEncoder)
    # dump_decision_graph(decision_graph_head)
    print "$" * 80
    print "Timing Summary: Retracing the input took {} seconds, extracting input characteristics took {} seconds".format(
        complete_retrace_time, after_input_characteristic_extraction - before_input_characteristic_extraction
    )
    print "$" * 80
    #
| 1.726563 | 2 |
for.py | praveenpmin/Python | 0 | 12763997 | <gh_stars>0
# Print each character of the string on its own line.
s = "Hello World"
print("\n".join(s))
client/python/lib/tests/tfs_compat_grpc/config.py | BrightTux/model_server | 0 | 12763998 | #
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from grpc import StatusCode
from numpy import array, float64, int32, int8, float128, float32
from ovmsclient.tfs_compat.base.errors import ModelNotFoundError, InvalidInputError
from config import CallCount, PATH_VALID # noqa
from tensorflow.core.framework.tensor_pb2 import TensorProto
from tensorflow.core.framework.tensor_shape_pb2 import TensorShapeProto
from tensorflow_serving.apis.get_model_status_pb2 import ModelVersionStatus
from tensorflow.core.framework.types_pb2 import DataType
from tensorflow.core.protobuf.error_codes_pb2 import Code as ErrorCode
from tensorflow_serving.apis.get_model_status_pb2 import GetModelStatusRequest
from tensorflow_serving.apis.get_model_metadata_pb2 import GetModelMetadataRequest
from tensorflow_serving.apis.predict_pb2 import PredictRequest
from ovmsclient.tfs_compat.grpc.requests import (GrpcModelMetadataRequest, GrpcModelStatusRequest,
GrpcPredictRequest)
# responses_dict = {
# model_version: { expected_status }
# }
MODEL_STATUS_RESPONSE_VALID = [
{
1: {
"state": ModelVersionStatus.State.AVAILABLE,
"error_code": ErrorCode.OK,
"error_message": "OK"
}
},
{
2: {
"state": ModelVersionStatus.State.END,
"error_code": ErrorCode.OK,
"error_message": "OK"
},
3: {
"state": ModelVersionStatus.State.AVAILABLE,
"error_code": ErrorCode.OK,
"error_message": ""
}
},
{
1: {
"state": ModelVersionStatus.State.START,
"error_code": ErrorCode.OK,
"error_message": ""
},
2: {
"state": ModelVersionStatus.State.LOADING,
"error_code": ErrorCode.UNKNOWN,
"error_message": "Could not load CNN"
},
3: {
"state": ModelVersionStatus.State.UNLOADING,
"error_code": ErrorCode.OK,
"error_message": ""
}
}
]
# response_dict = {
# 'version': model_version,
# 'name': model_name,
# 'inputs': inputs_dict,
# 'outputs': outputs_dict
# }
MODEL_METADATA_RESPONSE_VALID = [
{
'version': 2,
'name': 'resnet',
'inputs': {
'0': {
'shape': [1, 3, 244, 244],
'dtype': DataType.DT_FLOAT
}
},
'outputs': {
'1463': {
'shape': [1, 1000],
'dtype': DataType.DT_FLOAT
}
}
},
{
'version': 1,
'name': 'model_name',
'inputs': {
'0': {
'shape': [1, 3, 244, 244],
'dtype': DataType.DT_FLOAT
},
'1': {
'shape': [0, 1, 3, 244, 244],
'dtype': DataType.DT_INT32
}
},
'outputs': {
'1463': {
'shape': [1, 1000],
'dtype': DataType.DT_FLOAT
},
'second_output': {
'shape': [0, 1, 1000],
'dtype': DataType.DT_INT32
}
}
},
{
'version': 1,
'name': 'model_name',
'inputs': {
'input1': {
'shape': [1, 3, 1080, 1920],
'dtype': DataType.DT_QINT32
},
'input2': {
'shape': [1, 3, 244, 244],
'dtype': DataType.DT_INT32
}
},
'outputs': {
'single_output': {
'shape': [1, 7, 200, 200],
'dtype': DataType.DT_FLOAT
}
}
}
]
# (inputs_dict,
# model_name, model_version, expected_exception, expected_message)
PREDICT_REQUEST_INVALID_INPUTS = [
([],
'model_name', 0, TypeError, "inputs type should be dict, but is list"),
(('input1', [1, 2, 3]),
'model_name', 0, TypeError, "inputs type should be dict, but is tuple"),
({
1: [1, 2, 3],
"input2": [1, 2]
}, 'model_name', 0, TypeError, "inputs keys type should be str, but found int"),
({
"input1": [[1.0, 2.0], [1.0, 2.0, 3.0]]
}, 'model_name', 0, ValueError,
("argument must be a dense tensor: [[1.0, 2.0], [1.0, 2.0, 3.0]] - "
"got shape [2], but wanted [2, 2]")),
({
"input1": [[(1, 2, 3)], [(1, 2)], [(1, 2, 3)]]
}, 'model_name', 0, TypeError, "provided values type is not valid"),
({
"input1": float128(2.5)
}, 'model_name', 0, TypeError, "provided values type is not valid"),
({
"input1": (1, 2, 3)
}, 'model_name', 0, TypeError,
"values type should be (list, np.ndarray, scalar), but is tuple"),
({
"input1": [
[bytes([0x13, 0x00, 0x00, 0x00, 0x08, 0x00]),
bytes([0x13, 0x00, 0x00, 0x00, 0x08, 0x00])],
[bytes([0x13, 0x00, 0x00, 0x00, 0x08, 0x00]),
bytes([0x13, 0x00, 0x00, 0x00, 0x08, 0x00])]
]
}, 'model_name', 0, ValueError, "bytes values with dtype DT_STRING must be in shape [N]"),
]
# (inputs_dict,
# expected_proto_dict,
# model_name, model_version)
PREDICT_REQUEST_VALID = [
({
"input1": [1, 2, 3],
"input2": array([1.0, 2.0, 3.0]),
"input3": [[int32(3), int32(1)], [int32(4), int32(16)]],
}, {
"input1": {
"field": "tensor_content",
"shape": TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]),
"dtype": DataType.DT_INT32,
'value': array([1, 2, 3], dtype=int32).tobytes()
},
"input2": {
"field": "tensor_content",
"shape": TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]),
"dtype": DataType.DT_DOUBLE,
'value': array([1.0, 2.0, 3.0]).tobytes()
},
"input3": {
"field": "tensor_content",
"shape": TensorShapeProto(dim=[TensorShapeProto.Dim(size=2),
TensorShapeProto.Dim(size=2)]),
"dtype": DataType.DT_INT32,
'value': array([[int32(3), int32(1)], [int32(4), int32(16)]]).tobytes()
},
}, 'model_name', 0),
({
"input1": TensorProto(dtype=DataType.DT_INT8,
tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=2),
TensorShapeProto.Dim(size=3)]),
tensor_content=array([1, 2, 3, 4, 5, 6]).tobytes()),
"input2": 5.0,
"input3": bytes([1, 2, 3])
}, {
"input2": {
"field": "float_val",
"shape": TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]),
"dtype": DataType.DT_FLOAT,
'value': array([5.0], dtype=float32)
},
"input3": {
"field": "string_val",
"shape": TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]),
"dtype": DataType.DT_STRING,
'value': [bytes([1, 2, 3])]
}
}, 'model_name', 0),
({
}, {
}, 'model_name', 0)
]
# (response_outputs_dict, model_name, model_version, expected_outputs)
PREDICT_RESPONSE_VALID = [
({
"1463": TensorProto(dtype=DataType.DT_INT8,
tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]),
tensor_content=array([1, 2, 3], dtype=int8).tobytes()),
}, "model_name", 0, array([1, 2, 3], dtype=int8)
),
({
"1463": TensorProto(dtype=DataType.DT_INT32,
tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=2),
TensorShapeProto.Dim(size=3)]),
tensor_content=array([1, 2, 3, 4, 5, 6], dtype=int32).tobytes()),
"2": TensorProto(dtype=DataType.DT_DOUBLE,
tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]),
double_val=array([12.0], dtype=float64)),
}, "model_name", 0, {
"1463": array([[1, 2, 3], [4, 5, 6]], dtype=int32),
"2": array([12.0], dtype=float64)
}),
({
"1463": TensorProto(dtype=DataType.DT_STRING,
tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=2)]),
string_val=[bytes([1, 2, 3]), bytes([4, 5])]),
"2": TensorProto(dtype=DataType.DT_STRING,
tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)]),
string_val=[bytes([1, 2, 3])]),
}, "model_name", 0, {
"1463": [bytes([1, 2, 3]), bytes([4, 5])],
"2": [bytes([1, 2, 3])]
}),
]
# (response_outputs_dict, model_name, model_version, expected_exception, expected_message)
PREDICT_RESPONSE_TENSOR_TYPE_INVALID = [
({
"1463": TensorProto(),
}, "model_name", 0, TypeError, "Unsupported tensor type: 0"),
({
"1463": TensorProto(dtype=DataType.DT_INVALID),
}, "model_name", 0, TypeError, "Unsupported tensor type: 0"),
({
"1463": TensorProto(dtype=DataType.DT_RESOURCE),
}, "model_name", 0, TypeError, "Unsupported tensor type: 20"),
]
# Each entry is a kwargs dict pairing the wrapper-level parameters
# ("model_name"/"model_version") with the fields expected inside the raw
# gRPC GetModelStatusRequest; used to assert that request validation
# passes when the two views of the request agree.
MODEL_STATUS_REQUEST_VALID = [
    ({"model_name": "name", "model_version": 0,
      "raw_request_model_name": "name", "raw_request_model_version": 0}),
]
# ({"model_name": model_name, "model_version": model_version,
# "raw_request_model_name": raw_request_model_name, "raw_request_model_version": raw_request_model_version},# noqa : E501
# expected_exception, expected_message)
MODEL_STATUS_REQUEST_INVALID_RAW_REQUEST = [
({"model_name": "name", "model_version": 0,
"raw_request_model_name": "other_name", "raw_request_model_version": 0},
ValueError, 'request is not valid GrpcModelStatusRequest'),
({"model_name": "other_name", "model_version": 0,
"raw_request_model_name": "name", "raw_request_model_version": 0},
ValueError, 'request is not valid GrpcModelStatusRequest'),
({"model_name": "name", "model_version": 0,
"raw_request_model_name": "name", "raw_request_model_version": 1},
ValueError, 'request is not valid GrpcModelStatusRequest'),
({"model_name": "name", "model_version": 1,
"raw_request_model_name": "name", "raw_request_model_version": 0},
ValueError, 'request is not valid GrpcModelStatusRequest'),
]
# (request, expected_exception, expected_message) -- objects that must be
# rejected because they are not (valid) GrpcModelStatusRequest instances.
MODEL_STATUS_REQUEST_INVALID_REQUEST_TYPE = [
    (None, TypeError,
     "request type should be GrpcModelStatusRequest, but is NoneType"),
    (GetModelStatusRequest(), TypeError,
     "request type should be GrpcModelStatusRequest, but is GetModelStatusRequest"),
    (GrpcModelStatusRequest('model_name', 0, 'raw_request'), TypeError,
     "request is not valid GrpcModelStatusRequest")
]
# (grpc_error_status_code, grpc_error_details, raised_error_type, raised_error_message)
COMMON_INVALID_GRPC = [
(StatusCode.UNAVAILABLE, "failed to connect to all adresses",
ConnectionError, "Error occurred during handling the request: "
"failed to connect to all adresses"),
(StatusCode.UNAVAILABLE, "Empty update",
ConnectionError, "Error occurred during handling the request: Empty update"),
(StatusCode.DEADLINE_EXCEEDED, "Deadline Exceeded",
TimeoutError, "Error occurred during handling the request: "
"Request handling exceeded timeout"),
(StatusCode.NOT_FOUND, "Model with requested version is not found",
ModelNotFoundError, "Error occurred during handling the request: "
"Model with requested version is not found"),
(StatusCode.NOT_FOUND, "Model with requested name is not found",
ModelNotFoundError, "Error occurred during handling the request: "
"Model with requested name is not found"),
]
# ({"model_name": model_name, "model_version": model_version,
# "raw_request_model_name": raw_request_model_name, "raw_request_model_version": raw_request_model_version,# noqa : E501
# "metadata_field_list": raw_request_metadata_fields})
MODEL_METADATA_REQUEST_VALID = [
({"model_name": "name", "model_version": 0,
"raw_request_model_name": "name", "raw_request_model_version": 0,
"metadata_field_list": ["signature_def"]}),
]
# ({"model_name": model_name, "model_version": model_version,
# "raw_request_model_name": raw_request_model_name, "raw_request_model_version": raw_request_model_version,# noqa : E501
# "metadata_field_list": raw_request_metadata_fields},
# expected_exception, expected_message)
MODEL_METADATA_REQUEST_INVALID_RAW_REQUEST = [
({"model_name": "name", "model_version": 0,
"raw_request_model_name": "other_name", "raw_request_model_version": 0,
"metadata_field_list": ["signature_def"]},
ValueError, 'request is not valid GrpcModelMetadataRequest'),
({"model_name": "other_name", "model_version": 0,
"raw_request_model_name": "name", "raw_request_model_version": 0,
"metadata_field_list": ["signature_def"]},
ValueError, 'request is not valid GrpcModelMetadataRequest'),
({"model_name": "name", "model_version": 0,
"raw_request_model_name": "name", "raw_request_model_version": 1,
"metadata_field_list": ["signature_def"]},
ValueError, 'request is not valid GrpcModelMetadataRequest'),
({"model_name": "name", "model_version": 1,
"raw_request_model_name": "name", "raw_request_model_version": 0,
"metadata_field_list": ["signature_def"]},
ValueError, 'request is not valid GrpcModelMetadataRequest'),
({"model_name": "name", "model_version": 1,
"raw_request_model_name": "name", "raw_request_model_version": 1,
"metadata_field_list": ["invalid"]},
ValueError, 'request is not valid GrpcModelMetadataRequest'),
]
# (request, expected_exception, expected_message)
MODEL_METADATA_REQUEST_INVALID_REQUEST_TYPE = [
(None, TypeError,
"request type should be GrpcModelMetadataRequest, but is NoneType"),
(GetModelMetadataRequest(), TypeError,
"request type should be GrpcModelMetadataRequest, but is GetModelMetadataRequest"),
(GrpcModelMetadataRequest('model_name', 0, 'raw_request'), TypeError,
"request is not valid GrpcModelMetadataRequest")
]
# ({"model_name": model_name, "model_version": model_version,
# "raw_request_model_name": raw_request_model_name, "raw_request_model_version": raw_request_model_version,# noqa : E501
# "inputs_dict": inputs_for_request, "raw_request_inputs_dict": inputs_for_raw_request})
PREDICT_REQUEST_VALID_SPEC = [
({"model_name": "name", "model_version": 0,
"raw_request_model_name": "name", "raw_request_model_version": 0,
"inputs_dict": {
"0": TensorProto(dtype=DataType.DT_INT8,
tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]),
tensor_content=array([1, 2, 3]).tobytes())
},
"raw_request_inputs_dict": {
"0": TensorProto(dtype=DataType.DT_INT8,
tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=3)]),
tensor_content=array([1, 2, 3]).tobytes())
}}),
]
# ({"model_name": model_name, "model_version": model_version,
# "raw_request_model_name": raw_request_model_name, "raw_request_model_version": raw_request_model_version,# noqa : E501
# "inputs_dict": inputs_for_request, "raw_request_inputs_dict": inputs_for_raw_request},
# expected_exception, expected_message)
PREDICT_REQUEST_INVALID_SPEC_RAW_REQUEST = [
({"model_name": "name", "model_version": 0,
"raw_request_model_name": "other_name", "raw_request_model_version": 0,
"inputs_dict": {
"0": TensorProto()
},
"raw_request_inputs_dict": {
"0": TensorProto()
}}, ValueError, 'request is not valid GrpcPredictRequest'),
({"model_name": "other_name", "model_version": 0,
"raw_request_model_name": "name", "raw_request_model_version": 0,
"inputs_dict": {
"0": TensorProto()
},
"raw_request_inputs_dict": {
"0": TensorProto()
}}, ValueError, 'request is not valid GrpcPredictRequest'),
({"model_name": "name", "model_version": 1,
"raw_request_model_name": "name", "raw_request_model_version": 0,
"inputs_dict": {
"0": TensorProto()
},
"raw_request_inputs_dict": {
"0": TensorProto()
}}, ValueError, 'request is not valid GrpcPredictRequest'),
({"model_name": "name", "model_version": 0,
"raw_request_model_name": "name", "raw_request_model_version": 1,
"inputs_dict": {
"0": TensorProto()
},
"raw_request_inputs_dict": {
"0": TensorProto()
}}, ValueError, 'request is not valid GrpcPredictRequest'),
({"model_name": "name", "model_version": 0,
"raw_request_model_name": "name", "raw_request_model_version": 0,
"inputs_dict": {
"0": TensorProto()
},
"raw_request_inputs_dict": {
"1": TensorProto()
}}, ValueError, 'request is not valid GrpcPredictRequest'),
]
# (predict_request, expected_exception, expected_message)
PREDICT_REQUEST_INVALID_SPEC_TYPE = [
(None, TypeError,
'request type should be GrpcPredictRequest, but is NoneType'),
(PredictRequest(), TypeError,
'request type should be GrpcPredictRequest, but is PredictRequest'),
(GrpcPredictRequest({}, "model_name", 0, "raw_request"),
TypeError, 'request is not valid GrpcPredictRequest'),
]
# (grpc_error_status_code, grpc_error_details, raised_error_type, raised_error_message)
PREDICT_INVALID_GRPC = COMMON_INVALID_GRPC + [
(StatusCode.INVALID_ARGUMENT, "Invalid input precision - Expected: FP32; Actual: I64",
InvalidInputError, "Error occurred during handling the request: "
"Invalid input precision - Expected: FP32; Actual: I64"),
(StatusCode.INVALID_ARGUMENT, "Invalid number of inputs - Expected: 1; Actual: 0",
InvalidInputError, "Error occurred during handling the request: "
"Invalid number of inputs - Expected: 1; Actual: 0"),
(StatusCode.INVALID_ARGUMENT, "Missing input with specific name - Required input: 0",
InvalidInputError, "Error occurred during handling the request: "
"Missing input with specific name - Required input: 0"),
(StatusCode.INVALID_ARGUMENT, "Invalid number of shape dimensions - "
"Expected: (1,3,224,224); Actual: (3)",
InvalidInputError, "Error occurred during handling the request: "
"Invalid number of shape dimensions - Expected: (1,3,224,224); "
"Actual: (3)"),
]
# (config_dict,
# method_call_count_dict= {"method_name": CallCount.NumberOfCalls})
BUILD_VALID = [
(
{
"url": "localhost:9000"
},
{
"_check_url": CallCount.ONE,
"_check_tls_config": CallCount.ZERO,
"_prepare_certs": CallCount.ZERO
}
),
(
{
"url": "172.16.17.32:1"
},
{
"_check_url": CallCount.ONE,
"_check_tls_config": CallCount.ZERO,
"_prepare_certs": CallCount.ZERO
}
),
(
{
"url": f"cluster.cloud.iotg.intel.com:{2**16-1}"
},
{
"_check_url": CallCount.ONE,
"_check_tls_config": CallCount.ZERO,
"_prepare_certs": CallCount.ZERO
}
),
(
{
"url": "localhost:9000",
"tls_config": {
"server_cert_path": "valid_path"
}
},
{
"_check_url": CallCount.ONE,
"_check_tls_config": CallCount.ONE,
"_prepare_certs": CallCount.ONE
}
),
(
{
"url": "localhost:9000",
"tls_config": {
"client_key_path": PATH_VALID,
"client_cert_path": PATH_VALID,
"server_cert_path": PATH_VALID
}
},
{
"_check_url": CallCount.ONE,
"_check_tls_config": CallCount.ONE,
"_prepare_certs": CallCount.ONE
}
)
]
# (config_dict,
# method_call_dict= {"method_name": (CallCount.NumberOfCalls, error_raised)},
# expected_exception, expected_message)
BUILD_INVALID_CONFIG = [
(
{
"url": "localhost"
},
{
"_check_url": (CallCount.ONE, ValueError("url must be a string "
"in format <address>:<port>")),
"_check_tls_config": (CallCount.ZERO, None),
"_prepare_certs": (CallCount.ZERO, None)
},
ValueError, "url must be a string in format <address>:<port>"
),
(
{
"url": 123
},
{
"_check_url": (CallCount.ONE, TypeError("url must be a string "
"in format <address>:<port>")),
"_check_tls_config": (CallCount.ZERO, None),
"_prepare_certs": (CallCount.ZERO, None)
},
TypeError, "url must be a string in format <address>:<port>"
),
(
{
"url": "address:9000",
},
{
"_check_url": (CallCount.ONE, ValueError("address is not valid")),
"_check_tls_config": (CallCount.ZERO, None),
"_prepare_certs": (CallCount.ZERO, None)
},
ValueError, "address is not valid"
),
(
{
"url": "localhost:port"
},
{
"_check_url": (CallCount.ONE, TypeError("port should be of type int")),
"_check_tls_config": (CallCount.ZERO, None),
"_prepare_certs": (CallCount.ZERO, None)
},
TypeError, "port should be of type int"
),
(
{
"url": f"localhost:{2**16}"
},
{
"_check_url": (CallCount.ONE, ValueError(f"port should be in range <0, {2**16-1}>")),
"_check_tls_config": (CallCount.ZERO, None),
"_prepare_certs": (CallCount.ZERO, None)
},
ValueError, f"port should be in range <0, {2**16-1}>"
),
(
{
"url": "localhost:9000",
"tls_config": 123
},
{
"_check_url": (CallCount.ONE, None),
"_check_tls_config": (CallCount.ONE, TypeError("tls_config should be of type dict")),
"_prepare_certs": (CallCount.ZERO, None)
},
TypeError, "tls_config should be of type dict"
),
(
{
"url": "localhost:9000",
"tls_config": {
}
},
{
"_check_url": (CallCount.ONE, None),
"_check_tls_config": (CallCount.ONE, ValueError("server_cert_path is not defined "
"in tls_config")),
"_prepare_certs": (CallCount.ZERO, None)
},
ValueError, "server_cert_path is not defined in tls_config"
),
(
{
"url": "10.20.30.40:1000",
"tls_config": {
"server_cert_path": PATH_VALID,
"client_key_path": PATH_VALID
}
},
{
"_check_url": (CallCount.ONE, None),
"_check_tls_config": (CallCount.ONE, ValueError("none or both client_key_path "
"and client_cert_path are required "
"in tls_config")),
"_prepare_certs": (CallCount.ZERO, None)
},
ValueError, "none or both client_key_path and client_cert_path are required in tls_config"
),
(
{
"url": "localhost:9000",
"tls_config": {
"server_cert_path": PATH_VALID,
"client_key_path": PATH_VALID,
"client_cert_path": PATH_VALID,
"invalid_key_name": PATH_VALID
}
},
{
"_check_url": (CallCount.ONE, None),
"_check_tls_config": (CallCount.ONE, ValueError("invalid_key_name is "
"not valid tls_config key")),
"_prepare_certs": (CallCount.ZERO, None)
},
ValueError, "invalid_key_name is not valid tls_config key"
),
(
{
"url": "localhost:9000",
"tls_config": {
"server_cert_path": PATH_VALID,
"client_key_path": PATH_VALID,
"client_cert_path": 123,
}
},
{
"_check_url": (CallCount.ONE, None),
"_check_tls_config": (CallCount.ONE, TypeError("client_cert_path type should be string "
"but is type int")),
"_prepare_certs": (CallCount.ZERO, None)
},
TypeError, "client_cert_path type should be string but is type int"
),
(
{
"url": "localhost:9000",
"tls_config": {
"server_cert_path": PATH_VALID,
"client_key_path": "invalid_path",
"client_cert_path": PATH_VALID,
}
},
{
"_check_url": (CallCount.ONE, None),
"_check_tls_config": (CallCount.ONE, ValueError("invalid_path is not valid "
"path to file")),
"_prepare_certs": (CallCount.ZERO, None)
},
ValueError, "invalid_path is not valid path to file"
),
]
# (config_dict,
# method_call_dict= {"method_name": (CallCount.NumberOfCalls, error_raised)},
# expected_exception, expected_message)
BUILD_INVALID_CERTS = [
(
{
"url": "localhost:9000",
"tls_config": {
"server_cert_path": PATH_VALID,
"client_key_path": "path_to_invalid_private_key",
"client_cert_path": PATH_VALID,
}
},
{
"_check_url": (CallCount.ONE, None),
"_check_tls_config": (CallCount.ONE, None),
"_prepare_certs": (CallCount.ONE, ValueError("path_to_invalid_private_key file "
"is not valid private key"))
},
ValueError, "path_to_invalid_private_key file is not valid private key"
),
(
{
"url": "localhost:9000",
"tls_config": {
"server_cert_path": "path_to_invalid_server_certificate",
"client_key_path": PATH_VALID,
"client_cert_path": PATH_VALID,
}
},
{
"_check_url": (CallCount.ONE, None),
"_check_tls_config": (CallCount.ONE, None),
"_prepare_certs": (CallCount.ONE, ValueError("path_to_invalid_server_certificate "
"is not valid certificate"))
},
ValueError, "path_to_invalid_server_certificate is not valid certificate"
),
(
{
"url": "localhost:9000",
"tls_config": {
"server_cert_path": PATH_VALID,
"client_key_path": PATH_VALID,
"client_cert_path": "path_to_invalid_client_certificate",
}
},
{
"_check_url": (CallCount.ONE, None),
"_check_tls_config": (CallCount.ONE, None),
"_prepare_certs": (CallCount.ONE, ValueError("path_to_invalid_client_certificate "
"is not valid certificate"))
},
ValueError, "path_to_invalid_client_certificate is not valid certificate"
),
]
| 1.757813 | 2 |
hassio-google-drive-backup/dev/base_server.py | voxipbx/hassio-addons | 1 | 12763999 | <reponame>voxipbx/hassio-addons
import random
import re
import io
from aiohttp.web import HTTPBadRequest, Request, Response
from typing import Any
# Precompiled patterns used by BaseServer.serve_bytes for HTTP range requests.
rangePattern = re.compile("bytes=\\d+-\\d+")  # client "Range: bytes=<a>-<b>" header
bytesPattern = re.compile("^bytes \\d+-\\d+/\\d+$")  # "Content-Range" response value
intPattern = re.compile("\\d+")  # extracts the integer fields from a header
class BaseServer:
    """Shared helpers for the mock HTTP servers used in development/tests."""

    def generateId(self, length: int = 30) -> str:
        """Return a digits-only pseudo-id of exactly *length* characters.

        The id starts with a random integer prefix and is padded with a
        deterministic 0-9 digit cycle.  (If the random prefix is already
        at least *length* long, it is returned unchanged, mirroring the
        historical behaviour.)
        """
        ret = str(random.randint(0, 1000000))
        if len(ret) >= length:
            return ret
        # Pad one digit per position.  The old code appended the decimal
        # representation of each index, so multi-digit indices overshot
        # the requested length.
        padding = ''.join(str(d % 10) for d in range(length - len(ret)))
        return ret + padding

    def timeToRfc3339String(self, time) -> str:
        """Format a datetime as a Zulu (UTC) RFC 3339 timestamp string."""
        return time.strftime("%Y-%m-%dT%H:%M:%SZ")

    # NOTE: the parameter name `bytes` shadows the builtin but is kept for
    # backward compatibility with keyword callers.
    def serve_bytes(self, request: "Request", bytes: bytearray, include_length: bool = True) -> "Any":
        """Serve *bytes*, honouring a single "Range: bytes=<a>-<b>" header.

        Returns a 206 partial response with a Content-Range header for a
        valid range request, raises HTTPBadRequest for a malformed or
        out-of-bounds one, and returns the whole payload otherwise.
        """
        if "Range" in request.headers:
            # Do range request
            if not rangePattern.match(request.headers['Range']):
                raise HTTPBadRequest()
            numbers = intPattern.findall(request.headers['Range'])
            start = int(numbers[0])
            end = int(numbers[1])
            if start < 0:
                raise HTTPBadRequest()
            if start > end:
                raise HTTPBadRequest()
            if end > len(bytes) - 1:
                raise HTTPBadRequest()
            # `end` is inclusive, hence the +1 slice bound.
            resp = Response(body=bytes[start:end + 1], status=206)
            resp.headers['Content-Range'] = "bytes {0}-{1}/{2}".format(
                start, end, len(bytes))
            if include_length:
                resp.headers["Content-length"] = str(len(bytes))
            return resp
        else:
            resp = Response(body=io.BytesIO(bytes))
            resp.headers["Content-length"] = str(len(bytes))
            return resp

    async def readAll(self, request):
        """Read and return the full request body as a bytearray."""
        data = bytearray()
        content = request.content
        while True:
            chunk, done = await content.readchunk()
            data.extend(chunk)
            if len(chunk) == 0:
                break
        return data
| 2.515625 | 3 |
uberAPI/uberpricing.py | Pkrish15/Uber-Surge | 0 | 12764000 |
from uber_rides.session import Session
from uber_rides.client import UberRidesClient
# Add the Uber server token here before use (deliberately left empty in
# source control; requests will be rejected until it is filled in).
session = Session(server_token='')
def getPriceEstimate(start_lat, start_long, end_lat, end_long):
    """Query Uber price estimates for a trip and return them with a cache key.

    Returns a tuple ``(prices_string, key)`` where *prices_string* is the
    stringified ``prices`` list from the API response and *key* is the four
    coordinates joined with ``|``.
    """
    client = UberRidesClient(session)
    response = client.get_price_estimates(start_lat, start_long, end_lat, end_long)
    key = "|".join(str(coord) for coord in (start_lat, start_long, end_lat, end_long))
    return str(response.json.get('prices')), key
| 2.71875 | 3 |
data-generator/json-app/load-data.py | kramik1/elk-docker-compose | 2 | 12764001 | import json
import logging
import random
import time
import configparser
import logging.handlers
from datetime import datetime, timezone
# Template record for one synthetic sensor sample; every field is filled
# with a fresh value before each emit, so these are only placeholders.
sample_data = {
    "timestamp": "",  # ISO 8601 Zulu (UTC) date format
    "equip_name": "X-Machine",
    "feed_rate": 0.0,
    "shaft_speed": 0,
    "oil_temperature": 0.0,
    "voltage": 0
}
def main():
    """Emit synthetic equipment telemetry as JSON lines, forever.

    Reads rotation/output settings from ./load-json-data.config, then writes
    one randomized sample every SleepInSeconds via a TimedRotatingFileHandler
    so the output file is rotated on an hourly schedule.
    """
    print("Starting program...")

    config = configparser.ConfigParser()
    config.read('./load-json-data.config')
    backup_count = config.getint('Data', 'BackupCount')
    hourly_interval = config.getint('Data', 'HourlyInterval')
    output_file = config['Data']['WriteDirectory'] + '/' + config['Data']['WriteFile']
    sleep_in_seconds = config.getint('Data', 'SleepInSeconds')

    # Format the log entries as the raw JSON message, making use of the log
    # rotation machinery already written in python.
    formatter = logging.Formatter('%(message)s')
    handler = logging.handlers.TimedRotatingFileHandler(
        output_file, when='H', interval=hourly_interval, backupCount=backup_count)
    handler.setFormatter(formatter)
    logger = logging.getLogger("data")
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)

    # The previous `loop` flag was never cleared, so this is an intentional
    # infinite producer loop.
    while True:
        time.sleep(sleep_in_seconds)
        sample_data['timestamp'] = datetime.now(timezone.utc).isoformat()
        sample_data['feed_rate'] = float(random.randint(2000, 3000)) / 100
        sample_data['shaft_speed'] = random.randint(20, 30)
        sample_data['oil_temperature'] = float(random.randint(1000, 1200)) / 100
        sample_data['voltage'] = random.randint(425, 430)
        logger.info(json.dumps(sample_data))
        print('wrote data to disc')
main()
| 2.84375 | 3 |
pywindow/utilities.py | ImperialCollegeLondon/pyWINDOW | 12 | 12764002 | """
Module containing all general purpose functions shared by other modules.
This module is not intended for the direct use by a User. Therefore, I will
only docstring functions if I see fit to do so.
LOG
---
11/07/18
Changed the way vector path is analysed. Now, the initial analysis is
done with the geometrical formula for line-sphere intersection. Only
the remaining vestors that do not intersect any van der Waals spheres are
then analysed in the old way.
27/07/17
Fixed the cartesian coordinates -> fractional coordinates -> cartesian
coordinates conversion related functions, creation of lattice array
from unit cell parameters (triclinic system: so applicable to any)
and conversion back to unit cell parameters. WORKS! inspiration from:
http://www.ruppweb.org/Xray/tutorial/Coordinate%20system%20transformation.htm
26/07/17
Changed the way bonds are determined. Now, rather then fixed value
a formula and covalent radii are used as explained in the Elemental_Radii
spreadsheet (see tables module).
TO DO LIST
----------
- Fix and validate calculating shape descriptors: asphericity, acylindricity
  and the relative shape anisotropy. (Not working at the moment)
- In the find_windows() function, maybe change the way the EPS value for
  the DBSCAN() is estimated. Need to look how the distances change with the
increase in size of the sampling sphere. (validate this with the MongoDB)
"""
import numpy as np
from copy import deepcopy
from multiprocessing import Pool
from scipy.optimize import brute, fmin, minimize
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.neighbors import KDTree
from .tables import (
atomic_mass, atomic_vdw_radius, opls_atom_keys, atomic_covalent_radius
)
class _AtomKeyError(Exception):
def __init__(self, message):
self.message = message
class _AtomKeyConflict(Exception):
def __init__(self, message):
self.message = message
class _ForceFieldError(Exception):
def __init__(self, message):
self.message = message
class _FunctionError(Exception):
def __init__(self, message):
self.message = message
def is_number(number):
    """
    Return True if an object is a number - can be converted into a float.

    Parameters
    ----------
    number : any

    Returns
    -------
    bool
        True if input is a float convertable (a number), False otherwise.

    """
    try:
        float(number)
        return True
    except (ValueError, TypeError):
        # TypeError covers inputs float() rejects outright (e.g. None or a
        # list); previously those propagated instead of returning False.
        return False
def unique(input_list):
    """
    Return a list of unique items (similar to set functionality).

    First-occurrence order is preserved and, because membership is tested
    with list `in`, unhashable items (e.g. nested lists) are supported.

    Parameters
    ----------
    input_list : list
        A list containg some items that can occur more than once.

    Returns
    -------
    list
        A list with only unique occurances of an item.

    """
    deduplicated = []
    for entry in input_list:
        if entry not in deduplicated:
            deduplicated.append(entry)
    return deduplicated
def to_list(obj):
    """Serialisation helper: convert a numpy array into a plain list."""
    if not isinstance(obj, np.ndarray):
        raise TypeError('Not serializable')
    return obj.tolist()
def distance(a, b):
    """
    Return the distance between two vectors (points) a and b.

    Parameters
    ----------
    a : numpy.ndarray
        First vector.
    b : numpy.ndarray
        Second vector.

    Returns
    -------
    numpy.float64
        A distance between two vectors (points).

    """
    displacement = a - b
    return np.sum(displacement ** 2) ** 0.5
def molecular_weight(elements):
    """
    Return molecular weight of a molecule.

    Parameters
    ----------
    elements : numpy.ndarray
        An array of all elements (type: str) in a molecule.

    Returns
    -------
    numpy.float64
        A molecular weight of a molecule.

    """
    masses = np.array([atomic_mass[element.upper()] for element in elements])
    return masses.sum()
def center_of_coor(coordinates):
    """
    Return the centre of coordinates.

    Parameters
    ----------
    coordinates : numpy.ndarray
        An array containing molecule's coordinates.

    Returns
    -------
    numpy.ndarray
        An 1d array with coordinates of the centre of coordinates excluding
        elements' masses.

    """
    # Plain (unweighted) mean over all atom positions.
    return coordinates.sum(axis=0) / coordinates.shape[0]
def center_of_mass(elements, coordinates):
    """
    Return the centre of mass (COM).

    Parameters
    ----------
    elements : numpy.ndarray
        An array of all elements (type: str) in a molecule.
    coordinates : numpy.ndarray
        An array containing molecule's coordinates.

    Returns
    -------
    numpy.ndarray
        An 1d array with coordinates of the centre of mass including
        elements' masses.

    """
    total_mass = molecular_weight(elements)
    # One mass value per Cartesian component so it multiplies row-wise.
    per_atom_mass = np.array(
        [[atomic_mass[element.upper()]] * 3 for element in elements])
    weighted = coordinates * per_atom_mass
    return np.sum(weighted, axis=0) / np.array(
        [total_mass, total_mass, total_mass])
def compose_atom_list(*args):
    """
    Return an `atom list` from elements and/or atom ids and coordinates.

    An `atom list` is a special object that some pywindow functions use.
    It is a nested list of lists with each individual list containing:

        1. [[element, coordinates (x, y, z)], ...]
        2. [[element, atom key, coordinates (x, y, z)], ...]

    They work better for molecular re-building than two separate arrays for
    elements and coordinates do.

    Parameters
    ----------
    elements : :class:`numpy.ndarray`
        An array of all elements (type: str) in a molecule.
    coordinates : :class:`numpy.ndarray`
        An array containing molecule's coordinates.
    atom_ids : :class:`numpy.ndarray`, optional
        An array of all forcfield dependent atom keys (type:str) in a
        molecule.

    Returns
    -------
    list
        Version 1 or version 2 atom list depending on input parameters.

    Raises
    ------
    _FunctionError : :class:`Exception`
        Raised when wrong number of parameters is passed to the function.

    """
    atom_list = []
    if len(args) == 2:
        elements, coordinates = args
        for element, xyz in zip(elements, coordinates):
            atom_list.append([
                element,
                round(float(xyz[0]), 8),
                round(float(xyz[1]), 8),
                round(float(xyz[2]), 8),
            ])
    elif len(args) == 3:
        elements, atom_ids, coordinates = args
        for element, atom_id, xyz in zip(elements, atom_ids, coordinates):
            atom_list.append([
                element,
                atom_id,
                round(float(xyz[0]), 8),
                round(float(xyz[1]), 8),
                round(float(xyz[2]), 8),
            ])
    else:
        raise _FunctionError(
            "The compose_atom_list() function accepts only 2 or 3 arguments.")
    return atom_list
def decompose_atom_list(atom_list):
    """
    Return elements and/or atom ids and coordinates from an `atom list`.

    Depending on input type of an atom list (version 1 or 2)

        1. [[element, coordinates (x, y, z)], ...]
        2. [[element, atom key, coordinates (x, y, z)], ...]

    the function reverses what pywindow.utilities.compose_atom_list() do.

    Parameters
    ----------
    atom_list : list
        A nested list of lists (version 1 or 2)

    Returns
    -------
    touple
        A touple of elements and coordinates arrays, or if input contained
        atom ideas, also atom ids array.

    """
    columns = list(zip(*atom_list))
    if len(columns) == 4:
        elements = np.array(columns[0])
        coordinates = np.column_stack(columns[1:])
        return elements, coordinates
    elif len(columns) == 5:
        elements = np.array(columns[0])
        atom_ids = np.array(columns[1])
        coordinates = np.column_stack(columns[2:])
        return elements, atom_ids, coordinates
    else:
        raise _FunctionError(
            "The decompose_atom_list() function accepts only list of lists "
            " with only 4 or 5 items per sublist.")
def dlf_notation(atom_key):
    """Return element for atom key using DL_F notation.

    The element label is the leading run of characters up to the first
    digit; digits and '?' markers are then stripped from it.
    """
    split = list(atom_key)
    element = ''
    count = 0
    # Bounds-checked walk: previously a key containing no digit at all
    # (e.g. from non-DL_F sources) raised IndexError on split[count].
    while count < len(split):
        element = "".join((element, split[count]))
        count += 1
        if count < len(split) and is_number(split[count]) is True:
            break
    # In case of for example Material Studio output, integers can also be
    # in the beginning of the string. As the dlf_notation decipher function
    # is very general in use, we have to make sure these integers are deleted.
    # In standard DL_F notation the string will never start with integer so it
    # will not affect the functionality towards it.
    # EDIT2: also the '?' atoms, you can delete them manually or somewhere else
    element = "".join(i for i in element if not is_number(i))
    element = "".join(i for i in element if i != '?')
    return element
def opls_notation(atom_key):
    """Return element for OPLS forcefield atom key."""
    # Ne, He and Na atom keys overlap with other OPLS types and cannot be
    # deciphered automatically.
    conflicts = ['ne', 'he', 'na']
    if atom_key in conflicts:
        raise _AtomKeyConflict((
            "One of the OPLS conflicting "
            "atom_keys has occured '{0}'. "
            "For how to solve this issue see the manual or "
            "MolecularSystem._atom_key_swap() doc string.").format(atom_key))
    for element, keys in opls_atom_keys.items():
        if atom_key in keys:
            return element
    # In case if atom_key was not found in the OPLS keys dictionary
    raise _AtomKeyError((
        "OPLS atom key {0} was not found in OPLS keys dictionary.").format(
            atom_key))
def decipher_atom_key(atom_key, forcefield):
    """
    Return element for deciphered atom key.

    This function checks if the forcefield specified by the user is
    supported and passes the atom key to the appropriate function for
    deciphering.

    Parameters
    ----------
    atom_key : str
        The atom key which is to be deciphered.
    forcefield : str
        The forcefield to which the atom key belongs to.

    Returns
    -------
    str
        A string that is the periodic table element equivalent of the
        forcefield atom key.

    """
    decipher_funcs = {
        'DLF': dlf_notation,
        'DL_F': dlf_notation,
        'OPLS': opls_notation,
        'OPLSAA': opls_notation,
        'OPLS2005': opls_notation,
        'OPLS3': opls_notation,
    }
    forcefield_key = forcefield.upper()
    if forcefield_key not in decipher_funcs:
        raise _ForceFieldError(
            ("Unfortunetely, '{0}' forcefield is not supported by pyWINDOW."
             " For list of supported forcefields see User's Manual or "
             "MolecularSystem._decipher_atom_keys() function doc string."
             ).format(forcefield))
    return decipher_funcs[forcefield_key](atom_key)
def shift_com(elements, coordinates, com_adjust=None):
    """
    Return coordinates translated by some vector.

    Parameters
    ----------
    elements : numpy.ndarray
        An array of all elements (type: str) in a molecule.
    coordinates : numpy.ndarray
        An array containing molecule's coordinates.
    com_adjust : numpy.ndarray, optional
        Target offset for the centre of mass; defaults to the origin.

    Returns
    -------
    numpy.ndarray
        Translated array of molecule's coordinates.

    """
    # None-sentinel replaces the previous mutable ndarray default argument
    # (np.zeros(3)), which was shared between calls.
    if com_adjust is None:
        com_adjust = np.zeros(3)
    com = center_of_mass(elements, coordinates)
    com = np.array([com - com_adjust] * coordinates.shape[0])
    return coordinates - com
def max_dim(elements, coordinates):
    """
    Return the maximum diameter of a molecule.

    Parameters
    ----------
    elements : numpy.ndarray
        An array of all elements (type: str) in a molecule.
    coordinates : numpy.ndarray
        An array containing molecule's coordinates.

    Returns
    -------
    tuple
        Indices of the two atoms defining the maximum dimension and the
        maximum dimension itself (atom-atom distance plus both vdW radii).

    """
    atom_vdw = np.array([atomic_vdw_radius[i.upper()] for i in elements])
    dist_matrix = euclidean_distances(coordinates, coordinates)
    # Pairwise sums of vdW radii via ndarray broadcasting; replaces the
    # previous deprecated np.matrix row/column construction.
    vdw_matrix = atom_vdw[:, np.newaxis] + atom_vdw[np.newaxis, :]
    re_dist_matrix = dist_matrix + vdw_matrix
    # Keep only the upper triangle so argmax picks each pair once.
    final_matrix = np.triu(re_dist_matrix)
    i1, i2 = np.unravel_index(final_matrix.argmax(), final_matrix.shape)
    maxdim = final_matrix[i1, i2]
    return i1, i2, maxdim
def pore_diameter(elements, coordinates, com=None):
    """Return pore diameter of a molecule and the index of the closest atom."""
    if com is None:
        com = center_of_mass(elements, coordinates)
    vdw_radii = np.array([[atomic_vdw_radius[e.upper()]] for e in elements])
    # Clearance of each atom's vdW surface from the (pore) centre.
    clearance = euclidean_distances(coordinates, com.reshape(1, -1)) - vdw_radii
    closest = np.argmin(clearance)
    return (clearance[closest][0] * 2, closest)
def correct_pore_diameter(com, *params):
    """Return negative of a pore diameter (objective for minimisation)."""
    elements, coordinates = params
    diameter = pore_diameter(elements, coordinates, com)[0]
    return -diameter
def opt_pore_diameter(elements, coordinates, bounds=None, com=None, **kwargs):
    """Return optimised pore diameter and its COM."""
    args = elements, coordinates
    if com is None:
        com = center_of_mass(elements, coordinates)
    if bounds is None:
        # Bound the search to a box of the initial pore radius around COM.
        radius = pore_diameter(elements, coordinates, com=com)[0] / 2
        bounds = tuple(
            (com[axis] - radius, com[axis] + radius) for axis in range(3)
        )
    result = minimize(
        correct_pore_diameter, x0=com, args=args, bounds=bounds)
    diameter, closest_atom = pore_diameter(elements, coordinates, com=result.x)
    return (diameter, closest_atom, result.x)
def sphere_volume(sphere_radius):
    """Return volume of a sphere of the given radius."""
    return (4 / 3) * np.pi * sphere_radius ** 3
def asphericity(S):
    """Return the asphericity from three ordered tensor eigenvalues."""
    return S[0] - 0.5 * (S[1] + S[2])
def acylidricity(S):
    """Return the acylindricity from three ordered tensor eigenvalues."""
    second, third = S[1], S[2]
    return second - third
def relative_shape_anisotropy(S):
    """Return the relative shape anisotropy from three tensor eigenvalues."""
    cross_terms = S[0] * S[1] + S[0] * S[2] + S[1] * S[2]
    return 1 - 3 * (cross_terms / np.sum(S) ** 2)
def get_tensor_eigenvalues(T, sort=False):
    """Return eigenvalues of tensor T, optionally sorted in descending order."""
    eigenvalues = np.linalg.eigvals(T)
    if sort:
        return sorted(eigenvalues, reverse=True)
    return eigenvalues
def get_gyration_tensor(elements, coordinates):
    """
    Return the gyration tensor of a molecule.

    The gyration tensor should be invariant to the molecule's position.
    The known formulas for the gyration tensor have the correction for the
    centre of mass of the molecule, therefore, the coordinates are first
    corrected for the centre of mass and essentially shifted to the origin.

    Parameters
    ----------
    elements : numpy.ndarray
        The array containing the molecule's elemental data.
    coordinates : numpy.ndarray
        The array containing the Cartesian coordinates of the molecule.

    Returns
    -------
    numpy.ndarray
        The gyration tensor of a molecule invariant to the molecule's
        position.

    """
    # First calculate COM for correction. (Fixed: this previously called
    # the misspelled `centre_of_mass`, raising NameError at runtime.)
    com = center_of_mass(elements, coordinates)
    # Correct the coordinates for the COM.
    coordinates = coordinates - com
    # Calculate diagonal and then other values of the matrix.
    diag = np.sum(coordinates**2, axis=0)
    xy = np.sum(coordinates[:, 0] * coordinates[:, 1])
    xz = np.sum(coordinates[:, 0] * coordinates[:, 2])
    yz = np.sum(coordinates[:, 1] * coordinates[:, 2])
    S = np.array([[diag[0], xy, xz], [xy, diag[1], yz],
                  [xz, yz, diag[2]]]) / coordinates.shape[0]
    return (S)
def get_inertia_tensor(elements, coordinates):
    """
    Return the tensor of inertia a molecule.

    Parameters
    ----------
    elements : numpy.ndarray
        The array containing the molecule's elemental data.
    coordinates : numpy.ndarray
        The array containing the Cartesian coordinates of the molecule.

    Returns
    -------
    numpy.ndarray
        The tensor of inertia of a molecule.

    """
    pow2 = coordinates**2
    # Use a 1-D mass vector. The previous (N, 1) column vector broadcast
    # against the (N,) coordinate columns into an (N, N) outer product, so
    # each sum evaluated to (sum_i m_i) * (sum_j q_j) instead of the
    # correct sum_i m_i * q_i (wrong for any molecule with N > 1 atoms).
    masses = np.array([atomic_mass[e.upper()] for e in elements])
    diag_1 = np.sum(masses * (pow2[:, 1] + pow2[:, 2]))
    diag_2 = np.sum(masses * (pow2[:, 0] + pow2[:, 2]))
    diag_3 = np.sum(masses * (pow2[:, 0] + pow2[:, 1]))
    mxy = np.sum(-masses * coordinates[:, 0] * coordinates[:, 1])
    mxz = np.sum(-masses * coordinates[:, 0] * coordinates[:, 2])
    myz = np.sum(-masses * coordinates[:, 1] * coordinates[:, 2])
    # NOTE(review): normalisation by the atom count is kept from the
    # original implementation - confirm it is intended (a textbook inertia
    # tensor carries no 1/N factor).
    inertia_tensor = np.array([[diag_1, mxy, mxz], [mxy, diag_2, myz],
                               [mxz, myz, diag_3]]) / coordinates.shape[0]
    return (inertia_tensor)
def principal_axes(elements, coordinates):
    """Return principal axes (inertia-tensor eigenvectors) as rows."""
    _, eigenvectors = np.linalg.eig(get_inertia_tensor(elements, coordinates))
    return eigenvectors.T
def normalize_vector(vector):
    """
    Normalize a vector.

    A new vector is returned, the original vector is not modified.

    Parameters
    ----------
    vector : np.array
        The vector to be normalized.

    Returns
    -------
    np.array
        The normalized vector, rounded to 4 decimal places.

    """
    unit = np.divide(vector, np.linalg.norm(vector))
    return np.round(unit, decimals=4)
def rotation_matrix_arbitrary_axis(angle, axis):
    """
    Return a rotation matrix of `angle` radians about `axis`.

    Parameters
    ----------
    angle : int or float
        The size of the rotation in radians.
    axis : numpy.array
        A 3 element aray which represents a vector. The vector is the
        axis about which the rotation is carried out.

    Returns
    -------
    numpy.array
        A 3x3 array representing a rotation matrix.

    """
    axis = normalize_vector(axis)
    # Quaternion components: w = cos(angle/2), (x, y, z) = axis*sin(angle/2).
    w = np.cos(angle / 2)
    x, y, z = axis * np.sin(angle / 2)
    row_1 = [
        np.square(w) + np.square(x) - np.square(y) - np.square(z),
        2 * (x * y - w * z),
        2 * (x * z + w * y),
    ]
    row_2 = [
        2 * (x * y + w * z),
        np.square(w) + np.square(y) - np.square(x) - np.square(z),
        2 * (y * z - w * x),
    ]
    row_3 = [
        2 * (x * z - w * y),
        2 * (y * z + w * x),
        np.square(w) + np.square(z) - np.square(x) - np.square(y),
    ]
    return np.array([row_1, row_2, row_3])
def align_principal_ax(elements, coordinates):
    """Rotate coordinates so principal axes align with the Cartesian axes.

    Principal axes with indices 2, 1 and 0 are aligned with the x, y and z
    unit vectors respectively (one rotation per axis).

    Returns
    -------
    tuple
        (rotated coordinates array, list of the three np.matrix rotation
        matrices applied, in order).
    """
    coor = deepcopy(coordinates)
    new_coor = []
    rot = []
    for i, j in zip([2, 1, 0], [[1, 0, 0], [0, 1, 0], [0, 0, 1]]):
        # NOTE(review): the axes are recomputed from the ORIGINAL
        # `coordinates` each pass while rotations accumulate on `coor` -
        # confirm this is intentional and not meant to use `coor`.
        p_axes = principal_axes(elements, coordinates)
        # Rotation axis is perpendicular to the principal axis and its
        # Cartesian target; the angle comes from |cross| and dot products.
        r_vec = np.cross(p_axes[i], np.array(j))
        sin = np.linalg.norm(r_vec)
        cos = np.dot(p_axes[i], np.array(j))
        ang = np.arctan2(sin, cos)
        R_mat = np.matrix(rotation_matrix_arbitrary_axis(ang, r_vec))
        rot.append(R_mat)
        # Apply the rotation atom-by-atom (inner `i` shadows the loop index,
        # which is safe here because the outer `i` is not used again below).
        for i in coor:
            new_coord = R_mat * i.reshape(-1, 1)
            new_coor.append(np.array(new_coord.reshape(1, -1))[0])
        new_coor = np.array(new_coor)
        coor = new_coor
        new_coor = []
    return (coor, rot)
def calc_asphericity(elements, coordinates):
    """Return the asphericity from sorted inertia-tensor eigenvalues."""
    eigenvalues = get_tensor_eigenvalues(
        get_inertia_tensor(elements, coordinates), sort=True)
    return asphericity(eigenvalues)
def calc_acylidricity(elements, coordinates):
    """Return the acylindricity from sorted inertia-tensor eigenvalues."""
    eigenvalues = get_tensor_eigenvalues(
        get_inertia_tensor(elements, coordinates), sort=True)
    return acylidricity(eigenvalues)
def calc_relative_shape_anisotropy(elements, coordinates):
    """Return the relative shape anisotropy from sorted eigenvalues."""
    eigenvalues = get_tensor_eigenvalues(
        get_inertia_tensor(elements, coordinates), sort=True)
    return relative_shape_anisotropy(eigenvalues)
def unit_cell_to_lattice_array(cryst):
    """Return parallelpiped unit cell lattice matrix.

    Builds the orthogonalisation (M^-1) matrix from six crystallographic
    parameters (a, b, c, alpha, beta, gamma; angles in degrees).
    """
    a, b, c, alpha, beta, gamma = cryst
    # Convert angles from degrees to radians and precompute trig terms.
    r_alpha = np.deg2rad(alpha)
    r_beta = np.deg2rad(beta)
    r_gamma = np.deg2rad(gamma)
    cos_a, cos_b, cos_g = np.cos(r_alpha), np.cos(r_beta), np.cos(r_gamma)
    sin_g = np.sin(r_gamma)
    # Triclinic unit cell volume (needed for the last matrix element).
    volume = a * b * c * (
        1 - cos_a**2 - cos_b**2 - cos_g**2 + 2 * cos_a * cos_b * cos_g)**0.5
    return np.array([
        [a, b * cos_g, c * cos_b],
        [0, b * sin_g, c * (cos_a - cos_b * cos_g) / sin_g],
        [0, 0, volume / (a * b * sin_g)],
    ])
def lattice_array_to_unit_cell(lattice_array):
    """Return crystallographic param. from unit cell lattice matrix."""
    # Cell edge lengths are the column norms of the lattice matrix.
    lengths = np.sqrt(np.sum(lattice_array**2, axis=0))
    gamma_r = np.arccos(lattice_array[0][1] / lengths[1])
    beta_r = np.arccos(lattice_array[0][2] / lengths[2])
    alpha_r = np.arccos(
        lattice_array[1][2] * np.sin(gamma_r) / lengths[2]
        + np.cos(beta_r) * np.cos(gamma_r)
    )
    angles = [
        np.rad2deg(alpha_r), np.rad2deg(beta_r), np.rad2deg(gamma_r)
    ]
    return np.append(lengths, angles)
def volume_from_lattice_array(lattice_array):
    """Return unit cell's volume from lattice matrix (its determinant)."""
    determinant = np.linalg.det(lattice_array)
    return determinant
def volume_from_cell_parameters(cryst):
    """Return unit cell's volume from crystallographic parameters."""
    lattice = unit_cell_to_lattice_array(cryst)
    return volume_from_lattice_array(lattice)
def fractional_from_cartesian(coordinate, lattice_array):
    """Return a fractional coordinate from a cartesian one.

    Returns a (1, 3) numpy.ndarray, as before.
    """
    # Plain ndarray ops with the @ operator; np.matrix is deprecated.
    deorthogonalisation_M = np.linalg.inv(lattice_array)
    fractional = deorthogonalisation_M @ coordinate.reshape(-1, 1)
    return fractional.reshape(1, -1)
def cartisian_from_fractional(coordinate, lattice_array):
    """Return cartesian coordinate from a fractional one.

    Returns a (1, 3) numpy.ndarray, as before.
    """
    # Plain ndarray ops with the @ operator; np.matrix is deprecated.
    orthogonal = lattice_array @ coordinate.reshape(-1, 1)
    return orthogonal.reshape(1, -1)
def cart2frac_all(coordinates, lattice_array):
    """Convert all cartesian coordinates to fractional."""
    converted = deepcopy(coordinates)
    for index, row in enumerate(converted):
        converted[index] = fractional_from_cartesian(row, lattice_array)
    return converted
def frac2cart_all(frac_coordinates, lattice_array):
    """Convert all fractional coordinates to cartesian."""
    converted = deepcopy(frac_coordinates)
    for index, row in enumerate(converted):
        converted[index] = cartisian_from_fractional(row, lattice_array)
    return converted
def create_supercell(system, supercell=None):
    """Create a supercell.

    Parameters
    ----------
    system : dict
        Molecular system with 'elements', 'atom_ids', 'coordinates' and a
        'unit_cell' or 'lattice' entry.
    supercell : list, optional
        Three [min, max] image ranges, one per lattice direction.
        Defaults to [[-1, 1], [-1, 1], [-1, 1]] (a 3x3x3 supercell).

    Returns
    -------
    dict
        The supercell system with tiled elements/atom_ids/coordinates.

    """
    # None-sentinel replaces the previous mutable list default argument.
    if supercell is None:
        supercell = [[-1, 1], [-1, 1], [-1, 1]]
    if 'lattice' not in system.keys():
        matrix = unit_cell_to_lattice_array(system['unit_cell'])
    else:
        matrix = system['lattice']
    coordinates = deepcopy(system['coordinates'])
    # One integer translation matrix per image cell, repeated per atom.
    multiplication_matrices = []
    for a_ in range(supercell[0][0], supercell[0][1] + 1):
        for b_ in range(supercell[1][0], supercell[1][1] + 1):
            for c_ in range(supercell[2][0], supercell[2][1] + 1):
                mult_matrix = np.array([[a_, b_, c_]])
                mult_matrix = np.repeat(
                    mult_matrix, coordinates.shape[0], axis=0)
                multiplication_matrices.append(mult_matrix)
    # Translate in fractional space, then convert back to cartesian.
    frac_coordinates = cart2frac_all(coordinates, matrix)
    updated_coordinates = []
    for mat in multiplication_matrices:
        updated_coordinates.append(frac_coordinates + mat)
    supercell_frac_coordinates = np.concatenate(updated_coordinates, axis=0)
    supercell_coordinates = frac2cart_all(supercell_frac_coordinates, matrix)
    # Tile elements and atom ids once per image cell so arrays stay aligned.
    n_cells = len(updated_coordinates)
    new_elements = np.concatenate([system['elements']] * n_cells)
    new_ids = np.concatenate([system['atom_ids']] * n_cells)
    cryst = lattice_array_to_unit_cell(matrix)
    supercell_system = {
        'elements': new_elements,
        'atom_ids': new_ids,
        'coordinates': supercell_coordinates,
        'unit_cell': cryst,
        'lattice': matrix,
    }
    return supercell_system
def is_inside_polyhedron(point, polyhedron):
    """Return True if a cartesian point lies inside a parallelpiped.

    Parameters
    ----------
    point : numpy.ndarray
        Cartesian coordinate to test.
    polyhedron : numpy.ndarray
        Either six crystallographic parameters (shape (1, 6)) or a 3x3
        lattice matrix.

    Returns
    -------
    bool
        True if the point's fractional coordinates all fall in [0, 1].

    """
    if polyhedron.shape == (1, 6):
        # flatten() so the six parameters unpack inside
        # unit_cell_to_lattice_array (a (1, 6) array would not).
        matrix = unit_cell_to_lattice_array(polyhedron.flatten())
    elif polyhedron.shape == (3, 3):
        matrix = polyhedron
    else:
        # Previously other shapes fell through and raised an opaque
        # NameError on the unbound `matrix`.
        raise _FunctionError(
            "The polyhedron has to be a (1, 6) array of unit cell "
            "parameters or a (3, 3) lattice matrix.")
    # Fixed: this referenced `pw.utilities.fractional_from_cartesian`, but
    # no `pw` name exists in this module (NameError at runtime).
    frac_coord = fractional_from_cartesian(point, matrix)[0]
    if 0 <= frac_coord[0] <= 1.000 and 0 <= frac_coord[
            1] <= 1.000 and 0 <= frac_coord[2] <= 1.000:
        return True
    else:
        return False
def normal_vector(origin, vectors):
    """Return normal vector for two vectors with same origin."""
    edge_a = vectors[0] - origin
    edge_b = vectors[1] - origin
    return np.cross(edge_a, edge_b)
def discrete_molecules(system, rebuild=None, tol=0.4):
    """
    Decompose molecular system into individual discrete molecules.

    Parameters
    ----------
    system : dict
        Molecular system with 'elements' and 'coordinates' arrays and, for
        periodic systems, a 'unit_cell' (shape (6,)) or 'lattice'
        (shape (3, 3)) entry; 'atom_ids' is carried through when present.
    rebuild : dict, optional
        A supercell system ('elements', 'atom_ids', 'coordinates') used to
        reconstruct molecules across the periodic boundary.
    tol : float
        Bond tolerance (Angstrom) around the sum of covalent radii.

    Returns
    -------
    list of dict
        One dictionary per discrete molecule with 'elements',
        'coordinates' and (if supplied on input) 'atom_ids'.

    Note
    ----
    New formula for bonds: (26/07/17)
    The two atoms, x and y, are considered bonded if the distance between
    them, calculated with distance matrix, is within the ranges:
    .. :math:

        Rcov(x) + Rcov(y) - t < R(x,y) < Rcov(x) + Rcov(y) + t

    where Rcov is the covalent radius and the tolerance (t) is set to
    0.4 Angstrom.

    """
    # First we check which operation mode we use.
    # 1) Non-periodic MolecularSystem.
    # 2) Periodic MolecularSystem without rebuilding.
    # 3) Periodic Molecular system with rebuilding (supercell provided).
    if rebuild is not None:
        mode = 3
    else:
        if 'unit_cell' in system.keys():
            if system['unit_cell'].shape == (6,):
                mode = 2
            else:
                mode = 1
        elif 'lattice' in system.keys():
            if system['lattice'].shape == (3, 3):
                mode = 2
            else:
                mode = 1
        else:
            mode = 1
    # We create a list containing all atoms, theirs periodic elements and
    # coordinates. As this process is quite complicated, we need a list
    # which we will gradually be reducing.
    try:
        elements = system['elements']
        coordinates = system['coordinates']
    except KeyError:
        raise _FunctionError(
            "The 'elements' key is missing in the 'system' dictionary "
            "attribute of the MolecularSystem object. Which means, you need to"
            " decipher the forcefield based atom keys first (see manual)."
        )
    # NOTE(review): redundant re-read of 'coordinates' (already assigned in
    # the try block above); kept as-is.
    coordinates = system['coordinates']
    args = (elements, coordinates)
    adj = 0
    # If there are forcefield 'atom ids' as well we will retain them.
    # `adj` shifts every positional index below by one when ids are present.
    if 'atom_ids' in system.keys():
        atom_ids = system['atom_ids']
        args = (elements, atom_ids, coordinates)
        adj = 1
    atom_list = compose_atom_list(*args)
    atom_coor = decompose_atom_list(atom_list)[1 + adj]
    # Scenario 1: We load a non-periodic MolecularSystem.
    # We will not have 'unit_cell' nor 'lattice' keywords in the dictionary
    # and also we do not do any re-building.
    # Scenario 2: We load a periodic MolecularSystem. We want to only Extract
    # complete molecules that do not have been affected by the periodic
    # boundary.
    # Scenario 3: We load a periodic Molecular System. We want it to be rebuild
    # therefore, we also provide a supercell.
    # Scenarios 2 and 3 require a lattice and also their origin is at origin.
    # Scenario 1 should have the origin at the center of mass of the system.
    # EDIT 09-04-18: All origins/pseudo_origin had to be skewed towards some
    # direction (x + 0.01) so that there would be no ambiguity in periodic
    # ang highly symmetric systems where the choice of the closest atom would
    # be random from a set of equally far choices - bug found in the testing
    # this way rebuild system should always look the same from the same input
    # and on different machines.
    if mode == 2 or mode == 3:
        # Scenarios 2 or 3.
        origin = np.array([0.01, 0., 0.])
        if 'lattice' not in system.keys():
            matrix = unit_cell_to_lattice_array(system['unit_cell'])
        else:
            matrix = system['lattice']
        pseudo_origin_frac = np.array([0.26, 0.25, 0.25])
        pseudo_origin = cartisian_from_fractional(pseudo_origin_frac, matrix)
        # If a supercell is also provided that encloses the unit cell for the
        # reconstruction of the molecules through the periodic boundary.
        if rebuild is not None:
            selements = rebuild['elements']
            sids = rebuild['atom_ids']
            scoordinates = rebuild['coordinates']
            satom_list = compose_atom_list(selements, sids, scoordinates)
            satom_coor = decompose_atom_list(satom_list)[1 + adj]
        # There is one more step. We need to sort out for all the
        # reconstructed molecules, which are the ones that belong to the
        # unit cell. As we did the reconstruction to every chunk in the unit
        # cell we have now some molecules that belong to neighbouring cells.
        # The screening is simple. If the COM of a molecule translated to
        # fractional coordinates (so that it works for parallelpiped) is
        # within the unit cell boundaries <0, 1> then it's it. There is
        # an exception, for the trajectories, very often the unit cell
        # is centered at origin. Therefore we need to use <-0.5, 0.5>
        # boundary. We will simply decide which is the case by calculating
        # the centre of mass of the whole system.
        system_com = center_of_mass(elements, coordinates)
        if np.allclose(system_com, origin, atol=1e-00):
            boundary = np.array([-0.5, 0.5])
        else:
            boundary = np.array([0., 1.])
    else:
        # Scenario 1.
        pseudo_origin = center_of_mass(
            elements, coordinates) + np.array([0.01, 0., 0.])
    # Here the final discrete molecules will be stored.
    molecules = []
    # Exceptions. Usually end-point atoms that create single bonds or
    # just a separate atoms in the system.
    exceptions = ['H', 'CL', 'BR', 'F', 'HE', 'AR', 'NE', 'KR', 'XE', 'RN']
    # The upper limit for distances analysed for bonds will be assigned for
    # a given system (to save time). We take set('elements') and then find
    # the largest R(cov) in the system and set the max_dist as a double
    # of it plus the 150% tolerance (tol).
    set_of_elements = set(system['elements'])
    max_r_cov = max([
        atomic_covalent_radius[i.upper()] for i in set_of_elements])
    max_dist = 2 * max_r_cov + tol
    # We continue untill all items in the list have been analysed and popped.
    while atom_list:
        # Grow one molecule per outer iteration, seeded from the heavy atom
        # nearest to the (skewed) pseudo-origin.
        inside_atoms_heavy = [
            i for i in atom_list if i[0].upper() not in exceptions
        ]
        if inside_atoms_heavy:
            # Now we create an array of atom coordinates. It does seem
            # somehow counter-intuitive as this is what we started with
            # and made it into a list. But, in my opinion it's the only
            # way to do it. It's hard to control and delete items in two
            # separate arrays that we started with and we don't want
            # atoms already assigned in our array for distance matrix.
            inside_atoms_coord_heavy = decompose_atom_list(inside_atoms_heavy)[
                1 + adj]
            dist_matrix = euclidean_distances(inside_atoms_coord_heavy,
                                              pseudo_origin.reshape(1, -1))
            atom_index_x, _ = np.unravel_index(dist_matrix.argmin(),
                                               dist_matrix.shape)
            # Added this so that lone atoms (even if heavy) close to the
            # periodic boundary are not analysed, as they surely have matching
            # symmetry equivalence that bind to a bigger atom cluster inside
            # the unit_cell.
            potential_starting_point = inside_atoms_heavy[atom_index_x]
            pot_arr = np.array(potential_starting_point[1 + adj:])
            dist_matrix = euclidean_distances(
                atom_coor, pot_arr.reshape(1, -1)
            )
            idx = (dist_matrix > 0.1) * (dist_matrix < max_dist)
            if len(idx) < 1:
                pass
            else:
                working_list = [potential_starting_point]
        else:
            # Safety check.
            break
        final_molecule = []
        # Breadth-first growth: bond neighbours of the current working set
        # are collected into working_list_temp for the next pass.
        while working_list:
            working_list_temp = []
            try:
                atom_coor = decompose_atom_list(atom_list)[1 + adj]
            except _FunctionError:
                atom_coor = None
            for i in working_list:
                if i[0].upper() not in exceptions:
                    # It's of GREATEST importance that the i_arr variable
                    # is assigned here before entering the atom_coor loop.!
                    # Otherwise it will not be re-asigned when the satom_list
                    # still iterates, but the atom_list is already empty...
                    i_arr = np.array(i[1 + adj:])
                    if atom_coor is not None:
                        dist_matrix = euclidean_distances(
                            atom_coor, i_arr.reshape(1, -1)
                        )
                        idx = (dist_matrix > 0.1) * (dist_matrix < max_dist)
                        neighbours_indexes = np.where(idx)[0]
                        for j in neighbours_indexes:
                            j_arr = np.array(atom_coor[j])
                            r_i_j = distance(i_arr, j_arr)
                            # Covalent-radius bond criterion (see Note).
                            r_cov_i_j = atomic_covalent_radius[
                                i[0].upper()] + atomic_covalent_radius[
                                    atom_list[j][0].upper()]
                            if r_cov_i_j - tol < r_i_j < r_cov_i_j + tol:
                                working_list_temp.append(atom_list[j])
                    if rebuild is not None:
                        # Also look for bonds through the periodic boundary
                        # into the provided supercell atoms.
                        sdist_matrix = euclidean_distances(
                            satom_coor, i_arr.reshape(1, -1))
                        sidx = (sdist_matrix > 0.1) * (sdist_matrix < max_dist)
                        sneighbours_indexes = np.where(sidx)[0]
                        for j in sneighbours_indexes:
                            if satom_list[j] in atom_list:
                                pass
                            else:
                                j_arr = np.array(satom_coor[j])
                                r_i_j = distance(i_arr, j_arr)
                                r_cov_i_j = atomic_covalent_radius[
                                    i[0].upper()
                                ] + atomic_covalent_radius[
                                    satom_list[j][0].upper()]
                                if r_cov_i_j - tol < r_i_j < r_cov_i_j + tol:
                                    working_list_temp.append(satom_list[j])
                    final_molecule.append(i)
                else:
                    final_molecule.append(i)
            for i in working_list:
                try:
                    atom_list.remove(i)
                except ValueError:
                    pass
            # We empty the working list as all the items were analysed
            # and moved to the final_molecule list.
            working_list = []
            # We make sure there are no duplicates in the working_list_temp.
            working_list_temp = unique(working_list_temp)
            # Now we move the entries from the temporary working list
            # to the working list for looping analysys.
            for i in working_list_temp:
                # We make sure that only new and unassigned atoms are
                # being transfered.
                if i not in final_molecule:
                    working_list.append(i)
        final_molecule_dict = {}
        final_molecule_dict['elements'] = np.array(
            [x[0] for x in final_molecule], dtype='str')
        final_molecule_dict['coordinates'] = np.array(
            [[*xyz[1 + adj:]] for xyz in final_molecule])
        if adj == 1:
            final_molecule_dict['atom_ids'] = np.array(
                [x[1] for x in final_molecule], dtype='str')
        # In general we always want the molecule so the initial bool_ is True.
        bool_ = True
        # But, for periodic only if the molecule is in the initial unit cell.
        if rebuild is not None:
            com = center_of_mass(final_molecule_dict['elements'],
                                 final_molecule_dict['coordinates'])
            com_frac = fractional_from_cartesian(com, matrix)[0]
            # If we don't round the numerical errors will come up.
            com_frac_round = np.around(com_frac, decimals=8)
            bool_ = np.all(np.logical_and(com_frac_round >= boundary[0],
                                          com_frac_round < boundary[1]),
                           axis=0)
        if bool(bool_) is True:
            molecules.append(final_molecule_dict)
    return molecules
def angle_between_vectors(x, y):
    """Calculate the angle between two vectors x and y.

    The dot product's absolute value is used, so the result lies in
    [0, pi/2].
    """
    numerator = abs(x[0] * y[0] + x[1] * y[1] + x[2] * y[2])
    denominator = (
        np.sqrt(x[0]**2 + x[1]**2 + x[2]**2) *
        np.sqrt(y[0]**2 + y[1]**2 + y[2]**2))
    return np.arccos(numerator / denominator)
def vector_analysis(vector, coordinates, elements_vdw, increment=1.0):
    """Analyse a sampling vector's path for window analysis purpose.

    Returns an array [distance-from-origin, diameter, narrowest point,
    vector] if the whole path stays clear of all vdW spheres; otherwise
    returns None implicitly.
    """
    # Number of sampling steps along the vector at the given increment.
    n_chunks = int(np.linalg.norm(vector) // increment)
    step = vector / n_chunks
    # Points sampled along the vector's path, origin included.
    pathway = np.array([step * k for k in range(n_chunks + 1)])
    # Minimum clearance from any vdW surface at each sampled point.
    clearances = np.array([
        np.amin(
            euclidean_distances(coordinates, point.reshape(1, -1))
            - elements_vdw)
        for point in pathway
    ])
    if all(c > 0 for c in clearances):
        narrowest = np.argmin(clearances)
        # As first argument we need to give the distance from the origin.
        dist = np.linalg.norm(step * narrowest)
        return np.array(
            [dist, clearances[narrowest] * 2, *step * narrowest, *vector])
def vector_preanalysis(vector, coordinates, elements_vdw, increment=1.0):
    """Pre-screen a sampling vector with geometric line-sphere intersection.

    Only vectors that intersect no van der Waals sphere "in front of" the
    origin are passed on to the (slower) vector_analysis(); otherwise None
    is returned implicitly.
    """
    norm_vec = vector/np.linalg.norm(vector)
    intersections = []
    origin = center_of_coor(coordinates)
    # Line-sphere intersection: L are atom positions relative to the origin,
    # t_ca the projection of each onto the unit vector, d the perpendicular
    # distance of each atom centre from the sampling line.
    L = coordinates - origin
    t_ca = np.dot(L, norm_vec)
    d = np.sqrt(np.einsum('ij,ij->i', L, L) - t_ca**2)
    # NOTE(review): assumes elements_vdw is an (N, 1) column vector, so the
    # subtraction broadcasts to (N, N) and .diagonal() recovers the per-atom
    # r_vdw(i)**2 - d(i)**2 terms - confirm against the callers.
    under_sqrt = elements_vdw**2 - d**2
    diag = under_sqrt.diagonal()
    # Positive discriminant => the line pierces that atom's vdW sphere.
    positions = np.argwhere(diag > 0)
    for pos in positions:
        t_hc = np.sqrt(diag[pos[0]])
        t_0 = t_ca[pos][0] - t_hc
        t_1 = t_ca[pos][0] + t_hc
        P_0 = origin + np.dot(t_0, norm_vec)
        P_1 = origin + np.dot(t_1, norm_vec)
        # Count the intersection only when the entry point is nearer than
        # the exit point (sphere lies ahead along the vector direction).
        if np.linalg.norm(P_0) < np.linalg.norm(P_1):
            intersections.append(1)
        else:
            intersections.append(0)
    if sum(intersections) == 0:
        return vector_analysis(vector, coordinates, elements_vdw, increment)
def optimise_xy(xy, *args):
    """Objective for the in-plane (x, y) window-centre optimisation.

    Returns the NEGATIVE pore diameter at the probed centre so that a
    minimiser effectively maximises the diameter.
    """
    z_coord, elements, coordinates = args
    probe_com = np.array([xy[0], xy[1], z_coord])
    diameter = pore_diameter(elements, coordinates, com=probe_com)[0]
    return -diameter
def optimise_z(z, *args):
    """Objective for the window-centre optimisation along the z axis.

    Returns the pore diameter at the probed centre (x, y, z).
    """
    x_coord, y_coord, elements, coordinates = args
    probe_com = np.array([x_coord, y_coord, z])
    return pore_diameter(elements, coordinates, com=probe_com)[0]
def window_analysis(window,
                    elements,
                    coordinates,
                    elements_vdw,
                    increment2=0.1,
                    z_bounds=None,
                    lb_z=True,
                    z_second_mini=False,
                    **kwargs):
    """
    Return window diameter and window's centre.

    Parameters
    ----------
    window: numpy.array
        Pool of sampling vectors (rows of vector_analysis output)
        clustered to a single window; the best one is refined here.
    elements: numpy.array
    coordinates: numpy.array
    elements_vdw: numpy.array
    increment2: float
        Refinement step (Angstrom) for the best vector's re-analysis.
    z_bounds: list of two floats, optional
        Bounds for the z optimisation of the window centre; defaults to
        no bounds.  (Fix: this used to be a mutable default argument
        that the function mutated in place, leaking state between calls
        and into the caller's list.)
    lb_z: bool
        If True, set the lower z bound to minus the window-centre z so
        the optimiser cannot walk back through the cage's centre.
    z_second_mini: bool
        If True, run a second z minimisation after the xy optimisation.

    Returns
    -------
    tuple of (float, numpy.array) or None
        (window diameter, window centre of mass), or None when the
        refined vector analysis fails.
    """
    # Never share or mutate the caller's list (or a default shared
    # between calls): always work on a private copy.
    z_bounds = [None, None] if z_bounds is None else list(z_bounds)
    # Copy the coordinates as we will manipulate them.
    coordinates = deepcopy(coordinates)
    # Find the vector with the largest window sampling diameter from the pool.
    vector_ = window[window.argmax(axis=0)[1]][5:8]
    vector_analysed = vector_analysis(
        vector_, coordinates, elements_vdw, increment=increment2)
    # A safety check, if the refined analysis gives None we end the function.
    if vector_analysed is None:
        return None
    vector = vector_analysed[5:8]
    # Unit vectors along the x and z axes.
    vec_a = [1, 0, 0]
    vec_c = [0, 0, 1]
    # Angles needed for rotation (in radians) to rotate and translate the
    # molecule for the vector to become the Z-axis.
    angle_1 = angle_between_vectors(np.array([vector[0], vector[1], 0]), vec_a)
    angle_2 = angle_between_vectors(vector, vec_c)
    # Depending in which octant of the cartesian coordinate system the
    # vector lies, we need a rotation in a different direction and by a
    # different value (angle_between_vectors only returns acute angles).
    if vector[0] >= 0 and vector[1] >= 0 and vector[2] >= 0:
        angle_1 = -angle_1
        angle_2 = -angle_2
    if vector[0] < 0 and vector[1] >= 0 and vector[2] >= 0:
        angle_1 = np.pi * 2 + angle_1
        angle_2 = angle_2
    if vector[0] >= 0 and vector[1] < 0 and vector[2] >= 0:
        angle_1 = angle_1
        angle_2 = -angle_2
    if vector[0] < 0 and vector[1] < 0 and vector[2] >= 0:
        angle_1 = np.pi * 2 - angle_1
    if vector[0] >= 0 and vector[1] >= 0 and vector[2] < 0:
        angle_1 = -angle_1
        angle_2 = np.pi + angle_2
    if vector[0] < 0 and vector[1] >= 0 and vector[2] < 0:
        angle_2 = np.pi - angle_2
    if vector[0] >= 0 and vector[1] < 0 and vector[2] < 0:
        angle_2 = angle_2 + np.pi
    if vector[0] < 0 and vector[1] < 0 and vector[2] < 0:
        angle_1 = -angle_1
        angle_2 = np.pi - angle_2
    # Rotation matrix for rotation around Z-axis with angle_1.
    rotation_around_z = np.array([[np.cos(angle_1), -np.sin(angle_1), 0],
                                  [np.sin(angle_1), np.cos(angle_1), 0],
                                  [0, 0, 1]])
    # Rotate the whole molecule with rotation_around_z.
    coordinates = np.array([np.dot(rotation_around_z, i) for i in coordinates])
    # Rotation matrix for rotation around Y-axis with angle_2.
    rotation_around_y = np.array([[np.cos(angle_2), 0, np.sin(angle_2)],
                                  [0, 1, 0],
                                  [-np.sin(angle_2), 0, np.cos(angle_2)]])
    # Rotate the whole molecule with rotation_around_y.
    coordinates = np.array([np.dot(rotation_around_y, i) for i in coordinates])
    # Third step is translation. We are now at [0, 0, -z].
    # We shift the molecule so that the center of the window is at the origin.
    # The `z` is from the original vector analysis. It is the point on the
    # vector where the largest sampling sphere was (vector_analysed[0]).
    new_z = vector_analysed[0]
    # Translate the whole molecule to shift the window's center to the origin.
    coordinates = coordinates - np.array([[0, 0, new_z]] *
                                         coordinates.shape[0])
    # !!!Here the window center (xy and z) optimisation takes place!!!
    window_com = np.array([0, 0, 0], dtype=float)
    # The lb_z parameter is 'lower bound equal to z' which means,
    # that we set the lower bound for the z optimisation to be equal
    # to the -new_z as in some cages it's the COM - pore that is the
    # limiting diameter. But, no lower than new_z because we don't want to
    # move into the other direction.
    if lb_z:
        z_bounds[0] = -new_z
    window_diameter, _ = pore_diameter(elements, coordinates, com=window_com)
    # SciPy minimisation on z coordinate.
    z_args = (window_com[0], window_com[1], elements, coordinates)
    z_optimisation = minimize(
        optimise_z, x0=window_com[2], args=z_args, bounds=[z_bounds])
    # Substitute the z coordinate for a minimised one.
    window_com[2] = z_optimisation.x[0]
    # SciPy brute optimisation on x and y coordinates in window plane.
    xy_args = (window_com[2], elements, coordinates)
    xy_bounds = ((-window_diameter / 2, window_diameter / 2),
                 (-window_diameter / 2, window_diameter / 2))
    xy_optimisation = brute(
        optimise_xy, xy_bounds, args=xy_args, full_output=True, finish=fmin)
    # Substitute the x and y coordinates for the optimised ones.
    window_com[0] = xy_optimisation[0][0]
    window_com[1] = xy_optimisation[0][1]
    # Additional SciPy minimisation on z coordinate. Added on 18 May 2017.
    # We can argue which approach is best. Whether z opt and then xy opt
    # or like now z opt -> xy opt -> additional z opt etc. I have also tested
    # a loop of optimisations until some convergence and optimisation of
    # xyz coordinates at the same time by optimising these two optimisations.
    # In the end, I think this approach is best for cages.
    # Update 20 October 2017: I made this optional and turned off by default.
    # In many cases that worsens the quality of the results and should be used
    # with caution.
    if z_second_mini is not False:
        z_args = (window_com[0], window_com[1], elements, coordinates)
        # The z_bounds should be passed in kwargs.
        z_optimisation = minimize(
            optimise_z, x0=window_com[2], args=z_args, bounds=[z_bounds])
        # Substitute the z coordinate for a minimised one.
        window_com[2] = z_optimisation.x[0]
    # Calculate the new window diameter.
    window_diameter, _ = pore_diameter(elements, coordinates, com=window_com)
    # To get the window's true centre of mass we need to reverse the rotation
    # and translation operations on the window com.
    # Reverse the translation by adding back the new_z.
    window_com[2] = window_com[2] + new_z
    angle_2_1 = -angle_2
    reverse_around_y = np.array([[np.cos(angle_2_1), 0, np.sin(angle_2_1)],
                                 [0, 1, 0],
                                 [-np.sin(angle_2_1), 0, np.cos(angle_2_1)]])
    # Reversing the second rotation around Y-axis.
    window_com = np.dot(reverse_around_y, window_com)
    angle_1_1 = -angle_1
    reverse_around_z = np.array([[np.cos(angle_1_1), -np.sin(angle_1_1), 0],
                                 [np.sin(angle_1_1), np.cos(angle_1_1), 0],
                                 [0, 0, 1]])
    # Reversing the first rotation around Z-axis.
    window_com = np.dot(reverse_around_z, window_com)
    return (window_diameter, window_com)
def find_windows(elements,
                 coordinates,
                 processes=None,
                 mol_size=None,
                 adjust=1,
                 pore_opt=True,
                 increment=1.0,
                 **kwargs):
    """Return windows diameters and center of masses for a molecule.

    Parameters
    ----------
    elements: numpy.array
        Atomic symbols, one per atom.
    coordinates: numpy.array
        Cartesian coordinates, one row per atom.
    processes: int, optional
        Number of worker processes; serial analysis when None.
    mol_size: optional
        Accepted for API compatibility; not used in this function.
    adjust: float
        Scales the density of sampling points on the sampling sphere.
    pore_opt: bool
        If True, centre the molecule on the optimised pore centre
        instead of the plain centre of mass.
    increment: float
        Step (Angstrom) along each sampling vector.

    Returns
    -------
    tuple of (numpy.array, numpy.array) or None
        Window diameters (Angstrom) and the corresponding window
        centres of mass, or None when no window is found.
    """
    # Copy the coordinates as we will perform many operations on them.
    coordinates = deepcopy(coordinates)
    # Center of our cartesian system is always at origin.
    # NOTE(review): `origin` is not used anywhere below.
    origin = np.array([0, 0, 0])
    # Initial center of mass to reverse translation at the end.
    initial_com = center_of_mass(elements, coordinates)
    # Shift the cage to the origin using either the standard center of mass
    # or, if pore_opt flag is True, the optimised pore center as center of mass
    if pore_opt is True:
        # Normally the pore is calculated from the COM of a molecule.
        # So, essentially the molecule's COM is the pore center.
        # To shift the molecule so that the center of the optimised pore
        # is at the origin of the system and not the center of the not
        # optimised one, we need to adjust the shift. We also have to update
        # the initial com.
        com_adjust = initial_com - opt_pore_diameter(elements, coordinates, **
                                                     kwargs)[2]
        initial_com = initial_com - com_adjust
        coordinates = shift_com(elements, coordinates, com_adjust=com_adjust)
    else:
        # Otherwise, we just shift the cage to the origin.
        coordinates = shift_com(elements, coordinates)
    # We create an array of vdw radii of elements (column vector).
    elements_vdw = np.array([[atomic_vdw_radius[x.upper()]] for x in elements])
    # We calculate maximum diameter of a molecule to determine the radius
    # of a sampling sphere necessary to enclose the whole molecule.
    shpere_radius = max_dim(elements, coordinates)[2] / 2
    sphere_surface_area = 4 * np.pi * shpere_radius**2
    # Here we determine the number of sampling points necessary for a fine
    # sampling. Smaller molecules require a finer density of sampling
    # points on the sampling sphere's surface, whereas larger require less.
    # This formula was created so that larger molecules do not take much longer
    # to analyse, as number_sampling_points*length_of_sampling_vectors
    # results in quadratic increase of sampling time. The 250 factor was
    # specifically determined to produce close to 1 sampling point /Angstrom^2
    # for a sphere of radius ~ 24 Angstrom. We can adjust how fine the
    # sampling is by changing the adjust factor.
    number_of_points = int(np.log10(sphere_surface_area) * 250 * adjust)
    # Here I use code by <NAME> for spreading points on a sphere
    # (golden-angle spiral): http://blog.marmakoide.org/?p=1
    golden_angle = np.pi * (3 - np.sqrt(5))
    theta = golden_angle * np.arange(number_of_points)
    z = np.linspace(1 - 1.0 / number_of_points, 1.0 / number_of_points - 1.0,
                    number_of_points)
    radius = np.sqrt(1 - z * z)
    points = np.zeros((number_of_points, 3))
    points[:, 0] = radius * np.cos(theta) * shpere_radius
    points[:, 1] = radius * np.sin(theta) * shpere_radius
    points[:, 2] = z * shpere_radius
    # Here we will compute the eps parameter for the sklearn.cluster.DBSCAN
    # (3-dimensional spatial clustering algorithm) which is the mean distance
    # to the closest point of all points.
    values = []
    tree = KDTree(points)
    for i in points:
        # k=10 nearest neighbours per point feed the mean-distance estimate.
        dist, ind = tree.query(i.reshape(1, -1), k=10)
        values.extend(dist)
    mean_distance = np.mean(values)
    # The best eps is parametrized when adding the mean distance and its root.
    eps = mean_distance + mean_distance**0.5
    # Here we either run the sampling points vectors analysis in serial
    # or parallel. The vectors that go through molecular pores return
    # as an analysed list with the increment at the vector's path with largest
    # included sphere and coordinates for this narrow channel point. Vectors
    # that find the molecule on their path are returned as NoneType objects.
    # Parallel analysis on user's defined number of CPUs.
    if processes:
        pool = Pool(processes=processes)
        parallel = [
            pool.apply_async(
                vector_preanalysis,
                args=(
                    point,
                    coordinates,
                    elements_vdw, ),
                kwds={'increment': increment}) for point in points
        ]
        results = [p.get() for p in parallel if p.get() is not None]
        pool.terminate()
        # Dataset is an array of sampling points coordinates.
        dataset = np.array([x[5:8] for x in results])
    else:
        results = [
            vector_preanalysis(
                point, coordinates, elements_vdw, increment=increment)
            for point in points
        ]
        results = [x for x in results if x is not None]
        dataset = np.array([x[5:8] for x in results])
    # If not a single vector was returned from the analysis it means that
    # no molecular channels (what we call windows here) connect the
    # molecule's interior with the surroundings (exterior space).
    # The number of windows in that case equals zero and zero is returned.
    # Otherwise we continue our search for windows.
    if len(results) == 0:
        return None
    else:
        # Perform DBSCAN to cluster the sampling points vectors.
        # the n_jobs will be developed later.
        # db = DBSCAN(eps=eps, n_jobs=_ncpus).fit(dataset)
        db = DBSCAN(eps=eps).fit(dataset)
        # NOTE(review): core_samples_mask is computed but never used below.
        core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
        core_samples_mask[db.core_sample_indices_] = True
        labels = set(db.labels_)
        # Assign a cluster label to each sampling point.
        clusters = [[i, j] for i, j in zip(results, db.labels_)]
        clustered_results = {label: [] for label in labels}
        # Create a dictionary of clusters with points listed.
        [clustered_results[i[1]].append(i[0]) for i in clusters]
        # Now the sampling point vector in each cluster that had
        # the widest channel's 'neck' is assumed to pass the closest
        # to the window's center and therefore will be passed to
        # the window analysis function.
        # We also pass user defined settings for window analysis.
        # Again either in serial or in parallel.
        # Noisy points get a cluster label -1, therefore we have to exclude it.
        if processes:
            pool = Pool(processes=processes)
            parallel = [
                pool.apply_async(
                    window_analysis,
                    args=(np.array(clustered_results[cluster]), elements,
                          coordinates, elements_vdw),
                    kwds=kwargs) for cluster in clustered_results
                if cluster != -1
            ]
            window_results = [p.get() for p in parallel if p.get() is not None]
            pool.terminate()
        else:
            window_results = [
                window_analysis(
                    np.array(clustered_results[cluster]), elements,
                    coordinates, elements_vdw, **kwargs)
                for cluster in clustered_results if cluster != -1
            ]
        # The function returns two numpy arrays, one with windows diameters
        # in Angstrom, second with corresponding windows center's coordinates
        windows = np.array([result[0] for result in window_results
                            if result is not None])
        windows_coms = np.array(
            [np.add(result[1], initial_com) for result in window_results
             if result is not None])
        # Safety measures, if one of the windows is None or negative a warning
        # should be raised.
        # NOTE(review): the warning messages are built but never emitted
        # (the print calls are commented out).
        for result in window_results:
            if result is None:
                msg_ = " ".join(
                    ['Warning. One of the analysed windows has',
                     'returned as None. See manual.']
                )
                # print(msg_)
            elif result[0] < 0:
                msg_ = " ".join(
                    ['Warning. One of the analysed windows has a vdW',
                     'corrected diameter smaller than 0. See manual.']
                )
                # print(msg_)
    return (windows, windows_coms)
def window_shape(window,
                 elements,
                 coordinates,
                 increment2=0.1,
                 z_bounds=None,
                 lb_z=True,
                 z_second_mini=False,
                 **kwargs):
    """
    Return the 2D (XY-plane) cross-section outline of a window.

    Parameters
    ----------
    window: numpy.array
        Pool of sampling vectors (rows of vector_analysis output)
        clustered to a single window.
    elements: numpy.array
    coordinates: numpy.array
    increment2: float
        Refinement step (Angstrom) for the best vector's re-analysis.
    z_bounds: list of two floats, optional
        Bounds for the z optimisation of the window centre; defaults to
        no bounds.  (Fix: this used to be a mutable default argument
        that the function mutated in place, leaking state between calls
        and into the caller's list.)
    lb_z: bool
        If True, set the lower z bound to minus the window-centre z.
    z_second_mini: bool
        If True, run a second z minimisation after the xy optimisation.

    Returns
    -------
    numpy.array or None
        Points of the window's cross-section in the XY plane, or None
        when the refined vector analysis fails.
    """
    # Never share or mutate the caller's list (or a default shared
    # between calls): always work on a private copy.
    z_bounds = [None, None] if z_bounds is None else list(z_bounds)
    # Copy the coordinates as we will manipulate them.
    coordinates = deepcopy(coordinates)
    # We create an array of vdw radii of elements.
    elements_vdw = np.array([[atomic_vdw_radius[x.upper()]] for x in elements])
    # Find the vector with the largest window sampling diameter from the pool.
    vector_ = window[window.argmax(axis=0)[1]][5:8]
    vector_analysed = vector_analysis(
        vector_, coordinates, elements_vdw, increment=increment2)
    # A safety check, if the refined analysis gives None we end the function.
    if vector_analysed is None:
        return None
    vector = vector_analysed[5:8]
    # Unit vectors along the x and z axes.
    vec_a = [1, 0, 0]
    vec_c = [0, 0, 1]
    # Angles needed for rotation (in radians) to rotate and translate the
    # molecule for the vector to become the Z-axis.
    angle_1 = angle_between_vectors(np.array([vector[0], vector[1], 0]), vec_a)
    angle_2 = angle_between_vectors(vector, vec_c)
    # Depending in which octant of the cartesian coordinate system the
    # vector lies, we need a rotation in a different direction and by a
    # different value (angle_between_vectors only returns acute angles).
    if vector[0] >= 0 and vector[1] >= 0 and vector[2] >= 0:
        angle_1 = -angle_1
        angle_2 = -angle_2
    if vector[0] < 0 and vector[1] >= 0 and vector[2] >= 0:
        angle_1 = np.pi * 2 + angle_1
        angle_2 = angle_2
    if vector[0] >= 0 and vector[1] < 0 and vector[2] >= 0:
        angle_1 = angle_1
        angle_2 = -angle_2
    if vector[0] < 0 and vector[1] < 0 and vector[2] >= 0:
        angle_1 = np.pi * 2 - angle_1
    if vector[0] >= 0 and vector[1] >= 0 and vector[2] < 0:
        angle_1 = -angle_1
        angle_2 = np.pi + angle_2
    if vector[0] < 0 and vector[1] >= 0 and vector[2] < 0:
        angle_2 = np.pi - angle_2
    if vector[0] >= 0 and vector[1] < 0 and vector[2] < 0:
        angle_2 = angle_2 + np.pi
    if vector[0] < 0 and vector[1] < 0 and vector[2] < 0:
        angle_1 = -angle_1
        angle_2 = np.pi - angle_2
    # Rotation matrix for rotation around Z-axis with angle_1.
    rotation_around_z = np.array([[np.cos(angle_1), -np.sin(angle_1), 0],
                                  [np.sin(angle_1), np.cos(angle_1), 0],
                                  [0, 0, 1]])
    # Rotate the whole molecule with rotation_around_z.
    coordinates = np.array([np.dot(rotation_around_z, i) for i in coordinates])
    # Rotation matrix for rotation around Y-axis with angle_2.
    rotation_around_y = np.array([[np.cos(angle_2), 0, np.sin(angle_2)],
                                  [0, 1, 0],
                                  [-np.sin(angle_2), 0, np.cos(angle_2)]])
    # Rotate the whole molecule with rotation_around_y.
    coordinates = np.array([np.dot(rotation_around_y, i) for i in coordinates])
    # Third step is translation. We are now at [0, 0, -z].
    # We shift the molecule so that the center of the window is at the origin.
    # The `z` is from the original vector analysis. It is the point on the
    # vector where the largest sampling sphere was (vector_analysed[0]).
    new_z = vector_analysed[0]
    # Translate the whole molecule to shift the window's center to the origin.
    coordinates = coordinates - np.array([[0, 0, new_z]] *
                                         coordinates.shape[0])
    # !!!Here the window center (xy and z) optimisation takes place!!!
    window_com = np.array([0, 0, 0], dtype=float)
    # The lb_z parameter is 'lower bound equal to z' which means,
    # that we set the lower bound for the z optimisation to be equal
    # to the -new_z as in some cages it's the COM - pore that is the
    # limiting diameter. But, no lower than new_z because we don't want to
    # move into the other direction.
    if lb_z:
        z_bounds[0] = -new_z
    window_diameter, _ = pore_diameter(elements, coordinates, com=window_com)
    # SciPy minimisation on z coordinate.
    z_args = (window_com[0], window_com[1], elements, coordinates)
    z_optimisation = minimize(
        optimise_z, x0=window_com[2], args=z_args, bounds=[z_bounds])
    # Substitute the z coordinate for a minimised one.
    window_com[2] = z_optimisation.x[0]
    # SciPy brute optimisation on x and y coordinates in window plane.
    xy_args = (window_com[2], elements, coordinates)
    xy_bounds = ((-window_diameter / 2, window_diameter / 2),
                 (-window_diameter / 2, window_diameter / 2))
    xy_optimisation = brute(
        optimise_xy, xy_bounds, args=xy_args, full_output=True, finish=fmin)
    # Substitute the x and y coordinates for the optimised ones.
    window_com[0] = xy_optimisation[0][0]
    window_com[1] = xy_optimisation[0][1]
    # Additional SciPy minimisation on z coordinate. Added on 18 May 2017.
    # Optional and turned off by default; in many cases it worsens the
    # quality of the results and should be used with caution.
    if z_second_mini is not False:
        z_args = (window_com[0], window_com[1], elements, coordinates)
        # The z_bounds should be passed in kwargs.
        z_optimisation = minimize(
            optimise_z, x0=window_com[2], args=z_args, bounds=[z_bounds])
        # Substitute the z coordinate for a minimised one.
        window_com[2] = z_optimisation.x[0]
    # Getting the 2D plane cross-section of a window in XY plane. (10-04-18)
    # First rotation around Z axis, computing each rotated vector once
    # instead of three times (one dot product per vector).
    vectors_translated = [
        list(np.dot(rotation_around_z, i[5:])) for i in window
    ]
    # Second rotation around Y axis.
    vectors_translated = [
        list(np.dot(rotation_around_y, i)) for i in vectors_translated
    ]
    ref_distance = (new_z - window_com[2]) / np.linalg.norm(vector)
    # Cutting the XY plane: scale each vector to the window-centre plane
    # and drop the z component.
    XY_plane = np.array(
        [
            [i[0] * ref_distance, i[1] * ref_distance]
            for i in vectors_translated
        ]
    )
    return XY_plane
def find_windows_new(elements,
                     coordinates,
                     processes=None,
                     mol_size=None,
                     adjust=1,
                     pore_opt=True,
                     increment=1.0,
                     **kwargs):
    """Cluster window sampling vectors for a molecule.

    Unlike find_windows, this function stops after the DBSCAN clustering
    step and returns the raw clusters, leaving the per-window analysis
    to calculate_window_diameter / get_window_com.

    Parameters
    ----------
    elements: numpy.array
        Atomic symbols, one per atom.
    coordinates: numpy.array
        Cartesian coordinates, one row per atom.
    processes: int, optional
        Number of worker processes; serial analysis when None.
    mol_size: optional
        Accepted for API compatibility; not used in this function.
    adjust: float
        Scales the density of sampling points on the sampling sphere.
    pore_opt: bool
        If True, centre the molecule on the optimised pore centre
        instead of the plain centre of mass.
    increment: float
        Step (Angstrom) along each sampling vector.

    Returns
    -------
    tuple or None
        (clustered_results, elements, coordinates, initial_com), or
        None when no sampling vector passes through the molecule.
    """
    # Copy the coordinates as we will perform many operations on them.
    coordinates = deepcopy(coordinates)
    # Center of our cartesian system is always at origin.
    # NOTE(review): `origin` is not used anywhere below.
    origin = np.array([0, 0, 0])
    # Initial center of mass to reverse translation at the end.
    initial_com = center_of_mass(elements, coordinates)
    # Shift the cage to the origin using either the standard center of mass
    # or, if pore_opt flag is True, the optimised pore center as center of mass
    if pore_opt is True:
        # Normally the pore is calculated from the COM of a molecule.
        # So, essentially the molecule's COM is the pore center.
        # To shift the molecule so that the center of the optimised pore
        # is at the origin of the system and not the center of the not
        # optimised one, we need to adjust the shift. We also have to update
        # the initial com.
        com_adjust = initial_com - opt_pore_diameter(elements, coordinates, **
                                                     kwargs)[2]
        initial_com = initial_com - com_adjust
        coordinates = shift_com(elements, coordinates, com_adjust=com_adjust)
    else:
        # Otherwise, we just shift the cage to the origin.
        coordinates = shift_com(elements, coordinates)
    # We create an array of vdw radii of elements (column vector).
    elements_vdw = np.array([[atomic_vdw_radius[x.upper()]] for x in elements])
    # We calculate maximum diameter of a molecule to determine the radius
    # of a sampling sphere necessary to enclose the whole molecule.
    shpere_radius = max_dim(elements, coordinates)[2] / 2
    sphere_surface_area = 4 * np.pi * shpere_radius**2
    # Here we determine the number of sampling points necessary for a fine
    # sampling. Smaller molecules require a finer density of sampling
    # points on the sampling sphere's surface, whereas larger require less.
    # The 250 factor was specifically determined to produce close to
    # 1 sampling point /Angstrom^2 for a sphere of radius ~ 24 Angstrom.
    # The adjust factor tunes how fine the sampling is.
    number_of_points = int(np.log10(sphere_surface_area) * 250 * adjust)
    # Here I use code by <NAME> for spreading points on a sphere
    # (golden-angle spiral): http://blog.marmakoide.org/?p=1
    golden_angle = np.pi * (3 - np.sqrt(5))
    theta = golden_angle * np.arange(number_of_points)
    z = np.linspace(1 - 1.0 / number_of_points, 1.0 / number_of_points - 1.0,
                    number_of_points)
    radius = np.sqrt(1 - z * z)
    points = np.zeros((number_of_points, 3))
    points[:, 0] = radius * np.cos(theta) * shpere_radius
    points[:, 1] = radius * np.sin(theta) * shpere_radius
    points[:, 2] = z * shpere_radius
    # Here we will compute the eps parameter for the sklearn.cluster.DBSCAN
    # (3-dimensional spatial clustering algorithm) which is the mean distance
    # to the closest point of all points.
    values = []
    tree = KDTree(points)
    for i in points:
        # k=10 nearest neighbours per point feed the mean-distance estimate.
        dist, ind = tree.query(i.reshape(1, -1), k=10)
        values.extend(dist)
    mean_distance = np.mean(values)
    # The best eps is parametrized when adding the mean distance and its root.
    eps = mean_distance + mean_distance**0.5
    # Here we either run the sampling points vectors analysis in serial
    # or parallel. The vectors that go through molecular pores return
    # as an analysed list; vectors that find the molecule on their path
    # are returned as NoneType objects.
    # Parallel analysis on user's defined number of CPUs.
    if processes:
        pool = Pool(processes=processes)
        parallel = [
            pool.apply_async(
                vector_preanalysis,
                args=(
                    point,
                    coordinates,
                    elements_vdw, ),
                kwds={'increment': increment}) for point in points
        ]
        results = [p.get() for p in parallel if p.get() is not None]
        pool.terminate()
        # Dataset is an array of sampling points coordinates.
        dataset = np.array([x[5:8] for x in results])
    else:
        results = [
            vector_preanalysis(
                point, coordinates, elements_vdw, increment=increment)
            for point in points
        ]
        results = [x for x in results if x is not None]
        dataset = np.array([x[5:8] for x in results])
    # If not a single vector was returned from the analysis it means that
    # no molecular channels (what we call windows here) connect the
    # molecule's interior with the surroundings (exterior space).
    # The number of windows in that case equals zero and None is returned.
    # Otherwise we continue our search for windows.
    if len(results) == 0:
        return None
    else:
        # Perform DBSCAN to cluster the sampling points vectors.
        # the n_jobs will be developed later.
        # db = DBSCAN(eps=eps, n_jobs=_ncpus).fit(dataset)
        db = DBSCAN(eps=eps).fit(dataset)
        # NOTE(review): core_samples_mask is computed but never used below.
        core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
        core_samples_mask[db.core_sample_indices_] = True
        labels = set(db.labels_)
        # Assign a cluster label to each sampling point.
        clusters = [[i, j] for i, j in zip(results, db.labels_)]
        clustered_results = {label: [] for label in labels}
        # Create a dictionary of clusters with points listed.
        [clustered_results[i[1]].append(i[0]) for i in clusters]
        return clustered_results, elements, coordinates, initial_com
def calculate_window_diameter(window, elements, coordinates, **kwargs):
    """Return the diameter of a single window, or None if analysis fails."""
    vdw_radii = np.array(
        [[atomic_vdw_radius[symbol.upper()]] for symbol in elements]
    )
    analysis = window_analysis(
        np.array(window), elements, coordinates, vdw_radii, **kwargs
    )
    # window_analysis yields (diameter, centre) on success, None otherwise.
    if not analysis:
        return None
    return analysis[0]
def get_window_com(window, elements, coordinates, initial_com, **kwargs):
    """Return a window's centre of mass in the molecule's original frame."""
    vdw_radii = np.array(
        [[atomic_vdw_radius[symbol.upper()]] for symbol in elements]
    )
    analysis = window_analysis(
        np.array(window), elements, coordinates, vdw_radii, **kwargs
    )
    # window_analysis yields (diameter, centre) on success, None otherwise.
    if not analysis:
        return None
    # Correct the window COM for the initial COM of the cage.
    return np.add(analysis[1], initial_com)
def vector_analysis_reversed(vector, coordinates, elements_vdw):
    """Return the outermost atom-surface intersection along a vector.

    Intersects the ray along `vector` (originating at
    center_of_coor(coordinates)) with every atom's vdW sphere and
    returns [distance_from_origin, intersection_point] for the farthest
    exit point, or None (implicitly) if the ray pierces no atom.
    """
    norm_vec = vector/np.linalg.norm(vector)
    intersections = []
    origin = center_of_coor(coordinates)
    # Standard ray-sphere intersection: L = atom centres relative to the
    # ray origin, t_ca = projection of L onto the ray direction, d =
    # perpendicular distance from each atom centre to the ray.
    L = coordinates - origin
    t_ca = np.dot(L, norm_vec)
    d = np.sqrt(np.einsum('ij,ij->i', L, L) - t_ca**2)
    # elements_vdw is a column vector, so the subtraction broadcasts to
    # an NxN matrix; the diagonal holds the per-atom discriminant
    # r_i**2 - d_i**2.
    under_sqrt = elements_vdw**2 - d**2
    diag = under_sqrt.diagonal()
    # Atoms with a positive discriminant are pierced by the ray.
    positions = np.argwhere(diag > 0)
    for pos in positions:
        t_hc = np.sqrt(diag[pos[0]])
        # Entry (t_0) and exit (t_1) parameters along the ray.
        t_0 = t_ca[pos][0] - t_hc
        t_1 = t_ca[pos][0] + t_hc
        P_0 = origin + np.dot(t_0, norm_vec)
        P_1 = origin + np.dot(t_1, norm_vec)
        # Keep the exit point only when it lies farther out than the entry.
        if np.linalg.norm(P_0) < np.linalg.norm(P_1):
            intersections.append([np.linalg.norm(P_1), P_1])
    if intersections:
        # The farthest exit point defines the molecule's outline here.
        intersection = sorted(intersections, reverse=True)[0][1]
        dist_origin = np.linalg.norm(intersection)
        return [dist_origin, intersection]
def find_average_diameter(elements, coordinates, adjust=1, increment=0.1,
                          processes=None, **kwargs):
    """Return average diameter for a molecule.

    The molecule is probed with rays spread over an enclosing sphere;
    for each ray the farthest atom-surface intersection is taken
    (vector_analysis_reversed) and the mean of these distances, doubled,
    is reported as the average diameter.

    Parameters
    ----------
    elements: numpy.array
        Atomic symbols, one per atom.
    coordinates: numpy.array
        Cartesian coordinates, one row per atom.
    adjust: float
        Scales the density of sampling points on the sampling sphere.
    increment: float
        Accepted for API compatibility; not used in this function.
    processes: int, optional
        Number of worker processes; serial analysis when None.

    Returns
    -------
    float
        Twice the mean distance to the molecule's outline.
    """
    # Copy the coordinates as we will perform many operations on them.
    coordinates = deepcopy(coordinates)
    # Center of our cartesian system is always at origin.
    # NOTE(review): `origin` and `initial_com` are not used below.
    origin = np.array([0, 0, 0])
    # Initial center of mass to reverse translation at the end.
    initial_com = center_of_mass(elements, coordinates)
    # We just shift the cage to the origin.
    coordinates = shift_com(elements, coordinates)
    # We create an array of vdw radii of elements (column vector).
    elements_vdw = np.array([[atomic_vdw_radius[x.upper()]] for x in elements])
    # We calculate maximum diameter of a molecule to determine the radius
    # of a sampling sphere necessary to enclose the whole molecule.
    # NOTE(review): unlike find_windows, the value is NOT halved here —
    # presumably intentional so sampling rays extend beyond the molecule
    # for the reversed analysis; confirm against the callers.
    shpere_radius = max_dim(elements, coordinates)[2]
    sphere_surface_area = 4 * np.pi * shpere_radius**2
    # Here we determine the number of sampling points necessary for a fine
    # sampling. Smaller molecules require a finer density of sampling
    # points on the sampling sphere's surface, whereas larger require less.
    # The 250 factor was specifically determined to produce close to
    # 1 sampling point /Angstrom^2 for a sphere of radius ~ 24 Angstrom.
    # The adjust factor tunes how fine the sampling is.
    number_of_points = int(np.log10(sphere_surface_area) * 250 * adjust)
    # Here I use code by <NAME> for spreading points on a sphere
    # (golden-angle spiral): http://blog.marmakoide.org/?p=1
    golden_angle = np.pi * (3 - np.sqrt(5))
    theta = golden_angle * np.arange(number_of_points)
    z = np.linspace(1 - 1.0 / number_of_points, 1.0 / number_of_points - 1.0,
                    number_of_points)
    radius = np.sqrt(1 - z * z)
    points = np.zeros((number_of_points, 3))
    points[:, 0] = radius * np.cos(theta) * shpere_radius
    points[:, 1] = radius * np.sin(theta) * shpere_radius
    points[:, 2] = z * shpere_radius
    # Here we analyse the vectors and retain the ones that create the molecule
    # outline.
    if processes:
        pool = Pool(processes=processes)
        parallel = [
            pool.apply_async(
                vector_analysis_reversed,
                args=(
                    point, coordinates, elements_vdw)
            ) for point in points
        ]
        results = [p.get() for p in parallel if p.get() is not None]
        pool.terminate()
    else:
        results = [
            vector_analysis_reversed(
                point, coordinates, elements_vdw)
            for point in points
        ]
    results_cleaned = [x[0] for x in results if x is not None]
    return np.mean(results_cleaned)*2
def vector_analysis_pore_shape(vector, coordinates, elements_vdw):
    """Return the innermost atom-surface intersection along a vector.

    Intersects the ray along `vector` (originating at
    center_of_coor(coordinates)) with every atom's vdW sphere and
    returns the entry point closest to the origin — a point on the
    pore's surface — or None (implicitly) if the ray pierces no atom.
    """
    norm_vec = vector/np.linalg.norm(vector)
    intersections = []
    origin = center_of_coor(coordinates)
    # Standard ray-sphere intersection: L = atom centres relative to the
    # ray origin, t_ca = projection of L onto the ray direction, d =
    # perpendicular distance from each atom centre to the ray.
    L = coordinates - origin
    t_ca = np.dot(L, norm_vec)
    d = np.sqrt(np.einsum('ij,ij->i', L, L) - t_ca**2)
    # elements_vdw is a column vector, so the subtraction broadcasts to
    # an NxN matrix; the diagonal holds the per-atom discriminant
    # r_i**2 - d_i**2.
    under_sqrt = elements_vdw**2 - d**2
    diag = under_sqrt.diagonal()
    # Atoms with a positive discriminant are pierced by the ray.
    positions = np.argwhere(diag > 0)
    for pos in positions:
        t_hc = np.sqrt(diag[pos[0]])
        # Entry (t_0) and exit (t_1) parameters along the ray.
        t_0 = t_ca[pos][0] - t_hc
        t_1 = t_ca[pos][0] + t_hc
        P_0 = origin + np.dot(t_0, norm_vec)
        P_1 = origin + np.dot(t_1, norm_vec)
        # print(np.linalg.norm(P_0), np.linalg.norm(P_1))
        # Keep the entry point only when it lies closer in than the exit.
        if np.linalg.norm(P_0) < np.linalg.norm(P_1):
            intersections.append([np.linalg.norm(P_0), P_0])
    if intersections:
        # The nearest entry point outlines the pore surface along this ray.
        return sorted(intersections)[0][1]
def calculate_pore_shape(elements, coordinates, adjust=1, increment=0.1,
                         **kwargs):
    """Return points outlining the shape of a molecule's intrinsic pore.

    The molecule is centred at the origin and probed with rays spread
    evenly over an enclosing sphere; for each ray the atom-surface entry
    point closest to the centre is collected
    (vector_analysis_pore_shape).

    Parameters
    ----------
    elements: numpy.array
        Atomic symbols, one per atom.
    coordinates: numpy.array
        Cartesian coordinates, one row per atom.
    adjust: float
        Scales the density of sampling points on the sampling sphere.
    increment: float
        Accepted for API compatibility; not used in this function.

    Returns
    -------
    numpy.array
        Cartesian coordinates of the pore-outlining points.

    Notes
    -----
    Fixes over the previous revision: the docstring wrongly advertised
    an average diameter, and dead code (an unused KDTree/eps estimate,
    an unused dummy element array and unused centre-of-mass locals) has
    been removed without changing the returned value.
    """
    # Copy the coordinates as we will perform many operations on them.
    coordinates = deepcopy(coordinates)
    # Shift the cage to the origin.
    coordinates = shift_com(elements, coordinates)
    # We create an array of vdw radii of elements (column vector).
    elements_vdw = np.array([[atomic_vdw_radius[x.upper()]] for x in elements])
    # Radius of a sampling sphere necessary to enclose the whole molecule.
    sphere_radius = max_dim(elements, coordinates)[2] / 2
    sphere_surface_area = 4 * np.pi * sphere_radius**2
    # Number of sampling points scaled to stay close to 1 point/Angstrom^2
    # for a sphere of radius ~24 Angstrom; `adjust` tunes the density.
    number_of_points = int(np.log10(sphere_surface_area) * 250 * adjust)
    # Spread points evenly on a sphere (golden-angle spiral):
    # http://blog.marmakoide.org/?p=1
    golden_angle = np.pi * (3 - np.sqrt(5))
    theta = golden_angle * np.arange(number_of_points)
    z = np.linspace(1 - 1.0 / number_of_points, 1.0 / number_of_points - 1.0,
                    number_of_points)
    radius = np.sqrt(1 - z * z)
    points = np.zeros((number_of_points, 3))
    points[:, 0] = radius * np.cos(theta) * sphere_radius
    points[:, 1] = radius * np.sin(theta) * sphere_radius
    points[:, 2] = z * sphere_radius
    # Analyse every sampling vector; rays that never pierce an atom
    # return None and are filtered out.
    results = [
        vector_analysis_pore_shape(point, coordinates, elements_vdw)
        for point in points
    ]
    return np.array([x for x in results if x is not None])
def circumcircle_window(coordinates, atom_set):
    """Return the radius and centre of the window spanned by three atoms.

    The circumcircle of the triangle formed by the three atoms indexed in
    ``atom_set`` is computed; the carbon van der Waals radius (1.70 A) is
    subtracted from the circumradius (following Holden et al., the triad
    is assumed to consist of carbon atoms).
    """
    p1, p2, p3 = (np.array(coordinates[int(i)]) for i in atom_set[:3])
    # Triangle edge lengths (each opposite the correspondingly named vertex).
    edge_a = np.linalg.norm(p3 - p2)
    edge_b = np.linalg.norm(p3 - p1)
    edge_c = np.linalg.norm(p2 - p1)
    half_perimeter = 0.5 * (edge_a + edge_b + edge_c)
    # Heron's formula gives the triangle area; the classic identity
    # R = abc / (4 * area) then yields the circumradius.
    area = np.sqrt(half_perimeter
                   * (half_perimeter - edge_a)
                   * (half_perimeter - edge_b)
                   * (half_perimeter - edge_c))
    window_radius = edge_a * edge_b * edge_c / (4 * area) - 1.70
    # Barycentric weights of the circumcentre.
    w1 = edge_a ** 2 * (edge_b ** 2 + edge_c ** 2 - edge_a ** 2)
    w2 = edge_b ** 2 * (edge_a ** 2 + edge_c ** 2 - edge_b ** 2)
    w3 = edge_c ** 2 * (edge_a ** 2 + edge_b ** 2 - edge_c ** 2)
    centre = np.column_stack((p1, p2, p3)).dot(np.hstack((w1, w2, w3)))
    centre /= w1 + w2 + w3
    return window_radius, centre
def circumcircle(coordinates, atom_sets):
    """Compute window diameters and centres for every atom triad.

    Parameters
    ----------
    coordinates : array-like
        Cartesian coordinates of all atoms.
    atom_sets : sequence
        Sequence of triads of atom indices, one per window.

    Returns
    -------
    tuple of (list, list)
        Pore-limiting diameters (2 * radius) and the corresponding window
        centres of mass, in the same order as ``atom_sets``.
    """
    pld_diameter_list = []
    pld_com_list = []
    # Iterate directly over the triads instead of the previous manual
    # while-loop with an explicit index counter.
    for atom_set in atom_sets:
        radius, com = circumcircle_window(coordinates, atom_set)
        pld_diameter_list.append(radius * 2)
        pld_com_list.append(com)
    return pld_diameter_list, pld_com_list
| 1.976563 | 2 |
Algorithms/Graph/Python/KrustalAlgorithm.py | nipun2000/algorithms-in-C-Cplusplus-Java-Python-JavaScript | 30 | 12764003 | """Kruskal's algorithm for MST:
KRUSKAL(G):
1 A = ∅
2 foreach v ∈ G.V:
3 MAKE-SET(v)
4 foreach (u, v) in G.E ordered by weight(u, v), increasing:
5 if FIND-SET(u) ≠ FIND-SET(v):
6 A = A ∪ {(u, v)}
7 UNION(u, v)
8 return A
"""
from collections import defaultdict
class Graph:
    """Undirected weighted graph with Kruskal's minimum-spanning-tree search."""

    def __init__(self, vertices):
        self.V = vertices  # number of vertices, labelled 0 .. vertices-1
        self.graph = []    # edge list, each entry is [u, v, weight]

    def add_edge(self, u, v, w):
        """Add an undirected edge between ``u`` and ``v`` with weight ``w``."""
        self.graph.append([u, v, w])

    def find(self, parent, i):
        """Return the representative (root) of ``i``'s set.

        Applies path compression so repeated lookups are near O(1).
        """
        if parent[i] != i:
            # Point i directly at the root to flatten the tree.
            parent[i] = self.find(parent, parent[i])
        return parent[i]

    def union(self, parent, rank, v1, v2):
        """Merge the sets containing ``v1`` and ``v2`` (union by rank)."""
        root1 = self.find(parent, v1)
        root2 = self.find(parent, v2)
        if rank[root1] < rank[root2]:
            parent[root1] = root2
        elif rank[root1] > rank[root2]:
            parent[root2] = root1
        else:
            # Equal ranks: pick root1 arbitrarily and bump its rank.
            parent[root2] = root1
            rank[root1] += 1

    def krustal(self):
        """Print and return the edges of a minimum spanning tree.

        Returns the MST as a list of ``[u, v, weight]`` edges (previously
        the method only printed them; returning the list is
        backward-compatible). Assumes the graph is connected.
        """
        result = []
        edge_idx = 0   # cursor into the weight-sorted edge list
        taken = 0      # edges accepted into the MST so far
        # Kruskal: consider edges in non-decreasing weight order.
        self.graph = sorted(self.graph, key=lambda item: item[2])
        # Every vertex starts in its own singleton set.
        parent = list(range(self.V))
        rank = [0] * self.V
        # A spanning tree over V vertices has exactly V - 1 edges.
        while taken < self.V - 1:
            u, v, w = self.graph[edge_idx]
            edge_idx += 1
            x = self.find(parent, u)
            y = self.find(parent, v)
            # Accept the edge only if it does not create a cycle.
            if x != y:
                taken += 1
                result.append([u, v, w])
                self.union(parent, rank, x, y)
        print("MST:")
        for u, v, weight in result:
            print("{} -- {} -> {}".format(u, v, weight))
        return result
def _demo():
    """Build the example graph from the module docstring and print its MST."""
    g = Graph(4)
    g.add_edge(0, 1, 10)
    g.add_edge(0, 2, 6)
    g.add_edge(0, 3, 5)
    g.add_edge(1, 3, 15)
    g.add_edge(2, 3, 4)
    g.krustal()


if __name__ == "__main__":
    # Run the demo only when executed as a script, not on import.
    _demo()
| 3.515625 | 4 |
setup.py | Giphy/crowdflower | 0 | 12764004 | <filename>setup.py
import codecs
from setuptools import setup, find_packages
# Read the long description once at module import; utf-8-sig strips a BOM.
with codecs.open('README.rst', encoding='utf-8-sig') as f:
    LONG_DESCRIPTION = f.read()

# Read the license text with a context manager so the file handle is
# closed deterministically (the previous bare open() leaked it).
with codecs.open('LICENSE', encoding='utf-8') as f:
    LICENSE = f.read()

setup(
    name='crowdflower',
    version='0.0.5',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/chbrown/crowdflower',
    keywords='crowdflower crowdsourcing api client',
    description='Crowdflower API - Python Client',
    long_description=LONG_DESCRIPTION,
    license=LICENSE,
    packages=find_packages(),
    include_package_data=True,
    classifiers=[
        # https://pypi.python.org/pypi?:action=list_classifiers
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
    ],
    install_requires=[
        'requests>=2.0.0'
    ],
    entry_points={
        'console_scripts': [
        ],
    },
)
| 1.320313 | 1 |
refer360annotation/.TRASH/0004_auto_20190305_1545.py | volkancirik/refer360_backend | 0 | 12764005 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2019-03-05 15:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: promotes Image.imageurl and
    # Turker.turkerid to primary keys of their respective tables.

    dependencies = [
        ('catalog', '0003_merge_20190305_1545'),
    ]
    operations = [
        migrations.AlterField(
            model_name='image',
            name='imageurl',
            # URL becomes the primary key; serialize=False excludes it from
            # natural-key serialization (standard for PKs).
            field=models.URLField(primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='turker',
            name='turkerid',
            # MTurk worker id string becomes the primary key.
            field=models.CharField(max_length=100, primary_key=True, serialize=False),
        ),
    ]
| 1.5 | 2 |
ngraph/transformers/hetr/rpc_client.py | NervanaSystems/ngraph-python | 18 | 12764006 | import grpc
from six import iteritems
from . import hetr_pb2
from . import hetr_pb2_grpc
from ngraph.op_graph.serde.serde import op_to_protobuf, tensor_to_protobuf,\
pb_to_tensor, is_scalar_type, assign_scalar, protobuf_scalar_to_python
import logging
_TIMEOUT_SECONDS = 600
logger = logging.getLogger(__name__)
def is_channel_ready(channel):
    """Return True if the gRPC channel is connectable (IDLE or READY)."""
    # check_connectivity_state(True) also nudges the channel to connect.
    state = channel._channel.check_connectivity_state(True)
    return state in (0, 2)  # 0: IDLE, 2: READY
class RPCComputationClient(object):
    """Client handle for a single remote computation on a HeTr server.

    Wraps the asynchronous FeedInput / GetResults RPC pair for the
    computation identified by ``comp_id``.

    NOTE(review): ``self.returns`` (used in get_results) is never assigned
    in this class; the owner that creates this client is expected to set it
    to a mapping of {op: result_position} before get_results is called —
    confirm against the caller.
    """
    def __init__(self, comp_id, stub):
        self.comp_id = comp_id
        self.RPC = stub
        # Future of the in-flight FeedInput RPC; None when nothing pending.
        self.feed_input_response_future = None
    def feed_input(self, values):
        """Asynchronously send input ``values`` (scalars or tensors)."""
        logger.debug("client: feed input")
        pb_values = []
        for v in values:
            pb_val = hetr_pb2.Value()
            # Scalars and tensors are serialized into different oneof fields.
            if is_scalar_type(v):
                assign_scalar(pb_val.scalar, v)
            else:
                pb_val.tensor.CopyFrom(tensor_to_protobuf(v))
            pb_values.append(pb_val)
        self.feed_input_response_future = self.RPC.FeedInput.future(
            hetr_pb2.FeedInputRequest(
                comp_id=self.comp_id,
                values=pb_values),
            _TIMEOUT_SECONDS)
    def get_results(self):
        """Block on the pending feed, then fetch and deserialize results.

        Returns a dict keyed by op (per ``self.returns``).
        Raises RuntimeError if called before feed_input or on RPC failure.
        """
        logger.debug("client: get results")
        if self.feed_input_response_future is None:
            raise RuntimeError("call feed_input before get_results")
        # First make sure the previous FeedInput completed successfully.
        response = self.feed_input_response_future.result()
        self.feed_input_response_future = None
        if not response.status:
            raise RuntimeError("RPC feed_input request failed: {}".format(response.message))
        response = self.RPC.GetResults(
            hetr_pb2.GetResultsRequest(comp_id=self.comp_id),
            _TIMEOUT_SECONDS)
        if not response.status:
            raise RuntimeError("RPC get_results request failed: {}".format(response.message))
        return_list = []
        for r in response.results:
            # Mirror of the feed_input serialization: scalar or tensor.
            if r.HasField('scalar'):
                return_list.append(protobuf_scalar_to_python(r.scalar))
            else:
                return_list.append(pb_to_tensor(r.tensor))
        return_dict = {op: return_list[mypos]
                       for (op, mypos) in iteritems(self.returns)}
        return return_dict
class RPCTransformerClient(object):
    """gRPC client wrapper around a remote HeTr transformer service.

    Manages the channel to the server, build/close of the remote
    transformer, and asynchronous creation of computations.
    """
    def __init__(self, transformer_type, server_address='localhost'):
        logger.debug("client: init, transformer: %s, server_address: %s",
                     transformer_type, server_address)
        self.transformer_type = transformer_type
        self.server_address = server_address
        self.computations = dict()
        self.computation_builds = dict()
        self.comp_id_ctr = 0
        self.is_trans_built = False
        # Futures for in-flight asynchronous RPCs (None when idle).
        self.computation_response_future = None
        self.close_transformer_response_future = None

    def set_server_address(self, address):
        """Set the server address; ignored once the transformer is built."""
        if self.is_trans_built:
            logger.debug("client: set_server_address: transformer is already built, \
                skip server address")
            return
        self.server_address = address

    def build_transformer(self):
        """Open the gRPC channel and build the remote transformer.

        Idempotent: returns immediately if already built. Raises
        RuntimeError if the channel is not ready or the remote build fails.
        """
        logger.debug("client: build_transformer, server address: %s", self.server_address)
        if self.is_trans_built:
            logger.debug("client: build_transformer: transformer is already built")
            return
        # Unlimited message sizes: graphs/tensors can exceed the defaults.
        options = [('grpc.max_send_message_length', -1), ('grpc.max_receive_message_length', -1)]
        channel = grpc.insecure_channel(self.server_address, options=options)
        if not is_channel_ready(channel):
            raise RuntimeError("gRPC channel is not ready...")
        self.RPC = hetr_pb2_grpc.HetrStub(channel)

        # Wait for any pending close of a previous transformer to finish
        # before building a new one.
        if self.close_transformer_response_future is not None:
            response = self.close_transformer_response_future.result()
            if not response.status:
                raise RuntimeError("RPC close_transformer request failed: {}"
                                   .format(response.message))
            self.is_trans_built = False
            self.close_transformer_response_future = None

        response = self.RPC.BuildTransformer(
            hetr_pb2.BuildTransformerRequest(transformer_type=self.transformer_type),
            _TIMEOUT_SECONDS)
        if response.status:
            self.is_trans_built = True
        else:
            self.is_trans_built = False
            raise RuntimeError("RPC build_transformer request failed: {}".format(response.message))

    def create_computation(self, pb_graph, returns, placeholders):
        """Asynchronously send the serialized graph to the server.

        ``pb_graph`` yields (ops, edges) chunks; returns/placeholders are
        attached only to the first streamed message.
        """
        logger.debug("client: create_computation")

        def make_computation_request(pb_ops, pb_edges, pb_returns=None, pb_placeholders=None):
            if pb_returns or pb_placeholders:
                return hetr_pb2.ComputationRequest(
                    ops=pb_ops,
                    edges=pb_edges,
                    returns=pb_returns,
                    placeholders=pb_placeholders)
            else:
                return hetr_pb2.ComputationRequest(
                    ops=pb_ops,
                    edges=pb_edges)

        def generate_messages():
            pb_returns = [op_to_protobuf(o) for o in returns]
            pb_placeholders = [op_to_protobuf(o) for o in placeholders]
            for pb_ops, pb_edges in pb_graph:
                msg = make_computation_request(
                    pb_ops, pb_edges, pb_returns, pb_placeholders)
                yield msg
                # Only the first message carries returns/placeholders.
                pb_returns, pb_placeholders = [], []

        if not self.is_trans_built:
            raise RuntimeError("call build_transformer before create_computation")
        self.computation_response_future = self.RPC.Computation.future(
            generate_messages(), _TIMEOUT_SECONDS)

    def get_computation(self):
        """Block on the pending create_computation and return its client.

        Raises RuntimeError if create_computation was not called first or
        the server rejected the computation.
        """
        logger.debug("client: get_computation")
        if self.computation_response_future is None:
            raise RuntimeError("call create_computation before get_computation")
        response = self.computation_response_future.result()
        self.computation_response_future = None
        if response.comp_id >= 0:
            rpcComputationClient = RPCComputationClient(response.comp_id, self.RPC)
            return rpcComputationClient
        else:
            raise RuntimeError("RPC computation request failed: {}".format(response.message))

    def close_transformer(self):
        """Asynchronously request the remote transformer to be torn down."""
        logger.debug("client: close_transformer")
        if self.is_trans_built:
            self.close_transformer_response_future = self.RPC.CloseTransformer.future(
                hetr_pb2.CloseTransformerRequest(),
                _TIMEOUT_SECONDS)

    def close(self):
        """Finish any pending transformer close, then send a final Close.

        The final Close is best-effort: delivery failures are ignored.
        """
        logger.debug("client: close")
        if self.close_transformer_response_future is not None:
            response = self.close_transformer_response_future.result()
            if not response.status:
                raise RuntimeError("RPC close_transformer request failed: {}"
                                   .format(response.message))
            self.is_trans_built = False
            self.close_transformer_response_future = None
        try:
            self.RPC.Close.future(
                hetr_pb2.CloseRequest(),
                _TIMEOUT_SECONDS)
        except Exception:
            # Bug fix: the previous bare `except:` also swallowed SystemExit
            # and KeyboardInterrupt; only ordinary errors are best-effort.
            logger.debug("client: final Close request failed (ignored)", exc_info=True)
| 2 | 2 |
src/recursion/binode.py | seahrh/coding-interview | 0 | 12764007 | <gh_stars>0
"""
BiNode: Consider a simple data structure called BiNode, which has pointers to two other nodes. The
data structure BiNode could be used to represent both a binary tree (where node1 is the left node
and node2 is the right node) or a doubly linked list (where node1 is the previous node and node2
is the next node). Implement a method to convert a binary search tree (implemented with BiNode)
into a doubly linked list. The values should be kept in order and the operation should be performed
in place (that is, on the original data structure).
(17.12, p571)
SOLUTION: recursion
Left and right halves of the tree form their own "sub-parts" of the linked list (i.e., they
appear consecutively in the linked list). So, if we recursively converted the left and right
subtrees to a doubly linked list, we can build the final linked list from those parts.
How to return the head and tail of a linked list? Return the head of a doubly linked list.
Tree as doubly linked list: form the triangle where root is middle of the list.
mid
// \\
head tail
If left subtree is not empty, left.next = root, root.prev = left
If right subtree is not empty, root.next = right, right.prev = root
If both left and right subtrees are not empty, right.next = left, left.prev = right
O(n) time: each node is touched an average of O(1) times.
O(n) space: depth of call stack
"""
class BiNode:
    """A node with two links, usable as a BST node or a doubly linked
    list node (left = previous, right = next)."""

    def __init__(self, key, left=None, right=None):
        self.key = key
        self.left = left
        self.right = right

    def append(self, other):
        """Link ``other`` directly after this node (doubly linked)."""
        self.right = other
        other.left = self

    @staticmethod
    def as_circular_linked_list(root):
        """Recursively flatten the BST rooted at ``root`` into a circular
        doubly linked list, in place, and return its head (smallest key)."""
        if root is None:
            return None
        left_list = BiNode.as_circular_linked_list(root.left)
        right_list = BiNode.as_circular_linked_list(root.right)
        # Leaf: a one-element circular list pointing at itself.
        if left_list is None and right_list is None:
            root.left = root
            root.right = root
            return root
        # Capture the right list's tail before any links are rewired.
        right_tail = right_list.left if right_list is not None else None
        # Splice root after the tail of whichever sublist precedes it
        # (the left list when present, otherwise the right list's tail).
        predecessor = left_list.left if left_list is not None else right_tail
        predecessor.append(root)
        # Splice root before its successor sublist.
        successor = right_list if right_list is not None else left_list
        root.append(successor)
        # Close the cycle: the right tail wraps around to the left head.
        if left_list is not None and right_list is not None:
            right_tail.append(left_list)
        return left_list if left_list is not None else root

    @staticmethod
    def as_linked_list(root):
        """Convert the BST to a flat (non-circular) doubly linked list and
        return its head, or None for an empty tree."""
        head = BiNode.as_circular_linked_list(root)
        if head is None:
            return None
        # Break the wrap-around links between head and tail.
        head.left.right = None
        head.left = None
        return head
| 4.125 | 4 |
democratic.py | rihp/boteki | 1 | 12764008 | def count_votes(reactions):
"""
takes in a string with reactions from discord
parses it and return a tuple of the toal votes on each alternative
"""
positive_vote = "👍"
negative_vote = "👎"
alternatives = [positive_vote, negative_vote]
results={}
for a in alternatives:
position = reactions.find(a) + 17
votes= reactions[position:position+2]
if votes[-1] == ">": votes = votes[:-1]
results[a] = votes
return (results[positive_vote], results[negative_vote])
def net_score(results):
    """Return the net vote score: positive votes minus negative votes.

    Parameters
    ----------
    results : tuple
        ``(positive, negative)`` vote counts; each entry may be an int or
        a numeric string (as produced by ``count_votes``).

    Raises
    ------
    TypeError
        If ``results`` is not a tuple (with an explanatory message, unlike
        the previous bare ``raise TypeError``).
    """
    # isinstance is the idiomatic type check (type(x) != tuple rejects
    # subclasses and reads poorly).
    if not isinstance(results, tuple):
        raise TypeError(
            "results must be a tuple, got {}".format(type(results).__name__))
    return int(results[0]) - int(results[1])
src/neon/frontend/callbacks.py | MUTTERSCHIFF/ngraph-neon | 13 | 12764009 | <reponame>MUTTERSCHIFF/ngraph-neon<filename>src/neon/frontend/callbacks.py
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import division, print_function, absolute_import
import h5py
import os
import logging
import time
import numpy as np
from tqdm import tqdm
from enum import Enum
from timeit import default_timer
logger = logging.getLogger(__name__)
class CallbackPhase(Enum):
    # Lifecycle hooks a Callback can react to; CallbackContainer passes
    # one of these values on every dispatch.
    train_pre_ = 0       # before the whole training run
    train_post = 1       # after the whole training run
    interval_pre_ = 2    # before an evaluation interval
    interval_post = 3    # after an evaluation interval
    minibatch_pre_ = 4   # before each minibatch
    minibatch_post = 5   # after each minibatch
def make_default_callbacks(transformer, output_file, frequency, train_computation,
                           total_iterations, eval_set=None,
                           eval_feed_wrapper=None, loss_computation=None,
                           enable_top5=False, use_progress_bar=True):
    """Assemble the standard set of training callbacks.

    Always includes cost tracking and periodic logging; optionally adds a
    periodic-loss callback (when ``eval_set`` is given) and a tqdm
    progress bar.
    """
    callbacks = [TrainCostCallback(train_computation),
                 TrainLoggerCallback(frequency)]
    if eval_set is not None:
        callbacks.append(LossCallback(frequency,
                                      eval_set,
                                      eval_feed_wrapper,
                                      loss_computation,
                                      enable_top5))
    if use_progress_bar:
        callbacks.append(ProgressCallback())
    container = CallbackContainer(transformer, output_file, total_iterations)
    for cb in callbacks:
        container.append(cb)
    return container
class CallbackContainer(object):
    """Holds the callback list and the shared HDF5 buffer for callback data.

    Calling the container with a phase dispatches to every registered
    callback, passing the transformer and the shared ``callback_data``
    HDF5 file.
    """

    def __init__(self, transformer, output_file, total_iterations, callback_list=None):
        self.transformer = transformer
        # Bug fix: the previous mutable default argument (callback_list=[])
        # was a single shared list that append() mutated across every
        # container instance. Copy the caller's list (or start empty).
        self._callbacks = list(callback_list) if callback_list is not None else []
        if output_file is None:
            if hasattr(self, 'callback_data'):
                del self.callback_data
            # In-memory file (driver='core', no backing store); the name
            # only has to be unique per instance. Bug fix: self.name was
            # never defined and raised AttributeError here.
            self.callback_data = h5py.File('in-memory-callbacks-%d' % id(self),
                                           driver='core', backing_store=False)
        else:
            if os.path.isfile(output_file):
                # logger.warn() is deprecated; warning() is the supported API.
                logger.warning("Overwriting output file %s", output_file)
                os.remove(output_file)
            self.callback_data = h5py.File(output_file, "w")
        config = self.callback_data.create_group('config')
        config.attrs['total_iterations'] = total_iterations

    def __del__(self):
        # Best-effort close; the file may already be closed or never created.
        try:
            self.callback_data.close()
        except Exception:
            pass

    def __iter__(self):
        return iter(self._callbacks)

    def append(self, cb):
        """
        Appends a callback
        Arguments:
            cb: The callback object to append.
        """
        self._callbacks.append(cb)

    def insert(self, index, cb):
        """
        Inserts a callback
        Arguments:
            index : Index to insert at
            cb : The callback object to insert
        """
        self._callbacks.insert(index, cb)

    def __call__(self, phase, data=None, idx=None):
        """Dispatch ``phase`` (a CallbackPhase) to every registered callback."""
        for c in self._callbacks:
            c(self.transformer, self.callback_data, phase, data, idx)
class Callback(object):
    """No-op base class; concrete callbacks override ``__call__``."""

    def __call__(self, transformer, callback_data, phase, data, idx):
        """Hook invoked by CallbackContainer on every phase; default does nothing."""
class TrainCostCallback(Callback):
    """
    Callback for computing average training cost periodically during training.
    """
    def __init__(self, computation):
        # computation: callable mapping a minibatch feed dict to a result
        # dict that contains 'batch_cost'.
        self.computation = computation
    def __call__(self, transformer, callback_data, phase, data, idx):
        if phase == CallbackPhase.train_pre_:
            transformer.set_output_statistics_file(callback_data)
            iterations = callback_data['config'].attrs['total_iterations']
            # Pre-allocate one cost entry per training iteration.
            callback_data.create_dataset("cost/train", (iterations,))
            # clue in the data reader to use the 'minibatch' time_markers
            callback_data['cost/train'].attrs['time_markers'] = 'minibatch'
        elif phase == CallbackPhase.minibatch_post:
            # This is where the training function is actually called
            callback_data['cost/train'][idx] = self.computation(data)['batch_cost']
        elif phase == CallbackPhase.train_post:
            transformer.save_output_statistics_file()
class TrainSaverCallback(Callback):
    """Periodically saves model weights during training."""
    def __init__(self, saver, filename, frequency):
        self.saver = saver          # object exposing save(filename=...)
        self.filename = filename    # base filename; "_<iteration>" is appended
        self.frequency = frequency  # save every `frequency` minibatches
    def __call__(self, transformer, callback_data, phase, data, idx):
        if phase == CallbackPhase.minibatch_post:
            if ((idx + 1) % self.frequency == 0):
                self.saver.save(filename=self.filename + "_" + str(idx))
class FeedAddWrapper:
    """Mutates a feed dictionary before a computation is run.

    Optionally clears the feed and/or inserts
    ``wrapper(step=..., **wrapper_kwargs)`` under the ``holder`` key.
    """

    def __init__(self, wrapper=None, holder=None, wrapper_kwargs=None, clear_feed=False):
        self.wrapper = wrapper                # callable producing the value to feed
        self.holder = holder                  # key under which the value is stored
        self.wrapper_kwargs = wrapper_kwargs  # extra kwargs for `wrapper` (may be None)
        self.clear_feed = clear_feed          # wipe the feed dict before inserting

    def __call__(self, data, step):
        if self.clear_feed:
            data.clear()
        if self.wrapper is not None:
            # Bug fix: a wrapper with no wrapper_kwargs previously crashed
            # ("argument after ** must be a mapping" on None).
            kwargs = self.wrapper_kwargs if self.wrapper_kwargs is not None else {}
            data[self.holder] = self.wrapper(step=step, **kwargs)
class RunTimerCallback(Callback):
    """
    Callback which tracks the total training time.
    """
    def __call__(self, transformer, callback_data, phase, data, idx):
        if phase == CallbackPhase.train_pre_:
            # Record wall-clock start/end timestamps under the time/train
            # group of the shared HDF5 callback data.
            self.timing = callback_data.create_group("time/train")
            self.timing.create_dataset("start_time", (1,), dtype='float64')
            self.timing.create_dataset("end_time", (1,), dtype='float64')
            self.timing['start_time'][0] = time.time()
            self.timing['start_time'].attrs['units'] = 'seconds'
        elif phase == CallbackPhase.train_post:
            self.timing['end_time'][0] = time.time()
            self.timing['end_time'].attrs['units'] = 'seconds'
class ProgressCallback(Callback):
    """
    Callback showing overall training progress via a tqdm progress bar.
    """
    def __call__(self, transformer, callback_data, phase, data, idx):
        if phase == CallbackPhase.train_pre_:
            self.tpbar = tqdm(desc="Train",
                              unit="minibatches",
                              ncols=80,
                              total=callback_data['config'].attrs['total_iterations'])
        elif phase == CallbackPhase.train_post:
            self.tpbar.set_description(desc="Train")
            self.tpbar.close()
        elif phase == CallbackPhase.minibatch_post:
            # Advance one minibatch and show the latest recorded train cost.
            self.tpbar.update(1)
            self.tpbar.set_description(
                desc="Train Cost {:0.4f}".format(callback_data['cost/train'][idx]))
class TrainLoggerCallback(Callback):
    """
    Callback for logging training progress.

    Arguments:
        frequency (int, optional): how often (in minibatches) to log training info.
    """
    def __init__(self, frequency):
        self.frequency = frequency

    def __call__(self, transformer, callback_data, phase, data, idx):
        if phase == CallbackPhase.minibatch_post:
            if ((idx + 1) % self.frequency == 0):
                # Average the cost over the last `frequency` minibatches
                # (indices idx+1-frequency .. idx inclusive).
                # Bug fix: the slice previously ended at `idx`, silently
                # dropping the current minibatch from the average.
                interval = slice(idx + 1 - self.frequency, idx + 1)
                train_cost = callback_data["cost/train"][interval].mean()
                tqdm.write("Interval {} Iteration {} complete. Avg Train cost: {}".format(
                    idx // self.frequency + 1, idx + 1, train_cost))
class LossCallback(Callback):
    """
    Callback for calculating the loss on a given dataset periodically during training.
    Arguments:
        eval_set (NervanaDataIterator): dataset to evaluate
        interval_freq (int, optional): how often (in iterations) to log info.
    """
    def __init__(self, frequency, dataset, eval_feed_wrapper, interval_loss_comp, enable_top5):
        self.frequency = frequency
        self.dataset = dataset
        self.eval_feed_wrapper = eval_feed_wrapper
        self.interval_loss_comp = interval_loss_comp
        self.enable_top5 = enable_top5
    def __call__(self, transformer, callback_data, phase, data, idx):
        if phase == CallbackPhase.train_pre_:
            # Pre-allocate one entry per evaluation interval for every loss
            # reported by the interval computation, plus accuracy/time sets.
            self.total_iterations = callback_data['config'].attrs['total_iterations']
            num_intervals = self.total_iterations // self.frequency
            for loss_name in self.interval_loss_comp.output_keys:
                callback_data.create_dataset("cost/{}".format(loss_name), (num_intervals,))
            callback_data.create_dataset("cost/top_1_acc", (num_intervals,))
            if self.enable_top5:
                callback_data.create_dataset("cost/top_5_acc", (num_intervals,))
            callback_data.create_dataset("time/loss", (num_intervals,))
        elif phase == CallbackPhase.train_post:
            # Final evaluation over the whole dataset after training ends.
            losses = loop_eval(dataset=self.dataset,
                               computation=self.interval_loss_comp,
                               enable_top5=self.enable_top5,
                               eval_feed_wrapper=self.eval_feed_wrapper)
            tqdm.write("Training complete.  Avg losses: {}".format(losses))
        elif phase == CallbackPhase.minibatch_post and ((idx + 1) % self.frequency == 0):
            # Periodic evaluation: record each loss and the wall time spent.
            start_loss = default_timer()
            interval_idx = idx // self.frequency
            losses = loop_eval(dataset=self.dataset,
                               computation=self.interval_loss_comp,
                               enable_top5=self.enable_top5,
                               eval_feed_wrapper=self.eval_feed_wrapper)
            for loss_name, loss in losses.items():
                callback_data["cost/{}".format(loss_name)][interval_idx] = loss
            callback_data["time/loss"][interval_idx] = (default_timer() - start_loss)
            tqdm.write("Interval {} Iteration {} complete.  Avg losses: {}".format(
                interval_idx + 1, idx + 1, losses))
def loop_train(dataset, callbacks, train_feed_wrapper=None):
    """Run the training loop over ``dataset``, dispatching callback phases.

    For each minibatch the optional ``train_feed_wrapper`` may mutate the
    feed dict, the iteration index is recorded under ``data['iteration']``,
    and the pre/post minibatch phases are dispatched.
    """
    callbacks(CallbackPhase.train_pre_)
    step = 0
    for data in dataset:
        if train_feed_wrapper is not None:
            train_feed_wrapper(data=data, step=step)
        data['iteration'] = step
        callbacks(CallbackPhase.minibatch_pre_, data, step)
        callbacks(CallbackPhase.minibatch_post, data, step)
        step += 1
    callbacks(CallbackPhase.train_post)
def loop_eval(dataset, computation, enable_top5=False, eval_feed_wrapper=None):
    """Run ``computation`` over the whole dataset and return mean metrics."""
    dataset.reset()
    all_results = None
    def top_results(inference_prob, data, enable_top5):
        # Derive per-sample top-1 (and optionally top-5) hit flags from the
        # class-probability matrix; np.argsort(axis=0) implies classes run
        # along axis 0 and samples along axis 1.
        if inference_prob is not None:
            top5_sorted = np.argsort(inference_prob, axis=0)[-5:]
            data_tr = data['label'].T  # true labels
            top1_results = np.any(np.equal(data_tr, top5_sorted[-1:]), axis=0)
            if enable_top5:
                top5_results = np.any(np.equal(data_tr, top5_sorted), axis=0)
                return {'top_1_acc': top1_results, 'top_5_acc': top5_results}
            else:
                return {'top_1_acc': top1_results}
    for data in dataset:
        if eval_feed_wrapper is not None:
            eval_feed_wrapper(data=data, step=0)
        data['iteration'] = 0
        results = computation(data)
        # A 'results' entry holds raw class probabilities; replace it with
        # derived accuracy flags.
        if 'results' in results.keys():
            inference_prob = results.pop('results')
            results.update(top_results(inference_prob, data, enable_top5))
        # Accumulate per-sample values for every metric across batches.
        if all_results is None:
            all_results = {k: list(rs) for k, rs in results.items()}
        else:
            for k, rs in results.items():
                all_results[k].extend(list(rs))
    # Truncate to dataset.ndata before averaging — presumably to drop
    # padding samples from the final partial batch; confirm with the
    # dataset implementation.
    reduced_results = {k: np.mean(ar[:dataset.ndata]) for k, ar in all_results.items()}
    return reduced_results
| 1.960938 | 2 |
FWCore/GuiBrowsers/python/Vispa/Share/ParticleDataAccessor.py | NTrevisani/cmssw | 3 | 12764010 | class ParticleDataAccessor(object):
""" This class provides access to the underlying data model.
"""
LINE_STYLE_SOLID = 0
LINE_STYLE_DASH = 1
LINE_STYLE_WAVE = 2
LINE_STYLE_SPIRAL = 3
LINE_VERTEX = 4
def id(self, object):
""" Returns an id to identify given object.
Usually it is sufficient to identify python objects directly with themselves.
Overwrite this function if this is not true for your objects.
"""
return id(object)
def particleId(self, object):
raise NotImplementedError
def isQuark(self, object):
raise NotImplementedError
def isLepton(self, object):
raise NotImplementedError
def isGluon(self, object):
raise NotImplementedError
def isBoson(self, object):
raise NotImplementedError
def color(self, object):
raise NotImplementedError
def lineStyle(self, object):
raise NotImplementedError
def createParticle(self):
raise NotImplementedError
def charge(self, object):
raise NotImplementedError
def linkMother(self, object, mother):
raise NotImplementedError
def linkDaughter(self, object, daughter):
raise NotImplementedError
| 3.203125 | 3 |
nicos_ess/estia/setups/special/collector.py | jkrueger1/nicos | 0 | 12764011 | <gh_stars>0
# NICOS setup file: declares the devices for the cache collector service.
description = 'setup for the NICOS collector'
group = 'special'
devices = dict(
    # Forwards NICOS cache updates to a Kafka topic.
    CacheKafka=device(
        'nicos_ess.devices.cache_kafka_forwarder.CacheKafkaForwarder',
        dev_ignore=['space', 'sample'],
        brokers=configdata('config.KAFKA_BROKERS'),
        output_topic="nicos_cache",
        update_interval=10.
    ),
    # Collector service reading from the local cache and pushing updates
    # through the forwarder declared above.
    Collector=device('nicos.services.collector.Collector',
        cache='localhost:14869',
        forwarders=['CacheKafka'],
    ),
)
| 1.414063 | 1 |
raysect/core/math/tests/test_random.py | vsnever/raysect-source | 2 | 12764012 | <reponame>vsnever/raysect-source<gh_stars>1-10
# Copyright (c) 2014-2020, Dr <NAME>, Raysect Project
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Raysect Project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the core functions of the pseudo random number generator.
"""
import unittest
from raysect.core.math.random import seed, uniform
# generated with seed(1234567890)
_random_reference = [
0.8114659955555504, 0.0260575258293142, 0.21664518027139346, 0.036431793406025315, 0.34039768629173206,
0.5307847417000392, 0.16688396521453341, 0.9019057801125824, 0.8159473905407517, 0.8570265343145624,
0.14347871551095648, 0.6891296327000385, 0.5778067096794773, 0.33066470187437946, 0.6876265206072084,
0.886939227534345, 0.014324369430429362, 0.6746414734836368, 0.7405781756866624, 0.331961408614173,
0.290705874656528, 0.9380405182274753, 0.2709760824131112, 0.31300988656894224, 0.3103875692954393,
0.6280866311578346, 0.2994918127081234, 0.05769585538469579, 0.009078614743584623, 0.8087765866312592,
0.4470374188892262, 0.06707140588005311, 0.5503547474504604, 0.9431449802430566, 0.2588098326024648,
0.4320869116583066, 0.5995417081018991, 0.013438112912753097, 0.8728231064672088, 0.878780254056249,
0.36288545060141253, 0.27384446607131097, 0.6874217254488153, 0.17265752434898118, 0.19655596596529323,
0.9146957191169391, 0.7740513037040511, 0.23969347216349357, 0.3085577835996567, 0.5750617634056108,
0.7607788813988423, 0.5461998954715511, 0.07226744025005805, 0.30272312199484996, 0.07240473306707063,
0.7445783003493487, 0.023204172936409417, 0.3541488588854369, 0.288366130356135, 0.9896030024662023,
0.33897243830956825, 0.19899954974241374, 0.12867372424139678, 0.19636195456378958, 0.3864429701036983,
0.3077401387398464, 0.5799414672883465, 0.43022673663871036, 0.5201589558513544, 0.8257246242158377,
0.7866170888100589, 0.20812523470942856, 0.3580046317178087, 0.8209043595111258, 0.6016131136683351,
0.9215493153953329, 0.4532258113173726, 0.8986509588916062, 0.8333655212557587, 0.1660658459237977,
0.11269071968496114, 0.6231919445394714, 0.04035882401424995, 0.0563724857548058, 0.19756368026802662,
0.1965944584402416, 0.415015687836517, 0.027876686089227776, 0.9996656197629226, 0.4175304119184161,
0.06710393664544856, 0.8922085748965133, 0.028869646904276958, 0.5788803807634426, 0.9682232374298705,
0.6163314615766219, 0.2830323089125376, 0.5029784234700792, 0.8420338434150371, 0.4651270080273381,
0.6557994530936825, 0.3292144780285112, 0.3225072195563581, 0.7399474648179799, 0.48079437548223924,
0.7199262095106047, 0.5975905519484589, 0.9236696093086989, 0.7346397307479617, 0.3592223034718819,
0.561867086066638, 0.03995876009164412, 0.49890487301779407, 0.44713823160917965, 0.7861240452195936,
0.25911075097295855, 0.035083102638025165, 0.36607136391410944, 0.19984528149815695, 0.9500543496932649,
0.23731801886771908, 0.6006092389019372, 0.8294417196340962, 0.052764856825375794, 0.6038433712269217,
0.7949850355716386, 0.6605823678792535, 0.8189544045861851, 0.022260100040748965, 0.576004798924091,
0.9268911541444015, 0.4896501699585003, 0.8873523166370707, 0.03815088769851427, 0.9776353573173021,
0.5145565026412993, 0.32552527078939697, 0.4911956494196167, 0.8951990538514405, 0.7885065235998865,
0.6625825765906797, 0.3198923578390026, 0.4894245309430685, 0.8514988780345353, 0.31964651050694937,
0.34732693542699866, 0.6386792428329865, 0.9352495191889876, 0.46790614266368125, 0.011411382010219517,
0.9775139174995728, 0.3862808053921021, 0.8529921244254951, 0.2605262061279382, 0.6922576403464112,
0.15045572118688688, 0.41161431623563194, 0.7126021992146101, 0.4171153209324736, 0.110043850688747,
0.17730501058976444, 0.25657426179278153, 0.2821384093255612, 0.8597361850849774, 0.3935528040200792,
0.8949494271905098, 0.5441559448691042, 0.27510096681653573, 0.5223954176767841, 0.17454850986131998,
0.8883671592862563, 0.31112566318232215, 0.5560927365286331, 0.14008244909651368, 0.009760829072095967,
0.8525804007596959, 0.6921346351519599, 0.38891810962766127, 0.17941655286803415, 0.5904814324711654,
0.09079027074688151, 0.0942749621536777, 0.10551209561705677, 0.3409843480241793, 0.11546679641182545,
0.5460531038179809, 0.7292181368445171, 0.9041051293305149, 0.07400110842637242, 0.8569642064071208,
0.2582827697271819, 0.17032689124026934, 0.7235634272987398, 0.899745025204368, 0.5215209486597834,
0.6991051509833436, 0.5183070756019655, 0.8666000508220529, 0.057923176060497794, 0.5062702174339315,
0.8723895729920971, 0.6948914830488712, 0.6489433851279749, 0.5306528480010244, 0.8225027378406932,
0.7838244572343968, 0.691544103178456, 0.7831551000165751, 0.6742553578920283, 0.28556616290828807,
0.9441828882154812, 0.2897573727609368, 0.27092040074004464, 0.5221435897948234, 0.5147483528375717,
0.4434962915318513, 0.5514590705654433, 0.49042130275752494, 0.3090263245842645, 0.05335198870897018,
0.303566210158679, 0.39691290229966336, 0.6565826934102237, 0.6843076816775873, 0.9390799941122242,
0.4565438740955222, 0.5834686885918711, 0.6345292041511743, 0.03435573177180418, 0.8570048259135065,
0.9945422765096777, 0.33539171490048414, 0.9234474376753186, 0.36969772769699494, 0.14248787800080065,
0.060836330283883466, 0.6876555335527527, 0.2524343868154566, 0.7003003061613287, 0.5423027600372561,
0.8142644438311161, 0.16459341480713097, 0.5896280051306514, 0.9100813849651408, 0.8464665169207302,
0.29377649889194857, 0.4917633089616589, 0.8615633378217579, 0.940272146073147, 0.08225016215748171,
0.8914723950338176, 0.7485079492976437, 0.16549658948390567, 0.5315626289230118, 0.9450558307395565,
0.044836457646848404, 0.6589484995600837, 0.6639350986521546, 0.40094076346047536, 0.621552238087893,
0.8463783046116087, 0.5279429414308022, 0.8733012361951688, 0.3753618518585796, 0.6199087760073151,
0.6638980416420605, 0.9519166805682214, 0.7714573635177544, 0.532306948913898, 0.9950699313841207,
0.6023812221802153, 0.00671953993578811, 0.41257363188340057, 0.22374577891859204, 0.9587200148497917,
0.5656973690886793, 0.12295446157634149, 0.14384586782995712, 0.2999263339675363, 0.19016587334152224,
0.7371161197609226, 0.963678596691637, 0.19459831960744067, 0.023732861037621178, 0.9022346289689608,
0.817551073855459, 0.8565204343428555, 0.34083263803209185, 0.3147716178439317, 0.22924021573146747,
0.006945465955912389, 0.34414493802070234, 0.023818385299195555, 0.035214663181623806, 0.6928101211795044,
0.05857375424125044, 0.06501678645231457, 0.7381578101112583, 0.8018121479956365, 0.4426125308472515,
0.5334659795706291, 0.241504539949834, 0.34846402489916717, 0.3160766708257674, 0.6820437033453958,
0.48572662123104837, 0.2276401005217804, 0.8245514413657292, 0.5519253690402882, 0.251912099236969,
0.40793686767585957, 0.7151296734180584, 0.9824217073234381, 0.11529480920795265, 0.8723676328924153,
0.57771290763045, 0.871023854555776, 0.24685776479884225, 0.22934957742352136, 0.6002908335180984,
0.8952992211384087, 0.3713611310051842, 0.56407375667193, 0.5953393802057543, 0.29828973376345236,
0.11966741726342045, 0.6987001221692178, 0.7878557907165742, 0.06390695747815511, 0.08355291052833858,
0.7425821939621605, 0.9566467958396809, 0.1342316008404204, 0.33043778095310317, 0.5961385752911869,
0.7162577188447022, 0.41151980274036737, 0.6287447927091593, 0.9000573601374122, 0.8857847936198314,
0.6618915691643112, 0.10901537670332251, 0.974332847977026, 0.7430921207569577, 0.3738798578677184,
0.3177953674650934, 0.17907762643786318, 0.6223416913159243, 0.41761298976200534, 0.33313570677119575,
0.7364457497838391, 0.27596382366048255, 0.6424208011354875, 0.7057512390482654, 0.43406222148090023,
0.9713038931369188, 0.016579486340549843, 0.8559782534797721, 0.8285887019972442, 0.45041719041568773,
0.8385573140413476, 0.3598494489892735, 0.010566283696215928, 0.7919540721893159, 0.7572641485392072,
0.9828556345311235, 0.44749216530159, 0.775872823653567, 0.21441646102494516, 0.5469506800802856,
0.6250315474032357, 0.7228454114709449, 0.7762777922445003, 0.3725320514474523, 0.1997830988186624,
0.8422680077727865, 0.5174000366043017, 0.4114337679642368, 0.8485359821345219, 0.8105411087109267,
0.14698078060577857, 0.5140303811686008, 0.7243343817393784, 0.4550925694099044, 0.3202637711960984,
0.8189363549218315, 0.7566860383375725, 0.7642137638017872, 0.7118547919654123, 0.25868958239576423,
0.26071980348118695, 0.0014416717989212957, 0.2531116783475601, 0.7452981354849321, 0.22666538044066054,
0.38860664857167593, 0.5727684813549001, 0.34391041728313887, 0.10125872981993811, 0.3698391553828596,
0.37392763598955614, 0.9352150532839864, 0.3196533316770499, 0.9451329545264995, 0.7011577054305139,
0.4972479758125282, 0.19394292978600158, 0.2255784709723747, 0.8497665546392211, 0.7784459333176916,
0.3826938886585244, 0.8823373252575396, 0.5095613352700986, 0.07043189724308563, 0.2651448359964339,
0.7756594236158155, 0.9333994810847233, 0.8625965208644846, 0.27085634631115396, 0.544146314956061,
0.005441946968595568, 0.1725375800157205, 0.8119268223977082, 0.41820424887848173, 0.40334273244470054,
0.14905838003830352, 0.39670562066321646, 0.4526807368029184, 0.9690226266124907, 0.15634095136705828,
0.26766468856658987, 0.18338178075941547, 0.992322802253603, 0.5236539345496551, 0.2985956624859313,
0.4100434474858442, 0.30853502687943635, 0.0887764814470422, 0.9796372081229322, 0.24109222934272623,
0.5050019585901817, 0.9454834743694469, 0.2521111838931931, 0.30393236334610707, 0.7590426698546433,
0.32565502384735956, 0.3618088380165515, 0.7603049752109582, 0.8079375811328675, 0.9640427963149142,
0.09107650871535244, 0.7053020025232072, 0.20134358439883027, 0.0969959330330612, 0.7634817795040539,
0.14288160000682015, 0.8995689280060272, 0.4121730916081331, 0.6381011014457788, 0.5347500291497603,
0.6981388766633505, 0.6086556203191144, 0.0997947219673625, 0.5180451101421524, 0.8457865844959731,
0.4802718326400941, 0.5480160414929314, 0.6692537196809561, 0.6086772632135882, 0.5755883318728027,
0.8928160726545578, 0.4537771248497059, 0.5855826059810336, 0.9693649946390455, 0.9809955907195725,
0.8222605121455328, 0.9315157929561733, 0.7610683650907902, 0.18453699065691143, 0.18987653993548925,
0.08058285379914143, 0.13258640947297373, 0.9276014353057904, 0.6826008435990373, 0.5827558781355116,
0.672636560507286, 0.6961240608292445, 0.16644178433862533, 0.21423970488961208, 0.547607323698878,
0.8410374079396743, 0.13582173342038273, 0.9112486015153621, 0.8940761424722561, 0.041135346871277734,
0.8930613830991306, 0.2897163426842069, 0.7093705018045361, 0.3981338285552244, 0.7910226772822321,
0.8889762775630399, 0.3790304426009268, 0.8601770030681934, 0.6138027901369926, 0.7056320576164431,
0.2476095758742285, 0.8132910616402856, 0.8689430742973778, 0.9295565490379185, 0.2841332486425633,
0.7761638838352205, 0.2628043693331882, 0.39488597717393137, 0.10902072616095637, 0.32922801464636264,
0.9893075329888635, 0.6042356789402263, 0.7257762472267762, 0.3304761279554156, 0.16014540968416557,
0.7244656911898267, 0.28200634660998836, 0.1955121568345718, 0.27844276258787903, 0.27617018599531706,
0.12640476624099528, 0.4975971208046923, 0.03530997699866356, 0.5588208829600507, 0.6309320467769252,
0.9927038509048303, 0.43155203248849583, 0.8743471515148651, 0.014292646197526904, 0.6348358539197627,
0.2088903476691828, 0.7400772190991796, 0.6118159913938324, 0.8768029651595749, 0.8965458010407473,
0.8550853428733064, 0.7895834058595897, 0.6787044063820584, 0.3033069747750138, 0.563032540187197,
0.2866958456729516, 0.6804805688846556, 0.28086923543329423, 0.7090636048238402, 0.7834965258386127,
0.4512887050774034, 0.355423710061168, 0.08428225259227184, 0.4127942317900083, 0.10136925268583408,
0.3718144648996242, 0.021269730316966662, 0.1472541613547963, 0.5542936329158991, 0.6443057701942396,
0.3807351756123216, 0.6072488055686531, 0.37037940366328637, 0.8319233029191506, 0.7550907359873181,
0.043204485029981865, 0.2057526694309575, 0.5610640391262, 0.6164096646792445, 0.5561287744052342,
0.42914845719986494, 0.5964488034004436, 0.8527268944513903, 0.8270737747453121, 0.2623987600454559,
0.8922733979408876, 0.21408558359169705, 0.7769381051002117, 0.1382750585458662, 0.6050849827198906,
0.21766895125825292, 0.6067790813241967, 0.8082650866597821, 0.34813166222037506, 0.4165288897956795,
0.8210636441113087, 0.609646597542757, 0.5372994025479527, 0.4849626034523542, 0.41447857298142254,
0.8206055713161226, 0.3375955171501559, 0.5133358994389254, 0.7830192378510232, 0.4407111355244818,
0.3995487295266026, 0.22822716386734787, 0.32860765082307486, 0.33763957464687633, 0.054434622315739256,
0.8094181305493572, 0.535453838731794, 0.9600813314329106, 0.8590236593122432, 0.022493420031869715,
0.30705758228596525, 0.01563195672690343, 0.3174866967434624, 0.772092899011473, 0.7054066010354223,
0.048859586900376506, 0.4768127919797456, 0.26256897983199623, 0.3889740130240964, 0.677986412352965,
0.2598445428360996, 0.6496040121038401, 0.8103854067681818, 0.9800399860695725, 0.9055670105357904,
0.7097953273255118, 0.3585186662150981, 0.5568353686574206, 0.08845545202125726, 0.16072563815750252,
0.20508470659388278, 0.5085201598548588, 0.21301485459742509, 0.8342799458856295, 0.03142978077813019,
0.31157069077044186, 0.9235515901806688, 0.4669799028719711, 0.18642602380883333, 0.6431751681934158,
0.6531267612980451, 0.7699808254096996, 0.07559532762088994, 0.08574910096987087, 0.4725201993249537,
0.4900671456656317, 0.0824182639396196, 0.5348469916235775, 0.03945363362033638, 0.40093997185570807,
0.46480434300517903, 0.6308751528484047, 0.41724094123184474, 0.09877249492678075, 0.9088970559325534,
0.781519477292224, 0.16620566965553463, 0.7128625312605764, 0.06687693207244172, 0.04209704414327786,
0.1162134199127226, 0.28098456619669065, 0.4146409837206263, 0.9009174193057882, 0.5647737811880079,
0.7818660970751818, 0.4688238338261841, 0.14677089233924467, 0.07791518888221172, 0.5318220598787906,
0.5700281022868329, 0.4028158916854355, 0.7152930324528218, 0.8445061967460851, 0.29118812322506016,
0.9004598751238156, 0.11220581130720386, 0.5290991553810941, 0.7907499464570443, 0.11997903435659885,
0.1092731941756504, 0.9861516232513136, 0.9641493160738513, 0.45131753627208837, 0.2534491566423186,
0.8035717495221812, 0.685198258275126, 0.5278965268586229, 0.3854252546562611, 0.03314184206146109,
0.039607736084224854, 0.6278111677701048, 0.6152808786614328, 0.13966613726902255, 0.8620583580194112,
0.9669361765861261, 0.764603363709067, 0.32162131735620114, 0.32739250166838896, 0.8048913301525041,
0.09013040808534978, 0.6780262054946375, 0.14955962962919134, 0.7014380395536837, 0.10703430614218357,
0.2725502659791028, 0.6109287614522758, 0.9701216262369222, 0.7077671112634293, 0.28262240295934615,
0.7429869273594056, 0.30343910138584196, 0.4387989074763934, 0.685560617408065, 0.9212628522979792,
0.6710972049337736, 0.6678957086131286, 0.052485043752963256, 0.03262215173872851, 0.5533794367051991,
0.14334883173534696, 0.8750261712197275, 0.21849295019688597, 0.789900305335936, 0.04378837917322642,
0.8448111409516583, 0.8554817655418667, 0.5422899883799648, 0.8322998447663251, 0.9043309648830229,
0.11376878252583256, 0.010574450853024775, 0.6059205512202163, 0.05650192227932216, 0.06616182593422049,
0.300840412056413, 0.11009113618103572, 0.09017278454547817, 0.7324074703533104, 0.5887507946661643,
0.8011993764162316, 0.9537044413248185, 0.3480848466091717, 0.04492720945080131, 0.12772985315204488,
0.09318647234488864, 0.4177564546636331, 0.17908052958405896, 0.9377559994221184, 0.7942001034678936,
0.018264818293493024, 0.09929932833940613, 0.9816497277698718, 0.7254971313087528, 0.5697799313851291,
0.20738382816360446, 0.9413770950513797, 0.19089363309518015, 0.7246898485586507, 0.22277616586322624,
0.2756137473006507, 0.5806902069622485, 0.15121601677076346, 0.26219984984480804, 0.5617317198216203,
0.5340793495810867, 0.7802656945952358, 0.2563728865278596, 0.9722632323000315, 0.12678239231988597,
0.5926580329225439, 0.9471136072043423, 0.7929900300971447, 0.1671869820758628, 0.3971575496869405,
0.4091510156827586, 0.7779342209221878, 0.15077561988451882, 0.24865991522931574, 0.7577232053268574,
0.8553874230783778, 0.39172177799779173, 0.6278466931159832, 0.5846812724737959, 0.6457445565848937,
0.6267819035915743, 0.9094479540235458, 0.5693214474531362, 0.21078166015345667, 0.2301828826215616,
0.4192657341771445, 0.548538931771065, 0.7361467529623559, 0.3789302500804076, 0.676427374762233,
0.3313045125291517, 0.06780277293961923, 0.5778351563517666, 0.8911852199571377, 0.5728970528544474,
0.9699817028914329, 0.43039129706514545, 0.08674775688446801, 0.3675919508116977, 0.9225119580037218,
0.6988194440402635, 0.5253181253502084, 0.6456804731969746, 0.26339182772715775, 0.22626993807075557,
0.550356185891607, 0.56814132057691, 0.23140177254779548, 0.08163330108694333, 0.6950026943794549,
0.3868874524520275, 0.7803560695948997, 0.8111106020547105, 0.334403887879808, 0.07747054777469231,
0.131679890515222, 0.385738851847096, 0.5720750915742275, 0.6537300415909767, 0.033579514204037375,
0.6267871762497315, 0.283315936418973, 0.6968535140130128, 0.32523324654122043, 0.34672320660673406,
0.27957808136314133, 0.5658693620920029, 0.7989747162309463, 0.6135521139363683, 0.35171172850223953,
0.6840952845832683, 0.7617108586929001, 0.08385365215958673, 0.8377444841629399, 0.4058061560932523,
0.8520764020443331, 0.22112770192078446, 0.3597174340725352, 0.7681341972913422, 0.4489397921948374,
0.7690271682513512, 0.9343709661134892, 0.7537016686581054, 0.8532417940641944, 0.21786544321625967,
0.9220532297103946, 0.6044137130573399, 0.8477580054395553, 0.3650904814611189, 0.550454839779165,
0.35397013127490295, 0.3167770033683245, 0.6019910259733843, 0.9006467399545588, 0.966868800513041,
0.8287187605620165, 0.34122925546186533, 0.4656526174427663, 0.43334812328455674, 0.013263490405688083,
0.26612364947878253, 0.5131104482037588, 0.8864329675456142, 0.9017844237441688, 0.46913085233212526,
0.9565194929638963, 0.48331540015439556, 0.8942705810265009, 0.7576375600904258, 0.5523909581952984,
0.43295148471017175, 0.8512591519488544, 0.6833666101748067, 0.4888591152858486, 0.6402955434059505,
0.6208145089126011, 0.2477338750104311, 0.21603797561049176, 0.8150167527352777, 0.9726705712342355,
0.3214682713106668, 0.8624495481477454, 0.6930029337014305, 0.4749272194303096, 0.8258052149906059,
0.6687961480135753, 0.8120600596698068, 0.15420597835671246, 0.5782577479558763, 0.5807398032680138,
0.978331447998591, 0.9396038337478894, 0.6913963473451303, 0.9188691280946835, 0.9530423584052496,
0.8676848853851221, 0.8158943909093959, 0.10737951927744527, 0.47262298717511697, 0.10378819638161618,
0.9276478971005653, 0.6428852905573831, 0.17746103075445874, 0.3020170597330829, 0.22401044731747322,
0.0577426005330417, 0.8985902135238854, 0.9615034506483714, 0.9787531208633925, 0.6234050362666279,
0.10818578608950069, 0.08002597075571494, 0.40612610122984627, 0.5557805970069319, 0.972064174913354,
0.6279093472467558, 0.2558826735265932, 0.04606558330313981, 0.00919631584008862, 0.09046938247961245,
0.6794105465566141, 0.60732321072077, 0.2541301129844721, 0.3408459253369911, 0.7521255057949666,
0.6254903026485684, 0.9890982642442152, 0.7086637519333243, 0.3260301174978997, 0.9132255776141602,
0.5266524126000847, 0.12872091545378628, 0.5173647446744717, 0.040958411123854344, 0.6212975171513406,
0.43502437401513183, 0.42646995770490204, 0.2692764271641708, 0.6680813915009773, 0.6767636171224242,
0.5000158219375631, 0.8255995563326667, 0.08966422549525332, 0.5632935338808306, 0.14705474560748233,
0.40403318221002793, 0.14701374362900166, 0.0834293465054069, 0.7314121992911785, 0.27889710619969366,
0.05459710747091584, 0.48311405862113443, 0.36523989425663217, 0.5858606795260355, 0.17073141155483396,
0.06466140598006276, 0.7782744447593027, 0.45005338754651747, 0.6405860800740856, 0.2305203445449855,
0.6501759636740025, 0.33332034091907814, 0.5843937688287243, 0.5586369497088509, 0.9789078545145837,
0.9163241892373197, 0.6008963135873424, 0.7032170412450808, 0.22511115467783138, 0.2687027228917952,
0.6641315333336573, 0.6227339938112236, 0.20518174508232712, 0.13853586263077233, 0.005866161059393082,
0.9578758962894522, 0.6605760502889816, 0.8246940071965811, 0.20054547619996677, 0.8120300891449976,
0.08925190901053293, 0.5157538258011378, 0.008446965357773117, 0.8937482962690314, 0.9542808129039703,
0.041094266919463385, 0.22961677645946077, 0.5941572693245096, 0.21538079934488563, 0.9297485411139692,
0.7313599834684, 0.8514206012515507, 0.37969815204001134, 0.5956592217572321, 0.12323732942423993,
0.8453301040453715, 0.8127586375364623, 0.9237285841574706, 0.7996900004382563, 0.7120644121842296,
]
class TestRandom(unittest.TestCase):
    """Regression tests for raysect's pseudo random number generator."""

    def test_random(self):
        """Seeding must reproduce the recorded reference sequence exactly."""
        seed(1234567890)
        for expected in _random_reference:
            self.assertEqual(
                uniform(), expected,
                msg="Random failed to reproduce the reference data.")
| 1.03125 | 1 |
setup.py | gelo-zhukov/yandex-maps | 0 | 12764013 | <reponame>gelo-zhukov/yandex-maps
#!/usr/bin/env python
from distutils.core import setup


def _read(filename):
    """Return the text contents of *filename*, closing the file handle.

    The original passed ``open(...).read()`` expressions directly, which
    leaked the file objects (they were only closed on garbage collection).
    """
    with open(filename) as f:
        return f.read()


version = '0.7'

setup(
    name='yandex-maps',
    version=version,
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://bitbucket.org/kmike/yandex-maps/',
    description='Yandex.Maps API python wrapper with optional django integration.',
    # Long description is the README followed by the changelog.
    long_description=_read('README.rst') + _read('CHANGES.rst'),
    license='MIT license',
    requires=['django (>=1.8)'],
    packages=['yandex_maps', 'yandex_maps.templatetags', 'yandex_maps.migrations'],
    package_data={'yandex_maps': ['templates/yandex_maps/*']},
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Natural Language :: Russian',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
)
| 1.1875 | 1 |
ast/test_NodeVisitor.py | MaxTurchin/pycopy-lib | 126 | 12764014 | import sys
import ast
import io
class Visitor(ast.NodeVisitor):
def __init__(self, f):
self.f = f
def generic_visit(self, node):
self.f.write(ast.dump(node))
self.f.write("\n")
super().generic_visit(node)
def visit_Assign(self, node):
for n in node.targets:
self.visit(n)
self.f.write(" = ")
self.visit(node.value)
self.f.write("\n")
def visit_Name(self, node):
self.f.write(node.id)
def visit_Num(self, node):
self.f.write(str(node.n))
# Sample program fed to the visitor.
SRC = """\
a = 1
a = b = 1
"""

# Expected output: the dump of the whole Module node, followed by the
# rendered assignment statements.
# NOTE(review): this dump format matches AST implementations that still
# produce Num nodes (e.g. pycopy); CPython >= 3.8 emits Constant nodes
# and would render differently -- confirm the intended runtime.
EXP = """\
Module(body=[Assign(targets=[Name(id='a', ctx=Store())], value=Num(n=1)), Assign(targets=[Name(id='a', ctx=Store()), Name(id='b', ctx=Store())], value=Num(n=1))])
a = 1
a = b = 1
"""

t = ast.parse(SRC)
buf = io.StringIO()
visitor = Visitor(buf)
visitor.visit(t)
# The collected output must match the expected text exactly.
assert buf.getvalue() == EXP
| 2.671875 | 3 |
parameters.py | Abhishek-Aditya-bs/Anime-Face-Generation-Pytorch | 0 | 12764015 | <reponame>Abhishek-Aditya-bs/Anime-Face-Generation-Pytorch<filename>parameters.py<gh_stars>0
from utils import get_default_device
# Hyperparameters and paths for GAN training on the anime face dataset.
lr = 0.0002  # optimizer learning rate
epochs = 25  # number of training epochs
image_size = 64  # target image side length -- presumably square 64x64; confirm against transforms
batch_size = 128  # samples per training batch
latent_size = 128  # dimensionality of the generator's latent noise vector
# Two (0.5, 0.5, 0.5) triples -- presumably per-channel (mean, std) for
# RGB normalization; confirm against the dataset transform usage.
stats = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
DATA_DIR = './animefacedataset'  # root directory of the training images
sample_dir = 'generated'  # output directory for generated sample images
device = get_default_device()  # compute device selected by the utils helper
class Solution:
    def kLengthApart(self, nums: List[int], k: int) -> bool:
        """Return True iff every pair of consecutive 1s in *nums* is
        separated by at least *k* zeros.

        Single forward pass tracking the index of the previous 1.  This
        replaces the original's three full scans (``1 not in nums`` plus
        two ``nums.index(1)`` calls) and its redundant re-check of
        ``nums[start] == 1``, which always held by construction.
        """
        prev = -1  # index of the most recent 1 seen so far; -1 if none
        for i, value in enumerate(nums):
            if value == 1:
                # i - prev - 1 zeros lie between this 1 and the previous
                # one; having fewer than k of them violates the rule.
                if prev >= 0 and i - prev <= k:
                    return False
                prev = i
        return True
src/lib/SocketServer.py | DTenore/skulpt | 2,671 | 12764017 | import _sk_fail; _sk_fail._("SocketServer")
| 1.117188 | 1 |
apns_python/notification.py | mr-ping/apns-python | 4 | 12764018 | """ This module contains all notification payload objects."""
class BaseMsg(dict):
    """Base class of all objects in a notification payload.

    A ``dict`` subclass: APNs keyword arguments and arbitrary custom
    fields are merged into the mapping itself.  When a key appears both
    in *custom_fields* and in the keyword arguments, the value from
    *custom_fields* wins (it is re-applied last).
    """

    # APNs key names recognized by this payload section; subclasses
    # override with their own list.
    apns_keys = []

    def __init__(self, custom_fields=None, **apn_args):
        # Fix: the original used a mutable default (custom_fields={});
        # a None sentinel behaves identically for all existing callers.
        if custom_fields is None:
            custom_fields = {}
        super(BaseMsg, self).__init__(custom_fields, **apn_args)
        if custom_fields:
            self.update(custom_fields)

    def update_keys(self, apn_args, msg_obj_keys):
        """Rewrite underscore-style keys in *apn_args* to the dash-style
        names APNs expects (e.g. ``loc_key`` -> ``loc-key``).

        Iterates over a snapshot of the items: the original deleted and
        inserted keys while iterating the live dict via ``iteritems()``,
        which is undefined behavior (entries may be skipped or repeated,
        or a RuntimeError raised).  ``items()`` also works on Python 3.
        """
        for k, v in list(apn_args.items()):
            formated_k = k.replace('_', '-')
            if formated_k in msg_obj_keys:
                del apn_args[k]
                apn_args[formated_k] = v
class Alert(BaseMsg):
    """The ``alert`` dictionary inside the ``aps`` section.

    *body* is the message text; other alert keys may be passed as
    underscore-style keyword arguments (e.g. ``title_loc_key``).
    """

    # Keys defined by the APNs payload reference for the alert dictionary.
    # Fix: the original listed 'title-loc-key' twice; the duplicate is
    # corrected to 'title-loc-args' per Apple's documentation, so that
    # a title_loc_args keyword is properly translated.
    apns_keys = [
        'title', 'body', 'title-loc-key', 'title-loc-args', 'action-loc-key',
        'loc-key', 'loc-args', 'launch-image']

    def __init__(self, body=None, **apn_args):
        self.update_keys(apn_args, Alert.apns_keys)
        self.__setitem__('body', body)
        super(Alert, self).__init__(**apn_args)
class APS(BaseMsg):
    """Models the ``aps`` dictionary of a notification payload."""

    apns_keys = [
        'mutable-content', 'alert', 'badge', 'sound', 'content-available',
        'category', 'thread-id']

    def __init__(self, **apn_args):
        # Normalize underscore-style keyword names (e.g. thread_id) to
        # the dash-style keys APNs expects before storing them.
        self.update_keys(apn_args, APS.apns_keys)
        BaseMsg.__init__(self, **apn_args)
class Payload(BaseMsg):
    """The complete notification payload sent to APNs as the request body."""

    def __init__(self, aps, **apn_args):
        # Store the mandatory 'aps' section; any remaining keyword
        # arguments become custom top-level payload fields.
        self['aps'] = aps
        BaseMsg.__init__(self, **apn_args)
class Headers(BaseMsg):
    """The request headers accompanying a push sent to APNs.

    Underscore-style keyword names are rewritten to the dash-separated
    header names (e.g. ``apns_topic`` -> ``apns-topic``).
    """

    apns_keys = [
        'authorization', 'apns-id', 'apns-expiration', 'apns-priority',
        'apns-topic', 'apns-collapse-id']

    def __init__(self, **apn_args):
        # Translate keyword names into the real header names first.
        self.update_keys(apn_args, Headers.apns_keys)
        BaseMsg.__init__(self, **apn_args)
| 2.53125 | 3 |
da/sim.py | edgarsit/distalgo | 76 | 12764019 | <reponame>edgarsit/distalgo
# Copyright (c) 2010-2017 <NAME>
# Copyright (c) 2010-2017 <NAME>
# Copyright (c) 2010-2017 Stony Brook University
# Copyright (c) 2010-2017 The Research Foundation of SUNY
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import io
import sys
import copy
import enum
import time
import pickle
import random
import logging
import itertools
import functools
import threading
import collections
import multiprocessing
import os.path
from . import common, pattern
from .common import (builtin, internal, name_split_host, name_split_node,
ProcessId, get_runtime_option,
ObjectDumper, ObjectLoader)
from .transport import ChannelCaps, TransportManager, HEADER_SIZE, \
TransportException, AuthenticationException
logger = logging.getLogger(__name__)
class DistProcessExit(BaseException):
    """Signals termination of a DistAlgo process.

    Subclasses `BaseException` rather than `Exception`, so blanket
    ``except Exception`` handlers will not catch it.

    Attributes:
        exit_code: exit status to report for the terminating process.
    """

    def __init__(self, code=0):
        self.exit_code = code
        super().__init__()
class Command(enum.Enum):
    """An enum of process commands.

    Member values are used as indices into per-process dispatch tables
    (see `DistProcess._init_dispatch_table`), so `Sentinel` must remain
    the largest value.
    """
    # Control requests sent to a process:
    Start = 1
    Setup = 2
    Config = 3
    End = 5
    New = 6
    Resolve = 7
    NodeJoin = 8
    NodeLeave = 9
    # Acknowledgements (mostly request value + 10) and node liveness:
    StartAck = 11
    SetupAck = 12
    ConfigAck = 13
    EndAck = 15
    NewAck = 16
    ResolveAck = 17
    NodeAck = 18
    NodePing = 19
    # Application-level traffic:
    Message = 20
    RPC = 30
    RPCReply = 31
    # Upper bound used to size dispatch tables; not a real command.
    Sentinel = 40
_config_object = dict()
class DistProcess():
"""Abstract base class for DistAlgo processes.
Each instance of this class enbodies the runtime state and activities of a
DistAlgo process in a distributed system. Each process is uniquely
identified by a `ProcessId` object. Messages exchanged between DistAlgo
processes can be any picklable Python object.
DistAlgo processes can spawn more processes by calling `new`. The process
that called `new` is known as the parent process of the newly spawned
processes. Any DistProcess can send messages to any other DistProcess, given
that it knows the `ProcessId` of the target process. However, only the
parent process can `end` a child process. The terminal is shared between all
processes spawned from that terminal, which includes the stdout, stdin, and
stderr streams.
Concrete subclasses of `DistProcess` must define the methods:
- `setup`: A function that initializes the process-local variables.
- `run`: The entry point of the process. This function defines the
activities of the process.
Users should not instantiate this class directly, process instances should
be created by calling `new`.
"""
    def __init__(self, procimpl, forwarder, **props):
        """Initialize the runtime state of a DistAlgo process.

        Args:
            procimpl: the process implementation object; supplies the
                DistAlgo pid (`dapid`), parent pid (`daparent`), the
                'New' command sequence number, and the message router.
            forwarder: object used to forward outgoing messages.
            **props: per-process properties.
        """
        self.__procimpl = procimpl
        # Unique id of this process.
        self.__id = procimpl.dapid
        # Attach this process' pid to every log record it emits.
        self._log = logging.LoggerAdapter(
            logging.getLogger(self.__class__.__module__)
            .getChild(self.__class__.__name__), {'daPid' : self._id})
        # Sequence number of the 'New' command that spawned us; it is
        # acknowledged back to the parent in _delayed_start.
        self.__newcmd_seqno = procimpl.seqno
        # Incoming-message queue registered with the router.
        self.__messageq = procimpl.router.get_queue_for_process(self._id)
        self.__forwarder = forwarder
        self.__properties = props
        self.__jobq = collections.deque()
        self.__lock = threading.Lock()
        # Thread-local timer state -- presumably used by timed waits;
        # the consuming handlers are not in view.
        self.__local = threading.local()
        self.__local.timer = None
        self.__local.timer_expired = False
        # Monotonic sequence numbers for outgoing commands.
        self.__seqcnt = itertools.count(start=0)
        self.__setup_called = False
        # _delayed_start blocks until this becomes True.
        self.__running = False
        self.__parent = procimpl.daparent
        self._init_dispatch_table()
        self._init_config()
        # Namespace holding the user-defined process state.
        self._state = common.Namespace()
        self._events = []
    def setup(self, **rest):
        """Initialization routine for the DistAlgo process.

        Should be overridden by child classes to initialize process
        states.  Called with the arguments supplied to `new` before the
        process starts running.
        """
        pass
    def run(self):
        """Entry point for the DistAlgo process.

        This is the starting point of execution for user code; invoked
        by `_delayed_start` once the process has been started.
        """
        pass
    @property
    @internal
    def _id(self):
        """The DistAlgo process id of this process (from ``procimpl.dapid``)."""
        return self.__id
    @internal
    def _delayed_start(self):
        """Bootstrap and run the process.

        Acknowledges the parent's 'New' command (when this process was
        spawned by one), blocks until the process is started, then hands
        control to the user-defined `run`.  Returns `run`'s result, or
        -1 if `run` raised an unhandled exception.
        """
        assert self.__messageq is not None
        self._log.debug("Delayed start.")
        if self.__newcmd_seqno is not None:
            # Tell the parent that this process was created successfully.
            self._send1(msgtype=Command.NewAck,
                        message=(self.__newcmd_seqno, None),
                        to=self.__parent, flags=ChannelCaps.RELIABLEFIFO)
        # __running is presumably flipped by the 'Start' command handler
        # (not in view) -- block until then.
        self._wait_for(lambda: self.__running)
        try:
            return self.run()
        except Exception as e:
            self._log.error("Unrecoverable error in DistAlgo process: %r",
                            e, exc_info=1)
            return -1
    @internal
    def _init_config(self):
        """Cache per-process settings derived from configuration keys.

        Reads the 'handling', 'unmatched', 'channel', and 'clock'
        configuration options (see `get_config` for the lookup order).
        """
        # 'handling': run all matching handlers per message, or just one
        # (__label_all/__label_one are defined elsewhere in the class).
        if self.get_config('handling', default='one').casefold() == 'all':
            self.__do_label = self.__label_all
        else:
            self.__do_label = self.__label_one
        # 'unmatched': whether messages matching no handler are retained.
        if self.get_config('unmatched', default='drop').casefold() == 'keep':
            self._keep_unmatched = True
        else:
            self._keep_unmatched = False
        # 'channel': default capability flags for outgoing channels.
        self.__default_flags = self.__get_channel_flags(
            self.get_config("channel", default=[]))
        # 'clock': a Lamport clock starts at 0; None means no logical clock.
        if self.get_config('clock', default='').casefold() == 'lamport':
            self._logical_clock = 0
        else:
            self._logical_clock = None
    # Commands acknowledged through the shared ack handler; their
    # '_cmd_<name>' methods are synthesized in _init_dispatch_table.
    AckCommands = [Command.NewAck, Command.EndAck, Command.StartAck,
                   Command.SetupAck, Command.ResolveAck, Command.RPCReply]
    @internal
    def _init_dispatch_table(self):
        """Build the per-instance command dispatch structures.

        Both tables are indexed by `Command.value`:
        `__command_dispatch_table[v]` holds the bound '_cmd_*' handler,
        and `__async_events[v]` a dict (presumably of pending waiters --
        the consuming code is not in view).
        """
        self.__command_dispatch_table = [None] * Command.Sentinel.value
        self.__async_events = [None] * Command.Sentinel.value
        # Synthesize '_cmd_<Ack>' methods as partials of the generic
        # __cmd_handle_Ack (defined elsewhere in the class).
        for ack in self.__class__.AckCommands:
            handlername = '_cmd_' + ack.name
            setattr(self, handlername,
                    functools.partial(self.__cmd_handle_Ack,
                                      cmdtype=ack.value))
        # Register every command that has a matching '_cmd_<name>' method.
        for cmdname in Command.__members__:
            handlername = '_cmd_' + cmdname
            cmd = Command.__members__[cmdname]
            if hasattr(self, handlername):
                self.__async_events[cmd.value] = dict()
                self.__command_dispatch_table[cmd.value] = \
                    getattr(self, handlername)
    def __get_channel_flags(self, props):
        """Translate channel property names into a `ChannelCaps` bitmask.

        `props` may be a single name or a sequence of names; each is
        upper-cased and looked up on `ChannelCaps`.  Unknown names are
        logged and ignored.
        """
        flags = 0
        # Accept a bare string as a one-element list.
        if isinstance(props, str):
            props = [props]
        for prop in props:
            pflag = getattr(ChannelCaps, prop.upper(), None)
            if pflag is not None:
                flags |= pflag
            else:
                logger.error("Unknown channel property %r", prop)
        return flags
    # Class-level fallback store for configuration values; the module
    # keeps its own _config_object as a further fallback.
    _config_object = dict()

    @classmethod
    def get_config(cls, key, default=None):
        """Returns the configuration value for specified 'key'.

        Lookup precedence: runtime 'config' options, then the global
        config, then this class' `_config_object`, then the defining
        module's `_config_object`; `default` if the key is nowhere set.
        """
        cfgobj = get_runtime_option('config')
        if key in cfgobj:
            return cfgobj[key]
        elif key in common.global_config():
            return common.global_config()[key]
        elif key in cls._config_object:
            return cls._config_object[key]
        elif key in sys.modules[cls.__module__]._config_object:
            return sys.modules[cls.__module__]._config_object[key]
        else:
            return default
    @builtin
    def new(self, pcls, args=None, num=None, at=None,
            method=None, daemon=False, **props):
        """Creates new DistAlgo processes.
        `pcls` specifies the DistAlgo process class. Optional argument `args` is
        a list of arguments that is used to call the `setup` method of the child
        processes. Optional argument `num` specifies the number of processes to
        create on each node. Optional argument `at` specifies the node or nodes
        on which the new processes are to be created. If `num` is not specified
        then it defaults to one process. If `at` is not specified then it
        defaults to the same node as the current process. Optional argument
        `method` specifies the type of implementation used to run the new
        process(es), and can be one of 'process', in which case the new
        processes will be run inside operating system processes, or 'thread' in
        which case the processes will be run inside operating system threads. If
        method is not specified then its default value is taken from the
        '--default_proc_impl' command line option.
        If neither `num` nor `at` is specified, then `new` will return the
        process id of child process if successful, or None otherwise. If either
        `num` or `at` is specified, then `new` will return a set containing the
        process ids of the processes that was successfully created.
        """
        if not issubclass(pcls, DistProcess):
            raise TypeError("new: can not create DistAlgo process using "
                            "non-DistProcess class: {}.".format(pcls))
        if args is not None and not isinstance(args, collections.abc.Sequence):
            raise TypeError("new: 'args' must be a sequence but is {} "
                            "instead.".format(args))
        # Normalize `num` into an iterable driving how many children to spawn:
        iterator = []
        if num is None:
            iterator = range(1)
        elif isinstance(num, int):
            iterator = range(num)
        elif isinstance(num, collections.abc.Iterable):
            iterator = num
        else:
            raise TypeError("new: invalid value for 'num': {}".format(num))
        # Resolve target node name(s) into process ids:
        if isinstance(at, collections.abc.Set):
            at = {self.resolve(nameorid) for nameorid in at}
        else:
            at = self.resolve(at)
        if method is None:
            method = get_runtime_option('default_proc_impl')
        self._log.debug("Creating instances of %s using '%s'", pcls, method)
        seqno = self._create_cmd_seqno()
        # Children will ack directly to us once created:
        self._register_async_event(Command.NewAck, seqno)
        if at is not None and at != self._id:
            # Remote creation: ask the target node(s) to spawn on our behalf,
            # and wait for their RPC replies listing the new child ids:
            self._register_async_event(Command.RPCReply, seqno)
            if self._send1(Command.New,
                           message=(pcls, iterator, method, daemon, seqno, props),
                           to=at,
                           flags=ChannelCaps.RELIABLEFIFO):
                res = self._sync_async_event(Command.RPCReply, seqno, at)
                if isinstance(at, set):
                    children = [pid for target in at for pid in res[target]]
                else:
                    children = res[at]
            else:
                self._deregister_async_event(Command.RPCReply, seqno)
                children = []
        else:
            # Local creation: spawn directly through our process implementation:
            children = self.__procimpl.spawn(pcls, iterator, self._id, props,
                                             seqno, container=method,
                                             daemon=daemon)
        self._log.debug("%d instances of %s created: %r",
                        len(children), pcls, children)
        self._sync_async_event(Command.NewAck, seqno, children)
        self._log.debug("All children acked.")
        if args is not None:
            # Run `setup` on each child; drop (and terminate) any that fail:
            tmp = []
            for cid in children:
                if self._setup(cid, args, seqno=seqno):
                    tmp.append(cid)
                else:
                    self._log.warning(
                        "`setup` failed for %r, terminating child.", cid)
                    self.end(cid)
            children = tmp
        if num is None and at is None:
            return children[0] if len(children) > 0 else None
        else:
            return set(children)
    @internal
    def _setup(self, procs, args, seqno=None):
        """Send the `Setup` command to `procs` and wait for their acks.

        `args` is the argument sequence for the children's `setup` methods.
        Returns False when the `Setup` command could not be sent.
        """
        if not isinstance(args, collections.abc.Sequence):
            raise TypeError("setup: 'args' must be a sequence but is {} "
                            "instead.".format(args))
        res = True
        if seqno is None:
            seqno = self._create_cmd_seqno()
        self._register_async_event(msgtype=Command.SetupAck, seqno=seqno)
        if self._send1(msgtype=Command.Setup,
                       message=(seqno, args),
                       to=procs,
                       flags=ChannelCaps.RELIABLEFIFO,
                       retry_refused_connections=True):
            self._sync_async_event(msgtype=Command.SetupAck,
                                   seqno=seqno,
                                   srcs=procs)
        else:
            res = False
            # Send failed; clean up the registration ourselves (on success
            # `_sync_async_event` deregisters it):
            self._deregister_async_event(msgtype=Command.SetupAck,
                                         seqno=seqno)
        return res
    @internal
    def _start(self, procs, args=None):
        """Send the `Start` command to `procs`, optionally running `_setup` first.

        Returns False when either the setup phase or sending `Start` failed.
        """
        res = True
        seqno = self._create_cmd_seqno()
        if args is not None:
            if not self._setup(procs, args, seqno=seqno):
                return False
        self._register_async_event(msgtype=Command.StartAck, seqno=seqno)
        if self._send1(msgtype=Command.Start, message=seqno, to=procs,
                       flags=ChannelCaps.RELIABLEFIFO):
            self._sync_async_event(msgtype=Command.StartAck,
                                   seqno=seqno,
                                   srcs=procs)
        else:
            res = False
            self._deregister_async_event(msgtype=Command.StartAck,
                                         seqno=seqno)
        return res
    @builtin
    def nameof(self, pid):
        """Returns the process name of `pid`, if any.

        The name is the empty string for unnamed processes.
        """
        assert isinstance(pid, ProcessId)
        return pid.name
    @builtin
    def parent(self):
        """Returns the parent process id of the current process.
        The "parent process" is the process that called `new` to create this
        process.
        """
        return self.__parent
    @builtin
    def nodeof(self, pid):
        """Returns the process id of `pid`'s node process.
        """
        assert isinstance(pid, ProcessId)
        # An empty `nodename` means `pid` lives on our own node:
        if self._id == pid or len(pid.nodename) == 0:
            return self.__procimpl._nodeid
        else:
            return self.resolve(pid.nodename)
    @builtin
    def exit(self, code=0):
        """Terminates the current process.
        `code` specifies the exit code.

        Implemented by raising `DistProcessExit`, which unwinds out of the
        user code and is handled by the runtime.
        """
        raise DistProcessExit(code)
    @builtin
    def output(self, *message, sep=' ', level=logging.INFO+1):
        """Prints arguments to the process log.
        Optional argument 'level' is a positive integer that specifies the
        logging level of the message, defaults to 'logging.INFO'(20). Refer to
        [https://docs.python.org/3/library/logging.html#levels] for a list of
        predefined logging levels.
        When the level of the message is equal to or higher than the
        configured level of a log handler, the message is logged to that
        handler; otherwise, it is ignored. DistAlgo processes are
        automatically configured with two log handlers:, one logs to the
        console, the other to a log file; the handlers' logging levels are
        controlled by command line parameters.
        """
        # Skip the (potentially expensive) string formatting when the message
        # would be filtered out anyway:
        if level > self._log.getEffectiveLevel():
            msg = sep.join([str(v) for v in message])
            self._log.log(level, msg)
    @builtin
    def debug(self, *message, sep=' '):
        """Prints debugging output to the process log.
        This is the same as `output` except the message is logged at the
        'USRDBG' level (`logging.DEBUG + 1`).
        """
        self.output(*message, sep=sep, level=logging.DEBUG+1)
    @builtin
    def error(self, *message, sep=' '):
        """Prints error message to the process log.
        This is the same as `output` except the message is logged at the
        'USRERR' level (`logging.INFO + 2`).
        """
        self.output(*message, sep=sep, level=logging.INFO+2)
@builtin
def work(self):
"""Waste some random amount of time.
This suspends execution of the process for a period of 0-2 seconds.
"""
time.sleep(random.randint(0, 200) / 100)
pass
    @builtin
    def end(self, target, exit_code=1):
        """Terminate the child processes specified by `target`.
        `target` can be a process id or a set of process ids, all of which must
        be a child process of this process.

        The termination request is delivered as an `End` command carrying
        `exit_code`; see `_cmd_End` for the receiving side.
        """
        self._send1(Command.End, exit_code, to=target,
                    flags=ChannelCaps.RELIABLEFIFO)
    @builtin
    def logical_clock(self):
        """Returns the current value of the logical clock.

        This is an `int` under Lamport clocks, or None when no logical
        clock is configured (see `_init_config`).
        """
        return self._logical_clock
    @builtin
    def incr_logical_clock(self):
        """Increments the logical clock.
        For Lamport's clock, this increases the clock value by 1.

        A no-op when no logical clock is configured (`_logical_clock` is None).
        """
        if isinstance(self._logical_clock, int):
            self._logical_clock += 1
    @builtin
    def send(self, message, to, channel=None, **rest):
        """Send a DistAlgo message.
        `message` can be any pickle-able Python object. `to` can be a process id
        or a set of process ids.

        Returns the result of the underlying `_send1`, or False when the
        message was dropped by the configured 'send'-failure lottery.
        Triggers a `SentEvent` so `sent` patterns can match it.
        """
        self.incr_logical_clock()
        # Simulated failure injection ('send' probability in __properties):
        if (self.__fails('send')):
            self._log.info("Dropped outgoing message due to lottery: %r", message)
            return False
        flags = None
        if channel is not None:
            flags = self.__get_channel_flags(channel)
        impersonate = rest.get('impersonate', None)
        res = self._send1(msgtype=Command.Message,
                          message=(self._logical_clock, message),
                          to=to,
                          flags=flags,
                          impersonate=impersonate)
        self.__trigger_event(pattern.SentEvent(
            (self._logical_clock, to, self._id), message))
        return res
    @builtin
    def hanged(self):
        """Hangs the current process.
        When a process enters the 'hanged' state, its main logic and all message
        handlers will no longer run.
        """
        # Wait forever on an ack (from ourselves) that never arrives:
        self._register_async_event(Command.EndAck, seqno=0)
        self._sync_async_event(Command.EndAck, seqno=0, srcs=self._id)
    @builtin
    def resolve(self, name):
        """Returns the process id associated with `name`.

        `name` may be None (returned as-is), an already-resolved
        `ProcessId`, or a string of the form understood by
        `name_split_host`/`name_split_node`.  When the name is not known
        locally, a `Resolve` request is sent to the node process and this
        call blocks until the node replies.  Returns None on failure.
        """
        if name is None:
            return None
        elif isinstance(name, ProcessId):
            return name
        elif not isinstance(name, str):
            self._log.error("resolve: unsupported type %r", name)
            return None
        fullname, host, port = name_split_host(name)
        if fullname is None:
            self._log.error("Malformed name: %s", name)
            return None
        procname, nodename = name_split_node(fullname)
        if procname is None:
            self._log.error("Malformed name: %s", name)
            return None
        # Fast path: name already known locally:
        dest = ProcessId.lookup((procname, nodename))
        if dest is None:
            self._log.info("Waiting to resolve name %r...", name)
            seqno = self._create_cmd_seqno()
            self._register_async_event(Command.ResolveAck, seqno)
            if self._send1(Command.Resolve,
                           message=((procname, nodename), host, port, seqno),
                           to=self.__procimpl._nodeid,
                           flags=ChannelCaps.RELIABLEFIFO):
                res = self._sync_async_event(Command.ResolveAck, seqno,
                                             self.__procimpl._nodeid)
                dest = res[self.__procimpl._nodeid]
                self._log.debug("%r successfully resolved to %r.", name, dest)
            else:
                self._deregister_async_event(Command.ResolveAck, seqno)
                self._log.error("Unable to resolve %r: failed to send "
                                "request to node!", name)
        return dest
    @internal
    def _resolve_callback(self, pid, src, seqno):
        """Deliver a late name-resolution result `pid` back to requester `src`."""
        self._send1(Command.ResolveAck, message=(seqno, pid), to=src)
    @internal
    def _send1(self, msgtype, message, to, flags=None, impersonate=None,
               **params):
        """Internal send.
        Pack the message and forward to router.

        `to` may be a `ProcessId`, a process name string, or an iterable of
        either.  Returns True only if forwarding succeeded for every
        destination.
        """
        if to is None:
            self._log.warning("send: 'to' is None!")
            return False
        if flags is None:
            flags = self.__default_flags
        protocol_message = (msgtype, message)
        res = True
        if isinstance(to, ProcessId) or isinstance(to, str):
            target = [to]
        else:
            # 'to' must be an iterable of `ProcessId`s:
            target = to
        for dest in target:
            if isinstance(dest, str):
                # This is a process name, try to resolve to an id
                dest = self.resolve(dest)
            if not self.__forwarder(self._id, dest, protocol_message,
                                    params, flags, impersonate):
                res = False
        return res
    @internal
    def _timer_start(self):
        """Start the per-thread 'await' timeout timer at the current time."""
        self.__local.timer = time.time()
        self.__local.timer_expired = False
    @internal
    def _timer_end(self):
        """Clear the per-thread timer (expiry flag is left as-is)."""
        self.__local.timer = None
    @property
    @internal
    def _timer_expired(self):
        """True when the last timed label/await ran out of time."""
        return self.__local.timer_expired
def __fails(self, failtype):
if failtype not in self.__properties:
return False
if (random.random() < self.__properties[failtype]):
return True
return False
    @internal
    def _label(self, name, block=False, timeout=None):
        """This simulates the controlled "label" mechanism.
        The number of pending events handled at each label is controlled by the
        'handling' configuration key -- if 'handling' is 'one' then `__do_label`
        will be set to `__label_one`, otherwise `__do_label` will be set to
        `__label_all` (see `_init_config`).

        Also rolls the 'hang' and 'crash' failure lotteries at each label,
        and runs pending handler jobs permitted at this label.
        """
        if self.__fails('hang'):
            self._log.warning("Hanged(@label %s)", name)
            self.hanged()
        if self.__fails('crash'):
            self._log.warning("Crashed(@label %s)", name)
            self.exit(10)
        self.__do_label(name, block, timeout)
        self.__process_jobqueue(name)
    def __label_one(self, name, block=False, timeout=None):
        """Handle at most one pending event at a time.

        With a `timeout`, starts the per-thread timer on first entry and
        marks `timer_expired` (without processing an event) once the total
        elapsed time exceeds the timeout.
        """
        if timeout is not None:
            if self.__local.timer is None:
                self._timer_start()
            timeleft = timeout - (time.time() - self.__local.timer)
            if timeleft <= 0:
                self._timer_end()
                self.__local.timer_expired = True
                return
        else:
            timeleft = None
        self.__process_event(block, timeleft)
    def __label_all(self, name, block=False, timeout=None):
        """Handle up to all pending events at the time this function is called.
        """
        # 'nmax' is a "snapshot" of the queue size at the time we're called. We
        # only attempt to process up to 'nmax' events, since otherwise we could
        # potentially block the process forever if the events come in faster
        # than we can process them:
        nmax = len(self.__messageq)
        i = 0
        while True:
            i += 1
            if timeout is not None:
                if self.__local.timer is None:
                    self._timer_start()
                timeleft = timeout - (time.time() - self.__local.timer)
                if timeleft <= 0:
                    self._timer_end()
                    self.__local.timer_expired = True
                    break
            else:
                timeleft = None
            # Stop as soon as an event fails to process or the snapshot
            # quota is exhausted:
            if not self.__process_event(block, timeleft) or i >= nmax:
                break
    def __process_jobqueue(self, label=None):
        """Runs all pending handlers jobs permissible at `label`.

        Jobs whose handler has label constraints that exclude `label` are
        either re-queued (when `_keep_unmatched` is set) or dropped.
        Under 'one' handling, at most one job is run per call.
        """
        leftovers = []
        handler = args = None
        while self.__jobq:
            try:
                handler, args = self.__jobq.popleft()
            except IndexError:
                # Another thread drained the queue between the loop test and
                # the pop:
                self._log.debug("Job item stolen by another thread.")
                break
            except ValueError:
                self._log.error("Corrupted job item!")
                continue
            if ((handler._labels is None or label in handler._labels) and
                (handler._notlabels is None or label not in handler._notlabels)):
                try:
                    handler(**args)
                    if self.__do_label is self.__label_one:
                        # 'one' handling: stop after the first handled job:
                        break
                except Exception as e:
                    self._log.error(
                        "%r when calling handler '%s' with '%s': %s",
                        e, handler.__name__, args, e)
            else:
                if self._keep_unmatched:
                    dbgmsg = "Skipping (%s, %r) due to label constraint."
                    leftovers.append((handler, args))
                else:
                    dbgmsg = "Dropping (%s, %r) due to label constraint."
                self._log.debug(dbgmsg, handler, args)
        self.__jobq.extend(leftovers)
    @internal
    def _create_cmd_seqno(self):
        """Returns a unique sequence number for pairing command messages to their
        replies.
        """
        cnt = self.__seqcnt
        # we piggyback off the GIL for thread-safety:
        seqno = next(cnt)
        # when the counter value gets too big, itertools.count will switch into
        # "slow mode"; we don't want slow, and we don't need that many unique
        # values simultaneously, so we just reset the counter once in a while:
        if seqno > 0xfffffff0:
            with self.__lock:
                # this test checks that nobody else has reset the counter before
                # we acquired the lock:
                if self.__seqcnt is cnt:
                    self.__seqcnt = itertools.count(start=0)
        return seqno
    @internal
    def _register_async_event(self, msgtype, seqno):
        """Register interest in acks of `msgtype` with sequence `seqno`.

        The fresh list collects (src, result) pairs until
        `_sync_async_event` converts it (see `__cmd_handle_Ack`).
        """
        self.__async_events[msgtype.value][seqno] = list()
    @internal
    def _deregister_async_event(self, msgtype, seqno):
        """Remove the registration made by `_register_async_event`."""
        with self.__lock:
            del self.__async_events[msgtype.value][seqno]
    @internal
    def _sync_async_event(self, msgtype, seqno, srcs):
        """Block until an ack of `msgtype`/`seqno` arrives from every `srcs`.

        Returns a dict mapping each source to its ack payload.  The
        registration must have been created by `_register_async_event`.
        """
        if isinstance(srcs, ProcessId):
            remaining = {srcs}
        else:
            remaining = set(srcs)
        container = self.__async_events[msgtype.value][seqno]
        with self.__lock:
            # `container` holds (src, result) pairs accumulated so far;
            # switch it to the (remaining, results) form that
            # `__cmd_handle_Ack` updates in place:
            results = dict(container)
            remaining.difference_update(results)
            self.__async_events[msgtype.value][seqno] = (remaining, results)
        self._wait_for(lambda: not remaining)
        self._deregister_async_event(msgtype, seqno)
        return results
    @internal
    def _wait_for(self, predicate, timeout=None):
        """Process events until `predicate()` becomes true.

        NOTE(review): `timeout` bounds each individual event wait, not the
        total waiting time — it is never decremented across iterations;
        confirm whether callers rely on a total-time bound.
        """
        while not predicate():
            self.__process_event(block=True, timeout=timeout)
    def __cmd_handle_Ack(self, src, args, cmdtype):
        """Generic handler for all ack-type commands (see `AckCommands`).

        `args` is a (seqno, result) pair; acks for unregistered seqnos are
        silently ignored.
        """
        seqno, res = args
        registered_evts = self.__async_events[cmdtype]
        with self.__lock:
            if seqno in registered_evts:
                # XXX: we abuse type(container) to indicate whether we need to
                # aggregate or discard:
                container = registered_evts[seqno]
                if type(container) is list:
                    # `__sync_event` hasn't been called -- we don't yet know
                    # the exact set of peers to wait for, so we just aggregate
                    # all the acks:
                    container.append((src, res))
                else:
                    # Otherwise, we can just mark the peer off the list:
                    container[0].discard(src)
                    container[1][src] = res
    def __process_event(self, block, timeout=None):
        """Retrieves and processes one pending event.
        Parameter 'block' indicates whether to block waiting for next message
        to come in if the queue is currently empty. 'timeout' is the maximum
        time to wait for an event. Returns True if an event was successfully
        processed, False otherwise.
        """
        event = None
        if timeout is not None and timeout < 0:
            timeout = 0
        try:
            message = self.__messageq.pop(block, timeout)
        except common.QueueEmpty:
            message = None
        except Exception as e:
            self._log.error("Caught exception while waiting for events: %r", e)
            return False
        if message is None:
            if block:
                # A blocking pop returning None means another thread consumed
                # the item we were woken up for:
                self._log.debug(
                    "__process_event: message was stolen by another thread.")
            return False
        try:
            # Messages are (source, (command, payload)) tuples:
            src, (cmd, args) = message
            handler = self.__command_dispatch_table[cmd.value]
            if handler is None:
                self._log.warning("No handler for %r.", message)
                return False
            else:
                handler(src, args)
                return True
        except Exception as e:
            self._log.error(
                "Exception while processing message %r: %r", message, e)
            return False
    @internal
    def _cmd_New(self, src, args):
        """Handle a remote `New` command: spawn children on behalf of `src`.

        Replies with an `RPCReply` carrying the new children's ids, so the
        requester's `new` call can unblock (see `new`).
        """
        pcls, num, method, daemon, seqno, props = args
        children = self.__procimpl.spawn(pcls, num,
                                         parent=src, props=props,
                                         seqno=seqno, container=method,
                                         daemon=daemon)
        self._send1(msgtype=Command.RPCReply,
                    message=(seqno, children),
                    to=src,
                    flags=ChannelCaps.RELIABLEFIFO)
    @internal
    def _cmd_Start(self, src, seqno):
        """Handle the `Start` command: begin running the user `run` method.

        `setup` must have completed first.  Always acks `src`, even when
        the command is rejected.
        """
        if self.__running:
            self._log.warning("Process already started but got `start` again.")
        else:
            if not self.__setup_called:
                self._log.error("`start` received before `setup`!")
            else:
                self._log.debug("`start` command received, commencing...")
                # `_delayed_start` waits on this flag:
                self.__running = True
        self._send1(msgtype=Command.StartAck,
                    message=(seqno, None),
                    to=src,
                    flags=ChannelCaps.RELIABLEFIFO)
    @internal
    def _cmd_End(self, src, args):
        """Handle the `End` command: terminate with exit code `args`.

        Only honored when sent by our parent or our node process.
        """
        if src == self.__parent or src == self.__procimpl._nodeid:
            self._log.debug("`End(%r)` command received, terminating..", args)
            self.exit(args)
        else:
            self._log.warning("Ignoring `End(%r)` command from non-parent(%r)!",
                              args, src)
    @internal
    def _cmd_Setup(self, src, args):
        """Handle the `Setup` command: run the user `setup` method.

        `args` is (seqno, setup-arguments).  Acks `src` with a boolean
        indicating whether `setup` completed without raising.
        """
        seqno, realargs = args
        res = True
        if self.__setup_called:
            self._log.warning("`setup` already called for this process!")
        else:
            self._log.debug("Running `setup` with args %r.", args)
            try:
                self.setup(*realargs)
                self.__setup_called = True
                self._log.debug("`setup` complete.")
            except Exception as e:
                self._log.error("Exception during setup(%r): %r", args, e)
                self._log.debug("%r", e, exc_info=1)
                res = False
        # Flush std streams so any output from `setup` is visible before we
        # ack (streams may be redirected objects without `flush`):
        if hasattr(sys.stdout, 'flush'):
            sys.stdout.flush()
        if hasattr(sys.stderr, 'flush'):
            sys.stderr.flush()
        self._send1(msgtype=Command.SetupAck,
                    message=(seqno, res),
                    to=src,
                    flags=ChannelCaps.RELIABLEFIFO)
@internal
def _cmd_Config(self, src, args):
try:
key, val = args
m = getattr(self, "set_" + key, default=None)
if callable(m):
m(*args)
else:
self._log.warning("Missing setter: %s", key)
except ValueError:
self._log.warning("Corrupted 'Config' command: %r", args)
    @internal
    def _cmd_Message(self, peer_id, message):
        """Handle an application-level message from `peer_id`.

        `message` is a (peer-clock, payload) pair.  Updates the Lamport
        clock (when enabled) and triggers a `ReceivedEvent` so handlers
        and `received` patterns can fire.  Returns False when the message
        was dropped.
        """
        # Simulated failure injection ('receive' probability):
        if self.__fails('receive'):
            self._log.warning(
                "Dropped incoming message due to lottery: %s", message)
            return False
        try:
            peer_clk, payload = message
        except ValueError as e:
            self._log.error("Corrupted message: %r", message)
            return False
        if isinstance(self._logical_clock, int):
            if not isinstance(peer_clk, int):
                # Most likely some peer did not turn on lamport clock, issue
                # a warning and skip this message:
                self._log.warning(
                    "Invalid logical clock value: %r; message dropped. ",
                    peer_clk)
                return False
            # Lamport rule: advance past the sender's clock:
            self._logical_clock = max(self._logical_clock, peer_clk) + 1
        self.__trigger_event(
            pattern.ReceivedEvent(envelope=(peer_clk, None, peer_id),
                                  message=payload))
        return True
    def __trigger_event(self, event):
        """Immediately triggers 'event', skipping the event queue.

        Matches `event` against every registered pattern; on a match,
        records it into the pattern's history (when enabled) and queues
        each handler with the matched bindings.
        """
        for p in self._events:
            bindings = dict()
            if (p.match(event, bindings=bindings, ignore_bound_vars=True,
                        SELF_ID=self._id, **self._state.__dict__)):
                if p.record_history is True:
                    getattr(self, p.name).append(event.to_tuple())
                elif p.record_history is not None:
                    # Call the update stub:
                    p.record_history(getattr(self, p.name), event.to_tuple())
                for h in p.handlers:
                    # Deep-copy the bindings so handlers can't interfere with
                    # each other through shared mutable state:
                    self.__jobq.append((h, copy.deepcopy(bindings)))
def __repr__(self):
res = "<process {}#{}>"
return res.format(self._id, self.__procimpl)
__str__ = __repr__
class NodeProcess(DistProcess):
    """The per-node DistAlgo process.

    Manages node membership (the `_nodes` set), answers name-resolution
    requests from local processes, and bootstraps into an existing network
    of nodes via the router.
    """
    # Node-level ack commands, in addition to the process-level ones:
    AckCommands = DistProcess.AckCommands + [Command.NodeAck]
    def __init__(self, procimpl, forwarder, **props):
        super().__init__(procimpl, forwarder, **props)
        self._router = procimpl.router
        # Ids of all other known node processes:
        self._nodes = set()
    def bootstrap(self):
        """Join an existing network through the router's bootstrap peer.

        Sends `NodeJoin` to the peer and merges the peer's node set into
        ours.  A no-op when the router found no bootstrap peer.
        """
        target = self._router.bootstrap_peer
        if target is None:
            return
        self._nodes.add(target)
        seqno = self._create_cmd_seqno()
        self._register_async_event(Command.NodeAck, seqno)
        if self._send1(Command.NodeJoin,
                       message=(ProcessId.all_named_ids(), seqno),
                       to=target,
                       flags=ChannelCaps.RELIABLEFIFO):
            res = self._sync_async_event(Command.NodeAck, seqno, target)
            newnodes, _ = res[target]
            self._nodes.update(newnodes)
            self._log.debug("Bootstrap success.")
        else:
            self._deregister_async_event(Command.NodeAck, seqno)
            self._log.error("Bootstrap failed! Unable to join existing network.")
    @internal
    def _cmd_Resolve(self, src, args):
        """Handle a `Resolve` request from a local process.

        Answers immediately when the name is known; otherwise registers a
        callback for when it becomes known, optionally bootstrapping to the
        given host/port to learn it.
        """
        procname, hostname, port, seqno = args
        pid = ProcessId.lookup_or_register_callback(
            procname, functools.partial(self._resolve_callback,
                                        src=src, seqno=seqno))
        if pid is not None:
            self._send1(Command.ResolveAck, message=(seqno, pid), to=src)
        elif hostname is not None:
            if port is None:
                port = get_runtime_option('default_master_port')
            self._router.bootstrap_node(hostname, port, timeout=3)
            self.bootstrap()
    @internal
    def _resolve_callback(self, pid, src, seqno):
        """Deliver a late resolution result, and gossip it to peer nodes."""
        super()._resolve_callback(pid, src, seqno)
        # propagate name:
        self._send1(Command.NodePing, message=(seqno, pid), to=self._nodes)
    @internal
    def _cmd_NodeJoin(self, src, args):
        """Handle `NodeJoin`: ack with our known nodes and named ids."""
        _, seqno = args
        self._send1(Command.NodeAck,
                    message=(seqno, (self._nodes, ProcessId.all_named_ids())),
                    to=src,
                    flags=ChannelCaps.RELIABLEFIFO)
        self._nodes.add(src)
    @internal
    def _cmd_NodeLeave(self, src, args):
        """Handle `NodeLeave`: forget the departing node."""
        self._log.debug("%s terminated.", src)
        self._nodes.discard(src)
    @internal
    def _cmd_NodePing(self, src, args):
        """Handle `NodePing`: remember the sender as a live node."""
        self._log.debug("%s is alive.", src)
        self._nodes.add(src)
    def _delayed_start(self):
        """Node main loop: bootstrap, run (or idle), announce departure."""
        common.set_global_config(self._config_object)
        if len(self._nodes) > 0:
            self.bootstrap()
        try:
            if (not get_runtime_option('idle')) and hasattr(self, 'run'):
                return self.run()
            else:
                # Idle node: park forever, serving only message handlers:
                self.hanged()
        except Exception as e:
            self._log.error("Unrecoverable error in node process: %r",
                            e, exc_info=1)
            return -1
        finally:
            # Let peers drop us from their `_nodes` sets:
            self._send1(Command.NodeLeave,
                        message=self._id, to=self._nodes)
class RoutingException(Exception):
    """Base class for errors raised by the message router."""
class CircularRoutingException(RoutingException):
    """Forwarding would loop back to this node (e.g. a stale process id)."""
class BootstrapException(RoutingException):
    """Unable to contact a peer node during bootstrap."""
class NoAvailableTransportException(RoutingException):
    """No transport satisfies the requested channel flags."""
class MessageTooBigException(RoutingException):
    """Serialized message does not fit in the outgoing buffer."""
class InvalidMessageException(RoutingException):
    """Message payload could not be serialized."""
class InvalidRouterStateException(RoutingException):
    """Router is in a state that does not permit the requested operation."""
class RouterCommands(enum.Enum):
    """Control messages for the router."""
    HELLO = 1      # bootstrap greeting carrying our named process ids
    PING = 2       # liveness notification
    BYE = 3        # sender is signing off; drop its id entries
    ACK = 4        # reply to HELLO with node id and named ids
    SENTINEL = 10  # upper bound used to size the dispatch table
class TraceException(BaseException):
    """Base class for trace record/replay errors.

    NOTE(review): derives from `BaseException` (not `Exception`), so
    generic `except Exception` handlers will not swallow these —
    presumably intentional for replay; confirm before changing.
    """
class TraceMismatchException(TraceException):
    """Trace does not match the program being replayed."""
class TraceEndedException(TraceException):
    """No more items remain in the trace."""
class TraceFormatException(TraceException):
    """File is not a valid DistAlgo trace."""
class TraceVersionException(TraceException):
    """Trace was generated by a different DistAlgo version."""
class TraceCorruptedException(TraceException):
    """Trace contents failed validation."""
# Magic bytes identifying a DistAlgo trace file:
TRACE_HEADER = b'DATR'
# Trace type tags (third header field): received-message vs. sent-message log:
TRACE_TYPE_RECV = 0x01
TRACE_TYPE_SEND = 0x02
def process_trace_header(tracefd, trace_type):
    """Verify `tracefd` is a valid trace file, return pid of traced process.

    Checks the magic bytes, DistAlgo version, and trace type tag, then
    loads the traced process' id and its parent's id.

    Returns a (pid, parentid) pair.
    Raises TraceFormatException, TraceVersionException,
    TraceMismatchException, or TraceCorruptedException on invalid input.
    """
    header = tracefd.read(len(TRACE_HEADER))
    if header != TRACE_HEADER:
        raise TraceFormatException('{} is not a DistAlgo trace file.'
                                   .format(tracefd.name))
    header = tracefd.read(len(common.VERSION_BYTES))
    if header != common.VERSION_BYTES:
        raise TraceVersionException(
            '{} was generated by DistAlgo version {}.{}.{}-{}.'
            .format(tracefd.name, *header))
    typ = tracefd.read(1)[0]
    if typ != trace_type:
        # BUGFIX: the format string has three placeholders but was given
        # only two arguments (raising IndexError instead of the intended
        # exception); supply the file name, expected type, and actual type:
        raise TraceFormatException('{}: expecting type {} but is {}'
                                   .format(tracefd.name, trace_type, typ))
    loader = ObjectLoader(tracefd)
    try:
        pid = loader.load()
    except (ImportError, AttributeError) as e:
        # The pickled ids reference the traced program's classes; loading
        # fails when a different program/module is being replayed:
        raise TraceMismatchException(
            "{}, please check the "
            "-m, -Sm, -Sc, or 'file' command line arguments.\n".format(e))
    if not isinstance(pid, ProcessId):
        raise TraceCorruptedException(tracefd.name)
    parentid = loader.load()
    if not isinstance(parentid, ProcessId):
        raise TraceCorruptedException(tracefd.name)
    return pid, parentid
def write_trace_header(pid, parent, trace_type, stream):
    """Write a trace-file header to `stream`.

    Layout matches what `process_trace_header` reads: magic bytes,
    version bytes, one type byte, then the pickled `pid` and `parent`.
    """
    stream.write(TRACE_HEADER)
    stream.write(common.VERSION_BYTES)
    stream.write(bytes([trace_type]))
    dumper = ObjectDumper(stream)
    dumper.dump(pid)
    dumper.dump(parent)
class Router(threading.Thread):
"""The router thread.
Creates an event object for each incoming message, and appends the event
object to the target process' event queue.
"""
    def __init__(self, transport_manager):
        """Initialize router state over `transport_manager`'s transports.

        When the 'record_trace' option is set, registration and sending
        are swapped for their trace-recording variants.
        """
        threading.Thread.__init__(self)
        self.log = logging.getLogger(__name__) \
                          .getChild(self.__class__.__name__)
        self.daemon = True
        self.running = False
        # Messages arriving before run() starts are parked here:
        self.prestart_mesg_sink = []
        self.bootstrap_peer = None
        self.transport_manager = transport_manager
        self.hostname = get_runtime_option('hostname')
        # Usable payload bytes per outgoing message:
        self.payload_size = get_runtime_option('message_buffer_size') - \
                            HEADER_SIZE
        # pid -> incoming message queue for processes on this node:
        self.local_procs = dict()
        # Per-thread serialization buffer (see `_send_remote`):
        self.local = threading.local()
        self.local.buf = None
        self.lock = threading.Lock()
        self._init_dispatch_table()
        if get_runtime_option('record_trace'):
            self.register_local_process = self._record_local_process
            self.send = self._send_and_record
    def register_local_process(self, pid, parent=None):
        """Create and register the incoming message queue for `pid`.

        `parent` is unused here; it exists for signature compatibility with
        `_record_local_process`.
        """
        assert isinstance(pid, ProcessId)
        with self.lock:
            if pid in self.local_procs:
                self.log.warning("Registering duplicate process: %s.", pid)
            self.local_procs[pid] = common.WaitableQueue()
            self.log.debug("Process %s registered.", pid)
    def replay_local_process(self, pid, in_stream, out_stream):
        """Register `pid` with a queue that replays from recorded traces.

        `in_stream`/`out_stream` are the recorded receive/send trace files.
        """
        assert isinstance(pid, ProcessId)
        with self.lock:
            if pid in self.local_procs:
                self.log.warning("Registering duplicate process: %s.", pid)
            self.local_procs[pid] = common.ReplayQueue(in_stream, out_stream)
            self.log.debug("Process %s registered.", pid)
    def _record_local_process(self, pid, parent=None):
        """Register `pid` with a queue that records traces to disk.

        Creates '<pid>.trace' (received messages) and '<pid>.snd' (sent
        messages) under the log directory; the files stay open for the
        queue's lifetime and are closed via the queue (see
        `deregister_local_process`).
        """
        assert isinstance(pid, ProcessId)
        basedir = get_runtime_option('logdir')
        infd = open(os.path.join(basedir,
                                 pid._filename_form_() + ".trace"), "wb")
        outfd = open(os.path.join(basedir,
                                  pid._filename_form_() + ".snd"), "wb")
        write_trace_header(pid, parent, TRACE_TYPE_RECV, infd)
        write_trace_header(pid, parent, TRACE_TYPE_SEND, outfd)
        with self.lock:
            if pid in self.local_procs:
                self.log.warning("Registering duplicate process: %s.", pid)
            self.local_procs[pid] \
                = common.WaitableQueue(trace_files=(infd, outfd))
            self.log.debug("Process %s registered.", pid)
    def deregister_local_process(self, pid):
        """Close and (while running) remove `pid`'s message queue.

        NOTE(review): after `stop()` the queue is closed but left in
        `local_procs` (no `del`, no lock) — presumably safe because the
        router thread is done; confirm.
        """
        if self.running:
            with self.lock:
                if pid in self.local_procs:
                    self.local_procs[pid].close()
                    del self.local_procs[pid]
        else:
            if pid in self.local_procs:
                self.local_procs[pid].close()
    def terminate_local_processes(self):
        """Inject an `End` command into every local process' queue."""
        with self.lock:
            for mq in self.local_procs.values():
                mq.append((common.pid_of_node(), (Command.End, 1)))
def get_queue_for_process(self, pid):
return self.local_procs.get(pid, None)
def bootstrap_node(self, hostname, port, timeout=None):
"""Bootstrap the node.
This function implements bootstrapping at the router level. The
responsibility of `bootstrap_node` is to obtain the process id of a
single existing node process, which is stored into
`self.bootstrap_peer`. The rest will then be handled at the node level.
"""
self.log.debug("boostrap_node to %s:%d...", hostname, port)
self.bootstrap_peer = None
nid = common.pid_of_node()
hellocmd = (RouterCommands.HELLO, ProcessId.all_named_ids())
dummyid = ProcessId(uid=0, seqno=1, pcls=DistProcess, name='',
nodename='', hostname=hostname,
transports=\
tuple(port for _ in range(len(nid.transports))))
self.log.debug("Dummy id: %r", dummyid)
for transport in self.transport_manager.transports:
self.log.debug("Attempting bootstrap using %s...", transport)
try:
self._send_remote(src=nid,
dest=dummyid,
mesg=hellocmd,
transport=transport,
flags=ChannelCaps.BROADCAST)
self.mesgloop(until=(lambda: self.bootstrap_peer),
timeout=timeout)
if self.bootstrap_peer is not None and \
self.bootstrap_peer != common.pid_of_node():
self.log.info("Bootstrap succeeded using %s.", transport)
return
else:
self.log.debug(
"Bootstrap attempt to %s:%d with %s timed out. ",
hostname, port, transport)
self.bootstrap_peer = None
except AuthenticationException as e:
# Abort immediately:
raise e
except (CircularRoutingException, TransportException) as e:
self.log.debug("Bootstrap attempt to %s:%d with %s failed "
": %r", hostname, port, transport, e)
if self.bootstrap_peer is None:
raise BootstrapException("Unable to contact a peer node.")
    def _init_dispatch_table(self):
        """Map each `RouterCommands` value to its `_cmd_<name>` handler."""
        self._dispatch_table = [None] * RouterCommands.SENTINEL.value
        for cmdname, cmd in RouterCommands.__members__.items():
            # Handler names are lowercase: HELLO -> _cmd_hello, etc.
            handlername = '_cmd_' + cmdname.casefold()
            if hasattr(self, handlername):
                self._dispatch_table[cmd.value] = getattr(self, handlername)
    def _cmd_hello(self, src, args):
        """Handle a bootstrap HELLO: reply with our node id and named ids."""
        self.log.debug("HELLO from %r", src)
        self._send_remote(src=None,
                          dest=src,
                          mesg=(RouterCommands.ACK,
                                (common.pid_of_node(),
                                 ProcessId.all_named_ids())),
                          flags=(ChannelCaps.BROADCAST |
                                 ChannelCaps.RELIABLEFIFO))
    def _cmd_ack(self, src, args):
        """Handle a bootstrap ACK: record the peer node id (unblocks
        `bootstrap_node`'s message loop)."""
        self.bootstrap_peer, _ = args
    def _cmd_ping(self, src, args):
        """Handle a PING: liveness notification only, nothing to do."""
        self.log.debug("Pinged from %r: %r", src, args)
    def _cmd_bye(self, src, args):
        """Handle a BYE: drop the departing process' id entry."""
        self.log.debug("%r signed off.", src)
        ProcessId.drop_entry(src)
    def run(self):
        """Router thread main loop.

        First flushes messages that arrived before startup, then loops
        until `stop()` clears `self.running`.  Local processes are told to
        terminate when the loop exits for any reason.
        """
        try:
            self.running = True
            for item in self.prestart_mesg_sink:
                self._dispatch(*item)
            self.prestart_mesg_sink = []
            self.mesgloop(until=(lambda: not self.running))
        except Exception as e:
            self.log.debug("Unhandled exception: %r.", e, exc_info=1)
        self.terminate_local_processes()
    def stop(self):
        """Request router shutdown and terminate local processes."""
        self.running = False
        self.terminate_local_processes()
def send(self, src, dest, mesg, params=dict(), flags=0, impersonate=None):
"""General 'send' under normal operations."""
if impersonate is not None:
src = impersonate
return self._dispatch(src, dest, mesg, params, flags)
    def _send_and_record(self, src, dest, mesg, params=dict(), flags=0,
                         impersonate=None):
        """'send' that records a trace of results.

        Installed in place of `send` when 'record_trace' is set (see
        `__init__`).  The dispatch result is recorded under the real
        `src`, even when the message was sent impersonated.
        """
        if impersonate is not None:
            from_ = impersonate
        else:
            from_ = src
        res = self._dispatch(from_, dest, mesg, params, flags)
        self._record(Command.Message, src, res)
        return res
    def _record(self, rectype, pid, res):
        """Record the results of `new` to the process' 'out' trace."""
        queue = self.local_procs.get(pid, None)
        # This test is necessary because a dead process might still be kept
        # active by user-created threads:
        if queue is not None:
            queue._out_dumper.dump((rectype, res))
    def replay_send(self, src, dest, mesg, params=dict(), flags=0,
                    impersonate=None):
        """'send' that replays results from a recorded trace file.

        `dest`, `mesg`, `params`, `flags`, and `impersonate` are ignored;
        the recorded result for `src`'s next send is returned instead.
        """
        rectype, res = self._replay(src)
        if rectype != Command.Message:
            raise TraceMismatchException('Expecting a send but got {} instead.'
                                         .format(rectype))
        return res
    def _replay(self, targetpid):
        """Load the next recorded (rectype, result) item for `targetpid`.

        Raises TraceEndedException when the send trace is exhausted.
        """
        queue = self.local_procs.get(targetpid, None)
        assert queue is not None
        try:
            return queue._out_loader.load()
        except EOFError as e:
            raise TraceEndedException("No more items in send trace.") from e
    def _send_remote(self, src, dest, mesg, flags=0, transport=None, **params):
        """Forward `mesg` to remote process `dest`.

        Picks a transport from `flags` (unless one is given), serializes
        (src, dest, mesg) into the per-thread buffer, and sends it to
        `dest`'s address.  Raises a `RoutingException` subclass on
        routing/serialization failure.
        """
        self.log.debug("* Received forwarding request: %r to %s with flags=%d",
                       mesg, dest, flags)
        if dest.hostname != self.hostname:
            flags |= ChannelCaps.INTERHOST
        elif dest.transports == self.transport_manager.transport_addresses:
            # dest is not in our local_procs but has same hostname and transport
            # address, so most likely dest is a process that has already
            # terminated. Do not attempt forwarding or else will cause infinite
            # loop:
            raise CircularRoutingException('destination: {}'.format(dest))
        if transport is None:
            transport = self.transport_manager.get_transport(flags)
            if transport is None:
                raise NoAvailableTransportException()
        # Lazily allocate the per-thread serialization buffer:
        if not hasattr(self.local, 'buf') or self.local.buf is None:
            self.local.buf = bytearray(self.payload_size)
        if flags & ChannelCaps.BROADCAST:
            # Broadcasts omit the destination from the payload:
            payload = (src, None, mesg)
        else:
            payload = (src, dest, mesg)
        wrapper = common.BufferIOWrapper(self.local.buf)
        try:
            pickle.dump(payload, wrapper)
        except TypeError as e:
            raise InvalidMessageException("Error pickling {}.".format(payload)) \
                from e
        except OSError as e:
            # Buffer overflow from BufferIOWrapper surfaces as OSError:
            raise MessageTooBigException(
                "** Outgoing message object too big to fit in buffer, dropped.")
        self.log.debug("** Forwarding %r(%d bytes) to %s with flags=%d using %s.",
                       mesg, wrapper.fptr, dest, flags, transport)
        # Zero-copy view of just the serialized bytes:
        with memoryview(self.local.buf)[0:wrapper.fptr] as chunk:
            transport.send(chunk, dest.address_for_transport(transport),
                           **params)
def _dispatch(self, src, dest, payload, params=dict(), flags=0):
if dest in self.local_procs:
if flags & ChannelCaps.BROADCAST:
return True
self.log.debug("Local forward from %s to %s: %r", src, dest, payload)
try:
# Only needs to copy if message is from local to local:
if src in self.local_procs:
payload = copy.deepcopy(payload)
queue = self.local_procs.get(dest, None)
# This extra test is needed in case the destination process
# terminated and de-registered itself:
if queue is not None:
queue.append((src, payload))
return True
except Exception as e:
self.log.warning("Failed to deliver to local process %s: %r",
dest, e)
return False
elif dest is not None:
if not self.running:
# We are still in bootstrap mode, which means this may be a
# message destined for a process that has yet to register, so
# save it in a sink to be dispatched later in run():
self.prestart_mesg_sink.append((src, dest, payload))
return True
try:
self._send_remote(src, dest, payload, flags, **params)
return True
except CircularRoutingException as e:
# This is most likely due to stale process ids, so don't log
# error, just debug:
self.log.debug("Caught %r.", e)
return False
except Exception as e:
self.log.error("Could not send message due to: %r", e)
self.log.debug("Send failed: ", exc_info=1)
return False
else:
# This is a router message
try:
cmd, args = payload
self._dispatch_table[cmd.value](src, args)
return True
except Exception as e:
self.log.warning(
"Caught exception while processing router message from "
"%s(%r): %r", src, payload, e)
self.log.debug("Router dispatch failed: ", exc_info=1)
return False
    def mesgloop(self, until, timeout=None):
        """Pump incoming transport messages until `until()` becomes true.

        Pops ``(transport, chunk, remote)`` triples off the shared incoming
        queue, unpickles each chunk into ``(src, dest, mesg)`` and
        dispatches it.  `until` is re-checked after every pop (or pop
        timeout); if `timeout` (seconds) is given, the loop also ends once
        that much wall-clock time has elapsed overall.
        """
        incomingq = self.transport_manager.queue
        if timeout is not None:
            start = time.time()
            timeleft = timeout
        else:
            # No overall deadline: block indefinitely on an empty queue.
            timeleft = None
        while True:
            # Defaults so the warning below can always name a source:
            transport, remote = "<unknown>", "<unknown>"
            chunk = None
            try:
                transport, chunk, remote = incomingq.pop(block=True,
                                                         timeout=timeleft)
                if transport.data_offset > 0:
                    # Skip the transport's framing header without copying:
                    chunk = memoryview(chunk)[transport.data_offset:]
                # SECURITY NOTE(review): pickle.loads on data received off
                # the wire can execute arbitrary code; this assumes transports
                # only deliver data from trusted peers -- confirm before
                # exposing to untrusted networks.
                src, dest, mesg = pickle.loads(chunk)
                self._dispatch(src, dest, mesg)
            except common.QueueEmpty:
                # Pop timed out; fall through to re-check `until`.
                pass
            except (ImportError, ValueError, pickle.UnpicklingError) as e:
                # Malformed or undecodable message: drop it and keep looping.
                self.log.warning(
                    "Dropped invalid message from %s through %s: %r",
                    remote, transport, e)
            if until():
                break
            if timeout is not None:
                # Recompute the remaining budget for the next pop:
                timeleft = timeout - (time.time() - start)
                if timeleft <= 0:
                    break
def _is_spawning_semantics():
    """True if child processes are created with 'spawn' semantics.

    This holds on Windows (the only start method available there), or on
    any platform where the multiprocessing start method has been explicitly
    set to 'spawn'.
    """
    if sys.platform == 'win32':
        return True
    return multiprocessing.get_start_method(allow_none=True) == 'spawn'
class ProcessContainer:
    """An abstract base class for process containers.

    One ProcessContainer instance runs one DistAlgo process instance.
    Concrete subclasses supply the execution vehicle by also inheriting
    from `multiprocessing.Process` or `threading.Thread`; `run()` below is
    the common entry point invoked by that base class.
    """
    def __init__(self, process_class, transport_manager,
                 process_id=None, parent_id=None,
                 process_name="", cmd_seqno=None, props=None, router=None,
                 replay_file=None):
        """Set up container state.

        `process_class` is the DistProcess subclass to run and
        `transport_manager` provides its network transports.  If
        `replay_file` is given, the container replays a previously recorded
        trace instead of doing live spawns/sends.
        """
        assert issubclass(process_class, DistProcess)
        super().__init__()
        # Logger can not be serialized so it has to be instantiated in the child
        # proc's address space:
        self.before_run_hooks = []      # callables invoked at the top of run()
        self._log = None                # created lazily in run() (see above)
        self._dacls = process_class     # the DistProcess class to instantiate
        self._daobj = None              # the DistProcess instance, set in run()
        self._nodeid = common.pid_of_node()
        self._properties = props if props is not None else dict()
        self.dapid = process_id         # this process' DistAlgo id
        self.daparent = parent_id       # the spawning process' id
        self.router = router
        self.seqno = cmd_seqno
        self._trace_in_fd = None        # receive-trace file (replay mode only)
        self._trace_out_fd = None       # send-trace file (replay mode only)
        if len(process_name) > 0:
            self.name = process_name
        self.transport_manager = transport_manager
        # Bind the child-creation strategy matching the platform's
        # multiprocessing start method:
        if _is_spawning_semantics():
            setattr(self, '_spawn_process', self._spawn_process_spawn)
        else:
            setattr(self, '_spawn_process', self._spawn_process_fork)
        # `spawn` is likewise bound per instance: recording wraps `_spawn`,
        # replay short-circuits it entirely:
        if get_runtime_option('record_trace'):
            self.spawn = self._record_spawn
        elif replay_file is not None:
            self.spawn = self._replay_spawn
            try:
                self._init_replay(replay_file)
            except (Exception, TraceException) as e:
                # NOTE(review): if TraceException derives from Exception this
                # tuple is redundant -- confirm against its definition.
                self.cleanup()
                raise e
        else:
            self.spawn = self._spawn
    def _init_replay(self, filename):
        """Open and validate the receive/send trace file pair for replay.

        `filename` names the '.trace' (receive) file; the matching '.snd'
        (send) file must live in the same directory.  On success the
        process id, parent id and process class are restored from the trace
        headers.

        :raises ValueError: if `filename` lacks the '.trace' suffix.
        :raises TraceMismatchException: if the '.snd' file is missing.
        :raises TraceCorruptedException: if the two trace headers disagree.
        """
        filename = os.path.abspath(filename)
        dirname, tracename = os.path.split(filename)
        if not tracename.endswith('.trace'):
            raise ValueError("Trace file name must have '.trace' suffix: {!r}"
                             .format(tracename))
        sndname = tracename.replace('.trace', '.snd')
        tracename = filename
        self._trace_in_fd = open(tracename, "rb")
        sndname = os.path.join(dirname, sndname)
        try:
            # Probe for the companion send trace before opening it:
            os.stat(sndname)
        except OSError as e:
            raise TraceMismatchException(
                'Missing corresponding send trace file {!r} for {!r}!'
                .format(sndname, tracename)
            ) from e
        self._trace_out_fd = open(sndname, "rb")
        self.dapid, self.daparent \
            = process_trace_header(self._trace_in_fd, TRACE_TYPE_RECV)
        if process_trace_header(self._trace_out_fd, TRACE_TYPE_SEND) \
           != (self.dapid, self.daparent):
            raise TraceCorruptedException(
                "Process Id mismatch in {} and {}!"
                .format(tracename, sndname)
            )
        # The recorded process id carries the original process class:
        self._dacls = self.dapid.pcls
    def cleanup(self):
        """Close any open trace file descriptors (safe to call repeatedly)."""
        if self._trace_in_fd:
            self._trace_in_fd.close()
        if self._trace_out_fd:
            self._trace_out_fd.close()
    def init_router(self):
        """Create this container's Router (starting transports) if absent."""
        if self.router is None:
            self.transport_manager.start()
            self.router = Router(self.transport_manager)
    def start_router(self):
        """Start the router if it is not already running."""
        if not self.router.running:
            self.router.start()
    def end(self):
        """Stop the router, ending message processing for this container."""
        if self.router is not None:
            self.router.stop()
    def is_node(self):
        """True if this container runs the node process itself."""
        return self.dapid == common.pid_of_node()
    def _spawn_process_spawn(self, pcls, name, parent, props, seqno=None,
                             daemon=False):
        """Create one child via the OS-process 'spawn' start method.

        A fresh TransportManager is serialized to the child over a pipe
        (spawned children do not inherit parent state).  Returns the
        child's ProcessId, or None on failure.
        """
        trman = None
        p = None
        cid = None
        parent_pipe = child_pipe = None
        try:
            trman = TransportManager(cookie=self.transport_manager.authkey)
            trman.initialize()
            cid = ProcessId._create(pcls, trman.transport_addresses, name)
            parent_pipe, child_pipe = multiprocessing.Pipe()
            p = OSProcessContainer(process_class=pcls,
                                   transport_manager=trman,
                                   process_id=cid,
                                   parent_id=parent,
                                   process_name=name,
                                   cmd_seqno=seqno,
                                   pipe=child_pipe,
                                   props=props,
                                   daemon=daemon)
            p.start()
            # The parent keeps only its own end of the pipe:
            child_pipe.close()
            trman.serialize(parent_pipe, p.pid)
            # Handshake: wait until the child has rebuilt the transports:
            assert parent_pipe.recv() == 'done'
            if not p.is_alive():
                self._log.error("%r terminated prematurely.", cid)
                cid = None
        except Exception as e:
            cid = None
            self._log.error("Failed to create instance (%s) of %s: %r",
                            name, pcls, e)
            if p is not None and p.is_alive():
                p.terminate()
        finally:
            # Always release the parent's copies of the child's resources:
            if trman is not None:
                trman.close()
            if parent_pipe:
                parent_pipe.close()
        return cid
    def _spawn_process_fork(self, pcls, name, parent, props, seqno=None,
                            daemon=False):
        """Create one child via the OS-process 'fork' start method.

        The child inherits the freshly initialized TransportManager through
        fork, so no pipe handshake is needed.  Returns the child's
        ProcessId, or None on failure.
        """
        trman = None
        p = None
        cid = None
        try:
            trman = TransportManager(cookie=self.transport_manager.authkey)
            trman.initialize()
            cid = ProcessId._create(pcls, trman.transport_addresses, name)
            p = OSProcessContainer(process_class=pcls,
                                   transport_manager=trman,
                                   process_id=cid,
                                   parent_id=parent,
                                   process_name=name,
                                   cmd_seqno=seqno,
                                   props=props,
                                   daemon=daemon)
            p.start()
            # Give the child a brief chance to crash before reporting success:
            p.join(timeout=0.01)
            if not p.is_alive():
                self._log.error("%r terminated prematurely.", cid)
                cid = None
        except Exception as e:
            cid = None
            self._log.error("Failed to create instance (%s) of %s: %r",
                            name, pcls, e)
            if p is not None and p.is_alive():
                p.terminate()
        finally:
            if trman is not None:
                trman.close()
        return cid
    def _spawn_thread(self, pcls, name, parent, props, seqno=None, daemon=False):
        """Create one child running as an OS thread in this process.

        The child shares this container's transport manager and router.
        Returns the child's ProcessId, or None on failure.
        """
        p = None
        cid = None
        try:
            cid = ProcessId._create(pcls,
                                    self.transport_manager.transport_addresses,
                                    name)
            p = OSThreadContainer(process_class=pcls,
                                  transport_manager=self.transport_manager,
                                  process_id=cid,
                                  parent_id=parent,
                                  process_name=name,
                                  cmd_seqno=seqno,
                                  router=self.router,
                                  props=props,
                                  daemon=daemon)
            p.start()
            # Give the child a brief chance to crash before reporting success:
            p.join(timeout=0.01)
            if not p.is_alive():
                self._log.error("%r terminated prematurely.", cid)
                cid = None
        except Exception as e:
            cid = None
            self._log.error("Failed to create instance (%s) of %s: %r",
                            name, pcls, e)
        return cid
    def _spawn(self, pcls, names, parent, props, seqno=None,
               container='process', daemon=False):
        """Spawn one child of class `pcls` for each entry in `names`.

        `container` selects the vehicle ('process' or 'thread') by
        dispatching to the matching `_spawn_*` method.  Non-string entries
        spawn an anonymous child; names containing illegal characters are
        skipped.  Newly named children are announced to the node process.
        Returns the list of created ProcessIds.
        """
        children = []
        # Dynamic dispatch on the container kind ('process' -> _spawn_process,
        # 'thread' -> _spawn_thread):
        spawn_1 = getattr(self, '_spawn_' + container, None)
        if spawn_1 is None:
            self._log.error("Invalid process container: %r", container)
            return children
        newnamed = []
        for name in names:
            if not isinstance(name, str):
                name = ""
            elif not common.check_name(name):
                self._log.error("Name '%s' contains an illegal character(%r).",
                                name, common.ILLEGAL_NAME_CHARS)
                continue
            cid = spawn_1(pcls, name, parent, props, seqno, daemon)
            if cid is not None:
                children.append(cid)
                if len(name) > 0:
                    newnamed.append(cid)
        self._log.debug("%d instances of %s created.",
                        len(children), pcls.__name__)
        if len(newnamed) > 0 and not self.is_node():
            # Propagate names to node
            self.router.send(src=self.dapid, dest=self._nodeid,
                             mesg=(RouterCommands.PING, newnamed),
                             flags=(ChannelCaps.RELIABLEFIFO |
                                    ChannelCaps.BROADCAST))
        return children
    def _record_spawn(self, pcls, names, parent, props, seqno=None,
                      container='process', daemon=False):
        """`spawn` variant that also records the result to the trace."""
        children = self._spawn(pcls, names, parent, props, seqno, container,
                               daemon)
        self.router._record(Command.New, self.dapid, children)
        return children
    def _replay_spawn(self, pcls, names, parent, props, seqno=None,
                      container='process', daemon=False):
        """`spawn` variant that replays the recorded result.

        No children are actually created; the recorded ProcessIds are
        returned instead.

        :raises TraceMismatchException: if the next trace record is not a
            spawn (`Command.New`) record.
        """
        rectype, children = self.router._replay(self.dapid)
        if rectype != Command.New:
            raise TraceMismatchException('Expecting spawn but got {} instead.'
                                         .format(rectype))
        return children
    def run(self):
        """Entry point executed in the child thread/process.

        Runs the registered before-run hooks, wires the router (live or
        replay mode), instantiates the DistProcess object and hands control
        to it.  Returns an exit code: the user program's result on success,
        1 on KeyboardInterrupt, 2 on routing errors, 3 on trace errors, 5
        on unexpected errors.
        """
        self._log = logger.getChild(self.__class__.__qualname__)
        if len(self.name) == 0:
            self.name = str(self.pid)
        try:
            for hook in self.before_run_hooks:
                hook()
            self.init_router()
            if not self._trace_out_fd:
                forwarder = self.router.send
                self.router.register_local_process(self.dapid, self.daparent)
            else:
                # Replay mode: sends come back from the recorded trace:
                forwarder = self.router.replay_send
                self.router.replay_local_process(self.dapid,
                                                 self._trace_in_fd,
                                                 self._trace_out_fd)
            self.start_router()
            self._daobj = self._dacls(self, forwarder, **(self._properties))
            self._log.debug("Process object initialized.")
            return self._daobj._delayed_start()
        except DistProcessExit as e:
            self._log.debug("Caught %r, exiting gracefully.", e)
            return e.exit_code
        except RoutingException as e:
            self._log.debug("Caught %r.", e)
            return 2
        except TraceException as e:
            self._log.error("%r occurred.", e)
            self._log.debug(e, exc_info=1)
            return 3
        except KeyboardInterrupt as e:
            self._log.debug("Received KeyboardInterrupt, exiting")
            return 1
        except Exception as e:
            self._log.error("Unexpected error: %r", e, exc_info=1)
            return 5
        finally:
            # Always de-register and close trace files, whatever the outcome:
            if self.router is not None:
                self.router.deregister_local_process(self.dapid)
            self.cleanup()
class OSProcessContainer(ProcessContainer, multiprocessing.Process):
    """An implementation of processes using OS process.
    """
    def __init__(self, daemon=False, pipe=None, **rest):
        """`pipe` is the child end of the handshake pipe; it is used only
        under 'spawn' semantics, where the transport manager state must be
        re-created in the child (see `_init_for_spawn`).
        """
        super().__init__(**rest)
        self.daemon = daemon
        self.pipe = pipe
        if _is_spawning_semantics():
            # Transports cannot be inherited under 'spawn'; rebuild them in
            # the child before user code runs:
            self.before_run_hooks.append(self._init_for_spawn)
    def _debug_handler(self, sig, frame):
        """Signal handler that drops into the attached debugger."""
        # NOTE(review): `self._debugger` is not set anywhere in this class;
        # presumably it is installed externally before the handler is
        # registered -- confirm.
        self._debugger.set_trace(frame)
    def _init_for_spawn(self):
        """Re-establish fork-inherited state when running under 'spawn'.

        Runs in the child (as a before-run hook): restores the node id,
        then rebuilds the transports from the data the parent serialized
        over `self.pipe`.
        """
        common._set_node(self._nodeid)
        assert self.pipe is not None
        self.transport_manager.initialize(pipe=self.pipe)
        # The pipe is single-use; drop the attribute so it cannot leak:
        del self.pipe
class OSThreadContainer(ProcessContainer, threading.Thread):
    """An implementation of processes using OS threads.
    """
    def __init__(self, daemon=False, **rest):
        """Thread-based containers share the parent's transport manager and
        router (both passed through `rest`); only the thread daemon flag is
        handled here.
        """
        super().__init__(**rest)
        self.daemon = daemon
| 1.53125 | 2 |
Module-02-Comprehensions/01_list_comprehensions_01.py | CodingGearsCourses/Python-Advanced-Concepts | 0 | 12764020 | <filename>Module-02-Comprehensions/01_list_comprehensions_01.py
# Copyright https://www.globaletraining.com/
# List comprehensions provide a concise way to create lists.
my_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
def main():
final_list = []
for n in my_list:
final_list.append(n + 10)
print(final_list)
# TODO: Using Comprehension
final_list_comp1 = [n + 10 for n in my_list]
print(final_list_comp1)
# TODO: Using Comprehension & condition
final_list_comp2 = [n + 10 for n in my_list if n % 2 == 0]
print(final_list_comp2)
if __name__ == '__main__':
main() | 4.1875 | 4 |
ggb/utils/image.py | reshalfahsi/GGB | 1 | 12764021 | <filename>ggb/utils/image.py
from ggb.utils.constant import ColorSpace, CVLib
from ggb.utils.error import ComputerVisionLibraryError
import numpy as np
class GGBImage(object):
"""Image handler for GGB.
:param image: image source either from path or variable
:param backend: computer vision library which handle the task
:param kwargs: dict of custom variable
"""
def __init__(self, image=None, backend=CVLib.OPENCV, **kwargs):
allowed_kwargs = {'inverse'}
for k in kwargs:
if k not in allowed_kwargs:
raise TypeError('Unexpected keyword argument '
'passed to GGBImage: ' + str(k))
assert(isinstance(backend, CVLib))
self._inverse = False
if 'inverse' in kwargs:
self._inverse = True if kwargs['inverse'] else False
self._backend = backend
self._image = self._read(image)
def backend(self):
"""Check which computer vision library is used as backend.
:return: type of computer vision library
"""
return self._backend
def _read(self, source):
"""Read image from source.
:param source: image source either from path or variable
:return: image variable
"""
if isinstance(source, str):
if self._backend == CVLib.OPENCV:
import cv2
return cv2.imread(path)
else:
from PIL import Image
return Image.open(path).convert('RGB')
else:
if isinstance(source, np.ndarray):
self._backend = CVLib.OPENCV
assert((source.ndim == 3) and ((source.shape[-1] == 3) or (source.shape[-1] == 4)))
else:
try:
source = source.convert('RGB')
self._backend = CVLib.PIL
except:
raise ComputerVisionLibraryError
return source
def write(self, path=None):
"""Write the image into a file when path is not None or variable when path is None.
:param path: path to file
"""
if path is None:
if self._backend == CVLib.OPENCV:
image = self._image.astype('uint8')
return image
return self._image
else:
if self._backend == CVLib.OPENCV:
import cv2
image = self._image.astype('uint8')
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) if self._inverse else image
cv2.imwrite(path, image)
else:
self._image.save(path)
def show(self):
"""Show the image.
"""
if self._backend == CVLib.OPENCV:
import cv2
image = self._image.astype('uint8')
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) if self._inverse else image
cv2.imshow("GGB", image)
cv2.waitKey(0)
else:
self._image.show()
| 3.015625 | 3 |
train_beam_search.py | kasnerz/neuralmonkey-ctc-decoder | 0 | 12764022 | #!/usr/bin/env python3
from typing import List
import numpy as np
import copy
import pprint as pp
from scipy.misc import logsumexp
from scipy.stats import beta
from neuralmonkey.vocabulary import Vocabulary
from n_gram_model import NGramModel
from hypothesis import Hypothesis, ExpandFunction
from beam_search import score_hypothesis, compute_feature, \
log_softmax, expand_null, empty_hypothesis
def list_startswith(list1, list2):
return all([token1 == token2 for token1, token2 in zip(list1, list2)])
def update_weights(violation_hyp: Hypothesis, target_hyp: Hypothesis,
weights: dict, states_cnt: int):
LEARNING_RATE = 0.0005
for key in weights.keys():
weights[key] += LEARNING_RATE * (compute_feature(key, target_hyp, states_cnt) -
compute_feature(key, violation_hyp, states_cnt))
def add_expanded_hyp(
ctc_table: np.ndarray,
weights: dict,
row: int,
col: int,
candidate_hyp: Hypothesis,
parent: (int, int)):
current_hyp = ctc_table[row, col]
weights = {
"lm_score" : 0.0,
"null_trailing" : 0.0,
"null_token_ratio" : 0.0
}
if current_hyp:
score_current = score_hypothesis(current_hyp[0], weights, 0)
score_candidate = score_hypothesis(candidate_hyp, weights, 0)
if score_candidate <= score_current:
return
candidate_hyp.recombine_with(current_hyp[0])
ctc_table[row, col] = (candidate_hyp, parent)
def ctc_path(
target: List,
log_prob_table: np.ndarray,
weights: dict,
lm: NGramModel,
vocabulary: Vocabulary) -> List[Hypothesis]:
rows = len(target) + 1
time_steps = len(log_prob_table)
# error in data, target cannot be decoded
if time_steps < len(target):
return None
ctc_table = np.empty(shape=(rows, time_steps), dtype=tuple)
# fill the starting cell with the empty hypothesis
ctc_table[0,0] = (empty_hypothesis(), None)
for time in range(time_steps-1):
null_log_prob = log_prob_table[time, -1]
# fill only the space around the diagonal
min_row = max(0, rows - (time_steps-time))
max_row = min(time + 1, len(target))
for row in range(min_row, max_row):
hyp = ctc_table[row, time][0]
next_token = target[row]
next_token_idx = vocabulary.word_to_index[next_token]
# add eps
expanded = expand_null(hyp, null_log_prob)
add_expanded_hyp(ctc_table, weights, row, time+1,
candidate_hyp=expanded, parent=(row, time))
# add next token
next_token_score = log_prob_table[time, next_token_idx]
expanded = lm.expand_token(hyp, next_token, next_token_score)
add_expanded_hyp(ctc_table, weights, row+1, time+1,
candidate_hyp=expanded, parent=(row, time))
# reconstruct path
path = []
hyp = ctc_table[rows-1, time_steps-1]
# error in data
if hyp is None:
return None
while True:
path.append(hyp[0])
prev_idx = hyp[1]
if prev_idx is None:
break
hyp = ctc_table[prev_idx]
path.reverse()
return path
def train_weights(
logits_table: np.ndarray,
beam_width: int,
vocabulary: Vocabulary,
target: list,
weights: dict,
lm: NGramModel) -> List[str]:
assert beam_width >= 1
log_prob_table = log_softmax(logits_table)
hypotheses = [empty_hypothesis()]
time_steps = log_prob_table.shape[0]
target_hyp_path = ctc_path(target, log_prob_table, weights, lm, vocabulary)
# error in data
if target_hyp_path is None:
return
states_cnt = len(log_prob_table)
for time in range(len(log_prob_table)-1):
log_probs = log_prob_table[time]
null_log_prob = log_probs[-1]
token_log_probs = log_probs[:-1]
new_hypotheses = []
str_to_hyp = {}
for hyp in hypotheses:
expanded = expand_null(hyp, null_log_prob)
str_to_hyp[" ".join(expanded.tokens)] = (
expanded, len(new_hypotheses))
new_hypotheses.append(expanded)
best_tokens = np.argpartition(
-token_log_probs, 2 * beam_width)[:2 * beam_width]
best_scores = token_log_probs[best_tokens]
for hyp_index, hyp in enumerate(hypotheses):
for token_index, score in zip(best_tokens, best_scores):
token = vocabulary.index_to_word[token_index]
expanded = lm.expand_token(hyp, token, score)
score = score_hypothesis(expanded, weights, states_cnt)
hyp_str = " ".join(expanded.tokens)
if hyp_str in str_to_hyp:
orig_hyp, hyp_index = str_to_hyp[hyp_str]
expanded.recombine_with(orig_hyp)
new_hypotheses[hyp_index] = expanded
str_to_hyp[hyp_str] = (expanded, hyp_index)
else:
str_to_hyp[hyp_str] = (expanded, len(new_hypotheses))
new_hypotheses.append(expanded)
target_candidates_indices = [i for i, h in enumerate(new_hypotheses)
if list_startswith(target, h.tokens)]
new_scores = np.array([score_hypothesis(h, weights, states_cnt)
for h in new_hypotheses])
target_candidates = [new_hypotheses[i]
for i in target_candidates_indices]
target_candidates_tokens_cnt = np.array([len(h.tokens)
for h in target_candidates])
best_hyp_indices = np.argsort(-new_scores)
target_hyp_ranks = np.in1d(best_hyp_indices, target_candidates_indices).nonzero()[0]
hypotheses = [new_hypotheses[i] for i in best_hyp_indices[:beam_width]]
# hypotheses are out of the beam or no hypotheses can be finished in time
if (all(target_hyp_ranks >= beam_width) or
all(target_candidates_tokens_cnt + (time_steps - time) < len(target))):
for i in range(beam_width):
violation_hyp = hypotheses[i]
target_hyp = target_hyp_path[time+1]
update_weights(violation_hyp, target_hyp, weights, states_cnt)
return | 2.25 | 2 |
architectures/abel_tune_svm_with_embeddings.py | jimmycallin/master-thesis | 2 | 12764023 | <reponame>jimmycallin/master-thesis<filename>architectures/abel_tune_svm_with_embeddings.py<gh_stars>1-10
from subprocess import call
from word_embedding_paths import word_embeddings
from os.path import exists
from os import mkdir
for c in [2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048]:
name = "svmrbf-c" + str(c)
if exists("/usit/abel/u1/jimmycallin/models/" + name):
print("Model " + name + " already trained, skipping...")
continue
else:
mkdir("/usit/abel/u1/jimmycallin/models/" + name)
print("Training " + name)
call(["sbatch", "--job-name", name, "--output=/usit/abel/u1/jimmycallin/models/" + name + "/stdout.txt","/usit/abel/u1/jimmycallin/architectures/abel_tune_svm.sh", name, "/word_embeddings/precompiled/glove/size=50.embeddings", str(c)])
| 2 | 2 |
VRP_final.py | Jayanth07/Vehicle-Routing-Problem | 0 | 12764024 | from __future__ import print_function
from six.moves import xrange
from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import routing_enums_pb2
import googlemaps
gmaps = googlemaps.Client(key='******API_Key******') # Replace with the Google Distance Matrix API Key...
class DataProblem():
"""Stores the data for the problem"""
def __init__(self, location, num_vehicles):
"""Initializes the data for the problem"""
self._num_vehicles = num_vehicles
self._locations = [(loc[0], loc[1]) for loc in location]
self._depot = 0
@property
def num_vehicles(self):
"""Gets number of vehicles"""
return self._num_vehicles
@property
def locations(self):
"""Gets locations"""
return self._locations
@property
def num_locations(self):
"""Gets number of locations"""
return len(self.locations)
@property
def depot(self):
"""Gets depot location index"""
return self._depot
def google_distanceNduration(pos1, pos2):
dist = gmaps.distance_matrix(pos1, pos2)
return dist['rows'][0]['elements'][0]['distance']['value'], dist['rows'][0]['elements'][0]['duration']['value']
class CreateDistanceEvaluator(object):
"""Creates callback to return distance between points."""
def __init__(self, data):
"""Initializes the distance matrix."""
self._distances = {}
# Computing distance between location to have distance callback in O(1)
for from_node in xrange(data.num_locations):
self._distances[from_node] = {}
for to_node in xrange(data.num_locations):
if from_node == to_node:
self._distances[from_node][to_node] = 0
else:
self._distances[from_node][to_node],_ = google_distanceNduration(
data.locations[from_node],
data.locations[to_node])
def distance_evaluator(self, from_node, to_node):
"""Returns the manhattan distance between the two nodes"""
return self._distances[from_node][to_node]
def add_distance_dimension(routing, distance_evaluator, max_vehicle_distance):
"""Add Global Span constraint"""
distance = "Distance"
routing.AddDimension(
distance_evaluator,
0, # null slack
max_vehicle_distance, # maximum distance per vehicle
True, # start cumul to zero
distance)
distance_dimension = routing.GetDimensionOrDie(distance)
distance_dimension.SetGlobalSpanCostCoefficient(100)
class ConsolePrinter():
"""Print solution to console"""
def __init__(self, data, routing, assignment):
"""Initializes the printer"""
self._data = data
self._routing = routing
self._assignment = assignment
@property
def data(self):
"""Gets problem data"""
return self._data
@property
def routing(self):
"""Gets routing model"""
return self._routing
@property
def assignment(self):
"""Gets routing model"""
return self._assignment
def print(self):
"""Prints assignment on console"""
# Inspect solution.
total_dist = 0
total_time = 0
for vehicle_id in xrange(self.data.num_vehicles):
index = self.routing.Start(vehicle_id)
plan_output = 'Route for vehicle {0}:\n'.format(vehicle_id)
route_dist = 0
route_time = 0
while not self.routing.IsEnd(index):
node_index = self.routing.IndexToNode(index)
next_node_index = self.routing.IndexToNode(
self.assignment.Value(self.routing.NextVar(index)))
dist, time = google_distanceNduration(
self.data.locations[node_index],
self.data.locations[next_node_index])
route_dist += dist
route_time += time
plan_output += ' {0} -> '.format(node_index)
index = self.assignment.Value(self.routing.NextVar(index))
node_index = self.routing.IndexToNode(index)
total_dist += route_dist
total_time += route_time
plan_output += ' {0}\n'.format(node_index)
plan_output += 'Distance of the route: {0}m\n'.format(route_dist)
print(plan_output)
print('Total Distance of all routes: {0}m'.format(total_dist))
print('Total Time of all routes: {0}min'.format(total_time/60))
def main():
"""Entry point of the program"""
# Input locations.
inp = eval(input()) # Input from node.js
location = inp['location']
num_vehicles = inp['num_vehicles']
max_vehicle_distance = inp['max_vehicle_distance']
# Instantiate the data problem.
data = DataProblem(location, num_vehicles)
# Create Routing Model
routing = pywrapcp.RoutingModel(data.num_locations, data.num_vehicles, data.depot)
# Define weight of each edge
distance_evaluator = CreateDistanceEvaluator(data).distance_evaluator
routing.SetArcCostEvaluatorOfAllVehicles(distance_evaluator)
add_distance_dimension(routing, distance_evaluator, max_vehicle_distance)
# Setting first solution heuristic (cheapest addition).
search_parameters = pywrapcp.RoutingModel.DefaultSearchParameters()
search_parameters.first_solution_strategy = (
routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
# Solve the problem.
assignment = routing.SolveWithParameters(search_parameters)
printer = ConsolePrinter(data, routing, assignment)
printer.print()
if __name__ == '__main__':
main()
| 2.953125 | 3 |
data/urls.py | tblxdezhu/STP | 0 | 12764025 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/18 1:13 PM
# @Author : <NAME>
# @File : urls.py
# @Software: Pycharm professional
from django.conf.urls import include, url
from data import views
urlpatterns = [
url(r'^large$', views.large_data),
url(r'^mini$', views.mini_data),
url(r'^error$', views.error_data)
]
| 1.609375 | 2 |
parser_methods.py | splunk-soar-connectors/parser | 0 | 12764026 | <reponame>splunk-soar-connectors/parser
# File: parser_methods.py
#
# Copyright (c) 2017-2022 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
import csv
import re
import struct
import sys
import zipfile
from html import unescape
from urllib.parse import urlparse
import pdfminer
from bs4 import BeautifulSoup, UnicodeDammit
from defusedxml import ElementTree
from defusedxml.common import EntitiesForbidden
from django.core.validators import URLValidator
try:
from cStringIO import StringIO
except Exception:
from io import StringIO
import threading
import time
import phantom.app as phantom
import phantom.utils as ph_utils
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from pdfminer.pdftypes import PDFObjectNotFound, PDFObjRef, PDFStream
from pdfminer.psparser import PSKeyword, PSLiteral
from pdfminer.utils import isnumber
_container_common = {
"run_automation": False # Don't run any playbooks, when this artifact is added
}
URI_REGEX = r"h(?:tt|xx)p[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
EMAIL_REGEX = r"\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,}\b"
EMAIL_REGEX2 = r'".*"@[A-Z0-9.-]+\.[A-Z]{2,}\b'
HASH_REGEX = r"\b[0-9a-fA-F]{32}\b|\b[0-9a-fA-F]{40}\b|\b[0-9a-fA-F]{64}\b"
IP_REGEX = r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
IPV6_REGEX = r'\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|'
IPV6_REGEX += r'(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)'
IPV6_REGEX += r'(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))'
IPV6_REGEX += r'|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})'
IPV6_REGEX += r'|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|'
IPV6_REGEX += r'(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})'
IPV6_REGEX += r'|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)'
IPV6_REGEX += r'(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|'
IPV6_REGEX += r'(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})'
IPV6_REGEX += r'|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)'
IPV6_REGEX += r'(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|'
IPV6_REGEX += r'(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})'
IPV6_REGEX += r'|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)'
IPV6_REGEX += r'(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|'
IPV6_REGEX += r'(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})'
IPV6_REGEX += r'|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)'
IPV6_REGEX += r'(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|'
IPV6_REGEX += r'(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)'
IPV6_REGEX += r'(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*'
DOMAIN_REGEX = r'(?!:\/\/)((?:[a-zA-Z0-9-_]+\.)*[a-zA-Z0-9][a-zA-Z0-9-_]+\.[a-zA-Z]{2,11})'
ESCAPE = set(map(ord, '&<>"'))
def _extract_domain_from_url(url):
domain = phantom.get_host_from_url(url)
if domain and not _is_ip(domain):
return domain
return None
def _is_ip(input_ip):
if ph_utils.is_ip(input_ip):
return True
if is_ipv6(input_ip):
return True
return False
def _is_url(input_url):
validate_url = URLValidator(schemes=['http', 'https'])
try:
validate_url(input_url)
return True
except Exception:
return False
def is_ipv6(input_ip):
return bool(re.match(IPV6_REGEX, input_ip))
def _refang_url(url):
parsed = urlparse(url)
scheme = parsed.scheme
# Replace hxxp/hxxps with http/https
if scheme == "hxxp":
parsed = parsed._replace(scheme='http')
elif scheme == "hxxps":
parsed = parsed._replace(scheme='https')
refang_url = parsed.geturl()
return refang_url
def _clean_url(url):
url = url.strip('>),.]\r\n')
# Check before splicing, find returns -1 if not found
# _and_ you will end up splicing on -1 (incorrectly)
if '<' in url:
url = url[:url.find('<')]
if '>' in url:
url = url[:url.find('>')]
url = _refang_url(url)
return url
def _get_error_message_from_exception(e):
""" This method is used to get appropriate error message from the exception.
:param e: Exception object
:return: error message
"""
error_msg = "Unknown error occured. Please check asset configuration and/or action parameters"
error_code = "Error code unavailable"
try:
if hasattr(e, 'args'):
if len(e.args) > 1:
error_code = e.args[0]
error_msg = e.args[1]
elif len(e.args) == 1:
error_code = "Error code unavailable"
error_msg = e.args[0]
else:
error_msg = "Unknown error occured. Please check asset configuration and/or action parameters"
error_code = "Error code unavailable"
else:
error_code = "Error code unavailable"
error_msg = "Error message unavailable. Please check the action parameters."
except Exception:
error_code = "Error code unavailable"
error_msg = "Error message unavailable. Please check the action parameters."
return error_code, error_msg
class TextIOCParser:
    """Extract IOC (indicator of compromise) artifacts from free text.

    Each pattern dict describes one IOC type:
      'cef'       -- name of the CEF field to populate
      'pattern'   -- regex to search with
      'name'      -- name of the produced artifact
      'clean'     -- optional post-processing of the matched string (returns a string)
      'validator' -- optional false-positive filter (returns True/False)
      'subtypes'  -- optional additional IOCs to derive from a matched one
    """
    BASE_PATTERNS = [
        {
            'cef': 'sourceAddress',     # Name of CEF field
            'pattern': IP_REGEX,        # Regex to match
            'name': 'IP Artifact',      # Name of artifact
            'validator': _is_ip         # Additional function to verify matched string (should return True or False)
        },
        {
            'cef': 'sourceAddress',
            'pattern': IPV6_REGEX,
            'name': 'IP Artifact',
            'validator': _is_ip
        },
        {
            'cef': 'requestURL',
            'pattern': URI_REGEX,
            'name': 'URL Artifact',
            'clean': _clean_url,        # Additional cleaning of data from regex (should return a string)
            'validator': _is_url
        },
        {
            'cef': 'fileHash',
            'pattern': HASH_REGEX,
            'name': 'Hash Artifact'
        },
        {
            'cef': 'email',
            'pattern': EMAIL_REGEX,
            'name': 'Email Artifact'
        },
        {
            'cef': 'email',
            'pattern': EMAIL_REGEX2,
            'name': 'Email Artifact'
        }
    ]
    DOMAIN_PATTERN = {
        'cef': 'destinationDnsDomain',  # Name of CEF field
        'pattern': DOMAIN_REGEX,        # Regex to match
        'name': 'Domain Artifact'
    }
    URL_DOMAIN_SUBTYPES_DICT = {
        'subtypes': [                   # Additional IOCs to find in a matched one
            # If you really wanted to, you could also have subtypes in the subtypes
            {
                'cef': 'destinationDnsDomain',
                'name': 'Domain Artifact',
                'callback': _extract_domain_from_url  # Method to extract substring
            }
        ]
    }
    EMAILS_DOMAIN_SUBTYPES_DICT = {
        'subtypes': [
            {
                'cef': 'destinationDnsDomain',
                'name': 'Domain Artifact',
                'callback': lambda x: x[x.rfind('@') + 1:],
                'validator': lambda x: not _is_ip(x)
            }
        ]
    }
    # Kept for backward compatibility with any external readers; the real,
    # per-parser state is created in __init__ (the class-level set used to be
    # shared by every instance, so values found by one parse suppressed
    # artifacts in every later parse).
    found_values = set()

    def __init__(self, parse_domains, patterns=None):
        """Build the parser.

        parse_domains: when True, also derive domain artifacts from matched
            URLs/emails (and fall back to a bare domain regex when no URL
            pattern is configured).
        patterns: optional replacement pattern list; defaults to BASE_PATTERNS.
        """
        source_patterns = self.BASE_PATTERNS if patterns is None else patterns
        # Copy each pattern dict: the update()/append() calls below used to
        # mutate the class-level BASE_PATTERNS dicts in place, so every later
        # instance inherited the subtypes regardless of parse_domains.
        self.patterns = [dict(p) for p in source_patterns]
        # Per-instance dedupe set (fix: was a shared class attribute).
        self.found_values = set()
        if parse_domains:
            # Add the subtypes domain parsing functions only if parse_domains is True
            is_email = True
            for pattern_dict in self.patterns:
                if pattern_dict.get("cef") == "requestURL" and pattern_dict.get("pattern") == URI_REGEX:
                    pattern_dict.update(self.URL_DOMAIN_SUBTYPES_DICT)
                    is_email = False
                elif pattern_dict.get("cef") == "email" and pattern_dict.get("pattern") in [EMAIL_REGEX, EMAIL_REGEX2]:
                    pattern_dict.update(self.EMAILS_DOMAIN_SUBTYPES_DICT)
            if is_email:
                # No URL pattern configured: match domains directly.
                self.patterns.append(dict(self.DOMAIN_PATTERN))
        self.added_artifacts = 0

    def _create_artifact(self, artifacts, value, cef, name):
        """Append a new artifact dict to *artifacts* and record *value* as seen."""
        artifact = {}
        artifact['source_data_identifier'] = self.added_artifacts
        artifact['cef'] = {cef: value}
        artifact['name'] = name
        artifacts.append(artifact)
        self.added_artifacts += 1
        self.found_values.add(value)

    def _parse_ioc_subtype(self, artifacts, value, subtype):
        """Derive a secondary IOC (e.g. the domain of a URL) and process it."""
        callback = subtype.get('callback')
        if callback:
            sub_val = callback(value)
            self._pass_over_value(artifacts, sub_val, subtype)

    def _pass_over_value(self, artifacts, value, ioc):
        """Clean, validate and dedupe one matched value; emit it if it survives."""
        validator = ioc.get('validator')
        clean = ioc.get('clean')
        subtypes = ioc.get('subtypes', [])
        if not value:
            return
        if value in self.found_values:
            return
        if clean:
            value = clean(value)
        if validator and not validator(value):
            return
        self._create_artifact(artifacts, value, ioc['cef'], ioc['name'])
        for st in subtypes:
            self._parse_ioc_subtype(artifacts, value, st)

    def parse_to_artifacts(self, text):
        """Run every configured pattern over *text* and return the artifact list."""
        artifacts = []
        for ioc in self.patterns:
            regexp = re.compile(ioc['pattern'], re.IGNORECASE)
            found = regexp.findall(text)
            for match in found:
                if type(match) == tuple:
                    # Regexes with capture groups yield tuples; check each group.
                    for x in match:
                        self._pass_over_value(artifacts, x, ioc)
                else:
                    self._pass_over_value(artifacts, match, ioc)
        return artifacts

    def add_artifact(self, text):
        """Wrap the raw text itself in a 'Raw Text Artifact' and return it."""
        artifact = {}
        artifact['source_data_identifier'] = self.added_artifacts
        artifact['cef'] = {"message": text}
        artifact['name'] = "Raw Text Artifact"
        self.added_artifacts += 1
        self.found_values.add(text)
        return artifact
def _grab_raw_text(action_result, txt_file):
    """ This function will actually really work for any file which is basically raw text.
    html, rtf, and the list could go on

    Returns (phantom.APP_SUCCESS, decoded_text) on success or
    (action_result error status, None) on failure.
    """
    try:
        # with-statement fixes the file handle leak when read/decode raises
        with open(txt_file, 'rb') as fp:
            text = UnicodeDammit(fp.read()).unicode_markup
        return phantom.APP_SUCCESS, text
    except Exception as e:
        error_code, error_msg = _get_error_message_from_exception(e)
        err = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
        return action_result.set_status(phantom.APP_ERROR, err), None
class PDFXrefObjectsToXML:
    """
    Class contains the methods to convert the PDF cross reference table (xref) objects to XML.
    The xref is the index by which all of the indirect objects, in the PDF file are located.
    https://labs.appligent.com/pdfblog/pdf_cross_reference_table/
    """
    @classmethod
    def encode(cls, data):
        """Encode bytes as XML-safe text (control/high/escape bytes become numeric char refs)."""
        buf = StringIO()
        for byte in data:
            if byte < 32 or 127 <= byte or byte in ESCAPE:
                buf.write('&#{};'.format(byte))
            else:
                buf.write(chr(byte))
        return buf.getvalue()

    @classmethod
    def dump_xml(cls, text, obj):
        """Convert one PDF xref object to XML, appending to *text* and returning the result."""
        if obj is None:
            text += '<null />'
            return text
        if isinstance(obj, dict):
            text += '<dict size="{}">\n'.format(len(obj))
            for (key, value) in obj.items():
                text += '<key>\n{}\n</key>\n'.format(key)
                text += '<value>'
                text = cls.dump_xml(text, value)
                text += '</value>\n'
            text += '</dict>'
            return text
        if isinstance(obj, list):
            text += '<list size="{}">\n'.format(len(obj))
            for value in obj:
                text = cls.dump_xml(text, value)
                text += '\n'
            text += '</list>'
            return text
        if isinstance(obj, bytes):
            text += '<string size="{}">\n{}\n</string>'.format(len(obj), cls.encode(obj))
            return text
        if isinstance(obj, PDFStream):
            text += '<stream>\n<props>\n'
            text = cls.dump_xml(text, obj.attrs)
            text += '\n</props>\n'
            text += '</stream>'
            return text
        if isinstance(obj, PDFObjRef):
            text += '<ref id="{}" />'.format(obj.objid)
            return text
        if isinstance(obj, PSKeyword):
            text += '<keyword>\n{}\n</keyword>'.format(obj.name)
            return text
        if isinstance(obj, PSLiteral):
            text += '<literal>\n{}\n</literal>'.format(obj.name)
            return text
        if isnumber(obj):
            text += '<number>\n{}\n</number>'.format(obj)
            return text
        raise TypeError("Unable to extract the object from PDF. Reason: {}".format(obj))

    @classmethod
    def dump_trailers(cls, text, doc):
        """Iterate through xrefs and convert the trailer of each xref to XML."""
        for xref in doc.xrefs:
            text += '<trailer>\n'
            # BUGFIX: strings are immutable, so dump_xml returns the grown
            # string. The old code discarded the return value, silently
            # dropping the trailer contents from the output.
            text = cls.dump_xml(text, xref.trailer)
            text += '\n</trailer>\n\n'
        return text

    @classmethod
    def convert_objects_to_xml_text(cls, text, doc):
        """Iterate through xrefs and convert the objects of each xref to XML."""
        visited = set()
        text += '<pdf>'
        for xref in doc.xrefs:
            for obj_id in xref.get_objids():
                if obj_id in visited:
                    continue
                visited.add(obj_id)
                try:
                    obj = doc.getobj(obj_id)
                    if obj is None:
                        continue
                    text += '<object id="{}">\n'.format(obj_id)
                    text = cls.dump_xml(text, obj)
                    text += '\n</object>\n\n'
                except PDFObjectNotFound as e:
                    raise PDFObjectNotFound('While converting PDF to xml objects PDF object not found.'
                                            ' Reason: {}'.format(e))
        # BUGFIX: capture the return value; trailers were previously dropped.
        text = cls.dump_trailers(text, doc)
        text += '</pdf>'
        return text

    @classmethod
    def pdf_xref_objects_to_xml(cls, pdf_file):
        """Converts PDF cross reference table(xref) objects to XML
        The xref is the index by which all of the indirect objects, in the PDF file are located.
        https://labs.appligent.com/pdfblog/pdf_cross_reference_table/
        """
        text = ''
        with open(pdf_file, 'rb') as fp:
            parser = PDFParser(fp)
            doc = PDFDocument(parser)
            text = cls.convert_objects_to_xml_text(text, doc)
        return text
def _pdf_to_text(action_result, pdf_file):
    """Extract a PDF's page text plus its xref objects rendered as XML.

    Returns (phantom.APP_SUCCESS, text) or (action_result error status, None).
    """
    try:
        pagenums = set()
        output = StringIO()
        manager = PDFResourceManager()
        converter = TextConverter(manager, output, laparams=LAParams())
        interpreter = PDFPageInterpreter(manager, converter)
        # with-statement fixes the file handle leak when page parsing raises
        with open(pdf_file, 'rb') as infile:
            for page in PDFPage.get_pages(infile, pagenums):
                interpreter.process_page(page)
        converter.close()
        text = output.getvalue()
        output.close()
        # Also dump the xref objects so IOCs hidden in PDF structure are found
        text += PDFXrefObjectsToXML.pdf_xref_objects_to_xml(pdf_file)
        return phantom.APP_SUCCESS, text
    except pdfminer.pdfdocument.PDFPasswordIncorrect:
        return action_result.set_status(phantom.APP_ERROR, "Failed to parse pdf: The provided pdf is password protected"), None
    except pdfminer.pdfdocument.PDFEncryptionError:
        return action_result.set_status(phantom.APP_ERROR, "Failed to parse pdf: The provided pdf is encrypted"), None
    except struct.error:
        # pdfminer raises bare struct.error on malformed binary structures
        return action_result.set_status(phantom.APP_ERROR,
                                        "Failed to parse pdf: The provided pdf is password protected or is in different format"), None
    except Exception as e:
        error_code, error_msg = _get_error_message_from_exception(e)
        err = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
        return action_result.set_status(phantom.APP_ERROR, "Failed to parse pdf: {0}".format(err)), None
def _docx_to_text(action_result, docx_file):
    """ docx is literally a zip file, and all the words in the document are in one xml document
    doc does not work this way at all

    Returns (phantom.APP_SUCCESS, text) or (action_result error status, None).
    """
    WORD_NAMESPACE = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}'
    PARA = WORD_NAMESPACE + 'p'
    TEXT = WORD_NAMESPACE + 't'
    try:
        # Context managers fix the zip/member handle leak when parsing raises
        with zipfile.ZipFile(docx_file) as zf:
            with zf.open('word/document.xml') as fp:
                txt = fp.read()
        root = ElementTree.fromstring(txt)
        paragraphs = []
        for paragraph in root.iter(PARA):
            texts = [node.text for node in paragraph.iter(TEXT) if node.text]
            if texts:
                paragraphs.append(''.join(texts))
        return phantom.APP_SUCCESS, '\n\n'.join(paragraphs)
    except zipfile.BadZipfile:
        return (
            action_result.set_status(
                phantom.APP_ERROR,
                "Failed to parse docx: The file might be corrupted or password protected or not a docx file"),
            None)
    except EntitiesForbidden as e:
        # Raised by the hardened XML parser on XXE-style payloads
        err = e
    except Exception as e:
        error_code, error_msg = _get_error_message_from_exception(e)
        err = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
    return action_result.set_status(phantom.APP_ERROR, "Failed to parse docx: {0}".format(err)), None
def _csv_to_text(action_result, csv_file):
    r""" This function really only exists due to a misunderstanding on how word boundaries (\b) work
    As it turns out, only word characters can invalidate word boundaries. So stuff like commas,
    brackets, gt and lt signs, etc. do not

    Flattens the CSV into one space-separated string of cell values.
    Returns (phantom.APP_SUCCESS, text) or (action_result error status, None).
    """
    text = ""
    try:
        # with-statement fixes the file handle leak when csv parsing raises
        with open(csv_file, 'rt') as fp:
            reader = csv.reader(fp)
            for row in reader:
                text += ' '.join(row)
                text += ' '  # The humanity of always having a trailing space
        return phantom.APP_SUCCESS, text
    except Exception as e:
        error_code, error_msg = _get_error_message_from_exception(e)
        err = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
        return action_result.set_status(phantom.APP_ERROR, "Failed to parse csv: {0}".format(err)), None
def _html_to_text(action_result, html_file, text_val=None):
    """ Similar to CSV, this is also unnecessary. It will trim /some/ of that fat from a normal HTML, however

    Reads *html_file* (or uses *text_val* if given), strips markup and
    appends all href/src attribute values so URLs in attributes are kept.
    Returns (phantom.APP_SUCCESS, text) or (action_result error status, None).
    """
    try:
        if text_val is None:
            # with-statement fixes the file handle leak when read/decode raises
            with open(html_file, 'rb') as fp:
                html_text = UnicodeDammit(fp.read()).unicode_markup
        else:
            html_text = text_val
        # To unescape html escaped body
        html_text = unescape(html_text)
        soup = BeautifulSoup(html_text, 'html.parser')
        read_text = soup.findAll(text=True)
        links = [tag.get('href') for tag in soup.findAll(href=True)]
        srcs = [tag.get('src') for tag in soup.findAll(src=True)]
        text = ' '.join(read_text + links + srcs)
        return phantom.APP_SUCCESS, text
    except Exception as e:
        error_code, error_msg = _get_error_message_from_exception(e)
        err = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
        return action_result.set_status(phantom.APP_ERROR, "Failed to parse html: {0}".format(err)), None
def _join_thread(base_connector, thread):
    """Tell the progress thread to stop (via the shared _done flag) and wait for it."""
    with base_connector._lock:
        base_connector._done = True
    thread.join()
def _wait_for_parse(base_connector):
    """Emit a ticking 'Parsing PDF document...' progress message until _done is set.

    Runs in a helper thread; keeps the platform from thinking the app froze
    during long PDF parses.
    """
    dots = 0
    message = "Parsing PDF document"
    while True:
        with base_connector._lock:
            if base_connector._done:
                return
            base_connector.send_progress(message + '.' * dots)
        dots = dots % 5 + 1  # cycle 1..5 trailing dots
        time.sleep(1)
def parse_file(base_connector, action_result, file_info, parse_domains=True, keep_raw=False):
    """ Parse a non-email file """
    try:
        ioc_parser = TextIOCParser(parse_domains)
    except Exception as e:
        return action_result.set_status(phantom.APP_ERROR, str(e)), None
    file_type = file_info['type']
    if file_type == 'pdf':
        """ Parsing a PDF document over like, 10 pages starts to take a while
        (A 80 something page document took like 5 - 10 minutes)
        The thread is nice because it shows a constantly changing message,
        which shows that the app isn't frozen, but it also stops watchdog
        from terminating the app
        """
        progress_thread = threading.Thread(target=_wait_for_parse, args=[base_connector])
        progress_thread.start()
        ret_val, raw_text = _pdf_to_text(action_result, file_info['path'])
        _join_thread(base_connector, progress_thread)
    else:
        # Every other supported type is a plain "path in, text out" extractor
        extractors = {
            'txt': _grab_raw_text,
            'docx': _docx_to_text,
            'csv': _csv_to_text,
            'html': _html_to_text,
        }
        extractor = extractors.get(file_type)
        if extractor is None:
            return action_result.set_status(phantom.APP_ERROR, "Unexpected file type"), None
        ret_val, raw_text = extractor(action_result, file_info['path'])
    if phantom.is_fail(ret_val):
        return ret_val, None
    base_connector.save_progress('Parsing for IOCs')
    try:
        artifacts = ioc_parser.parse_to_artifacts(raw_text)
        if keep_raw:
            base_connector.save_progress('Saving Raw Text')
            artifacts.append(ioc_parser.add_artifact(raw_text))
    except Exception as e:
        error_code, error_msg = _get_error_message_from_exception(e)
        err = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
        return action_result.set_status(phantom.APP_ERROR, err), None
    return phantom.APP_SUCCESS, {'artifacts': artifacts}
def parse_structured_file(action_result, file_info):
    """Turn every row of a CSV file into its own 'CSV entry' artifact.

    Returns (phantom.APP_SUCCESS, {'artifacts': [...]}) on success or
    (action_result error status, None). Only CSV input is supported.
    """
    if file_info['type'] != 'csv':
        return action_result.set_status(phantom.APP_ERROR, "Structured extraction only supported for CSV files"), None
    csv_file = file_info['path']
    artifacts = []
    try:
        # Text mode on Python 3, binary on Python 2 (the csv module differs)
        mode = 'rt' if sys.version_info[0] >= 3 else 'rb'
        # with-statement fixes the file handle leak when csv parsing raises
        with open(csv_file, mode) as fp:
            reader = csv.DictReader(fp, restkey='other')  # need to handle lines terminated in commas
            for row in reader:
                row['source_file'] = file_info['name']
                artifacts.append({
                    'name': 'CSV entry',
                    'cef': dict(row)  # make CSV entry artifact
                })
    except Exception as e:
        error_code, error_msg = _get_error_message_from_exception(e)
        err = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
        return action_result.set_status(phantom.APP_ERROR, "Failed to parse structured CSV: {0}".format(err)), None
    return phantom.APP_SUCCESS, {'artifacts': artifacts}
def parse_text(base_connector, action_result, file_type, text_val, parse_domains=True):
    """ Parse a non-email file """
    try:
        ioc_parser = TextIOCParser(parse_domains)
    except Exception as e:
        return action_result.set_status(phantom.APP_ERROR, str(e)), None
    if file_type == 'html':
        ret_val, raw_text = _html_to_text(action_result, None, text_val=text_val)
    elif file_type in ('txt', 'csv'):
        # Plain text (and flattened CSV) needs no conversion at all
        ret_val, raw_text = phantom.APP_SUCCESS, text_val
    else:
        return action_result.set_status(phantom.APP_ERROR, "Unexpected file type"), None
    if phantom.is_fail(ret_val):
        return ret_val, None
    base_connector.save_progress('Parsing for IOCs')
    try:
        artifacts = ioc_parser.parse_to_artifacts(raw_text)
    except Exception as e:
        error_code, error_msg = _get_error_message_from_exception(e)
        err = "Error Code: {0}. Error Message: {1}".format(error_code, error_msg)
        return action_result.set_status(phantom.APP_ERROR, err), None
    return phantom.APP_SUCCESS, {'artifacts': artifacts}
| 1.796875 | 2 |
tests/commands/test_sorted_set.py | dynalz/coredis | 0 | 12764027 | <reponame>dynalz/coredis<gh_stars>0
import pytest
from coredis import DataError
from coredis.utils import b
from tests.conftest import targets
@targets("redis_basic", "redis_cluster")
@pytest.mark.asyncio()
class TestSortedSet:
    """Integration tests for the Redis sorted-set (Z*) command family.

    Runs against both a standalone server and a cluster (see @targets).
    Keys carry the {foo} hash tag so multi-key commands land in a single
    cluster slot. Version-gated commands use @pytest.mark.min_server_version.
    """
    async def test_zadd(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3)
        assert await client.zrange("a{foo}", 0, -1) == [b("a1"), b("a2"), b("a3")]
    async def test_zaddoption(self, client):
        await client.zadd("a{foo}", a1=1)
        assert int(await client.zscore("a{foo}", "a1")) == 1
        # NX: only add new members -- existing a1 must keep its score
        assert int(await client.zaddoption("a{foo}", "NX", a1=2)) == 0
        assert int(await client.zaddoption("a{foo}", "NX CH", a1=2)) == 0
        assert int(await client.zscore("a{foo}", "a1")) == 1
        assert await client.zcard("a{foo}") == 1
        # XX: only update existing members -- a2 must not be created
        assert int(await client.zaddoption("a{foo}", "XX", a2=1)) == 0
        assert await client.zcard("a{foo}") == 1
        assert int(await client.zaddoption("a{foo}", "XX", a1=2)) == 0
        # CH makes the return value count changed (not just added) members
        assert int(await client.zaddoption("a{foo}", "XX CH", a1=3)) == 1
        assert int(await client.zscore("a{foo}", "a1")) == 3
        assert int(await client.zaddoption("a{foo}", "NX", a2=1)) == 1
        assert int(await client.zaddoption("a{foo}", "NX CH", a3=1)) == 1
        assert await client.zcard("a{foo}") == 3
        await client.zaddoption("a{foo}", "INCR", a3=1)
        assert int(await client.zscore("a{foo}", "a3")) == 2
    async def test_zcard(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3)
        assert await client.zcard("a{foo}") == 3
    async def test_zcount(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3)
        assert await client.zcount("a{foo}", "-inf", "+inf") == 3
        assert await client.zcount("a{foo}", 1, 2) == 2
        assert await client.zcount("a{foo}", 10, 20) == 0
    @pytest.mark.min_server_version("6.2.0")
    @pytest.mark.nocluster
    async def test_zdiff(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3)
        await client.zadd("b{foo}", a1=1, a2=2)
        assert (await client.zdiff(["a{foo}", "b{foo}"])) == [b"a3"]
        assert (await client.zdiff(["a{foo}", "b{foo}"], withscores=True)) == [
            b"a3",
            b"3",
        ]
    @pytest.mark.min_server_version("6.2.0")
    async def test_zdiffstore(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3)
        await client.zadd("b{foo}", a1=1, a2=2)
        assert await client.zdiffstore("out{foo}", ["a{foo}", "b{foo}"])
        assert (await client.zrange("out{foo}", 0, -1)) == [b"a3"]
        assert (await client.zrange("out{foo}", 0, -1, withscores=True)) == [
            (b"a3", 3.0)
        ]
    async def test_zincrby(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3)
        assert await client.zincrby("a{foo}", "a2") == 3.0
        assert await client.zincrby("a{foo}", "a3", amount=5) == 8.0
        assert await client.zscore("a{foo}", "a2") == 3.0
        assert await client.zscore("a{foo}", "a3") == 8.0
    async def test_zlexcount(self, client):
        await client.zadd("a{foo}", a=0, b=0, c=0, d=0, e=0, f=0, g=0)
        assert await client.zlexcount("a{foo}", "-", "+") == 7
        assert await client.zlexcount("a{foo}", "[b", "[f") == 5
    # ZINTERSTORE with the three aggregate modes plus per-key weights
    async def test_zinterstore_sum(self, client):
        await client.zadd("a{foo}", a1=1, a2=1, a3=1)
        await client.zadd("b{foo}", a1=2, a2=2, a3=2)
        await client.zadd("c{foo}", a1=6, a3=5, a4=4)
        assert await client.zinterstore("d{foo}", ["a{foo}", "b{foo}", "c{foo}"]) == 2
        assert await client.zrange("d{foo}", 0, -1, withscores=True) == [
            (b("a3"), 8),
            (b("a1"), 9),
        ]
    async def test_zinterstore_max(self, client):
        await client.zadd("a{foo}", a1=1, a2=1, a3=1)
        await client.zadd("b{foo}", a1=2, a2=2, a3=2)
        await client.zadd("c{foo}", a1=6, a3=5, a4=4)
        assert (
            await client.zinterstore(
                "d{foo}", ["a{foo}", "b{foo}", "c{foo}"], aggregate="MAX"
            )
            == 2
        )
        assert await client.zrange("d{foo}", 0, -1, withscores=True) == [
            (b("a3"), 5),
            (b("a1"), 6),
        ]
    async def test_zinterstore_min(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3)
        await client.zadd("b{foo}", a1=2, a2=3, a3=5)
        await client.zadd("c{foo}", a1=6, a3=5, a4=4)
        assert (
            await client.zinterstore(
                "d{foo}", ["a{foo}", "b{foo}", "c{foo}"], aggregate="MIN"
            )
            == 2
        )
        assert await client.zrange("d{foo}", 0, -1, withscores=True) == [
            (b("a1"), 1),
            (b("a3"), 3),
        ]
    async def test_zinterstore_with_weight(self, client):
        await client.zadd("a{foo}", a1=1, a2=1, a3=1)
        await client.zadd("b{foo}", a1=2, a2=2, a3=2)
        await client.zadd("c{foo}", a1=6, a3=5, a4=4)
        assert (
            await client.zinterstore("d{foo}", {"a{foo}": 1, "b{foo}": 2, "c{foo}": 3})
            == 2
        )
        assert await client.zrange("d{foo}", 0, -1, withscores=True) == [
            (b("a3"), 20),
            (b("a1"), 23),
        ]
    @pytest.mark.min_server_version("4.9.0")
    async def test_zpopmax(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3)
        assert (await client.zpopmax("a{foo}")) == [(b"a3", 3)]
        # with count
        assert (await client.zpopmax("a{foo}", count=2)) == [(b"a2", 2), (b"a1", 1)]
    @pytest.mark.min_server_version("4.9.0")
    async def test_zpopmin(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3)
        assert (await client.zpopmin("a{foo}")) == [(b"a1", 1)]
        # with count
        assert (await client.zpopmin("a{foo}", count=2)) == [(b"a2", 2), (b"a3", 3)]
    @pytest.mark.min_server_version("6.2.0")
    async def test_zrandemember(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3, a4=4, a5=5)
        assert (await client.zrandmember("a{foo}")) is not None
        assert len(await client.zrandmember("a{foo}", 2)) == 2
        # with scores
        assert len(await client.zrandmember("a{foo}", 2, True)) == 4
        # without duplications
        assert len(await client.zrandmember("a{foo}", 10)) == 5
        # with duplications
        assert len(await client.zrandmember("a{foo}", -10)) == 10
    @pytest.mark.min_server_version("4.9.0")
    async def test_bzpopmax(self, client):
        await client.zadd("a{foo}", a1=1, a2=2)
        await client.zadd("b{foo}", b1=10, b2=20)
        # keys are checked left-to-right, so b{foo} drains before a{foo}
        assert (await client.bzpopmax(["b{foo}", "a{foo}"], timeout=1)) == (
            b"b{foo}",
            b"b2",
            20,
        )
        assert (await client.bzpopmax(["b{foo}", "a{foo}"], timeout=1)) == (
            b"b{foo}",
            b"b1",
            10,
        )
        assert (await client.bzpopmax(["b{foo}", "a{foo}"], timeout=1)) == (
            b"a{foo}",
            b"a2",
            2,
        )
        assert (await client.bzpopmax(["b{foo}", "a{foo}"], timeout=1)) == (
            b"a{foo}",
            b"a1",
            1,
        )
        assert (await client.bzpopmax(["b{foo}", "a{foo}"], timeout=1)) is None
        await client.zadd("c{foo}", c1=100)
        assert (await client.bzpopmax("c{foo}", timeout=1)) == (b"c{foo}", b"c1", 100)
    @pytest.mark.min_server_version("4.9.0")
    async def test_bzpopmin(self, client):
        await client.zadd("a{foo}", a1=1, a2=2)
        await client.zadd("b{foo}", b1=10, b2=20)
        assert (await client.bzpopmin(["b{foo}", "a{foo}"], timeout=1)) == (
            b"b{foo}",
            b"b1",
            10,
        )
        assert (await client.bzpopmin(["b{foo}", "a{foo}"], timeout=1)) == (
            b"b{foo}",
            b"b2",
            20,
        )
        assert (await client.bzpopmin(["b{foo}", "a{foo}"], timeout=1)) == (
            b"a{foo}",
            b"a1",
            1,
        )
        assert (await client.bzpopmin(["b{foo}", "a{foo}"], timeout=1)) == (
            b"a{foo}",
            b"a2",
            2,
        )
        assert (await client.bzpopmin(["b{foo}", "a{foo}"], timeout=1)) is None
        await client.zadd("c{foo}", c1=100)
        assert (await client.bzpopmin("c{foo}", timeout=1)) == (b"c{foo}", b"c1", 100)
    async def test_zrange(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3)
        assert await client.zrange("a{foo}", 0, 1) == [b("a1"), b("a2")]
        assert await client.zrange("a{foo}", 1, 2) == [b("a2"), b("a3")]
        # withscores
        assert await client.zrange("a{foo}", 0, 1, withscores=True) == [
            (b("a1"), 1.0),
            (b("a2"), 2.0),
        ]
        assert await client.zrange("a{foo}", 1, 2, withscores=True) == [
            (b("a2"), 2.0),
            (b("a3"), 3.0),
        ]
        # custom score function
        assert await client.zrange(
            "a{foo}", 0, 1, withscores=True, score_cast_func=int
        ) == [
            (b("a1"), 1),
            (b("a2"), 2),
        ]
    @pytest.mark.min_server_version("6.2.0")
    async def test_zrangestore(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3)
        assert await client.zrangestore("b{foo}", "a{foo}", 0, 1)
        assert await client.zrange("b{foo}", 0, -1) == [b"a1", b"a2"]
        assert await client.zrangestore("b{foo}", "a{foo}", 1, 2)
        assert await client.zrange("b{foo}", 0, -1) == [b"a2", b"a3"]
        assert await client.zrange("b{foo}", 0, -1, withscores=True) == [
            (b"a2", 2),
            (b"a3", 3),
        ]
        # reversed order
        assert await client.zrangestore("b{foo}", "a{foo}", 1, 2, desc=True)
        assert await client.zrange("b{foo}", 0, -1) == [b"a1", b"a2"]
        # by score
        assert await client.zrangestore(
            "b{foo}", "a{foo}", 2, 1, byscore=True, offset=0, num=1, desc=True
        )
        assert await client.zrange("b{foo}", 0, -1) == [b"a2"]
        # by lex
        assert await client.zrangestore(
            "b{foo}", "a{foo}", "[a2", "(a3", bylex=True, offset=0, num=1
        )
        assert await client.zrange("b{foo}", 0, -1) == [b"a2"]
    async def test_zrangebylex(self, client):
        await client.zadd("a{foo}", a=0, b=0, c=0, d=0, e=0, f=0, g=0)
        assert await client.zrangebylex("a{foo}", "-", "[c") == [b("a"), b("b"), b("c")]
        assert await client.zrangebylex("a{foo}", "-", "(c") == [b("a"), b("b")]
        assert await client.zrangebylex("a{foo}", "[aaa", "(g") == [
            b("b"),
            b("c"),
            b("d"),
            b("e"),
            b("f"),
        ]
        assert await client.zrangebylex("a{foo}", "[f", "+") == [b("f"), b("g")]
        assert await client.zrangebylex("a{foo}", "-", "+", start=3, num=2) == [
            b("d"),
            b("e"),
        ]
    async def test_zrevrangebylex(self, client):
        await client.zadd("a{foo}", a=0, b=0, c=0, d=0, e=0, f=0, g=0)
        assert await client.zrevrangebylex("a{foo}", "[c", "-") == [
            b("c"),
            b("b"),
            b("a"),
        ]
        assert await client.zrevrangebylex("a{foo}", "(c", "-") == [b("b"), b("a")]
        assert await client.zrevrangebylex("a{foo}", "(g", "[aaa") == [
            b("f"),
            b("e"),
            b("d"),
            b("c"),
            b("b"),
        ]
        assert await client.zrevrangebylex("a{foo}", "+", "[f") == [b("g"), b("f")]
        assert await client.zrevrangebylex("a{foo}", "+", "-", start=3, num=2) == [
            b("d"),
            b("c"),
        ]
    async def test_zrangebyscore(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3, a4=4, a5=5)
        assert await client.zrangebyscore("a{foo}", 2, 4) == [b("a2"), b("a3"), b("a4")]
        # slicing with start/num
        assert await client.zrangebyscore("a{foo}", 2, 4, start=1, num=2) == [
            b("a3"),
            b("a4"),
        ]
        # withscores
        assert await client.zrangebyscore("a{foo}", 2, 4, withscores=True) == [
            (b("a2"), 2.0),
            (b("a3"), 3.0),
            (b("a4"), 4.0),
        ]
        # custom score function
        assert await client.zrangebyscore(
            "a{foo}", 2, 4, withscores=True, score_cast_func=int
        ) == [(b("a2"), 2), (b("a3"), 3), (b("a4"), 4)]
    async def test_zrank(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3, a4=4, a5=5)
        assert await client.zrank("a{foo}", "a1") == 0
        assert await client.zrank("a{foo}", "a2") == 1
        assert await client.zrank("a{foo}", "a6") is None
    async def test_zrem(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3)
        assert await client.zrem("a{foo}", "a2") == 1
        assert await client.zrange("a{foo}", 0, -1) == [b("a1"), b("a3")]
        assert await client.zrem("a{foo}", "b{foo}") == 0
        assert await client.zrange("a{foo}", 0, -1) == [b("a1"), b("a3")]
    async def test_zrem_multiple_keys(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3)
        assert await client.zrem("a{foo}", "a1", "a2") == 2
        assert await client.zrange("a{foo}", 0, 5) == [b("a3")]
    async def test_zremrangebylex(self, client):
        await client.zadd("a{foo}", a=0, b=0, c=0, d=0, e=0, f=0, g=0)
        assert await client.zremrangebylex("a{foo}", "-", "[c") == 3
        assert await client.zrange("a{foo}", 0, -1) == [b("d"), b("e"), b("f"), b("g")]
        assert await client.zremrangebylex("a{foo}", "[f", "+") == 2
        assert await client.zrange("a{foo}", 0, -1) == [b("d"), b("e")]
        assert await client.zremrangebylex("a{foo}", "[h", "+") == 0
        assert await client.zrange("a{foo}", 0, -1) == [b("d"), b("e")]
    async def test_zremrangebyrank(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3, a4=4, a5=5)
        assert await client.zremrangebyrank("a{foo}", 1, 3) == 3
        assert await client.zrange("a{foo}", 0, 5) == [b("a1"), b("a5")]
    async def test_zremrangebyscore(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3, a4=4, a5=5)
        assert await client.zremrangebyscore("a{foo}", 2, 4) == 3
        assert await client.zrange("a{foo}", 0, -1) == [b("a1"), b("a5")]
        assert await client.zremrangebyscore("a{foo}", 2, 4) == 0
        assert await client.zrange("a{foo}", 0, -1) == [b("a1"), b("a5")]
    async def test_zrevrange(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3)
        assert await client.zrevrange("a{foo}", 0, 1) == [b("a3"), b("a2")]
        assert await client.zrevrange("a{foo}", 1, 2) == [b("a2"), b("a1")]
        # withscores
        assert await client.zrevrange("a{foo}", 0, 1, withscores=True) == [
            (b("a3"), 3.0),
            (b("a2"), 2.0),
        ]
        assert await client.zrevrange("a{foo}", 1, 2, withscores=True) == [
            (b("a2"), 2.0),
            (b("a1"), 1.0),
        ]
        # custom score function
        assert await client.zrevrange(
            "a{foo}", 0, 1, withscores=True, score_cast_func=int
        ) == [
            (b("a3"), 3.0),
            (b("a2"), 2.0),
        ]
    async def test_zrevrangebyscore(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3, a4=4, a5=5)
        assert await client.zrevrangebyscore("a{foo}", 4, 2) == [
            b("a4"),
            b("a3"),
            b("a2"),
        ]
        # slicing with start/num
        assert await client.zrevrangebyscore("a{foo}", 4, 2, start=1, num=2) == [
            b("a3"),
            b("a2"),
        ]
        # withscores
        assert await client.zrevrangebyscore("a{foo}", 4, 2, withscores=True) == [
            (b("a4"), 4.0),
            (b("a3"), 3.0),
            (b("a2"), 2.0),
        ]
        # custom score function
        assert await client.zrevrangebyscore(
            "a{foo}", 4, 2, withscores=True, score_cast_func=int
        ) == [(b("a4"), 4), (b("a3"), 3), (b("a2"), 2)]
    async def test_zrevrank(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3, a4=4, a5=5)
        assert await client.zrevrank("a{foo}", "a1") == 4
        assert await client.zrevrank("a{foo}", "a2") == 3
        assert await client.zrevrank("a{foo}", "a6") is None
    async def test_zscore(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3)
        assert await client.zscore("a{foo}", "a1") == 1.0
        assert await client.zscore("a{foo}", "a2") == 2.0
        assert await client.zscore("a{foo}", "a4") is None
    # ZUNIONSTORE with the three aggregate modes plus per-key weights
    async def test_zunionstore_sum(self, client):
        await client.zadd("a{foo}", a1=1, a2=1, a3=1)
        await client.zadd("b{foo}", a1=2, a2=2, a3=2)
        await client.zadd("c{foo}", a1=6, a3=5, a4=4)
        assert await client.zunionstore("d{foo}", ["a{foo}", "b{foo}", "c{foo}"]) == 4
        assert await client.zrange("d{foo}", 0, -1, withscores=True) == [
            (b("a2"), 3),
            (b("a4"), 4),
            (b("a3"), 8),
            (b("a1"), 9),
        ]
    async def test_zunionstore_max(self, client):
        await client.zadd("a{foo}", a1=1, a2=1, a3=1)
        await client.zadd("b{foo}", a1=2, a2=2, a3=2)
        await client.zadd("c{foo}", a1=6, a3=5, a4=4)
        assert (
            await client.zunionstore(
                "d{foo}", ["a{foo}", "b{foo}", "c{foo}"], aggregate="MAX"
            )
            == 4
        )
        assert await client.zrange("d{foo}", 0, -1, withscores=True) == [
            (b("a2"), 2),
            (b("a4"), 4),
            (b("a3"), 5),
            (b("a1"), 6),
        ]
    async def test_zunionstore_min(self, client):
        await client.zadd("a{foo}", a1=1, a2=2, a3=3)
        await client.zadd("b{foo}", a1=2, a2=2, a3=4)
        await client.zadd("c{foo}", a1=6, a3=5, a4=4)
        assert (
            await client.zunionstore(
                "d{foo}", ["a{foo}", "b{foo}", "c{foo}"], aggregate="MIN"
            )
            == 4
        )
        assert await client.zrange("d{foo}", 0, -1, withscores=True) == [
            (b("a1"), 1),
            (b("a2"), 2),
            (b("a3"), 3),
            (b("a4"), 4),
        ]
    async def test_zunionstore_with_weight(self, client):
        await client.zadd("a{foo}", a1=1, a2=1, a3=1)
        await client.zadd("b{foo}", a1=2, a2=2, a3=2)
        await client.zadd("c{foo}", a1=6, a3=5, a4=4)
        assert (
            await client.zunionstore("d{foo}", {"a{foo}": 1, "b{foo}": 2, "c{foo}": 3})
            == 4
        )
        assert await client.zrange("d{foo}", 0, -1, withscores=True) == [
            (b("a2"), 5),
            (b("a4"), 12),
            (b("a3"), 20),
            (b("a1"), 23),
        ]
    @pytest.mark.min_server_version("6.1.240")
    async def test_zmscore(self, client):
        with pytest.raises(DataError):
            await client.zmscore("invalid_key", [])
        assert await client.zmscore("invalid_key", ["invalid_member"]) == [None]
        await client.zadd("a{foo}", a1=1, a2=2, a3=3.5)
        assert (await client.zmscore("a{foo}", ["a1", "a2", "a3", "a4"])) == [
            1.0,
            2.0,
            3.5,
            None,
        ]
    async def test_zscan(self, client):
        await client.zadd("a", 1, "a", 2, "b", 3, "c")
        cursor, pairs = await client.zscan("a")
        assert cursor == 0
        assert set(pairs) == set([(b("a"), 1), (b("b"), 2), (b("c"), 3)])
        _, pairs = await client.zscan("a", match="a")
        assert set(pairs) == set([(b("a"), 1)])
    async def test_zscan_iter(self, client):
        await client.zadd("a", 1, "a", 2, "b", 3, "c")
        pairs = set()
        async for pair in client.zscan_iter("a"):
            pairs.add(pair)
        assert pairs == set([(b("a"), 1), (b("b"), 2), (b("c"), 3)])
        async for pair in client.zscan_iter("a", match="a"):
            assert pair == (b("a"), 1)
| 2.234375 | 2 |
DAE/print_collada_info.py | fovtran/PyGame_samples | 0 | 12764028 | <reponame>fovtran/PyGame_samples<gh_stars>0
#!/usr/bin/env python
import collada
import numpy
import sys
def inspectController(controller):
    """Print a one-screen summary of a scene controller and its primitives."""
    print('    Controller (id=%s) (type=%s)' % (controller.skin.id, type(controller).__name__))
    print('        Vertex weights:%d, joints:%d' % (len(controller), len(controller.joint_matrices)))
    for prim in controller.primitives():
        print('        Primitive', type(prim.primitive).__name__)
def inspectGeometry(obj):
    """Print a summary of a geometry object, its primitives and their materials."""
    used_materials = {prim.material for prim in obj.primitives()}
    print('    Geometry (id=%s): %d primitives' % (obj.original.id, len(obj)))
    for prim in obj.primitives():
        print('        Primitive (type=%s): len=%d vertices=%d' % (type(prim).__name__, len(prim), len(prim.vertex)))
    for mat in used_materials:
        if mat:
            inspectMaterial(mat)
def inspectMaterial(mat):
    """Print a material's shading model and each supported effect property."""
    print('    Material %s: shading %s' % (mat.effect.id, mat.effect.shadingtype))
    for prop in mat.effect.supported:
        value = getattr(mat.effect, prop)
        # A property value is a float, a color (tuple) or a Map (a texture)
        if not isinstance(value, collada.material.Map):
            print('      %s =' % (prop), value)
            continue
        colladaimage = value.sampler.surface.image
        # Accessing .pilimage forces the image to load via PIL if available
        # (unless it is already loaded)
        img = colladaimage.pilimage
        if img:  # can read and PIL available
            print('      %s = Texture %s:' % (prop, colladaimage.id), img.format, img.mode, img.size)
        else:
            print('      %s = Texture %s: (not available)' % (prop, colladaimage.id))
def inspectCollada(col):
    """Walk a loaded Collada document and print its scene contents.

    Covers geometry (with materials), controllers, cameras and lights,
    then reports any non-fatal load errors collected on ``col.errors``.
    """
    # Display the file contents
    print ('File Contents:')
    print ('  Geometry:')
    if col.scene is not None:
        for geom in col.scene.objects('geometry'):
            inspectGeometry( geom )
    print( '  Controllers:')
    if col.scene is not None:
        for controller in col.scene.objects('controller'):
            inspectController( controller )
    print ('  Cameras:')
    if col.scene is not None:
        for cam in col.scene.objects('camera'):
            print ('    Camera %s: '%cam.original.id)
    print( '  Lights:')
    if col.scene is not None:
        for light in col.scene.objects('light'):
            print ('    Light %s: color =' % light.original.id, light.color)
    # errors ignored at load time (see the ignore= list in __main__) end up here
    if not col.errors: print ('File read without errors')
    else:
        print ('Errors:')
        for error in col.errors:
            print ('  ', error)
if __name__ == '__main__':
    # First CLI argument is the .dae file to inspect; defaults to HEAD3.dae.
    filename = sys.argv[1] if len(sys.argv) > 1 else 'HEAD3.dae'
    # open COLLADA file, downgrading these error classes to entries in
    # col.errors instead of raising
    col = collada.Collada(filename, ignore=[collada.DaeUnsupportedError,
                            collada.DaeBrokenRefError])
    inspectCollada(col)
| 2.828125 | 3 |
DataExploration.py | lingcheng99/Kaggle-what-is-cooking | 0 | 12764029 | <reponame>lingcheng99/Kaggle-what-is-cooking
#For data exploration, examine most-used ingredients in each cuisine
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
from nltk.stem import WordNetLemmatizer
wnl=WordNetLemmatizer()
train=pd.read_json('train.json')
train.columns
Out[2]:
Index([u'cuisine', u'id', u'ingredients'], dtype='object')
train.shape
Out[3]:
(39774, 3)
train.ingredients[0]
Out[5]:
[u'romaine lettuce',
u'black olives',
u'grape tomatoes',
u'garlic',
u'pepper',
u'purple onion',
u'seasoning',
u'garbanzo beans',
u'feta cheese crumbles']
train.cuisine.value_counts()
Out[11]:
italian 7838
mexican 6438
southern_us 4320
indian 3003
chinese 2673
french 2646
cajun_creole 1546
thai 1539
japanese 1423
greek 1175
spanish 989
korean 830
vietnamese 825
moroccan 821
british 804
filipino 755
irish 667
jamaican 526
russian 489
brazilian 467
dtype: int64
#Write function "clean" to lemmatize and clean up strings in the "ingredients" column
def clean(x):
    """Return a list of cleaned, lemmatized ingredient strings.

    Each entry has every non-alphabetic character replaced by a space
    before being passed to the WordNet lemmatizer, so entries like
    "(10 oz.) tomatoes" normalise cleanly.

    NOTE(review): wnl.lemmatize receives the full phrase, not individual
    words, so multi-word ingredients are usually returned unchanged --
    confirm whether per-word lemmatization was intended.
    """
    # The original initialised cleanlist twice; the comprehension alone suffices.
    return [wnl.lemmatize(re.sub('[^a-zA-Z]', ' ', item)) for item in x]
#Add another column "ingreC", with the cleaned up list of ingredients
train['ingreC']=train.ingredients.apply(lambda x:clean(x))
train.ingreC[0]
Out[7]:
[u'romaine lettuce',
u'black olives',
u'grape tomatoes',
u'garlic',
u'pepper',
u'purple onion',
u'seasoning',
u'garbanzo beans',
u'feta cheese crumbles']
#Make a set of all ingredients
all_ingredients=set()
train.ingreC.map(lambda x:[all_ingredients.add(i) for i in list(x)])
len(all_ingredients)
Out[8]:
6709
#Add a column for each ingredient in the set
for ingredient in all_ingredients:
train[ingredient]=train.ingreC.apply(lambda x:ingredient in x)
train.shape
Out[9]:
(39774, 6713)
#Use groupby.sum() to get the number of times each ingredient appeared in a particular cuisine
train_g1=train.drop(['ingredients','id','ingreC'],axis=1)
train_g2=train_g1.groupby('cuisine').sum()
train_g3=train_g2.transpose()
#Now the dataframe is ready to be examined and plotted, by each cuisine
train_g3.italian.order(ascending=False)[:10]
train_g3.italian.order(ascending=False)[:10].plot(kind='bar')
Out[19]:
salt 3454
olive oil 3111
garlic cloves 1619
grated parmesan cheese 1579
garlic 1471
ground black pepper 1444
extra virgin olive oil 1362
onion 1240
water 1052
butter 1029
Name: italian, dtype: float64
train_g3.chinese.order(ascending=False)[:10]
Out[20]:
soy sauce 1363
sesame oil 915
salt 907
corn starch 906
sugar 824
garlic 763
water 762
green onions 628
vegetable oil 602
scallion 591
Name: chinese, dtype: float64
| 3.375 | 3 |
exploratory_data_analysis.py | MananSoni42/fantasy-predictions | 0 | 12764030 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
# Output directory for all generated figures.
# NOTE(review): `dir` shadows the builtin of the same name.
dir = 'plots'
if not os.path.exists(dir):
    os.mkdir(dir)
# Player statistics, indexed by player name; -1 is the dataset's missing marker.
df = pd.read_csv('final_data/final-data.csv', index_col='player-name')
df =df.replace(-1,np.nan)
# describe
print('--- Description ---')
print(df.describe())
# correlation heatmap over every column
plt.clf()
sns.heatmap(df.corr())
plt.savefig(os.path.join(dir,'corr-all.png'))
# a smaller, hand-picked column subset for a readable heatmap
corr_cols = ['all-time-runs-scored', 'all-time-average', 'all-time-strike-rate',
    'all-time-wkts', 'ipl-last-n-runs-scored', 'ipl-last-n-wkts', 'ipl-last-n-points',
    'ipl-1-points', 'ipl-2-points', 'ipl-3-points', 'ipl-4-points', 'ipl-5-points', 'points']
plt.clf()
sns.heatmap(df[corr_cols].corr())
plt.savefig(os.path.join(dir,'corr-some.png'))
# how strongly each feature correlates with the target column 'points'
print('--- Points correlation ---')
print(df.corr()['points'].sort_values(ascending=False))
# distribution plot for each column
# NOTE(review): sns.displot creates its own figure, so the plt.clf()/
# plt.savefig pair may not target the figure displot drew -- verify output.
n = len(df.columns)
for i,col in enumerate(df.columns):
    print(f'Plotting {col} ({i+1}/{n})')
    plt.clf()
    sns.displot(x=df[col])
    plt.savefig(os.path.join(dir,f'{col}-hist.png'))
# violin plots comparing alltime to IPL
| 2.46875 | 2 |
code/examples/scraping_example.py | periode/practical-code-2018 | 0 | 12764031 | # these three lines tell python that we would like to use these libraries
# we've installed these libraries by running `pip install requests` and `pip install beautifulsoup4` in the terminal
import requests
from bs4 import BeautifulSoup
import json
# Fetch the HTML of the Craigslist "missed connections" listing page.
target_url = "https://newyork.craigslist.org/d/missed-connections/search/mis"
page = requests.get(target_url)

# Parse the raw HTML with BeautifulSoup so its elements can be queried easily.
soup = BeautifulSoup(page.content, "html.parser")
# BUG FIX: the original print was missing its closing parenthesis (SyntaxError).
print("successfully loaded the page %s" % target_url)

# Grab every element carrying the class "result-title" (one per posting).
titles = soup.find_all(class_='result-title')

# Accumulates one {"title", "description"} dict per posting.
results = []

for title in titles:
    data = {
        "title": "",
        "description": ""
    }
    print("finding new title")
    print(title.text)
    data['title'] = title.text
    # Each title link's href points at the posting's own page; fetch and parse it.
    posting_url = title['href']
    posting = requests.get(posting_url)
    posting_soup = BeautifulSoup(posting.content, "html.parser")
    # The posting text lives in the element with id="postingbody".
    description = posting_soup.find(id="postingbody")
    data['description'] = description.get_text()
    results.append(data)

# Serialise all collected postings into results.json.
with open('results.json', 'w') as file_to_write:
    data_to_write = json.dumps(results)
    # BUG FIX: the original called json.dump(data_write, ...) -- `data_write`
    # is undefined (NameError), and dumping the already-serialised string
    # would double-encode it. Write the serialised text directly.
    file_to_write.write(data_to_write)
print("done writing the data!")
| 4.0625 | 4 |
#1 Mundo/#9/25.py | Henrique-Navarro/phyton | 0 | 12764032 | nome= str(input('Digite um nome: '))
print(nome.upper().find('SILVA'))
print('seu nome tem silva? {}' .format('silva' in nome.lower())) | 4.125 | 4 |
controllers/apiv3/api_district_controller.py | enterstudio/the-blue-alliance | 0 | 12764033 | import json
from google.appengine.ext import ndb
from controllers.apiv3.api_base_controller import ApiBaseController
from controllers.apiv3.model_properties import filter_event_properties, filter_team_properties
from database.district_query import DistrictListQuery
from database.event_query import DistrictEventsQuery
from database.team_query import DistrictTeamsQuery
class ApiDistrictListController(ApiBaseController):
    """APIv3 endpoint: list of districts for a given year."""

    CACHE_VERSION = 0
    CACHE_HEADER_LENGTH = 60 * 60 * 24  # one day

    def _track_call(self, year):
        """Record an analytics hit for this endpoint."""
        self._track_call_defer('district/list', year)

    def _render(self, year):
        """Fetch the district list for *year* and render it as JSON."""
        query = DistrictListQuery(int(year))
        district_list, updated = query.fetch(dict_version=3, return_updated=True)
        self._last_modified = updated
        return json.dumps(district_list, ensure_ascii=True, indent=True, sort_keys=True)
class ApiDistrictEventsController(ApiBaseController):
    """APIv3 endpoint: events belonging to a district in a given year."""

    CACHE_VERSION = 0
    CACHE_HEADER_LENGTH = 60 * 60 * 24  # one day

    def _track_call(self, district_key, year, model_type=None):
        """Record an analytics hit, qualified by the optional model filter."""
        action = 'district/events'
        if model_type:
            action = '{}/{}'.format(action, model_type)
        self._track_call_defer(action, '{}/{}'.format(district_key, year))

    def _render(self, district_key, year, model_type=None):
        """Fetch the district's events, optionally filtered, rendered as JSON."""
        query_key = '{}{}'.format(year, district_key)
        events, self._last_modified = DistrictEventsQuery(query_key).fetch(dict_version=3, return_updated=True)
        if model_type is not None:
            events = filter_event_properties(events, model_type)
        return json.dumps(events, ensure_ascii=True, indent=True, sort_keys=True)
class ApiDistrictTeamsController(ApiBaseController):
    """APIv3 endpoint: teams belonging to a district in a given year."""

    CACHE_VERSION = 0
    CACHE_HEADER_LENGTH = 60 * 60 * 24  # one day

    def _track_call(self, district_key, year, model_type=None):
        """Record an analytics hit, qualified by the optional model filter."""
        action = 'district/teams'
        if model_type:
            action = '{}/{}'.format(action, model_type)
        self._track_call_defer(action, '{}/{}'.format(district_key, year))

    def _render(self, district_key, year, model_type=None):
        """Fetch the district's teams, optionally filtered, rendered as JSON."""
        query_key = '{}{}'.format(year, district_key)
        teams, self._last_modified = DistrictTeamsQuery(query_key).fetch(dict_version=3, return_updated=True)
        if model_type is not None:
            teams = filter_team_properties(teams, model_type)
        return json.dumps(teams, ensure_ascii=True, indent=True, sort_keys=True)
| 1.984375 | 2 |
main.py | ericknudson/som-course-generator | 0 | 12764034 | <filename>main.py
from flask import Flask, render_template, Response, request, redirect, url_for
import numpy as np
from numpy import random
import csv
app = Flask(__name__)
#import data
# nums.csv: course numbers used as the "MGT ###" prefix of generated names.
nums = []
with open('static/nums.csv') as csv_file:
    # strip NUL bytes that would otherwise break the csv module
    csv_reader = csv.reader(x.replace('\0', '') for x in csv_file)
    for row in csv_reader:
        row = "".join(row)
        nums.append(row)
# bigram_probabilities.csv: rows of (word1, word2, probability) -> keyed dict.
bigram_probabilities = {}
with open('static/bigram_probabilities.csv') as csv_file:
    csv_reader = csv.reader(x.replace('\0', '') for x in csv_file)
    for row in csv_reader:
        key = (row[0],row[1])
        value = row[2]
        bigram_probabilities.update({key: value})
# tokenizedcourses.csv: real course titles as token lists, used to reject
# generated names that exactly match an existing course.
tokenized_courses = []
with open('static/tokenizedcourses.csv') as csv_file:
    csv_reader = csv.reader(x.replace('\0', '') for x in csv_file)
    for row in csv_reader:
        tokenized_courses.append(row)
#function to generate a new course name
def gen_bigram_course():
    """Sample a new course title from the bigram model.

    Walks the Markov chain from the start symbol "*" until 'STOP' is
    drawn, then resamples whenever the result exactly matches a real
    course or consists of a single word.
    """
    tokens = ["*"]
    while tokens[-1] != 'STOP':
        prev = tokens[-1]
        # candidate (next word, probability) pairs whose first word is `prev`
        candidates = [(k[1], float(v)) for k, v in bigram_probabilities.items() if k[0] == prev]
        words = np.array([w for w, _ in candidates])
        weights = np.divide([p for _, p in candidates], np.sum([p for _, p in candidates]))
        if len(tokens) == 1:
            # first word: pick uniformly at random (matches original behaviour)
            nxt = np.random.choice(words, 1)[0]
        else:
            # subsequent words: sample from the bigram probability distribution
            nxt = np.random.choice(words, 1, p=np.array(weights))[0]
        tokens.append(nxt)
    result = tokens[1:-1]  # drop the "*" start and 'STOP' end markers
    if result in tokenized_courses:
        # exact match of an existing course -> sample again
        result = gen_bigram_course()
    if len(result) == 1:
        result = gen_bigram_course()
    return result
def put_course_name_together(output):
    """Join sampled tokens into a display name prefixed with a random course number."""
    title = " ".join(output)
    # re-attach punctuation that tokenisation split off as separate tokens
    for punct in (",", ":", "?"):
        title = title.replace(" " + punct, punct)
    course_number = np.random.choice(nums, 1)[0]
    return "MGT " + course_number + " " + title
@app.route("/")
def index():
return render_template('index.html')
@app.route("/forward/", methods=['POST'])
def move_forward():
#Moving forward code
output = gen_bigram_course()
course = put_course_name_together(output)
return render_template('index.html', forward_message=course);
if __name__ == "__main__":
app.run(debug=True) | 3.015625 | 3 |
Problems/towersOfHanoi.py | HKuz/Test_Code | 1 | 12764035 | <gh_stars>1-10
#!/usr/local/bin/python3
def main():
    """Demo driver: solve Towers of Hanoi for 1 through 7 disks."""
    banner = "============================"
    for disks in range(1, 8):
        print(banner)
        print("Towers of Hanoi: {} Disks".format(disks))
        towers_of_hanoi(disks)
        # a solution for n disks always takes exactly 2**n - 1 moves
        print("Number of moves: {}".format(2 ** disks - 1))
        print(banner)
    return 0
def towers_of_hanoi(n, s="source", t="target", b="buffer"):
# n is number of disks, smaller disk must always be on top of larger one
assert n > 0
if n == 1:
print("Move {} to {}".format(s, t))
return
else:
# Recursively move n-1 disks from source to buffer
towers_of_hanoi(n-1, s, b, t)
# Move largest disk from source to target
towers_of_hanoi(1, s, t, b)
# Recursively move n-1 disks from buffer to target
towers_of_hanoi(n-1, b, t, s)
if __name__ == '__main__':
    # main() returns 0, but the exit status is not propagated here.
    main()
| 4.0625 | 4 |
Ejercicios de guia/ej27.py | FdelMazo/7540rw-Algo1 | 1 | 12764036 | def domino():
"""Imprime todas las fichas de domino"""
a = 0
b = 0
for k in range (0,7):
a = k
for i in range (a, 7):
b = i
print (a, b)
| 3.453125 | 3 |
dckrclstrpanic/info.py | minskmaz/dckrclstrpanic | 0 | 12764037 | <filename>dckrclstrpanic/info.py
# -*- coding: utf-8 -*-
"""PACKAGE INFO
This module provides some basic information about the package.
"""
# Set the package release version
version_info = (0, 0, 0)
# Dotted version string derived from the tuple above, e.g. "0.0.0".
__version__ = '.'.join(str(c) for c in version_info)
# Set the package details ('<NAME>'/'<EMAIL>' are scrubbed placeholders).
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__year__ = '2019'
__url__ = 'https://github.com/minskmaz/dckrclstrpanic'
__description__ = 'Roll panic tests for https://gun.eco'
__requires__ = ['sh', 'zope.component'] # Your package dependencies
# Default package properties
__license__ = 'MIT'
# Human-readable blurb assembled from the metadata above; __name__ is the
# module's import path when imported normally.
__about__ = ('{} \n\n Author: {} \n Email: {} \n Year: {} \n {} \n\n'
             ''.format(__name__, __author__, __email__, __year__,
                       __description__))
__setup_requires__ = ['pytest-runner', ]
__tests_require__ = ['pytest', 'pytest-cov', 'pytest-pep8']
| 1.8125 | 2 |
tests/sys/host_sim_test.py | south-coast-science/scs_host_cpc | 0 | 12764038 | #!/usr/bin/env python3
"""
Created on 24 Mar 2021
@author: <NAME> (<EMAIL>)
"""
from scs_host.sys.host import Host
# --------------------------------------------------------------------------------------------------------------------
# Exercise Host.sim() and print the resulting simulated host descriptor.
sim = Host.sim()
print(sim)
| 1.84375 | 2 |
tests/tests.py | belprofbiz/-tapi-yandex-direct | 0 | 12764039 | <filename>tests/tests.py<gh_stars>0
# coding: utf-8
import datetime as dt
import logging
import yaml
from tapi_yandex_direct import YandexDirect, GetTokenYandexDirect
logging.basicConfig(level=logging.DEBUG)
# Credentials come from ../config.yml (keys: token, client_id).
with open("../config.yml", "r") as stream:
    data_loaded = yaml.safe_load(stream)
ACCESS_TOKEN = data_loaded["token"]
CLIENT_ID = data_loaded["client_id"]
# Shared sandbox client used by every test function below.
api = YandexDirect(
    access_token=ACCESS_TOKEN,
    is_sandbox=True,
    auto_request_generation=True,
    receive_all_objects=True,
    retry_if_not_enough_units=False,
    retry_if_exceeded_limit=False,
    retries_if_server_error=5,
    language='ru',
    # Parameters for the Reports method
    processing_mode='offline',
    wait_report=False,
    return_money_in_micros=True,
    skip_report_header=True,
    skip_column_header=False,
    skip_report_summary=True,
)
def test_get_campaigns():
    """Fetch up to two campaigns with a small field set and print the raw result."""
    r = api.campaigns().get(
        data={
            "method": "get",
            "params": {
                "SelectionCriteria": {},
                "FieldNames": ["Id", "Name", "State", "Status", "Type"],
                "Page": {"Limit": 2},
            },
        }
    )
    print(r)
def test_method_get_transform_result():
    """Fetch campaigns and print the response through the .transform() helper."""
    r = api.campaigns().get(
        data={
            "method": "get",
            "params": {
                "SelectionCriteria": {},
                "FieldNames": ["Id", "Name"],
                "Page": {"Limit": 3},
            },
        }
    )
    print(r().transform())
def test_method_add_transform_result():
    """Create a text campaign in the sandbox and print the transformed response."""
    body = {
        "method": "add",
        "params": {
            "Campaigns": [
                {
                    "Name": "MyCampaignTest",
                    # start today (API expects YYYY-MM-DD)
                    "StartDate": str(dt.datetime.now().date()),
                    "TextCampaign": {
                        "BiddingStrategy": {
                            "Search": {
                                "BiddingStrategyType": "HIGHEST_POSITION"
                            },
                            "Network": {
                                "BiddingStrategyType": "SERVING_OFF"
                            }
                        },
                        "Settings": []
                    }
                }
            ]
        }
    }
    r = api.campaigns().post(data=body)
    print(r().transform())
def test_get_debugtoken():
    """Open the OAuth authorisation page in a browser to obtain a debug token."""
    # Deliberately shadows the module-level `api`: token retrieval uses its own client.
    api = GetTokenYandexDirect()
    api.debugtoken(client_id=CLIENT_ID).open_in_browser()
def test_get_report():
    """Request a campaign performance report over the full account history.

    NOTE(review): the client above is configured with
    processing_mode='offline' and wait_report=False, so the first call
    presumably returns a queued-report status rather than data -- confirm.
    """
    r = api.reports().get(
        data={
            "params": {
                "SelectionCriteria": {},
                "FieldNames": ["Date", "CampaignId", "Clicks", "Cost"],
                "OrderBy": [{
                    "Field": "Date"
                }],
                "ReportName": "Actual Data1111",
                "ReportType": "CAMPAIGN_PERFORMANCE_REPORT",
                "DateRangeType": "ALL_DATA",
                "Format": "TSV",
                "IncludeVAT": "YES",
                "IncludeDiscount": "YES"
            }
        }
    )
    print(r().data)
    print(r().transform())
def test_get_report2():
    """Fire several report requests with distinct ReportName values.

    Exercises repeated offline-report creation; each iteration prints the
    HTTP status code and the transformed payload.
    """
    for i in range(7):
        r = api.reports().get(
            data={
                "params": {
                    "SelectionCriteria": {},
                    "FieldNames": ["Date", "CampaignId", "Clicks", "Cost"],
                    "OrderBy": [{
                        "Field": "Date"
                    }],
                    # unique name per iteration: report names must not collide
                    "ReportName": "Actual Data12 f 1" + str(i),
                    "ReportType": "CAMPAIGN_PERFORMANCE_REPORT",
                    "DateRangeType": "LAST_WEEK",
                    "Format": "TSV",
                    "IncludeVAT": "YES",
                    "IncludeDiscount": "YES"
                }
            }
        )
        print(r().response.status_code)
        print(r().transform())
| 2.203125 | 2 |
MultipleGUIs.py | dpetrovykh/FIAR | 0 | 12764040 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 16 13:18:29 2021
@author: dpetrovykh
"""
from PyQt5 import QtCore, QtWidgets, QtWidgets
class MainWindow(QtWidgets.QMainWindow):
    """Top-level window that swaps pages via a QStackedWidget."""

    def __init__(self, parent=None):
        """Build the stacked central widget and show the login page first."""
        # Run Initialization of parent class
        super(MainWindow, self).__init__(parent)
        # Create a central widget which other functional widgets can be added into
        self.central_widget = QtWidgets.QStackedWidget()
        # formally add central widget as the centralWidget
        self.setCentralWidget(self.central_widget)
        # Create an instance of the login widget, supplying self as the parent optional argument
        login_widget = LoginWidget(self)
        # Link the button within the LoginWidget with the self.login function
        login_widget.button.clicked.connect(self.login)
        # Add the login_widget to the central widget
        self.central_widget.addWidget(login_widget)

    def login(self):
        """Switch the stacked widget over to the 'logged in' page."""
        # Create instance of logged widget
        logged_in_widget = LoggedWidget(self)
        # Add logged widget to central widget
        self.central_widget.addWidget(logged_in_widget)
        # Make the recently-added logged widget the current widget in the central widget.
        self.central_widget.setCurrentWidget(logged_in_widget)
class LoginWidget(QtWidgets.QWidget):
    """Page holding a single 'Login' button; MainWindow wires up its click."""

    def __init__(self, parent=None):
        super(LoginWidget, self).__init__(parent)
        # one button inside a horizontal layout; kept as self.button so the
        # parent window can connect to its clicked signal
        self.button = QtWidgets.QPushButton('Login')
        box = QtWidgets.QHBoxLayout()
        box.addWidget(self.button)
        self.setLayout(box)
class LoggedWidget(QtWidgets.QWidget):
    """Page shown after a successful login; just a confirmation label."""

    def __init__(self, parent=None):
        super(LoggedWidget, self).__init__(parent)
        self.label = QtWidgets.QLabel('logged in!')
        box = QtWidgets.QHBoxLayout()
        box.addWidget(self.label)
        self.setLayout(box)
if __name__ == '__main__':
    # Build the Qt application, show the main window, and enter the event loop.
    app = QtWidgets.QApplication([])
    window = MainWindow()
    window.show()
    app.exec_()
automod/plugins/AutomodPlugin.py | Salvi0/AutoMod | 3 | 12764041 | <gh_stars>1-10
from typing import Union
import discord
from discord.ext import commands
import re
from .PluginBlueprint import PluginBlueprint
from .Automod.sub.CheckMessage import checkMessage
from .Automod.sub.ShouldPerformAutomod import shouldPerformAutomod
from .Types import Duration, Embed
# Actions accepted by the `punishment` command; "none" removes the rule
# for the given warn count.
valid_actions = [
    "ban",
    "kick",
    "mute",
    "none"
]
async def getNewPunishments(plugin, ctx):
    """Build the sorted, human-readable punishment lines for a guild.

    Each configured entry renders as ``<warn count> <warn emote>``:
    followed by the action -- a one-word action is just capitalised,
    while a timed action ("mute 3600 1 h") is shown as "Mute 1h".
    """
    cfg = plugin.db.configs.get(ctx.guild.id, "punishments")
    warn_emote = plugin.emotes.get('WARN')

    def describe(action):
        # one-word actions: "ban" -> "Ban"; timed actions keep "<length><unit>"
        parts = action.split(' ')
        if len(parts) == 1:
            return action.capitalize()
        return parts[0].capitalize() + ' ' + parts[-2] + parts[-1]

    lines = [f"``{warns} {warn_emote}``: {describe(action)}" for warns, action in cfg.items()]
    # NOTE(review): sorting on the leading "``<count>" token is lexicographic,
    # so e.g. 10 sorts before 2 -- behaviour preserved from the original.
    return sorted(lines, key=lambda line: line.split(" ")[0])
class AutomodPlugin(PluginBlueprint):
    def __init__(self, bot):
        """Initialise the plugin with the shared bot instance (see PluginBlueprint)."""
        super().__init__(bot)
    async def cog_check(self, ctx):
        """Gate every command in this cog behind the Administrator permission."""
        return ctx.author.guild_permissions.administrator
@commands.Cog.listener()
async def on_automod_event(
self,
message
):
if not self.db.configs.exists(message.guild.id):
return
if not shouldPerformAutomod(self, message):
return
if message.guild is None or not isinstance(message.guild, discord.Guild):
return
if len(self.db.configs.get(message.guild.id, "automod")) < 1:
return
await checkMessage(self, message)
@commands.Cog.listener()
async def on_message_edit(
self,
before,
after
):
if not self.db.configs.get(after.guild.id, "automod"):
return
if not shouldPerformAutomod(self, after):
return
if after.guild is None:
return
if before.content != after.content and after.content == None:
return
await checkMessage(self, after)
    @commands.group()
    async def automod(
        self, 
        ctx
    ):
        """automod_help"""
        # Invoked without a subcommand: show the localised overview embed.
        if ctx.invoked_subcommand is None:
            prefix = self.bot.get_guild_prefix(ctx.guild)
            e = Embed(
                title=self.i18next.t(ctx.guild, "automod_title"),
                description=self.i18next.t(ctx.guild, "automod_description", prefix=prefix)
            )
            e.add_field(
                name="❯ Commands",
                value=self.i18next.t(ctx.guild, "automod_commands", prefix=prefix)
            )
            await ctx.send(embed=e)
    @automod.command()
    async def invite(
        self, 
        ctx, 
        warns: str
    ):
        """invite_help"""
        # `warns` is either "off" (disable the rule) or an int warn count.
        warns = warns.lower()
        if warns == "off":
            # remove the "invites" rule from this guild's automod config
            automod = self.db.configs.get(ctx.guild.id, "automod")
            if "invites" in automod:
                del automod["invites"]
                self.db.configs.update(ctx.guild.id, "automod", automod)
            await ctx.send(self.i18next.t(ctx.guild, "automod_feature_disabled", _emote="YES", what="anti-invites"))
        else:
            try:
                warns = int(warns)
            except ValueError:
                # neither "off" nor a number: show usage help
                e = Embed(
                    title="Invalid paramater",
                    description=self.i18next.t(ctx.guild, "invalid_automod_feature_param", prefix=self.bot.get_guild_prefix(ctx.guild), command="invite <warns>", off_command="invite off")
                )
                await ctx.send(embed=e)
            else:
                # warn count must be within 1..100
                if warns < 1:
                    return await ctx.send(self.i18next.t(ctx.guild, "min_warns", _emote="NO"))
                if warns > 100:
                    return await ctx.send(self.i18next.t(ctx.guild, "max_warns", _emote="NO"))
                automod = self.db.configs.get(ctx.guild.id, "automod")
                automod.update({
                    "invites": {"warns": warns}
                })
                self.db.configs.update(ctx.guild.id, "automod", automod)
                await ctx.send(self.i18next.t(ctx.guild, "warns_set", _emote="YES", warns=warns, what="they send Discord invites"))
    @automod.command()
    async def everyone(
        self, 
        ctx, 
        warns: str
    ):
        """everyone_help"""
        # `warns` is either "off" (disable the rule) or an int warn count.
        warns = warns.lower()
        if warns == "off":
            # remove the "everyone" rule from this guild's automod config
            automod = self.db.configs.get(ctx.guild.id, "automod")
            if "everyone" in automod:
                del automod["everyone"]
                self.db.configs.update(ctx.guild.id, "automod", automod)
            await ctx.send(self.i18next.t(ctx.guild, "automod_feature_disabled", _emote="YES", what="anti-everyone"))
        else:
            try:
                warns = int(warns)
            except ValueError:
                # neither "off" nor a number: show usage help
                e = Embed(
                    title="Invalid paramater",
                    description=self.i18next.t(ctx.guild, "invalid_automod_feature_param", prefix=self.bot.get_guild_prefix(ctx.guild), command="everyone <warns>", off_command="everyone off")
                )
                await ctx.send(embed=e)
            else:
                # warn count must be within 1..100
                if warns < 1:
                    return await ctx.send(self.i18next.t(ctx.guild, "min_warns", _emote="NO"))
                if warns > 100:
                    return await ctx.send(self.i18next.t(ctx.guild, "max_warns", _emote="NO"))
                automod = self.db.configs.get(ctx.guild.id, "automod")
                automod.update({
                    "everyone": {"warns": warns}
                })
                self.db.configs.update(ctx.guild.id, "automod", automod)
                await ctx.send(self.i18next.t(ctx.guild, "warns_set", _emote="YES", warns=warns, what="they attempt to mention @everyone/here"))
    @automod.command()
    async def files(
        self, 
        ctx, 
        warns: str
    ):
        """files_help"""
        # `warns` is either "off" (disable the rule) or an int warn count.
        warns = warns.lower()
        if warns == "off":
            # remove the "files" rule from this guild's automod config
            automod = self.db.configs.get(ctx.guild.id, "automod")
            if "files" in automod:
                del automod["files"]
                self.db.configs.update(ctx.guild.id, "automod", automod)
            await ctx.send(self.i18next.t(ctx.guild, "automod_feature_disabled", _emote="YES", what="anti-files"))
        else:
            try:
                warns = int(warns)
            except ValueError:
                # neither "off" nor a number: show usage help
                e = Embed(
                    title="Invalid paramater",
                    description=self.i18next.t(ctx.guild, "invalid_automod_feature_param", prefix=self.bot.get_guild_prefix(ctx.guild), command="files <warns>", off_command="files off")
                )
                await ctx.send(embed=e)
            else:
                # warn count must be within 1..100
                if warns < 1:
                    return await ctx.send(self.i18next.t(ctx.guild, "min_warns", _emote="NO"))
                if warns > 100:
                    return await ctx.send(self.i18next.t(ctx.guild, "max_warns", _emote="NO"))
                automod = self.db.configs.get(ctx.guild.id, "automod")
                automod.update({
                    "files": {"warns": warns}
                })
                self.db.configs.update(ctx.guild.id, "automod", automod)
                await ctx.send(self.i18next.t(ctx.guild, "warns_set", _emote="YES", warns=warns, what="they send forbidden/uncommon attachment types"))
    @automod.command()
    async def mentions(
        self, 
        ctx, 
        mentions: str
    ):
        """mentions_help"""
        # `mentions` is either "off" (disable the rule) or an int threshold.
        mentions = mentions.lower()
        if mentions == "off":
            # remove the "mention" rule from this guild's automod config
            automod = self.db.configs.get(ctx.guild.id, "automod")
            if "mention" in automod:
                del automod["mention"]
                self.db.configs.update(ctx.guild.id, "automod", automod)
            await ctx.send(self.i18next.t(ctx.guild, "automod_feature_disabled", _emote="YES", what="max-mentions"))
        else:
            try:
                mentions = int(mentions)
            except ValueError:
                # neither "off" nor a number: show usage help
                e = Embed(
                    title="Invalid paramater",
                    description=self.i18next.t(ctx.guild, "invalid_automod_feature_param", prefix=self.bot.get_guild_prefix(ctx.guild), command="mentions <mentions>", off_command="mentions off")
                )
                await ctx.send(embed=e)
            else:
                # threshold must be within 4..100 (stricter lower bound than warns)
                if mentions < 4:
                    return await ctx.send(self.i18next.t(ctx.guild, "min_mentions", _emote="NO"))
                if mentions > 100:
                    return await ctx.send(self.i18next.t(ctx.guild, "max_mentions", _emote="NO"))
                automod = self.db.configs.get(ctx.guild.id, "automod")
                automod.update({
                    "mention": {"threshold": mentions}
                })
                self.db.configs.update(ctx.guild.id, "automod", automod)
                await ctx.send(self.i18next.t(ctx.guild, "mentions_set", _emote="YES", mentions=mentions))
    @automod.command()
    async def lines(
        self, 
        ctx, 
        lines: str
    ):
        """lines_help"""
        # `lines` is either "off" (disable the rule) or an int threshold.
        lines = lines.lower()
        if lines == "off":
            # remove the "lines" rule from this guild's automod config
            automod = self.db.configs.get(ctx.guild.id, "automod")
            if "lines" in automod:
                del automod["lines"]
                self.db.configs.update(ctx.guild.id, "automod", automod)
            await ctx.send(self.i18next.t(ctx.guild, "automod_feature_disabled", _emote="YES", what="max-lines"))
        else:
            try:
                lines = int(lines)
            except ValueError:
                # neither "off" nor a number: show usage help
                e = Embed(
                    title="Invalid paramater",
                    description=self.i18next.t(ctx.guild, "invalid_automod_feature_param", prefix=self.bot.get_guild_prefix(ctx.guild), command="lines <lines>", off_command="lines off")
                )
                await ctx.send(embed=e)
            else:
                # threshold must be within 6..150
                if lines < 6:
                    return await ctx.send(self.i18next.t(ctx.guild, "min_lines", _emote="NO"))
                if lines > 150:
                    return await ctx.send(self.i18next.t(ctx.guild, "max_lines", _emote="NO"))
                automod = self.db.configs.get(ctx.guild.id, "automod")
                automod.update({
                    "lines": {"threshold": lines}
                })
                self.db.configs.update(ctx.guild.id, "automod", automod)
                await ctx.send(self.i18next.t(ctx.guild, "lines_set", _emote="YES", lines=lines))
@commands.command()
async def ignore(
self,
ctx,
role_or_channel: Union[discord.Role, discord.TextChannel] = None
):
"""ignore_help"""
roles = self.db.configs.get(ctx.guild.id, "ignored_roles")
channels = self.db.configs.get(ctx.guild.id, "ignored_channels")
if role_or_channel is None:
e = Embed()
e.add_field(
name="❯ Ignored Roles",
value="\n".join(set([*[f"<@&{x.id}>" for x in sorted(ctx.guild.roles, key=lambda l: l.position) if x.position >= ctx.guild.me.top_role.position or x.permissions.ban_members]]))
)
channels = [f"<#{x}>" for x in sorted(channels)]
if len(channels) > 0:
e.add_field(
name="❯ Ignored Channels",
value="\n".join(channels)
)
return await ctx.send(embed=e)
cu = role_or_channel
if cu.id in roles:
return await ctx.send(self.i18next.t(ctx.guild.id, "role_already_ignored", _emote="WARN"))
if cu.id in channels:
return await ctx.send(self.i18next.t(ctx.guild.id, "channel_already_ignored", _emote="WARN"))
if isinstance(cu, discord.Role):
roles.append(cu.id)
self.db.configs.update(ctx.guild.id, "ignored_roles", roles)
return await ctx.send(self.i18next.t(ctx.guild, "role_ignored", _emote="YES", role=cu.name))
elif isinstance(cu, discord.TextChannel):
channels.append(cu.id)
self.db.configs.update(ctx.guild.id, "ignored_channels", channels)
return await ctx.send(self.i18next.t(ctx.guild, "channel_ignored", _emote="YES", channel=cu.name))
    @commands.command()
    async def unignore(
        self, 
        ctx, 
        role_or_channel: Union[discord.Role, discord.TextChannel]
    ):
        """unignore_help"""
        cu = role_or_channel
        roles = self.db.configs.get(ctx.guild.id, "ignored_roles")
        channels = self.db.configs.get(ctx.guild.id, "ignored_channels")
        # Remove the id from whichever list matches the argument's type.
        if isinstance(cu, discord.Role):
            if cu.id not in roles:
                return await ctx.send(self.i18next.t(ctx.guild, "role_not_ignored", _emote="NO"))
            roles.remove(cu.id)
            self.db.configs.update(ctx.guild.id, "ignored_roles", roles)
            return await ctx.send(self.i18next.t(ctx.guild, "role_unignored", _emote="YES", role=cu.name))
        elif isinstance(cu, discord.TextChannel):
            if cu.id not in channels:
                return await ctx.send(self.i18next.t(ctx.guild, "channel_not_ignored", _emote="NO"))
            channels.remove(cu.id)
            self.db.configs.update(ctx.guild.id, "ignored_channels", channels)
            return await ctx.send(self.i18next.t(ctx.guild, "channel_unignored", _emote="YES", channel=cu.name))
    @commands.group()
    async def allowed_invites(
        self, 
        ctx
    ):
        """allowed_invites_help"""
        # Invoked without a subcommand: list the whitelisted server IDs.
        if ctx.subcommand_passed is None:
            allowed = [x.strip().lower() for x in self.db.configs.get(ctx.guild.id, "whitelisted_invites")]
            prefix = self.bot.get_guild_prefix(ctx.guild)
            if len(allowed) < 1:
                return await ctx.send(self.i18next.t(ctx.guild, "no_whitelisted", _emote="NO", prefix=prefix))
            else:
                e = Embed()
                e.add_field(
                    name="❯ Allowed invites (by server ID)",
                    value="```fix\n{}\n```".format("\n".join([f"{x}" for x in allowed]))
                )
                await ctx.send(embed=e)
    @allowed_invites.command()
    async def add(
        self, 
        ctx, 
        guild_id: int
    ):
        """allowed_invites_add_help"""
        # The whitelist is stored as normalised strings; compare and store as str.
        allowed = [x.strip().lower() for x in self.db.configs.get(ctx.guild.id, "whitelisted_invites")]
        if str(guild_id) in allowed:
            return await ctx.send(self.i18next.t(ctx.guild, "already_whitelisted", _emote="WARN", server=guild_id))
        allowed.append(str(guild_id))
        self.db.configs.update(ctx.guild.id, "whitelisted_invites", allowed)
        await ctx.send(self.i18next.t(ctx.guild, "added_invite", _emote="YES", server=guild_id))
    @allowed_invites.command()
    async def remove(
        self, 
        ctx, 
        guild_id: int
    ):
        """allowed_invites_remove_help"""
        # The whitelist is stored as normalised strings; compare and remove as str.
        allowed = [x.strip().lower() for x in self.db.configs.get(ctx.guild.id, "whitelisted_invites")]
        if str(guild_id) not in allowed:
            return await ctx.send(self.i18next.t(ctx.guild, "not_whitelisted", _emote="NO", server=guild_id))
        allowed.remove(str(guild_id))
        self.db.configs.update(ctx.guild.id, "whitelisted_invites", allowed)
        await ctx.send(self.i18next.t(ctx.guild, "removed_invite", _emote="YES", server=guild_id))
    @commands.command()
    async def punishment(
        self,
        ctx,
        warn: int,
        action: str,
        time: Duration = None
    ):
        """punishment_help"""
        # Configure the automatic punishment triggered at `warn` warnings.
        # `valid_actions` is defined outside this view; from the branches
        # below it includes at least "none" and "mute" plus timeless
        # actions (presumably kick/ban) -- confirm against its definition.
        warns = warn
        action = action.lower()
        prefix = self.bot.get_guild_prefix(ctx.guild)
        e = Embed()
        if not action in valid_actions:
            return await ctx.send(self.i18next.t(ctx.guild, "invalid_action", _emote="WARN", prefix=prefix))
        # Warn threshold must be in [1, 100].
        if warns < 1:
            return await ctx.send(self.i18next.t(ctx.guild, "min_warns", _emote="NO"))
        if warns > 100:
            return await ctx.send(self.i18next.t(ctx.guild, "max_warns", _emote="NO"))
        current = self.db.configs.get(ctx.guild.id, "punishments")
        if action == "none":
            # Remove any existing punishment configured at this warn count.
            new = {x: y for x, y in current.items() if str(x) != str(warns)}
            self.db.configs.update(ctx.guild.id, "punishments", new)
            desc = await getNewPunishments(self, ctx)
            e.add_field(
                name="❯ Punishments",
                value="{}".format("\n".join(desc) if len(desc) > 0 else "None")
            )
            return await ctx.send(content=self.i18next.t(ctx.guild, "set_none", _emote="YES", warns=warns), embed=e)
        elif action != "mute":
            # Timeless actions are stored as the bare action name.
            current.update({
                str(warns): action
            })
            self.db.configs.update(ctx.guild.id, "punishments", current)
            desc = await getNewPunishments(self, ctx)
            e.add_field(
                name="❯ Punishments",
                value="{}".format("\n".join(desc) if len(desc) > 0 else "None")
            )
            return await ctx.send(content=self.i18next.t(ctx.guild, f"set_{action}", _emote="YES", warns=warns), embed=e)
        else:
            # "mute" additionally requires a Duration argument.
            if time is None:
                return await ctx.send(self.i18next.t(ctx.guild, "time_needed", _emote="NO", prefix=prefix))
            as_seconds = time.to_seconds(ctx)
            if as_seconds > 0:
                length = time.length
                unit = time.unit
                # Stored as "mute <seconds> <length> <unit>" so both the raw
                # duration and its human-readable form are persisted.
                current.update({
                    str(warns): f"{action} {as_seconds} {length} {unit}"
                })
                self.db.configs.update(ctx.guild.id, "punishments", current)
                desc = await getNewPunishments(self, ctx)
                e.add_field(
                    name="❯ Punishments",
                    value="{}".format("\n".join(desc) if len(desc) > 0 else "None")
                )
                return await ctx.send(content=self.i18next.t(ctx.guild, f"set_{action}", _emote="YES", warns=warns, length=length, unit=unit), embed=e)
            else:
                raise commands.BadArgument("number_too_small")
def setup(bot):
    """discord.py extension entry point: register the AutomodPlugin cog."""
    bot.add_cog(AutomodPlugin(bot))
Instrumentos/Codigos/mining_script/mining_script_issues.py | carlavieira/study-HDI-influence | 0 | 12764042 | <reponame>carlavieira/study-HDI-influence
import os
import json
from time import sleep
import requests
import pandas as pd
import random
from datetime import datetime
from dotenv import load_dotenv
from pathlib import Path
load_dotenv()
def get_project_root() -> Path:
    """Return the project root: two directory levels above this file."""
    return Path(__file__).parents[1]
def save_json(file_dir, data):
    """Write *data* to *file_dir* as pretty-printed JSON (sorted keys).

    Uses an explicit UTF-8 encoding so the output is platform-independent
    (the default encoding is locale-dependent on some systems).
    """
    with open(file_dir, 'w', encoding='utf-8') as f:
        json.dump(data, f, sort_keys=True, indent=4)
# GitHub GraphQL API endpoint used by all requests below.
URL = 'https://api.github.com/graphql'
# Pool of personal access tokens (JSON array in the GITHUB_ACCESS_TOKENS env
# var), rotated by generate_new_header() to spread rate-limit consumption.
# Shuffled so repeated runs do not always start from the same token.
TOKEN_LIST = json.loads(os.getenv("GITHUB_ACCESS_TOKENS"))
random.shuffle(TOKEN_LIST)
ROOT = get_project_root()
def load_json(file_dir):
    """Load and return the JSON content of *file_dir*.

    Returns None (explicitly) when the file does not exist; the old
    message hard-coded "data.json", which was misleading for every other
    file loaded through this helper.
    """
    try:
        with open(file_dir, 'r') as read_file:
            return json.load(read_file)
    except FileNotFoundError:
        print(f'Failed to read {file_dir}... Perform get_repos and assure the file is in folder.')
        return None
def generate_new_header():
    """Build request headers using the current token, then advance the
    module-level `token_index` cyclically through TOKEN_LIST."""
    global token_index
    header = {
        'Content-Type': 'application/json',
        'Authorization': f'bearer {TOKEN_LIST[token_index]}'
    }
    # Cyclic advance; equivalent to the explicit if/else rotation.
    token_index = (token_index + 1) % len(TOKEN_LIST)
    return header
# Build the GraphQL query sent for each page of a user's issues.
def create_query(cursor, user_login):
    """Return the GraphQL query string for one page of *user_login*'s issues.

    *cursor* is the end-cursor of the previous page, or None for the first
    page (rendered as the GraphQL literal ``null``).
    """
    after = 'null' if cursor is None else '"{}"'.format(cursor)
    query = """
    {
      user(login: "%s") {
        issues(first: 20, after:%s ,orderBy: {field: CREATED_AT, direction: ASC}, filterBy: {since: "2020-01-01T00:00:00Z"}) {
          pageInfo {
            endCursor
            hasNextPage
          }
          totalCount
          nodes {
            createdAt
            databaseId
            state
            closed
            closedAt
            repository {
              name
              owner {
                login
              }
            }
          }
        }
      }
      rateLimit {
        remaining
      }
    }
    """ % (user_login, after)
    return query
def calculate_duration(date_creation, date_action):
    """Return the elapsed time between two ISO-8601 'Z' timestamps,
    rounded to whole hours."""
    fmt = "%Y-%m-%dT%H:%M:%SZ"
    start = datetime.strptime(date_creation, fmt)
    end = datetime.strptime(date_action, fmt)
    return round((end - start).total_seconds() / 3600)
def do_github_request():
    """POST the current GraphQL query to the GitHub API.

    Reads the module-level `issues_cursor`, `login` and `headers` globals
    set by the main loop.  Returns ``(parsed_json_dict, response)``;
    raises requests.HTTPError for non-2xx status codes.
    """
    response = requests.post(
        URL,
        json={'query': create_query(cursor=issues_cursor, user_login=login)},
        headers=headers,
    )
    response.raise_for_status()
    return dict(response.json()), response
def save_clean_data(dataframe):
    """Append cleaned issue rows from the last GraphQL page to *dataframe*.

    Reads the module-level globals `issues_data` (the most recent response)
    and `user_databaseId` (the user currently being mined).  Only issues
    created on or before 2020-12-31 are kept; closed issues additionally
    get a 'duration' column in whole hours.  Returns the extended frame.

    Fix: ``DataFrame.append`` was deprecated and removed in pandas 2.0;
    rows are now collected in a list and concatenated once, which is also
    O(n) instead of O(n^2).
    """
    date_limit = datetime.strptime("2020-12-31T23:59:00Z", "%Y-%m-%dT%H:%M:%SZ")
    new_rows = []
    for issue in issues_data['data']['user']['issues']['nodes']:
        if datetime.strptime(issue['createdAt'], "%Y-%m-%dT%H:%M:%SZ") <= date_limit:
            cleaned_data = {
                'user_databaseId': user_databaseId,
                'issue_databaseId': issue['databaseId'],
                'repo_owner': issue['repository']['owner']['login'],
                'repo_name': issue['repository']['name'],
                'state': issue['state'],
                'createdAt': issue['createdAt'],
                'closed': issue['closed'],
                'closedAt': issue['closedAt'],
            }
            if cleaned_data['state'] == 'CLOSED' and cleaned_data['closedAt']:
                cleaned_data['duration'] = calculate_duration(cleaned_data['createdAt'], cleaned_data['closedAt'])
            print(f"New ISSUE with ID {cleaned_data['issue_databaseId']} found")
            new_rows.append(cleaned_data)
    if new_rows:
        dataframe = pd.concat([dataframe, pd.DataFrame(new_rows)], ignore_index=True)
    return dataframe
if __name__ == "__main__":
    # Mine GitHub issues per country: for every user in each country's CSV,
    # page through their issues via the GraphQL API and append cleaned rows
    # to the country's issues CSV.  Progress is checkpointed after every
    # page (finally block) and per user (finished_issues.json), so the
    # script is restartable.
    print(f"\n**** Starting GitHub API Requests *****\n")
    locations_dir = f'{ROOT}/resources/refined_locations.json'
    finished_issues_dir = f'{ROOT}/resources/finished_issues.json'
    finished_issues = load_json(finished_issues_dir)
    locations = load_json(locations_dir)
    countries = locations.keys()
    token_index = 0
    headers = generate_new_header()
    for country in countries:
        print(f"\n**** Starting Country {country.upper()} Requests *****\n")
        users_csv_dir = f"{ROOT}/data_files_test/{country}/{country}_users.csv"
        issues_csv_dir = f"{ROOT}/data_files_test/{country}/{country}_issues.csv"
        try:
            users = pd.read_csv(users_csv_dir)
            issues_df = pd.read_csv(issues_csv_dir)
            for row in users.itertuples():
                # row[n] => csv 'position'
                # NOTE(review): column positions 1 (databaseId) and 8
                # (login) are assumed from the users CSV layout -- confirm.
                index = row[0]
                user_databaseId = row[1]
                login = row[8]
                if user_databaseId in finished_issues[country]:
                    print('Already mined data for this user...')
                else:
                    total_pages = 0
                    response = ''
                    issues_has_next_page = True
                    issues_cursor = None
                    remaining_nodes = 666  # random number to initialize remaining_nodes
                    page_counter = 0
                    while issues_has_next_page:
                        print('Working on ISSUES...')
                        try:
                            # Rotate tokens before the rate limit runs out.
                            if remaining_nodes < 200:
                                print('Changing GitHub Access Token...')
                                headers = generate_new_header()
                            issues_data, response = do_github_request()
                            # GraphQL errors arrive with HTTP 200; stop
                            # paging this user when any are present.
                            if 'errors' in issues_data.keys():
                                print(issues_data)
                                issues_has_next_page = False
                                break
                            if response.status_code == 200:
                                total_pages = round(issues_data['data']['user']['issues']['totalCount'] / 10 + 0.5)
                                issues_cursor = issues_data['data']['user']['issues']['pageInfo']['endCursor']
                                issues_has_next_page = issues_data['data']['user']['issues']['pageInfo']['hasNextPage']
                                remaining_nodes = issues_data['data']['rateLimit']['remaining']
                                issues_df = save_clean_data(dataframe=issues_df)
                                if not issues_has_next_page:
                                    print('Changing to next ISSUE...')
                                    issues_cursor = None
                        except requests.exceptions.ConnectionError:
                            print(f'Connection error during the request')
                        except requests.exceptions.HTTPError:
                            print(f'HTTP request error... Sleeping 10 seconds to retry')
                            sleep(10)
                        except KeyboardInterrupt:
                            # Flush progress before the interrupt propagates.
                            issues_df.to_csv(issues_csv_dir, index=False, header=True)
                        finally:
                            # Checkpoint the CSV after every page so no more
                            # than one page of work can be lost.
                            print('Completed issues {}/{} of user {} ({}/{})'.format(
                                page_counter, total_pages, login, index + 1, len(users)))
                            issues_df.to_csv(issues_csv_dir, index=False, header=True)
                            page_counter += 1
                    # Mark this user as done and persist the checkpoint file.
                    finished_issues[country].append(user_databaseId)
                    save_json(file_dir=finished_issues_dir, data=finished_issues)
        except FileNotFoundError:
            print(f'{users_csv_dir} not found...')
| 2.4375 | 2 |
example_1/Batuhan/workspace/build/meturone_egitim/cmake/meturone_egitim-genmsg-context.py | tekmen0/ROS-intro | 3 | 12764043 | <reponame>tekmen0/ROS-intro
# generated from genmsg/cmake/pkg-genmsg.context.in
# Machine-generated build context consumed by genmsg's CMake macros for the
# meturone_egitim ROS package -- do not edit by hand; values are
# semicolon-separated lists as emitted by CMake.
messages_str = "/home/tekmen0/meturone_egitim/src/meturone_egitim/msg/Dummy.msg;/home/tekmen0/meturone_egitim/src/meturone_egitim/msg/answer.msg"
services_str = ""
pkg_name = "meturone_egitim"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "meturone_egitim;/home/tekmen0/meturone_egitim/src/meturone_egitim/msg;std_msgs;/opt/ros/noetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python3"
# The template substituted an empty string here, so this evaluates to False.
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/noetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| 1.351563 | 1 |
discord/html/parser.py | FrostiiWeeb/discord.html | 2 | 12764044 | <reponame>FrostiiWeeb/discord.html
# std libraries
import typing
import asyncio
from enum import Enum
from inspect import Parameter, _ParameterKind
# third party
import discord
import tempfile
from bs4 import BeautifulSoup
from discord.ext import commands
from discord.ext.commands.errors import ClientException, MissingRequiredArgument
# local
from .bot import Bot, BOT
class TagTable(Enum):
    """Markup tags recognised by the discord.html format.

    NOTE(review): values mix tag fragments and full tags ("<bot" has no
    closing '>' because the opening tag carries attributes).  This enum is
    not referenced by Parser below -- confirm whether it is still needed.
    """
    bot_starting = "<bot"
    bot_ending = "/bot>"
    run = "<run>"
def indent(text: str, amount, ch=' '):
    """Indent every line of *text* by *amount* copies of *ch*.

    Bug fix: the previous version used ``splitlines(keepends=False)`` and
    joined with ``''``, which discarded every newline and collapsed
    multi-line text onto a single line (breaking the multi-statement
    handler bodies exec'd by Parser.parse).  ``keepends=True`` preserves
    the original line endings.
    """
    padding = amount * ch
    return ''.join(padding + line for line in text.splitlines(keepends=True))
class Parser:
    """Parses discord.html markup into configured, running discord bots."""

    def __init__(self) -> None:
        pass

    def parse(self, contents: typing.List[str]):
        """Parse markup *contents* (one string per line).

        For each ``<bot prefix=... token=...>`` tag, a Bot is created and
        every nested ``<event type=... parameters=...>`` tag is compiled
        into an async listener from its ``<response>`` body.  A trailing
        ``<run>`` tag starts the first (and only) bot.

        SECURITY: the <response> body is exec'd verbatim -- only parse
        trusted markup.
        """
        bots: typing.List[Bot] = []
        bot_initiated: typing.Optional[bool] = False
        content = "\n".join([line.strip(" ") for line in contents])
        soup = BeautifulSoup(content, "html5lib")
        for bot in soup.find_all("bot"):
            prefix = bot.get("prefix")
            token = bot.get("token")
            prefix_param = Parameter("prefix", _ParameterKind.KEYWORD_ONLY)
            token_param = Parameter("token", _ParameterKind.KEYWORD_ONLY)
            if not prefix:
                raise MissingRequiredArgument(prefix_param)
            # Bug fix: previously this tested `not token_param` (a Parameter
            # object, always truthy), so a missing token was never reported.
            if not token:
                raise MissingRequiredArgument(token_param)
            bot_initiated = True
            bots.append(Bot(prefix, token=token))
            bot_obj = bots[0]
            for event in bot.find_all("event"):
                # `event_type` doubles as the generated listener's name,
                # which discord.py maps onto the gateway event of the same
                # name (e.g. on_message).
                event_type = event.get("type")
                params = event.get("parameters")
                if not event_type:
                    raise MissingRequiredArgument(Parameter("type", _ParameterKind.KEYWORD_ONLY))
                response = event.find("response")
                content = (response.contents[0])
                # Compile the handler body into a real coroutine function.
                namespace = {}
                exec(f"async def {event_type}({params}):\n{indent(text=content, amount=4)}", namespace)
                bot_obj.add_listener(namespace[event_type])
        run = soup.find("run")
        if run:
            if not bot_initiated:
                raise ClientException("Bot has not been initialized")
            if len(bots) > 1:
                raise RuntimeError("Exceeded max bot instances")
            (bots[0]).run((bots[0]).token)
tests/data/x-test_pot.py | fKunstner/pytest-optional-tests | 1 | 12764045 | import pytest
# Fixture tests for the pytest-optional-tests plugin: each function carries
# a different combination of optional-test marks (opt1/opt2/opt3), a
# non-optional mark, or none at all, so the plugin's per-mark
# enable/disable behaviour can be exercised.
def test_no_mark():
    pass


@pytest.mark.opt1
def test_opt1():
    pass


@pytest.mark.opt1
@pytest.mark.opt2
def test_opt12():
    pass


@pytest.mark.opt2
def test_opt2():
    pass


@pytest.mark.opt3
def test_opt3():
    pass


@pytest.mark.other
def test_other():
    pass
| 1.578125 | 2 |
grab/util/module.py | subeax/grab | 0 | 12764046 | <gh_stars>0
"""
The source code of `reraise` and `import_string` was copied from https://github.com/mitsuhiko/werkzeug/blob/master/werkzeug/utils.py
"""
import logging
import sys
from grab.spider.base import Spider
from grab.spider.error import SpiderInternalError
from grab.util.misc import camel_case_to_underscore
from grab.util.py3k_support import *
# NOTE(review): PY2 is hard-coded rather than derived from
# sys.version_info; import_string() below uses it to decide whether to
# encode attribute names -- confirm this is intentional on Python 3.
PY2 = True
# Maps spider name -> Spider subclass; filled by build_spider_registry().
SPIDER_REGISTRY = {}
# `unicode` comes from the py3k_support star-import above.
string_types = (str, unicode)
logger = logging.getLogger('grab.util.module')
def reraise(tp, value, tb=None):
    """Raise exception class *tp* with *value*, attaching traceback *tb*.

    On Python 2 this delegates to a helper module that can use the
    three-argument ``raise`` syntax (a SyntaxError if written here).
    """
    if sys.version_info >= (3,):
        raise tp(value).with_traceback(tb)
    from grab.util import py2x_support
    py2x_support.reraise(tp, value, tb)
class ImportStringError(ImportError):
    """Provides information about a failed :func:`import_string` attempt."""

    #: String in dotted notation that failed to be imported.
    import_name = None

    #: Wrapped exception.
    exception = None

    def __init__(self, import_name, exception):
        # Build a diagnostic message by re-importing each dotted prefix of
        # *import_name* until one fails, so the message pinpoints exactly
        # where the import chain broke.
        self.import_name = import_name
        self.exception = exception
        msg = (
            'import_string() failed for %r. Possible reasons are:\n\n'
            '- missing __init__.py in a package;\n'
            '- package or module path not included in sys.path;\n'
            '- duplicated package or module name taking precedence in '
            'sys.path;\n'
            '- missing module, class, function or variable;\n\n'
            'Debugged import:\n\n%s\n\n'
            'Original exception:\n\n%s: %s')
        name = ''
        tracked = []
        for part in import_name.replace(':', '.').split('.'):
            # Grow the dotted prefix one component at a time.
            name += (name and '.') + part
            imported = import_string(name, silent=True)
            if imported:
                tracked.append((name, getattr(imported, '__file__', None)))
            else:
                track = ['- %r found in %r.' % (n, i) for n, i in tracked]
                track.append('- %r not found.' % name)
                msg = msg % (import_name, '\n'.join(track),
                             exception.__class__.__name__, str(exception))
                break
        # NOTE(review): if every prefix imports successfully the loop never
        # breaks and `msg` keeps its raw %-placeholders -- confirm intended.
        ImportError.__init__(self, msg)

    def __repr__(self):
        return '<%s(%r, %r)>' % (self.__class__.__name__, self.import_name,
                                 self.exception)
def import_string(import_name, silent=False):
    """Imports an object based on a string. This is useful if you want to
    use import paths as endpoints or something similar. An import path can
    be specified either in dotted notation (``xml.sax.saxutils.escape``)
    or with a colon as object delimiter (``xml.sax.saxutils:escape``).

    If `silent` is True the return value will be `None` if the import fails.

    :param import_name: the dotted name for the object to import.
    :param silent: if set to `True` import errors are ignored and
                   `None` is returned instead.
    :return: imported object
    """
    #XXX: py3 review needed
    assert isinstance(import_name, string_types)
    # force the import name to automatically convert to strings
    import_name = str(import_name)
    try:
        # Split into module path and target attribute; a bare name with no
        # separator is imported as a whole module.
        if ':' in import_name:
            module, obj = import_name.split(':', 1)
        elif '.' in import_name:
            module, obj = import_name.rsplit('.', 1)
        else:
            return __import__(import_name)
        # __import__ is not able to handle unicode strings in the fromlist
        # if the module is a package
        if PY2 and isinstance(obj, unicode):
            obj = obj.encode('utf-8')
        try:
            return getattr(__import__(module, None, None, [obj]), obj)
        except (ImportError, AttributeError):
            # support importing modules not yet set up by the parent module
            # (or package for that matter)
            modname = module + '.' + obj
            __import__(modname)
            return sys.modules[modname]
    except ImportError as e:
        # Wrap in ImportStringError while preserving the original traceback;
        # in silent mode the function implicitly returns None instead.
        if not silent:
            reraise(
                ImportStringError,
                ImportStringError(import_name, e),
                sys.exc_info()[2])
def build_spider_registry(config):
    """Rebuild SPIDER_REGISTRY by importing every module listed in the
    config's GRAB_SPIDER_MODULES and collecting non-abstract Spider
    subclasses.  An entry may pin one class with "module.path:ClassName".
    """
    # TODO: make smart spider class searching
    #for mask in config.SPIDER_LOCATIONS:
        #for path in glob.glob(mask):
            #if path.endswith('.py'):
                #mod_path = path[:-3].replace('/', '.')
                #try:
                    #mod = __import__
    SPIDER_REGISTRY.clear()
    # NOTE(review): module_mapping is never used -- leftover?
    module_mapping = {}
    for path in config.get('GRAB_SPIDER_MODULES', []):
        if ':' in path:
            path, cls_name = path.split(':')
        else:
            cls_name = None
        try:
            mod = __import__(path, None, None, ['foo'])
        except ImportError as ex:
            # Only log import errors for *other* modules; an error whose
            # message mentions `path` itself is treated as "module simply
            # absent" and silently skipped.  NOTE(review): matching on the
            # exception message text is fragile -- confirm.
            if not path in unicode(ex):
                logging.error('', exc_info=ex)
        else:
            for key in dir(mod):
                # Skip the base class itself when re-exported.
                if key == 'Spider':
                    continue
                if cls_name is None or key == cls_name:
                    val = getattr(mod, key)
                    if isinstance(val, type) and issubclass(val, Spider):
                        if val.Meta.abstract:
                            # Abstract spiders are never registered.
                            pass
                        else:
                            spider_name = val.get_spider_name()
                            logger.debug('Module `%s`, found spider `%s` with name `%s`' % (
                                path, val.__name__, spider_name))
                            # Duplicate names across modules are a hard error.
                            if spider_name in SPIDER_REGISTRY:
                                raise SpiderInternalError(
                                    'There are two different spiders with the '\
                                    'same name "%s". Modules: %s and %s' % (
                                        spider_name,
                                        SPIDER_REGISTRY[spider_name].__module__,
                                        val.__module__))
                            else:
                                SPIDER_REGISTRY[spider_name] = val
    return SPIDER_REGISTRY
def load_spider_class(config, spider_name):
    """Return the Spider subclass registered under *spider_name*.

    Lazily builds the registry from *config* on first use.  Raises
    SpiderInternalError for an unknown spider name.
    """
    if not SPIDER_REGISTRY:
        build_spider_registry(config)
    # Idiomatic `not in` (was `not spider_name in ...`); guard-clause style.
    if spider_name not in SPIDER_REGISTRY:
        raise SpiderInternalError('Unknown spider: %s' % spider_name)
    return SPIDER_REGISTRY[spider_name]
| 2.03125 | 2 |
linear_eval/model.py | DAVEISHAN/TCLR | 4 | 12764047 | <gh_stars>1-10
import numpy as np
import torch.nn as nn
import torch
import torchvision
from r3d_classifier import r3d_18_classifier
# from r2p1d import r2plus1d_18, embedder
# from classifier_r2p1d import r2plus1d_18_classifier
# # from try1_model import r2plus1d_18_changed
# # from dilated_r2plus1d import r2plus1d_18
from torchvision.models.utils import load_state_dict_from_url
def build_r3d_classifier(num_classes = 102, kin_pretrained = False, self_pretrained = True, saved_model_file = None, linear = True):
    """Build an R3D-18 video classifier with a temporally-dilated layer4.

    :param num_classes: size of the final classification layer.
    :param kin_pretrained: load torchvision's Kinetics-pretrained weights.
    :param self_pretrained: load self-supervised backbone weights from
        ``saved_model_file`` (required when truthy).
    :param saved_model_file: checkpoint path for the self-supervised weights.
    :param linear: unused; kept so existing call sites keep working.
    :return: model with ``fc`` replaced by a fresh ``nn.Linear(512, num_classes)``.
    """
    model = r3d_18_classifier(pretrained = kin_pretrained, progress = False)

    # Replace layer4's first block so the temporal dimension is not
    # downsampled (stride (1, 2, 2) instead of the stock stride),
    # compensating with dilation 2 along time.
    model.layer4[0].conv1[0] = nn.Conv3d(256, 512, kernel_size=(3, 3, 3),
        stride=(1, 2, 2), padding=(2, 1, 1), dilation=(2, 1, 1), bias=False)
    model.layer4[0].downsample[0] = nn.Conv3d(256, 512,
        kernel_size=(1, 1, 1), stride=(1, 2, 2), bias=False)

    if self_pretrained:  # was `== True`; truthiness check is equivalent here
        pretrained = torch.load(saved_model_file)
        if 'state_dict' in pretrained:
            pretrained_kvpair = pretrained['state_dict']
        elif 'bb_state_dict' in pretrained:
            pretrained_kvpair = pretrained['bb_state_dict']
        else:
            # Previously this fell through and crashed later with an
            # unbound-variable NameError; fail with a clear message instead.
            raise KeyError(
                "checkpoint contains neither 'state_dict' nor 'bb_state_dict'")

        model_kvpair = model.state_dict()
        for layer_name, weights in pretrained_kvpair.items():
            # Skip parameters saved under the '1.' / 'module.1.' prefix
            # (the self-supervised trainer's second sub-module).
            if 'module.1.' in layer_name or layer_name.startswith('1.'):
                continue
            # Strip DataParallel / Sequential wrapper prefixes from the
            # remaining backbone keys.
            if 'module.0.' in layer_name:
                layer_name = layer_name.replace('module.0.', '')
            if 'module.' in layer_name:
                layer_name = layer_name.replace('module.', '')
            elif layer_name.startswith('0.'):
                layer_name = layer_name[2:]
            # The checkpoint's classifier head is not transferred.
            if 'fc' in layer_name:
                continue
            model_kvpair[layer_name] = weights
        model.load_state_dict(model_kvpair, strict=True)
        print(f'model {saved_model_file} loaded successsfully!')

    model.fc = nn.Linear(512, num_classes)
    return model
def load_r3d_classifier(num_classes = 102, saved_model_file = None):
    """Rebuild the dilated R3D-18 classifier and restore a full checkpoint.

    Unlike build_r3d_classifier, the checkpoint here contains the complete
    fine-tuned state (including the ``fc`` head), so keys are copied over
    verbatim with no prefix filtering.
    """
    model = r3d_18_classifier(pretrained = False, progress = False)
    # Same layer4 surgery as build_r3d_classifier: keep temporal resolution
    # (stride (1, 2, 2)) and dilate conv1 along time.
    model.layer4[0].conv1[0] = nn.Conv3d(256, 512, kernel_size=(3, 3, 3),
        stride=(1, 2, 2), padding=(2, 1, 1), dilation=(2, 1, 1), bias=False)
    model.layer4[0].downsample[0] = nn.Conv3d(256, 512,
        kernel_size=(1, 1, 1), stride=(1, 2, 2), bias=False)
    model.fc = nn.Linear(512, num_classes)

    model_kvpair = model.state_dict()
    pretrained_kvpair = torch.load(saved_model_file)['state_dict']
    for layer_name, weights in pretrained_kvpair.items():
        model_kvpair[layer_name] = weights
    model.load_state_dict(model_kvpair, strict=True)
    print(f'model {saved_model_file} loaded successsfully!')
    return model
def build_r3d_backbone():
    # Build the headless dilated R3D-18 backbone.
    # NOTE(review): `r3d_18` is never imported in this file (only
    # r3d_18_classifier is), so calling this raises NameError as written.
    # Presumably the import was lost -- restore it (e.g. from the same
    # module that provides r3d_18_classifier) and confirm.
    model = r3d_18(pretrained = False, progress = False)
    # Same layer4 dilation surgery as the classifier builders above: keep
    # temporal resolution (stride (1, 2, 2)), dilate conv1 along time.
    model.layer4[0].conv1[0] = nn.Conv3d(256, 512, kernel_size=(3, 3, 3),\
        stride=(1, 2, 2), padding=(2, 1, 1),dilation = (2,1,1), bias=False)
    model.layer4[0].downsample[0] = nn.Conv3d(256, 512,\
        kernel_size = (1, 1, 1), stride = (1, 2, 2), bias=False)
    return model
if __name__ == '__main__':
    # Manual smoke test: restore a checkpointed classifier (requires the
    # hard-coded checkpoint path and a CUDA device).
    # summary(model, (16, 3, 112, 112))
    # NOTE(review): `input` (a random (batch, C, T, H, W) clip tensor) is
    # created but never fed to the model -- a forward pass was presumably
    # removed; it also shadows the `input` builtin.
    input = torch.rand(5, 3, 16, 112, 112).cuda()
    model = build_r3d_classifier(num_classes = 102, saved_model_file = '/home/c3-0/ishan/ss_saved_models/r3d82/model_best_e151_loss_0.9281.pth')
| 1.976563 | 2 |
src/Sastrawi/Stemmer/Filter/TextNormalizer.py | ZenaNugraha/PySastrawi | 282 | 12764048 | import re
def normalize_text(text):
    """Normalize *text*: lowercase it, replace every character outside
    ``[a-z0-9 -]`` with a space, collapse runs of spaces, and strip
    leading/trailing whitespace.

    The previous version passed ``re.IGNORECASE`` (redundant after
    ``lower()``) and ``re.MULTILINE`` (no-op without anchors); both flags
    are dropped with identical behavior.
    """
    result = text.lower()
    result = re.sub(r'[^a-z0-9 -]', ' ', result)
    result = re.sub(r' +', ' ', result)
    return result.strip()
| 3.6875 | 4 |
vis_fit.py | motoHiraga/pyMBEANN | 0 | 12764049 | <filename>vis_fit.py
'''
Example for visualizing the fitness transition throughout the evolutionary process.
'''
import os
import pickle

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.colors import Normalize

# Directory holding the pickled statistics log produced by a pyMBEANN run;
# switch the commented line to plot a different experiment.
path = os.path.join(os.path.dirname(__file__), 'examples/xor/results_xor_2147483648')
# path = os.path.join(os.path.dirname(__file__), 'examples/cart2pole/results_cart_0')

# log_stats.pkl is a stream of pickled records; read until EOF.
logs = []
with open('{}/log_stats.pkl'.format(path), 'rb') as pkl:
    while True:
        try:
            o = pickle.load(pkl)
        except EOFError:
            break
        logs.append(o)

# First record is the column-header row; the rest are per-generation stats
# (Gen, Mean, Std, Max, Min, ...).
df = pd.DataFrame(logs[1:], columns=logs[0])
print(df)

fig = plt.figure()
ax = fig.add_subplot(111)

# Mean fitness with a shaded +/- one-standard-deviation band, plus max/min.
# ax = df.plot(x='Gen', colormap='viridis', linewidth=3.0)
plt_mean = plt.plot(df['Gen'], df['Mean'],
                    linewidth=2.0,
                    color='cornflowerblue',
                    label='mean')
plt_std = plt.fill_between(df['Gen'],
                           df['Mean'] - df['Std'],
                           df['Mean'] + df['Std'],
                           color='cornflowerblue',
                           alpha=0.4,
                           linewidth=0,
                           label='standard deviation \nof mean')
plt_max = plt.plot(df['Gen'], df['Max'],
                   linewidth=2.0,
                   linestyle='--',
                   color='g',
                   label='max')
plt_min = plt.plot(df['Gen'], df['Min'],
                   linewidth=2.0,
                   linestyle=':',
                   color='y',
                   label='min')

ax.tick_params(labelsize=14)
ax.set_xlabel(r'Generation', fontsize=18)
ax.set_ylabel(r'Fitness Value', fontsize=18)
ax.legend(fontsize=12)
# ax.legend(loc='lower right', fontsize=12)

plt.tight_layout()
# Save next to the log it was drawn from, then display interactively.
plt.savefig('{}/mbeann_fitness.pdf'.format(path))
plt.show()
| 2.875 | 3 |
Hand_in_pset1/main_K_smith_bis2.py | SkanderGar/QuantMacro | 0 | 12764050 | <reponame>SkanderGar/QuantMacro
import numpy as np
import os
# Working directory is hard-coded to the author's machine; 'parameter.txt'
# below is loaded relative to it.
os.chdir('C:/Users/DELL/Desktop/Luis/Pset1')
import matplotlib.pyplot as plt
import K_smith as ks

#####best fit
# Compare value functions from two Krusell-Smith solves: one started from
# the fitted law-of-motion coefficients in parameter.txt, one from the
# default guess (the "updating" run).
Beta = np.loadtxt('parameter.txt')
ks1 = ks.K_S(Guess = Beta, N_k = 100, N_K=40)
V_int, gk_int = ks1.Start_VFI()
ks2 = ks.K_S(N_k = 100, N_K=40)
V_int2, gk_int2 = ks2.Start_VFI()
# Evaluate both value-function interpolants at aggregate capital K = 17
# over the individual capital grid and overlay them.
K = 17
grid_k = ks1.grid_k
f2, g_ax1 = plt.subplots(1,1)
f2.set_figheight(5)
f2.set_figwidth(10)
g_ax1.plot(grid_k, V_int[0](grid_k,K), 'b' ,label = 'Not Updating')
#g_ax1.legend(loc = 'upper right')
g_ax1.plot(grid_k, V_int2[0](grid_k,K), 'r', label = 'Updating')
g_ax1.legend(loc = 'upper right')

# Disabled experiment: simulate the two-agent economy and histogram the
# time-averaged individual capital holdings for each policy.
#Z, E = ks1.Simulation_U_Z()
#k_dist, K = ks1.Simu_2ag(gk_int, gk_int2, Z, E, ret=1)
#k_m = np.mean(k_dist, axis = 0)
#K_m = np.mean(K)

#f2, (g_ax1, g_ax2) = plt.subplots(1,2)
#f2.set_figheight(5)
#f2.set_figwidth(10)
#g_ax1.hist(k_m[:1000], bins =40, label = 'Not Updating')
#g_ax1.legend(loc = 'upper right')
#g_ax2.hist(k_m[1000:], bins =40, label = 'Updating')
#g_ax2.legend(loc = 'upper right')
| 1.953125 | 2 |