content stringlengths 5 1.05M |
|---|
from time import sleep, time
from urllib.parse import urlparse, urljoin
import re
from typing import Pattern, Set
# Matches anything that plausibly looks like an http(s) URL; the scheme is optional.
valid_url_pattern = re.compile(r"^(?:http(s)?://)?[\w.-]+(?:\.[\w.-]+)+[\w\-._~:/?#[\]@!$&'\(\)\*\+,;=]+$")
# robots.txt directive lines; group 1 captures the path pattern after the colon.
allow_pattern = re.compile(r'^Allow:\s+(.+)$')
disallow_pattern = re.compile(r'^Disallow:\s+(.+)$')
user_agent_pattern = re.compile(r'^User-agent:\s+(.+)$')
class RateLimit:
    """
    Decorator that throttles calls to the wrapped function to at most
    `max_rate` calls per second.

    It should be noticed that if it wraps two different functions
    *it will throttle them separately*. E.g.
        @RateLimit(max_rate=2)
        def get(...):
            # do stuff
        @RateLimit(max_rate=2)
        def post(...):
            # do stuff
    would result in a situation where we are making up to 4 requests per
    second to the server.
    """
    def __init__(self, *, max_rate: int) -> None:
        # Minimum number of seconds that must elapse between two calls.
        self.time_between_actions = 1 / max_rate
        # Pretend the previous call happened long enough ago that the very
        # first real call goes through without sleeping.
        self.last_action_time = time() - self.time_between_actions

    def __call__(self, wrapped_function):
        # Local import so the module-level dependency list is unchanged.
        from functools import wraps

        @wraps(wrapped_function)  # preserve __name__/__doc__ of the wrapped function
        def wrapper(*args, **kwargs):
            wait_for_time = self.last_action_time + self.time_between_actions - time()
            if wait_for_time > 0:
                sleep(wait_for_time)
            self.last_action_time = time()
            return wrapped_function(*args, **kwargs)
        return wrapper
def href_is_valid_url(href: str) -> bool:
    """
    Report whether *href* looks like a real URL.

    A surprising number of <a> tags carry no href at all, so first require a
    non-empty string; if that test passes, explicitly match against
    valid_url_pattern. This filters out non-URL hrefs such as phone numbers
    and email addresses.
    """
    if not isinstance(href, str) or not href:
        return False
    return valid_url_pattern.match(href) is not None
def convert_to_regex(raw_pattern: str) -> Pattern[str]:
    """
    Compile a robots.txt path rule (e.g. ``Allow: */data/*.html``) into a regex.

    Characters such as '+' or '.' are legal in URLs but mean something to the
    regex engine, so the raw rule is escaped first; the robots.txt wildcard
    '*' is then rewritten as '.*'. A rule ending in '/' (e.g.
    ``Disallow: /data/``) recursively covers everything under that directory,
    which we express by appending a wildcard before translating.
    """
    normalized = raw_pattern + '*' if raw_pattern.endswith('/') else raw_pattern
    translated = re.escape(normalized).replace('\\*', '.*')
    return re.compile(f'^{translated}$')
def remove_non_local_urls(urls: Set[str], local_domain: Pattern[str]) -> Set[str]:
    """Return only the urls whose start matches the *local_domain* pattern."""
    return {url for url in urls if local_domain.match(url)}
def handle_relative_paths(parent_url: str, child_urls: Set[str]) -> Set[str]:
    """
    Resolve relative child links against the page they were scraped from.

    Scraping parent_url, say https://www.example.com/doc.html, may yield tags
    like ``<a href="some_other_doc.html">``; this turns such a relative href
    into https://www.example.com/some_other_doc.html. Links that already carry
    a network location are passed through untouched.
    """
    # An empty netloc means the href is (probably) relative — or broken.
    return {
        child if urlparse(child).netloc else urljoin(parent_url, child)
        for child in child_urls
    }
|
# pylint: disable-msg = wildcard-import, unused-wildcard-import, unused-import
from phi.flow import *
from .app import *
from .session import *
from .world import *
from .data import *
from .util import *
import tensorflow as tf
|
import numpy
import fileinput
import os
from multiprocessing import Pool
from itertools import repeat
'''
Here we precompute all the possible values for the stated input.
First, we will compute all the prime numbers in the interval from 0 to the
maximum allowed input (B value).
Then, we use these prime numbers to factorize all the numbers in this interval.
We save in a list all these values in order to be used as precomputed values
in future executions of the program.
'''
def primes(n):
    """Return a list of all primes strictly less than n (sieve of Eratosthenes).

    Ported to Python 3: the original used `xrange` and true division `/` for
    the slice length, both of which fail on Python 3. Also fixed: for n <= 2
    the original wrongly returned [2] even though 2 is not < n.
    """
    if n <= 2:
        return []
    sieve = [True] * n
    # Odd numbers only; even ones are handled by listing 2 explicitly.
    for i in range(3, int(n ** 0.5) + 1, 2):
        if sieve[i]:
            # Cross off odd multiples of i starting at i*i; the slice length
            # must be an int, hence floor division.
            sieve[i * i::2 * i] = [False] * ((n - i * i - 1) // (2 * i) + 1)
    return [2] + [i for i in range(3, n, 2) if sieve[i]]
def descompositionOfN(n, list_primes):
    """Return True iff n has exactly two prime factors counted with
    multiplicity (i.e. n is a semiprime / "almost prime").

    list_primes must contain, in increasing order, every prime up to sqrt(n).
    """
    count = 0
    for prime in list_primes:
        if prime * prime > n:
            # All remaining primes are too large to divide what is left.
            break
        while n % prime == 0:
            count += 1
            if count > 2:
                # Already more than two factors — cannot be a semiprime.
                return False
            n //= prime
    # Whatever survived the trial division is itself a prime factor.
    if n != 1:
        count += 1
    return count == 2
def descomposeSetOfValues(arguments):
    """Worker for Pool.map: return all semiprimes in one chunk of the range.

    *arguments* is a (index, list_primes, range_size) tuple; the chunk covers
    the integers range_size*index + 1 .. range_size*(index + 1).
    """
    index, list_primes, range_size = arguments
    base = range_size * index
    return [
        base + offset + 1
        for offset in range(range_size)
        if descompositionOfN(base + offset + 1, list_primes)
    ]
def precompute():
    """Precompute every semiprime up to MAX_VALUE and cache the array on disk.

    Fixes for Python 3: `(MAX_VALUE+1)/splits` was true division (a float),
    which `range()` rejects — use floor division; and `numpy.save` writes
    binary data, so the output file must be opened in 'wb', not 'w'.
    """
    MAX_VALUE = 100000000
    splits = 1000000
    list_primes = numpy.array(primes(MAX_VALUE + 1))
    n_chunks = (MAX_VALUE + 1) // splits  # must be an int for range()
    p = Pool(4)
    try:
        results = p.map(
            descomposeSetOfValues,
            zip(range(n_chunks), repeat(list_primes), repeat(splits)),
        )
    finally:
        p.close()
    results = numpy.concatenate(results)
    # numpy.save emits the binary .npy format — binary mode is required.
    with open("precomputed_almost_prime_numbers", 'wb') as f:
        numpy.save(f, results)
'''
This code just loads the precomputed file and gives the answer by counting the
values which have been found in the precomputing stage.
'''
def solve(A, B, almost_prime_numbers):
    """Count precomputed almost-prime values in the closed interval [A, B]."""
    in_range = (almost_prime_numbers >= A) & (almost_prime_numbers <= B)
    return numpy.sum(in_range)
'''
Lets execute!
'''
if __name__ == "__main__":
    # Build the on-disk cache on first run only.
    if "precomputed_almost_prime_numbers" not in os.listdir("."):
        precompute()
    # numpy.save wrote binary data, so read it back in binary mode.
    # (Ported from Python 2: `print x` and `inp.next()` are errors on py3.)
    with open("precomputed_almost_prime_numbers", 'rb') as f:
        almost_prime_numbers = numpy.load(f)
    inp = fileinput.input()
    # int() tolerates the trailing newline, no explicit strip needed.
    cases = int(next(inp))
    for case in range(cases):
        A, B = map(int, next(inp).split(" "))
        print(solve(A, B, almost_prime_numbers))
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Softbank Robotics Europe
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Standard libraries
import collections
from abc import ABCMeta
import sys
# Local modules
from ._textualize import TextualizeMixin
class StructMeta(ABCMeta):
    """Metaclass for Struct: turns the class-level __ATTRIBUTES__ list of
    parameter descriptions into __slots__-backed descriptors, builds the
    class docstring from them, and adds copy-construction support."""
    def __new__(mcs, name, bases, attrs, **kwargs):
        # Prepare __slots__
        attrs["__slots__"]=[]
        # If some parameters are defined
        if "__ATTRIBUTES__" in attrs.keys():
            for parameter in attrs["__ATTRIBUTES__"]:
                # Create a slot for each
                attrs["__slots__"].append("_"+parameter.id)
                # And create a descriptor class to handle their
                # get/set/default/doc based on parameter description
                # NOTE: a fresh Descriptor class is created per loop
                # iteration, so each one's __doc__ and bound parameter are
                # evaluated immediately (no late-binding issue).
                class Descriptor(object):
                    __doc__ ="""%s
                    Default: %s
                    """%(parameter.description,\
                    str(parameter.default) if parameter.default != "" else "\"\"")
                    __slots__ = ["_parameter"]
                    def __init__(self, parameter):
                        self._parameter = parameter
                    def __get__(self, instance, owner):
                        if instance is not None:
                            try:
                                return getattr(instance, "_"+self._parameter.id)
                            except AttributeError:
                                # Slot not populated yet: lazily install the
                                # normalized default on first access.
                                setattr(instance, "_"+self._parameter.id, self._parameter.normalizer(self._parameter.default))
                                return getattr(instance, "_"+self._parameter.id)
                        else:
                            # Class-level access returns the parameter description.
                            return self._parameter
                    def __set__(self, instance, value):
                        # None / "" fall back to the declared default; every
                        # stored value passes through the normalizer.
                        new_value = value if value is not None and value != "" else self._parameter.default
                        new_value = self._parameter.normalizer(new_value)
                        return setattr(instance, "_"+self._parameter.id, new_value)
                    def __delete__(self, instance):
                        # Deleting an attribute resets it to its normalized default.
                        return setattr(instance, "_"+self._parameter.id, self._parameter.normalizer(self._parameter.default))
                attrs[parameter.id] = Descriptor(parameter)
        return ABCMeta.__new__(mcs, name, bases, attrs)
    def __init__(cls, name, bases, attrs, **kwargs):
        # Assemble a reST-style class docstring listing every declared
        # attribute with its description and default.
        docu = "%s\n\n"%cls.__DESCRIPTION__
        docu += ":Parameters:\n\n"
        for parameter in cls.__ATTRIBUTES__:
            docu += "\t``%s``\n\n"%parameter.id
            docu += "\t\t%s\n\n"%parameter.description
            default_string = "``" if not isinstance(parameter.default, Struct) else ":class:`"+type(parameter.default).__name__+"`"
            default_string += str(parameter.default).replace("\n","\n\n\t\t\t") if parameter.default != "" else "\"\""
            default_string += "``" if not isinstance(parameter.default, Struct) else ""
            docu += "\t\tDefault: %s\n\n"%default_string
        cls.__doc__ = docu
    def __call__(cls, *args, **kwargs):
        # Copy constructor
        # Struct is a Mapping, so passing an instance as the sole positional
        # argument spreads its items as keyword arguments.
        if len(args) == 1 and isinstance(args[0], cls):
            kwargs = args[0]
            args = []
        return type.__call__(cls, *args, **kwargs)
class Struct(collections.Mapping, TextualizeMixin):
    # Python 2 metaclass hook; on Python 3 this whole class is replaced by
    # the ._struct_v3 import at module bottom.
    __metaclass__=StructMeta
    # Subclasses override these with their parameter descriptions / summary.
    __ATTRIBUTES__ = []
    __DESCRIPTION__ = ""
    def __init__(self, *args, **kwargs):
        """Initialize declared attributes from positional and/or keyword
        arguments, raising TypeError on surplus positionals or unknown
        keywords (mimicking normal call-signature errors)."""
        for i in range(len(args)):
            try:
                setattr(self, self.__ATTRIBUTES__[i].id, args[i])
            except IndexError:
                msg = "__init__() takes "
                msg += "no " if len(self.__ATTRIBUTES__)==0 else "at most %d "%len(self.__ATTRIBUTES__)
                msg += "arguments (%d given)"%len(args)
                raise TypeError(msg)
        for key, value in kwargs.items():
            try:
                setattr(self, key, value)
            except AttributeError as e:
                msg = "__init__() got an unexpected keyword argument '%s'"%key
                raise TypeError(msg)
    def __getitem__(self, key):
        # Mapping access delegates straight to attribute access.
        return getattr(self, key)
    def __setattr__(self, name, value):
        # Only declared attribute ids (or their "_"-prefixed backing slots)
        # may be assigned; anything else is a programming error.
        keys = list(self.keys())
        if (not name in keys) and (not name[1:] in keys):
            raise AttributeError("%s object has no attribute %s"%(self.__class__.__name__, name))
        object.__setattr__(self, name, value)
    def __iter__(self):
        # Iterate attribute ids in declaration order.
        for attr in self.__ATTRIBUTES__:
            yield attr.id
    def __len__(self):
        return len(self.__ATTRIBUTES__)
    # ─────────
    # Operators
    def __eq__(self, other):
        # Equal when `other` is a Struct carrying equal values for every
        # attribute declared on *this* class.
        if not isinstance(other, Struct):
            return False
        for attribute in self.__ATTRIBUTES__:
            if not hasattr(other, attribute.id):
                return False
            if getattr(self, attribute.id) != getattr(other, attribute.id):
                return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
if sys.version_info >= (3,0):
from ._struct_v3 import Struct
__all__=["Struct"] |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: server.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='server.proto',
package='',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0cserver.proto\"\x14\n\x04User\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x18\n\x07\x46riends\x12\r\n\x05names\x18\x01 \x03(\t2$\n\x05Ready\x12\x1b\n\x06Submit\x12\x05.User\x1a\x08.Friends\"\x00\x62\x06proto3'
)
_USER = _descriptor.Descriptor(
name='User',
full_name='User',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='User.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=16,
serialized_end=36,
)
_FRIENDS = _descriptor.Descriptor(
name='Friends',
full_name='Friends',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='names', full_name='Friends.names', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=38,
serialized_end=62,
)
DESCRIPTOR.message_types_by_name['User'] = _USER
DESCRIPTOR.message_types_by_name['Friends'] = _FRIENDS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
User = _reflection.GeneratedProtocolMessageType('User', (_message.Message,), {
'DESCRIPTOR' : _USER,
'__module__' : 'server_pb2'
# @@protoc_insertion_point(class_scope:User)
})
_sym_db.RegisterMessage(User)
Friends = _reflection.GeneratedProtocolMessageType('Friends', (_message.Message,), {
'DESCRIPTOR' : _FRIENDS,
'__module__' : 'server_pb2'
# @@protoc_insertion_point(class_scope:Friends)
})
_sym_db.RegisterMessage(Friends)
_READY = _descriptor.ServiceDescriptor(
name='Ready',
full_name='Ready',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=64,
serialized_end=100,
methods=[
_descriptor.MethodDescriptor(
name='Submit',
full_name='Ready.Submit',
index=0,
containing_service=None,
input_type=_USER,
output_type=_FRIENDS,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_READY)
DESCRIPTOR.services_by_name['Ready'] = _READY
# @@protoc_insertion_point(module_scope)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import multiprocessing, copy
from collections import OrderedDict
from luigi import configuration
from psutil import virtual_memory
# populations
ANCIENT_POPS = ['DPC']
ALL_POPS = ['BAS', 'DNA', 'DAE', 'DEU', 'DGS', 'DLB', 'DAL', 'DGL', 'DHU', 'DMA', 'DSL', 'DME', 'DPU', 'DID', 'DQA',
'DCH', 'DTI', 'DTM', 'DVN', 'DPC', 'CTVT', 'DIN', 'COY', 'TAI', 'WAM', 'WAS', 'WEU', 'WME', 'OUT']
NJ_POPS = ['BAS', 'COY', 'CTVT', 'DAE', 'DAL', 'DEU', 'DGL', 'DGS', 'DHU', 'DID', 'DIN', 'DLB', 'DMA', 'DME', 'DNA',
'DPC', 'DPU', 'DQA', 'DSL', 'DVN', 'OUT', 'TAI', 'WAM', 'WAS', 'WEU', 'WME']
ALL_DOGS = ['BAS', 'DNA', 'DAE', 'DEU', 'DGS', 'DLB', 'DAL', 'DGL', 'DHU', 'DMA', 'DSL', 'DME', 'DPU', 'DID', 'DQA',
'DCH', 'DTI', 'DTM', 'DVN', 'DPC', 'CTVT', 'DIN']
SNP_ARRAY_v1 = ['AED', 'AM', 'APBT', 'AST', 'BAS', 'BAS2', 'BEA', 'BOX', 'CBR', 'CC', 'CD', 'CHI', 'CLD', 'COY', 'CSP',
'CTVT', 'DAE', 'DAL', 'DCH', 'DEU', 'DGL', 'DGS', 'DHU', 'DID', 'DIN', 'DLB', 'DMA', 'DME', 'DNA',
'DPC', 'DPU', 'DQA', 'DSL', 'DTI', 'DTM', 'DVN', 'EUR', 'FS', 'GSD', 'NEW', 'NGSD', 'NSDTR', 'OUT',
'PIO', 'SAM', 'SH', 'TAI', 'VDB', 'VDB2', 'VDC', 'VDCR', 'VDDR', 'VDH', 'VDIB', 'VDIC', 'VDID', 'VDIH',
'VDIJ', 'VDIM', 'VDIO', 'VDP', 'VDPA', 'VDPC', 'VDPI', 'VDPL', 'VDPNGEH', 'VDPNGPM', 'VDPP', 'VDPR',
'VDUA', 'VDVCB', 'VDVHG', 'VDVLC', 'VDVLS', 'WAM', 'WAS', 'WEU', 'WME', 'XOL']
# added "COO" since v1
SNP_ARRAY_v5 = ['AED', 'AM', 'APBT', 'AST', 'BAS', 'BAS2', 'BEA', 'BOX', 'CBR', 'CC', 'CD', 'CHI', 'CLD', 'COO', 'COY',
'CSP', 'CTVT', 'DAE', 'DAL', 'DCH', 'DEU', 'DGL', 'DGS', 'DHU', 'DID', 'DIN', 'DLB', 'DMA', 'DME',
'DNA', 'DPC', 'DPU', 'DQA', 'DSL', 'DTI', 'DTM', 'DVN', 'EUR', 'FS', 'GSD', 'NEW', 'NGSD', 'NSDTR',
'OUT', 'PIO', 'SAM', 'SH', 'TAI', 'VDB', 'VDB2', 'VDC', 'VDCR', 'VDDR', 'VDH', 'VDIB', 'VDIC', 'VDID',
'VDIH', 'VDIJ', 'VDIM', 'VDIO', 'VDP', 'VDPA', 'VDPC', 'VDPI', 'VDPL', 'VDPNGEH', 'VDPNGPM', 'VDPP',
'VDPR', 'VDUA', 'VDVCB', 'VDVHG', 'VDVLC', 'VDVLS', 'WAM', 'WAS', 'WEU', 'WME', 'XOL']
GRAPH_POPS2 = ['OUT', 'COY', 'WAM', 'WEU', 'DEU', 'DVN', 'DPC', 'DMA']
GRAPH_POPS3 = ['OUT', 'COY', 'WAM', 'WEU', 'DEU', 'DVN', 'DPC', 'DMA', 'CTVT']
GRAPH_TEST1 = ['OUT', 'COY', 'WAM', 'WEU', 'DEU', 'DVN']
# groups of populations for running analyses
GROUPS = {
# ---------------------------
# -- whole genome datasets --
# ---------------------------
# 'merged_v2': {'all-pops': ALL_POPS, 'dog-ctvt' : ALL_DOGS},
# 'merged_v2.random': {'all-pops': ALL_POPS, 'dog-ctvt' : ALL_DOGS},
# 'merged_v2_hq': {'all-pops': ALL_POPS, 'dog-ctvt' : ALL_DOGS},
# 'merged_v2_hq2': {'all-pops': ALL_POPS, 'dog-ctvt' : ALL_DOGS},
# 'merged_v2_TV': {'all-pops': ALL_POPS, 'dog-ctvt' : ALL_DOGS},
# 'merged_v2_TV_hq': {'all-pops': ALL_POPS, 'dog-ctvt' : ALL_DOGS},
# 'merged_v2_njviet': {'nj-pops': NJ_POPS},
'merged_v3': {'all-pops': ALL_POPS, 'dog-ctvt': ALL_DOGS},
'merged_v3_hq': {'all-pops': ALL_POPS, 'dog-ctvt': ALL_DOGS},
'merged_v3_TV': {'all-pops': ALL_POPS, 'dog-ctvt': ALL_DOGS},
'merged_v3_TV_hq': {'all-pops': ALL_POPS, 'dog-ctvt': ALL_DOGS},
'merged_v3_njviet': {'nj-pops': NJ_POPS},
# ----------------------------
# -- qpGraph / treemix data --
# ----------------------------
# 'merged_v2_laurent': {'graph-pops2': GRAPH_POPS2},
# 'merged_v2_TV_laurent': {'graph-pops2': GRAPH_POPS2, 'graph-test1': GRAPH_TEST1},
'merged_v3_TV_laurent': {'graph-pops2': GRAPH_POPS2, 'graph-pops3': GRAPH_POPS3, 'graph-test1': GRAPH_TEST1},
# -------------------
# -- SNParray data --
# -------------------
# 'merged_SNParray': {'all-pops': SNP_ARRAY_v1},
# 'merged_SNParray_v1': {'all-pops': SNP_ARRAY_v1},
# 'merged_SNParray_v1_noCTVT': {'all-pops': SNP_ARRAY_v1},
'merged_SNParray_v5': {'all-pops': SNP_ARRAY_v5},
}
# the population and sample to use for rooting the NJ tree
OUTGROUP_POP = {group: 'OUT' for group in GROUPS}
OUTGROUP_SAMPLE = {group: 'AndeanFox' for group in GROUPS}
# special cases for qpGraph
OUTGROUP_POP['graph-pops1'] = 'COY'
OUTGROUP_SAMPLE['graph-pops1'] = 'C_Cal'
POPULATIONS = OrderedDict([
('BAS', 'African Dogs'),
('BAS2', 'African Dogs'),
('DNA', 'African Dogs'),
('BEA', 'European Dogs'),
('BOX', 'European Dogs'),
('DAE', 'European Dogs'),
('DEU', 'European Dogs'),
('DGS', 'European Dogs'),
('DLB', 'European Dogs'),
('AM', 'Arctic Dogs'),
('DAL', 'Arctic Dogs'),
('DGL', 'Arctic Dogs'),
('DHU', 'Arctic Dogs'),
('DMA', 'Arctic Dogs'),
('DSL', 'Arctic Dogs'),
('EUR', 'Arctic Dogs'),
('FS', 'Arctic Dogs'),
('GSD', 'Arctic Dogs'),
('SAM', 'Arctic Dogs'),
('SH', 'Arctic Dogs'),
('AED', 'American Dogs'),
('APBT', 'American Dogs'),
('AST', 'American Dogs'),
('CD', 'American Dogs'),
('CLD', 'American Dogs'),
('CBR', 'American Dogs'),
('CHI', 'American Dogs'),
('COO', 'American Dogs'),
('DME', 'American Dogs'),
('DPU', 'American Dogs'),
('NEW', 'American Dogs'),
('NSDTR', 'American Dogs'),
('PIO', 'American Dogs'),
('VDB', 'American Dogs'),
('VDB2', 'American Dogs'),
('VDC', 'American Dogs'),
('VDCR', 'American Dogs'),
('VDDR', 'American Dogs'),
('VDH', 'American Dogs'),
('VDP', 'American Dogs'),
('VDPA', 'American Dogs'),
('VDPC', 'American Dogs'),
('VDPI', 'American Dogs'),
('VDPL', 'American Dogs'),
('VDPP', 'American Dogs'),
('VDPR', 'American Dogs'),
('VDUA', 'American Dogs'),
('XOL', 'American Dogs'),
('DID', 'Asian Dogs'),
('DQA', 'Asian Dogs'),
('VDIC', 'Asian Dogs'),
('VDID', 'Asian Dogs'),
('VDIH', 'Asian Dogs'),
('VDIM', 'Asian Dogs'),
('VDIO', 'Asian Dogs'),
('CSP', 'East Asian Dogs'),
('CC', 'East Asian Dogs'),
('DCH', 'East Asian Dogs'),
('DTI', 'East Asian Dogs'),
('DTM', 'East Asian Dogs'),
('DVN', 'East Asian Dogs'),
('NGSD', 'East Asian Dogs'),
('VDIB', 'East Asian Dogs'),
('VDIJ', 'East Asian Dogs'),
('VDPNGEH', 'East Asian Dogs'),
('VDPNGPM', 'East Asian Dogs'),
('VDVCB', 'East Asian Dogs'),
('VDVHG', 'East Asian Dogs'),
('VDVLS', 'East Asian Dogs'),
('VDVLC', 'East Asian Dogs'),
('DPC', 'Pre-contact Dogs'),
('CTVT', 'CTVT'),
('DIN', 'Dingos'),
('COY', 'Coyotes'),
('WAM', 'American Wolves'),
('WAS', 'Eurasian Wolves'),
('WEU', 'Eurasian Wolves'),
('WME', 'Eurasian Wolves'),
('TAI', 'Ancient Wolves'),
('OUT', 'Outgroup')
])
COLOURS = {
'African Dogs': '#a6cee3',
'European Dogs': '#33a02c',
'Arctic Dogs': '#fb9a99',
'American Dogs': '#cab2d6',
'Asian Dogs': '#fdbf6f',
'East Asian Dogs': '#e31a1c',
'Dingos': '#ff7f00',
'Pre-contact Dogs': '#6a3d9a',
'CTVT': '#b2df8a',
'American Wolves': '#b15928',
'Eurasian Wolves': '#003c30',
'Ancient Wolves': '#4d4d4d',
'Coyotes': '#1f78b4',
'Outgroup': '#ae017e',
}
DEFAULT_COLOUR = '#e7298a'
# the maximum number of migration events in Treemix
TREEMIX_MAX_M = 6
# number of SNPs to group for LD
TREEMIX_K = 1000
# what level should Treemix group by, pops OR samples?
GROUP_BY_POPS = 'grp-pops'
GROUP_BY_SMPL = 'grp-smpl'
# the maximum number of ancestral populations to run admixture for
ADMIXTURE_MAX_K = 5
# the number of bootstrap replicates to run
ADMIXTURE_BOOTSTRAP = 0 # TODO put this back to 100
# which PCA components should we print
PCA_COMPONENTS = [(1, 2), (3, 4), (5, 6)]
# the species flag for plink telling it how many chromosomes to expect
PLINK_TAXA = '--dog'
# how many CPU cores does this machine have
TOTAL_CORES = multiprocessing.cpu_count()
# per-worker core budgets, expressed as fractions of the total core count
CPU_CORES_MED = int(TOTAL_CORES * 0.3) # 30%
CPU_CORES_HIGH = int(TOTAL_CORES * 0.5) # 50%
CPU_CORES_MAX = int(TOTAL_CORES * 0.9) # 90% |
# file with error
def placeholder():
    """No-op stub: the original line `def:` was a syntax error with no name
    or body, so it is replaced by a minimal valid placeholder."""
    pass
import torch
from genrl.agents.deep.dqn.base import DQN
from genrl.agents.deep.dqn.utils import ddqn_q_target
class DoubleDQN(DQN):
    """Double DQN Class
    Paper: https://arxiv.org/abs/1509.06461
    Attributes:
        network (str): The network type of the Q-value function.
            Supported types: ["cnn", "mlp"]
        env (Environment): The environment that the agent is supposed to act on
        batch_size (int): Mini batch size for loading experiences
        gamma (float): The discount factor for rewards
        layers (:obj:`tuple` of :obj:`int`): Layers in the Neural Network
            of the Q-value function
        lr_value (float): Learning rate for the Q-value function
        replay_size (int): Capacity of the Replay Buffer
        buffer_type (str): Choose the type of Buffer: ["push", "prioritized"]
        max_epsilon (str): Maximum epsilon for exploration
        min_epsilon (str): Minimum epsilon for exploration
        epsilon_decay (str): Rate of decay of epsilon (in order to decrease
            exploration with time)
        seed (int): Seed for randomness
        render (bool): Should the env be rendered during training?
        device (str): Hardware being used for training. Options:
            ["cuda" -> GPU, "cpu" -> CPU]
    """
    def __init__(self, *args, **kwargs):
        super(DoubleDQN, self).__init__(*args, **kwargs)
        # Clear any logging buffers before training statistics are recorded.
        self.empty_logs()
        # `create_model` is set by the DQN base class; presumably False when
        # a model is injected externally — TODO confirm against base class.
        if self.create_model:
            self._create_model()
    def get_target_q_values(
        self, next_states: torch.Tensor, rewards: torch.Tensor, dones: torch.Tensor
    ) -> torch.Tensor:
        """Get target Q values for the DQN
        Args:
            next_states (:obj:`torch.Tensor`): Next states for which target Q-values
                need to be found
            rewards (:obj:`list`): Rewards at each timestep for each environment
            dones (:obj:`list`): Game over status for each environment
        Returns:
            target_q_values (:obj:`torch.Tensor`): Target Q values for the DQN
        """
        # The Double-DQN target computation is delegated entirely to the
        # shared helper (see the paper linked in the class docstring).
        return ddqn_q_target(self, next_states, rewards, dones)
|
import asyncio
import time
from cmyui.logging import Ansi
from cmyui.logging import log
import app.packets
import app.state
import settings
from app.constants.privileges import Privileges
#Discordbot imports
import discordbot.botconfig as configb
import discord
from discord.ext import commands
from discord_slash import SlashCommand
import app.state.discordbot as dbot
from cmyui import Version
import os
__all__ = ("initialize_housekeeping_tasks",)
OSU_CLIENT_MIN_PING_INTERVAL = 300000 // 1000 # defined by osu!
async def initialize_housekeeping_tasks() -> None:
    """Create tasks for each housekeeping tasks."""
    loop = asyncio.get_running_loop()
    # Keep strong references to the tasks so they are not garbage-collected
    # and can be cancelled on shutdown.
    app.state.sessions.housekeeping_tasks.update(
        {
            loop.create_task(task)
            for task in (
                _remove_expired_donation_privileges(interval=30 * 60),
                _update_bot_status(interval=5 * 60),
                _disconnect_ghosts(interval=OSU_CLIENT_MIN_PING_INTERVAL // 3),
                _bot_runner(),
            )
        },
    )
async def _remove_expired_donation_privileges(interval: int) -> None:
    """Remove donation privileges from users with expired sessions.

    Runs forever, sweeping once every `interval` seconds.
    """
    while True:
        if settings.DEBUG:
            log("Removing expired donation privileges.", Ansi.LMAGENTA)
        expired_donors = await app.state.services.database.fetch_all(
            "SELECT id FROM users "
            "WHERE donor_end <= UNIX_TIMESTAMP() "
            "AND priv & 48",  # 48 = Supporter | Premium
        )
        for expired_donor in expired_donors:
            p = await app.state.sessions.players.from_cache_or_sql(
                id=expired_donor["id"],
            )
            assert p is not None
            # TODO: perhaps make a `revoke_donor` method?
            await p.remove_privs(Privileges.DONATOR)
            await app.state.services.database.execute(
                "UPDATE users SET donor_end = 0 WHERE id = :id",
                {"id": p.id},
            )
            # Notify the player immediately if they are connected.
            if p.online:
                p.enqueue(
                    app.packets.notification("Your supporter status has expired."),
                )
            log(f"{p}'s supporter status has expired.", Ansi.LMAGENTA)
        await asyncio.sleep(interval)
async def _disconnect_ghosts(interval: int) -> None:
    """Actively disconnect users above the
    disconnection time threshold on the osu! server."""
    while True:
        await asyncio.sleep(interval)
        current_time = time.time()
        for p in app.state.sessions.players:
            # A client that has not sent anything for longer than the osu!
            # ping interval has silently vanished — log it out server-side.
            if current_time - p.last_recv_time > OSU_CLIENT_MIN_PING_INTERVAL:
                log(f"Auto-dced {p}.", Ansi.LMAGENTA)
                p.logout()
async def _update_bot_status(interval: int) -> None:
    """Reroll the bot's status, every `interval`."""
    while True:
        await asyncio.sleep(interval)
        # Invalidate the cached stats packet so the next request rebuilds it.
        app.packets.bot_stats.cache_clear()
async def _bot_runner() -> None:
    """Configure, start and run the Discord bot until shutdown."""
    dbot.botversion = Version(2, 0, 0)
    intents = discord.Intents.all()
    #-> Define bot
    client = commands.Bot(command_prefix=configb.PREFIX, intents=intents, case_insensitive=True)
    slash = SlashCommand(client, sync_commands=True, debug_guild=893809157080223784, sync_on_cog_reload=True)
    # Expose the bot objects through shared state for other modules.
    dbot.client = client
    dbot.slash = slash
    #-> Cog loading
    # Load every .py file in the cogs directory as an extension.
    for filename in os.listdir(f'{configb.PATH_TO_FILES}cogs'):
        filename1 = filename
        if filename.endswith('.py'):
            print(f"Loading {filename1}...")
            client.load_extension(f'discordbot.cogs.{filename[:-3]}')
            print(f'Loaded {filename1}')
    @client.event
    async def on_ready() -> None:
        # Runs once the gateway connection is established.
        log("Bot logged in", Ansi.GREEN)
        log(f"Bot name: {client.user.name}")
        log(f"Bot ID: {client.user.id}")
        log(f"Bot Version: {dbot.botversion}\n")
    @client.command()
    async def rlc(ctx, cog):
        # Reload a cog in place (owner-only).
        if ctx.author.id not in configb.BOT_OWNERS:
            return await ctx.send("You're not an owner")
        try:
            client.unload_extension(f'discordbot.cogs.{cog}')
            client.load_extension(f'discordbot.cogs.{cog}')
            log(f"{ctx.author.name}#{ctx.author.discriminator} reloaded cog {cog}", Ansi.YELLOW)
        except Exception as e:
            log(f"{ctx.author.name}#{ctx.author.discriminator} tried to reload cog {cog} but error occured", Ansi.YELLOW)
            log(e, Ansi.RED)
            return await ctx.send(f"Error occured while reloading cog\n```{e}```", delete_after=10)
        return await ctx.send("Reloaded Cog")
    @client.command()
    async def load(ctx, cog):
        # Load a cog that is not currently loaded (owner-only).
        if ctx.author.id not in configb.BOT_OWNERS:
            return await ctx.send("You're not an owner")
        try:
            client.load_extension(f'discordbot.cogs.{cog}')
            log(f"{ctx.author.name}#{ctx.author.discriminator} loaded cog {cog}", Ansi.YELLOW)
        except Exception as e:
            log(f"{ctx.author.name}#{ctx.author.discriminator} tried to load cog {cog} but error occured", Ansi.YELLOW)
            log(e, Ansi.RED)
            return await ctx.send(f"Error occured while loading cog\n```{e}```", delete_after=10)
        return await ctx.send("Loaded Cog")
    try:
        await client.start(configb.TOKEN)
    finally:
        # Always close the connection cleanly, even if start() raised.
        await client.close()
        log('Bot Connection Closed', Ansi.RED)
import logging
import subprocess
import configparser
import os
class SSH:
    """Thin wrapper around the ``ssh`` command-line client."""

    def __init__(self, username, ip, port, identity_file):
        self.username = username
        self.ip = ip
        self.port = port
        self.identity_file = identity_file

    def connection_str(self):
        """Return the ``user@host`` connection string."""
        return f'{self.username}@{self.ip}'

    def run(self, command):
        """Execute *command* (string or argument list) on the remote host
        and return its decoded stdout."""
        if isinstance(command, str):
            command = [command]
        logging.debug("Connect to %s:%s and run %s", self.connection_str(), self.port, " ".join(command))
        base_args = ['ssh', self.connection_str(), '-p', self.port, '-i', self.identity_file]
        output = subprocess.check_output(base_args + command)
        return output.decode('utf-8')
def create_ssh(ip_script, username, port, identity):
    """Resolve the backup device's IP by running *ip_script* in a shell and
    build an SSH handle for it."""
    logging.debug("Retrieve IP with %s", ip_script)
    raw_output = subprocess.check_output(ip_script, shell=True)
    address = raw_output.decode('utf-8').strip()
    logging.info("Found backup device at %s", address)
    return SSH(username, address, port, identity)
def expanduser(path):
    """Like os.path.expanduser, but when running under sudo a leading ``~/``
    expands to the invoking user's home (via SUDO_USER) rather than root's."""
    if path.startswith("~/"):
        sudo_user = os.environ.get('SUDO_USER', '')
        path = "~%s%s" % (sudo_user, path[1:])
    return os.path.expanduser(path)
def read_config(args):
    """Parse and return the INI configuration named by ``args.config``,
    with sudo-aware ``~`` expansion applied to the path."""
    parser = configparser.ConfigParser()
    parser.read(expanduser(args.config))
    return parser
def setup_logging(verbose, quiet, logfile):
    """Configure root logging: console output at a verbosity-dependent level,
    plus a file handler that always records at DEBUG."""
    if verbose:
        console_level = logging.DEBUG
    elif quiet:
        console_level = logging.WARNING
    else:
        console_level = logging.INFO
    logging.basicConfig(format='%(levelname)-10s - %(message)s', level=console_level)
    file_handler = logging.FileHandler(filename=logfile)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter('[%(asctime)s] %(levelname)-10s - %(message)s'))
    logging.getLogger().addHandler(file_handler)
|
"""
FastAPI for Priority Job Queue.
"""
import asyncio
import datetime
import uuid
import fastapi
import pydantic
TIMEOUT = 30
class Job(pydantic.BaseModel):  # pylint: disable=no-member, too-few-public-methods
    """
    Job model
    """
    jobId: uuid.UUID = pydantic.Field(default_factory=uuid.uuid4)  # generated when omitted
    submitterId: int
    # Lower value = served first (asyncio.PriorityQueue pops the smallest tuple).
    priority: int
    name: str
    # NOTE(review): pydantic treats underscore-prefixed names as private and
    # excludes them from validation/serialization — confirm this is intended.
    _status: str = "new"
class StatusRequest(
    pydantic.BaseModel
):  # pylint: disable=no-member, too-few-public-methods
    """
    Request containing a Status.
    """
    # The only accepted value on PATCH /jobs/next is "processing".
    status: str
app = fastapi.FastAPI(title="Job Priority Queue")
queue = asyncio.PriorityQueue()
jobs = {}
processing = {}
@app.get("/")
async def root():
    """
    Simple welcome message for GETs to the root, directing to the documentation.
    """
    # Static payload; no state is touched.
    return {"message": "Welcome! Check out the interactive documentation at /docs"}
@app.post("/jobs")
async def submit_job(job: Job):
    """
    Add job to the priority queue.
    jobId is optional and one will be generated.
    """
    # Register by id first so lookups by jobId work even while queued.
    jobs[job.jobId] = job
    # Queue entries are (priority, jobId) tuples; smallest priority pops first.
    await queue.put((job.priority, job.jobId))
    return {"jobId": job.jobId}
@app.get("/jobs/next")
async def get_next_job():
    """
    Get next job out of the priority queue. The queue will not be altered.
    If the queue is empty, an empty response will be returned.
    """
    try:
        # Peek at the underlying heap; index 0 is the smallest (next) entry.
        # NOTE(review): relies on asyncio.PriorityQueue internals.
        job = queue._queue[0]  # pylint: disable=protected-access
    except IndexError:
        return {}
    try:
        # job is a (priority, jobId) tuple; look up the full record by id.
        return jobs[job[1]]
    except KeyError:
        # Job has already been removed by its jobId
        return {}
@app.patch("/jobs/next")
async def patch_next_job(status: StatusRequest):
    """
    Patch: pop next job out of the priority queue.
    Internally sets the job aside while processing, processing jobs that exceed
    the timeout will be placed back in the queue.
    The payload of the incoming request should be {"status": "processing"}.
    If the queue is empty, an empty response will be returned.
    """
    if not status.status == "processing":
        raise fastapi.HTTPException(
            status_code=400, detail='Request must have {"status": "processing"}'
        )
    try:
        job = queue.get_nowait()
    except asyncio.QueueEmpty:
        return {}
    # job is a (priority, jobId) tuple.
    job_id = job[1]
    try:
        full_job = jobs[job_id]
    except KeyError:
        # Job has already been removed by its jobId
        return {}
    else:
        # Record when processing started; processing_queue_cleaner re-queues
        # the job if it is still set aside after TIMEOUT seconds.
        processing[job_id] = datetime.datetime.now()
        return full_job
@app.delete("/jobs/next")
async def delete_next_job():
    """
    Delete next job out of the priority queue. Empty queue will raise an error.
    """
    try:
        job = queue.get_nowait()
    except asyncio.QueueEmpty:
        raise fastapi.HTTPException(  # pylint: disable=raise-missing-from
            status_code=400, detail="Queue is empty, nothing to delete."
        )
    else:
        # NOTE(review): raises KeyError (HTTP 500) if this job was already
        # removed via DELETE /jobs/{job_id} — confirm whether that's intended.
        del jobs[job[1]]
@app.delete("/jobs/{job_id}")
async def delete_job(job_id):
    """
    Delete the given job: drop it from the processing table (if present)
    and from the job store.  An unknown jobId still raises KeyError from
    the job store, matching the previous behavior.
    """
    key = uuid.UUID(job_id)  # parse once instead of twice
    # Job may or may not be checked out for processing; either is fine.
    processing.pop(key, None)
    del jobs[key]
async def processing_queue_cleaner():
    """
    Periodically scan the processing table for jobs that have been going
    for more than TIMEOUT seconds and return them to the priority queue.
    """
    while True:
        now = datetime.datetime.now()
        # Iterate over a snapshot: deleting from `processing` while
        # iterating its live view raises RuntimeError in Python 3.
        # (This also removes the old reliance on insertion order for the
        # early `break`.)
        for job_id, started in list(processing.items()):
            if started + datetime.timedelta(seconds=TIMEOUT) <= now:
                del processing[job_id]
                # The job may have been deleted by jobId while it was
                # processing; only requeue jobs that still exist, and do
                # not let a KeyError kill this background task.
                job = jobs.get(job_id)
                if job is not None:
                    await queue.put((job.priority, job.jobId))
        await asyncio.sleep(0.5)
# Hold strong references to background tasks: asyncio.create_task() keeps
# only a weak reference, so an otherwise-unreferenced task may be
# garbage-collected before it finishes.
_background_tasks = set()


@app.on_event("startup")
async def startup_event():
    """
    On FastAPI server start-up, register tasks such as the processing queue cleaner.
    """
    task = asyncio.create_task(processing_queue_cleaner())
    _background_tasks.add(task)
    task.add_done_callback(_background_tasks.discard)
|
from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton

# Inline keyboard with a single button that starts the survey flow.
_survey_button = InlineKeyboardButton(text="Пройти опрос", callback_data="survey")
reg_profile = InlineKeyboardMarkup(
    row_width=3,
    inline_keyboard=[[_survey_button]],
)
|
# Copyright 2017-2018 Wind River
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystoneauth1 import exceptions as keystone_exceptions
from neutronclient.common import exceptions as neutronclient_exceptions
from neutronclient.neutron import client as neutronclient
from oslo_log import log as logging
from oslo_serialization import jsonutils
from dcorch.common import consts
from dcorch.common import exceptions
from dcorch.engine import quota_manager
from dcorch.engine.sync_thread import SyncThread
from dcorch.objects import resource
LOG = logging.getLogger(__name__)
class NetworkSyncThread(SyncThread):
    """Manages tasks related to resource management for neutron."""

    def __init__(self, subcloud_engine):
        super(NetworkSyncThread, self).__init__(subcloud_engine)
        self.endpoint_type = consts.ENDPOINT_TYPE_NETWORK
        # Map resource type -> handler invoked when a sync request arrives.
        self.sync_handler_map = {
            consts.RESOURCE_TYPE_NETWORK_QUOTA_SET: self.sync_network_resource,
            consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP:
                self.sync_network_resource,
            consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE:
                self.sync_network_resource,
        }
        # Security group needs to come before security group rule to ensure
        # that the group exists by the time we try to create the rules.
        self.audit_resources = [
            consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP,
            consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE,
            # note: no audit here for quotas, that's handled separately
        ]
        self.log_extra = {"instance": "{}/{}: ".format(
            self.subcloud_engine.subcloud.region_name, self.endpoint_type)}
        self.sc_neutron_client = None
        self.initialize()
        LOG.info("NetworkSyncThread initialized", extra=self.log_extra)

    def initialize_sc_clients(self):
        """Lazily create the subcloud neutron client once a session exists."""
        super(NetworkSyncThread, self).initialize_sc_clients()
        if (not self.sc_neutron_client and self.sc_admin_session):
            self.sc_neutron_client = neutronclient.Client(
                "2.0", session=self.sc_admin_session,
                endpoint_type=consts.KS_ENDPOINT_INTERNAL,
                region_name=self.subcloud_engine.subcloud.region_name)

    def initialize(self):
        """(Re)create master and subcloud clients from fresh sessions."""
        # Subcloud may be enabled a while after being added.
        # Keystone endpoints for the subcloud could be added in
        # between these 2 steps. Reinitialize the session to
        # get the most up-to-date service catalog.
        super(NetworkSyncThread, self).initialize()
        self.m_neutron_client = neutronclient.Client(
            "2.0", session=self.admin_session,
            endpoint_type=consts.KS_ENDPOINT_INTERNAL,
            region_name=consts.VIRTUAL_MASTER_CLOUD)
        self.initialize_sc_clients()
        LOG.info("session and clients initialized", extra=self.log_extra)

    def sync_network_resource(self, request, rsrc):
        """Dispatch a sync request to the matching operation handler."""
        self.initialize_sc_clients()
        # Invoke function with name format "operationtype_resourcetype".
        # For example: create_flavor()
        try:
            func_name = request.orch_job.operation_type + \
                "_" + rsrc.resource_type
            getattr(self, func_name)(request, rsrc)
        except AttributeError:
            LOG.error("{} not implemented for {}"
                      .format(request.orch_job.operation_type,
                              rsrc.resource_type))
            raise exceptions.SyncRequestFailed
        except (keystone_exceptions.connection.ConnectTimeout,
                keystone_exceptions.ConnectFailure) as e:
            LOG.error("sync_network_resource: {} is not reachable [{}]"
                      .format(self.subcloud_engine.subcloud.region_name,
                              str(e)), extra=self.log_extra)
            raise exceptions.SyncRequestTimeout
        except exceptions.SyncRequestFailed:
            raise
        except Exception as e:
            LOG.exception(e)
            raise exceptions.SyncRequestFailedRetry

    def put_network_quota_set(self, request, rsrc):
        """Apply the master cloud's new quota limits to the subcloud."""
        project_id = request.orch_job.source_resource_id
        # Get the new global limits from the request.
        quota_dict = jsonutils.loads(request.orch_job.resource_info)
        # Neutron doesn't do user-specific quotas
        user_id = None
        # Calculate the new limits for this subcloud (factoring in the
        # existing usage).
        quota_dict = \
            quota_manager.QuotaManager.calculate_subcloud_project_quotas(
                project_id, user_id, quota_dict,
                self.subcloud_engine.subcloud.region_name)
        # Apply the limits to the subcloud.
        self.sc_neutron_client.update_quota(project_id, {"quota": quota_dict})
        # Persist the subcloud resource. (Not really applicable for quotas.)
        self.persist_db_subcloud_resource(rsrc.id, rsrc.master_id)
        LOG.info("Updated quotas {} for tenant {} and user {}"
                 .format(quota_dict, rsrc.master_id, user_id),
                 extra=self.log_extra)

    def delete_network_quota_set(self, request, rsrc):
        """Recompute subcloud quotas from the master defaults on delete."""
        # When deleting the quota-set in the master cloud, we don't actually
        # delete it in the subcloud.  Instead we recalculate the subcloud
        # quotas based on the defaults in the master cloud.
        project_id = request.orch_job.source_resource_id
        user_id = None
        # Get the new master quotas
        quota_dict = self.m_neutron_client.show_quota(project_id)['quota']
        # Calculate the new limits for this subcloud (factoring in the
        # existing usage).
        quota_dict = \
            quota_manager.QuotaManager.calculate_subcloud_project_quotas(
                project_id, user_id, quota_dict,
                self.subcloud_engine.subcloud.region_name)
        # Apply the limits to the subcloud.
        self.sc_neutron_client.update_quota(project_id, {"quota": quota_dict})
        # Clean up the subcloud resource entry in the DB since it's been
        # deleted in the master cloud.
        subcloud_rsrc = self.get_db_subcloud_resource(rsrc.id)
        if subcloud_rsrc:
            subcloud_rsrc.delete()

    def post_security_group(self, request, rsrc):
        """Create a security group in the subcloud and record the mapping."""
        sec_group_dict = jsonutils.loads(request.orch_job.resource_info)
        body = {"security_group": sec_group_dict}
        # Create the security group in the subcloud
        sec_group = self.sc_neutron_client.create_security_group(body)
        sec_group_id = sec_group['security_group']['id']
        # Persist the subcloud resource.
        subcloud_rsrc_id = self.persist_db_subcloud_resource(rsrc.id,
                                                             sec_group_id)
        LOG.info("Created security group {}:{} [{}]"
                 .format(rsrc.id, subcloud_rsrc_id, sec_group_dict['name']),
                 extra=self.log_extra)

    def put_security_group(self, request, rsrc):
        """Update the subcloud security group mapped to a master group."""
        sec_group_dict = jsonutils.loads(request.orch_job.resource_info)
        body = {"security_group": sec_group_dict}
        sec_group_subcloud_rsrc = self.get_db_subcloud_resource(rsrc.id)
        if not sec_group_subcloud_rsrc:
            LOG.error("Unable to update security group {}:{},"
                      "cannot find equivalent security group in subcloud."
                      .format(rsrc, sec_group_dict),
                      extra=self.log_extra)
            return
        # Update the security group in the subcloud
        sec_group = self.sc_neutron_client.update_security_group(
            sec_group_subcloud_rsrc.subcloud_resource_id, body)
        sec_group = sec_group['security_group']
        LOG.info("Updated security group: {}:{} [{}]"
                 .format(rsrc.id, sec_group['id'], sec_group['name']),
                 extra=self.log_extra)

    def delete_security_group(self, request, rsrc):
        """Delete the mapped subcloud security group (tolerating NotFound)."""
        subcloud_rsrc = self.get_db_subcloud_resource(rsrc.id)
        if not subcloud_rsrc:
            return
        try:
            self.sc_neutron_client.delete_security_group(
                subcloud_rsrc.subcloud_resource_id)
        except neutronclient_exceptions.NotFound:
            # security group already deleted in subcloud, carry on.
            LOG.info("ResourceNotFound in subcloud, may be already deleted",
                     extra=self.log_extra)
        subcloud_rsrc.delete()
        # Master Resource can be deleted only when all subcloud resources
        # are deleted along with corresponding orch_job and orch_requests.
        LOG.info("Security group {}:{} [{}] deleted"
                 .format(rsrc.id, subcloud_rsrc.id,
                         subcloud_rsrc.subcloud_resource_id),
                 extra=self.log_extra)

    def post_security_group_rule(self, request, rsrc):
        """Create a security group rule in the subcloud."""
        sec_group_rule_dict = jsonutils.loads(request.orch_job.resource_info)
        # Any fields with values of "None" are removed since they are defaults
        # and we can't send them to Neutron.  Iterate over a snapshot of the
        # keys: deleting entries while iterating the live key view raises
        # RuntimeError in Python 3.
        for key in list(sec_group_rule_dict.keys()):
            if sec_group_rule_dict[key] is None:
                del sec_group_rule_dict[key]
        try:
            sec_group_rule_dict = self.update_resource_refs(
                consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE,
                sec_group_rule_dict)
        except exceptions.SubcloudResourceNotFound:
            # If we couldn't find the equivalent internal resource refs,
            # we don't know what to create in the subcloud.
            raise exceptions.SyncRequestFailed
        body = {"security_group_rule": sec_group_rule_dict}
        # Create the security group rule in the subcloud
        try:
            rule = self.sc_neutron_client.create_security_group_rule(body)
            rule_id = rule['security_group_rule']['id']
        # Use the exceptions module imported at the top of the file; the
        # previous neutronclient.common.exceptions reference was wrong
        # because `neutronclient` is bound to the client module here.
        except neutronclient_exceptions.Conflict:
            # This can happen if we try to create a rule that is already there.
            # If this happens, we'll update our mapping on the next audit.
            LOG.info("Problem creating security group rule {}, neutron says"
                     "it's a duplicate.".format(sec_group_rule_dict))
            # No point in retrying.
            raise exceptions.SyncRequestFailed
        # Persist the subcloud resource.
        self.persist_db_subcloud_resource(rsrc.id, rule_id)
        LOG.info("Created security group rule {}:{}"
                 .format(rsrc.id, rule_id),
                 extra=self.log_extra)

    def delete_security_group_rule(self, request, rsrc):
        """Delete the mapped subcloud rule (tolerating NotFound)."""
        subcloud_rsrc = self.get_db_subcloud_resource(rsrc.id)
        if not subcloud_rsrc:
            return
        try:
            self.sc_neutron_client.delete_security_group_rule(
                subcloud_rsrc.subcloud_resource_id)
        except neutronclient_exceptions.NotFound:
            # security group rule already deleted in subcloud, carry on.
            LOG.info("ResourceNotFound in subcloud, may be already deleted",
                     extra=self.log_extra)
        subcloud_rsrc.delete()
        # Master Resource can be deleted only when all subcloud resources
        # are deleted along with corresponding orch_job and orch_requests.
        LOG.info("Security group rule {}:{} [{}] deleted"
                 .format(rsrc.id, subcloud_rsrc.id,
                         subcloud_rsrc.subcloud_resource_id),
                 extra=self.log_extra)

    # ---- Override common audit functions ----
    def get_resource_id(self, resource_type, resource):
        """Return the master-cloud id for a DB or OpenStack resource."""
        if hasattr(resource, 'master_id'):
            # If resource from DB, return master resource id
            # from master cloud
            return resource.master_id
        # Else, it is OpenStack resource retrieved from master cloud
        if resource_type in (consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP,
                             consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE):
            return resource['id']

    def get_resource_info(self, resource_type, resource, operation_type=None):
        """Serialize a resource (without its id) for an orch job."""
        if resource_type == consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP:
            if isinstance(resource, dict):
                tmp = resource.copy()
                del tmp['id']
                return jsonutils.dumps(tmp)
            else:
                return jsonutils.dumps(
                    resource._info.get(
                        consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP))
        elif resource_type == consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE:
            if isinstance(resource, dict):
                tmp = resource.copy()
                del tmp['id']
                return jsonutils.dumps(tmp)
            else:
                return jsonutils.dumps(resource._info.get(
                    consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE))
        else:
            return super(NetworkSyncThread, self).get_resource_info(
                resource_type, resource, operation_type)

    def get_resources(self, resource_type, client):
        """List resources of the given type via the given neutron client."""
        if resource_type == consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP:
            return self.get_security_groups(client)
        elif resource_type == consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE:
            return self.get_security_group_rules(client)
        else:
            LOG.error("Wrong resource type {}".format(resource_type),
                      extra=self.log_extra)
            return None

    def get_subcloud_resources(self, resource_type):
        """List resources of the given type from the subcloud."""
        self.initialize_sc_clients()
        return self.get_resources(resource_type, self.sc_neutron_client)

    def get_master_resources(self, resource_type):
        """List resources of the given type from the master cloud."""
        return self.get_resources(resource_type, self.m_neutron_client)

    def same_resource(self, resource_type, m_resource, sc_resource):
        """Compare master/subcloud resources of the same type for equality."""
        if resource_type == consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP:
            return self.same_security_group(m_resource, sc_resource)
        elif resource_type == consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE:
            return self.same_security_group_rule(m_resource, sc_resource)
        else:
            return True

    def audit_discrepancy(self, resource_type, m_resource, sc_resources):
        """Schedule delete-then-recreate when master/subcloud copies differ."""
        if resource_type in [consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP,
                             consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE]:
            # It could be that the group/rule details are different
            # between master cloud and subcloud now.
            # Thus, delete the resource before creating it again.
            self.schedule_work(self.endpoint_type, resource_type,
                               self.get_resource_id(resource_type, m_resource),
                               consts.OPERATION_TYPE_DELETE)
        # Return true to try creating the resource again
        return True

    def map_subcloud_resource(self, resource_type, m_r, m_rsrc_db,
                              sc_resources):
        # Map an existing subcloud resource to an existing master resource.
        # If a mapping is created the function should return True.
        # It is expected that update_resource_refs() has been called on m_r.
        # Used for security groups since there are a couple of default
        # groups (and rules) that get created in the subcloud.
        if resource_type in (consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP,
                             consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE):
            for sc_r in sc_resources:
                if self.same_resource(resource_type, m_r, sc_r):
                    LOG.info(
                        "Mapping resource {} to existing subcloud resource {}"
                        .format(m_r, sc_r), extra=self.log_extra)
                    self.persist_db_subcloud_resource(m_rsrc_db.id,
                                                      sc_r['id'])
                    return True
        return False

    def update_resource_refs(self, resource_type, m_r):
        # Update any references in m_r to other resources in the master cloud
        # to use the equivalent subcloud resource instead.
        m_r = m_r.copy()
        if resource_type == consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP_RULE:
            if m_r.get('security_group_id') is not None:
                # If the security group id is in the dict then it is for the
                # master region, and we need to update it with the equivalent
                # id from the subcloud.
                master_sec_group_id = m_r['security_group_id']
                sec_group_rsrc = resource.Resource.get_by_type_and_master_id(
                    self.ctxt, consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP,
                    master_sec_group_id)
                sec_group_subcloud_rsrc = self.get_db_subcloud_resource(
                    sec_group_rsrc.id)
                if sec_group_subcloud_rsrc:
                    m_r['security_group_id'] = \
                        sec_group_subcloud_rsrc.subcloud_resource_id
                else:
                    LOG.error(
                        "Unable to update security group id in {},"
                        "cannot find equivalent security group in subcloud."
                        .format(m_r), extra=self.log_extra)
                    raise exceptions.SubcloudResourceNotFound(
                        resource=sec_group_rsrc.id)
            if m_r.get('remote_group_id') is not None:
                # If the remote group id is in the dict then it is for the
                # master region, and we need to update it with the equivalent
                # id from the subcloud.
                master_remote_group_id = m_r['remote_group_id']
                remote_group_rsrc = \
                    resource.Resource.get_by_type_and_master_id(
                        self.ctxt, consts.RESOURCE_TYPE_NETWORK_SECURITY_GROUP,
                        master_remote_group_id)
                remote_group_subcloud_rsrc = self.get_db_subcloud_resource(
                    remote_group_rsrc.id)
                if remote_group_subcloud_rsrc:
                    m_r['remote_group_id'] = \
                        remote_group_subcloud_rsrc.subcloud_resource_id
                else:
                    LOG.error(
                        "Unable to update remote group id in {},"
                        "cannot find equivalent remote group in subcloud."
                        .format(m_r), extra=self.log_extra)
                    # Report the remote group resource (the one that failed);
                    # the previous code referenced sec_group_rsrc, which is
                    # unbound when only remote_group_id is present (NameError).
                    raise exceptions.SubcloudResourceNotFound(
                        resource=remote_group_rsrc.id)
        return m_r

    # This will only be called by the audit code.
    def create_security_group(self, request, rsrc):
        self.post_security_group(request, rsrc)

    # This will only be called by the audit code.
    def create_security_group_rule(self, request, rsrc):
        self.post_security_group_rule(request, rsrc)

    def same_security_group(self, qc1, qc2):
        """True when two security groups match on the audited fields."""
        return (qc1['description'] == qc2['description'] and
                qc1['tenant_id'] == qc2['tenant_id'] and
                qc1['name'] == qc2['name'])

    def same_security_group_rule(self, qc1, qc2):
        """True when two rules match on all fields we audit."""
        # Ignore id, created_at, updated_at, and revision_number
        return (qc1['description'] == qc2['description'] and
                qc1['tenant_id'] == qc2['tenant_id'] and
                qc1['project_id'] == qc2['project_id'] and
                qc1['direction'] == qc2['direction'] and
                qc1['protocol'] == qc2['protocol'] and
                qc1['ethertype'] == qc2['ethertype'] and
                qc1['remote_group_id'] == qc2['remote_group_id'] and
                qc1['security_group_id'] == qc2['security_group_id'] and
                qc1['remote_ip_prefix'] == qc2['remote_ip_prefix'] and
                qc1['port_range_min'] == qc2['port_range_min'] and
                qc1['port_range_max'] == qc2['port_range_max'])

    def get_security_groups(self, nc):
        """List security groups (audited fields only) via client nc."""
        try:
            # Only retrieve the info we care about.
            # created_at, updated_at, and revision_number can't be specified
            # when making a new group. tags would require special handling,
            # and security_group_rules is handled separately.
            groups = nc.list_security_groups(
                retrieve_all=True,
                fields=['id', 'name', 'description', 'tenant_id'])
            groups = groups['security_groups']
            return groups
        except (keystone_exceptions.connection.ConnectTimeout,
                keystone_exceptions.ConnectFailure) as e:
            # (log message previously said "get_flavor" -- copy/paste error)
            LOG.info("get_security_groups: subcloud {} is not reachable [{}]"
                     .format(self.subcloud_engine.subcloud.region_name,
                             str(e)), extra=self.log_extra)
            return None
        except Exception as e:
            LOG.exception(e)
            return None

    def get_security_group_rules(self, nc):
        """List security group rules, normalized for comparison, via nc."""
        try:
            rules = nc.list_security_group_rules(retrieve_all=True)
            rules = rules['security_group_rules']
            for rule in rules:
                # We don't need these for comparing/creating security groups
                # and/or they're not allowed in POST calls.
                del rule['created_at']
                del rule['updated_at']
                del rule['revision_number']
                # These would have to be handled separately, not yet supported.
                rule.pop('tags', None)
                # Some rules have a blank description as an empty string, some
                # as None, depending on whether they were auto-created during
                # security group creation or added later.  Convert the empty
                # strings to None.
                if rule['description'] == '':
                    rule['description'] = None
            return rules
        except (keystone_exceptions.connection.ConnectTimeout,
                keystone_exceptions.ConnectFailure) as e:
            # (log message previously said "get_flavor" -- copy/paste error)
            LOG.info("get_security_group_rules: subcloud {} is not "
                     "reachable [{}]"
                     .format(self.subcloud_engine.subcloud.region_name,
                             str(e)), extra=self.log_extra)
            return None
        except Exception as e:
            LOG.exception(e)
            return None
|
#!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
java_path = './api/src/main/java/es/bsc/compss/types/annotations/parameter/DataType.java'
cpp_path = './bindings/bindings-common/src/data_type.h'
python_path = './bindings/python/src/pycompss/api/commons/data_type.py'
notification = 'Autogenerated file (see generate_datatype_enums.py). Used DataType.java as the source template.'

# Get the defined types (and in the right order) from the Java enum body.
datatypes = []
inside_enum = False
# `with` closes the handle; the previous bare open() in the for-loop leaked it.
with open(java_path, 'r') as java_file:
    for line in java_file:
        if 'public enum' in line:
            inside_enum = True
            continue
        if not inside_enum:
            continue
        if line.startswith('}'):
            inside_enum = False
            continue
        tokens = line.split()
        # Skip blank lines inside the enum body (previously an IndexError).
        if tokens:
            datatypes.append(tokens[0].replace(',', '').replace(';', '').strip())

# C++ header.
with open(cpp_path, 'w') as f:
    f.write('\n')
    f.write('// %s\n' % notification)
    f.write('enum datatype {\n    ')
    # NOTE(review): replace('_t', '_dt') also rewrites any interior '_t'
    # substring, not only a trailing one -- confirm all enum names are safe.
    entries = [name.lower().replace('_t', '_dt') for name in datatypes]
    if entries:
        entries[0] += ' = 0'
    f.write(',\n    '.join(entries))
    f.write('\n};\n')

# Python module.
with open(python_path, 'w') as f:
    f.write('# %s # noqa\n\n\n' % notification)
    f.write('class DataType(object):\n')
    for i, name in enumerate(datatypes):
        f.write('    %s = %d\n' % (name.replace('_T', ''), i))
|
#!/usr/bin/env python
__all__ = ['SignerPlugin']
|
from setuptools import find_packages, setup

# Read the long description via a context manager so the file handle is
# closed deterministically (the inline open().read() leaked it), and pin
# the encoding so the build does not depend on the system locale.
with open("README.md", encoding="utf-8") as readme:
    long_description = readme.read()

setup(
    name="pysen_ls",
    version="0.1.2",
    packages=find_packages(),
    description="A language server implementation for pysen",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Yuki Igarashi",
    author_email="me@bonprosoft.com",
    url="https://github.com/bonprosoft/pysen-ls",
    license="MIT License",
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX",
        "Operating System :: MacOS",
        "Operating System :: Unix",
    ],
    install_requires=[
        "dataclasses>=0.6,<1.0;python_version<'3.7'",
        "pygls>=0.10.0,<0.11.0",
        "pysen>=0.9.1,<0.11.0",
    ],
    package_data={"pysen_ls": ["py.typed"]},
    entry_points={"console_scripts": ["pysen_language_server=pysen_ls.__main__:main"]},
)
|
import turtle,random
# "logo" mode: heading 0 points straight up and angles increase clockwise.
turtle.mode("logo")
turtle.shape("turtle")
turtle.bgcolor("black")
turtle.speed(0)
#draw the sun
turtle.speed(0)
turtle.pencolor("red")
size_of_sun=10
sun_x = 0
sun_y = 300
# 12 petals around (sun_x, sun_y); each petal is three 180-degree arcs,
# and the 30-degree turn between petals completes a full 360.
for j in range(12):
    turtle.penup()
    turtle.goto(sun_x,sun_y)
    turtle.pendown()
    for i in range(3):
        turtle.circle(size_of_sun,180)
        turtle.right(180)
    turtle.right(30)
# end of drawing the sun
# Switch to 0-255 integer RGB components for the random colors below.
turtle.colormode(255)
red = random.randint(0,255)
green = random.randint(0,255)
blue = random.randint(0,255)
turtle.pencolor(red,green,blue)
size=10
# Draw 10 rosettes at random positions.  Each rosette is 10 parallelogram
# petals (two forward/left pairs close one parallelogram), rotated 36
# degrees apart; every edge gets a fresh random warm (red/orange) tone.
for k in range(10):
    x = random.randint(-400,400)
    y = random.randint(-400,400)
    turtle.penup()
    turtle.goto(x,y)
    turtle.pendown()
    for j in range(10):
        for i in range(0,2,1):
            red = random.randint(230,255)
            green = random.randint(150,200)
            blue = random.randint(10,60)
            turtle.pencolor(red,green,blue)
            turtle.forward(size)
            turtle.left(60)
            turtle.forward(size)
            turtle.left(120)
        turtle.left(36)
turtle.hideturtle()
|
import re
class LinkHandler:
    '''Matches any website links in the text'''

    def __init__(self):
        # Build the URL-matching regex once, at construction time.
        # NOTE(review): 'h[it]tps?:' matches 'http(s):' but also 'hitp(s):';
        # presumably deliberate (obfuscated/typo links) -- confirm.
        http_protocol = r"""h[it]tps?:"""
        # generic_protocol = r"""[a-z][\w-]+"""
        # Alternation of generic TLDs plus two-letter country codes.
        top_level_domain = r"""(?:com|net|org|edu|gov|mil|aero|asia|biz|""" + \
            r"""cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|""" + \
            r"""travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|""" + \
            r"""au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|""" + \
            r"""bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|""" + \
            r"""cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|""" + \
            r"""es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|""" + \
            r"""gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|""" + \
            r"""il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|""" + \
            r"""kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|""" + \
            r"""md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|""" + \
            r"""my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|""" + \
            r"""pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|""" + \
            r"""sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|""" + \
            r"""sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|""" + \
            r"""tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|""" + \
            r"""ye|yt|yu|za|zm|zw)"""
        # Case-insensitive ((?i)) pattern: either an explicit protocol URL,
        # or a bare domain ending in a known TLD; the parenthesis clauses
        # allow balanced parentheses inside the path.
        pattern = r"""(?i)\b((?:""" + http_protocol + \
            r"""(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.]""" + \
            top_level_domain + \
            r"""/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+\)""" + \
            r"""[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)""" + \
            r"""[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’])""" + \
            r"""|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.]""" + \
            top_level_domain + \
            r"""\b/?(?!@)))"""
        self.pattern = re.compile(pattern)

    def get_tok_indexlist(self, token_list):
        '''
        Given a list of text tokens, it returns a list with the elements
        representing the starting point of each token considering the spaces
        between the tokens
        eg: Input = ["ab","cd","ef"]
            Output = [0,3,6]
        '''
        tok_index = []
        for index, _ in enumerate(token_list):
            if index == 0:
                tok_index.extend([0])
            else:
                # +1 to consider space
                tok_index.extend([tok_index[-1]+len(token_list[index-1])+1])
        return tok_index

    def char_mapping(self, token_list):
        '''
        Given a list of text tokens, it returns a dictionary with
        a reference mapping of characters to token indexes
        eg: Input = ["ab","cd","ef"]
            Output = {0: 0, 1: 0, 3: 1, 4: 1, 6: 2, 7: 2}
        '''
        chars_to_tokens = {}
        token_index = self.get_tok_indexlist(token_list)
        for index, token in enumerate(token_list):
            # Spaces between tokens are intentionally absent from the map.
            for i in range(token_index[index], token_index[index]+len(token)):
                chars_to_tokens[i] = index
        return chars_to_tokens

    def find_link_regex(self, text):
        '''
        Return (start, end, matched_text) triples for every URL-like match
        in the lowercased text.
        '''
        links = list()
        matches = re.finditer(self.pattern, text.lower())
        for match in matches:
            # NOTE(review): start()/end() are shifted by -1 here; the shifted
            # positions feed return_link_index, where off-token (space)
            # positions are simply absent from char_map -- confirm the shift
            # is intentional and not an off-by-one.
            links.extend([(match.start()-1,
                           match.end()-1,
                           match.group(0).strip())])
        return links

    def return_link_index(self, matches, char_map, tags_list):
        '''
        Given the matched groups,token_list,token_tags,char_mapping, extract
        index of the tags_list to be qualified as LINK for SPV
        '''
        words_include = []
        for match in matches:
            start = match[0]
            end = match[1]
            # Character positions covered by this match.
            include = list(range(start, end, 1))
            # Translate character positions to token indexes (de-duplicated).
            words = list(
                set([value for key, value in char_map.items()
                     if key in include]))
            words_include.extend(words)
        # Do not overwrite tokens that already carry a non-"O" tag.
        existing_tags_index = [idx for idx, tag in
                               enumerate(tags_list) if tag != "O"]
        words_include_filtered = [i for i in words_include
                                  if i not in existing_tags_index]
        return words_include_filtered

    def match_ref(self, text, token_list, tags_list,
                  entity='LINK', verbose=False):
        '''
        Return the indexes of tokens in token_list that should be tagged as
        *entity*.  Tokens are kept only if their alphabetic part is at least
        6 characters and entirely one case (all lower or all upper).
        '''
        link_indices = []
        text = text.strip().lower()
        matches = self.find_link_regex(text)
        if len(matches) > 0:
            char_map = self.char_mapping(token_list)
            match_indices = self.return_link_index(matches,
                                                   char_map,
                                                   tags_list)
            if len(match_indices) > 0:
                for idx in match_indices:
                    tok = token_list[idx]
                    # Filter on the alphabetic characters only.
                    alpha_tok = re.sub('[^a-zA-Z]', '', tok)
                    if len(alpha_tok) < 6:
                        continue
                    if alpha_tok.islower() or alpha_tok.isupper():
                        link_indices.append(idx)
        if verbose and len(link_indices) > 0:  # pragma: no cover <--
            print(f'\nFinal Matches {entity}: '
                  f'{[token_list[idx] for idx in link_indices]}\n')
        return link_indices
if __name__ == '__main__':
    # Manual smoke test: tokenize a sample with spaCy, run the matcher,
    # and print which tokens end up tagged as LINK.
    import spacy
    from pprint import pprint
    nlp = spacy.load('en_core_web_sm')
    test = "www.google.com/?search Search Results: ..."
    doc = nlp(test)
    toks = [t.text for t in doc]
    toks = [t.strip() for t in toks]
    # Rebuild the text with single spaces so char offsets line up with
    # get_tok_indexlist's "+1 per space" assumption.
    text = " ".join(toks)
    tags = ['O']*len(doc)
    tag = "LINK"
    link_hdlr = LinkHandler()
    link_indices = link_hdlr.match_ref(text, toks, tags,
                                       entity=tag,
                                       verbose=True)
    tags = [tag if idx in link_indices else 'O'
            for idx in range(len(toks))]
    print(text)
    print(toks)
    print(tags)
    pprint(list(zip(toks, tags)), compact=True)
|
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from ploomber.exceptions import DAGWithDuplicatedProducts
from ploomber.products.metaproduct import MetaProduct
from ploomber.products import File
from ploomber.io import pretty_print
def _generate_error_message_pair(key, value):
    """Format a single duplicated-product line: the product and its tasks."""
    tasks = pretty_print.iterable(value)
    return '* {!r} generated by tasks: {}'.format(key, tasks)
def _generate_error_message(duplicated):
    """Render one line per duplicated product, joined with newlines."""
    lines = [_generate_error_message_pair(key, value)
             for key, value in duplicated.items()]
    return '\n'.join(lines)
def check_duplicated_products(dag):
    """
    Raises an error if more than one task produces the same product.

    Note that this relies on the __hash__ and __eq__ implementations of
    each Product to determine whether they're the same or not. This
    implies that a relative File and absolute File pointing to the same file
    are considered duplicates and SQLRelations (in any of its flavors) are
    the same when they resolve to the same (schema, name, type) tuple
    (i.e., client is ignored), this because when using the generic SQLite
    backend for storing SQL product metadata, the table only relies on schema
    and name to retrieve metadata.
    """
    # defaultdict(list) is the idiomatic (and cheaper) spelling of
    # defaultdict(lambda: []).
    prod2tasknames = defaultdict(list)
    for name in dag._iter():
        product = dag[name].product
        if isinstance(product, MetaProduct):
            # Expand so each sub-product is tracked individually.
            for p in product.products:
                prod2tasknames[p].append(name)
        else:
            prod2tasknames[product].append(name)
    duplicated = {k: v for k, v in prod2tasknames.items() if len(v) > 1}
    if duplicated:
        raise DAGWithDuplicatedProducts(
            'Tasks must generate unique products. '
            'The following products appear in more than '
            f'one task:\n{_generate_error_message(duplicated)}')
def flatten_products(elements):
    """Return every File with a configured client, expanding MetaProducts."""
    flat = []
    for item in elements:
        # A MetaProduct is iterable over its sub-products; anything else is
        # considered on its own.
        candidates = list(item) if isinstance(item, MetaProduct) else [item]
        for candidate in candidates:
            if isinstance(candidate, File) and candidate.client:
                flat.append(candidate)
    return flat
def fetch_remote_metadata_in_parallel(dag):
    """Fetches remote metadata in parallel for the dag's File products."""
    products = (dag[name].product for name in dag._iter()
                if isinstance(dag[name].product, (File, MetaProduct)))
    files = flatten_products(products)
    if not files:
        return
    with ThreadPoolExecutor(max_workers=64) as executor:
        # Submit one fetch per file and remember which future maps to which.
        future2file = {
            executor.submit(f._remote._fetch_remote_metadata): f
            for f in files
        }
        for future in as_completed(future2file):
            exception = future.exception()
            if exception is not None:
                local = future2file[future]
                raise RuntimeError(
                    'An error occurred when fetching '
                    f'remote metadata for file {local!r}') from exception
|
"""
File: import_data.py
Author: Ian Ross
Email: iross@cs.wisc.edu
Description: # TODO
# Expected structure:
## stack_output/
# xml/ %
# html/
# img/ $
# output.csv $
# tables.csv $
# figures.csv $
## images/ &
# %: xml/annotation import
# $: kb import
# &: image import
"""
import uuid
import lxml.etree as etree
import psycopg2
import glob
import os, sys
import shutil
import fnmatch
import re
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import watchdog
import time
import string
from random import choice
from shutil import copyfile
VERBOSE = False
# Raw strings so the regex escapes are not (mis)interpreted by Python first;
# the previous '\/' was an invalid Python escape (DeprecationWarning) that
# only worked by accident.
image_prefix_regex = re.compile(r'^/images/?')
obs_regex = re.compile(r'im[0-9a-zA-Z]{16}')

PG_CONN_STR = os.getenv("PG_CONN_STR")
if PG_CONN_STR is None:
    PG_CONN_STR = "postgresql://postgres:@db:5432/annotations"

# Retry the connection: the database container may still be starting when
# this script launches.
conn_attempts = 0
connected = False
while conn_attempts < 5 and not connected:
    try:
        print("Connecting to %s" % PG_CONN_STR)
        conn = psycopg2.connect(PG_CONN_STR)
        cur_images = conn.cursor()
        cur = conn.cursor()
        connected = True
    # Narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit and hid programming errors.
    except psycopg2.Error:
        conn_attempts += 1
        print("Could not connect! Waiting 10 seconds and trying again. Attempt %s of 5" % conn_attempts)
        time.sleep(10)

if not connected:
    # Previously this fell through and crashed later with a confusing
    # NameError on `cur`; fail loudly and clearly instead.
    raise RuntimeError("Could not connect to the database after 5 attempts.")

# Cache of tag name -> tag_id for quick lookups during import.
tag_map = {}
cur.execute("SELECT tag_id, name FROM tag;")
for tag in cur:
    tag_map[tag[1]] = tag[0]
class Watcher():
    """
    Watch output_dir recursively for newly created *.xml, *.csv and *.png
    artifacts and dispatch each one to the matching import_* helper.
    """

    def __init__(self, output_dir, png_path, stack):
        self.output_dir = output_dir  # directory tree to watch
        self.png_path = png_path      # directory holding page PNGs
        self.stack = stack            # stack name handed to import_xml
        # Only react to the artifact types we can import; skip PNGs under
        # html*/ directories and all directory events.
        self.event_handler = watchdog.events.PatternMatchingEventHandler(patterns=["*.xml", "*.csv", "*.png"],
                                                                         ignore_patterns=["*/html*/*.png"],
                                                                         ignore_directories=True)
        self.event_handler.on_created = self.on_created
        self.observer = Observer()
        self.observer.schedule(self.event_handler, self.output_dir, recursive=True)
        print("Watching for output files in %s" % self.output_dir)
        self.observer.start()

    def on_created(self, event):
        """Route a filesystem creation event to the right importer."""
        if event.is_directory:
            return None
        elif event.event_type == 'created':
            # Take any action here when a file is first created.
            if event.src_path.endswith(".png"):
                import_image(os.path.basename(event.src_path), self.png_path)
            elif event.src_path.endswith(".xml"):
                import_xml(os.path.basename(event.src_path), os.path.dirname(event.src_path), self.png_path, self.stack)
            elif event.src_path.endswith(".csv"):
                # CSV imports are distinguished by filename.
                if "output.csv" in event.src_path:
                    import_equations(event.src_path)
                elif "tables.csv" in event.src_path:
                    import_tables(event.src_path)
                elif "figures.csv" in event.src_path:
                    import_figures(event.src_path)
            else:
                if VERBOSE:
                    print("WARNING! Not sure what to do with file (%s)" % event.src_path)

    def stop(self):
        """Stop the observer thread and wait for it to finish."""
        self.observer.stop()
        self.observer.join()
def obfuscate_png(filepath, png_path):
    """Rename a page PNG so its filename carries a random obfuscation token.

    Args:
        filepath (str): PNG filename, relative to png_path.
        png_path (str): directory that contains the PNG.

    Returns:
        str: the (possibly new) filename. If the name already contains an
        `im<16 alphanumerics>` token it is returned unchanged; otherwise the
        file is renamed on disk to `<name>_im<token>.png` and the new
        filename is returned.
    """
    check = obs_regex.search(filepath)
    if check is not None:  # already contains obfuscation
        return filepath
    # NOTE: random.choice is not cryptographically strong; fine for
    # de-guessable filenames, not for security tokens.
    image_str = 'im%s' % ''.join(choice(string.ascii_letters + string.digits) for _ in range(16))
    new_filepath = filepath.replace(".png", "_%s.png" % image_str)
    shutil.move(os.path.join(png_path, filepath), os.path.join(png_path, new_filepath))
    return new_filepath
def parse_docid_and_page_no(filename):
    """Split a page-image/XML filename into document id and page number.

    Filenames follow the pattern `<doc_id>[_input].pdf_<page_no>[_<suffix>].<ext>`,
    e.g. "paper_input.pdf_12_imAbCd....png".

    Args:
        filename (str): path or bare filename to parse.

    Returns:
        tuple: (doc_id (str), page_no (int)).
    """
    doc_id, page_no = os.path.basename(filename).split(".pdf_")
    doc_id = doc_id.replace("_input", "")
    # The page number may be followed by an obfuscation suffix ("12_im<token>"),
    # so keep only the leading integer part.
    page_no = int(os.path.splitext(page_no)[0].split("_")[0])
    return doc_id, page_no
def import_image(image_filepath, png_path):
    """
    From an image's filename, get the docid + page_no, check
    if the page image exists in the db, and return the image_id (inserting as needed)

    Args:
        image_filepath (str): PNG filename, relative to png_path.
        png_path (str): directory holding the page PNGs.

    Returns: image_id (UUID) - db-level internal id for the image.
    """
    # Get rid of leading `/images` if it exists
    # print("Importing image %s" % image_filepath)
    doc_id, page_no = parse_docid_and_page_no(image_filepath)
    # Existing (doc_id, page_no) rows are returned as-is; only new pages get
    # obfuscated filenames and an INSERT.
    cur.execute("SELECT image_id FROM image WHERE doc_id=%s AND page_no=%s", (doc_id, page_no))
    check = cur.fetchone()
    if check is None:
        image_id = uuid.uuid4()
        image_filepath = obfuscate_png(image_filepath, png_path)
    else: # repeat image
        return check[0]
    # Upsert keyed on image_id; RETURNING gives back the stored id either way.
    cur.execute("INSERT INTO image (image_id, doc_id, page_no, file_path) VALUES (%s, %s, %s, %s) ON CONFLICT (image_id) DO UPDATE SET image_id=EXCLUDED.image_id RETURNING image_id;", (str(image_id), doc_id, page_no, image_filepath))
    conn.commit()
    image_id = cur.fetchone()[0]
    return image_id
def insert_image_stack(image_id, stack_name):
    """Link an image to a stack and return the image_stack_id.

    The ON CONFLICT upsert makes the call idempotent: re-linking an existing
    (image, stack) pair returns the existing row's id.
    """
    cur.execute("INSERT INTO image_stack (image_id, stack_id) VALUES (%s, %s) ON CONFLICT (image_id, stack_id) DO UPDATE SET image_id=EXCLUDED.image_id RETURNING image_stack_id", (str(image_id), stack_name))
    conn.commit()
    return cur.fetchone()[0]
def import_xml(xml_filename, xml_path, png_path, stack):
    """Import one VOC-style annotation XML: locate its page PNG, register the
    image and image_stack rows, then insert one image_tag row (with a
    PostGIS envelope geometry) per <object> element."""
    with open(os.path.join(xml_path, xml_filename)) as fin:
        doc = etree.parse(fin)
    # try to find the image associated with this xml
    try:
        image_filepath = doc.xpath("//filename/text()")[0]
        # Raises IndexError if the referenced PNG is not on disk, which sends
        # us into the filename-based fallback below.
        _ = glob.glob("%s/%s" % (png_path, image_filepath))[0]
        # image_filepath = glob.glob("%s/%s*png" % (png_path, xml_filename.replace(xml_path, "").replace(".xml","")))[0]
    except: # something funny in the xml -- try to fall back on filename consistency
        image_filepath = os.path.basename(xml_filename).replace(".xml",".png")
        # Widen the glob to tolerate "_input" doc-id variants and the
        # obfuscation suffix added by obfuscate_png().
        check = glob.glob("%s/%s" % (png_path, image_filepath.replace(".pdf", "*.pdf").replace(".png", "_*.png")))
        if check == [] or len(check)>1:
            # Zero or ambiguous matches: nothing safe to import.
            if VERBOSE:
                print("Couldn't find page-level PNG associated with %s! Skipping." % xml_filename)
            return
        else:
            image_filepath = check[0].replace(png_path + "/", "")
    image_id = import_image(image_filepath, png_path)
    image_stack_id = insert_image_stack(image_id, stack)
    # loop through tags
    for record in doc.xpath("//object"):
        image_tag_id = uuid.uuid4()
        tag_name = record.xpath('name/text()')[0]
        # tag_map was populated from the `tag` table at module load.
        tag_id = tag_map[tag_name]
        xmin = int(record.xpath('bndbox/xmin/text()')[0])
        ymin = int(record.xpath('bndbox/ymin/text()')[0])
        xmax = int(record.xpath('bndbox/xmax/text()')[0])
        ymax = int(record.xpath('bndbox/ymax/text()')[0])
        cur.execute("INSERT INTO image_tag (image_tag_id, image_stack_id, tag_id, geometry, tagger) VALUES (%s, %s, %s, ST_Collect(ARRAY[ST_MakeEnvelope(%s, %s, %s, %s)]), %s) ON CONFLICT DO NOTHING;", (str(image_tag_id), image_stack_id, tag_id, xmin, ymin, xmax, ymax, 'COSMOS'))
        conn.commit()
def import_xmls(xml_path, png_path, stack):
    """
    Recursively import every annotation XML found under xml_path.

    Args:
        xml_path (str): root directory to walk for *.xml files.
        png_path (str): directory holding the page PNGs.
        stack (str): stack name the annotations belong to.

    Returns: 0 on completion (kept for caller compatibility).
    """
    for root, _dirnames, filenames in os.walk(xml_path):
        for xml_filename in fnmatch.filter(filenames, '*.xml'):
            import_xml(xml_filename, root, png_path, stack)
    return 0
def import_figures(figure_kb_path):
    """Bulk-load the figures.csv KB dump into equations.figures.

    Stages the CSV into equations.figures_tmp via COPY, then inserts staged
    rows into equations.figures (skipping conflicts) and drops the staging
    table.

    Args:
        figure_kb_path (str): path to the figures.csv dump; a missing file is
            logged (when VERBOSE) and results in no new rows.
    """
    cur.execute("""
        CREATE TABLE IF NOT EXISTS equations.figures_tmp (
            target_img_path text,
            target_unicode text,
            target_tesseract text,
            assoc_img_path text,
            assoc_unicode text,
            assoc_tesseract text,
            html_file text
        );
        """)
    conn.commit()
    try:
        with open(figure_kb_path) as f:
            copy_sql = """
                COPY equations.figures_tmp(
                    target_img_path,
                    target_unicode,
                    target_tesseract,
                    assoc_img_path,
                    assoc_unicode,
                    assoc_tesseract,
                    html_file) FROM STDIN WITH DELIMITER ',' CSV HEADER;
                """
            cur.copy_expert(sql=copy_sql, file=f)
            conn.commit()
    except IOError:
        # Best-effort: the dump may legitimately be absent for some runs.
        if VERBOSE:
            print("WARNING! Could not find figures.csv KB dump.")
    # The staging table is dropped even when the COPY was skipped.
    cur.execute("INSERT INTO equations.figures SELECT * FROM equations.figures_tmp ON CONFLICT DO NOTHING; DROP TABLE equations.figures_tmp;")
    conn.commit()
def import_tables(table_kb_path):
    """Bulk-load the tables.csv KB dump into equations.tables.

    Same staging-table pattern as import_figures(): COPY into
    equations.tables_tmp, insert with conflict-skip, drop the staging table.

    Args:
        table_kb_path (str): path to the tables.csv dump; a missing file is
            logged (when VERBOSE) and results in no new rows.
    """
    cur.execute("""
        CREATE TABLE IF NOT EXISTS equations.tables_tmp (
            target_img_path text,
            target_unicode text,
            target_tesseract text,
            assoc_img_path text,
            assoc_unicode text,
            assoc_tesseract text,
            html_file text
        );
        """)
    conn.commit()
    try:
        with open(table_kb_path) as f:
            copy_sql = """
                COPY equations.tables_tmp(
                    target_img_path,
                    target_unicode,
                    target_tesseract,
                    assoc_img_path,
                    assoc_unicode,
                    assoc_tesseract,
                    html_file) FROM STDIN WITH DELIMITER ',' CSV HEADER;
                """
            cur.copy_expert(sql=copy_sql, file=f)
            conn.commit()
    except IOError:
        # Best-effort: the dump may legitimately be absent for some runs.
        if VERBOSE:
            print("WARNING! Could not find tables.csv KB dump.")
    # The staging table is dropped even when the COPY was skipped.
    cur.execute("INSERT INTO equations.tables SELECT * FROM equations.tables_tmp ON CONFLICT DO NOTHING; DROP TABLE equations.tables_tmp;")
    conn.commit()
def import_equations(equation_kb_path):
    """Bulk-load the output.csv KB dump into equations.output and refresh the
    derived materialized views.

    Stages the CSV into equations.output_tmp via COPY, inserts staged rows
    into equations.output (skipping conflicts), drops the staging table, then
    refreshes the equation/phrase/sentence/variable materialized views.

    Args:
        equation_kb_path (str): path to the output.csv dump; a missing file
            is logged (when VERBOSE) and results in no new rows.
    """
    cur.execute("""
        CREATE TABLE IF NOT EXISTS equations.output_tmp (
            document_name text,
            id int,
            text text,
            document_id int,
            equation_id int,
            equation_text text,
            equation_offset text,
            sentence_id int,
            sentence_offset int,
            sentence_text text,
            score float,
            var_top int,
            var_bottom int,
            var_left int,
            var_right int,
            var_page int,
            sent_xpath text,
            sent_words text[],
            sent_top text[],
            sent_table_id int,
            sent_section_id int,
            sent_row_start int,
            sent_row_end int,
            sent_right int[],
            sent_position int,
            sent_pos_tags text[],
            sent_paragraph_id int,
            sent_page int[],
            sent_ner_tags text[],
            sent_name text,
            sent_lemmas text[],
            sent_left int[],
            sent_html_tag text,
            sent_html_attrs text[],
            sent_document_id int,
            sent_dep_parents text[],
            sent_dep_labels text[],
            sent_col_start int,
            sent_col_end int,
            sent_char_offsets int[],
            sent_cell_id int,
            sent_bottom int[],
            sent_abs_char_offsets int[],
            equation_top int,
            equation_bottom int,
            equation_left int,
            equation_right int,
            equation_page int,
            symbols text[],
            phrases text[],
            phrases_top text[],
            phrases_bottom text[],
            phrases_left text[],
            phrases_right text[],
            phrases_page text[],
            sentence_img text,
            equation_img text
        );
        """)
    conn.commit()
    try:
        with open(equation_kb_path) as f:
            # The COPY column list mirrors the staging table definition above.
            copy_sql = """
                COPY equations.output_tmp(
                    document_name,
                    id,
                    text,
                    document_id,
                    equation_id,
                    equation_text,
                    equation_offset,
                    sentence_id,
                    sentence_offset,
                    sentence_text,
                    score,
                    var_top,
                    var_bottom,
                    var_left,
                    var_right,
                    var_page,
                    sent_xpath,
                    sent_words,
                    sent_top,
                    sent_table_id,
                    sent_section_id,
                    sent_row_start,
                    sent_row_end,
                    sent_right,
                    sent_position,
                    sent_pos_tags,
                    sent_paragraph_id,
                    sent_page,
                    sent_ner_tags,
                    sent_name,
                    sent_lemmas,
                    sent_left,
                    sent_html_tag,
                    sent_html_attrs,
                    sent_document_id,
                    sent_dep_parents,
                    sent_dep_labels,
                    sent_col_start,
                    sent_col_end,
                    sent_char_offsets,
                    sent_cell_id,
                    sent_bottom,
                    sent_abs_char_offsets,
                    equation_top,
                    equation_bottom,
                    equation_left,
                    equation_right,
                    equation_page,
                    symbols,
                    phrases,
                    phrases_top,
                    phrases_bottom,
                    phrases_left,
                    phrases_right,
                    phrases_page,
                    sentence_img,
                    equation_img) FROM STDIN WITH DELIMITER ',' CSV HEADER;
                """
            cur.copy_expert(sql=copy_sql, file=f)
            conn.commit()
    except IOError:
        # Best-effort: the dump may legitimately be absent for some runs.
        if VERBOSE:
            print("WARNING! Could not find output.csv KB dump.")
    # The staging table is dropped even when the COPY was skipped.
    cur.execute("INSERT INTO equations.output SELECT * FROM equations.output_tmp ON CONFLICT DO NOTHING; DROP TABLE equations.output_tmp;")
    conn.commit()
    # Derived views depend on equations.output; refresh them after the load.
    cur.execute("""
        REFRESH MATERIALIZED VIEW equations.equation;
        REFRESH MATERIALIZED VIEW equations.phrase;
        REFRESH MATERIALIZED VIEW equations.sentence;
        REFRESH MATERIALIZED VIEW equations.variable;
        """)
    conn.commit()
def import_kb(output_path):
    """
    Import the CSV knowledge-base dumps (figures, tables, equations)
    found in output_path.

    Args:
        output_path (str): directory containing figures.csv, tables.csv
            and output.csv.

    Returns: 0 on completion (kept for caller compatibility).
    """
    # BUG FIX: use os.path.join instead of string concatenation so that
    # output_path works both with and without a trailing slash.
    import_figures(os.path.join(output_path, "figures.csv"))
    import_tables(os.path.join(output_path, "tables.csv"))
    import_equations(os.path.join(output_path, "output.csv"))
    return 0
def main():
    """Entry point: import segmentation output into the annotations DB.

    Usage:
        python import_segmentations.py <output_dir> <png_dir> [stack_name stack_type]

    Imports every page PNG, then (for prediction stacks) the annotation XMLs
    and the CSV knowledge-base dumps found in <output_dir>.
    """
    # Need as input: pile of xml, pile of pngs
    if len(sys.argv) <= 2:
        print("Please specify output and page image directories! python import_segmentations.py [location_of_output] [location_of_pngs]")
        sys.exit(1)
    output_path = sys.argv[1]
    png_path = sys.argv[2]
    if len(sys.argv) == 5:
        stack = sys.argv[3]
        stack_type = sys.argv[4]
    else:
        stack = "default"
        stack_type = "prediction"
    cur.execute("SELECT id FROM stack_type")
    # BUG FIX: fetchall() returns row tuples -- flatten to the id values and
    # test membership against stack_types. The original did
    # `stack_types = [cur.fetchall()]` and `if stack_type not in stack_type`,
    # which compared the string against itself and never rejected anything.
    stack_types = [row[0] for row in cur.fetchall()]
    if stack_type not in stack_types:
        print("Invalid stack type selected! Please specify one of: %s" % stack_types)
        print("Example usage: python import_segmentations.py [location_of_output] [location_of_pngs] [stack_name] %s" % stack_types)
        sys.exit(1)
    cur.execute("INSERT INTO stack (stack_id, stack_type) VALUES (%s, %s) ON CONFLICT DO NOTHING;", (stack, stack_type))
    conn.commit()
    # images can be the same, but they need to be a different image_stack
    # BUG FIX: os.path.join tolerates png_path with or without a trailing slash.
    for image_filepath in glob.glob(os.path.join(png_path, "*.png")):
        image_filepath = os.path.basename(image_filepath)
        image_id = import_image(image_filepath, png_path)
        if stack_type == "annotation": # if prediction, this will get called as part of the XML import
            insert_image_stack(image_id, stack)
    if stack_type == "prediction":
        import_xmls(output_path, png_path, stack)
        import_kb(output_path)
    # TODO: re-enable live monitoring of the output dir via the Watcher class
    # if continuous importing of new files is needed.
# Script entry point: run the importer when invoked directly.
if __name__ == '__main__':
    main()
|
from fuzzywuzzy import fuzz
from fuzzywuzzy import process as fuzz_process
import regex
from will import settings
from will.decorators import require_settings
from will.utils import Bunch
from .base import GenerationBackend, GeneratedOption
class FuzzyBestMatch(GenerationBackend):
    """Generation backend that fuzzy-matches an incoming message against the
    regex patterns registered by the bot's message listeners.

    The single best-scoring pattern (per fuzzywuzzy's extractOne) becomes a
    GeneratedOption when its confidence clears
    settings.FUZZY_MINIMUM_MATCH_CONFIDENCE.
    """

    def _generate_compiled_regex(self, method_meta):
        """Compile (and cache, keyed by the plugin's parent_path) a fuzzy
        variant of the listener's pattern, allowing up to
        settings.FUZZY_REGEX_ALLOWABLE_ERRORS edit errors.

        Falls back to an escaped (literal) pattern when the raw pattern does
        not compile.
        """
        if not hasattr(self, "cached_regex"):
            self.cached_regex = {}
        method_path = method_meta["plugin_info"]["parent_path"]
        if method_path not in self.cached_regex:
            regex_string = method_meta["regex_pattern"]
            if "case_sensitive" in method_meta and not method_meta["case_sensitive"]:
                regex_string = "(?i)%s" % regex_string
            # Same flag set either way; multiline listeners additionally get
            # MULTILINE | DOTALL. This replaces the original's four
            # near-identical compile branches.
            flags = regex.ENHANCEMATCH
            if method_meta["multiline"]:
                flags |= regex.MULTILINE | regex.DOTALL
            # "{e<=N}" is the regex module's fuzzy-matching quantifier.
            try:
                self.cached_regex[method_path] = regex.compile("%s{e<=%s}" % (
                    regex_string,
                    settings.FUZZY_REGEX_ALLOWABLE_ERRORS
                ), flags)
            except regex.error:
                # BUG FIX: was a bare `except:`; only a bad pattern should
                # trigger the escaped-literal fallback.
                self.cached_regex[method_path] = regex.compile("%s{e<=%s}" % (
                    regex.escape(regex_string),
                    settings.FUZZY_REGEX_ALLOWABLE_ERRORS
                ), flags)
        return self.cached_regex[method_path]

    def do_generate(self, event):
        """Return a (possibly empty) list of GeneratedOption for `event`."""
        exclude_list = ["fn", ]
        matches = []
        message = event.data
        # TODO: add token_sort_ratio
        # Build the pattern -> listener lookup once per backend instance.
        if not hasattr(self, "match_choices"):
            self.match_choices = []
            self.match_methods = {}
            if message.content:
                for name, l in self.bot.message_listeners.items():
                    if l["regex_pattern"] not in self.match_methods:
                        self.match_methods[l["regex_pattern"]] = l
                        self.match_choices.append(l["regex_pattern"])
        match_str, confidence = fuzz_process.extractOne(message.content, self.match_choices)
        l = self.match_methods[match_str]
        if confidence >= settings.FUZZY_MINIMUM_MATCH_CONFIDENCE:
            regex_matches = l["regex"].search(message.content)
            if (
                # The search regex matches and
                # regex_matches
                # It's not from me, or this search includes me, and
                (
                    message.will_said_it is False or
                    ("include_me" in l and l["include_me"])
                )
                # I'm mentioned, or this is an overheard, or we're in a 1-1
                and (
                    message.is_private_chat or
                    ("direct_mentions_only" not in l or not l["direct_mentions_only"]) or
                    message.is_direct
                )
            ):
                fuzzy_regex = self._generate_compiled_regex(l)
                regex_matches = fuzzy_regex.search(message.content)
                context = Bunch()
                for k, v in l.items():
                    if k not in exclude_list:
                        context[k] = v
                if regex_matches and hasattr(regex_matches, "groupdict"):
                    context.search_matches = regex_matches.groupdict()
                else:
                    context.search_matches = {}
                o = GeneratedOption(context=context, backend="regex", score=confidence)
                matches.append(o)
        return matches
|
import matplotlib
matplotlib.use('Agg')
from utils.data_reader import Personas_CVAE
from model.common_layer import NoamOpt, evaluate
from utils import config
from model.CVAE.util.config import Model_Config
import torch
import torch.nn as nn
import numpy as np
from random import shuffle
from copy import deepcopy
import math
from tensorboardX import SummaryWriter
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
from model.CVAE.model import Model
def make_infinite(dataloader):
    """Yield batches from `dataloader` forever, restarting it on exhaustion."""
    while True:
        yield from dataloader
def make_infinite_list(personas):
    """Yield personas forever, reshuffling the list in place at the start of
    each epoch."""
    while True:
        print("New epoch(shuffle all personas)")
        shuffle(personas)
        yield from personas
def do_learning(model, train_iter, iterations):
    """Run `iterations` training batches drawn from `train_iter`.

    Returns the loss of the *final* batch. (The per-batch loss/ppl lists are
    accumulated but not currently used by any return value.)
    """
    ppls, losses = [], []
    for _ in range(iterations):
        loss, ppl, _ = model.train_one_batch(next(train_iter))
        losses.append(loss)
        ppls.append(ppl)
    return loss
def do_learning_early_stop(model, train_iter, val_iter, iterations, strict=1):
    """Train for up to `iterations` epochs with early stopping on validation
    perplexity, restoring the best weights before returning.

    Args:
        model: exposes train_one_batch(), state_dict(), load_state_dict().
        train_iter: iterable of training batches (re-iterated each epoch).
        val_iter: iterable of validation batches.
        iterations (int): maximum number of epochs.
        strict (int): consecutive non-improving epochs tolerated before stop.

    Returns:
        ((mean train loss, mean train ppl, best val loss, best val ppl), idx)
        where idx is the number of validation improvements seen.

    NOTE(review): do_evaluation() in this module returns five values, but it
    is unpacked into two here -- this function looks stale; confirm before
    relying on it.
    """
    # b_loss, b_ppl = do_evaluation(model, val_iter)
    b_loss, b_ppl = 100000, 100000  # sentinel "worst so far" values
    best = deepcopy(model.state_dict())
    cnt = 0  # consecutive epochs without improvement
    idx = 0  # number of improvements
    for _, _ in enumerate(range(iterations)):
        train_l, train_p = [], []
        for d in train_iter:
            t_loss, t_ppl, _ = model.train_one_batch(d)
            train_l.append(t_loss)
            train_p.append(t_ppl)
        n_loss, n_ppl = do_evaluation(model, val_iter)
        ## early stopping
        if (n_ppl <= b_ppl):
            b_ppl = n_ppl
            b_loss = n_loss
            cnt = 0
            idx += 1
            best = deepcopy(model.state_dict()) ## save best weights
        else:
            cnt += 1
            if (cnt > strict): break
    ## load the best model
    model.load_state_dict({name: best[name] for name in best})
    return (np.mean(train_l), np.mean(train_p), b_loss, b_ppl), idx
def do_learning_fix_step(model, train_iter, val_iter, iterations, test=False):
    """Fine-tune for a fixed number of passes over train_iter, then score on
    val_iter.

    Returns:
        (avg loss, mean ppl, mean nll, mean kld, mean kld weight) over the
        validation batches. (`test` is accepted for interface compatibility
        but unused.)
    """
    # Fine-tuning passes.
    for _ in range(iterations):
        for batch in train_iter:
            model.train_one_batch(batch)
    # Score every validation batch without updating weights.
    total_loss = 0
    n_batches = 0
    ppls, nlls, klds, kld_weights = [], [], [], []
    for batch in val_iter:
        _, ppl, loss, nll_loss, kld_loss, kld_weight = model.train_one_batch(batch, train=False)
        total_loss += loss
        ppls.append(ppl)
        nlls.append(nll_loss)
        klds.append(kld_loss)
        kld_weights.append(kld_weight)
        n_batches += 1
    return total_loss / n_batches, np.mean(ppls), np.mean(nlls), np.mean(klds), np.mean(kld_weights)
def do_evaluation(model, test_iter):
    """Evaluate `model` on every batch of `test_iter` without weight updates.

    Returns:
        Means over the batches: (loss, ppl, nll, kld, kld weight).
    """
    losses, ppls, nlls, klds, weights = [], [], [], [], []
    for batch in test_iter:
        loss, ppl, _, nll_loss, kl_loss, weight = model.train_one_batch(batch, train=False)
        losses.append(loss)
        ppls.append(ppl)
        nlls.append(nll_loss)
        klds.append(kl_loss)
        weights.append(weight)
    return np.mean(losses), np.mean(ppls), np.mean(nlls), np.mean(klds), np.mean(weights)
# =================================main=================================
# Model config for CVAE-dialog
model_config = Model_Config()
# p = Personas()
p = Personas_CVAE()
writer = SummaryWriter(log_dir=config.save_path)
# Build model, optimizer, and set states
if not (config.load_frompretrain == 'None'):
    # meta_net = Transformer(p.vocab, model_file_path=config.load_frompretrain, is_eval=False)
    meta_net = Model(model_config, p.vocab)
else:
    # meta_net = Transformer(p.vocab)
    meta_net = Model(model_config, p.vocab)
meta_net.print_parameters()  # print the number of model parameters
if config.USE_CUDA:
    meta_net.to('cuda')
# Outer-loop (meta) optimizer selection.
if config.meta_optimizer == 'sgd':
    meta_optimizer = torch.optim.SGD(meta_net.parameters(), lr=config.meta_lr)
elif config.meta_optimizer == 'adam':
    meta_optimizer = torch.optim.Adam(meta_net.parameters(), lr=config.meta_lr)
elif config.meta_optimizer == 'noam':
    meta_optimizer = NoamOpt(config.hidden_dim, 1, 4000,
                             torch.optim.Adam(meta_net.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
else:
    raise ValueError
global_step = 0
meta_batch_size = config.meta_batch_size
# Each persona is one meta-learning task; cycle through them forever.
tasks = p.get_personas('train')
# tasks_loader = {t: p.get_data_loader(persona=t,batch_size=config.batch_size, split='train') for t in tasks}
tasks_iter = make_infinite_list(tasks)
# meta early stop
patience = 300
if config.fix_dialnum_train:
    patience = 300
# best_loss = 10000000
best_loss = 100.0
stop_count = 0
for meta_iteration in range(config.epochs):
## save original weights to make the update
weights_original = deepcopy(meta_net.state_dict())
train_loss_before = []
train_loss_meta = []
# loss accumulate from a batch of tasks
batch_loss = 0
batch_loss_before = 0
for _ in range(meta_batch_size):
# Get task
if config.fix_dialnum_train:
train_iter, val_iter = p.get_balanced_loader(persona=tasks_iter.__next__(), batch_size=config.batch_size,
split='train')
else:
train_iter, val_iter = p.get_data_loader(persona=tasks_iter.__next__(), batch_size=config.batch_size,
split='train')
# before first update
v_loss, v_ppl, v_nll, v_kl, v_w = do_evaluation(meta_net, val_iter)
train_loss_before.append(v_ppl)
batch_loss_before += v_loss
# Update fast nets
val_loss, val_ppl, _, _, _ = do_learning_fix_step(meta_net, train_iter, val_iter,
iterations=config.meta_iteration)
train_loss_meta.append(val_ppl)
batch_loss += val_loss
# log
# reset
meta_net.load_state_dict({name: weights_original[name] for name in weights_original})
writer.add_scalars('loss_before', {'train_loss_before': np.mean(train_loss_before)}, meta_iteration)
writer.add_scalars('loss_meta', {'train_loss_meta': np.mean(train_loss_meta)}, meta_iteration)
# print(' loss_before: {}'.format(np.mean(train_loss_before)))
# print(' loss_meta: {}'.format(np.mean(np.mean(train_loss_meta))))
# meta_net.optim.update_lr(global_step) # 每个epoch更新学习率
# meta Update
if (config.meta_optimizer == 'noam'):
meta_optimizer.optimizer.zero_grad()
else:
meta_optimizer.zero_grad()
batch_loss_before /= meta_batch_size
batch_loss /= meta_batch_size
batch_loss.backward()
meta_net.global_step += 1
print('train_loss: {:.3f} ===> {:.3f}'.format(batch_loss_before, batch_loss))
# clip gradient
nn.utils.clip_grad_norm_(meta_net.parameters(), config.max_grad_norm)
meta_optimizer.step()
## Meta-Evaluation
if meta_iteration % 10 == 0:
print('--------- Validation ---------')
print('meta_iteration:', meta_iteration)
val_loss_before = []
val_loss_meta = []
val_batch_loss_before = 0
val_batch_loss_after = 0
val_nll_loss_before = 0
val_nll_loss_after = 0
val_kld_loss_before = 0
val_kld_loss_after = 0
val_kld_weight_before = 0
val_kld_weight_after = 0
weights_original = deepcopy(meta_net.state_dict())
for idx, per in enumerate(p.get_personas('valid')):
# num_of_dialog = p.get_num_of_dialog(persona=per, split='valid')
# for dial_i in range(num_of_dialog):
if config.fix_dialnum_train:
train_iter, val_iter = p.get_balanced_loader(persona=per, batch_size=config.batch_size, split='valid',
fold=0)
else:
train_iter, val_iter = p.get_data_loader(persona=per, batch_size=config.batch_size, split='valid',
fold=0)
# zero shot result
loss, ppl, nll, kld, w = do_evaluation(meta_net, val_iter)
val_loss_before.append(math.exp(loss))
val_batch_loss_before += loss
val_nll_loss_before += nll
val_kld_loss_before += kld
val_kld_weight_before += w
# mate tuning
val_loss, val_ppl, val_nll, val_kld, val_weight = do_learning_fix_step(meta_net, train_iter, val_iter,
iterations=config.meta_iteration)
val_loss_meta.append(math.exp(val_loss.item()))
val_batch_loss_after += val_loss
val_nll_loss_after += val_nll
val_kld_loss_after += val_kld
val_kld_weight_after += val_weight
# updated result
meta_net.load_state_dict({name: weights_original[name] for name in weights_original})
writer.add_scalars('loss_before', {'val_loss_before': np.mean(val_loss_before)}, meta_iteration)
writer.add_scalars('loss_meta', {'val_loss_meta': np.mean(val_loss_meta)}, meta_iteration)
val_batch_loss_before /= 100
val_nll_loss_before /= 100
val_kld_loss_before /= 100
val_kld_weight_before /= 100
val_batch_loss_after /= 100
val_nll_loss_after /= 100
val_kld_loss_after /= 100
val_kld_weight_after /= 100
print('val_loss: {:.3f} ===> {:.3f}'.format(val_batch_loss_before, val_batch_loss_after))
print('zero-shot: {:.3f} = {:.3f} + {:.3f} * {:.3f}'.format(
val_batch_loss_before,
val_nll_loss_before,
val_kld_weight_before,
val_kld_loss_before))
print('few-shot: {:.3f} = {:.3f} + {:.3f} * {:.3f}'.format(
val_batch_loss_after,
val_nll_loss_after,
val_kld_weight_after,
val_kld_loss_after))
# check early stop
# if np.mean(val_loss_meta) < best_loss:
if val_batch_loss_after < best_loss:
# best_loss = np.mean(val_loss_meta)
best_loss = val_batch_loss_after
stop_count = 0
meta_net.save_model(best_loss, meta_iteration, global_step)
else:
stop_count += 1
print('remain patience: {}'.format(patience - stop_count))
if stop_count > patience:
break
print('------------------------------')
|
from typing import Any
from grapl_analyzerlib.analyzer import Analyzer, OneOrMany
from grapl_analyzerlib.prelude import ProcessQuery, FileQuery, ProcessView
from grapl_analyzerlib.execution import ExecutionHit
class UnpackedFileExecuting(Analyzer):
    """Flags processes whose on-disk binary was created by an
    archive-unpacking tool (7zip/winrar/zip)."""

    def get_queries(self) -> OneOrMany[ProcessQuery]:
        # Process names considered to be archive unpackers.
        unpacker_names = ["7zip.exe", "winrar.exe", "zip.exe"]
        unpacker = ProcessQuery()
        # NOTE(review): each with_process_name(eq=name) call adds another
        # constraint to the *same* query object. If the intent is "name is
        # any of these", a single with_process_name(eq=unpacker_names) call
        # may be required -- confirm against grapl_analyzerlib's comparison
        # semantics.
        for name in unpacker_names:
            unpacker.with_process_name(eq=name)
        # Match processes whose executable file was created by an unpacker.
        return (
            ProcessQuery()
            .with_bin_file(
                FileQuery()
                .with_creator(
                    unpacker
                )
            )
        )

    def on_response(self, response: ProcessView, output: Any):
        """Emit an ExecutionHit for each matched process, lensed by the
        asset's hostname."""
        print(f'Unpacked process: {response.get_process_name()}')
        asset_id = response.get_asset().get_hostname()
        output.send(
            ExecutionHit(
                analyzer_name="Process Executing From Unpacked File",
                node_view=response,
                risk_score=15,
                lenses=asset_id,
            )
        )
|
class Animation():
    """Collects animation vertices entered interactively by the user."""

    def __init__(self):
        self.vertex_n = 0    # number of vertices added so far
        self.verticies = []  # list of Vertex objects (original spelling kept)

    def get_coord(self, V_kind):
        """Prompt the user for X/Y/Z coordinates and return them as a list.

        NOTE(review): input() returns strings, so the coordinates are stored
        as str rather than numbers -- confirm whether callers expect floats.
        """
        print("\n\t***\nFor vertex %s please enter:\n" % V_kind)
        x = input('X: ')
        y = input('Y: ')
        z = input('Z: ')
        pos = [x, y, z]
        return pos

    def add_Vertex(self):
        """Create a new Vertex, prompt for its zero position, and name it
        sequentially ("Vertex_1", "Vertex_2", ...)."""
        self.vertex_n +=1
        self.verticies.append(Vertex())
        self.verticies[self.vertex_n-1].p_zero = self.get_coord("position zero")
        self.verticies[self.vertex_n-1].name = "Vertex_"+ str(self.vertex_n)
        return
class Vertex(object):
    """A single animation vertex with a zero position and attached servos."""

    def __init__(self, p_zero = None, position = None, rest = None, name = None):
        self.p_zero = p_zero      #initial vertex position and this vertex's zero
        self.position = position  #current vertex position
        self.rest = rest          #set rest position for the vertex
        self.servos = []          #list of servo objects
        self.servo_n = 0          #servo count on vertex
        self.name = name          #Vertex name

    def add_servo(self):
        """Attach a new Servo to this vertex.

        NOTE(review): passes self.name (a string) as the Servo's `vertex`
        argument, and `return print(...)` always returns None -- placeholder
        behavior ("Not Done") to be finished.
        """
        self.servos.append(Servo(self.name))
        return print("Not Done")
class Servo(object):
    """A single servo attached to an animation vertex."""

    def __init__(self, vertex, vector = None, rate = None, track = None):
        # BUG FIX: the original assigned the *class* `Vertex` instead of the
        # `vertex` argument, so every servo lost its link to its vertex.
        self.vertex = vertex  #What vertex is this servo linked to
        self.vector = vector  #Input for movement
        self.rate = rate      #Acceleration
        self.track = track    #axis and range of movement
# Interactive demo: build one animation vertex from user input and echo its
# zero position back.
mouth = Animation()
mouth.add_Vertex()
for i, obj in enumerate(mouth.verticies):
    print ("Zero position for", obj.name, "is", obj.p_zero)
"""Helper variable or function for UI Elements."""
import numpy as np
TWO_PI = 2 * np.pi
def clip_overflow(textblock, width, side='right'):
    """Clips overflowing text of TextBlock2D with respect to width.

    Parameters
    ----------
    textblock : TextBlock2D
        The textblock object whose text needs to be clipped.
    width : int
        Required width of the clipped text.
    side : str, optional
        Clips the overflowing text according to side.
        It takes values "left" or "right".

    Returns
    -------
    clipped text : str
        Clipped version of the text.
    """
    original_str = textblock.message
    prev_bg = textblock.have_bg
    # check_overflow() mutates textblock.message as a side effect: on return
    # it holds the clipped string (with the '...' postfix) when clipping
    # occurred, so only the background flag needs restoring here.
    clip_idx = check_overflow(textblock, width, '...', side)
    if clip_idx == 0:
        # Text already fits; the message was left untouched.
        return original_str
    textblock.have_bg = prev_bg
    return textblock.message
def wrap_overflow(textblock, wrap_width, side='right'):
    """Wraps overflowing text of TextBlock2D with respect to width.

    Parameters
    ----------
    textblock : TextBlock2D
        The textblock object whose text needs to be wrapped.
    wrap_width : int
        Required width of the wrapped text.
    side : str, optional
        Clips the overflowing text according to side.
        It takes values "left" or "right".

    Returns
    -------
    wrapped text : str
        Wrapped version of the text.
    """
    original_str = textblock.message
    str_copy = textblock.message
    prev_bg = textblock.have_bg
    # Indices (into the original string) where newlines will be inserted.
    wrap_idxs = []
    wrap_idx = check_overflow(textblock, wrap_width, '', side)
    if wrap_idx == 0:
        # Text already fits; nothing to wrap.
        return original_str
    wrap_idxs.append(wrap_idx)
    # Repeatedly clip the remaining tail until it fits, accumulating the
    # absolute break positions.
    while wrap_idx != 0:
        str_copy = str_copy[wrap_idx:]
        textblock.message = str_copy
        wrap_idx = check_overflow(textblock, wrap_width, '', side)
        if wrap_idx != 0:
            wrap_idxs.append(wrap_idxs[-1]+wrap_idx+1)
    # Insert newlines at every recorded break position.
    for idx in wrap_idxs:
        original_str = original_str[:idx] + '\n' + original_str[idx:]
    textblock.message = original_str
    textblock.have_bg = prev_bg
    return textblock.message
def check_overflow(textblock, width, overflow_postfix='',
                   side='right'):
    """Checks if the text is overflowing.

    Binary-searches the longest prefix (or suffix, for side='left') that fits
    within `width`. As a side effect, textblock.message is left set to the
    clipped string (plus `overflow_postfix`) when overflow occurred.

    Parameters
    ----------
    textblock : TextBlock2D
        The textblock object whose text is to be checked.
    width: int
        Required width of the text.
    overflow_postfix: str, optional
        Postfix to be added to the text if it is overflowing.
    side : str, optional
        Which side to clip from; "left" or "right".

    Returns
    -------
    mid_ptr: int
        Overflow index of the text (0 when the text already fits).
    """
    side = side.lower()
    if side not in ['left', 'right']:
        raise ValueError("side can only take values 'left' or 'right'")
    original_str = textblock.message
    start_ptr = 0
    mid_ptr = 0
    end_ptr = len(original_str)
    prev_bg = textblock.have_bg
    # Disable the background while measuring so it doesn't affect size.
    textblock.have_bg = False
    if side == 'left':
        # Work on the reversed string so the same prefix search applies.
        original_str = original_str[::-1]
    if textblock.size[0] <= width:
        textblock.have_bg = prev_bg
        return 0
    while start_ptr < end_ptr:
        mid_ptr = (start_ptr + end_ptr)//2
        textblock.message = original_str[:mid_ptr] + overflow_postfix
        if textblock.size[0] < width:
            start_ptr = mid_ptr
        elif textblock.size[0] > width:
            end_ptr = mid_ptr
        # Terminate when the search interval stops moving or the width is an
        # exact fit.
        if mid_ptr == (start_ptr + end_ptr) // 2 or textblock.size[0] == width:
            if side == 'left':
                # Un-reverse the clipped message for left-side clipping.
                textblock.message = textblock.message[::-1]
            return mid_ptr
|
from flask import current_app
from flask import request as current_request
from snosearch.adapters.flask.requests import RequestAdapter
from snosearch.adapters.flask.responses import ResponseAdapter
def make_search_request(request=None):
    """Build a snosearch request from a Flask request (default: the current
    one), attaching the application's registry and a fresh response adapter."""
    raw = current_request if request is None else request
    registry = current_app.registry
    if isinstance(raw, RequestAdapter):
        search_request = raw
    else:
        search_request = RequestAdapter(raw)
    search_request.registry = registry
    search_request.response = ResponseAdapter()
    return search_request
|
from django.contrib import admin
from django.urls import path, include
from .views import *
# URL routes for the hotel listing and authentication endpoints.
urlpatterns = [
    path('hotels/all', HotelAllView.as_view()),  # list all hotels
    path('hotels/<int:hotel_id>', show_hotel),   # detail view for one hotel
    path('login/', user_login),
    path('register/', user_register)
]
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import webapp2
from dashboard.pinpoint.models import job as job_module
from dashboard.pinpoint.models import task as task_module
from dashboard.pinpoint.models import event as event_module
from dashboard.pinpoint.models.tasks import evaluator
class Run(webapp2.RequestHandler):
  """Handler that runs a Pinpoint job."""

  def post(self, job_id):
    """Kick off the job identified by job_id.

    Jobs flagged for the execution engine are driven by evaluating an
    'initiate' event; all other jobs use the legacy Job.Run() path.
    """
    job = job_module.JobFromId(job_id)
    if job.use_execution_engine:
      event = event_module.Event(type='initiate', target_task=None, payload={})
      logging.info('Execution Engine: Evaluating initiate event.')
      task_module.Evaluate(job, event, evaluator.ExecutionEngine(job))
      logging.info('Execution Engine: Evaluation done.')
    else:
      job.Run()
|
from setuptools import setup

# Packaging metadata for the ISHNE Holter ECG file library.
setup( name='ishneholterlib',
       version='2017.04.11',  # date-based version (YYYY.MM.DD)
       description='A library to work with ISHNE-formatted Holter ECG files',
       url='https://bitbucket.org/atpage/ishneholterlib',
       author='Alex Page',
       author_email='alex.page@rochester.edu',
       license='MIT',
       packages=['ishneholterlib'],
       install_requires=['numpy', 'PyCRC'],  # third-party runtime dependencies
       keywords='ISHNE Holter ECG EKG',
       zip_safe=False )
|
"""RPC client, aioamqp implementation of RPC"""
import abc
import asyncio
import aioamqp
async def declare_and_bind(channel, exchange_name, queue_name):
    """Declare a fanout exchange and a queue, then bind them together with an
    empty routing key (fanout ignores routing keys anyway)."""
    await channel.exchange_declare(
        exchange_name=exchange_name,
        type_name='fanout'
    )
    await channel.queue_declare(queue_name=queue_name)
    await channel.queue_bind(
        exchange_name=exchange_name,
        queue_name=queue_name,
        routing_key=''
    )
class AbstractHandler(abc.ABC):
    """Interface for AMQP message handlers consumed by RpcConsumer."""

    @abc.abstractmethod
    async def handle(self, channel, body, envelope, properties):
        """Process one delivered message (aioamqp basic_consume signature)."""
        # BUG FIX: the original did `raise NotImplemented()` -- NotImplemented
        # is a sentinel value, not an exception, so calling/raising it fails
        # with a TypeError. NotImplementedError is the intended exception.
        raise NotImplementedError()
def bicondition(a: bool, b: bool) -> bool:
    """
    >>> bicondition(True, True)
    True
    >>> bicondition(True, False)
    False
    >>> bicondition(False, True)
    False
    >>> bicondition(False, False)
    True
    """
    # Logical biconditional: true exactly when a and b agree.
    return b if a else not b
class RpcConsumer:
    """Consumes messages from an input exchange/queue and dispatches each one
    to an AbstractHandler. An optional output exchange/queue pair -- which
    must be supplied together or not at all -- is declared on connect for
    replies.
    """

    def __init__(
            self,
            *,
            input_exchange_name: str,
            input_queue_name: str,
            output_exchange_name: str=None,
            output_queue_name: str=None,
            handler: AbstractHandler,
            loop=None,
            chanel_check_delay=10):
        # The output exchange and queue must be configured together or not at
        # all. BUG FIX (naming): the original called this flag
        # `is_valid_output_params`, but the expression is True exactly when
        # the configuration is *invalid* (only one of the two names given).
        has_partial_output_config = not bicondition(
            bool(output_exchange_name),
            bool(output_queue_name)
        )
        if has_partial_output_config:
            raise RuntimeError('Bad output parameters')
        if loop is None:
            loop = asyncio.get_event_loop()
        self._loop = loop
        self._input_exchange_name = input_exchange_name
        self._input_queue_name = input_queue_name
        self._output_exchange_name = output_exchange_name
        self._output_queue_name = output_queue_name
        self._transport = None
        self._protocol = None
        self._channel = None
        self._handler = handler
        # NOTE: attribute keeps the original "chanel" spelling for backward
        # compatibility with existing callers.
        self.chanel_check_delay = chanel_check_delay

    async def connect(self):
        """Open the AMQP connection and declare/bind the queues.

        (An `__init__` method can't be a coroutine, hence this separate step.)
        """
        self._transport, self._protocol = await aioamqp.connect()
        self._channel = await self._protocol.channel()
        await declare_and_bind(
            self._channel,
            self._input_exchange_name,
            self._input_queue_name
        )
        if self._output_exchange_name and self._output_queue_name:
            await declare_and_bind(
                self._channel,
                self._output_exchange_name,
                self._output_queue_name
            )

    async def start(self):
        """Begin consuming; returns once the channel closes (checked every
        chanel_check_delay seconds)."""
        await self._channel.basic_consume(
            self._handler.handle,
            queue_name=self._input_queue_name,
        )
        while self._channel.is_open:
            await asyncio.sleep(self.chanel_check_delay)
|
import urllib.request as urllib2
from bs4 import BeautifulSoup
import collections
from datetime import datetime
from multiprocessing import Pool
from operator import itemgetter
import multiprocessing
# Module-level accumulators; main() defines locals of the same names, so
# these are effectively unused by the current flow.
ans = []
name_question = []
def link_generate(name_question, user, list_links):
    """Append a SPOJ status-page URL to list_links for each problem code.

    Args:
        name_question (list[str]): problem codes; empty strings are skipped.
        user (str): SPOJ user name.
        list_links (list[str]): output list, mutated in place.
    """
    # BUG FIX (clarity): the original shadowed the `name_question` parameter
    # with its own loop variable and used a dummy `a = 1` branch instead of
    # simply skipping blank entries.
    for problem in name_question:
        if problem == '':
            continue
        quote_page = "http://www.spoj.com/status/%s,%s/" % (problem, user)
        list_links.append(quote_page)
#crawling pages by link from list
def crawl(name_question):
    """Fetch one SPOJ status page and extract the latest submission time.

    Args:
        name_question (str): full status-page URL for one problem/user pair.

    Returns:
        dict: {"time": <submission time text>, "name": <the URL>} when the
        page loads with HTTP 200; an empty dict otherwise.
    """
    l = {}
    quote_page = name_question
    #print(quote_page)
    page = urllib2.urlopen(quote_page)
    if page.getcode() == 200:
        soup = BeautifulSoup(page, "html.parser")
        # The last status_sm cell on the page holds the submission timestamp.
        name_box = soup.find_all("td", class_="status_sm")[-1]
        name = name_box.text.replace('\n','')#time
        l.update({"time" : name, "name" : name_question})#inserting in list (time of submit ,name of question)
        #print(l)
    return l
#crawling pages by link from list
def main(user):
    """Crawl a SPOJ user's profile and return their solved problems sorted by
    submission time.

    Args:
        user (str): SPOJ user name.

    Returns:
        list[dict]: one {"time": ..., "name": <status URL>} per solved
        problem, sorted ascending by the "time" string.
    """
    print("hi")
    quote_page = "http://www.spoj.com/users/%s/"%user #Replace it with your user name
    page = urllib2.urlopen(quote_page)
    soup = BeautifulSoup(page, "html.parser")
    name_question = [] #list of solved problems
    list_links=[]
    #extract name of question from main page of user
    #print(soup.find_all("table", class_="table table-condensed"))
    for name in soup.find_all("table", class_="table table-condensed"):
        name = name.text.replace('\n',' ')
        #print(name)
        a = len(name)
        c=0
        d=0
        lis = []
        # Scan character-by-character, collecting each whitespace-separated
        # token (a problem code) into name_question.
        for b in range(a):
            if(name[b]!=' '):
                if(c==0):
                    lis.append(name[b])
                    c+=1
                elif(d==0 and c!=0):
                    lis.append(name[b])
            elif(name[b]==' ' and c!=0):
                c=0
                my = ''.join(lis)
                print(my)
                lis = []
                name_question.append(my)
    #print(name_question)
    # for name in soup.find_all('td'):
    #     name_question.append(name.text)
    # #extract name of question from main page of user
    #as each url has fixed pattern , exploit that property and go to each problem page(open the page) and scrap time of each page in dictonary along with their name
    i=0
    ans = []
    print(name_question)
    link_generate(name_question,user,list_links)
    print(list_links)
    startTime = datetime.now()
    # Crawl all status pages in parallel (10 worker processes).
    with Pool(processes=10) as p:
        ans = p.map(crawl,list_links)
        p.close()
        p.join()
    print(datetime.now() - startTime) #time taken
    ans.sort(key=itemgetter('time'))
    # #print(ans)
    # for ans in ans:
    #     #print(ans['name'])
    #     f = open('submission_list.csv', 'a')
    #     f.write(ans['name'])
    #     f.write('\n')
    print("Time taken in crawling account")
    print(datetime.now() - startTime) #time taken
    return(ans)
import boto3
from boto3.dynamodb.conditions import Key
from ask_sdk_dynamodb.adapter import DynamoDbAdapter
import os
import string
import random
import json
# DynamoDB configuration comes from the hosting environment (these env var
# names match the Alexa-hosted-skill convention -- TODO confirm deployment).
ddb_region = os.environ.get('DYNAMODB_PERSISTENCE_REGION')
ddb_table_name = os.environ.get('DYNAMODB_PERSISTENCE_TABLE_NAME')
ddb_resource = boto3.resource('dynamodb', region_name=ddb_region)
# ASK SDK persistence adapter; the table must already exist (create_table=False).
dynamodb_adapter = DynamoDbAdapter(
    table_name=ddb_table_name, create_table=False, dynamodb_resource=ddb_resource)
class DynamoDBLayer:
    """Thin persistence layer for writing appointment records to DynamoDB."""

    @staticmethod
    def putAppointment(doctor, sessionAttributes, appointment_time):
        """Store one appointment item and return the DynamoDB response.

        :param doctor: doctor name, used both as an attribute and as the
            prefix of the generated item id
        :param sessionAttributes: dict holding 'user_name', 'user_age',
            'user_height', 'user_weight', 'user_temperature' and 'complain'
        :param appointment_time: appointment time; stored as str()
        :returns: the raw put_item response dict.

        Declared @staticmethod because the original function had no `self`
        parameter and is evidently called on the class.
        """
        # `item_id` instead of `id` to avoid shadowing the builtin.
        item_id = getRandomId(doctor)
        dynamodb = boto3.client('dynamodb')
        # BUG FIX: the original statement ended with a trailing comma after
        # put_item(...), which silently wrapped the response in a 1-tuple.
        result = dynamodb.put_item(
            TableName=ddb_table_name,
            Item={
                'id': {'S': item_id},
                'doctor': {'S': doctor},
                'user_name': {'S': sessionAttributes['user_name']},
                'age': {'S': str(sessionAttributes['user_age'])},
                'height': {'S': str(sessionAttributes['user_height'])},
                'weight': {'S': str(sessionAttributes['user_weight'])},
                'temperature': {'S': str(sessionAttributes['user_temperature'])},
                'complain': {'S': sessionAttributes['complain']},
                'appointment_time': {'S': str(appointment_time)}
            }
        )
        return result
def getRandomId(doctor):
    """Build an appointment id of the form '<doctor>_<7 random chars>'.

    The suffix is drawn (with replacement) from uppercase letters and digits.
    """
    alphabet = string.ascii_uppercase + string.digits
    suffix = ''.join(random.choices(alphabet, k=7))
    return doctor + '_' + str(suffix)
|
"""Service integrations."""
from .legacy_metadata import LegacyMetadataService
from .legacy_pdf import LegacyPDFService
from .legacy_source import LegacySourceService |
from termcolor import colored
from time import time
import sys
from solutions import day_01, day_02, day_03, day_04, day_05, day_06, day_07, \
day_08
# Puzzle titles, indexed by day-1. len(NAMES) also drives the "run every day"
# loop at the bottom of the file, so keep one entry per implemented day.
NAMES = (
    "No Time for a Taxicab",
    "Bathroom Security",
    "Squares With Three Sides",
    "Security Through Obscurity",
    "How About a Nice Game of Chess?",
    "Signals and Noise",
    "Internet Protocol Version 7",
    "Two-Factor Authentication",
)
def test(day):
    """Run a day's `test()` self-check and return a colored PASS/FAIL tag.

    The solution module day_XX is looked up in this module's globals rather
    than built with eval(), which avoids evaluating a constructed string.
    """
    try:
        day_module = globals()[f"day_{day:02d}"]
        day_module.test()
        return colored("PASS", "green")
    except AssertionError:
        return colored("FAIL", "red")
def solve(day, part):
    """Return the answer for the given day/part using its puzzle input file.

    Reads inputs/XX.txt, then dispatches to day_XX.part_N. Uses getattr on
    the imported module instead of eval(), and a `with` block so the input
    file is closed deterministically.
    """
    with open(f"inputs/{day:02d}.txt") as f:
        data = f.read().strip()
    return getattr(globals()[f"day_{day:02d}"], f"part_{part}")(data)
def run(day):
    """Print a day's title, its self-test status, and both puzzle answers."""
    print(f"Day {day}: {NAMES[day-1]} ({test(day)})")
    print(f" Part 1: {colored(solve(day, 1), 'cyan')}")
    print(f" Part 2: {colored(solve(day, 2), 'cyan')}\n")
# Script entry: run every day when no argument is given, otherwise only the
# requested day; report total wall-clock time at the end.
start_time = time()
if len(sys.argv) == 1:
    for day in range(len(NAMES)):
        run(day+1)
else:
    run(int(sys.argv[1]))
end_time = time()
time_str = colored(f"{end_time - start_time:.3f}s", "cyan")
# time_str already carries the "s" unit; the original appended " seconds"
# and printed e.g. "Total time: 0.123s seconds".
print(f"Total time: {time_str}")
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import Dict
from ..core._imperative_rt import OpDef
from ..core.ops import builtin
from ..version import __version__
# Registry mapping OpDef subclasses to loader functions that migrate a
# serialized param dict before deserialization. Empty here -- presumably
# populated by other modules at import time; TODO confirm.
OPDEF_PARAM_LOADER = {}
def get_opdef_state(obj: OpDef) -> Dict:
    """Serialize an OpDef into a dict tagged with its class and the current
    MegEngine version, the inverse of load_opdef_from_state()."""
    serialized = obj.__getstate__()
    serialized.update(type=type(obj), version=__version__)
    return serialized
def load_opdef_from_state(state: Dict) -> OpDef:
    """Reconstruct an OpDef from a dict produced by get_opdef_state().

    If a loader is registered for the OpDef subclass, it first migrates the
    state dict (e.g. across serialization-format versions).
    """
    assert "type" in state and issubclass(state["type"], OpDef)
    assert "version" in state
    opdef_cls = state.pop("type")
    loader = OPDEF_PARAM_LOADER.get(opdef_cls)
    if loader is not None:
        state = loader(state)
    state.pop("version")
    instance = opdef_cls()
    instance.__setstate__(state)
    return instance
|
from __future__ import unicode_literals
import unittest
from mock import patch
from snips_nlu.constants import DATA_PATH
from snips_nlu.resources import (
MissingResource, _RESOURCES, _get_resource, clear_resources,
load_resources)
class TestResources(unittest.TestCase):
    """Tests for snips_nlu resource loading and lookup.

    Uses the module-level resource_exists() helper defined below the class.
    """

    def test_should_load_resources_from_data_path(self):
        # Given
        clear_resources()
        # When: plain language code
        load_resources("en")
        # Then
        self.assertTrue(resource_exists("en", "gazetteers"))

    def test_should_load_resources_from_package(self):
        # Given
        clear_resources()
        # When: importable package name
        load_resources("snips_nlu_en")
        # Then
        self.assertTrue(resource_exists("en", "gazetteers"))

    def test_should_load_resources_from_path(self):
        # Given
        clear_resources()
        resources_path = DATA_PATH / "en"
        # When: filesystem path
        load_resources(str(resources_path))
        # Then
        self.assertTrue(resource_exists("en", "gazetteers"))

    def test_should_fail_loading_unknown_resources(self):
        # Given
        unknown_resource_name = "foobar"
        # When / Then
        with self.assertRaises(MissingResource):
            load_resources(unknown_resource_name)

    def test_should_raise_missing_resource_when_language_not_found(self):
        # Given: no languages loaded at all
        mocked_value = dict()
        # When
        with patch("snips_nlu.resources._RESOURCES", mocked_value):
            with self.assertRaises(MissingResource):
                _get_resource("en", "foobar")

    def test_should_raise_missing_resource_when_resource_not_found(self):
        # Given: language present but resource missing
        mocked_value = {"en": dict()}
        # When
        with patch("snips_nlu.resources._RESOURCES", mocked_value):
            with self.assertRaises(MissingResource):
                _get_resource("en", "foobar")
def resource_exists(language, resource_name):
    """Return True when the language's resources hold a non-None entry for
    resource_name. Raises KeyError if the language itself was never loaded."""
    language_resources = _RESOURCES[language]
    return language_resources.get(resource_name) is not None
|
"""Text formatting constants."""
# Text Effects
RESET = 0
BOLD = 1
FAINT = 2
ITALIC = 3
UNDERLINE = 4
REVERSE = 7
CONCEAL = 8
STRIKEOUT = 9
# Colors
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
# Modifiers
BRIGHT = 60
BR = 60
BACKGROUND = 10
BG = 10
# Standard fonts
SM = "small"
STD = "standard"
BIG = "big"
# Isometric fonts
ISO1 = "isometric1"
ISO2 = "isometric2"
ISO3 = "isometric3"
ISO4 = "isometric4"
# Other fonts
SA = "contessa"
DOOM = "doom"
DP = "drpepper"
L3D = "larry3d"
SMISO = "smisome1"
KB = "smkeyboard"
SLANT = "slant"
SMSLANT = "smslant"
|
import time
import unittest
from selenium import webdriver
from selenium.webdriver.common.by import By
class TestLogin(unittest.TestCase):
    """Selenium UI tests for the-internet.herokuapp.com login page.

    NOTE(review): the fixed time.sleep(1) calls look like visual pacing;
    WebDriverWait/expected_conditions would be more reliable.
    """

    # Locators as (By strategy, selector) pairs, unpacked with * at call sites.
    user = (By.ID, 'username')
    pwd = (By.ID, 'password')
    login = (By.CLASS_NAME, 'radius')
    msg = (By.CLASS_NAME, 'subheader')  # result banner; unused in the tests shown

    def setUp(self) -> None:
        # Hard-coded local chromedriver path -- breaks on other machines;
        # consider a PATH lookup or webdriver-manager.
        self.driver = webdriver.Chrome('/Users/paulb/Documents/GitHub/py-test-automation-course/resources/chromedriver')
        self.driver.get("https://the-internet.herokuapp.com/login")

    def test_invalid_login(self):
        # Submits credentials expected to fail; no assertion is made on the
        # outcome -- NOTE(review): add a check on the error banner.
        self.driver.find_element(*self.user).send_keys('test@gmail.com')
        time.sleep(1)
        self.driver.find_element(*self.pwd).send_keys('abc')
        time.sleep(1)
        self.driver.find_element(*self.login).click()
        time.sleep(1)

    def test_valid_login(self):
        # Site-documented demo credentials.
        self.driver.find_element(*self.user).send_keys('tomsmith')
        time.sleep(1)
        self.driver.find_element(*self.pwd).send_keys('SuperSecretPassword!')
        time.sleep(1)
        self.driver.find_element(*self.login).click()
        time.sleep(1)

    def test_logout(self):
        # Reuses the valid-login flow to reach the logged-in page first.
        self.test_valid_login()
        logout = (By.CSS_SELECTOR, '#content > div > a > i')
        self.driver.find_element(*logout).click()
        time.sleep(1)

    def tearDown(self) -> None:
        self.driver.quit()
if __name__ == '__main__':
    # Run through unittest so setUp/tearDown create and quit the driver.
    # The original instantiated TestLogin() and called test_invalid_login()
    # directly, skipping setUp -- self.driver was never created and the call
    # failed with AttributeError.
    unittest.main()
|
#* A binary tree is symmetric if the left subtree from the root and the right subtree from the root are mirrors of each other.
#1. All we need to do is make a new function, which will hold "copies" of the same tree
#2. We then simply need to check in each iteration if the values of the nodes are the same and most importantly we need to check if the left branch of the left subtree == right branch of the right subtree and vice versa.
#3. This is the most important thing to realise! The recursive checkSubtrees(t1.left, t2.right) and checkSubtrees(t1.right, t2.left) are the most important calls here as they check for the symmetry between both left and right subtrees by comparing opposite ends.
def isSymmetric(self, root):
    """Return True when the tree rooted at `root` is a mirror of itself.

    LeetCode-style method signature (hence the unused `self`). Passing the
    same root twice lets checkSubtrees compare the left subtree against the
    mirrored right subtree.
    """
    return checkSubtrees(root, root)
def checkSubtrees(t1, t2):
    """Return True when the trees t1 and t2 are mirror images of each other.

    Two empty trees mirror each other; an empty and a non-empty tree do not.
    Otherwise the roots must match and each side must mirror the opposite
    side of the other tree.
    """
    if t1 is None or t2 is None:
        # Mirrors only when both branches are absent.
        return t1 is t2
    return (t1.val == t2.val
            and checkSubtrees(t1.left, t2.right)
            and checkSubtrees(t1.right, t2.left))
import os,unittest
import pandas as pd
from igf_data.illumina.samplesheet import SampleSheet
from igf_data.utils.fileutils import get_temp_dir,remove_dir
from igf_data.utils.samplesheet_utils import get_formatted_samplesheet_per_lane
from igf_data.utils.samplesheet_utils import samplesheet_validation_and_metadata_checking
class SamplesheetUtils_testA(unittest.TestCase):
    """Tests for get_formatted_samplesheet_per_lane with 10X dual indexes.

    Relies on fixture files under data/singlecell_data/; lane 5 holds
    single-cell dual-index samples, lane 3 regular dual-index samples.
    """

    def setUp(self):
        self.temp_dir = get_temp_dir()
        self.platform_name = 'HISEQ4000'
        self.samplesheet_file = 'data/singlecell_data/SampleSheet_dual.csv'
        self.sc_index_json = 'data/singlecell_data/chromium-shared-sample-indexes-plate_20180301.json'
        self.sc_dual_index_json = 'data/singlecell_data/chromium_dual_indexes_plate_TT_NT_20210209.json'

    def tearDown(self):
        remove_dir(self.temp_dir)

    def test_get_formatted_samplesheet_per_lane1(self):
        # No index rules: indexes are expected unchanged from the fixtures.
        output_list = \
            get_formatted_samplesheet_per_lane(
                samplesheet_file=self.samplesheet_file,
                singlecell_barcode_json=self.sc_index_json,
                singlecell_dual_barcode_json=self.sc_dual_index_json,
                runinfo_file='data/singlecell_data/RunInfo_dual.xml',
                output_dir=self.temp_dir,
                platform=self.platform_name,
                filter_lane=None,
                single_cell_tag='10X',
                index1_rule=None,
                index2_rule=None)
        df = pd.DataFrame(output_list)
        # Lane 5: 10-base dual indexes -> no masking of index cycles.
        sa = SampleSheet(df[df['lane_id']=='5']['samplesheet_file'].values[0])
        sdf = pd.DataFrame(sa._data)
        #print(sdf.to_dict(orient='records'))
        self.assertEqual(df[df['lane_id']=='5']['bases_mask'].values[0],'y150n1,i10,i10,y150n1')
        self.assertEqual(sdf[sdf['Sample_ID']=='IGF0009']['index'].values[0],'GTGGCCTCAT')
        self.assertEqual(sdf[sdf['Sample_ID']=='IGF0009']['index2'].values[0],'TCACTTTCGA')
        # Lane 3: 8-base indexes padded with n2 in the mask.
        sa = SampleSheet(df[df['lane_id']=='3']['samplesheet_file'].values[0])
        self.assertEqual(df[df['lane_id']=='3']['bases_mask'].values[0],'y150n1,i8n2,i8n2,y150n1')
        sdf = pd.DataFrame(sa._data)
        self.assertEqual(sdf[sdf['Sample_ID']=='IGF0001']['index'].values[0],'ATTACTCG')
        self.assertEqual(sdf[sdf['Sample_ID']=='IGF0001']['index2'].values[0],'AGGCTATA')

    def test_get_formatted_samplesheet_per_lane2(self):
        # REVCOMP on index2: lane 3's index2 flips (AGGCTATA -> TATAGCCT)
        # while the 10X lane-5 indexes stay as looked up from the dual plate.
        output_list = \
            get_formatted_samplesheet_per_lane(
                samplesheet_file=self.samplesheet_file,
                singlecell_barcode_json=self.sc_index_json,
                singlecell_dual_barcode_json=self.sc_dual_index_json,
                runinfo_file='data/singlecell_data/RunInfo_dual.xml',
                output_dir=self.temp_dir,
                platform=self.platform_name,
                filter_lane=None,
                single_cell_tag='10X',
                index1_rule=None,
                index2_rule='REVCOMP')
        df = pd.DataFrame(output_list)
        #print(df.to_dict(orient='records'))
        sa = SampleSheet(df[df['lane_id']=='5']['samplesheet_file'].values[0])
        sdf = pd.DataFrame(sa._data)
        #print(sdf.to_dict(orient='records'))
        self.assertEqual(df[df['lane_id']=='5']['bases_mask'].values[0],'y150n1,i10,i10,y150n1')
        self.assertEqual(sdf[sdf['Sample_ID']=='IGF0009']['index'].values[0],'GTGGCCTCAT')
        self.assertEqual(sdf[sdf['Sample_ID']=='IGF0009']['index2'].values[0],'TCACTTTCGA')
        sa = SampleSheet(df[df['lane_id']=='3']['samplesheet_file'].values[0])
        self.assertEqual(df[df['lane_id']=='3']['bases_mask'].values[0],'y150n1,i8n2,i8n2,y150n1')
        sdf = pd.DataFrame(sa._data)
        self.assertEqual(sdf[sdf['Sample_ID']=='IGF0001']['index'].values[0],'ATTACTCG')
        self.assertEqual(sdf[sdf['Sample_ID']=='IGF0001']['index2'].values[0],'TATAGCCT')
# Allow running this test module directly.
if __name__=='__main__':
    unittest.main()
import torch.nn as nn
import torch
import torch.nn.functional as F
import pdb
from adet.modeling.layers import qconv, norm, actv, shuffle, concat, split, add
from functools import partial
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY as BACKBONES
from detectron2.modeling.backbone import Backbone as BaseModule
class _ShuffleBottleneck(nn.Module):
    """ShuffleNet bottleneck: 1x1 group conv -> channel shuffle -> 3x3 grouped
    conv (optionally strided) -> 1x1 group conv, each followed by BN + clipped
    ReLU, all built from the quantization-aware layer factories."""

    def __init__(self, in_channels, out_channels, mid_channels=None, mid_stride=1, mid_groups=6, out_groups=2, args=None):
        super(_ShuffleBottleneck, self).__init__()
        # Bind shared quantization args into the layer constructors once.
        QConv2d = partial(qconv, args=args)
        BatchNorm2d = partial(norm, args=args)
        QClippedReLU = partial(actv, args=args)
        QShuffleOp = partial(shuffle, args=args)
        # NOTE(review): mid_groups is accepted but never used -- the 3x3 conv
        # uses mid_channels//4 groups instead; confirm that is intentional.
        mid_channels = mid_channels or in_channels
        self.conv1 = QConv2d(in_channels, mid_channels, kernel_size=1, stride=1, padding=0, groups=2, bias=False)
        self.bn1 = BatchNorm2d(mid_channels)
        self.relu1 = QClippedReLU()
        self.shfl_op = QShuffleOp(2)
        self.conv2 = QConv2d(mid_channels, mid_channels, kernel_size=3, stride=mid_stride, padding=1, groups=mid_channels//4, bias=False)
        self.bn2 = BatchNorm2d(mid_channels)
        self.relu2 = QClippedReLU()
        self.conv3 = QConv2d(mid_channels, out_channels, kernel_size=1, stride=1, padding=0, groups=out_groups, bias=False)
        self.bn3 = BatchNorm2d(out_channels)
        self.relu3 = QClippedReLU()

    def forward(self, x):
        x = self.relu1(self.bn1(self.conv1(x)))
        # Shuffle channels between the two groups of conv1's output.
        x = self.shfl_op(x)
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        return x
class _ShuffleResUnitC(nn.Module):
    """C is short for Concat.

    Downsampling unit: a stride-2 max-pooled identity branch is concatenated
    with a stride-2 bottleneck branch, so output channels =
    in_channels + (out_channels - in_channels) = out_channels.
    """
    def __init__(self, in_channels, out_channels, mid_channels=None, mid_groups=6, args=None):
        super(_ShuffleResUnitC, self).__init__()
        QConcat = partial(concat, args=args)
        # NOTE(review): mid_groups is accepted but not forwarded -- verify.
        mid_channels = mid_channels or in_channels
        self.pooling = nn.MaxPool2d(kernel_size=2, stride=2)
        self.bottleneck = _ShuffleBottleneck(in_channels, out_channels - in_channels, mid_channels, mid_stride=2, args=args)
        self.concat = QConcat()

    def forward(self, x):
        return self.concat(self.pooling(x), self.bottleneck(x))
class _ShuffleResUnitE_branch(nn.Module):
    """Transform branch of _ShuffleResUnitE: 1x1 conv -> 3x3 grouped conv ->
    1x1 conv with BN + clipped ReLU after each, channel count unchanged."""

    def __init__(self, in_channels, mid_stride, args=None):
        super(_ShuffleResUnitE_branch, self).__init__()
        QConv2d = partial(qconv, args=args)
        BatchNorm2d = partial(norm, args=args)
        QClippedReLU = partial(actv, args=args)
        self.conv1 = QConv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = BatchNorm2d(in_channels)
        self.relu1 = QClippedReLU()
        self.conv2 = QConv2d(in_channels, in_channels, kernel_size=3, stride=mid_stride, padding=1, groups=in_channels//4, bias=False)
        self.bn2 = BatchNorm2d(in_channels)
        self.relu2 = QClippedReLU()
        self.conv3 = QConv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = BatchNorm2d(in_channels)
        self.relu3 = QClippedReLU()

    def forward(self, x):
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        return x
class _ShuffleResUnitE(nn.Module):
    """E is short for Eltwise-Add.

    ShuffleNetV2-style unit: the input is split in half along channels, one
    half passes through unchanged, the other through a small conv branch;
    halves are re-concatenated and channel-shuffled.
    NOTE(review): despite the name, halves are concatenated, not added, and
    `stride` only reaches the branch (no downsampling path for stride=2 on
    the identity half) -- confirm only stride=1 is used in practice.
    """
    def __init__(self, in_channels, out_channels, stride, mid_channels=None, mid_groups=6, args=None):
        super(_ShuffleResUnitE, self).__init__()
        QConcat = partial(concat, args=args)
        QShuffleOp = partial(shuffle, args=args)
        QHalfSplit = partial(split, args=args)
        self.stride = stride
        assert stride in [1, 2]
        self.out_channels = out_channels
        self.concat_res = QConcat()
        self.shfl_op = QShuffleOp(2)
        # "banch" spelling kept: it is a registered submodule name and
        # renaming would break existing checkpoints.
        self.banch2 = _ShuffleResUnitE_branch(out_channels//2, self.stride, args=args)
        self.first_half = partial(QHalfSplit, dim=1, first_half=True)()
        self.second_split = partial(QHalfSplit, dim=1, first_half=False)()

    def forward(self, x):
        x1 = self.first_half(x)
        x2 = self.second_split(x)
        out = self.concat_res(x1, self.banch2(x2))
        return self.shfl_op(out)
@BACKBONES.register()
class ShuffleNet(BaseModule):
    """ShuffleNet implementation.

    Quantization-aware ShuffleNet backbone registered in detectron2's
    BACKBONE_REGISTRY. forward() collects the feature maps whose stage names
    appear in `out_features` and returns them as a list.
    NOTE(review): detectron2 backbones conventionally return a dict of
    name -> tensor; confirm downstream consumers expect a list here.
    """
    def __init__(self, args=None, is_large=None, cfg="large_9cls", out_features=['res4'], init_cfg=None):
        # NOTE(review): mutable default for out_features -- safe only if
        # callers never mutate it; consider a tuple.
        # The parent may or may not take init_cfg (detectron2 Backbone vs an
        # mmcv-style BaseModule); probe via the size_divisibility attribute.
        if hasattr(super(ShuffleNet, self), 'size_divisibility'):
            super(ShuffleNet, self).__init__()
        else:
            super(ShuffleNet, self).__init__(init_cfg)
        self.args = args
        self.out_features = out_features
        if is_large is not None:
            print("is_large option is deprecated, use cfg instead")
        # Stage table: list of (out_channels, num_units); entry 0 is the stem
        # width (num_units None marks it as not a stage).
        _cfg = {
            'large_7cls': [(24, None), (72, 4), (120, 5), (240, 8), (480, 5)],
            'large_9cls': [(24, None), (48, 4), (72, 4), (144, 6), (288, 5)],
            'small_9cls': [(24, None), (48, 4), (72, 6), (144, 4), (288, 4)],
        }
        assert cfg in _cfg, "cfg not found in the model definition"
        self.cfg = cfg
        self._cfg = _cfg
        QConv2d = partial(qconv, args=args)
        BatchNorm2d = partial(norm, args=args)
        QClippedReLU = partial(actv, args=args)
        in_channel, _ = _cfg[cfg][0]
        # Stem: stride-2 3x3 conv.
        self.conv1 = QConv2d(3, in_channel, kernel_size=3, stride=2, padding=1, bias=False)
        self.conv1_bn = BatchNorm2d(in_channel)
        self.conv1_ReLU = QClippedReLU()
        # Unit suffixes: res1a, res1b, ... (first unit of a stage is the
        # downsampling concat unit, the rest are stride-1 eltwise units).
        self.name = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
        for i, (out_channel, items) in enumerate(_cfg[cfg]):
            if items is None or i == 0:
                continue
            for j in range(items):
                if j == 0:
                    self.add_module("res{}{}".format(i, self.name[j]), _ShuffleResUnitC(in_channel, out_channel, args=args))
                else:
                    self.add_module("res{}{}".format(i, self.name[j]), _ShuffleResUnitE(out_channel, out_channel, stride=1, args=args))
            in_channel = out_channel
        # Kaiming init for convs, unit/zero init for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        outputs = []
        x = self.conv1_ReLU(self.conv1_bn(self.conv1(x)))
        if 'stem' in self.out_features:
            outputs.append(x)
        for i, (out_channel, items) in enumerate(self._cfg[self.cfg]):
            if i == 0 or items is None:
                continue
            for j in range(items):
                x = getattr(self, "res{}{}".format(i, self.name[j]))(x)
            # Emit the stage's final feature map when requested.
            if 'res{}'.format(i) in self.out_features:
                outputs.append(x)
        return outputs
class ShuffleNet_(nn.Module):
    """Image classifier: ShuffleNet backbone + global average pool + linear
    head. Expects the backbone to emit exactly one feature map (res4)."""

    def __init__(self, args=None, cfg=None):
        super(ShuffleNet_, self).__init__()
        self.args = args
        self.backbone = ShuffleNet(args=args, cfg=cfg)
        self.avgpool = nn.AdaptiveAvgPool2d((1,1))
        # Width of the last stage, read off the final unit of stage 4.
        outplanes = self.backbone.res4d.out_channels
        fc_function = nn.Linear
        # Default 1000 classes (ImageNet) when args carries no num_classes.
        self.fc = fc_function(outplanes, getattr(args, 'num_classes', 1000))

    def forward(self, x):
        outputs = self.backbone(x)
        assert len(outputs) == 1
        x = outputs[0]
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def shufflenet_large(args=None):
    """Build the large 9-class-config ShuffleNet classifier."""
    return ShuffleNet_(args=args, cfg='large_9cls')
def shufflenet_small(args=None):
    """Build the small 9-class-config ShuffleNet classifier."""
    return ShuffleNet_(args=args, cfg='small_9cls')
def main():
    # Smoke test: constructing the model exercises all module definitions.
    model = shufflenet_large()

if __name__ == "__main__":
    main()
|
from pathlib import Path
import pandas as pd
from typing import Sequence
from visions.core.model.relations import (
IdentityRelation,
InferenceRelation,
TypeRelation,
)
from visions.core.model.type import VisionsBaseType
def _get_relations() -> Sequence[TypeRelation]:
    """Relations for visions_existing_path: identity from the generic path
    type. The import is local, presumably to avoid a circular import at
    module load time."""
    from visions.core.implementations.types import visions_path

    return [IdentityRelation(visions_existing_path, visions_path)]
class visions_existing_path(VisionsBaseType):
    """**Existing Path** implementation of :class:`visions.core.model.type.VisionsBaseType`.

    Examples:
        >>> x = pd.Series([Path('/home/user/file.txt'), Path('/home/user/test2.txt')])
        >>> x in visions_existing_path
        True
    """
    @classmethod
    def get_relations(cls) -> Sequence[TypeRelation]:
        return _get_relations()

    @classmethod
    def contains_op(cls, series: pd.Series) -> bool:
        # Every element must be a Path that exists on disk (touches the
        # filesystem). NOTE(review): an empty series vacuously passes --
        # confirm that is the intended membership semantics.
        return all(isinstance(p, Path) and p.exists() for p in series)
|
import sys
import random
import string
from sqlalchemy import Column, ForeignKey, Integer, String, DateTime
from sqlalchemy.sql import func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy import create_engine
from passlib.apps import custom_app_context as pwd_context
from itsdangerous import(TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
# Declarative base shared by all ORM models in this module.
Base = declarative_base()
# Secret used for creating and verifying tokens.
# Fixes: the original used Python-2-only `xrange` (NameError on Python 3) and
# the default `random` generator, which is not cryptographically secure;
# SystemRandom draws from the OS CSPRNG instead.
# NOTE: regenerated on every process start, so outstanding tokens are
# invalidated by a restart -- presumably acceptable here.
secret_key = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for x in range(32))
class Users(Base):
    """ORM model for an API user with a hashed password and token helpers."""
    __tablename__ = 'user'

    id = Column(Integer, primary_key=True)
    username = Column(String(32), index=True)
    password_hash = Column(String(64))  # passlib hash, never the raw password
    created_date = Column(DateTime(timezone=True), server_default=func.now())

    def hash_password(self, password):
        """Hash `password` with passlib and store only the hash."""
        self.password_hash = pwd_context.encrypt(password)

    def verify_password(self, password):
        """Return True when `password` matches the stored hash."""
        return pwd_context.verify(password, self.password_hash)

    # Generates tokens for Oauth flow
    def generate_auth_token(self, expiration=600):
        """Return a signed token embedding this user's id, valid for
        `expiration` seconds.
        NOTE(review): TimedJSONWebSignatureSerializer is deprecated and was
        removed in itsdangerous 2.x -- pin the dependency or migrate.
        """
        s = Serializer(secret_key, expires_in=expiration)
        return s.dumps({'id': self.id})

    # Method for Token Verification
    @staticmethod
    def verify_auth_token(token):
        """Return the user id encoded in a valid token, or None when the
        token is expired or has a bad signature."""
        s = Serializer(secret_key)
        try:
            data = s.loads(token)
        except SignatureExpired:
            return None
        except BadSignature:
            return None
        user_id = data['id']
        return user_id
class Items(Base):
    """ORM model for a key/value item owned by a user."""
    __tablename__ = 'item'

    id = Column(Integer, primary_key=True)
    key = Column(String(30), index=True)
    value = Column(String(250))
    author = relationship(Users)
    author_id = Column(Integer, ForeignKey('user.id'))
    created_date = Column(DateTime(timezone=True), server_default=func.now())

    @property
    def serialize(self):
        """Dict view of the item for API responses (omits the author link).

        NOTE(review): created_date is a datetime, not a JSON-native type --
        confirm the caller's serializer handles it.
        """
        return {
            'key': self.key,
            'value': self.value,
            'id': self.id,
            'created_date': self.created_date
        }
# Create the SQLite database file and all mapped tables at import time.
engine = create_engine('sqlite:///items.db')
Base.metadata.create_all(engine)
|
#
# @file TestSBMLNamespaces.py
# @brief SBMLNamespaces unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestSBMLNamespaces.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestSBMLNamespaces(unittest.TestCase):
    """Auto-generated tests (see file header: DO NOT EDIT) checking that each
    SBML level/version pair maps to its core namespace URI, plus package
    namespace add/remove. Uses the deprecated `assert_` alias by design of
    the generator."""

    def test_SBMLNamespaces_L1V1(self):
        sbml = libsbml.SBMLNamespaces(1,1)
        self.assert_( sbml.getLevel() == 1 )
        self.assert_( sbml.getVersion() == 1 )
        ns = sbml.getNamespaces()
        self.assert_( ns.getLength() == 1 )
        self.assert_( ns.getURI(0) == "http://www.sbml.org/sbml/level1" )
        self.assert_( ns.getPrefix(0) == "" )
        sbml = None
        pass

    def test_SBMLNamespaces_L1V2(self):
        sbml = libsbml.SBMLNamespaces(1,2)
        self.assert_( sbml.getLevel() == 1 )
        self.assert_( sbml.getVersion() == 2 )
        ns = sbml.getNamespaces()
        self.assert_( ns.getLength() == 1 )
        self.assert_( ns.getURI(0) == "http://www.sbml.org/sbml/level1" )
        self.assert_( ns.getPrefix(0) == "" )
        sbml = None
        pass

    def test_SBMLNamespaces_L2V1(self):
        sbml = libsbml.SBMLNamespaces(2,1)
        self.assert_( sbml.getLevel() == 2 )
        self.assert_( sbml.getVersion() == 1 )
        ns = sbml.getNamespaces()
        self.assert_( ns.getLength() == 1 )
        self.assert_( ns.getURI(0) == "http://www.sbml.org/sbml/level2" )
        self.assert_( ns.getPrefix(0) == "" )
        sbml = None
        pass

    def test_SBMLNamespaces_L2V2(self):
        sbml = libsbml.SBMLNamespaces(2,2)
        self.assert_( sbml.getLevel() == 2 )
        self.assert_( sbml.getVersion() == 2 )
        ns = sbml.getNamespaces()
        self.assert_( ns.getLength() == 1 )
        self.assert_( ns.getURI(0) == "http://www.sbml.org/sbml/level2/version2" )
        self.assert_( ns.getPrefix(0) == "" )
        sbml = None
        pass

    def test_SBMLNamespaces_L2V3(self):
        sbml = libsbml.SBMLNamespaces(2,3)
        self.assert_( sbml.getLevel() == 2 )
        self.assert_( sbml.getVersion() == 3 )
        ns = sbml.getNamespaces()
        self.assert_( ns.getLength() == 1 )
        self.assert_( ns.getURI(0) == "http://www.sbml.org/sbml/level2/version3" )
        self.assert_( ns.getPrefix(0) == "" )
        sbml = None
        pass

    def test_SBMLNamespaces_L2V4(self):
        sbml = libsbml.SBMLNamespaces(2,4)
        self.assert_( sbml.getLevel() == 2 )
        self.assert_( sbml.getVersion() == 4 )
        ns = sbml.getNamespaces()
        self.assert_( ns.getLength() == 1 )
        self.assert_( ns.getURI(0) == "http://www.sbml.org/sbml/level2/version4" )
        self.assert_( ns.getPrefix(0) == "" )
        sbml = None
        pass

    def test_SBMLNamespaces_L3V1(self):
        sbml = libsbml.SBMLNamespaces(3,1)
        self.assert_( sbml.getLevel() == 3 )
        self.assert_( sbml.getVersion() == 1 )
        ns = sbml.getNamespaces()
        self.assert_( ns.getLength() == 1 )
        self.assert_( ns.getURI(0) == "http://www.sbml.org/sbml/level3/version1/core" )
        self.assert_( ns.getPrefix(0) == "" )
        sbml = None
        pass

    def test_SBMLNamespaces_add_and_remove_namespaces(self):
        # Core namespace occupies slot 0; package namespaces append after it.
        sbmlns = libsbml.SBMLNamespaces( 3,1 )
        self.assert_( sbmlns.getLevel() == 3 )
        self.assert_( sbmlns.getVersion() == 1 )
        sbmlns.addNamespace("http://www.sbml.org/sbml/level3/version1/group/version1", "group")
        sbmlns.addNamespace("http://www.sbml.org/sbml/level3/version1/layout/version1", "layout")
        sbmlns.addNamespace("http://www.sbml.org/sbml/level3/version1/render/version1", "render")
        sbmlns.addNamespace("http://www.sbml.org/sbml/level3/version1/multi/version1", "multi")
        ns = sbmlns.getNamespaces()
        self.assert_( ns.getLength() == 5 )
        self.assert_( ns.getURI(0) == "http://www.sbml.org/sbml/level3/version1/core" )
        self.assert_( ns.getPrefix(0) == "" )
        self.assert_( ns.getURI(1) == "http://www.sbml.org/sbml/level3/version1/group/version1" )
        self.assert_( ns.getPrefix(1) == "group" )
        self.assert_( ns.getURI(2) == "http://www.sbml.org/sbml/level3/version1/layout/version1" )
        self.assert_( ns.getPrefix(2) == "layout" )
        self.assert_( ns.getURI(3) == "http://www.sbml.org/sbml/level3/version1/render/version1" )
        self.assert_( ns.getPrefix(3) == "render" )
        self.assert_( ns.getURI(4) == "http://www.sbml.org/sbml/level3/version1/multi/version1" )
        self.assert_( ns.getPrefix(4) == "multi" )
        sbmlns.removeNamespace("http://www.sbml.org/sbml/level3/version1/layout/version1")
        sbmlns.removeNamespace("http://www.sbml.org/sbml/level3/version1/group/version1")
        sbmlns.removeNamespace("http://www.sbml.org/sbml/level3/version1/render/version1")
        sbmlns.removeNamespace("http://www.sbml.org/sbml/level3/version1/multi/version1")
        pass

    def test_SBMLNamespaces_getURI(self):
        # Static lookup must agree with the per-instance URIs above.
        self.assert_( libsbml.SBMLNamespaces.getSBMLNamespaceURI(1,1) == "http://www.sbml.org/sbml/level1" )
        self.assert_( libsbml.SBMLNamespaces.getSBMLNamespaceURI(1,2) == "http://www.sbml.org/sbml/level1" )
        self.assert_( libsbml.SBMLNamespaces.getSBMLNamespaceURI(2,1) == "http://www.sbml.org/sbml/level2" )
        self.assert_( libsbml.SBMLNamespaces.getSBMLNamespaceURI(2,2) == "http://www.sbml.org/sbml/level2/version2" )
        self.assert_( libsbml.SBMLNamespaces.getSBMLNamespaceURI(2,3) == "http://www.sbml.org/sbml/level2/version3" )
        self.assert_( libsbml.SBMLNamespaces.getSBMLNamespaceURI(2,4) == "http://www.sbml.org/sbml/level2/version4" )
        self.assert_( libsbml.SBMLNamespaces.getSBMLNamespaceURI(3,1) == "http://www.sbml.org/sbml/level3/version1/core" )
        pass
def suite():
    """Build the unittest suite for this module."""
    result = unittest.TestSuite()
    result.addTest(unittest.makeSuite(TestSBMLNamespaces))
    return result
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
from torch import nn
import numpy as np
import torch.nn.functional as F
import torch
from typing import Dict
from collections import OrderedDict
import math
##########
# Layers #
##########
class Flatten(nn.Module):
    """Flattens every trailing dimension: maps a [batch_size, d1, ..., dn]
    tensor to [batch_size, d1*...*dn].

    # Arguments
        input: Input tensor
    """
    def forward(self, input):
        batch_size = input.size(0)
        return input.view(batch_size, -1)
class GlobalMaxPool1d(nn.Module):
    """Max-pools over the entire length of a batched 1D tensor, mapping
    [batch, channels, length] to [batch, channels].

    # Arguments
        input: Input tensor
    """
    def forward(self, input):
        full_length = input.size()[2:]
        pooled = nn.functional.max_pool1d(input, kernel_size=full_length)
        return pooled.view(-1, input.size(1))
class GlobalAvgPool2d(nn.Module):
    """Average-pools over the full height and width of a batched 2D tensor,
    mapping [batch, channels, H, W] to [batch, channels].

    # Arguments
        input: Input tensor
    """
    def forward(self, input):
        spatial = input.size()[2:]
        pooled = nn.functional.avg_pool2d(input, kernel_size=spatial)
        return pooled.view(-1, input.size(1))
def conv_block(in_channels: int, out_channels: int, no_relu: bool = False) -> nn.Module:
    """Return a 3x3 conv -> batchnorm [-> ReLU] -> 2x2 maxpool block.

    # Arguments
        in_channels: channels of the incoming tensor
        out_channels: channels produced by the convolution
        no_relu: when True, the ReLU activation is omitted
    """
    layers = [
        nn.Conv2d(in_channels, out_channels, 3, padding=1),
        nn.BatchNorm2d(out_channels),
    ]
    if not no_relu:
        layers.append(nn.ReLU())
    layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
    return nn.Sequential(*layers)
def functional_conv_block(x: torch.Tensor, weights: torch.Tensor, biases: torch.Tensor,
                          bn_weights, bn_biases, no_relu=False) -> torch.Tensor:
    """Functional 3x3 conv -> batchnorm [-> ReLU] -> 2x2 maxpool, using
    explicitly supplied parameters instead of module state.

    # Arguments:
        x: Input Tensor for the conv block
        weights: Weights for the convolutional block
        biases: Biases for the convolutional block
        bn_weights: batch-norm scale parameters
        bn_biases: batch-norm shift parameters
        no_relu: when True, skip the ReLU activation
    """
    out = F.conv2d(x, weights, biases, padding=1)
    # training=True with no running stats: always normalize with batch
    # statistics (presumably for meta-learning inner-loop passes).
    out = F.batch_norm(out, running_mean=None, running_var=None,
                       weight=bn_weights, bias=bn_biases, training=True)
    if not no_relu:
        out = F.relu(out)
    return F.max_pool2d(out, kernel_size=2, stride=2)
##########
# Models #
##########
def get_few_shot_encoder(num_input_channels=1, conv128=False) -> nn.Module:
    """Creates a few shot encoder as used in Matching and Prototypical
    Networks: four conv blocks followed by a Flatten layer.

    # Arguments:
        num_input_channels: Number of color channels the model expects input
            data to contain. Omniglot = 1, miniImageNet = 3
        conv128: widen the last two blocks from 64 to 128 channels
    """
    width = 128 if conv128 else 64
    blocks = [
        conv_block(num_input_channels, 64),
        conv_block(64, 64),
        conv_block(64, width),
        conv_block(width, width),
        Flatten(),
    ]
    return nn.Sequential(*blocks)
def conv64():
    # Convenience factory: 3-channel (RGB) encoder with the standard 64-wide layers.
    return get_few_shot_encoder(3, False)
class FewShotClassifier(nn.Module):
    def __init__(self, num_input_channels: int, k_way: int, final_layer_size: int = 64,
                 dropout = 0., mult = 1, no_relu = False):
        """Creates a few shot classifier as used in MAML.

        This network should be identical to the one created by `get_few_shot_encoder` but with a
        classification layer on top.

        # Arguments:
            num_input_channels: Number of color channels the model expects input data to contain. Omniglot = 1,
                miniImageNet = 3
            k_way: Number of classes the model will discriminate between
            final_layer_size: 64 for Omniglot, 1600 for miniImageNet
            dropout: dropout probability applied before the logits layer
            mult: width multiplier for the last two conv blocks
            no_relu: drop the ReLU from the final conv block only
        """
        super(FewShotClassifier, self).__init__()
        self.no_relu = no_relu
        self.final_layer_size = final_layer_size
        self.conv1 = conv_block(num_input_channels, 64)
        self.conv2 = conv_block(64, 64)
        self.conv3 = conv_block(64, 64 * mult)
        self.conv4 = conv_block(64 * mult, 64 * mult, self.no_relu)
        self.dropout = nn.Dropout(dropout)
        self.logits = nn.Linear(final_layer_size * mult, k_way)

    def forward(self, x, output_layer=True):
        """Return class logits, or None when output_layer is False; in both
        cases the flattened features are stashed on self.features."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = x.view(x.size(0), -1)
        # Side effect: keep penultimate features for later inspection.
        self.features = x
        if output_layer:
            x = self.dropout(x)
            return self.logits(x)
        else:
            return None

    def functional_forward(self, x, weights):
        """Applies the same forward pass using PyTorch functional operators using a specified set of weights."""
        for block in [1, 2, 3, 4]:
            x = functional_conv_block(
                x,
                weights[f'conv{block}.0.weight'],
                weights[f'conv{block}.0.bias'],
                weights.get(f'conv{block}.1.weight'),
                weights.get(f'conv{block}.1.bias'),
                # Only the final block honors no_relu, mirroring __init__.
                no_relu=False if block < 4 else self.no_relu
            )
        x = x.view(x.size(0), -1)
        self.features = x
        x = F.dropout(x, p=self.dropout.p, training=self.training)
        x = F.linear(x, weights['logits.weight'], weights['logits.bias'])
        return x
def conv(k_way, dropout, no_relu):
    """Factory for the miniImageNet MAML classifier (3 input channels, 1600-d features)."""
    model = FewShotClassifier(
        num_input_channels=3,
        k_way=k_way,
        final_layer_size=1600,
        dropout=dropout,
        mult=1,
        no_relu=no_relu,
    )
    return model
|
import pandas as pd
import numpy as np
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
def make_link(x):
    """Rewrite every cell of DataFrame *x* in place as an HTML anchor to the row's NPI page.

    The 'NPI' column supplies the link target; other cells keep their own text
    but point at the same NPI. Mutates the frame in place; returns None.
    NOTE: cells in columns iterated before 'NPI' would link to an empty NPI —
    this relies on column order, as in the original.
    """
    anchor = '<a href=\"../../graph/npi/{}\">{}</a>'
    for row in x.index:
        npi = ''
        for column in x.columns:
            if column == 'NPI':
                npi = x.at[row, column]
                x.at[row, column] = anchor.format(npi, npi)
            else:
                x.at[row, column] = anchor.format(npi, x.at[row, column])
def makelist(df):
    """Return the frame's columns as a list of Series, preserving column order."""
    return [df.iloc[:, position] for position in range(len(df.columns))]
#If you change the name of the inputted df, this wont work
#More customizable way
def results_py(df,dic):
    """
    Render filtered search results as an embeddable Plotly table.

    inputs:
        df = Dataframe of doc_info
        dic = Dictionary of user input of values (falsy entries are ignored)

    Returns an HTML string: either a "no results" message, or a Plotly <div>
    (include_plotlyjs=False, so the page must provide plotly.js itself).
    """
    orig_df_len = len(df)
    # Keep only the criteria the user actually filled in.
    d_true_values = {key: value for key, value in dic.items() if value}
    for k in d_true_values:
        # Apply each filter in turn; if one would empty the result set,
        # stop and keep the dataframe as it was.
        filtered = df[df[k] == str(d_true_values[k]).upper()]
        if len(filtered) != 0:
            df = filtered
        else:
            break
    # Nothing matched, or nothing was filtered out at all => "no results".
    if len(df) == 0 or len(df) == orig_df_len:
        return('<p><u><h4><b>Your search returned no results, <a href="../" target="">would you like to search again?</a></b></h4></u></p>')
    make_link(df)
    bold = lambda x: '<b>'+x+'</b>'
    def colors(df):
        """Alternating white / light-grey row stripes, one entry per row."""
        if len(df) == 1:
            return [['white']]
        stripes = ['white', 'rgb(240, 240, 240)'] * (len(df) // 2)
        if len(df) % 2 != 0:
            # Odd row count (>1): pad with a trailing white stripe.
            # (The original spelled this test with '&', which only worked by
            # operator-precedence accident; use an explicit modulo check.)
            stripes = stripes + ['white']
        return [stripes]
    table = go.Table(
        type='table',
        columnwidth=[65,55,55,40,35,80,70,40],
        header = dict(
            values=bold(df.columns),
            fill=dict(color='rgb(240, 240, 240)'),
            align=['center'],
            font=dict(color='black',size=16, family="Roboto Condensed"),
            line=dict(width=0,color='black')
        ),
        cells = dict(
            values=makelist(df),
            fill=dict(color=colors(df)),
            font=dict(color='black',size=13, family="Roboto Condensed"),
            height=15,
            line=dict(width=0),
        ))
    data = [table]
    return(plotly.offline.plot(data, include_plotlyjs=False, output_type='div'))
#You'll get an error if you switch the df name
|
# pylint: disable=unexpected-keyword-arg,no-value-for-parameter,too-many-public-methods
# pylint: disable=no-member,missing-function-docstring,redefined-outer-name,unsubscriptable-object
# pylint: disable=too-many-function-args
"""
"""
import functools
from collections import namedtuple
from contextlib import suppress
from . import constants, elements, internal, util
from .base import CodeInfo, preevaluate_predicate, DeferEvaluation
from .. import langref
from ..exceptions import InvalidNameError
from ..grammar import TreeNode
from ..util import is_valid_iterable, ensure_tuple
__all__ = ()
# pylint: disable=too-many-return-statements, too-complex
def _unwrap_node(node):
    """
    Recursively collapse a parse TreeNode (or iterable of nodes) into plain
    values: leaves become their text, single-item containers are unwrapped,
    and empty/falsy children are dropped (numeric zeros are kept).
    """
    if node is None:
        return None
    if is_valid_iterable(node):
        items = []
        for child in node:
            if child is None:
                continue
            if isinstance(child, TreeNode):
                child = _unwrap_node(child)
            # Keep numbers even when falsy (0, 0.0); drop other falsy values.
            if isinstance(child, (int, float)) or child:
                items.append(child)
        # A single surviving child is returned bare, not as a 1-tuple.
        if len(items) == 1:
            return items[0]
        return tuple(items)
    if not isinstance(node, TreeNode):
        return node
    # Leaf node: its matched text is the value.
    if not node.elements:
        return node.text
    elements = _unwrap_node(node.elements)
    # len() may raise TypeError for non-sized results; in that case fall through.
    with suppress(TypeError):
        if len(elements) == 1 and is_valid_iterable(node):
            return elements[0]
    return elements
def elements_to_values(func):
    """
    Decorator for parser actions: replaces the raw TreeNode element list (the
    last positional argument) with a list of unwrapped plain values, dropping
    empty nodes but keeping numeric zeros.
    """
    @functools.wraps(func)
    def _wrapper(*args):
        values = []
        for element in args[-1]:
            if isinstance(element, TreeNode):
                # Prefer the node's children; fall back to its stripped text.
                element = element.elements or element.text.strip()
            if element:
                element = _unwrap_node(element)
                # Numeric zeros are meaningful; anything else must be truthy.
                if element or isinstance(element, (int, float)):
                    values.append(element)
        return func(*args[:-1], values)
    return _wrapper
def add_codeinfo(func):
    """
    Decorator for parser actions: appends a CodeInfo for [start, end] to the
    wrapped action's arguments. Also records the full script text on the
    instance the first time any decorated action fires.
    """
    @functools.wraps(func)
    def _wrapper(self, text, start, end, values):
        # The first action to run caches the source for later line lookups.
        if not self.source_code:
            self.source_code = text
        codeinfo = self.get_codeinfo(start, end)
        return func(self, text, start, end, values, codeinfo)
    return _wrapper
class Actions:
    """
    Grammar action callbacks. Each method is invoked by the parser when its
    rule matches and converts the matched text (plus already-reduced child
    values) into `elements` / `internal` objects for later evaluation.
    """
    __slots__ = (
        'lines',
        'source',
    )
    # One entry per source line: absolute [start, end] offsets, 1-based number, text.
    LineInfo = namedtuple('LineInfo', ('start', 'end', 'number', 'content'))
    def __init__(self, source):
        self.source = source
        self.lines = None
    @property
    def source_code(self):
        """
        The full script text reconstructed from the cached line table, or
        None until the first action call records it.
        """
        if self.lines is None:
            return None
        return '\n'.join(l.content for l in self.lines)
    @source_code.setter
    def source_code(self, value):
        # Only the first assignment wins; later writes are silently ignored.
        if self.lines is not None:
            return
        lines = []
        current_pos = 0
        for i, line in enumerate(value.splitlines()):
            lines.append(
                self.LineInfo(
                    start=current_pos,
                    end=current_pos + len(line),
                    number=i + 1,
                    content=line,
                ))
            # +1 accounts for the newline separator.
            current_pos += len(line) + 1
        self.lines = tuple(lines)
    def get_codeinfo(self, start, end):
        """
        Build a CodeInfo covering the absolute offsets [start, end]: the
        spanned lines' text, positions relative to that text, and line numbers.
        """
        lines = []
        start_line = end_line = None
        for line in self.lines:
            if not start_line and line.start <= start <= line.end:
                start_line = line
            if not end_line and line.start <= end <= line.end:
                end_line = line
            # Collect every line from the first hit until both ends are found.
            if start_line or end_line:
                lines.append(line)
            if start_line and end_line:
                break
        text = '\n'.join(l.content for l in lines)
        startpos = start - lines[0].start
        endpos = sum(len(l.content) + 1 for l in lines[:-1]) + (end - lines[-1].start) - 1
        return CodeInfo(
            text=text,
            startpos=startpos,
            endpos=endpos,
            line_numbers=tuple(l.number for l in lines),
            source=self.source,
        )
    @elements_to_values
    @add_codeinfo
    def dice(self, text, start, end, values, codeinfo):
        # Dice expression -> NewDice element.
        return elements.NewDice(*values, codeinfo=codeinfo)
    @elements_to_values
    @add_codeinfo
    def binary_op(self, text, start, end, values, codeinfo):
        left, op, right = values
        if not isinstance(op, elements.Operator):
            op = util.get_operator(op)
        # Constant-fold when both operands are plain numbers and the op is mapped.
        if isinstance(left, (int, float)) and isinstance(right, (int, float)) \
                and op.value in constants.OPERATOR_MAP:
            return constants.OPERATOR_MAP[op.value](left, right)
        # Adjacent string literals merge into a single StringLiteral.
        if isinstance(left, elements.StringLiteral) and isinstance(right, elements.StringLiteral):
            # NOTE(review): get_codeinfo constructs CodeInfo with
            # 'line_numbers=' and 'source=', but this call passes 'lineno='
            # and omits 'source' — verify CodeInfo accepts this form.
            codeinfo = CodeInfo(
                text=text[left.codeinfo.startpos:right.codeinfo.endpos],
                startpos=left.codeinfo.startpos,
                endpos=right.codeinfo.endpos,
                lineno=left.codeinfo.lineno,
            )
            return elements.StringLiteral((left, right), codeinfo=codeinfo)
        return elements.BinaryOp(left, op, right, codeinfo=codeinfo)
    @elements_to_values
    @add_codeinfo
    def use_if(self, text, start, end, values, codeinfo):
        return elements.UseIf(*values, codeinfo=codeinfo)
    @add_codeinfo
    def basic_name(self, text, start, end, values, codeinfo):
        # Plain identifier; keywords are not valid names.
        name = text[start:end]
        if name in elements.KEYWORDS:
            raise InvalidNameError(f'{name} is a keyword and cannot be used.', codeinfo=codeinfo)
        return name
    # NOTE: the 'elements' parameter below shadows the module-level
    # 'elements' import inside these two methods (it is unused here).
    def int_(self, text, start, end, elements):
        return int(''.join(text[start:end]))
    def float_(self, text, start, end, elements):
        return float(''.join(text[start:end]))
    @add_codeinfo
    def string(self, text, start, end, values, codeinfo):
        # Strip the surrounding quotes and expand escape sequences.
        return elements.StringLiteral(
            constants.ESCAPE_STR_PAT.sub(lambda m: constants.ESCAPE_MAP[m.group(1)],
                                         text[start + 1:end - 1]),
            codeinfo=codeinfo,
        )
    def special_entry(self, text, start, end, values):
        return elements.SpecialEntry(text[start:end])
    def special_accessor(self, text, start, end, values):
        return elements.SpecialAccessor(text[start:end])
    def special_ref(self, text, start, end, values):
        return elements.SpecialReference(text[start:end])
    @add_codeinfo
    def reference(self, text, start, end, values, codeinfo):
        # Keywords are not references; return None so they are dropped.
        value = text[start:end]
        if value in langref.KEYWORD_SET:
            return None
        return elements.Reference(value, codeinfo=codeinfo)
    def text(self, text, start, end, values):
        # Raw matched text, unmodified.
        return text[start:end]
    def ignore(self, *args):
        # Explicitly discard the match.
        return None
    @elements_to_values
    def overload_only_operator(self, text, start, end, values):
        if values:
            return elements.OverloadOnlyOperator(''.join(values))
        return elements.OverloadOnlyOperator(text[start:end])
    def one_sided_operator(self, text, start, end, values):
        return elements.OneSidedOperator(text[start:end])
    def two_sided_operator(self, text, start, end, values):
        return elements.TwoSidedOperator(text[start:end])
    @elements_to_values
    @add_codeinfo
    def negate(self, text, start, end, values, codeinfo):
        return util.negate(values[0], codeinfo=codeinfo, script=text)
    @elements_to_values
    @add_codeinfo
    def normal_modifier_body(self, text, start, end, values, codeinfo):
        """Split a modifier body into its (parameters, statements) parts."""
        params = body = ()
        if values and is_valid_iterable(values[0]):
            values = values[0]
        # A leading ItemList is the parameter list.
        if values and isinstance(values[0], internal.ItemList):
            params = tuple(values.pop(0))
        if values:
            body = values[0]
            if not is_valid_iterable(body):
                body = (body,)
            else:
                body = util.flatten_tuple(body)
        return params, body
    @add_codeinfo
    def small_modifier_body(self, text, start, end, values, codeinfo):
        # Shorthand body: a single expression assigned to the subject (?).
        params, body = self.normal_modifier_body(text, start, end, values)
        return params, elements.Assignment(
            target=elements.SpecialReference.SUBJECT,
            value=body[0],
            codeinfo=codeinfo,
        )
    @elements_to_values
    @add_codeinfo
    def modifier_def(self, text, start, end, values, codeinfo):
        """Build a ModifierDef, rejecting duplicate parameter names."""
        target, (params, body) = values
        if target == '!':
            target = elements.SpecialReference.NONE
        seen = set()
        for param in params:
            if param in seen:
                #TODO pretty print target
                raise InvalidNameError(
                    f'Parameter {param} of modifier {target} is '
                    'defined more than once!',
                    codeinfo=codeinfo)
            seen.add(param)
        return elements.ModifierDef(
            target=target,
            parameters=params,
            definition=body,
            codeinfo=codeinfo,
        )
    @elements_to_values
    @add_codeinfo
    def first_modifier_call(self, text, start, end, values, codeinfo):
        """Handle the first call in a chain; ':' syntax folds the target into an Access."""
        target, op, call = values
        if op == ':':
            accessing = target
            accessors = []
            if isinstance(target, elements.Access):
                accessing = target.accessing
                accessors = list(target.accessors)
            if isinstance(call.modifier, elements.Reference):
                accessors.append(call.modifier)
            elif isinstance(call.modifier, elements.Access):
                accessors.append(call.modifier.accessing)
                accessors.extend(call.modifier.accessors)
            call = call._replace(modifier=elements.Access(
                accessing=accessing,
                accessors=tuple(accessors),
                codeinfo=call.codeinfo,
            ))
        return (target, call)
    @elements_to_values
    @add_codeinfo
    def modifier_call(self, text, start, end, values, codeinfo):
        # With no args, values holds only the modifier itself.
        if len(values) == 1:
            return elements.ModifierCall(modifier=values[0], args=(), codeinfo=codeinfo)
        return elements.ModifierCall(
            modifier=values[0],
            args=ensure_tuple(values[-1]),
            codeinfo=codeinfo,
        )
    @elements_to_values
    @add_codeinfo
    def modify(self, text, start, end, values, codeinfo):
        # Subject plus one-or-more modifier calls -> Modify element.
        (subject, first_call), *other_calls = values
        calls = [first_call]
        if other_calls:
            if is_valid_iterable(other_calls[0]):
                calls += other_calls[0]
            else:
                calls.append(other_calls[0])
        return elements.Modify(subject=subject, modifiers=tuple(calls), codeinfo=codeinfo)
    @elements_to_values
    def param_list(self, text, start, end, values):
        return internal.ItemList(p.value for p in util.flatten_tuple(values))
    @elements_to_values
    def arg_list(self, text, start, end, values):
        return util.flatten_tuple(values)
    @elements_to_values
    @add_codeinfo
    def left_op_overload(self, text, start, end, values, codeinfo):
        return elements.OverloadOperator(operator=elements.TwoSidedOperator(values[0]),
                                         side=elements.OperationSide.LEFT,
                                         codeinfo=codeinfo)
    @elements_to_values
    @add_codeinfo
    def right_op_overload(self, text, start, end, values, codeinfo):
        return elements.OverloadOperator(operator=elements.TwoSidedOperator(values[0]),
                                         side=elements.OperationSide.RIGHT,
                                         codeinfo=codeinfo)
    @elements_to_values
    @add_codeinfo
    def one_sided_op_overload(self, text, start, end, values, codeinfo):
        value = values[0]
        if not isinstance(value, str):
            value = ''.join(value)
        return elements.OverloadOperator(operator=elements.OneSidedOperator(value),
                                         side=elements.OperationSide.NA,
                                         codeinfo=codeinfo)
    @elements_to_values
    @add_codeinfo
    def raw_accessor(self, text, start, end, values, codeinfo):
        return elements.RawAccessor(values[0], codeinfo=codeinfo)
    #TODO implement expand
    @elements_to_values
    @add_codeinfo
    def access(self, text, start, end, values, codeinfo):
        """Build an Access chain; an EVERY accessor splits the chain with an Expand."""
        accessing = values[0]
        accessors = ensure_tuple(values[-1])
        current_accessors = []
        for accessor in accessors:
            if accessor == elements.SpecialAccessor.EVERY:
                if current_accessors:
                    # Wrap everything accumulated so far, then expand it.
                    accessing = elements.Expand(
                        elements.Access(accessing=accessing,
                                        accessors=tuple(current_accessors),
                                        codeinfo=codeinfo),
                        codeinfo=codeinfo,
                    )
                    current_accessors = []
                else:
                    accessing = elements.Expand(accessing, codeinfo=codeinfo)
            else:
                current_accessors.append(accessor)
        # Any trailing accessors form the final Access.
        if current_accessors:
            accessing = elements.Access(
                accessing=accessing,
                accessors=tuple(current_accessors),
                codeinfo=codeinfo,
            )
        return accessing
    @elements_to_values
    @add_codeinfo
    def reduce(self, text, start, end, values, codeinfo):
        value = values[0]
        if value == '*':
            value = elements.SpecialReference.ALL
        return elements.Reduce(value, codeinfo=codeinfo)
    @elements_to_values
    @add_codeinfo
    def modify_and_assign(self, text, start, end, values, codeinfo):
        # Sugar: "target <- mods" becomes target = Modify(target, mods).
        return (elements.Assignment(
            target=values[0],
            value=elements.Modify(subject=values[0],
                                  modifiers=util.flatten_tuple(values[1:]),
                                  codeinfo=codeinfo),
            codeinfo=codeinfo,
        ))
    @elements_to_values
    @add_codeinfo
    def assignment(self, text, start, end, values, codeinfo):
        target, op, value = values
        # Compound assignment ('+=' etc.): desugar into target = target <op> value.
        if len(op) > 1:
            value = elements.BinaryOp(left=target,
                                      op=util.get_operator(op[:-1]),
                                      right=value,
                                      codeinfo=codeinfo)
        # Self-assignment is a no-op; drop it.
        if target == value:
            return None
        return elements.Assignment(target=target, value=value, codeinfo=codeinfo)
    @elements_to_values
    @add_codeinfo
    def reduce_and_assign(self, text, start, end, values, codeinfo):
        return elements.Assignment(target=values[0],
                                   value=elements.Reduce(values[0], codeinfo=codeinfo),
                                   codeinfo=codeinfo)
    @add_codeinfo
    def leave(self, text, start, end, values, codeinfo):
        return elements.Leave(codeinfo=codeinfo)
    @elements_to_values
    @add_codeinfo
    def otherwise(self, text, start, end, values, codeinfo):
        return internal.Otherwise(values[-1], codeinfo=codeinfo)
    @elements_to_values
    @add_codeinfo
    def fill(self, text, start, end, values, codeinfo):
        return elements.Fill(*values, codeinfo=codeinfo)
    @add_codeinfo
    def empty_roll(self, text, start, end, values, codeinfo):
        return elements.NewRoll((), codeinfo=codeinfo)
    @elements_to_values
    @add_codeinfo
    def roll_def(self, text, start, end, values, codeinfo):
        if len(values) == 1:
            if not is_valid_iterable(values[0]):
                return elements.NewRoll(tuple(values), codeinfo=codeinfo)
            values = values[0]
        return elements.NewRoll(util.flatten_tuple(tuple(values)), codeinfo=codeinfo)
    @elements_to_values
    @add_codeinfo
    def new_bag(self, text, start, end, values, codeinfo):
        if not values:
            return elements.NewBag((), codeinfo=codeinfo)
        return elements.NewBag(util.flatten_tuple(values), codeinfo=codeinfo)
    @elements_to_values
    @add_codeinfo
    def clear(self, text, start, end, values, codeinfo):
        return elements.ClearValue(values[0], codeinfo=codeinfo)
    @elements_to_values
    @add_codeinfo
    def load_from_into(self, text, start, end, values, codeinfo):
        """'load X from Y into Z' — Z defaults to the local scope."""
        to_load, load_from, *into = values
        if into:
            into = into[0]
        else:
            into = elements.SpecialReference.LOCAL
        if to_load == '*':
            to_load = elements.SpecialReference.ALL
        elif is_valid_iterable(to_load):
            to_load = util.flatten_tuple(to_load)
        else:
            to_load = ensure_tuple(to_load)
        return elements.Load(
            to_load=to_load,
            load_from=load_from,
            into=into,
            codeinfo=codeinfo,
        )
    @elements_to_values
    @add_codeinfo
    def load_from(self, text, start, end, values, codeinfo):
        # 'load X from Y' — always into the local scope.
        to_load, load_from = values
        if to_load == '*':
            to_load = elements.SpecialReference.ALL
        elif is_valid_iterable(to_load):
            to_load = util.flatten_tuple(to_load)
        else:
            to_load = ensure_tuple(to_load)
        return elements.Load(
            to_load=to_load,
            load_from=load_from,
            into=elements.SpecialReference.LOCAL,
            codeinfo=codeinfo,
        )
    @elements_to_values
    @add_codeinfo
    def load_into(self, text, start, end, values, codeinfo):
        # 'load Y into Z' — one Load per source, loading everything.
        things_to_load_from, into = values
        if is_valid_iterable(things_to_load_from):
            things_to_load_from = util.flatten_tuple(things_to_load_from)
        items = []
        for load_from in ensure_tuple(things_to_load_from):
            items.append(
                elements.Load(
                    to_load=elements.SpecialReference.ALL,
                    load_from=load_from,
                    into=into,
                    codeinfo=codeinfo,
                ))
        if len(items) == 1:
            return items[0]
        return tuple(items)
    @elements_to_values
    @add_codeinfo
    def load(self, text, start, end, values, codeinfo):
        # Bare 'load X' — each item loads everything from itself into itself.
        if is_valid_iterable(values[0]):
            values = util.flatten_tuple(values[0])
        items = []
        for item in values:
            items.append(
                elements.Load(
                    to_load=elements.SpecialReference.ALL,
                    load_from=item,
                    into=item,
                    codeinfo=codeinfo,
                ))
        return items[0] if len(items) == 1 else tuple(items)
    @elements_to_values
    @add_codeinfo
    def unless(self, text, start, end, values, codeinfo):
        # Try to decide the predicate at parse time; defer when impossible.
        preeval_res = preevaluate_predicate(values[0])
        if preeval_res is DeferEvaluation:
            return internal.Unless(*values, codeinfo=codeinfo)
        if preeval_res:
            return internal.Wrapper('unless', values[-1], codeinfo=codeinfo)
        return internal.UseOtherwise
    @elements_to_values
    @add_codeinfo
    def if_(self, text, start, end, values, codeinfo):
        # Same parse-time pre-evaluation as 'unless'.
        preeval_res = preevaluate_predicate(values[0])
        if preeval_res is DeferEvaluation:
            return internal.If(*values, codeinfo=codeinfo)
        if preeval_res:
            return internal.Wrapper('if', values[-1], codeinfo=codeinfo)
        return internal.UseOtherwise
    @elements_to_values
    @add_codeinfo
    def predicated_statement(self, text, start, end, values, codeinfo):
        return internal.PredicatedStatement(*values, codeinfo=codeinfo)
    @staticmethod
    def _create_if_then_element(predicated, otherwise, codeinfo):
        """Collapse a (possibly pre-evaluated) predicate into an IfThen or its result."""
        if predicated is internal.UseOtherwise:
            return otherwise
        if isinstance(predicated, internal.Wrapper):
            return predicated.value
        # Sentinel: reuse the predicate's own codeinfo.
        if codeinfo == 'from-predicated':
            codeinfo = predicated.codeinfo
        return elements.IfThen(
            predicate=predicated.predicate,
            then=ensure_tuple(predicated.statement),
            otherwise=ensure_tuple(otherwise),
            codeinfo=codeinfo,
        )
    @elements_to_values
    @add_codeinfo
    def if_stmt(self, text, start, end, values, codeinfo):
        """Assemble if / unless* / otherwise into nested IfThen elements."""
        if_ = values.pop(0)
        if not values:
            return self._create_if_then_element(if_, (), codeinfo)
        otherwise = values.pop(-1)
        unlesses = ()
        if isinstance(otherwise, internal.Otherwise):
            if not values:
                return self._create_if_then_element(if_, otherwise.value, codeinfo)
            unlesses = ensure_tuple(values[0])
            otherwise = otherwise.value
        else:
            # No otherwise clause: the popped value was the unless chain.
            unlesses = ensure_tuple(otherwise)
            otherwise = ()
        stmt = self._create_if_then_element(if_, otherwise, codeinfo)
        # Nest unless clauses outside-in, innermost first.
        for unless in reversed(unlesses):
            stmt = self._create_if_then_element(unless, stmt, 'from-predicated')
        return stmt
    # pylint: disable=protected-access
    @elements_to_values
    @add_codeinfo
    def restart(self, text, start, end, values, codeinfo):
        location_specifier = elements.RestartLocationSpecifier(values[0])
        target = elements.SpecialReference.NONE
        # A second value, when present, names the restart target.
        with suppress(IndexError):
            target = values[1]
        if isinstance(target, elements.Reference) \
                and not isinstance(target, elements.SpecialReference):
            target = target.value
        return elements.Restart(
            location_specifier=location_specifier,
            target=target,
            codeinfo=codeinfo,
        )
    @elements_to_values
    @add_codeinfo
    def loop_name(self, text, start, end, values, codeinfo):
        return internal.LoopName(values[0].value, codeinfo=codeinfo)
    @elements_to_values
    @add_codeinfo
    def loop_body(self, text, start, end, values, codeinfo):
        return internal.LoopBody(values[0], codeinfo=codeinfo)
    @elements_to_values
    @add_codeinfo
    def until_do(self, text, start, end, values, codeinfo):
        """Build an UntilDo loop, folding 'except when' clauses into the body."""
        name = until = do = otherwise = None
        if isinstance(values[0], internal.LoopName):
            name = values.pop(0)[0]
        until = values.pop(0)
        do = values.pop(0)[0]
        if values and isinstance(values[-1], internal.Otherwise):
            otherwise = values.pop(-1).value
        if values:
            # Each 'except when' wraps the current body in an IfThen.
            for except_when in ensure_tuple(values[0]):
                do = elements.IfThen(
                    predicate=except_when.predicate,
                    then=except_when.statement,
                    otherwise=do,
                    codeinfo=except_when.codeinfo,
                )
        return elements.UntilDo(
            name=name,
            until=until,
            do=do,
            otherwise=otherwise,
            codeinfo=codeinfo,
        )
    @elements_to_values
    @add_codeinfo
    def for_every(self, text, start, end, values, codeinfo):
        loop_name = None
        if isinstance(values[0], internal.LoopName):
            loop_name = values.pop(0)[0]
        item_name = values[0]
        if isinstance(item_name, elements.Reference):
            item_name = item_name.value
        return elements.ForEvery(
            name=loop_name,
            item_name=item_name,
            iterable=values[1],
            do=ensure_tuple(values[-1][0]),
            codeinfo=codeinfo,
        )
    @elements_to_values
    @add_codeinfo
    def block(self, text, start, end, values, codeinfo):
        if not values:
            return ()
        if is_valid_iterable(values[0]):
            return util.flatten_tuple(values[0])
        return values[0]
    @elements_to_values
    @add_codeinfo
    def but_if(self, text, start, end, values, codeinfo):
        predicate, statement = values
        if predicate == '*':
            predicate = elements.SpecialReference.ALL
        return elements.ButIf(predicate, statement, codeinfo=codeinfo)
    @elements_to_values
    @add_codeinfo
    def always(self, text, start, end, values, codeinfo):
        return internal.Always(values[0])
    @elements_to_values
    @add_codeinfo
    def attempt(self, text, start, end, values, codeinfo):
        """Build an Attempt from attempt / but-if* / always? clauses."""
        attempt, *remainder = values
        if not remainder:
            return elements.Attempt(
                attempt=attempt,
                buts=(),
                always=None,
                codeinfo=codeinfo,
            )
        if remainder:
            *buts, always = remainder
            # The trailing value is only an 'always' clause when wrapped as one.
            if not isinstance(always, internal.Always):
                buts = tuple((*buts, always))
                always = None
            else:
                always = always[0]
            if buts and is_valid_iterable(buts[0]):
                buts = buts[0]
        return elements.Attempt(
            attempt=attempt,
            buts=buts,
            always=always,
            codeinfo=codeinfo,
        )
    @elements_to_values
    @add_codeinfo
    def oops(self, text, start, end, values, codeinfo):
        return elements.Oops(values[0], codeinfo=codeinfo)
    @elements_to_values
    def statements(self, text, start, end, values):
        # Top-level rule: the unwrapped statement list is the parse result.
        return values
|
"""
Copyright (c) 2017 IBM Corp.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from cStringIO import StringIO
import sys
class Capturing(list):
    """Context manager that captures everything written to stdout/stderr.

    The captured output is split into lines and appended to the instance
    (which is itself a list):

        with Capturing() as out:
            print('hi')
        # out == ['hi']
    """
    build = None
    def __enter__(self):
        # Remember the real streams, then point both at one in-memory buffer.
        self._stdout = sys.stdout
        self._stderr = sys.stderr
        self._stringio = StringIO()
        sys.stdout = sys.stderr = self._stringio
        return self
    def __exit__(self, *args):
        # Tolerate __exit__ without a matching __enter__.
        if not hasattr(self, '_stringio'):
            return
        self.extend(self._stringio.getvalue().splitlines())
        del self._stringio  # free up some memory
        sys.stdout = self._stdout
        sys.stderr = self._stderr
|
#!/usr/bin/env python3
#
# A utility to retrieve Device-to-Cloud messages
#
# Usage:
#
# $ az_d2c_recv.py EVENT_HUB_NAME CONN_STR [offset]
#
# Both EH name and conn string can be obtained from the "Endpoints" section
# of the IoT Hub page. Note that Event Hub name is not the same as IoT Hub
# name and connectio string is not the same as used by the device.
#
# EH name looks like this: iothub-ehub-IOTHUBNAME-112232-5678abcdef
# Conn string starts with Endpoint=sb://ihsuproddbres019dednamespace... etc.
#
# For more info see here:
# https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-messages-read-builtin
#
# This code is adapted from this script:
# https://github.com/Azure/azure-event-hubs-python/blob/master/examples/recv.py
import os
import sys
import urllib
from azure.eventhub import EventHubClient, Receiver, Offset # pip3 install azure-eventhub
# Positional CLI arguments: Event Hub name and its connection string
# (see the usage notes at the top of this script).
hn = sys.argv[1]
ep = sys.argv[2]
# Optional third argument: starting offset; "-1" means "read from the beginning".
OFFSET = Offset(sys.argv[3] if len(sys.argv) == 4 else "-1")
# NOTE(review): only partition "0" of the built-in endpoint is read — confirm
# the hub has a single partition or that reading one partition is intended.
PARTITION = "0"
CONSUMER_GROUP = "$default"
def GetAMQPAddress(hn, ep):
    """Build an AMQPS endpoint URL from an Event Hub name and connection string.

    # Arguments
        hn: Event Hub name (e.g. "iothub-ehub-IOTHUBNAME-...").
        ep: connection string of the form
            "Endpoint=sb://HOST/;SharedAccessKeyName=NAME;SharedAccessKey=KEY".

    Returns "amqps://NAME:KEY@HOST/EH_NAME" with the credentials
    percent-encoded (no characters left unescaped).
    """
    # Bug fix: the file-level 'import urllib' does NOT guarantee that the
    # 'urllib.parse' submodule is loaded; import it explicitly here.
    from urllib.parse import quote
    # Split "k=v;k=v;..." pairs; maxsplit=1 keeps '=' inside key values intact.
    ep_parts = dict(kv.split("=", 1) for kv in ep.split(";"))
    host = ep_parts["Endpoint"].replace("sb://", "").replace("/", "")
    return "amqps://%s:%s@%s/%s" % (
        quote(ep_parts["SharedAccessKeyName"], safe=''),
        quote(ep_parts["SharedAccessKey"], safe=''),
        host,
        hn,
    )
# Connect and print Device-to-Cloud messages until interrupted (Ctrl-C).
client = EventHubClient(GetAMQPAddress(sys.argv[1], sys.argv[2]), debug=False)
try:
    receiver = client.add_receiver(CONSUMER_GROUP, PARTITION, prefetch=5000, offset=OFFSET)
    client.run()
    while True:
        for ev in receiver.receive():
            last_sn = ev.sequence_number
            # assumes ev.device_id is a bytes object — TODO confirm
            did = str(ev.device_id, 'utf-8')
            print(ev.offset, ev.sequence_number, did, ev.message)
        # NOTE(review): stop() is called on every pass through this loop, yet
        # the loop keeps calling receive() afterwards — verify this is
        # intentional and not a leftover from the upstream example script.
        client.stop()
except KeyboardInterrupt:
    pass
finally:
    client.stop()
|
from django.shortcuts import render, redirect, reverse, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import View
from .forms import (CreateCourseForm,
CreateCourseRegistrationForm,
CreateDepartmentForm,
CreateRegistrationForm)
from .models import Course, CourseRegistration, Department, Registration
import datetime
from django.utils import timezone
class CreateCourseView(LoginRequiredMixin, View):
    """
    View for admin to create new Course.
    """
    template_name = 'academicInfo/create_course.html'
    create_course_form = CreateCourseForm
    def get(self, request, *args, **kwargs):
        """Render the creation form — only for admin staff."""
        if hasattr(request.user, 'staff') and request.user.staff.is_admin:
            create_course_form = self.create_course_form()
            return render(request, self.template_name, {'create_course_form' : create_course_form})
        else:
            return redirect('home')
    def post(self, request, *args, **kwargs):
        """Validate and persist a new Course, then redirect to the course list."""
        # Bug fix: enforce the same admin check as get(); previously any
        # logged-in user could POST a new course.
        if not (hasattr(request.user, 'staff') and request.user.staff.is_admin):
            return redirect('home')
        create_course_form = CreateCourseForm(request.POST)
        if create_course_form.is_valid():
            # ModelForm.save() already persists the instance; the second
            # course.save() call was a redundant extra UPDATE.
            create_course_form.save()
            return redirect('view_course')
        return render(request, self.template_name, {'create_course_form' : create_course_form})
class CreateCourseRegistrationView(LoginRequiredMixin, View):
    """
    View for admin to add Course to the Registration.
    """
    template_name = 'academicInfo/create_course_registration.html'
    create_course_registration_form = CreateCourseRegistrationForm
    def get(self, request, *args, **kwargs):
        """Render the form for attaching a course to a registration (admin only)."""
        # Render this page only for the Admin.
        if hasattr(request.user, 'staff') and request.user.staff.is_admin:
            create_course_registration_form = self.create_course_registration_form()
            return render(request, self.template_name, {'create_course_registration_form' : create_course_registration_form})
        else:
            return redirect('home')
    def post(self, request, *args, **kwargs):
        """Validate and save the course/registration link, rejecting late or duplicate adds."""
        # NOTE(review): unlike get(), this handler does not check that the
        # user is an admin — any logged-in user can POST here. Verify intent.
        create_course_registration_form = CreateCourseRegistrationForm(request.POST)
        if create_course_registration_form.is_valid():
            # Add course to registration only if this course is not added already
            # in this registration.
            course_registration = create_course_registration_form.save(commit=False)
            # Check if the registration has already started.
            if course_registration.registration.startTime <= timezone.now():
                create_course_registration_form.add_error('registration',
                                'The registration has already started, you cannot add course to it now.')
                return render(request, self.template_name, {'create_course_registration_form' : create_course_registration_form})
            courses_in_registration = course_registration.registration.courseregistration_set.all()
            similar_course_registration = courses_in_registration.filter(course=course_registration.course,
                                                                         semester=course_registration.semester)
            # Check if course is not already present in the same registration and semester.
            if len(similar_course_registration) == 0:
                course_registration.save()
                return render(request, self.template_name, {'create_course_registration_form' : create_course_registration_form,
                                                            'success': 'Successfully added course to the registration.'})
            else:
                create_course_registration_form.add_error('course', 'This course is already added in this semester.')
                create_course_registration_form.add_error('semester', 'This semester already has this course.')
        return render(request, self.template_name, {'create_course_registration_form' : create_course_registration_form})
class CreateDepartmentView(LoginRequiredMixin, View):
    """
    View for admin to add new Department.
    """
    template_name = 'academicInfo/create_department.html'
    create_department_form = CreateDepartmentForm
    def get(self, request, *args, **kwargs):
        """Render the creation form — only for admin staff."""
        if hasattr(request.user, 'staff') and request.user.staff.is_admin:
            create_department_form = self.create_department_form()
            return render(request, self.template_name, {'create_department_form' : create_department_form})
        else:
            return redirect('home')
    def post(self, request, *args, **kwargs):
        """Validate and persist a new Department, then redirect to the list."""
        # Bug fix: enforce the same admin check as get(); previously any
        # logged-in user could POST a new department.
        if not (hasattr(request.user, 'staff') and request.user.staff.is_admin):
            return redirect('home')
        create_department_form = CreateDepartmentForm(request.POST)
        # Form validation rejects a duplicate Department name.
        if create_department_form.is_valid():
            # ModelForm.save() already persists the instance; the second
            # department.save() call was a redundant extra UPDATE.
            create_department_form.save()
            return redirect('view_department')
        else:
            return render(request, self.template_name, {'create_department_form' : create_department_form})
class CreateRegistrationView(LoginRequiredMixin, View):
    """
    View for admin to create new Registration.
    """
    template_name = 'academicInfo/create_registration.html'
    create_registration_form = CreateRegistrationForm
    def get(self, request, *args, **kwargs):
        """Render the registration-creation form (admin only)."""
        # Render this page only for the Admin.
        if hasattr(request.user, 'staff') and request.user.staff.is_admin:
            create_registration_form = self.create_registration_form()
            return render(request, self.template_name, {'create_registration_form' : create_registration_form})
        else:
            return redirect('home')
    def post(self, request, *args, **kwargs):
        """Create a Registration from the form plus raw days/hours/minutes fields."""
        # NOTE(review): unlike get(), this handler does not check that the
        # user is an admin, and int(request.POST['days']) will raise on a
        # missing/non-numeric field. Verify intent.
        create_registration_form = CreateRegistrationForm(request.POST)
        # Check if the Registration form is valid.
        if create_registration_form.is_valid():
            days = int(request.POST['days'])
            hours = int(request.POST['hours'])
            minutes = int(request.POST['minutes'])
            # Check if duration is 0 or not.
            if days + hours + minutes == 0:
                # Duration cannot be 0.
                return render(request, self.template_name, {'create_registration_form' : create_registration_form,
                                                            'error' : 'Duration cannot be 0.'})
            startTime = create_registration_form.cleaned_data['startTime']
            duration = datetime.timedelta(days=days, hours=hours, minutes=minutes)
            # endTime is derived and stored alongside duration for easy querying.
            endTime = startTime + duration
            registration = Registration.objects.create(name=create_registration_form.cleaned_data['name'],
                                                       startTime=startTime,
                                                       duration=duration,
                                                       endTime=endTime)
            registration.save()
            return redirect('registration')
        return render(request, self.template_name, {'create_registration_form' : create_registration_form})
class RegistrationsView(View):
    """
    View for everyone to view all the registrations.
    """
    template_name = 'academicInfo/registration.html'
    def get(self, request, *args, **kwargs):
        """Bucket all registrations into future / ongoing / past relative to now."""
        now = timezone.now()
        upcoming = Registration.objects.filter(startTime__gt=now).order_by('startTime')
        # Ongoing: already started (not in the future) but not yet ended.
        ongoing = (Registration.objects.filter(endTime__gt=now)
                   .exclude(startTime__gt=now)
                   .order_by('endTime'))
        finished = Registration.objects.filter(endTime__lt=now)
        context = {
            'future_registrations': upcoming,
            'present_registrations': ongoing,
            'past_registrations': finished,
        }
        return render(request, self.template_name, context)
class LiveRegistrationView(LoginRequiredMixin, View):
    """
    View for student to register and unregister from live registrations.
    """

    template_name = 'academicInfo/live_registration.html'

    def get(self, request, *args, **kwargs):
        """Show the courses a student may register for while the registration is live."""
        # Render this page only for the students.
        if not hasattr(request.user, 'student'):
            return redirect('home')
        registration = get_object_or_404(Registration, pk=self.kwargs['registration_id'])
        now = timezone.now()
        # Check if registration is currently live.
        if not (registration.startTime < now < registration.endTime):
            return redirect('registration')
        student = request.user.student
        semester = student.get_student_semester
        # Show courses which are in either the current or the next semester
        # of the student.
        # BUG FIX: the original used semester__gt (strictly greater), which
        # dropped current-semester courses — contradicting both this comment
        # and post()'s check `semester in range(semester, semester + 2)`.
        course_registration = registration.courseregistration_set.filter(
            semester__gte=semester
        ).exclude(semester__gt=semester + 1)
        return render(request, self.template_name,
                      {'course_registration': course_registration,
                       'student_courses': student.courseregistration_set.all()})

    def post(self, request, *args, **kwargs):
        """Register or unregister the student for a course in a live registration."""
        # Only students should be allowed to register.
        if not hasattr(request.user, 'student'):
            # Previously fell through and returned None (HTTP 500).
            return redirect('home')
        course_registration = get_object_or_404(
            CourseRegistration, pk=request.POST['course_registration_id'])
        registration = course_registration.registration
        now = timezone.now()
        student = request.user.student
        semester = student.get_student_semester
        is_live = registration.startTime < now < registration.endTime
        # If student wants to register for the course.
        if 'Register' in request.POST:
            if is_live and course_registration.semester in (semester, semester + 1):
                # Duplicates and full courses are silently ignored, as before.
                if (student not in course_registration.students.all()
                        and course_registration.remaining_seats > 0):
                    course_registration.students.add(student)
                return redirect(reverse('live_registration',
                                        kwargs={'registration_id': registration.id}))
            return redirect('home')
        # If student wants to unregister from the course.
        elif 'UnRegister' in request.POST:
            if is_live and student in course_registration.students.all():
                course_registration.students.remove(student)
                return redirect(reverse('live_registration',
                                        kwargs={'registration_id': registration.id}))
            return redirect('home')
        # Neither action was specified; previously returned None (HTTP 500).
        return redirect('home')
class DepartmentsView(LoginRequiredMixin, View):
    """
    View for admin to see departments and add new department.
    """

    template_name = 'academicInfo/departments.html'

    def get(self, request, *args, **kwargs):
        # Only admin staff may see the department list; everyone else
        # is sent back home.
        user = request.user
        if not (hasattr(user, 'staff') and user.staff.is_admin):
            return redirect('home')
        return render(request, self.template_name,
                      {'departments': Department.objects.all()})
class CourseView(LoginRequiredMixin, View):
    """
    View for admin to see Courses and add new Course.
    """

    template_name = 'academicInfo/courses.html'

    def get(self, request, *args, **kwargs):
        # Only admin staff may see the course list; everyone else
        # is sent back home.
        user = request.user
        if not (hasattr(user, 'staff') and user.staff.is_admin):
            return redirect('home')
        return render(request, self.template_name,
                      {'courses': Course.objects.all()})
|
import tensorflow as tf
from tensorcv.train.trainer.trainer import Trainer
from tensorcv.train.hooks import CheckpointPerfactSaverHook
class MultiGPUTrainer(Trainer):
    """Trainer using in-graph replication across multiple GPUs.

    The batch is split into per-GPU shards; each "tower" computes its own
    loss and gradients, and the gradients are averaged before a single
    weight update is applied.
    """

    def feature_shard(self, feature, num_shards):
        """Split a batched tensor into `num_shards` shards along axis 0.

        Samples are dealt round-robin to the shards, so shard sizes differ
        by at most one. With a single shard the input is returned as-is.
        """
        if num_shards > 1:
            feature_batch = tf.unstack(
                feature, num=self.config.batch_size, axis=0)
            feature_shards = [[] for _ in range(num_shards)]
            for i in range(self.config.batch_size):
                feature_shards[i % num_shards].append(feature_batch[i])
            feature_shards = [tf.parallel_stack(x) for x in feature_shards]
        else:
            feature_shards = [feature]
        return feature_shards

    def average_gradients(self, tower_grads):
        """Average each variable's gradient over all towers.

        Args:
            tower_grads: one entry per tower, each a list of
                (gradient, variable) pairs from optimizer.compute_gradients().

        Returns:
            A single list of (averaged_gradient, variable) pairs.
        """
        average_grads = []
        for grad_and_vars in zip(*tower_grads):
            # Stack per-tower gradients on a new leading axis and average it.
            grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
            grad = tf.reduce_mean(tf.concat(axis=0, values=grads), 0)
            # Variables are shared across towers, so the first tower's
            # variable reference is representative.
            average_grads.append((grad, grad_and_vars[0][1]))
        return average_grads

    def get_model_fn(self, model):
        """Build a tf.estimator model_fn that trains `model` on config.num_gpus GPUs."""
        def model_fn(features, labels, mode):
            is_training = mode == tf.estimator.ModeKeys.TRAIN
            if mode == tf.estimator.ModeKeys.PREDICT:
                with tf.variable_scope('net'):
                    net_out = model.net(features, is_training)
                    predictions = model.predictions(net_out)
                return tf.estimator.EstimatorSpec(
                    mode=mode,
                    predictions=predictions)
            if mode == tf.estimator.ModeKeys.EVAL:
                # Evaluation runs single-device; no sharding needed.
                with tf.variable_scope('net'):
                    net_out = model.net(features, is_training)
                    loss = model.loss(labels, net_out)
                    predictions = model.predictions(net_out)
                    metrics = model.metrics(labels, net_out, mode)
                    model.summary(features, labels, predictions, mode)
                return tf.estimator.EstimatorSpec(
                    mode=mode,
                    loss=loss,
                    eval_metric_ops=metrics)
            assert mode == tf.estimator.ModeKeys.TRAIN
            num_gpus = self.config.num_gpus
            feature_shards = self.feature_shard(features, num_gpus)
            label_shards = self.feature_shard(labels, num_gpus)
            lr = model.lr_policy(tf.train.get_global_step())
            optimizer = model.optimizer(lr)
            tower_losses = []
            tower_grads = []
            for i in range(num_gpus):
                # Reuse the 'net' variables for every tower after the first.
                with tf.variable_scope('net', reuse=i != 0):
                    with tf.name_scope('tower_%d' % i) as name_scope:
                        with tf.device('/gpu:%d' % i):
                            feature = feature_shards[i]
                            label = label_shards[i]
                            net_out = model.net(feature, is_training)
                            loss = model.loss(label, net_out)
                            tower_losses.append(loss)
                            tower_grads.append(optimizer.compute_gradients(loss))
                            if i == 0:
                                # Only run tower 0's update ops (e.g. batch-norm
                                # moving averages) to avoid redundant updates.
                                update_ops = tf.get_collection(
                                    tf.GraphKeys.UPDATE_OPS, name_scope)
                                net_out_0 = net_out
                                feature_0 = feature
                                label_0 = label
            metrics = model.metrics(label_0, net_out_0, mode)
            grads = self.average_gradients(tower_grads)
            loss = tf.reduce_mean(tower_losses)
            apply_gradient_op = optimizer.apply_gradients(
                grads, global_step=tf.train.get_global_step())
            # Group the weight update with tower 0's update ops.
            train_op = tf.group(apply_gradient_op, *update_ops)
            predictions_0 = model.predictions(net_out_0)
            model.summary(feature_0, label_0, predictions_0, mode)
            # NOTE: the original re-fetched the full UPDATE_OPS collection into
            # `update_ops` here but never used it afterwards; that dead store
            # was removed.
            training_chief_hooks = [
                CheckpointPerfactSaverHook(
                    self.config.model_dir,
                    save_steps=self.config.model_save_steps)
            ]
            return tf.estimator.EstimatorSpec(
                mode=mode,
                train_op=train_op,
                loss=loss,
                training_chief_hooks=training_chief_hooks)
        return model_fn
|
#!python
#--coding:utf-8--
"""
getBedpeFBed.py
Transfering single-end BED file to paired-end BEDPE file as input of cLoops2 .
"""
#systematic library
import os, time, gzip, argparse, sys
from datetime import datetime
from argparse import RawTextHelpFormatter
#3rd library
#cLoops2
from cLoops2.ds import PET
from cLoops2.utils import cFlush
def help():
    """Parse and return the command line options for getBedpeFBed.py.

    NOTE: shadows the builtin help() — kept for backward compatibility.
    """
    description = """
Transfering single-end BED file to paired-end BEDPE file as input of
cLoops2 for furthur analysis.
The 6th column of the BED file of strand information is used to extend
the fragments.
If no strand information available, default treat it as + strand
Example:
getBedpeFBed.py -f a.bed.gz -o a
"""
    parser = argparse.ArgumentParser(
        description=description, formatter_class=RawTextHelpFormatter)
    # Required input/output options.
    parser.add_argument('-f', dest="fin", required=True, type=str,
                        help="Input bed files, or .bed.gz files. ")
    parser.add_argument('-o', dest="out", required=True, type=str,
                        help="Output file prefix.")
    # Optional fragment-extension length.
    parser.add_argument(
        '-ext', dest="ext", required=False, default=150, type=int,
        help="The expect fragment length of the bed file to extend from 5' to 3', default is 150.")
    return parser.parse_args()
def bed2bedpe(fin, fout, ext=150):
    """
    Extend a single-end BED file into a paired-end BEDPE file.

    Each read is paired with a virtual mate one expected fragment length
    (ext) downstream in the 5'->3' direction of its strand. Records with
    fewer than 6 columns (no strand) are treated as "+" strand.

    @param fin: str, input .bed or .bed.gz file
    @param fout: str, output file; gzipped when it ends with .gz
    @param ext: int, expected fragment length used for the extension
    """
    fino = gzip.open(fin, "rt") if fin.endswith(".gz") else open(fin)
    fo = gzip.open(fout, "wt") if fout.endswith(".gz") else open(fout, "w")
    for i, line in enumerate(fino):
        if i % 10000 == 0:
            cFlush("%s read from %s" % (i, fin))
        line = line.split("\n")[0].split('\t')
        # The no-strand and "+"-strand cases are identical; the two
        # duplicated branches of the original were merged.
        if len(line) < 6 or line[5] == "+":
            # Mate is shifted ext bp downstream (to the right).
            nline = [
                line[0], line[1], line[2], line[0],
                int(line[1]) + ext,
                int(line[2]) + ext, ".", "44", "+", "-"
            ]
        else:
            # "-" strand: mate is shifted ext bp upstream, clamped at 0.
            # BUG FIX: the original did not subtract ext from the start
            # coordinate, producing an inverted/incorrect mate interval.
            nline = [
                line[0],
                max(0, int(line[1]) - ext),
                max(0, int(line[2]) - ext), line[0], line[1], line[2], ".", "44",
                "+", "-"
            ]
        fo.write("\t".join(map(str, nline)) + "\n")
    fino.close()
    fo.close()
def main():
    """CLI entry point: parse options, then convert BED to gzipped BEDPE."""
    options = help()
    bed2bedpe(options.fin, "%s.bedpe.gz" % options.out, ext=options.ext)
if __name__ == "__main__":
    # Time the whole conversion and report it on stderr.
    # NOTE(review): this measures wall-clock time, not CPU time, despite
    # the wording of the message below.
    start_time = datetime.now()
    main()
    usedtime = datetime.now() - start_time
    sys.stderr.write("Process finished. Used CPU time: %s Bye!\n" % usedtime)
|
class Category:
    """A budget category holding a ledger of deposits and withdrawals.

    Each ledger entry is a dict: {"amount": float, "description": str}.
    """

    def __init__(self, category):
        self.category = category
        self.ledger = []

    def __str__(self):
        """Render the category as a star-framed title, item lines, and a total.

        BUG FIX: the total is now formatted with two decimals, consistent
        with the per-item amounts; the original printed the raw float,
        leaking representation artifacts (e.g. 0.30000000000000004).
        """
        title = f"{self.category:*^30}\n"
        lines = ""
        total = 0
        for item in self.ledger:
            # Description truncated to 23 chars; amount right-aligned in 7.
            lines += f"{item['description'][0:23]:23}" + f"{item['amount']:>7.2f}\n"
            total += item['amount']
        return title + lines + f"Total: {total:.2f}"

    def deposit(self, amount, description=""):
        """Add a positive ledger entry (description defaults to empty)."""
        self.ledger.append({"amount": amount, "description": description})

    def withdraw(self, amount, description=""):
        """Record a withdrawal if funds suffice; return True on success."""
        if not self.check_funds(amount):
            return False
        self.ledger.append({"amount": -amount, "description": description})
        return True

    def get_balance(self):
        """Return the sum of all ledger amounts."""
        return sum(item['amount'] for item in self.ledger)

    def transfer(self, amount, category):
        """Move funds to another category; return True on success."""
        if not self.check_funds(amount):
            return False
        self.withdraw(amount, "Transfer to " + category.category)
        category.deposit(amount, "Transfer from " + self.category)
        return True

    def check_funds(self, amount):
        """Return True when the balance covers `amount`."""
        return self.get_balance() >= amount
def create_spend_chart(categories):
    """Render an ASCII bar chart of the percentage spent per category.

    Percentages are truncated to the nearest 10 below; each category gets
    a 3-character column and its name is printed vertically underneath.
    """
    names = [cat.category for cat in categories]
    # Total outflow (absolute value of negative amounts) per category.
    spent_per_cat = [
        sum(-entry['amount'] for entry in cat.ledger if entry['amount'] < 0)
        for cat in categories
    ]
    grand_total = round(sum(spent_per_cat), 2)
    percentages = [int(spent / grand_total * 100) for spent in spent_per_cat]

    rows = ["Percentage spent by category"]
    # Bar rows from 100 down to 0, one per 10%.
    for level in range(100, -10, -10):
        cells = "".join(" o " if pct >= level else "   " for pct in percentages)
        rows.append(f"{level:>3}|" + cells + " ")
    # Horizontal axis: dashes spanning all columns plus one.
    width = len(categories) * 3 + 1
    rows.append(f"{' ':>3} {'':-^{width}}")
    # Vertical category labels, padded to the longest name.
    longest = max(len(name) for name in names)
    for depth in range(longest):
        row = f"{' ':>5}"
        for name in names:
            row += f"{name[depth]}  " if len(name) > depth else "   "
        rows.append(row)
    return "\n".join(rows)
|
#!/usr/bin/env python
'''
Advent of Code 2021 - Day 21: Dirac Dice (Part 1)
https://adventofcode.com/2021/day/21
'''
import re
import time
from collections import deque
from itertools import cycle
WINNING_SCORE = 1000
class Player():
    """A player on the circular 1..10 board of Dirac Dice."""

    def __init__(self, player_id, position) -> None:
        self.id = player_id
        self.position = position  # current board space, 1..10
        self.score = 0

    def move(self, spaces: int) -> None:
        """Advance by `spaces`, wrap within 1..10, and add the landing space to the score.

        BUG FIX: the original annotated the return type as `int`, but the
        method updates state and returns None.
        """
        # Calculate number of spaces between 1 and 10 to move forward
        # (the board repeats every 10 spaces).
        forward = (spaces - 1) % 10 + 1
        # Set the new position between 1 and 10
        self.position = (self.position + forward - 1) % 10 + 1
        # Update the score based on position
        self.score += self.position
def check_for_winner(players: list) -> Player:
    """Return the first player whose score has reached WINNING_SCORE, else None."""
    return next((p for p in players if p.score >= WINNING_SCORE), None)
def main():
    """Read player starting positions from a file, then simulate the game."""
    filename = input("What is the input file name? ")
    try:
        with open(filename, "r") as file:
            players = []
            pattern = r'Player (?P<id>\d+) starting position: (?P<position>\d+)'
            for line in file:
                matches = re.match(pattern, line.strip())
                if matches:
                    players.append(Player(
                        player_id=int(matches.group('id')),
                        position=int(matches.group('position'))))
        # Play the game
        start = time.time()
        rolls = 0
        # Deterministic die: yields 1..100 and wraps around forever.
        roll = cycle(range(1, 101)).__next__
        turns = deque(players)
        while turns:
            # Get the player whose turn it is
            player = turns.popleft()
            # A turn moves forward by the total of 3 consecutive die rolls.
            spaces = roll() + roll() + roll()
            rolls += 3
            player.move(spaces)
            # Check for winner (this assumes there are two players only).
            winner = check_for_winner(players)
            if winner:
                print(
                    f"\nThe winning player is Player {winner.id} with a score of {winner.score}")
                # The only player left in the queue is the loser.
                loser = turns.popleft()
                print(
                    f"The loser's score ({loser.score}) multiplied by number of dice roles ({rolls}): {loser.score * rolls}\n")
                break
            # Game continues. Send the player to the back of the turns queue
            turns.append(player)
        end = time.time()
        print(f"Execution time in seconds: {end - start}\n")
    except FileNotFoundError:
        # BUG FIX: the original printed the literal placeholder '(unknown)'
        # instead of the missing file's name.
        print(f"No such file or directory: '{filename}'")
if __name__ == "__main__":
    # Run the simulation only when executed as a script.
    main()
|
# Generated by Django 2.1.7 on 2019-06-11 13:09
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 2.1.7). Gives PatientEntry.decision_time a
    # timezone-aware default of timezone.now. Generated migrations should
    # not be hand-edited once applied.

    dependencies = [("cspatients", "0017_baby_allow_nulls")]

    operations = [
        migrations.AlterField(
            model_name="patiententry",
            name="decision_time",
            field=models.DateTimeField(default=django.utils.timezone.now),
        )
    ]
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import absolute_import, unicode_literals
from ._clrm import bright, cyan, green
DEFAULT_DUP_COL_HANDLER = "rename"
class ResultLogger(object):
    """Reports successful table conversions via a logger.

    Collaborators: a logger, a schema extractor (provides
    fetch_table_schema), and a result counter (tracks successes).
    """

    @property
    def verbosity_level(self):
        # Verbosity used when dumping table schemas.
        return self.__verbosity_level

    def __init__(self, logger, schema_extractor, result_counter, verbosity_level):
        self.__logger = logger
        self.__schema_extractor = schema_extractor
        self.__result_counter = result_counter
        self.__verbosity_level = verbosity_level

    def logging_success(self, source, table_name, is_create_table):
        """Count a successful conversion and log source -> table schema."""
        schema = self.__schema_extractor.fetch_table_schema(table_name.strip())
        self.__result_counter.inc_success(is_create_table)
        schema_text = schema.dumps(
            output_format="text", verbosity_level=self.__verbosity_level)
        message = "convert '{source:s}' to '{table_info:s}' table".format(
            source=cyan(source), table_info=bright(green(schema_text)))
        self.__logger.info(message)
|
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An interface to boto-based object storage APIs."""
import logging
import time
import boto
import object_storage_interface
class BotoService(object_storage_interface.ObjectStorageServiceBase):
    """An interface to boto-based object storage APIs."""

    def __init__(self, storage_schema, host_to_connect=None):
        # storage_schema: the URI scheme for the provider (e.g. 'gs', 's3').
        # host_to_connect: optional endpoint host to connect to explicitly.
        self.storage_schema = storage_schema
        self.host_to_connect = host_to_connect

    def _StorageURI(self, bucket, object_name=None):
        """Return a storage_uri for the given resource.

        Args:
            bucket: the name of a bucket.
            object_name: the name of an object, if given.

        Returns:
            A storage_uri. If object is given, the uri will be for
            the bucket-object combination. If object is not given, the uri
            will be for the bucket.
        """
        if object_name is not None:
            path = '%s/%s' % (bucket, object_name)
        else:
            path = bucket
        storage_uri = boto.storage_uri(path, self.storage_schema)
        # Only connect explicitly when an endpoint override was supplied.
        if self.host_to_connect is not None:
            storage_uri.connect(host=self.host_to_connect)
        return storage_uri

    def ListObjects(self, bucket, prefix):
        """Return the names of all objects in `bucket` matching `prefix`."""
        bucket_uri = self._StorageURI(bucket)
        return [obj.name for obj in bucket_uri.list_bucket(prefix=prefix)]

    def DeleteObjects(self, bucket, objects_to_delete, objects_deleted=None):
        """Best-effort delete of each named object; failures are logged.

        If `objects_deleted` is a list, successfully deleted names are
        appended to it so callers can track partial progress.
        """
        for object_name in objects_to_delete:
            try:
                object_uri = self._StorageURI(bucket, object_name)
                object_uri.delete_key()
                if objects_deleted is not None:
                    objects_deleted.append(object_name)
            except:  # pylint:disable=bare-except
                # Deliberately broad: one failed delete must not stop the rest.
                logging.exception('Caught exception while deleting object %s.',
                                  object_name)

    # Not implementing WriteObjectFromBuffer because the implementation
    # is different for GCS and S3.

    def ReadObject(self, bucket, object_name):
        """Download an object; return (start_time, latency) in seconds."""
        start_time = time.time()
        object_uri = self._StorageURI(bucket, object_name)
        object_uri.new_key().get_contents_as_string()
        latency = time.time() - start_time
        return start_time, latency
|
"""
=========
PointPens
=========
Where **SegmentPens** have an intuitive approach to drawing
(if you're familiar with postscript anyway), the **PointPen**
is geared towards accessing all the data in the contours of
the glyph. A PointPen has a very simple interface, it just
steps through all the points in a call from glyph.drawPoints().
This allows the caller to provide more data for each point.
For instance, whether or not a point is smooth, and its name.
"""
import math
from typing import Any, Optional, Tuple
from fontTools.pens.basePen import AbstractPen, PenError
__all__ = [
"AbstractPointPen",
"BasePointToSegmentPen",
"PointToSegmentPen",
"SegmentToPointPen",
"GuessSmoothPointPen",
"ReverseContourPointPen",
]
class AbstractPointPen:
    """Baseclass for all PointPens.

    Subclasses implement the four protocol methods below; every method
    here raises NotImplementedError.
    """

    def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None:
        """Start a new sub path."""
        raise NotImplementedError

    def endPath(self) -> None:
        """End the current sub path."""
        raise NotImplementedError

    def addPoint(
        self,
        pt: Tuple[float, float],
        segmentType: Optional[str] = None,
        smooth: bool = False,
        name: Optional[str] = None,
        identifier: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """Add a point to the current sub path.

        segmentType is None for an off-curve point, or one of
        "move"/"line"/"curve"/"qcurve" for an on-curve point.
        """
        raise NotImplementedError

    def addComponent(
        self,
        baseGlyphName: str,
        transformation: Tuple[float, float, float, float, float, float],
        identifier: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """Add a sub glyph, positioned by an affine transformation (2x3)."""
        raise NotImplementedError
class BasePointToSegmentPen(AbstractPointPen):
    """
    Base class for retrieving the outline in a segment-oriented
    way. The PointPen protocol is simple yet also a little tricky,
    so when you need an outline presented as segments but you have
    as points, do use this base implementation as it properly takes
    care of all the edge cases.
    """

    def __init__(self):
        # Points of the sub path currently being built, or None when no
        # path has been begun.
        self.currentPath = None

    def beginPath(self, identifier=None, **kwargs):
        """Start collecting points for a new sub path."""
        if self.currentPath is not None:
            raise PenError("Path already begun.")
        self.currentPath = []

    def _flushContour(self, segments):
        """Override this method.

        It will be called for each non-empty sub path with a list
        of segments: the 'segments' argument.

        The segments list contains tuples of length 2:
            (segmentType, points)

        segmentType is one of "move", "line", "curve" or "qcurve".
        "move" may only occur as the first segment, and it signifies
        an OPEN path. A CLOSED path does NOT start with a "move", in
        fact it will not contain a "move" at ALL.

        The 'points' field in the 2-tuple is a list of point info
        tuples. The list has 1 or more items, a point tuple has
        four items:
            (point, smooth, name, kwargs)
        'point' is an (x, y) coordinate pair.

        For a closed path, the initial moveTo point is defined as
        the last point of the last segment.

        The 'points' list of "move" and "line" segments always contains
        exactly one point tuple.
        """
        raise NotImplementedError

    def endPath(self):
        """Convert the collected points into segments and flush them."""
        if self.currentPath is None:
            raise PenError("Path not begun.")
        points = self.currentPath
        self.currentPath = None
        if not points:
            return
        if len(points) == 1:
            # Not much more we can do than output a single move segment.
            pt, segmentType, smooth, name, kwargs = points[0]
            segments = [("move", [(pt, smooth, name, kwargs)])]
            self._flushContour(segments)
            return
        segments = []
        if points[0][1] == "move":
            # It's an open contour, insert a "move" segment for the first
            # point and remove that first point from the point list.
            pt, segmentType, smooth, name, kwargs = points[0]
            segments.append(("move", [(pt, smooth, name, kwargs)]))
            points.pop(0)
        else:
            # It's a closed contour. Locate the first on-curve point, and
            # rotate the point list so that it _ends_ with an on-curve
            # point.
            firstOnCurve = None
            for i in range(len(points)):
                segmentType = points[i][1]
                if segmentType is not None:
                    firstOnCurve = i
                    break
            if firstOnCurve is None:
                # Special case for quadratics: a contour with no on-curve
                # points. Add a "None" point. (See also the Pen protocol's
                # qCurveTo() method and fontTools.pens.basePen.py.)
                points.append((None, "qcurve", None, None, None))
            else:
                points = points[firstOnCurve+1:] + points[:firstOnCurve+1]

        # Group runs of off-curve points with the on-curve point that
        # terminates each segment.
        currentSegment = []
        for pt, segmentType, smooth, name, kwargs in points:
            currentSegment.append((pt, smooth, name, kwargs))
            if segmentType is None:
                continue
            segments.append((segmentType, currentSegment))
            currentSegment = []

        self._flushContour(segments)

    def addPoint(self, pt, segmentType=None, smooth=False, name=None,
                 identifier=None, **kwargs):
        """Buffer a point; it is processed when endPath() is called."""
        if self.currentPath is None:
            raise PenError("Path not begun")
        self.currentPath.append((pt, segmentType, smooth, name, kwargs))
class PointToSegmentPen(BasePointToSegmentPen):
    """
    Adapter class that converts the PointPen protocol to the
    (Segment)Pen protocol.

    NOTE: The segment pen does not support and will drop point names, identifiers
    and kwargs.
    """

    def __init__(self, segmentPen, outputImpliedClosingLine=False):
        BasePointToSegmentPen.__init__(self)
        self.pen = segmentPen
        # When True, the closing lineTo of a closed contour is always
        # emitted explicitly (see _flushContour below).
        self.outputImpliedClosingLine = outputImpliedClosingLine

    def _flushContour(self, segments):
        """Replay one contour's segments through the wrapped segment pen."""
        if not segments:
            raise PenError("Must have at least one segment.")
        pen = self.pen
        if segments[0][0] == "move":
            # It's an open path.
            closed = False
            points = segments[0][1]
            if len(points) != 1:
                raise PenError(f"Illegal move segment point count: {len(points)}")
            movePt, _, _ , _ = points[0]
            del segments[0]
        else:
            # It's a closed path, do a moveTo to the last
            # point of the last segment.
            closed = True
            segmentType, points = segments[-1]
            movePt, _, _ , _ = points[-1]
        if movePt is None:
            # quad special case: a contour with no on-curve points contains
            # one "qcurve" segment that ends with a point that's None. We
            # must not output a moveTo() in that case.
            pass
        else:
            pen.moveTo(movePt)
        outputImpliedClosingLine = self.outputImpliedClosingLine
        nSegments = len(segments)
        lastPt = movePt
        for i in range(nSegments):
            segmentType, points = segments[i]
            # Names/identifiers/kwargs are dropped here by design.
            points = [pt for pt, _, _ , _ in points]
            if segmentType == "line":
                if len(points) != 1:
                    raise PenError(f"Illegal line segment point count: {len(points)}")
                pt = points[0]
                # For closed contours, a 'lineTo' is always implied from the last oncurve
                # point to the starting point, thus we can omit it when the last and
                # starting point don't overlap.
                # However, when the last oncurve point is a "line" segment and has same
                # coordinates as the starting point of a closed contour, we need to output
                # the closing 'lineTo' explicitly (regardless of the value of the
                # 'outputImpliedClosingLine' option) in order to disambiguate this case from
                # the implied closing 'lineTo', otherwise the duplicate point would be lost.
                # See https://github.com/googlefonts/fontmake/issues/572.
                if (
                    i + 1 != nSegments
                    or outputImpliedClosingLine
                    or not closed
                    or pt == lastPt
                ):
                    pen.lineTo(pt)
                    lastPt = pt
            elif segmentType == "curve":
                pen.curveTo(*points)
                lastPt = points[-1]
            elif segmentType == "qcurve":
                pen.qCurveTo(*points)
                lastPt = points[-1]
            else:
                raise PenError(f"Illegal segmentType: {segmentType}")
        if closed:
            pen.closePath()
        else:
            pen.endPath()

    def addComponent(self, glyphName, transform, identifier=None, **kwargs):
        """Forward a component, dropping identifier/kwargs (unsupported)."""
        del identifier  # unused
        del kwargs  # unused
        self.pen.addComponent(glyphName, transform)
class SegmentToPointPen(AbstractPen):
    """
    Adapter class that converts the (Segment)Pen protocol to the
    PointPen protocol.
    """

    def __init__(self, pointPen, guessSmooth=True):
        # Segment pens carry no smoothness info, so by default wrap the
        # target pen in GuessSmoothPointPen to infer "smooth" flags.
        if guessSmooth:
            self.pen = GuessSmoothPointPen(pointPen)
        else:
            self.pen = pointPen
        # Accumulates (pt, segmentType) pairs for the contour in progress.
        self.contour = None

    def _flushContour(self):
        # Replay the buffered contour through the point pen.
        pen = self.pen
        pen.beginPath()
        for pt, segmentType in self.contour:
            pen.addPoint(pt, segmentType=segmentType)
        pen.endPath()

    def moveTo(self, pt):
        self.contour = []
        self.contour.append((pt, "move"))

    def lineTo(self, pt):
        if self.contour is None:
            raise PenError("Contour missing required initial moveTo")
        self.contour.append((pt, "line"))

    def curveTo(self, *pts):
        if not pts:
            raise TypeError("Must pass in at least one point")
        if self.contour is None:
            raise PenError("Contour missing required initial moveTo")
        # All but the last point are off-curve control points.
        for pt in pts[:-1]:
            self.contour.append((pt, None))
        self.contour.append((pts[-1], "curve"))

    def qCurveTo(self, *pts):
        if not pts:
            raise TypeError("Must pass in at least one point")
        if pts[-1] is None:
            # TrueType special case: a contour with no on-curve points
            # starts implicitly, without a moveTo.
            self.contour = []
        else:
            if self.contour is None:
                raise PenError("Contour missing required initial moveTo")
        for pt in pts[:-1]:
            self.contour.append((pt, None))
        if pts[-1] is not None:
            self.contour.append((pts[-1], "qcurve"))

    def closePath(self):
        if self.contour is None:
            raise PenError("Contour missing required initial moveTo")
        if len(self.contour) > 1 and self.contour[0][0] == self.contour[-1][0]:
            # The contour ends where it started: drop the duplicate start
            # point and let the last point (with its segment type) stand in.
            self.contour[0] = self.contour[-1]
            del self.contour[-1]
        else:
            # There's an implied line at the end, replace "move" with "line"
            # for the first point
            pt, tp = self.contour[0]
            if tp == "move":
                self.contour[0] = pt, "line"
        self._flushContour()
        self.contour = None

    def endPath(self):
        if self.contour is None:
            raise PenError("Contour missing required initial moveTo")
        self._flushContour()
        self.contour = None

    def addComponent(self, glyphName, transform):
        if self.contour is not None:
            raise PenError("Components must be added before or after contours")
        self.pen.addComponent(glyphName, transform)
class GuessSmoothPointPen(AbstractPointPen):
    """
    Filtering PointPen that tries to determine whether an on-curve point
    should be "smooth", ie. that it's a "tangent" point or a "curve" point.
    """

    def __init__(self, outPen, error=0.05):
        # error: maximum angle difference (radians) between the incoming
        # and outgoing direction for a point to be considered smooth.
        self._outPen = outPen
        self._error = error
        self._points = None

    def _flushContour(self):
        """Mark qualifying on-curve points smooth, then forward the contour."""
        if self._points is None:
            raise PenError("Path not begun")
        points = self._points
        nPoints = len(points)
        if not nPoints:
            return
        if points[0][1] == "move":
            # Open path.
            indices = range(1, nPoints - 1)
        elif nPoints > 1:
            # Closed path. To avoid having to mod the contour index, we
            # simply abuse Python's negative index feature, and start at -1
            indices = range(-1, nPoints - 1)
        else:
            # closed path containing 1 point (!), ignore.
            indices = []
        for i in indices:
            pt, segmentType, _, name, kwargs = points[i]
            if segmentType is None:
                # Off-curve points can't be smooth.
                continue
            prev = i - 1
            next = i + 1
            if points[prev][1] is not None and points[next][1] is not None:
                # Both neighbors are on-curve: a corner, never smooth.
                continue
            # At least one of our neighbors is an off-curve point
            pt = points[i][0]
            prevPt = points[prev][0]
            nextPt = points[next][0]
            if pt != prevPt and pt != nextPt:
                # Compare the incoming and outgoing tangent directions.
                dx1, dy1 = pt[0] - prevPt[0], pt[1] - prevPt[1]
                dx2, dy2 = nextPt[0] - pt[0], nextPt[1] - pt[1]
                a1 = math.atan2(dy1, dx1)
                a2 = math.atan2(dy2, dx2)
                if abs(a1 - a2) < self._error:
                    points[i] = pt, segmentType, True, name, kwargs
        for pt, segmentType, smooth, name, kwargs in points:
            self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs)

    def beginPath(self, identifier=None, **kwargs):
        if self._points is not None:
            raise PenError("Path already begun")
        self._points = []
        if identifier is not None:
            kwargs["identifier"] = identifier
        self._outPen.beginPath(**kwargs)

    def endPath(self):
        self._flushContour()
        self._outPen.endPath()
        self._points = None

    def addPoint(self, pt, segmentType=None, smooth=False, name=None,
                 identifier=None, **kwargs):
        # Incoming smooth flags are deliberately discarded (set to False);
        # smoothness is re-derived in _flushContour().
        if self._points is None:
            raise PenError("Path not begun")
        if identifier is not None:
            kwargs["identifier"] = identifier
        self._points.append((pt, segmentType, False, name, kwargs))

    def addComponent(self, glyphName, transformation, identifier=None, **kwargs):
        if self._points is not None:
            raise PenError("Components must be added before or after contours")
        if identifier is not None:
            kwargs["identifier"] = identifier
        self._outPen.addComponent(glyphName, transformation, **kwargs)
class ReverseContourPointPen(AbstractPointPen):
    """
    This is a PointPen that passes outline data to another PointPen, but
    reversing the winding direction of all contours. Components are simply
    passed through unchanged.

    Closed contours are reversed in such a way that the first point remains
    the first point.
    """

    def __init__(self, outputPointPen):
        self.pen = outputPointPen
        # a place to store the points for the current sub path
        self.currentContour = None

    def _flushContour(self):
        """Reverse the buffered contour and replay it through the output pen."""
        pen = self.pen
        contour = self.currentContour
        if not contour:
            # Empty contour: forward it as-is.
            pen.beginPath(identifier=self.currentContourIdentifier)
            pen.endPath()
            return
        closed = contour[0][1] != "move"
        if not closed:
            lastSegmentType = "move"
        else:
            # Remove the first point and insert it at the end. When
            # the list of points gets reversed, this point will then
            # again be at the start. In other words, the following
            # will hold:
            #   for N in range(len(originalContour)):
            #       originalContour[N] == reversedContour[-N]
            contour.append(contour.pop(0))
            # Find the first on-curve point.
            firstOnCurve = None
            for i in range(len(contour)):
                if contour[i][1] is not None:
                    firstOnCurve = i
                    break
            if firstOnCurve is None:
                # There are no on-curve points, be basically have to
                # do nothing but contour.reverse().
                lastSegmentType = None
            else:
                lastSegmentType = contour[firstOnCurve][1]

        contour.reverse()
        if not closed:
            # Open paths must start with a move, so we simply dump
            # all off-curve points leading up to the first on-curve.
            while contour[0][1] is None:
                contour.pop(0)
        pen.beginPath(identifier=self.currentContourIdentifier)
        for pt, nextSegmentType, smooth, name, kwargs in contour:
            if nextSegmentType is not None:
                # After reversal, each on-curve point takes the segment type
                # of the following on-curve point in the original direction.
                segmentType = lastSegmentType
                lastSegmentType = nextSegmentType
            else:
                segmentType = None
            pen.addPoint(pt, segmentType=segmentType, smooth=smooth, name=name, **kwargs)
        pen.endPath()

    def beginPath(self, identifier=None, **kwargs):
        if self.currentContour is not None:
            raise PenError("Path already begun")
        self.currentContour = []
        self.currentContourIdentifier = identifier
        # NOTE(review): self.onCurve appears to be written here but never
        # read anywhere in this class — verify before removing.
        self.onCurve = []

    def endPath(self):
        if self.currentContour is None:
            raise PenError("Path not begun")
        self._flushContour()
        self.currentContour = None

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
        if self.currentContour is None:
            raise PenError("Path not begun")
        if identifier is not None:
            kwargs["identifier"] = identifier
        self.currentContour.append((pt, segmentType, smooth, name, kwargs))

    def addComponent(self, glyphName, transform, identifier=None, **kwargs):
        if self.currentContour is not None:
            raise PenError("Components must be added before or after contours")
        self.pen.addComponent(glyphName, transform, identifier=identifier, **kwargs)
|
import json
import matplotlib.pyplot as plt
import requests
import server_config
def draw_plot(interval):
    """Fetch the id0 collection for `interval` from the server and plot it.

    Returns {'result': 'ok'} on success, or {'result': 'http-error'} when
    the HTTP request or response parsing fails.
    """
    try:
        url = "{}/id0_collection/{}".format(server_config.server_url, interval)
        r = requests.get(url=url)
        # The server answers with single-quoted pseudo-JSON; normalize it.
        rows = json.loads(r.text.replace("'", '"'))
    except Exception:
        # Deliberately broad: any network/decoding problem is reported as a
        # single error result instead of crashing the caller.
        return {'result': 'http-error'}
    rows = rows['result']
    print(rows)
    dates = []
    timestamp = []
    open_price = []
    num_3, num_3i = [], []
    num_6, num_6i = [], []
    num_9, num_9i = [], []
    num_100, num_100i = [], []
    for cnt, row in enumerate(rows, start=1):
        # BUG FIX: the original tested `cnt % 100 is 0`; identity comparison
        # against an int literal is wrong — use == for equality.
        if cnt % 100 == 0:
            # Label every 100th sample with its date (minute precision).
            dates.append(row['timestamp'][:16])
        else:
            dates.append('')
        timestamp.append(row['timestamp'])
        open_price.append(row['open'])
        num_3.append(row['num_3'])
        num_3i.append(row['num_3i'])
        num_6.append(row['num_6'])
        num_6i.append(row['num_6i'])
        num_9.append(row['num_9'])
        num_9i.append(row['num_9i'])
        num_100.append(row['num_100'])
        num_100i.append(row['num_100i'])
    # BUG FIX: plt.figure(figsize=...) followed by plt.subplots() created a
    # second figure and discarded the requested size; pass figsize directly.
    fig, ax1 = plt.subplots(figsize=(16, 12))
    ax1.set_ylabel('Price')
    ax1.plot(timestamp, open_price, 'C0', label='open')
    ax1.plot(timestamp, num_3, 'C1', label='num_3')
    ax1.plot(timestamp, num_6, 'C3', label='num_6')
    ax1.plot(timestamp, num_9, 'C5', label='num_9')
    ax1.plot(timestamp, num_100, 'C7', label='num_100')
    # Secondary y-axis for the *_i ("imagine") series, drawn dotted.
    ax2 = ax1.twinx()
    ax2.set_ylabel('Imagine')
    ax2.plot(timestamp, num_3i, 'C2:', label='num_3i')
    ax2.plot(timestamp, num_6i, 'C4:', label='num_6i')
    ax2.plot(timestamp, num_9i, 'C6:', label='num_9i')
    ax2.plot(timestamp, num_100i, 'C8:', label='num_100i')
    # NOTE(review): set_ticks() normally expects tick *positions*; passing
    # label strings reproduces the original behavior — verify rendering.
    frame1 = fig.gca()
    frame1.axes.get_xaxis().set_ticks(dates)
    plt.title('Bitmex-id0({})'.format(interval))
    plt.xlabel('Timestamp')
    ax1.legend()
    ax2.legend()
    fig.tight_layout()
    plt.show()
    return {'result': 'ok'}
if __name__ == '__main__':
    # Manual smoke test: render charts for two sample intervals.
    print(draw_plot('5m'))
    print(draw_plot('1h'))
|
# Generated by Django 3.0.2 on 2020-02-12 17:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Hash.turn_count`` (added in 0002) to a ``PositiveIntegerField``."""

    dependencies = [
        ('hash', '0002_hash_turn_count'),
    ]

    operations = [
        migrations.AlterField(
            model_name='hash',
            name='turn_count',
            # PositiveIntegerField adds a >= 0 validator (and, on most
            # backends, a database-level check constraint).
            field=models.PositiveIntegerField(),
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""An implementation of the overfitting test for the Transformer model.
A simple test, which often signifies bugs in the implementation of a model, is the overfitting test. To that end, the
considered model is trained and evaluated on the same tiny dataset, which it should be able to overfit easily.
Therefore, the final model should yield very high probabilities for the desired target values. If this is not the case,
however, then there is probably something wrong with the tested model and/or its implementation.
In this module, we test our implementation of the Transformer model on a super-simple translation task from German to
English. To that end, the considered corpus consists of 5 short and already pre-processed sentences, and is specified in
this file (see below).
"""
import collections
import itertools
import typing
import torch
import transformer
from torch import nn
from torch import optim
__author__ = "Patrick Hohenecker"
__copyright__ = (
"Copyright (c) 2018, Patrick Hohenecker\n"
"All rights reserved.\n"
"\n"
"Redistribution and use in source and binary forms, with or without\n"
"modification, are permitted provided that the following conditions are met:\n"
"\n"
"1. Redistributions of source code must retain the above copyright notice, this\n"
" list of conditions and the following disclaimer.\n"
"2. Redistributions in binary form must reproduce the above copyright notice,\n"
" this list of conditions and the following disclaimer in the documentation\n"
" and/or other materials provided with the distribution.\n"
"\n"
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n"
"ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n"
"WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n"
"DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n"
"ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n"
"(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n"
"LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n"
"ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n"
"SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
)
__license__ = "BSD-2-Clause"
__version__ = "2018.1"
__date__ = "Aug 29, 2018"
__maintainer__ = "Patrick Hohenecker"
__email__ = "mail@paho.at"
__status__ = "Development"
# Lightweight record pairing a vocabulary index with its word form.
Token = collections.namedtuple("Token", ["index", "word"])
"""This is used to store index-word pairs."""

# ==================================================================================================================== #
#  C O N S T A N T S                                                                                                   #
# ==================================================================================================================== #

# PARALLEL DATA ######################################################################################################

# Five already-tokenized German sentences; the i-th entry translates to the
# i-th entry of DATA_ENGLISH.
DATA_GERMAN = [
    "Alle warten auf das Licht .",
    "Fürchtet euch , fürchtet euch nicht .",
    "Die Sonne scheint mir aus den Augen .",
    "Sie wird heut ' Nacht nicht untergehen .",
    "Und die Welt zählt laut bis 10 ."
]

# English translations of DATA_GERMAN, token-for-token aligned by sentence.
DATA_ENGLISH = [
    "Everyone is waiting for the light .",
    "Be afraid , do not be afraid .",
    "The sun is shining out of my eyes .",
    "It will not go down tonight .",
    "And the world counts up to 10 loudly ."
]

# SPECIAL TOKENS #####################################################################################################

SOS = Token(0, "<sos>")
"""str: The start-of-sequence token."""

EOS = Token(1, "<eos>")
"""str: The end-of-sequence token."""

PAD = Token(2, "<pad>")
"""str: The padding token."""

# MODEL CONFIG #######################################################################################################

EMBEDDING_SIZE = 300
"""int: The used embedding size."""

GPU = False  # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SET THIS TO True, IF YOU ARE USING A MACHINE WITH A GPU!
"""bool: Indicates whether to make use of a GPU."""

NUM_EPOCHS = 200
"""int: The total number of training epochs."""
# ==================================================================================================================== #
# H E L P E R F U N C T I O N S #
# ==================================================================================================================== #
def eval_model(model: transformer.Transformer, input_seq: torch.LongTensor, target_seq: torch.LongTensor) -> None:
    """Evaluates the provided model on the given data, and prints the probabilities of the desired translations.

    Args:
        model (:class:`transformer.Transformer`): The model to evaluate.
        input_seq (torch.LongTensor): The input sequences, as (batch-size x max-input-seq-len) tensor.
        target_seq (torch.LongTensor): The target sequences, as (batch-size x max-target-seq-len) tensor.
    """
    # eval_probability yields one probability per sample; detach from the
    # graph before converting to a plain Python list for printing.
    probs = transformer.eval_probability(model, input_seq, target_seq, pad_index=PAD.index).detach().numpy().tolist()

    # Print sample indices and their probabilities in two aligned rows.
    print("sample " + ("{} " * len(probs)).format(*range(len(probs))))
    print("probability " + ("{:.6f} " * len(probs)).format(*probs))
def fetch_vocab() -> typing.Tuple[typing.List[str], typing.Dict[str, int]]:
    """Determines the vocabulary, and provides mappings from indices to words and vice versa.

    Returns:
        tuple: A pair of mappings, index-to-word and word-to-index.
    """
    # Collect every lower-cased token occurring on either side of the corpus.
    corpus_words = {
        token.lower()
        for sentence in itertools.chain(DATA_GERMAN, DATA_ENGLISH)
        for token in sentence.split(" ")
    }

    # Special tokens occupy the first three indices; everything else follows
    # in alphabetical order.
    idx_to_word = [SOS.word, EOS.word, PAD.word]
    idx_to_word.extend(sorted(corpus_words))

    # Invert the list into a word -> index lookup table.
    word_to_idx = {}
    for index, word in enumerate(idx_to_word):
        word_to_idx[word] = index

    return idx_to_word, word_to_idx
def prepare_data(word_to_idx: typing.Dict[str, int]) -> typing.Tuple[torch.LongTensor, torch.LongTensor]:
    """Prepares the data as PyTorch ``LongTensor``s.

    Args:
        word_to_idx (dict[str, int]): A dictionary that maps words to indices in the vocabulary.

    Returns:
        tuple: A pair of ``LongTensor``s, the first representing the input and the second the target sequence.
    """
    def tokenize(corpus):
        # Wrap every sentence in <sos> ... <eos> word tokens.
        return [[SOS.word] + line.split(" ") + [EOS.word] for line in corpus]

    def pad(sentences):
        # Right-pad every token list with <pad> up to the longest sentence.
        width = max(len(tokens) for tokens in sentences)
        return [tokens + [PAD.word] * (width - len(tokens)) for tokens in sentences]

    def encode(sentences):
        # Map lower-cased tokens to vocabulary indices, wrapped in a LongTensor.
        return torch.LongTensor([[word_to_idx[tok.lower()] for tok in tokens] for tokens in sentences])

    german = encode(pad(tokenize(DATA_GERMAN)))
    english = encode(pad(tokenize(DATA_ENGLISH)))
    return german, english
# ==================================================================================================================== #
# M A I N #
# ==================================================================================================================== #
def main():
    """Run the overfitting test: train on the tiny corpus and report probabilities and samples."""
    # fetch vocabulary + prepare data
    idx_to_word, word_to_idx = fetch_vocab()
    input_seq, target_seq = prepare_data(word_to_idx)

    # create embeddings to use
    emb = nn.Embedding(len(idx_to_word), EMBEDDING_SIZE)
    emb.reset_parameters()

    # create transformer model
    model = transformer.Transformer(
        emb,
        PAD.index,
        emb.num_embeddings,
        max_seq_len=max(input_seq.size(1), target_seq.size(1))
    )

    # create an optimizer for training the model + a X-entropy loss
    optimizer = optim.Adam((param for param in model.parameters() if param.requires_grad), lr=0.0001)
    loss = nn.CrossEntropyLoss()

    print("Initial Probabilities of Translations:")
    print("--------------------------------------")
    eval_model(model, input_seq, target_seq)
    print()

    # move model + data on the GPU (if possible)
    if GPU:
        model.cuda()
        input_seq = input_seq.cuda()
        target_seq = target_seq.cuda()

    # train the model
    for epoch in range(NUM_EPOCHS):
        print("training epoch {}...".format(epoch + 1), end=" ")

        predictions = model(input_seq, target_seq)
        optimizer.zero_grad()
        # Flatten (batch x seq-len x vocab) so every token position is scored.
        # NOTE(review): <pad> positions are scored too — presumably acceptable
        # for an overfitting test; confirm before reusing this loop elsewhere.
        current_loss = loss(
            predictions.view(predictions.size(0) * predictions.size(1), predictions.size(2)),
            target_seq.view(-1)
        )
        current_loss.backward()
        optimizer.step()

        print("OK (loss: {:.6f})".format(current_loss.item()))

    # put model in evaluation mode
    model.eval()

    print()
    print("Final Probabilities of Translations:")
    print("------------------------------------")
    eval_model(model, input_seq, target_seq)

    # randomly sample outputs from the input sequences based on the probabilities computed by the trained model
    sampled_output = transformer.sample_output(model, input_seq, EOS.index, PAD.index, target_seq.size(1))
    print()
    print("Sampled Outputs:")
    print("----------------")
    # Print each input sentence followed by the model's sampled translation.
    for sample_idx in range(input_seq.size(0)):
        for token_idx in range(input_seq.size(1)):
            print(idx_to_word[input_seq[sample_idx, token_idx].item()], end=" ")
        print(" => ", end=" ")
        for token_idx in range(sampled_output.size(1)):
            print(idx_to_word[sampled_output[sample_idx, token_idx].item()], end=" ")
        print()
# Script entry point.
if __name__ == "__main__":
    main()
|
#!/usr/bin/python3
import ssl, json
from os.path import isfile, isdir
from os import sep, name
from sys import exit, argv, version_info, stdout, stderr
from getopt import getopt
from xmlrpc.client import ServerProxy
from urllib.parse import quote_plus
from uuid import uuid4
pluginVersion = "18.12"

# Connection/configuration state; filled in by the command-line options below.
hostName = None
userName = None
password = None
logserviceId = None
opts, args = None, None
cacheDirectory = None
uuidFile = None
uuidString = None
blacklistFile = '/opt/exasol/monitoring/check_logservice.blacklist'

# Preferred cache location for the per-host UUID file; fall back to the
# system temp dir when the Nagios cache directory does not exist.
cacheDirectory = r'/var/cache/nagios'
if not isdir(cacheDirectory):
    from tempfile import gettempdir
    cacheDirectory = gettempdir()
# Parse command-line options; any parse failure falls back to showing the help.
try:
    opts, args = getopt(argv[1:], 'hVH:i:u:p:b:')
except Exception:  # was a bare except; GetoptError is not imported, so stay broad but let SystemExit/KeyboardInterrupt through
    print("Unknown parameter(s): %s" % argv[1:])
    opts = [['-h', None]]

for opt in opts:
    parameter = opt[0]
    value = opt[1]
    if parameter == '-h':
        print("""
EXAoperation XMLRPC log service monitor (version %s)

Options:
    -h                      shows this help
    -V                      shows the plugin version
    -H <license server>     domain of IP of your license server
    -i <logservice id>      integer id of the used logservice
    -u <user login>         EXAoperation login user
    -p <password>           EXAoperation login password
    -b <blacklist file>     Blacklist all unwanted logservice lines
""" % (pluginVersion))
        exit(0)

    elif parameter == '-V':
        print("EXAoperation XMLRPC log service monitor (version %s)" % pluginVersion)
        exit(0)

    elif parameter == '-H':
        hostName = value.strip()

    elif parameter == '-u':
        userName = value.strip()

    elif parameter == '-p':
        password = value.strip()

    elif parameter == '-i':
        logserviceId = int(value)

    elif parameter == '-b':
        blacklistFile = value.strip()

# All four connection parameters are mandatory; logservice id 0 is valid,
# hence the explicit "is not None" (fixed from "!= None").
if not (hostName and userName and password and logserviceId is not None):
    print('Please define at least the following parameters: -H -u -p -i')
    exit(4)
def XmlRpcCall(urlPath = ''):
    """Return an XML-RPC proxy for the EXAoperation endpoint at *urlPath*.

    Credentials are embedded into the URL (HTTP basic auth style).
    """
    url = 'https://%s:%s@%s/cluster1%s' % (quote_plus(userName), quote_plus(password), hostName, urlPath)
    sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
    # NOTE(review): certificate verification and hostname checking are
    # disabled — presumably because license servers use self-signed
    # certificates; acceptable only on trusted networks.
    sslcontext.verify_mode = ssl.CERT_NONE
    sslcontext.check_hostname = False
    return ServerProxy(url, context=sslcontext)
# Main monitoring flow: load the blacklist, ensure a stable per-host UUID,
# fetch tagged log entries and map their priorities to Nagios exit codes
# (0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN).
try:
    # Load blacklist patterns (one substring per line) if the file exists.
    blacklistArray = []
    if isfile(blacklistFile):
        with open(blacklistFile) as f:
            line = f.readline().strip()
            while (line):
                blacklistArray.append(line)
                line = f.readline().strip()

    # A persisted random UUID makes the logservice tag unique per installation.
    uuidFile = '%s%scheck_logservice_%s_%s.uuid' % (cacheDirectory, sep, logserviceId, hostName)
    if isfile(uuidFile):
        with open(uuidFile) as f:
            uuidString = f.read().strip()
    else:
        with open(uuidFile, 'w') as f:
            uuidString = uuid4().hex
            f.write(uuidString)

    cluster = XmlRpcCall('/')
    logservice = XmlRpcCall('/logservice%i' % logserviceId)
    logserviceUserId = 'check_logservice_%s_%s_%s_%i' % (uuidString, hostName, userName, logserviceId)
    # Tagged reads only return entries not yet seen under this tag.
    logEntries = logservice.logEntriesTagged(logserviceUserId)

    logMessages = ''
    logPriority = 0  # bitmask: 1 = warning seen, 2 = error seen
    for logEntry in logEntries[2]:
        logEntryPriority = logEntry['priority']
        if logEntryPriority in ['Warning','Error'] and not any(item in logEntry['message'] for item in blacklistArray):
            # '|' introduces Nagios performance data, so strip it from messages.
            logMessages += '\n%s' % logEntry['message'].replace('|', '!')
            if logEntryPriority == 'Warning': logPriority |= 1
            elif logEntryPriority == 'Error': logPriority |= 2

    # Errors dominate warnings when both are present.
    if logPriority > 0:
        if logPriority & 2:
            print('CRITICAL - log messages found - please check logservice on cluster | %s' % (logMessages))
            exit(2)
        elif logPriority & 1:
            print('WARNING - log messages found - please check logservice on cluster | %s' % (logMessages))
            exit(1)
    else:
        print('OK - No new messages found')
        exit(0)

except Exception as e:
    # Scrub credentials out of the error text before echoing it to Nagios.
    message = str(e).replace('%s:%s@%s' % (userName, password, hostName), hostName)
    if 'unauthorized' in message.lower():
        print('no access to EXAoperation: username or password wrong')
    elif 'Unexpected Zope exception: NotFound: Object' in message:
        print('database instance not found')
    else:
        print('UNKNOWN - internal error %s | ' % message.replace('|', '!').replace('\n', ';'))
    exit(3)
|
#!/usr/bin/python3
"""Encrypt and decrypt a string with a simple algorithm.
To encrypt, reverse and increase the ASCII code by 1. To decrypt, do the
reverse.
"""
def encrypt(plain):
    """Encrypt *plain* by shifting every character's code point up by one and
    reversing the resulting sequence."""
    shifted = [chr(ord(symbol) + 1) for symbol in plain]
    return ''.join(reversed(shifted))
def decrypt(encrypted):
    """Invert :func:`encrypt`: shift every character's code point down by one
    and reverse the resulting sequence."""
    shifted = [chr(ord(symbol) - 1) for symbol in encrypted]
    return ''.join(reversed(shifted))
def main():
    """Interactively demonstrate encrypt() and decrypt() on user input."""
    plain = input("Enter some plain text ")
    print(f"Encrypting {plain}")
    encrypted = encrypt(plain)
    print(encrypted)

    encrypted = input("Enter some encrypted text ")
    # bug fix: the original message said "Encrypting" while decrypting.
    print(f"Decrypting {encrypted}")
    plain = decrypt(encrypted)
    print(plain)
# Run the interactive demo when executed as a script.
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8
from datetime import timedelta
import mock
import pytest
import time
from dockerma.cache import FileCache, Entry
TEST_KEY = "test-key"
TEST_DATA = "test-data"
TEST_ENTRY_PATH = "test-entry-path"
class TestCache(object):
    """Unit tests for ``dockerma.cache.FileCache`` with all filesystem and
    pickle access mocked out."""

    @pytest.fixture
    def cache(self, request):
        # A cache namespaced by the test's node id so tests don't collide.
        yield FileCache(request.node.nodeid)

    @pytest.fixture
    def mock_os(self):
        # Replace the os module as seen from inside dockerma.cache.
        with mock.patch("dockerma.cache.os") as m:
            yield m

    @pytest.fixture
    def load_entry_data(self):
        # Stub out open() and pickle.load so reads return canned entries.
        with mock.patch("dockerma.cache.open", mock.mock_open()) as _, \
                mock.patch("dockerma.cache.pickle.load") as mock_load:
            yield mock_load

    @pytest.fixture
    def save_entry_data(self):
        # Stub out open() and pickle.dump to capture what gets written.
        with mock.patch("dockerma.cache.open", mock.mock_open()) as mock_open, \
                mock.patch("dockerma.cache.pickle.dump") as mock_dump:
            yield mock_open, mock_dump

    @pytest.fixture(autouse=True)
    def get_entry_path(self, cache):
        # Every test resolves keys to the same fake entry path.
        with mock.patch.object(cache, "_get_entry_path") as m:
            m.return_value = TEST_ENTRY_PATH
            yield m

    def test_delete(self, cache, mock_os):
        """delete() unlinks the entry file when it exists."""
        mock_os.path.exists.return_value = True
        cache.delete(TEST_KEY)
        mock_os.unlink.assert_called_once_with(TEST_ENTRY_PATH)

    def test_get(self, cache, mock_os, load_entry_data):
        """get() returns the payload stored inside the unpickled Entry."""
        mock_os.path.exists.return_value = True
        load_entry_data.return_value = Entry(0, TEST_DATA)
        value = cache.get(TEST_KEY)
        assert value == TEST_DATA

    @pytest.mark.usefixtures("mock_os")
    def test_set(self, cache, save_entry_data):
        """set() pickles an Entry to the entry path, opened in binary mode."""
        mock_open, mock_dump = save_entry_data
        cache.set(TEST_KEY, TEST_DATA)
        mock_dump.assert_called_with(Entry(mock.ANY, TEST_DATA), mock.ANY)
        mock_open.assert_called_with(TEST_ENTRY_PATH, "wb")

    def test_clean_expired(self, cache, mock_os):
        """clean_expired() deletes only entries whose timestamp is in the past."""
        mock_os.walk.return_value = [("/root", [], ["too_old", "fresh"])]
        mock_os.path.join.side_effect = lambda *args: args[-1]
        cache._get_entry = mock.MagicMock()
        cache._delete_entry = mock.MagicMock()

        def entries(path):
            # "too_old" expired a day ago; "fresh" expires a day from now.
            if path == "too_old":
                return Entry(time.time() - timedelta(days=1).total_seconds(), TEST_DATA)
            else:
                return Entry(time.time() + timedelta(days=1).total_seconds(), TEST_DATA)

        cache._get_entry.side_effect = entries
        cache.clean_expired()
        assert cache._delete_entry.call_args_list == [mock.call("too_old")]
|
from setuptools import find_packages, setup
# Packaging metadata for PyFLocker.
setup(
    name="PyFLocker",
    version="0.3.1",
    author="Arunanshu Biswas",
    author_email="mydellpc07@gmail.com",
    packages=find_packages(exclude=["tests"]),
    description="Python Cryptographic (File Locking) Library",
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    license="MIT License",
    classifiers=[
        "Programming Language :: Python :: 3",
        # bug fix: the 3.7 classifier was removed because it contradicted
        # python_requires=">=3.8" below.
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Intended Audience :: Developers",
        "Topic :: Security :: Cryptography",
    ],
    python_requires=">=3.8",
    install_requires=[
        "cryptography>=2.7",
        # for best case: we don't want python to
        # ignore an older Crypto library and later
        # this library raises errors!
        "pycryptodomex>=3.9.8",
    ],
    url="https://github.com/arunanshub/pyflocker",
)
|
"""Top-level package for Yet Another Workflow Language for Python."""
__author__ = """Fabio Fumarola"""
__email__ = 'fabiofumarola@gmail.com'
__version__ = '0.3.0'
|
#!/usr/bin/env python
# pyClearURLs
# Copyright (c) 2020 pilate
# Copyright (c) 2020-present Marco Romanelli
# See LICENSE for details.
from collections import defaultdict
from urllib.parse import unquote
import re
import json
import os.path
# Location of the bundled ClearURLs rules database.
PATH_PACKAGE_DATA = os.path.join(os.path.dirname(os.path.realpath(__file__)), "package_data")
PATH_RULESETS = os.path.join(PATH_PACKAGE_DATA, "data.min.json")

URL_RE = re.compile("\\?.*")      # strips everything from the first '?' on (keeps the "domain" part)
QUERY_RE = re.compile(".*?\\?")   # strips everything up to and including the first '?' (keeps the query)
FINAL_RE = re.compile("[^\\/|\\?|&]+=[^\\/|\\?|&]+")  # matches surviving key=value query fields
class URLCleaner:
    """This is the main class of the module.

    It compiles the regular expressions found in the rules database (loaded
    from ``PATH_RULESETS``) and provides the method to clean URLs.
    """

    def __init__(self):
        """Constructor method.  Loads and compiles the bundled ClearURLs rules."""
        with open(PATH_RULESETS, "r") as infile:
            database = json.load(infile)
        self._compile_rules(database)

    def _compile_rules(self, database):
        """Compile the regular expressions found inside the ClearURLs rules.

        :param database: ClearURLs rules
        :type database: dict
        """
        # Maps compiled provider urlPattern -> {"param_rules": [...],
        # "exceptions": [...], "redirections": [...], "full_rules": [...]}.
        self._compiled = defaultdict(lambda: defaultdict(list))
        for _, rules in database.get("providers", {}).items():
            pattern = rules.get("urlPattern", {})
            current = self._compiled[re.compile(pattern)]
            for query_rule in (rules.get("rules", []) + rules.get("referralMarketing", [])):
                # Matches "<sep><rule>=<value>" where <sep> is /, ? or &.
                # NOTE(review): "(&|&)" contains two identical alternatives —
                # it looks like one was meant to be an escaped/encoded
                # ampersand; confirm against the upstream ClearURLs rules.
                rule_re = re.compile("([\\/|\\?]|(&|&))("+query_rule+"=[^\\/|\\?|&]*)")
                current["param_rules"].append(rule_re)
            current["exceptions"] = list(map(re.compile, rules.get("exceptions", [])))
            current["redirections"] = list(map(re.compile, rules.get("redirections", [])))
            current["full_rules"] = list(map(re.compile, rules.get("rawRules", [])))

    def _find_providers(self, url):
        """Finds the providers for the given URL.

        :param url: An URL
        :type url: str
        :return: Returns an iterator of the provider regular expressions that matched the URL
        :rtype: :class:`filter`
        """
        return filter(bool, map(lambda r: r.match(url), self._compiled))

    def _apply_provider(self, url, provider):
        """Apply the provider regular expressions to the URL.

        :param url: An URL
        :type url: str
        :param provider: Provider regular expressions
        :type provider: :class:`re.Match`
        :return: Cleaned URL
        :rtype: str
        """
        # Split the URL into the pre-query part and the query string.
        domain = URL_RE.sub("", url)
        fields = "?" + QUERY_RE.sub("", url)
        rules = self._compiled[provider.re]
        # Exceptions short-circuit: matching URLs are returned untouched.
        for exception in rules["exceptions"]:
            if exception.match(url):
                return url
        # Redirection rules extract the real destination from a wrapper URL.
        for redirection in rules["redirections"]:
            redir = redirection.match(url)
            if redir:
                return unquote(redir.group(1))
        # Strip tracking parameters from the query string.
        for rule in rules["param_rules"]:
            fields = rule.sub("", fields)
        # Raw rules rewrite the pre-query part directly.
        for raw_rule in rules["full_rules"]:
            domain = raw_rule.sub("", domain)
        # Reassemble from whatever key=value fields survived.
        final_fields = FINAL_RE.findall(fields)
        if len(final_fields):
            return domain + "?" + "&".join(final_fields)
        return domain

    def clean(self, url):
        """Returns a cleaned URL, by removing tracking elements from it.

        :param url: URL to clean
        :type url: str
        :return: Cleaned URL
        :rtype: str
        """
        providers = self._find_providers(url)
        for provider in providers:
            url = self._apply_provider(url, provider)
        return url
|
from typing import Optional
import numpy as np
from arch.univariate.distribution import SkewStudent as SS
from scipy.stats import t, uniform
from ._base import DistributionMixin, _format_simulator
class SkewStudent(DistributionMixin, SS):
    """Skew Student-t distribution extending arch's implementation with
    support for caller-supplied random draws via ``DistributionMixin``."""

    def __init__(self, random_state=None):
        # Initialize both bases explicitly; SS wires up the random state.
        DistributionMixin.__init__(self)
        SS.__init__(self, random_state)

    @_format_simulator
    def _simulator(self, size: int, reps: Optional[int] = None) -> np.ndarray:
        """Draw simulated variates, either from fresh uniforms or from the
        one-shot ``custom_dist`` stream provided by the mixin."""
        if self.custom_dist is None:
            if reps is not None:
                size = size, reps
            return self.ppf(uniform.rvs(size=size), self._parameters)
        else:
            self.derive_dist_size(size)
            ppf = self.ppf(self.custom_dist[:size], self._parameters)
            self.custom_dist = None  # reset simulator
            return ppf

    def ppf(self, pits, parameters=None):
        """Inverse CDF evaluated at the probabilities *pits* for the given
        (eta, lam) *parameters*.

        NOTE(review): ``parameters=None`` would fail at the unpacking below —
        callers appear to always pass them; confirm.
        """
        self._check_constraints(parameters)
        scalar = np.isscalar(pits)
        if scalar:
            pits = np.array([pits])
        eta, lam = parameters
        # __const_a/__const_b are inherited from arch's class of the same
        # name, so Python name mangling resolves to the parent's methods.
        a = self.__const_a(parameters)
        b = self.__const_b(parameters)

        cond = pits < (1 - lam) / 2
        # slight speed up for really large problems
        # (t._ppf is a private SciPy API that skips argument validation —
        # may break across SciPy versions)
        icdf1 = t._ppf(pits[cond] / (1 - lam), eta)
        icdf2 = t._ppf(.5 + (pits[~cond] - (1 - lam) / 2) / (1 + lam), eta)
        icdf = -999.99 * np.ones_like(pits)
        icdf[cond] = icdf1
        icdf[~cond] = icdf2
        icdf = (icdf * (1 + np.sign(pits - (1 - lam) / 2) * lam) * (1 - 2 / eta) ** .5 - a)
        icdf = icdf / b

        if scalar:
            icdf = icdf[0]
        return icdf
|
################################################################################
# MIT License
#
# Copyright (c) 2017 OpenDNA Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
from asyncio import AbstractEventLoop
from os import environ
from ssl import SSLContext
from typing import List, Union, Iterable
from autobahn.wamp.interfaces import ISerializer
from opendna.autobahn.repl.abc import (
AbstractConnection,
AbstractConnectionManager,
AbstractSession
)
from opendna.autobahn.repl.mixins import ManagesNames, HasLoop, HasName, \
ManagesNamesProxy
from opendna.autobahn.repl.utils import get_class
__author__ = 'Adam Jorgensen <adam.jorgensen.za@gmail.com>'
class Connection(HasName, ManagesNames, AbstractConnection):
    """A named WAMP connection that acts as a factory and name registry for
    session objects (class taken from the ``session`` environment variable)."""

    def __init__(self,
                 manager: Union[ManagesNames, AbstractConnectionManager],
                 uri: str,
                 realm: str,
                 extra: dict=None,
                 serializers: List[ISerializer]=None,
                 ssl: Union[SSLContext, bool]=None,
                 proxy: dict=None,
                 headers: dict=None):
        super().__init__(
            manager=manager, uri=uri, realm=realm, extra=extra,
            serializers=serializers, ssl=ssl, proxy=proxy, headers=headers
        )
        self.__init_has_name__(manager)
        self.__init_manages_names__()
        # Proxy exposing the sessions created by this connection, by name.
        self._sessions_proxy = ManagesNamesProxy(self)

    @property
    def sessions(self) -> ManagesNamesProxy:
        """Name-indexed proxy over the sessions created by this connection."""
        return self._sessions_proxy

    def name_for(self, item):
        """Return the registered name for *item*, which must be an instance of
        the configured session class."""
        session_class = get_class(environ['session'])
        assert isinstance(item, session_class)
        return super().name_for(id(item))

    @ManagesNames.with_name
    def session(self,
                authmethods: Union[str, List[str]]= 'anonymous',
                authid: str=None,
                authrole: str=None,
                authextra: dict=None,
                resumable: bool=None,
                resume_session: int=None,
                resume_token: str=None,
                *,
                name: str=None,
                **session_kwargs) -> AbstractSession:
        """Create a new session on this connection and register it under *name*
        (the name is injected by the ``with_name`` decorator)."""
        print(
            f'Generating {authmethods} session to {self._realm}@{self._uri} '
            f'with name {name}'
        )
        session_class = get_class(environ['session'])
        session = session_class(
            connection=self, authmethods=authmethods, authid=authid,
            authrole=authrole, authextra=authextra, resumable=resumable,
            resume_session=resume_session, resume_token=resume_token,
            **session_kwargs
        )
        # Register the session in the id <-> name lookup tables.
        session_id = id(session)
        self._items[session_id] = session
        self._items__names[session_id] = name
        self._names__items[name] = session_id
        return session

    def __call__(self,
                 authmethods: Union[str, List[str]]= 'anonymous',
                 authid: str=None,
                 authrole: str=None,
                 authextra: dict=None,
                 resumable: bool=None,
                 resume_session: int=None,
                 resume_token: str=None,
                 *,
                 name: str=None,
                 **session_kwargs):
        """Calling the connection is a shorthand for :meth:`session`."""
        return self.session(
            authmethods, authid, authrole, authextra, resumable, resume_session,
            resume_token, name=name, **session_kwargs
        )
class ConnectionManager(ManagesNames, HasLoop, AbstractConnectionManager):
    """Factory and name registry for connection objects (class taken from the
    ``connection`` environment variable), bound to an asyncio event loop."""

    def __init__(self, loop: AbstractEventLoop):
        super().__init__()
        self.__init_manages_names__()
        self.__init_has_loop__(loop)

    def name_for(self, item):
        """Return the registered name for *item*, which must be an instance of
        the configured connection class."""
        connection_class = get_class(environ['connection'])
        assert isinstance(item, connection_class)
        return super().name_for(id(item))

    @ManagesNames.with_name
    def __call__(self,
                 uri: str,
                 realm: str=None,
                 extra=None,
                 serializers=None,
                 ssl=None,
                 proxy=None,
                 headers=None,
                 *,
                 name: str=None) -> AbstractConnection:
        """Create a connection to *realm*@*uri* and register it under *name*
        (the name is injected by the ``with_name`` decorator)."""
        print(f'Generating connection to {realm}@{uri} with name {name}')
        connection_class = get_class(environ['connection'])
        connection = connection_class(
            manager=self, uri=uri, realm=realm, extra=extra,
            serializers=serializers, ssl=ssl, proxy=proxy, headers=headers
        )
        # Register the connection in the id <-> name lookup tables.
        connection_id = id(connection)
        self._items[connection_id] = connection
        self._items__names[connection_id] = name
        self._names__items[name] = connection_id
        return connection
|
#!/usr/bin/env python
# coding: utf-8
# #### Spyking circus using spikeinterface
import os
import spikeinterface.extractors as se
import spikeinterface.toolkit as st
import spikeinterface.sorters as ss
# Here are all the informations that we need like the file location
data_path = "P:\\raviku53\\test_recording_file\\"
file_name = "batchrun1.bin"
num_channels = 256  # we need the number of channels only for the .bin file
sampling_frequency = 30000  # in Hz
full_path = data_path + file_name

# Placeholders.  NOTE(review): func() below rebinds these names as *locals*,
# so these module-level values are never actually updated — confirm intent.
recording_sparrow = 0
recording_sparrow_filtered = 0
def func():
    """Load the binary recording, attach the probe layout, band-pass filter,
    common-median-reference, and run SpykingCircus on the result.

    All file locations are hard-coded Windows paths (see module constants).
    """
    print("reading data")
    recording_sparrow = se.BinDatRecordingExtractor(file_path= full_path, sampling_frequency=sampling_frequency, numchan = num_channels, dtype = 'int16')
    print("reading probe file")
    # Hard-coded probe geometry file.
    Probe = "C:\\Users\\raviku53\\OneDrive - imec\\New Folder\\probeQ2W5.prb"
    recording_sparrow = recording_sparrow.load_probe_file(probe_file=Probe)
    # Band-pass 300-6000 Hz to isolate spike-band activity.
    recording_sparrow_filtered = st.preprocessing.bandpass_filter(recording_sparrow, filter_type='butter', freq_min=300, freq_max=6000)
    # Common median reference across channels to suppress shared noise.
    recording_cmr_sparrow = st.preprocessing.common_reference(recording_sparrow_filtered, reference='median')
    print('Installed sorters', ss.installed_sorters())
    channel_ids = recording_sparrow.get_channel_ids()
    fs = recording_sparrow.get_sampling_frequency()
    num_chan = recording_sparrow.get_num_channels()
    print('Channel ids:', channel_ids)
    print('Sampling frequency:', fs)
    print('Number of channels:', num_chan)
    print(ss.get_default_params('spykingcircus'))
    print("sorting")
    sorting_SC = ss.run_spykingcircus(recording=recording_cmr_sparrow, output_folder = "P:\\raviku53\\test_recording_file\\flask")
    print('Units found by SC:', sorting_SC.get_unit_ids())
    print(sorting_SC)
    print(f'Spike train of a unit: {sorting_SC.get_unit_spike_train(0)}')
    return
# -*- coding: utf-8 -*-
## Variaveis
# 21.07.2020
# DB Functions for DB writes and reports
import sys
import os
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('bot'))
from bdFluxQueries import TraceReport, OccupancyReport, BestDayReport, BestDay
from config import le_config
"""
Definition of paramater that allow connecting to the SocialDistance DB
"""
from config_shared import INFLUXDB_HOST, INFLUXDB_PORT, INFLUXDB_DBUSER, INFLUXDB_DBPASSWORD, INFLUXDB_DBNAME, INFLUXDB_USER, INFLUXDB_PASSWORD
from config_shared import TABELA_MV, TABELA_TOTAL, TABELA_TRACE
from influxdb import InfluxDBClient
import json
import time
"""TimeSeries DataBase Class."""
# Objeto do banco
# metodo de escrita do trace
class DBClient():
    """Handler for the SocialDistance InfluxDB database.

    The connection is opened when the object is instantiated; call
    :meth:`Close` to release it.
    """

    def __init__(self):
        # Connect to the database using the shared configuration constants.
        self._host = INFLUXDB_HOST
        self._port = INFLUXDB_PORT
        self._user = INFLUXDB_USER
        self._password = INFLUXDB_PASSWORD
        self._dbname = INFLUXDB_DBNAME
        self._client = InfluxDBClient(self._host, self._port, self._user, self._password, self._dbname)

    def Close(self):
        """Close the underlying InfluxDB connection."""
        self._client.close()

    def peopleLog(self, local: str, userid: str, status: str, origem: str):
        """Record a user entering or leaving a location (trace measurement).

        Returns True to indicate that the data point was written.
        """
        # Prepare JSON with data to be written in the PeopleLog (trace) measurement.
        json_body = {
            "measurement": TABELA_TRACE,
            "tags": {"local": local},
            "fields": {"userid": userid, "state": status, "origin": origem},
        }
        # Write data to InfluxDB.
        self._client.write_points([json_body])
        return True

    def TotalCount(self, local: str, total: int, origem: str, people: list):
        """Record the total number of users currently at *local*.

        Returns True to indicate that the data point was written.
        """
        # Prepare JSON with data to be written in the TotalPeopleCount measurement.
        json_body = {
            "measurement": TABELA_TOTAL,
            "tags": {"location": local, "origin": origem, "people": people},
            "fields": {"count": total},
        }
        # Write data to InfluxDB.
        self._client.write_points([json_body])
        return True

    def SanityMask(self, local: str, network: str, serial: str, url: str, time: str):
        """Record an event for a person detected without a mask.

        Returns True to indicate that the data point was written.
        """
        # Prepare JSON with data to be written in the SanityMask measurement.
        json_body = {
            "measurement": TABELA_MV,
            "tags": {"local": local, "network": network, "serial": serial},
            "fields": {"url": url},
            "time": time,
        }
        # Write data to InfluxDB.
        self._client.write_points([json_body])
        return True

    def ConsultaMask(self, tabela: str, tempo: str):
        """Count mask-violation events recorded in *tabela* over the past
        *tempo* (an InfluxDB duration such as ``7d``) and return a summary
        message string.
        """
        query = 'SELECT count(url) from ' + tabela
        # Run the query; on failure return an error message instead of raising.
        try:
            consulta = self._client.query(query + " where time > now() - " + tempo)
            resultado = list(consulta.get_points())
            try:
                x = resultado[0]['count']
            except (IndexError, KeyError):
                # No rows returned means there were no events in the window.
                # (Narrowed from a bare except.)
                x = 0
            texto = f"{x} events in the past {tempo} time."
        except Exception:
            # Typically a malformed time filter; keep the check best-effort.
            # (Narrowed from a bare except so KeyboardInterrupt propagates.)
            texto = "Error in the query. Try XXd (for past days)"
        msg = texto
        return msg
# BD report
# BD report
def bd_consulta(tabela,filtro):
    # 21.7.2020
    # Runs reports on the DB. Plain InfluxQL is used for mask detection and
    # Flux for the complex reports (BestDay, Occupancy/History, TracePeople).
    # The report type is selected by `tabela`; `filtro` is a user-supplied
    # filter (usually a point/window in time for the query).
    # Returns a dict of the form {"msg": <report text>}.
    msg=""
    global TABELA_MV, TABELA_TOTAL, TABELA_TRACE
    if tabela=="totalcount":
        # Social-distance out-of-compliance periods per room/shift.
        # Flux query collects the data set according to the filter.
        try:
            x=OccupancyReport(le_config(), filtro)
        except:
            # Error message in case the filter is malformed.
            msg="Query Error"
        try:
            # Format the output message from the query result.
            for b in x['PerShiftHistory']:
                msg=msg+f"Room: {b['location']} Data: {b['day']}-{b['month']}-{b['year']} Shift: {b['shift']} \n"
            print (msg)
        except:
            # Error message in case the filter is malformed.
            msg="Error in the query. Try XXd (for past days)"
        if msg=="":
            msg="No data \n"
    elif tabela=="peoplelog":
        # Traces which people were close to a user in a time period.
        # Parameters arrive packed as "userid&start&end".
        params=filtro.split('&')
        # Only proceed when the parameter count is correct.
        if len(params)!=3:
            msg="Trace: Missing parameters. Requires: userid, start time, end time. \n"
        elif len(params)==3:
            # Parameters ok, continue.
            personid=params[0]
            start=params[1]
            end=params[2]
            print(params)
            try:
                # Run the trace query.
                print ("trace")
                x=TraceReport(personid,start,end)
                print (x)
                minhalista=list()
            except:
                # Error message in case the filter is malformed.
                msg="Query Error.Format: personid: username, start: YYYY-MM-DD, end: YYYY-MM-dd"
            try:
                # Assemble the response: de-duplicated list of close users.
                for b in x:
                    minhalista.append(b['userid'])
                minhalista=list(dict.fromkeys(minhalista))
                msg="List of users close: \n"
                for b in minhalista:
                    msg=msg+f"{b} \n"
                if len(minhalista)==0:
                    msg="No data."
            except:
                msg="No Data or Query Error. \n"
    elif tabela=="bestday":
        # Report the best day(s) to go to the office.
        #try:
        # Data set for the requested period.
        x=BestDayReport(filtro)
        # Consolidate the best days within business hours (9h-12h).
        y=BestDay(x,9,12)
        print (y)
        # Build the message.
        for b in y:
            msg=msg+f"Room: {b['location']} - Best Days: {b['bestday']} \n"
        print (msg)
        #return msg
        #except:
        #msg="No Data and/or Query Error. \n"
        #return "No Data and/or Query Error. \n"
    elif tabela=="sanityMask":
        # Qty of no-mask events in a time period.
        # NOTE(review): this branch matches "sanityMask" while bd_update uses
        # "sanitymask" — confirm which casing callers send.
        banco=DBClient()
        tabela=TABELA_MV
        msg=banco.ConsultaMask(tabela,filtro)
        banco.Close()
    else:
        msg="Table information not found."
    # NOTE(review): local name `json` shadows the stdlib module if it is
    # imported in this file — consider renaming.
    json = { "msg":msg}
    return json
# BD update via JSON content
def bd_update(json_content):
    # 21.7.2020
    # Updates the DB according to the "type" field of the posted JSON payload
    # (expected formats below). Returns "ok" on success and "erro" on a
    # malformed payload or a DB/missing-field error.
    # initiate DB
    banco=DBClient()
    # Type 1 - raw data of login/logoff user
    # {
    #  "type":"peoplelog",
    #  "local":"LOG1",
    #  "origem":"python",
    #  "userid":"dvicenti",
    #  "status":"entrou"
    #}
    # Type 2 - total people in a room
    # {
    #  "type":"totalcount",
    #  "local":"SALA_log2",
    #  "origin": "camera"
    #  "total":100
    #  "people":"people1@email,people2@email"
    #}
    # Type 3 - no-mask detected event
    # {
    #  "type":"sanitymask",
    #  "local":"SALA-Log3"
    #  "network":"XPTO",
    #  "serial":"XPTO",
    #  "url":"http://x.com/foto.png"
    #  "time": "2018-03-28T8:01:00Z"
    #}
    # Check that the payload carries the discriminating "type" field.
    try:
        tipo = json_content["type"]
    except:
        print ("not a valid json expected")
        print (json_content)
        return "erro"
    # Dispatch to the matching DB write method.
    try:
        if tipo == "peoplelog":
            banco.peopleLog(json_content["local"],json_content["userid"],json_content["status"],json_content["origem"])
        elif tipo == "totalcount":
            # NOTE(review): reads "location"/"count" but the Type 2 example
            # above documents "local"/"total" — confirm which schema callers
            # actually send; a mismatch lands in the except branch below.
            banco.TotalCount(json_content["location"],json_content["count"],json_content['origin'],json_content['people'])
        elif tipo == "sanitymask":
            banco.SanityMask(json_content["local"],json_content['network'],json_content['serial'],json_content['url'],json_content["time"])
        # Success path: close the DB handle and acknowledge.
        banco.Close()
        print (json_content)
        return "ok"
    except:
        # Any missing field or DB failure ends up here.
        print ("missing fields or BD error")
        print (json_content)
        return "erro"
# coding: utf-8
# # Using VGG16
# Notebook export: classify local images with the pretrained ImageNet VGG16
# model. Bare expressions (e.g. `img`) were notebook cell outputs.
# In[1]:
import numpy as np
from keras.applications import vgg16
from keras.preprocessing import image
# In[2]:
# Load VGG16 with ImageNet weights (downloads them on first use).
model = vgg16.VGG16(weights='imagenet')
# In[3]:
# VGG16 expects 224x224 RGB input.
img = image.load_img('images/spoon.jpeg',target_size=(224,224))
img
# In[4]:
# Convert the PIL image to a NumPy array (height, width, channels).
arr = image.img_to_array(img)
arr.shape
# In[5]:
# Add a leading batch dimension: (1, height, width, channels).
arr = np.expand_dims(arr, axis=0)
arr.shape
# In[6]:
# Apply the VGG16-specific preprocessing (channel mean subtraction etc.).
arr = vgg16.preprocess_input(arr)
arr
# In[7]:
# Run inference; returns class probabilities over the 1000 ImageNet classes.
preds = model.predict(arr)
preds
# In[8]:
# Decode the top-5 predicted ImageNet classes with scores.
vgg16.decode_predictions(preds, top=5)
# ## Test using another image
# In[9]:
img2 = image.load_img('images/fly.jpeg',target_size=(224,224))
img2
# In[10]:
# Same pipeline as above, condensed.
arr2 = image.img_to_array(img2)
arr2 = np.expand_dims(arr2,axis=0)
arr2 = vgg16.preprocess_input(arr2)
preds2 = model.predict(arr2)
vgg16.decode_predictions(preds2, top=5)
|
import webbrowser
from PyQt5 import QtWidgets, QtCore
import xappt
from xappt_qt.gui.widgets.tool_page.converters.base import ParameterWidgetBase
from xappt_qt.gui.widgets.file_edit import FileEdit
from xappt_qt.gui.widgets.text_edit import TextEdit
from xappt_qt.gui.widgets.table_edit import TableEdit
from xappt_qt.utilities.text import to_markdown
class ParameterWidgetStr(ParameterWidgetBase):
    """Builds the Qt editor widget for a string parameter.

    Enumerated parameters (``param.choices``) become a combo box; otherwise
    the widget is chosen from ``param.options["ui"]`` (file/folder pickers,
    multi-line text, label/markdown, csv table, password or plain line edit).
    """

    def get_widget(self, param: xappt.Parameter) -> QtWidgets.QWidget:
        """Return a combo box for enumerated params, else an edit widget."""
        if param.choices is not None:
            return self._convert_str_choice(param)
        else:
            return self._convert_str_edit(param)

    def _convert_str_choice(self, param: xappt.Parameter) -> QtWidgets.QWidget:
        """Build a combo box populated from ``param.choices``."""
        w = QtWidgets.QComboBox()
        w.addItems(param.choices)
        # Prefer the current value, then the default; if neither is a valid
        # choice the for/else adopts whatever the combo box shows.
        for v in (param.value, param.default):
            if v is not None and v in param.choices:
                index = w.findText(v)
                w.setCurrentIndex(index)
                break
        else:
            param.value = w.currentText()
        self._setup_combobox(param, w)
        # NOTE(review): the string-typed signal overload is deprecated in
        # Qt5 — consider currentTextChanged.
        w.currentIndexChanged[str].connect(lambda x: self.onValueChanged.emit(param.name, x))
        self._getter_fn = w.currentText
        self._setter_fn = lambda s, widget=w: widget.setCurrentIndex(widget.findText(s))
        return w

    def _convert_str_edit(self, param: xappt.Parameter) -> QtWidgets.QWidget:
        """Build the edit widget matching ``param.options["ui"]`` and wire
        its change signal to ``onValueChanged``."""
        ui = param.options.get("ui")
        if ui == "folder-select":
            w = FileEdit(mode=FileEdit.MODE_CHOOSE_DIR)
            w.onSetFile.connect(lambda x: self.onValueChanged.emit(param.name, x))
        elif ui == "file-open":
            w = FileEdit(accept=param.options.get("accept"), mode=FileEdit.MODE_OPEN_FILE)
            w.onSetFile.connect(lambda x: self.onValueChanged.emit(param.name, x))
        elif ui == "file-save":
            w = FileEdit(accept=param.options.get("accept"), mode=FileEdit.MODE_SAVE_FILE)
            w.onSetFile.connect(lambda x: self.onValueChanged.emit(param.name, x))
        elif ui == "multi-line":
            w = TextEdit()
            w.editingFinished.connect(lambda widget=w: self.onValueChanged.emit(param.name, widget.text()))
        elif ui in ("label", "markdown"):
            w = QtWidgets.QLabel()
            w.setTextFormat(QtCore.Qt.RichText)
            w.setWordWrap(True)
            # Labels carry no editable value; clear the caption shown elsewhere.
            self.caption = ""
            w.linkActivated.connect(self.link_activated)
        elif ui == "csv":
            w = TableEdit(header_row=param.options.get("header_row", False),
                          editable=param.options.get("editable", False),
                          csv_import=param.options.get("csv_import", False),
                          csv_export=param.options.get("csv_export", True),
                          sorting_enabled=param.options.get("sorting_enabled", True))
            w.data_changed.connect(lambda widget=w: self.onValueChanged.emit(param.name, widget.text()))
        else:
            # Fallback: a plain line edit, optionally in password mode.
            w = QtWidgets.QLineEdit()
            if ui == "password":
                w.setEchoMode(QtWidgets.QLineEdit.Password)
            w.editingFinished.connect(lambda widget=w: self.onValueChanged.emit(param.name, widget.text()))
        self._getter_fn = w.text
        if ui == "markdown":
            # Markdown text is converted to rich text before display.
            # NOTE(review): the lambda binds widget=w but then uses the
            # closure variable `w` directly — equivalent here, but the
            # default argument is unused.
            self._setter_fn = lambda t, widget=w: w.setText(to_markdown(t))
        else:
            self._setter_fn = w.setText
        # Seed the widget with the value, then the default, else empty.
        for v in (param.value, param.default):
            if v is not None:
                self._setter_fn(v)
                break
        else:
            self._setter_fn("")
        return w

    @staticmethod
    def link_activated(url: str):
        """Open clicked label links in the system web browser."""
        webbrowser.open(url)
|
from collections import Counter
from dataclasses import dataclass, field
from urllib import request
from bs4 import BeautifulSoup
from tabulate import tabulate
@dataclass
class Person:
    """A fellow: a name plus a company affiliation."""
    name: str = ''
    company: str = ''

    @property
    def company_first_word(self):
        """First whitespace-delimited word of the company name."""
        return self.company.partition(' ')[0]
@dataclass
class Persons:
    """A collection of Person records with per-company statistics."""
    persons: list = field(default_factory=list)

    def add_person(self, person):
        """Append *person* to the collection."""
        self.persons.append(person)

    def company_counts(self):
        """Return (company, count) pairs, most frequent first.

        Company names are normalised through :meth:`company_map` first.
        """
        tally = Counter(self.company_map(p.company) for p in self.persons)
        return tally.most_common()

    @property
    def ranked_company_counts(self):
        """company_counts() rows each prefixed with a 1-based rank."""
        return [
            [rank, *row]
            for rank, row in enumerate(self.company_counts(), 1)
        ]

    def company_map(self, company_name):
        """Collapse known misspellings/aliases onto one canonical name."""
        aliases = {
            'Aiirbnb': 'Airbnb',
            'Intel': 'Intel Corporation',
            'Schireson': 'Schireson Associates'
        }
        return aliases.get(company_name, company_name)
@dataclass
class InsightDataFellows:
    """Scrapes fellow name/company pairs from the Insight fellows pages.

    Crawls base_url and follows "next" pagination links, accumulating
    Person records in ``fellows``.
    """
    base_url: str = ''
    # default_factory gives each instance its own Persons. The previous
    # `fellows: Persons = Persons()` was a single class-level instance shared
    # by every InsightDataFellows object (and is rejected outright by
    # dataclasses on Python 3.11+ because the default is unhashable).
    fellows: Persons = field(default_factory=Persons)
    fellow_items: list = field(default_factory=list)

    def setup(self):
        """Start crawling from base_url."""
        self.parse_page_from_url(self.base_url)

    def parse_page_from_url(self, url):
        """Parse one listing page and recurse into the next page, if any."""
        page = request.urlopen(url)
        page = BeautifulSoup(page, 'html.parser')
        fellow_items = page.select('div.fellow_item')
        for item in fellow_items:
            name = item.find('div', class_='fellow_name').text
            company = item.find('div', class_='fellow_company').text
            self.fellows.add_person(Person(name, company))
        self.get_next_page(page)

    def get_next_page(self, page):
        """Follow the pagination "next" link when one is present."""
        pagination = page.find('div', class_='w-pagination-wrapper').find('a', class_='w-pagination-next')
        if pagination:
            next_page_url = self.base_url + pagination['href']
            self.parse_page_from_url(next_page_url)

    def tabulate_company_counts(self):
        """Print a ranked table of fellows per company."""
        headers = ['rank', 'company', 'person_count']
        self.tabulate(headers, self.fellows.ranked_company_counts)

    def tabulate(self, headers=None, items=None):
        """Pretty-print *items* with *headers* in org-table format.

        Defaults are None instead of mutable [] literals (shared-state
        pitfall); None is substituted with a fresh empty list.
        """
        print(tabulate(
            items if items is not None else [],
            headers=headers if headers is not None else [],
            tablefmt='orgtbl'
        ))
# Driver: crawl the live fellows listing and print the company ranking.
# (Performs network I/O at import time.)
url = 'https://www.insightdatascience.com/fellows'
parser = InsightDataFellows(url)
parser.setup()
parser.tabulate_company_counts()
|
import os
from os.path import splitext, getmtime, isfile
from pyflu.setuptools.base import CommandBase
from pyflu.path import iter_files
from setuptools import setup, find_packages, Extension
class CompileCythonCommand(CommandBase):
    """setuptools command that (re)compiles every .pyx file in the tree.

    Skips files whose compiled output is newer than the source unless
    ``--all`` is given. ``extensions()`` builds the matching setuptools
    Extension objects for setup().
    """

    description = "compile all cython files"
    user_options = [
        ("include=", "I", "cython include directories (separated by ',')"),
        ("exclude=", "e", "comma separated list of folders to exclude "
            "from search for .pyx files"),
        ("cplus", None, "compile to c++ (default: False)"),
        ("all", "a", "recompile all cython files (default: False)"),
    ]
    boolean_options = ["cplus", "all"]
    defaults = {
        "include": "",
        "exclude": "",
        "cplus": False,
        "all": False,
    }

    def finalize_options(self):
        # Split the comma-separated option strings into lists, dropping blanks.
        self.include = [p for p in self.include.split(",") if p]
        self.exclude = [p for p in self.exclude.split(",") if p]

    def run(self):
        """Invoke the cython compiler on every out-of-date .pyx file."""
        for infile in self.cython_files(exclude=self.exclude):
            outfile = splitext(infile)[0] + self.outfiles_ext()
            if not self.all and \
                    isfile(outfile) and getmtime(infile) <= getmtime(outfile):
                # Compiled output is up to date, skip it.
                continue
            cmd = "cython "
            if self.cplus:
                cmd += "--cplus "
            if self.include:
                cmd += "".join(["-I %s " % i for i in self.include])
            cmd += infile
            # print() call form runs on both Python 2 and 3; the original
            # used the Python-2-only `print cmd` statement.
            print(cmd)
            # NOTE(review): os.system runs a shell string — paths containing
            # spaces or shell metacharacters would break or inject; consider
            # subprocess.run with an argument list.
            os.system(cmd)

    def outfiles_ext(self):
        """File extension cython produces in the current mode."""
        if self.cplus:
            return ".cpp"
        return ".c"

    @classmethod
    def cython_files(cls, exclude=None):
        """Iterate over the project's .pyx files."""
        return iter_files(".pyx", exclude=exclude)

    @classmethod
    def extensions(cls, include_dirs=None, libraries=None, library_dirs=None,
                   cplus=None, additional_sources=None, extra_link_args=None,
                   extra_compile_args=None, exclude=None):
        """Build a setuptools Extension per cython module.

        :param additional_sources: mapping of module name -> extra source files.
        :param exclude: comma separated folders to skip (defaults to the
            class-level default).
        :return: list of Extension objects.
        """
        if include_dirs is None:
            include_dirs = []
        if libraries is None:
            libraries = []
        if library_dirs is None:
            library_dirs = []
        if cplus is None:
            cplus = cls.defaults.get("cplus", False)
        if cplus:
            ext_ext = ".cpp"
        else:
            ext_ext = ".c"
        if additional_sources is None:
            additional_sources = {}
        if extra_link_args is None:
            extra_link_args = []
        if exclude is None:
            exclude = cls.defaults.get("exclude", "")
        exclude = [e for e in exclude.split(",") if e]
        ret = []
        for path in cls.cython_files(exclude=exclude):
            base, ext = splitext(path)
            # The Extension compiles the generated C/C++ file, not the .pyx.
            ext_path = base + ext_ext
            # Derive the dotted module name from the relative path.
            module_name = base.replace(os.sep, ".")
            while module_name.startswith("."):
                module_name = module_name[1:]
            add_srcs = additional_sources.get(module_name, [])
            ret.append(Extension(module_name, [ext_path] + add_srcs,
                                 include_dirs=include_dirs,
                                 libraries=libraries,
                                 library_dirs=library_dirs,
                                 extra_link_args=extra_link_args,
                                 extra_compile_args=extra_compile_args))
        return ret
|
from azure.common.credentials import ServicePrincipalCredentials
class AzureContext(object):
    """Azure security context.

    Bundles service-principal credentials together with the subscription
    id. See README for how to obtain the service principal attributes
    (client id, secret, etc.) for Azure.
    """

    def __init__(self, subscription_id, client_id, client_secret, tenant):
        # Build the service-principal credentials first, then attach the
        # subscription id alongside them.
        self.credentials = ServicePrincipalCredentials(
            client_id=client_id,
            secret=client_secret,
            tenant=tenant,
        )
        self.subscription_id = subscription_id
# Service principal credentials for ACI.
# NOTE(review): all values below are blank placeholders — they must be
# filled in (preferably from the environment, not committed) before use.
azure_context = AzureContext(
    subscription_id = '',
    client_id = '', # Service principal ID
    client_secret = '', # SP key
    tenant = '' # Directory ID
)
# ACI-specific configuration.
ACI_CONFIG = {
    "subscriptionId": "",
    "resourceGroup": "",
    "location": ""
}
# Cosmos DB MongoDB-API connection string.
DATABASE_URI = ""
# Service Bus queue configuration.
queueConf = {
    'service_namespace': '',
    'saskey_name': '',
    'saskey_value': '',
    'queue_name': ''
}
|
import torch
from torch import Tensor
from torch import nn
from torch._C import dtype
from torch.nn.utils.rnn import pad_sequence
import math
from transformers import RobertaConfig, RobertaModel
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
######################################
# Char Word Transformer Encoder Model#
######################################
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding added to token embeddings, followed
    by dropout."""

    def __init__(self, d_model: int = 256, dropout: float = 0.1, max_len: int = 400):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(dropout)
        position = torch.arange(max_len).unsqueeze(dim=1)
        # NOTE(review): the canonical transformer formulation uses 10000 as
        # the frequency base; 100000 here changes the wavelengths — confirm
        # this is intended.
        div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(100000) / d_model))
        # NOTE(review): plain tensor attribute, not a registered buffer — it
        # will not follow .to()/.cuda() moves of the module nor appear in
        # state_dict; it is pinned to the module-level `device` instead.
        self.position_encoding = torch.zeros(max_len, d_model).to(device)
        self.position_encoding[:, 0::2] = torch.sin(position * div_term)
        self.position_encoding[:, 1::2] = torch.cos(position * div_term)

    def forward(self, x: Tensor) -> Tensor:
        """x: shape [batch_size, seq_length, embedding_dim] --> return [batch_size, seq_length, embedding_dim]"""
        # NOTE(review): += mutates the caller's tensor in place — confirm
        # callers do not reuse x afterwards.
        x += self.position_encoding[:x.size(1)]
        return self.dropout(x)
def generate_square_mask(sequence_size: int):
    """Build an additive causal attention mask of shape (S, S).

    Entry (i, j) is 0.0 where position i may attend to position j (j <= i)
    and -inf above the diagonal.
    """
    allowed = torch.tril(torch.ones((sequence_size, sequence_size), device=device)) == 1
    mask = torch.zeros((sequence_size, sequence_size), device=device)
    return mask.masked_fill(~allowed, float('-inf'))
def generate_source_mask(src: Tensor, mask_token_id: int):
    """Boolean key-padding mask: True wherever *src* equals the pad/mask id."""
    return src.eq(mask_token_id)
class CharEncoderTransformers(nn.Module):
    """Character-level transformer encoder whose per-character outputs are
    pooled into one embedding per word ('avg', 'add' or 'linear' merge)."""

    def __init__(self, n_chars: int, mask_token_id: int, d_model: int = 256, d_hid: int = 256, n_head: int = 4,
                 n_layers: int = 4,
                 dropout: float = 0.2):
        super(CharEncoderTransformers, self).__init__()
        self.position_encoding = PositionalEncoding(d_model, dropout, 512)
        self.encoder_layer = nn.TransformerEncoderLayer(d_model, n_head, d_hid, dropout)
        self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, n_layers)
        self.char_embedding = nn.Embedding(n_chars, d_model)
        self.d_model = d_model
        # Maximum characters per word supported by the 'linear' merge mode.
        self.max_char = 50
        self.linear_char = nn.Linear(self.max_char * self.d_model, self.d_model)
        self.mask_token_id = mask_token_id
        self.init_weight()

    def init_weight(self):
        """Uniformly initialise the character embedding table in [-0.1, 0.1]."""
        init_range = 0.1
        self.char_embedding.weight.data.uniform_(-init_range, init_range)

    def merge_embedding(self, embeddings: Tensor, sequence_split, mode='avg') -> Tensor:
        """Pool character embeddings of one sequence into word embeddings.

        :param embeddings: char embeddings of a single sequence [seq_len, d_hid]
        :param sequence_split: characters per word, list[int]
        :param mode: 'avg' (mean), 'add' (sum) or 'linear' (flatten + project)
        :return: one embedding per word [n_words, d_model]
        """
        original_sequence_split = sequence_split.copy()
        # Words are separated by a single space character; account for it,
        # except after the final word.
        sequence_split = [value + 1 for value in sequence_split]  # plus space
        sequence_split[-1] -= 1  # remove for the last token
        embeddings = embeddings[:sum(sequence_split)]
        embeddings = torch.split(embeddings, sequence_split, dim=0)
        # Drop the trailing space embedding from every word but the last.
        embeddings = [embedd[:-1, :] if i != (len(sequence_split) - 1) else embedd for i, embedd in
                      enumerate(embeddings)]
        if mode == 'avg':
            embeddings = pad_sequence(embeddings, padding_value=0, batch_first=True)  # n_word*max_length*d_hid
            seq_splits = torch.tensor(original_sequence_split).reshape(-1, 1).to(device)
            # Mean over characters: sum then divide by the true char count.
            outs = torch.div(torch.sum(embeddings, dim=1), seq_splits)
        elif mode == 'add':
            embeddings = pad_sequence(embeddings, padding_value=0, batch_first=True)  # n_word*max_length*d_hid
            outs = torch.sum(embeddings, dim=1)
        elif mode == 'linear':
            # Flatten each word's char embeddings and zero-pad to max_char.
            # NOTE(review): the padding tensor is created with
            # dtype=torch.long while the embeddings are float — torch.cat on
            # mixed dtypes raises; confirm this branch is exercised/intended.
            embeddings =[
                torch.cat(
                    (
                        embedding_tensor.reshape(-1),
                        torch.tensor(
                            [0] * (self.max_char - embedding_tensor.size(0)) * self.d_model,
                            dtype=torch.long
                        ).to(device)
                    )
                )
                for embedding_tensor in embeddings
            ]
            embeddings = torch.stack(embeddings, dim=0)
            outs = self.linear_char(embeddings)
        else:
            raise Exception('Not Implemented')
        return outs

    def forward(self, src: Tensor,
                batch_splits,
                src_mask: Tensor = None,
                src_key_padding_mask: Tensor = None
                ) -> Tensor:
        """
        :param src: char token ids [batch_size, max_len(sentence_batch)]
        :param batch_splits: per-sequence lists of characters-per-word
        :param src_mask: optional additive attention mask
        :param src_key_padding_mask: optional pad-token mask
        :return: word embeddings pooled from chars [batch_size, n_words, d_hid]
        """
        src_embeddings = self.char_embedding(src)  # batch_size * len_seq * embedding_dim
        src_embeddings = self.position_encoding(src_embeddings)
        # Rebuild the causal mask whenever the sequence length changes.
        if src_mask is None or src_mask.size(0) != src.size(1):
            src_mask = generate_square_mask(src.size(1))
        if src_key_padding_mask is None:
            src_key_padding_mask = generate_source_mask(src, self.mask_token_id)
        # nn.TransformerEncoder expects (seq, batch, dim); transpose in/out.
        outputs = self.transformer_encoder(
            src_embeddings.transpose(0, 1),
            mask=src_mask,
            src_key_padding_mask=src_key_padding_mask
        ).transpose(0, 1)  # batch_size*len(sentence)*d_hid
        # Pool chars into words per sequence, then pad across the batch.
        outputs = pad_sequence(
            [self.merge_embedding(embedding, sequence_split) for embedding, sequence_split in
             zip(outputs, batch_splits)],
            padding_value=0,
            batch_first=True
        )
        return outputs
class CharWordTransformerEncoding(nn.Module):
    """Word-level transformer encoder over concatenated word embeddings and
    char-derived embeddings, with joint error-detection (fc1) and
    correction (fc2) heads."""

    def __init__(self, n_words: int, n_chars: int, n_label_errors: int,
                 mask_token_id: int,
                 use_detection_context: bool = True, d_model: int = 512, d_hid: int = 768,
                 n_head: int = 12, n_layers: int = 12, dropout: float = 0.2):
        super(CharWordTransformerEncoding, self).__init__()
        self.position_encoding = PositionalEncoding(d_model, dropout, 256)
        self.char_transformer_encoder = CharEncoderTransformers(n_chars, mask_token_id)
        # Encoder width = word embedding dim + char-derived embedding dim.
        self.encoder_layer = nn.TransformerEncoderLayer(d_model + self.char_transformer_encoder.d_model, n_head, d_hid,
                                                        dropout)
        self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, n_layers)
        self.word_embedding = nn.Embedding(n_words, d_model)
        self.d_model = d_model
        self.mask_token_id = mask_token_id
        self.use_detection_context = use_detection_context
        # Detection head scores each word with an error-type distribution.
        # NOTE(review): fc1's input dim d_hid only matches the encoder output
        # when d_hid == d_model + char d_model (768 == 512 + 256 with the
        # defaults) — confirm before changing either dimension.
        self.fc1 = nn.Linear(d_hid, n_label_errors)
        if use_detection_context:
            # Project softmaxed detection scores to a small context vector
            # concatenated onto the encoder output before correction.
            self.softmax = nn.Softmax(dim=-1)
            self.linear_detection_context = nn.Linear(n_label_errors, 20)
            self.d_out_hid = d_hid + 20
        else:
            self.d_out_hid = d_hid
        self.fc2 = nn.Linear(self.d_out_hid, n_words)
        self.init_weight()

    def init_weight(self):
        """Uniformly initialise the word embedding table in [-0.1, 0.1]."""
        init_range = 0.1
        self.word_embedding.weight.data.uniform_(-init_range, init_range)

    def forward(self, src_word_error_ids: Tensor,
                src_char_ids: Tensor,
                batch_splits,
                src_mask: Tensor = None,
                src_key_padding_mask: Tensor = None
                ):
        """
        :param src_word_error_ids: word token ids [batch_size, n_words]
        :param src_char_ids: char token ids [batch_size, seq_len]
        :param batch_splits: per-sequence characters-per-word lists
        :param src_mask: optional additive attention mask (currently unused)
        :param src_key_padding_mask: optional pad-token mask
        :return: detection outputs [batch_size, n_words, n_errors] and
                 correction outputs [batch_size, n_words, n_words]
        """
        src_word_embeddings = self.word_embedding(src_word_error_ids)
        src_word_embeddings = self.position_encoding(src_word_embeddings)
        # Char-derived word embeddings from the character encoder.
        src_words_from_chars = self.char_transformer_encoder(src_char_ids,
                                                             batch_splits)  # batch_size*n_words*d_model_char
        src_word_embeddings = torch.cat((src_word_embeddings, src_words_from_chars),
                                        dim=-1)  # batch_size*n_words*(d_model_char+d_model_word)
        # Causal masking is intentionally disabled here (bidirectional
        # attention over the word sequence).
        # if src_mask is None or src_mask.size(0) != src_word_error_ids.size(1):  # sequence_size
        #     src_mask = generate_square_mask(src_word_error_ids.size(1))
        if src_key_padding_mask is None:
            src_key_padding_mask = generate_source_mask(src_word_error_ids, self.mask_token_id)
        outputs = self.transformer_encoder(
            src_word_embeddings.transpose(0, 1),  # n_words * batch_size * hidden_size
            # mask=src_mask,  # n_words * n_words
            src_key_padding_mask=src_key_padding_mask  # batch_size * n_words
        ).transpose(0, 1)  # batch_size * n_words * d_hid
        detection_outputs = self.fc1(outputs)  # batch_size * n_words * n_errors
        if self.use_detection_context:
            # Feed the (softmaxed) detection decision back into correction.
            detection_context = self.softmax(detection_outputs)
            detection_context = self.linear_detection_context(detection_context)  # batch_size * n_words * 20
            outputs = torch.cat((outputs, detection_context), dim=-1)  # batch_size * n_words * d_out_hid
        correction_outputs = self.fc2(outputs)
        return detection_outputs, correction_outputs
class PhoBertEncoder(nn.Module):
    """Pretrained RoBERTa-style encoder with detection and correction heads.

    Subword hidden states (mean of the last four layers) are merged back
    into word-level embeddings before classification.
    NOTE(review): despite the class name, the checkpoint loaded is
    'vinai/bartpho-word' — confirm the intended pretrained model.
    """

    def __init__(self, n_words: int, n_labels_error: int,
                 fine_tuned: bool = False, use_detection_context: bool = False):
        super(PhoBertEncoder, self).__init__()
        self.bert_config = RobertaConfig.from_pretrained('vinai/bartpho-word', return_dict=True,
                                                         output_hidden_states=True)
        self.bert = RobertaModel.from_pretrained('vinai/bartpho-word', config=self.bert_config)
        self.d_hid = self.bert.config.hidden_size
        self.detection = nn.Linear(self.d_hid, n_labels_error)
        self.use_detection_context = use_detection_context
        if self.use_detection_context:
            # Project softmaxed detection scores back to hidden size so they
            # can be added (residual) to the encoder outputs.
            self.detection_context_layer = nn.Sequential(
                nn.Softmax(dim=-1),
                nn.Linear(n_labels_error,self.d_hid)
            )
        # Maximum subwords per word supported by the 'linear' merge mode.
        self.max_n_subword = 30
        self.linear_subword_embedding = nn.Linear(self.max_n_subword * self.d_hid, self.d_hid)
        self.fine_tuned = fine_tuned
        self.correction = nn.Linear(self.d_hid, n_words)
        self.is_freeze_model()

    def is_freeze_model(self):
        """Freeze (fine_tuned=False) or unfreeze the pretrained encoder."""
        for child in self.bert.children():
            for param in child.parameters():
                param.requires_grad = self.fine_tuned

    def merge_embedding(self, sequence_embedding: Tensor, sequence_split, mode='avg'):
        """Pool subword embeddings of one sequence into word embeddings.

        :param sequence_embedding: subword embeddings [seq_len, hidden_size];
            index 0 is skipped (special start token).
        :param sequence_split: subwords per word, list[int]
        :param mode: 'avg', 'add' or 'linear'
        :return: one embedding per word [n_words, hidden_size]
        """
        sequence_embedding = sequence_embedding[1: sum(sequence_split) + 1]  # drop the leading special token
        embeddings = torch.split(sequence_embedding, sequence_split, dim=0)
        word_embeddings = pad_sequence(
            embeddings,
            padding_value=0,
            batch_first=True
        )
        if mode == 'avg':
            temp = torch.tensor(sequence_split).reshape(-1, 1).to(device)
            # Mean over subwords: sum then divide by the true subword count.
            outputs = torch.div(torch.sum(word_embeddings, dim=1), temp)
        elif mode == 'add':
            outputs = torch.sum(word_embeddings, dim=1)
        elif mode == 'linear':
            # NOTE(review): the zero padding is an int tensor (float
            # embeddings) and is never moved with .to(device) — this branch
            # looks like it would fail on dtype/device; confirm if used.
            embeddings = [
                torch.cat((
                    embedding_subword_tensor.reshape(-1),
                    torch.tensor([0] * (self.max_n_subword -embedding_subword_tensor.size(0)) * self.d_hid)
                ))
                for embedding_subword_tensor in embeddings
            ]
            embeddings = torch.stack(embeddings, dim=0)
            outputs = self.linear_subword_embedding(embeddings)
        else:
            raise Exception('Not Implemented')
        return outputs

    def forward(self, input_ids: Tensor,
                attention_mask: Tensor,
                batch_splits,
                token_type_ids: Tensor = None
                ):
        """
        :param input_ids: subword token ids [batch_size, seq_len]
        :param attention_mask: attention mask [batch_size, seq_len]
        :param batch_splits: per-sequence subwords-per-word lists
        :param token_type_ids: accepted but currently not forwarded to bert
        :return: detection outputs [batch_size, n_words, n_errors] and
                 correction outputs [batch_size, n_words, n_words]
        """
        outputs = self.bert(input_ids, attention_mask)
        hidden_states = outputs.hidden_states
        # Average the last four hidden layers per subword.
        stack_hidden_state = torch.stack(
            [hidden_states[-1], hidden_states[-2], hidden_states[-3], hidden_states[-4]],
            dim=0
        )
        mean_hidden_state = torch.mean(stack_hidden_state, dim=0)
        # Merge subwords into words per sequence, then pad across the batch.
        outputs = pad_sequence(
            [self.merge_embedding(sequence_embedding, sequence_split) for sequence_embedding, sequence_split in
             zip(mean_hidden_state, batch_splits)],
            padding_value=0,
            batch_first=True
        )
        detection_outputs = self.detection(outputs)
        if self.use_detection_context:
            # Residual add of the detection context (contrast with the
            # concatenation used in CharWordTransformerEncoding).
            detection_context = self.detection_context_layer(detection_outputs)  # batch_size*seq_length*hidden_size
            outputs = outputs + detection_context
        correction_outputs = self.correction(outputs)
        return detection_outputs, correction_outputs
class GRUDetection(nn.Module):
    """Bidirectional-GRU tagger that scores each token with an error-type
    probability distribution and also exposes the raw token embeddings."""

    def __init__(self, n_words: int, n_labels_error: int, d_model: int = 512, d_hid: int = 512, n_layers: int = 2,
                 bidirectional: bool = True, dropout: float = 0.2):
        super(GRUDetection, self).__init__()
        # Submodules are created in the original registration order so that
        # seeded parameter initialisation stays reproducible.
        self.word_embedding = nn.Embedding(n_words, d_model)
        self.gru = nn.GRU(
            input_size=d_model,
            hidden_size=d_hid,
            num_layers=n_layers,
            bidirectional=bidirectional,
            batch_first=True
        )
        # Bidirectional GRUs concatenate forward and backward states.
        self.output_dim = d_hid * 2 if bidirectional else d_hid
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)
        self.linear = nn.Linear(self.output_dim, n_labels_error)

    def forward(self, src):
        """
        :param src: word error token ids [batch_size, seq_len]
        :return: (error-type probabilities [batch_size, seq_len, n_errors],
                  word embeddings [batch_size, seq_len, d_model])
        """
        token_embeddings = self.word_embedding(src)
        gru_states, _ = self.gru(token_embeddings)  # batch_size*seq_len*output_dim
        logits = self.dropout(self.linear(gru_states))
        return self.softmax(logits), token_embeddings
class MaskedSoftBert(nn.Module):
    """Soft-masked correction model: a GRU detector produces per-token error
    probabilities that softly blend each token embedding with the [MASK]
    embedding before a transformer-encoder correction network
    (in the style of Soft-Masked BERT)."""

    def __init__(self, n_words: int, n_labels_error: int, mask_token_id: int,
                 n_head: int = 8, n_layer_attn: int = 6, d_model: int = 512, d_hid: int = 512,
                 n_layers_gru: int = 2, bidirectional: bool = True, dropout: float = 0.2):
        super(MaskedSoftBert, self).__init__()
        self.detection = GRUDetection(n_words=n_words,
                                      n_labels_error=n_labels_error,
                                      d_model=d_model,
                                      n_layers=n_layers_gru,
                                      bidirectional=bidirectional
                                      )
        self.position_encoding = PositionalEncoding(d_model, dropout, max_len=128)
        self.encoder_layer = nn.TransformerEncoderLayer(d_hid, n_head, d_hid, dropout)
        self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, n_layer_attn)
        self.mask_token_id = mask_token_id
        self.correction = nn.Linear(d_hid, n_words)

    def forward(self, src: Tensor,
                src_mask: Tensor = None,
                src_key_padding_mask: Tensor = None
                ):
        """
        :param src: word error token ids [batch_size, seq_len]
        :param src_mask: optional additive attention mask
        :param src_key_padding_mask: optional pad-token mask
        :return: detection outputs [batch_size, n_words, n_errors] and
                 correction outputs [batch_size, n_words, n_words]
        """
        # Embedding of the [MASK] token, shared across the batch.
        mask_embedding = self.detection.word_embedding(torch.tensor([[self.mask_token_id]]).to(device))
        detection_outputs, embeddings = self.detection(src)
        # Probability that each token is correct (error class 0).
        prob_correct_word = detection_outputs[:, :, 0].unsqueeze(2)  # batch_size * n_words * 1
        # Soft masking: confident-correct tokens keep their own embedding,
        # likely-wrong tokens drift toward the [MASK] embedding.
        soft_mask_embedding = prob_correct_word * embeddings + (1 - prob_correct_word) * mask_embedding
        soft_mask_embedding = self.position_encoding(soft_mask_embedding)
        # Rebuild the causal mask whenever the sequence length changes.
        if src_mask is None or src_mask.size(0) != src.size(1):
            src_mask = generate_square_mask(src.size(1))
        if src_key_padding_mask is None:
            src_key_padding_mask = generate_source_mask(src, self.mask_token_id)
        outputs = self.transformer_encoder(
            soft_mask_embedding.transpose(0, 1),  # seq_len * batch_size * hidden_size
            mask=src_mask,  # seq_len * seq_len
            src_key_padding_mask=src_key_padding_mask  # batch_size*seq_len
        ).transpose(0, 1)  # batch_size * n_words * d_hid
        # Residual connection to the original token embeddings.
        # NOTE(review): += modifies `outputs` in place — fine here, but keep
        # in mind if autograd graph sharing ever becomes an issue.
        outputs +=embeddings
        correction_outputs = self.correction(outputs)
        return detection_outputs, correction_outputs
|
"""
Credits: https://github.com/valeoai/BF3S
"""
import torch
def create_rotations_labels(batch_size, device):
    """Return rotation-class labels for a 4x-rotated batch.

    The result is [0]*B + [1]*B + [2]*B + [3]*B as one 1-D tensor, matching
    the concatenation order produced by create_4rotations_images.
    """
    return torch.arange(4, device=device).repeat_interleave(batch_size)
def apply_2d_rotation(input_tensor, rotation):
    """Apply a 2d rotation of 0, 90, 180, or 270 degrees to a tensor.

    The spatial dimensions are assumed to be the last two (e.g. for a 4D
    tensor, dim 2 is height and dim 3 is width).
    """
    assert input_tensor.dim() >= 2
    h_dim = input_tensor.dim() - 2
    w_dim = h_dim + 1

    def flip_ud(t):
        return torch.flip(t, dims=(h_dim,))

    def flip_lr(t):
        return torch.flip(t, dims=(w_dim,))

    def transpose_hw(t):
        return torch.transpose(t, h_dim, w_dim)

    if rotation == 0:
        return input_tensor
    if rotation == 90:
        return flip_ud(transpose_hw(input_tensor))
    if rotation == 180:
        return flip_lr(flip_ud(input_tensor))
    if rotation == 270:  # equivalently -90 degrees
        return transpose_hw(flip_ud(input_tensor))
    raise ValueError(
        "rotation should be 0, 90, 180, or 270 degrees; input value {}".format(rotation)
    )
def create_4rotations_images(images, stack_dim=None):
    """Rotate the batch by 0/90/180/270 degrees and combine the results.

    With stack_dim=None the four rotated batches are concatenated along
    dim 0; otherwise they are stacked along *stack_dim*.
    """
    rotated = [apply_2d_rotation(images, rotation=90 * k) for k in range(4)]
    if stack_dim is None:
        return torch.cat(rotated, dim=0)
    return torch.stack(rotated, dim=stack_dim)
|
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Oct 8 2012)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
import time
###########################################################################
## Class MyFrame1
###########################################################################
class MyFrame ( wx.Frame ):
    """Generated demo frame (wxFormBuilder) with one button on a panel.

    Most of the generated sizer wiring is commented out; the constructor
    ends with an animation experiment moving the button.
    """
    def __init__( self, parent ):
        wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.Size( 500,300 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
        # NOTE(review): SetSizeHintsSz is the wxPython-classic API; Phoenix
        # renamed it to SetSizeHints — confirm the targeted wx version.
        self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
        bSizer3 = wx.BoxSizer( wx.VERTICAL )
        self.m_panel3 = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
        #bSizer4 = wx.BoxSizer( wx.VERTICAL )
        self.m_button25 = wx.Button( self.m_panel3, wx.ID_ANY, u"MyButton", wx.Point(0, 0), wx.DefaultSize, 0 )
        #bSizer4.Add( self.m_button25, 0, 0, 5 )
        #self.m_button25.Hide()
        #self.m_panel3.SetSizer( bSizer4 )
        #self.m_panel3.Layout()
        #bSizer4.Fit( self.m_panel3 )
        bSizer3.Add( self.m_panel3, 1, wx.EXPAND, 5 )
        #self.SetSizer( bSizer3 )
        #self.Layout()
        self.Centre( wx.BOTH )
        # NOTE(review): this loop runs inside __init__, before MainLoop
        # starts — time.sleep blocks for ~3s and the intermediate positions
        # are never repainted; a wx.Timer would be the event-safe approach.
        for i in range(30):
            self.m_button25.SetPosition(wx.Point(i, 30))
            time.sleep(0.1)
class MyApp(wx.App):
    """Application bootstrap: create, show and register the main frame."""

    def OnInit(self):
        main_frame = MyFrame(None)
        main_frame.Show(True)
        self.SetTopWindow(main_frame)
        # Returning True lets wx continue into the event loop.
        return True
# Instantiate the application and enter the wx event loop (blocks until
# the last top-level window closes).
app = MyApp(0)
app.MainLoop()
|
# -*- coding: utf-8 -*-
#
# MCR-Analyser
#
# Copyright (C) 2021 Martin Knopp, Technical University of Munich
#
# This program is free software, see the LICENSE file in the root of this
# repository for details
from pathlib import Path
from qtpy import QtCore, QtGui, QtWidgets
import mcr_analyser.utils as util
from mcr_analyser.database.database import Database
from mcr_analyser.ui.exporter import ExportWidget
from mcr_analyser.ui.importer import ImportWidget
from mcr_analyser.ui.measurement import MeasurementWidget
from mcr_analyser.ui.welcome import WelcomeWidget
class MainWindow(QtWidgets.QMainWindow):
    """Main application window.

    Hosts a tab widget with the welcome, import, measurement and export
    views, wires the cross-widget signals, and restores the last session
    (most recently used database and active tab) from ``QSettings``.

    NOTE(review): ``_`` is assumed to be a gettext-style translation
    function installed globally elsewhere in the application -- confirm.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setWindowTitle(_("MCR-Analyzer"))
        self.tab_widget = QtWidgets.QTabWidget(self)
        self.setCentralWidget(self.tab_widget)
        # The four main views shown as tabs.
        self.welcome_widget = WelcomeWidget()
        self.import_widget = ImportWidget()
        self.measurement_widget = MeasurementWidget()
        self.export_widget = ExportWidget()
        # Actions/menus must exist before update_recent_files() populates
        # the "Recently used databases" submenu.
        self.create_actions()
        self.create_menus()
        self.create_status_bar()
        self.update_recent_files()
        self.tab_widget.addTab(self.welcome_widget, _("&Welcome"))
        self.tab_widget.addTab(self.import_widget, _("&Import measurements"))
        self.tab_widget.addTab(self.measurement_widget, _("&Measurement && Data Entry"))
        self.tab_widget.addTab(self.export_widget, _("&Export"))
        # Switching databases refreshes both the measurement view and the
        # recent-files menu.
        self.welcome_widget.changedDatabase.connect(
            self.measurement_widget.switchDatabase
        )
        self.welcome_widget.changedDatabase.connect(self.update_recent_files)
        self.import_widget.importDone.connect(self.measurement_widget.refreshDatabase)
        # Open last active database
        settings = QtCore.QSettings()
        recent_files = util.ensure_list(settings.value("Session/Files"))
        try:
            # IndexError from recent_files[0] means there is no previous
            # session; that is expected and silently ignored.
            path = Path(recent_files[0])
            if path.exists():
                db = Database()
                db.connect_database(f"sqlite:///{path}")
                self.measurement_widget.switchDatabase()
        except IndexError:
            pass
        # Restore the tab that was active when the app was last closed.
        self.tab_widget.setCurrentIndex(settings.value("Session/ActiveTab", 0, int))

    def closeEvent(self, event: QtGui.QCloseEvent):
        """Persist the active tab index before the window closes."""
        settings = QtCore.QSettings()
        settings.setValue("Session/ActiveTab", self.tab_widget.currentIndex())
        event.accept()

    def create_actions(self):
        """Create the QActions used by the menu bar."""
        self.about_action = QtWidgets.QAction(_("&About"), self)
        self.about_action.triggered.connect(self.show_about_dialog)
        self.new_action = QtWidgets.QAction(_("Create &new database..."), self)
        self.new_action.setShortcut(QtGui.QKeySequence.New)
        self.new_action.setStatusTip(_("Create a new MCR-Analyser database."))
        # New/open delegate to the welcome widget's button handlers so the
        # menu and the welcome page share one code path.
        self.new_action.triggered.connect(self.welcome_widget.clicked_new_button)
        self.open_action = QtWidgets.QAction(_("&Open existing database..."), self)
        self.open_action.setShortcut(QtGui.QKeySequence.Open)
        self.open_action.setStatusTip(_("Open an existing MCR-Analyser database."))
        self.open_action.triggered.connect(self.welcome_widget.clicked_open_button)
        self.quit_action = QtWidgets.QAction(_("&Quit"), self)
        self.quit_action.setShortcut(QtGui.QKeySequence.Quit)
        self.quit_action.setStatusTip(_("Terminate the application."))
        self.quit_action.triggered.connect(self.close)

    def create_menus(self):
        """Build the File and Help menus."""
        file_menu = self.menuBar().addMenu(_("&File"))
        file_menu.addAction(self.new_action)
        file_menu.addAction(self.open_action)
        file_menu.addSeparator()
        # Populated lazily by update_recent_files().
        self.recent_menu = file_menu.addMenu(_("Recently used databases"))
        file_menu.addSeparator()
        file_menu.addAction(self.quit_action)
        self.menuBar().addSeparator()
        help_menu = self.menuBar().addMenu(_("&Help"))
        help_menu.addAction(self.about_action)

    def create_status_bar(self):
        """Instantiate the (initially empty) status bar."""
        self.statusBar()

    def sizeHint(self):
        """Preferred initial window size."""
        return QtCore.QSize(1700, 900)

    def show_about_dialog(self):
        """Show the About box with license (MIT) text."""
        QtWidgets.QMessageBox.about(
            self,
            f"About {self.windowTitle()}",
            _(
                """
                <h1>MCR-Analyser</h1>

                <p>Copyright &copy; 2021 Martin Knopp,
                Technical University of Munich</p>

                <p>Permission is hereby granted, free of charge, to any person
                obtaining a copy of this software and associated documentation
                files (the "Software"), to deal in the Software without
                restriction, including without limitation the rights to use,
                copy, modify, merge, publish, distribute, sublicense, and/or
                sell copies of the Software, and to permit persons to whom the
                Software is furnished to do so, subject to the following
                conditions:</p>

                <p>The above copyright notice and this permission notice shall
                be included in all copies or substantial portions of the
                Software.</p>

                <p>THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
                KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
                WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
                AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
                HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
                WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
                FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
                OTHER DEALINGS IN THE SOFTWARE.</p>
                """
            ),
        )

    def update_recent_files(self):
        """Rebuild the recent-databases submenu from QSettings."""
        self.recent_menu.clear()
        settings = QtCore.QSettings()
        recent_files = util.ensure_list(settings.value("Session/Files"))
        if recent_files:
            for item in recent_files:
                path = Path(item)
                try:
                    # Display paths under $HOME in the shorter "~/..." form.
                    menu_entry = f"~/{path.relative_to(Path.home())}"
                except (KeyError, RuntimeError, ValueError):
                    # Path is outside the home directory (or home is
                    # undeterminable); fall back to the absolute path.
                    menu_entry = str(path)
                action = QtWidgets.QAction(menu_entry, self.recent_menu)
                # Store the full path; open_recent_file() reads it back.
                action.setData(str(path))
                action.triggered.connect(self.open_recent_file)
                self.recent_menu.addAction(action)
        # Grey the submenu out when there is nothing to show.
        if self.recent_menu.isEmpty():
            self.recent_menu.setEnabled(False)
        else:
            self.recent_menu.setEnabled(True)

    def open_recent_file(self):
        """Open the database referenced by the triggering menu action.

        On success the file is moved to the front of the recent list; if it
        no longer exists it is dropped from the list and a warning shown.
        """
        file_name = Path(self.sender().data())

        if file_name.exists():
            db = Database()
            db.connect_database(f"sqlite:///{file_name}")

            # Update recent files
            settings = QtCore.QSettings()
            recent_files = util.ensure_list(settings.value("Session/Files"))

            # Move-to-front: prepend, then drop later duplicates.
            recent_files.insert(0, str(file_name))
            recent_files = util.ensure_list(util.remove_duplicates(recent_files))
            # NOTE(review): QSettings may return "Preferences/MaxRecentFiles"
            # as a string on some platforms, which would break this slice --
            # confirm an int is always stored.
            settings.setValue(
                "Session/Files",
                util.simplify_list(
                    recent_files[0 : settings.value("Preferences/MaxRecentFiles", 5)]
                ),
            )

            self.measurement_widget.switchDatabase()
        else:
            # Update recent files
            settings = QtCore.QSettings()
            recent_files = util.ensure_list(settings.value("Session/Files"))
            try:
                recent_files.remove(str(file_name))
            except ValueError:
                # Already absent from the list; nothing to remove.
                pass
            settings.setValue("Session/Files", util.simplify_list(recent_files))

            QtWidgets.QMessageBox.warning(
                self,
                _("File not found"),
                _(
                    "'{}' could not be found. "
                    "It might have been deleted or the drive or network path is currently not accessible.".format(
                        file_name
                    )
                ),
            )
|
from neuroml import NeuroMLDocument
from neuroml import IzhikevichCell
from neuroml.writers import NeuroMLWriter
from neuroml.utils import validate_neuroml2
def write_izhikevich(filename="./tmp/SingleIzhikevich_test.nml"):
    """Create a NeuroML document containing one Izhikevich cell, write it
    to *filename*, and validate the written file against NeuroML2."""
    document = NeuroMLDocument(id="SingleIzhikevich")
    cell = IzhikevichCell(id="iz0", v0="-70mV", thresh="30mV", a="0.02", b="0.2", c="-65.0", d="6")
    document.izhikevich_cells.append(cell)
    NeuroMLWriter.write(document, filename)
    validate_neuroml2(filename)
write_izhikevich()
|
# -*- coding: utf-8 -*-
#
# zernike.py
# aopy
#
# Created by Alexander Rudy on 2013-08-09.
# Copyright 2013 Alexander Rudy. All rights reserved.
#
"""
Zernike Polynomials
===================
The `zernike polynomials`_ are a modal basis set defined on a circular aperture, and so are useful for optics.
They are defined in an even/odd fashion, with the following convention
.. math::
    Z_{n}^{m}(\\rho,\\varphi) = R_{n}^{m}(\\rho) \\cos(m \\varphi)
    Z_{n}^{-m}(\\rho,\\varphi) = R_{n}^{m}(\\rho) \\sin(m \\varphi)
where :math:`R_{n}^{m}` is given by
.. math::
    R_{n}^{m}(\\rho) = \\sum_{k=0}^{(n-m)/2} \\frac{(-1)^k (n-k)!}{k! ((n+m)/2 - k)! ((n-m)/2 - k)!} \\rho^{n-2k}
For all of the functions below, the zernike polynomial is calculated assuming a radius of 1. Radii or indicies should be scaled appropriately to the desired aperture.
Calculating Zernike Polynomials
-------------------------------
.. autofunction:: zernike_polar
.. autofunction:: zernike_cartesian
.. autofunction:: zernike_noll_polar
.. autofunction:: zernike_noll_cartesian
Calculating Zernike Slopes
--------------------------
.. autofunction:: zernike_slope_polar
.. autofunction:: zernike_slope_cartesian
Index Conversion Tools
----------------------
.. autofunction:: noll_to_zern
.. autofunction:: zern_to_noll
Zernike Triangle Diagrams
-------------------------
A triangle of zernike polynomials is often set out like Pascal's triangle. These functions do that in matplotlib.
.. autofunction:: zernike_triangle
.. autofunction:: zernike_slope_triangle
Zernike Utility Functions
-------------------------
These functions are used internally to calculated the Zernike polynomials. They are used to prevent repetitive input of calculations and reduce errors.
.. autofunction:: zernike_ks
.. autofunction:: zernike_rho
.. autofunction:: zernike_rho_slope
.. autofunction:: zernike_phi_slope
.. _zernike polynomials: http://en.wikipedia.org/wiki/Zernike_polynomials
"""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import numpy as np
from scipy.misc import factorial
def zernike_ks(n, m, slope=False):
    r"""
    The zernike factorial terms.

    :param n: Zernike `n` index.
    :param m: Zernike `m` index.
    :param slope: If True, drop the ``2k == n`` term, whose radial power is
        constant and therefore vanishes when differentiated w.r.t. rho.
    :returns: ``(ks, kterm)`` where ``kterm`` is the math below, and ``ks`` is the vector in k-space.

    .. math::
        \sum_{k=0}^{(n-m)/2} \frac{(-1)^k (n-k)!}{k! ((n+m)/2 - k)! ((n-m)/2 - k)!}
    """
    # ``scipy.misc.factorial`` was deprecated and removed from SciPy; the
    # canonical location is ``scipy.special.factorial``. Import locally so
    # this function works regardless of the stale module-level import.
    from scipy.special import factorial
    ks = np.arange(0, (n - m) / 2 + 1)
    if slope:
        # Remove the constant (rho**0) term before differentiating.
        ks = ks[(2 * ks != n)]
    kterm = ((-1.0)**ks * factorial(n - ks)) / (
        factorial(ks) * factorial((n + m) / 2.0 - ks) * factorial((n - m) / 2.0 - ks))
    return ks, kterm
def zernike_rho(n, m, Rho):
    r"""
    Calculate the radial component of the zernike polynomial, often called :math:`R_{n}^{m}`.

    :param n: Zernike `n` index.
    :param m: Zernike `m` index.
    :param Rho: The radii on which to calculate.
    :returns: The :math:`R_{n}^{m}` zernike component.

    Defined piecewise: identically zero for :math:`m-n` odd, otherwise

    .. math::
        R_{n}^{m}(\rho) = \sum_{k=0}^{(n-m)/2} \frac{(-1)^k (n-k)!}{k! ((n+m)/2 - k)! ((n-m)/2 - k)!} \rho^{n-2k}
    """
    # Odd (n - m): the radial polynomial vanishes everywhere.
    if (n - m) % 2 == 1:
        return np.zeros_like(Rho)
    k_indices, coefficients = zernike_ks(n, m)
    powers = n - 2.0 * k_indices
    # Broadcast each power over the radii, weight by the factorial terms,
    # then sum the series along the trailing (k) axis.
    terms = coefficients * np.power(Rho[..., np.newaxis], powers)
    return terms.sum(axis=-1)
def zernike_rho_slope(n, m, Rho):
    r"""
    Zernike radial slope.

    :param n: Zernike `n` index.
    :param m: Zernike `m` index.
    :param Rho: The radii on which to calculate.

    .. math::
        \frac{\partial R_{n}^{m}}{\partial \rho}
    """
    # slope=True drops the constant term, whose derivative is zero.
    k_indices, coefficients = zernike_ks(n, m, slope=True)
    powers = n - 2.0 * k_indices
    # Term-wise power rule: d/drho rho**p = p * rho**(p - 1).
    terms = coefficients * powers * np.power(Rho[..., np.newaxis], powers - 1)
    return terms.sum(axis=-1)
def zernike_phi_slope(n, m, Rho):
    r"""
    Zernike Phi slope.

    :param n: Zernike `n` index.
    :param m: Zernike `m` index.
    :param Rho: The radii on which to calculate.

    .. math::
        \frac{\partial R_{n}^{m}}{\partial \varphi}
    """
    # The radial factor is unchanged by the phi derivative; only the
    # angular factor (applied by the caller) differentiates.
    k_indices, coefficients = zernike_ks(n, m, slope=False)
    radial_terms = np.power(Rho[..., np.newaxis], n - 2.0 * k_indices) * coefficients
    return np.sum(radial_terms, axis=-1)
def zernike_polar(n, m, Rho, Phi):
    r"""
    Calculate the zernike polynomial in polar coordinates. The radius of the polynomial is 1.

    :param n: Zernike `n` index.
    :param m: Zernike `m` index.
    :param Rho: The radii on which to calculate.
    :param Phi: The angles on which to calculate.

    .. math::
        Z_{n}^{m}(\rho,\varphi) = R_{n}^{m}(\rho) \cos(m \varphi)

        Z_{n}^{-m}(\rho,\varphi) = R_{n}^{m}(\rho) \sin(m \varphi)
    """
    # Even modes (m > 0) use cosine, odd modes (m < 0) use sine of |m|*phi,
    # and m == 0 is purely radial.
    if m == 0:
        Z = zernike_rho(n, 0, Rho)
    elif m > 0:
        Z = zernike_rho(n, m, Rho) * np.cos(m * Phi)
    else:
        m_abs = np.abs(m)
        Z = zernike_rho(n, m_abs, Rho) * np.sin(m_abs * Phi)
    # RMS normalization; the m == 0 modes carry an extra factor of 1/2.
    normalization = np.sqrt((2 * (n + 1)) / (1 + int(m == 0)))
    return Z * normalization
def zernike_slope_polar(n, m, Rho, Phi):
    """
    Calculate the slope of the Zernike polynomials in polar coordinates.

    :param n: Zernike `n` index.
    :param m: Zernike `m` index.
    :param Rho: The radii on which to calculate.
    :param Phi: The angles on which to calculate.
    :returns: ``S_Rho, S_Phi``, the pair of radial and angular slopes.
    """
    if m > 0:
        # Even mode Z = R cos(m phi): dZ/drho = R' cos(m phi),
        # dZ/dphi = -m R sin(m phi).
        S_Rho = zernike_rho_slope(n, np.abs(m), Rho) * np.cos(m * Phi)
        S_Phi = zernike_rho(n, np.abs(m), Rho) * -m * np.sin(m * Phi)
    elif m < 0:
        # Odd mode Z = R sin(|m| phi), expressed through the negative m:
        # -sin(m phi) == sin(|m| phi) and -m cos(m phi) == |m| cos(|m| phi).
        S_Rho = zernike_rho_slope(n, np.abs(m), Rho) * -np.sin(m * Phi)
        S_Phi = zernike_rho(n, np.abs(m), Rho) * -m * np.cos(m * Phi)
    else:
        # m == 0: purely radial mode, no angular dependence.
        S_Rho = zernike_rho_slope(n, 0, Rho)
        S_Phi = np.zeros_like(S_Rho)
    # Apply the same RMS normalization used by zernike_polar.
    S_Rho *= np.sqrt((2.0 * (n+1))/(1.0 + int(m == 0)))
    S_Phi *= np.sqrt((2.0 * (n+1))/(1.0 + int(m == 0)))
    return S_Rho, S_Phi
def zernike_cartesian(n, m, X, Y):
    """
    Calculate the zernike polynomials on a cartesian grid.

    :param n: Zernike `n` index.
    :param m: Zernike `m` index.
    :param X: The X on which to calculate.
    :param Y: The Y on which to calculate.
    """
    # Delegate to the polar form after a coordinate conversion.
    polar_radius, polar_angle = cartesian_to_polar(X, Y)
    return zernike_polar(n, m, polar_radius, polar_angle)
def zernike_slope_cartesian(n, m, X, Y):
    """
    Calculate the zernike polynomial slopes on a cartesian grid.

    :param n: Zernike `n` index.
    :param m: Zernike `m` index.
    :param X: The X on which to calculate.
    :param Y: The Y on which to calculate.
    :returns: ``X_s, Y_s``, the slopes along x and y.
    """
    Rho, Phi = cartesian_to_polar(X, Y)
    # Guard against division by zero at the origin: use a dummy radius of 1
    # there, and explicitly zero the angular slope at Rho == 0 below.
    DRho = Rho.copy()
    DRho[Rho == 0] = 1
    S_Rho, S_Phi = zernike_slope_polar(n, m, Rho, Phi)
    S_Phi[Rho == 0] = 0
    # Chain rule converting (d/drho, d/dphi) into (d/dx, d/dy); the angular
    # term carries a 1/rho factor.
    X_s = np.cos(Phi) * S_Rho - np.sin(Phi) * S_Phi / DRho
    Y_s = np.sin(Phi) * S_Rho + np.cos(Phi) * S_Phi / DRho
    return X_s, Y_s
def noll_to_zern(j):
    """
    Convert from linear Noll index to a tuple of Zernike indicies.

    :param int j: Linear Noll Index
    :returns: ``(n, m)`` pair of Zernike indices.
    :raises ValueError: If ``j`` is negative or does not invert cleanly.
    """
    # Indices are zero-based; reject negatives before inverting.
    if j < 0:
        raise ValueError("Noll indices start at 0. j={:d}".format(j))
    # Invert the triangular layout: n is the row, m the signed column.
    n = np.ceil((-3 + np.sqrt(9 + 8 * j)) / 2)
    m = 2 * j - n * (n + 2)
    if int(n) != n:
        raise ValueError("This should never happen, n={:f} should be an integer.".format(n))
    if int(m) != m:
        raise ValueError("This should never happen, m={:f} should be an integer.".format(m))
    return (int(n), int(m))
def zern_to_noll(n, m):
    """
    Convert a Zernike index pair, (n,m) to a Linear Noll index.

    :param n: Zernike `n` index.
    :param m: Zernike `m` index.
    :returns: The linear index as an ``int``.
    :raises ValueError: If ``(n, m)`` is not a valid pair (non-integer index).
    """
    # Row offset (triangular number) plus the position within the row.
    linear_index = (n * (n + 1)) / 2 + (n + m) / 2
    if int(linear_index) != linear_index:
        raise ValueError("This should never happen, j={:f} should be an integer.".format(linear_index))
    return int(linear_index)
def zernike_noll_polar(j, Rho, Phi):
    """
    Calculate the jth linear Noll indexed Zernike mode.

    :param j: The noll index.
    :param Rho: The radii on which to calculate.
    :param Phi: The angles on which to calculate.
    """
    # Resolve the linear index to an (n, m) pair, then evaluate.
    zernike_n, zernike_m = noll_to_zern(j)
    return zernike_polar(zernike_n, zernike_m, Rho, Phi)
def zernike_noll_cartesian(j, X, Y):
    """
    Calculate the jth linear Noll indexed Zernike mode.

    :param j: The noll index.
    :param X: The X on which to calculate.
    :param Y: The Y on which to calculate.
    """
    radius, angle = cartesian_to_polar(X, Y)
    return zernike_noll_polar(j, radius, angle)
def cartesian_to_polar(X, Y):
    """
    Convert a coordinate grid/pair from cartesian to polar coordinates.

    :param X: The X to convert.
    :param Y: The Y to convert.
    :return: ``Rho, Phi``, the radial and angular components.
    """
    radius = np.sqrt(X ** 2 + Y ** 2)
    # arctan2 keeps the correct quadrant for every (x, y) sign combination.
    angle = np.arctan2(Y, X)
    return radius, angle
def polar_to_cartesian(Rho, Phi):
    """
    Convert a coordinate grid/pair from polar to cartesian coordinates.

    :param Rho: The radial component to convert.
    :param Phi: The angular component to convert.
    :return: ``X, Y``, the X and Y components.
    """
    return np.cos(Phi) * Rho, np.sin(Phi) * Rho
def _find_triangular_number(n_items):
"""
Find the next largest triangular number that fits the given number of items.
Returns `(n, T_n)`, where `T_n` is the triangular number, and `n` is the number of rows in the triangle.
"""
from scipy.special import binom
n = 1
T_n = 1
while T_n < n_items:
n = n + 1
T_n = binom(n+1, 2)
return (n, T_n)
def zernike_triangle(figure=None, noll=16, size=40, radius=18):
    """
    Make a zernike triangle diagram.

    :param figure: The matplotlib figure instance to use.
    :param int noll: The maximum linear noll index zernike mode to display. This number will be increased up to the next triangular number.
    :param int size: The size of the grid to use.
    :param int radius: The radius scaling to use.
    """
    import matplotlib.gridspec
    if figure is None:
        import matplotlib.pyplot
        figure = matplotlib.pyplot.figure()
    X, Y = np.mgrid[-size/2:size/2,-size/2:size/2] / radius
    # Aperture mask over the unit circle. ``np.int`` was removed in
    # NumPy 1.24; the builtin ``int`` is the documented replacement.
    ap = (np.sqrt(X**2 + Y**2) < 1.0).astype(int)
    rows, figures = _find_triangular_number(noll)
    cols = (rows * 2)
    gs = matplotlib.gridspec.GridSpec(rows, cols)
    for j in range(int(figures)):
        n, m = noll_to_zern(j)
        # Horizontal grid position within the row, centered at offset m.
        x = rows + m + 1
        Z = zernike_noll_cartesian(j, X, Y)
        # Blank out everything outside the aperture so it renders empty.
        Z[ap != 1] = np.nan
        ax = figure.add_subplot(gs[n,x-1:x+1])
        ax.imshow(Z, interpolation='nearest')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax.set_frame_on(False)
        ax.text(-0.1,0, r"$Z_{{{n}}}^{{{m}}}$".format(n=n,m=m), transform=ax.transAxes, ha='left', va='bottom')
def zernike_slope_triangle(figure=None, noll=16, size=40, radius=18):
    """
    Make a zernike slope triangle diagram.

    :param figure: The matplotlib figure instance to use.
    :param int noll: The maximum linear noll index zernike mode to display. This number will be increased up to the next triangular number.
    :param int size: The size of the grid to use.
    :param int radius: The radius scaling to use.
    """
    import matplotlib.gridspec
    if figure is None:
        import matplotlib.pyplot
        figure = matplotlib.pyplot.figure()
    X, Y = np.mgrid[-size/2:size/2,-size/2:size/2] / radius
    # Aperture mask over the unit circle. ``np.int`` was removed in
    # NumPy 1.24; use the builtin ``int`` instead.
    ap = (np.sqrt(X**2 + Y**2) < 1.0).astype(int)
    rows, figures = _find_triangular_number(noll)
    cols = (rows * 4)
    label = r"$\frac{{\partial Z_{{{n}}}^{{{m}}}}}{{\partial {ax}}}$"
    gs = matplotlib.gridspec.GridSpec(rows, cols)
    for j in range(1,int(figures)+1):
        n, m = noll_to_zern(j)
        # Twice the column span of zernike_triangle: x and y slopes sit
        # side by side in one panel.
        x = 2 * (rows + m + 1)
        Z_x, Z_y = zernike_slope_cartesian(n, m, X, Y)
        # Blank out everything outside the aperture so it renders empty.
        Z_x[ap != 1] = np.nan
        Z_y[ap != 1] = np.nan
        Z = np.hstack((Z_x, Z_y))
        ax = figure.add_subplot(gs[n - 1,x-2:x+2])
        ax.imshow(Z, interpolation='nearest')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax.text(0.25,0, label.format(n=n,m=m,ax='x'), fontsize=8,
                transform=ax.transAxes, ha='center', va='top')
        ax.text(0.75,0, label.format(n=n,m=m,ax='y'), fontsize=8,
                transform=ax.transAxes, ha='center', va='top')
        ax.set_title("({n},{m})".format(n=n,m=m))
|
#!/usr/bin/env python
'''
Class 9 - Exercise 3
'''
def func3(param='Whatever'):
    '''
    Print a parameter when a function is called

    :param param: Value to print; defaults to ``'Whatever'``.
    '''
    # print() as a function is valid in both Python 2 and Python 3; the
    # original ``print param`` statement is a SyntaxError under Python 3.
    print(param)
if __name__ == '__main__':
    # print() form works on both Python 2 and Python 3; the original
    # ``print '...'`` statement fails to parse under Python 3.
    print('whatever is just a module, you need to import it')
|
import unittest
import shutil
import tempfile
import numpy as np
# import pandas as pd
# import pymc3 as pm
# from pymc3 import summary
# from sklearn.mixture import GaussianMixture as skGaussianMixture
from sklearn.model_selection import train_test_split
from pmlearn.exceptions import NotFittedError
from pmlearn.mixture import GaussianMixture
class GaussianMixtureTestCase(unittest.TestCase):
    """Shared fixture for GaussianMixture tests: synthetic 1-D data drawn
    from a known 3-component Gaussian mixture, fresh model instances, and
    a temporary working directory for save/load tests."""

    def setUp(self):
        # Ground-truth mixture used to generate the synthetic data.
        self.num_components = 3
        self.num_pred = 1
        self.num_training_samples = 100

        # Mixing weights, component means, and component std deviations.
        self.pi = np.array([0.35, 0.4, 0.25])
        self.means = np.array([0, 5, 10])
        self.sigmas = np.array([0.5, 0.5, 1.0])

        # Assign each sample a random component...
        self.components = np.random.randint(0,
                                            self.num_components,
                                            self.num_training_samples)
        # ...then sample from that component's Gaussian; reshape to the
        # (n_samples, n_features) layout the estimator expects.
        X = np.random.normal(loc=self.means[self.components],
                             scale=self.sigmas[self.components])
        X.shape = (self.num_training_samples, 1)

        self.X_train, self.X_test = train_test_split(X, test_size=0.3)

        # Separate instances so ADVI- and NUTS-based tests don't share state.
        self.test_GMM = GaussianMixture()
        self.test_nuts_GMM = GaussianMixture()

        # Scratch directory for save/load tests; removed in tearDown.
        self.test_dir = tempfile.mkdtemp()

    def tearDown(self):
        # Remove the temporary directory created in setUp.
        shutil.rmtree(self.test_dir)
# class GaussianMixtureFitTestCase(GaussianMixtureTestCase):
# def test_advi_fit_returns_correct_model(self):
# # This print statement ensures PyMC3 output won't overwrite the test name
# print('')
# self.test_GMM.fit(self.X_train)
#
# self.assertEqual(self.num_pred, self.test_GMM.num_pred)
# self.assertEqual(self.num_components, self.test_GMM.num_components)
#
# self.assertAlmostEqual(self.pi[0],
# self.test_GMM.summary['mean']['pi__0'],
# 0)
# self.assertAlmostEqual(self.pi[1],
# self.test_GMM.summary['mean']['pi__1'],
# 0)
# self.assertAlmostEqual(self.pi[2],
# self.test_GMM.summary['mean']['pi__2'],
# 0)
#
# self.assertAlmostEqual(
# self.means[0],
# self.test_GMM.summary['mean']['cluster_center_0__0'],
# 0)
# self.assertAlmostEqual(
# self.means[1],
# self.test_GMM.summary['mean']['cluster_center_1__0'],
# 0)
# self.assertAlmostEqual(
# self.means[2],
# self.test_GMM.summary['mean']['cluster_center_2__0'],
# 0)
#
# self.assertAlmostEqual(
# self.sigmas[0],
# self.test_GMM.summary['mean']['cluster_variance_0__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[1],
# self.test_GMM.summary['mean']['cluster_variance_1__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[2],
# self.test_GMM.summary['mean']['cluster_variance_2__0'],
# 0)
#
# def test_nuts_fit_returns_correct_model(self):
# # This print statement ensures PyMC3 output won't overwrite the test name
# print('')
# self.test_nuts_GMM.fit(self.X_train,
# inference_type='nuts')
#
# self.assertEqual(self.num_pred, self.test_nuts_GMM.num_pred)
# self.assertEqual(self.num_components, self.test_nuts_GMM.num_components)
#
# self.assertAlmostEqual(self.pi[0],
# self.test_nuts_GMM.summary['mean']['pi__0'],
# 0)
# self.assertAlmostEqual(self.pi[1],
# self.test_nuts_GMM.summary['mean']['pi__1'],
# 0)
# self.assertAlmostEqual(self.pi[2],
# self.test_nuts_GMM.summary['mean']['pi__2'],
# 0)
#
# self.assertAlmostEqual(
# self.means[0],
# self.test_nuts_GMM.summary['mean']['cluster_center_0__0'],
# 0)
# self.assertAlmostEqual(
# self.means[1],
# self.test_nuts_GMM.summary['mean']['cluster_center_1__0'],
# 0)
# self.assertAlmostEqual(
# self.means[2],
# self.test_nuts_GMM.summary['mean']['cluster_center_2__0'],
# 0)
#
# self.assertAlmostEqual(
# self.sigmas[0],
# self.test_nuts_GMM.summary['mean']['cluster_variance_0__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[1],
# self.test_nuts_GMM.summary['mean']['cluster_variance_1__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[2],
# self.test_nuts_GMM.summary['mean']['cluster_variance_2__0'],
# 0)
#
#
class GaussianMixturePredictTestCase(GaussianMixtureTestCase):
    """Tests for GaussianMixture.predict; the fitted-model cases are
    currently disabled (commented out) and only the not-fitted error
    path is exercised."""

    # def test_predict_returns_predictions(self):
    #     print('')
    #     self.test_GMM.fit(self.X_train, self.y_train)
    #     preds = self.test_GMM.predict(self.X_test)
    #     self.assertEqual(self.y_test.shape, preds.shape)

    # def test_predict_returns_mean_predictions_and_std(self):
    #     print('')
    #     self.test_GMM.fit(self.X_train, self.y_train)
    #     preds, stds = self.test_GMM.predict(self.X_test, return_std=True)
    #     self.assertEqual(self.y_test.shape, preds.shape)
    #     self.assertEqual(self.y_test.shape, stds.shape)

    def test_predict_raises_error_if_not_fit(self):
        """predict() before fit() must raise NotFittedError with the
        exact expected message."""
        # This print statement keeps PyMC3 output from overwriting the
        # test name in the console.
        print('')
        with self.assertRaises(NotFittedError) as no_fit_error:
            # Use a fresh, never-fitted model instance.
            test_GMM = GaussianMixture()
            test_GMM.predict(self.X_train)

        expected = 'Run fit on the model before predict.'
        self.assertEqual(str(no_fit_error.exception), expected)
# class GaussianMixtureScoreTestCase(GaussianMixtureTestCase):
# def test_score_matches_sklearn_performance(self):
# print('')
# skGMM = skGaussianMixture(n_components=3)
# skGMM.fit(self.X_train)
# skGMM_score = skGMM.score(self.X_test)
#
# self.test_GMM.fit(self.X_train)
# test_GMM_score = self.test_GMM.score(self.X_test)
#
# self.assertAlmostEqual(skGMM_score, test_GMM_score, 0)
#
#
# class GaussianMixtureSaveAndLoadTestCase(GaussianMixtureTestCase):
# def test_save_and_load_work_correctly(self):
# print('')
# self.test_GMM.fit(self.X_train)
# score1 = self.test_GMM.score(self.X_test)
# self.test_GMM.save(self.test_dir)
#
# GMM2 = GaussianMixture()
# GMM2.load(self.test_dir)
#
# self.assertEqual(self.test_GMM.inference_type, GMM2.inference_type)
# self.assertEqual(self.test_GMM.num_pred, GMM2.num_pred)
# self.assertEqual(self.test_GMM.num_training_samples,
# GMM2.num_training_samples)
# pd.testing.assert_frame_equal(summary(self.test_GMM.trace),
# summary(GMM2.trace))
#
# score2 = GMM2.score(self.X_test)
# self.assertAlmostEqual(score1, score2, 0)
|
from django.contrib import admin
from comment.models import Comment
admin.site.register(Comment)
|
import requests
import constants
import course_class
def parse_to_html(html_file):
    '''Parse courses from Albert into data.html file.

    :param html_file: Path of the output HTML file the course catalog
        response is written to.

    On success the response is written both to ``html_file`` and to the
    backup file ``constants.BACKUP_HTML``; if the request fails, the last
    backup is copied into ``html_file`` instead.
    '''
    # Constants needed for the request
    url = "https://m.albert.nyu.edu/app/catalog/getClassSearch"
    payload = 'CSRFToken=0cacdd6a262ee0c2540ca0f1d44089d2&acad_group=UH&catalog_nbr=&class_nbr=&keyword=&nyu_location=&subject=&term=1224'
    # SECURITY NOTE(review): the CSRF token and the session cookies below are
    # hard-coded credentials. They expire and should be loaded from
    # configuration/environment instead of being committed to source control.
    headers = {
        'sec-ch-ua': '"Google Chrome";v="95", "Chromium";v="95", ";Not A Brand";v="99"',
        'Accept': '*/*',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'sec-ch-ua-mobile': '?0',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36',
        'sec-ch-ua-platform': '"macOS"',
        'Cookie': 'ADRUM=s=1636870439764&r=https%3A%2F%2Fbrightspace.nyu.edu%2Fd2l%2Flms%2Fdropbox%2Fuser%2Ffolder_user_view_feedback.d2l%3F150855492; BIGipServer~SNS-LOW~prod-sis-pool=rd3133o00000000000000000000ffff0a2f060co8066; ExpirePage=https://sis.portal.nyu.edu/psp/ihprod/; HPTabName=IS_SSS_TAB; HPTabNameRemote=; LastActiveTab=IS_SSS_TAB; PHPSESSID=3mstcj041d770t3aof5f447t52; PS_DEVICEFEATURES=width:1440 height:900 pixelratio:2 touch:0 geolocation:1 websockets:1 webworkers:1 datepicker:1 dtpicker:1 timepicker:1 dnd:1 sessionstorage:1 localstorage:1 history:1 canvas:1 svg:1 postmessage:1 hc:0 maf:0; PS_LASTSITE=https://sis.portal.nyu.edu/psp/ihprod/; PS_LOGINLIST=https://sis.nyu.edu/csprod https://sis.portal.nyu.edu/ihprod; PS_TOKEN=pQAAAAQDAgEBAAAAvAIAAAAAAAAsAAAABABTaGRyAk4Abwg4AC4AMQAwABTTIyhuXc421buKdmehGQ5pbbYAJmUAAAAFAFNkYXRhWXicJYpLDkBAEAXLEEs3IWNGfJYW2Mkk7MUJLFzP4TyjX6q6k34XkKUmSbQfQ5zi5KbGM9Aq+cTKQhHYmNk5CIw0DotTrRS/vezkSli5jvf37ZVO8AKCVAt8; PS_TOKENEXPIRE=14_Nov_2021_0:18_GMT; PS_TokenSite=https://sis.portal.nyu.edu/psp/ihprod/?pco01lw-1556s-8051-PORTAL-PSJSESSIONID; SignOnDefault=; __utma=57748789.1922840866.1628059481.1634405387.1635238799.10; __utmz=57748789.1634405387.9.9.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); _drip_client_6158192=vid%253D54db9895aa194ce784fc3ae4202d043c%2526pageViews%253D1%2526sessionPageCount%253D1%2526lastVisitedAt%253D392646895%2526weeklySessionCount%253D1%2526lastSessionAt%253D392646895; _fbp=fb.1.1628059634773.4892; _ga=GA1.2.1922840866.1628059481; _gid=GA1.2.1265445606.1636786654; _scid=71aa006a-23f4-42ea-890c-89907fa69fd4; _sctr=1|1635192000000; fpestid=S50C2-epUV24gCdlSadqrCOEpyczyqXcq5JUStKCxPPUNkP-2_ps6xbyuKkOfVNY6mLbdw; https%3a%2f%2fsis.portal.nyu.edu%2fpsp%2fihprod%2femployee%2fempl%2frefresh=list:%20%3Ftab%3Dremoteunifieddashboard%7C%3Frp%3Dremoteunifieddashboard%7C%7C%7C; lcsrftoken=hopTsZFqR3gKZ7dDdRhSzNX1Le2ImkVuOMdo+arSF7I=; nmstat=791ca8c8-78ab-1aed-df5c-5280f1b7d038; nyuad=eyJpdiI6InAwd0NwRlY2YjA3T01VYlN6VXRnN1E9PSIsInZhbHVlIjoiM1JHeWRxaEVjTW5XYW1jXC9BV2lQcUVGWHZhXC9DS2lIdHhSbnlnM1VwWnhPUjVsYzdXQVBmWUtLaTNoc1U0UTdhTFVEMittN3NnUGo5cHJHbzdBQW1tdz09IiwibWFjIjoiZWZmMDM4MzQ3MDZiZTQ1NGM2NjBkODZjNzBkYTNjZTQ1MTRkNTgwMTA4MDE1ZWI5YTdlN2YwYzJiODNjN2ZjMyJ9; pco01lw-1537s-8051-PORTAL-PSJSESSIONID=yAEdyHAO9VuqIwlgf9jAK87lXuRmxvEH!1483518525; pco01lw-1556s-8051-PORTAL-PSJSESSIONID=SM0dyGzyrYMhlq0-Q5RcjnLWMnP27kDu!-665168926; ps_theme=node:EMPL portal:EMPLOYEE theme_id:NYU_THEME accessibility:N formfactor:3 piamode:2; psback=%22%22url%22%3A%22https%3A%2F%2Fsis.portal.nyu.edu%2Fpsp%2Fihprod%2FEMPLOYEE%2FEMPL%2Fh%2F%3Ftab%3DIS_SSS_TAB%22%20%22label%22%3A%22LabelNotFound%22%20%22origin%22%3A%22PIA%22%20%22layout%22%3A%220%22%20%22refurl%22%3A%22https%3A%2F%2Fsis.portal.nyu.edu%2Fpsp%2Fihprod%2FEMPLOYEE%2FEMPL%2Fh%2F%3Ftab%3DIS_SSS_TAB%22%22; BIGipServer~SNS-LOW~prod-m-albert-pool=rd3133o00000000000000000000ffff0a2f060eo80; BIGipServer~SNS-LOW~prod-sis-portal-pool=rd3133o00000000000000000000ffff0a2f0614o8666; CSRFCookie=0cacdd6a262ee0c2540ca0f1d44089d2; _ga=GA1.4.1922840866.1628059481; _gid=GA1.4.1265445606.1636786654; highpoint_cs=rb7j695s1t5irkrqec3etmn3t2'
    }
    try:
        response = requests.request("POST", url, headers=headers, data=payload)
        # Write into the requested output file; ``with`` guarantees the
        # handle is closed even if the write fails. (The original rebound
        # the ``html_file`` parameter to the open file object and relied on
        # manual close() calls.)
        with open(html_file, 'w') as output_file:
            output_file.write(response.text)
        # Keep the latest successful response as the backup copy.
        with open(constants.BACKUP_HTML, 'w') as backup_file:
            backup_file.write(response.text)
    except requests.exceptions.RequestException as e:  # Error of not being able to parse data
        print('Error')
        # Fall back to the last known-good backup: copy its contents into
        # the requested output file.
        with open(constants.BACKUP_HTML, 'r') as firstfile, open(html_file, 'w') as secondfile:
            for line in firstfile:
                secondfile.write(line)
if __name__ == '__main__':
    # Fetch the catalog and write it to the configured courses HTML file.
    parse_to_html(constants.COURSES_HTML)
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Play a trained model.
You can visualize playing of the trained model by running:
.. code-block:: bash
cd ${PROJECT}/alf/examples;
python -m alf.bin.play \
--root_dir=~/tmp/cart_pole \
--alsologtostderr
"""
from absl import app
from absl import flags
from absl import logging
import copy
import gin
import os
import subprocess
import sys
import torch
from alf.algorithms.data_transformer import create_data_transformer
from alf.environments.utils import create_environment
from alf.trainers import policy_trainer
from alf.utils import common
import alf.utils.external_configurables
def _define_flags():
    """Register all command-line flags used by the play binary."""
    flags.DEFINE_string(
        'root_dir', os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'),
        'Root directory for writing logs/summaries/checkpoints.')
    flags.DEFINE_integer(
        'checkpoint_step', None,
        "the number of training steps which is used to "
        "specify the checkpoint to be loaded. If None, the latest checkpoint under "
        "train_dir will be used.")
    flags.DEFINE_float('epsilon_greedy', 0., "probability of sampling action.")
    flags.DEFINE_integer('random_seed', None, "random seed")
    flags.DEFINE_integer('num_episodes', 10, "number of episodes to play")
    flags.DEFINE_integer(
        'append_blank_frames', 0,
        # Typo fix in user-facing help text: "wil" -> "will".
        "If >0, will append such number of blank frames at the "
        "end of each episode in the rendered video file.")
    flags.DEFINE_float('sleep_time_per_step', 0.01,
                       "sleep so many seconds for each step")
    flags.DEFINE_string(
        'record_file', None, "If provided, video will be recorded"
        "to a file instead of shown on the screen.")
    # use '--norender' to disable frame rendering
    flags.DEFINE_bool(
        'render', True,
        "Whether render ('human'|'rgb_array') the frames or not")
    # use '--alg_render' to enable algorithm specific rendering
    flags.DEFINE_bool('alg_render', False,
                      "Whether enable algorithm specific rendering")
    flags.DEFINE_string('gin_file', None, 'Path to the gin-config file.')
    flags.DEFINE_multi_string('gin_param', None, 'Gin binding parameters.')
    flags.DEFINE_string('conf', None, 'Path to the alf config file.')
    flags.DEFINE_multi_string('conf_param', None, 'Config binding parameters.')
    flags.DEFINE_string(
        'ignored_parameter_prefixes', "",
        # Typo fix in user-facing help text: "ingore" -> "ignore".
        "Comma separated strings to ignore the parameters whose name has one of "
        "these prefixes in the checkpoint.")
    flags.DEFINE_bool(
        'use_alf_snapshot', False,
        'Whether to use ALF snapshot stored in the model dir (if any). You can set '
        'this flag to play a model trained with legacy ALF code.')


FLAGS = flags.FLAGS
def play():
    """Play a trained model according to the command-line FLAGS.

    Loads the conf file found under ``--root_dir``, rebuilds the
    environment, data transformer and algorithm, then delegates the actual
    episode playback to ``policy_trainer.play``.
    """
    if torch.cuda.is_available():
        alf.set_default_device("cuda")
    # Algorithm-specific rendering is opt-in via --alg_render.
    alf.summary.render.enable_rendering(FLAGS.alg_render)
    seed = common.set_random_seed(FLAGS.random_seed)
    # Playback uses a single (non-parallel) environment.
    alf.config('create_environment', nonparallel=True)
    alf.config('TrainerConfig', mutable=False, random_seed=seed)
    conf_file = common.get_conf_file()
    assert conf_file is not None, "Conf file not found! Check your root_dir"
    try:
        common.parse_conf_file(conf_file)
    except Exception as e:
        # Make sure the environment is shut down if the conf fails to parse.
        alf.close_env()
        raise e
    config = policy_trainer.TrainerConfig(root_dir="")

    env = alf.get_env()
    env.reset()
    data_transformer = create_data_transformer(config.data_transformer_ctor,
                                               env.observation_spec())
    config.data_transformer = data_transformer

    # keep compatibility with previous gin based config
    common.set_global_env(env)
    observation_spec = data_transformer.transformed_observation_spec
    common.set_transformed_observation_spec(observation_spec)

    algorithm_ctor = config.algorithm_ctor
    algorithm = algorithm_ctor(
        observation_spec=observation_spec,
        action_spec=env.action_spec(),
        reward_spec=env.reward_spec(),
        config=config)
    algorithm.set_path('')

    try:
        policy_trainer.play(
            common.abs_path(FLAGS.root_dir),
            env,
            algorithm,
            checkpoint_step=FLAGS.checkpoint_step or "latest",
            epsilon_greedy=FLAGS.epsilon_greedy,
            num_episodes=FLAGS.num_episodes,
            sleep_time_per_step=FLAGS.sleep_time_per_step,
            record_file=FLAGS.record_file,
            append_blank_frames=FLAGS.append_blank_frames,
            render=FLAGS.render,
            ignored_parameter_prefixes=FLAGS.ignored_parameter_prefixes.split(
                ",") if FLAGS.ignored_parameter_prefixes else [])
    finally:
        # Always shut the environment down, even if playback raises.
        alf.close_env()
def launch_snapshot_play():
    """This play function uses historical ALF snapshot for playing a trained
    model, consistent with the code snapshot that trains the model.

    In the newer version of ``train.py``, a ALF snapshot is saved to ``root_dir``
    right before the training begins. So this function prepends ``root_dir`` to
    ``PYTHONPATH`` to allow using the snapshot ALF repo in that place.

    Note that for any old training ``root_dir`` prior to snapshot being enabled,
    this function doesn't have any effect and the most up-to-date ALF will
    be used by play.
    """
    root_dir = common.abs_path(FLAGS.root_dir)
    alf_repo = os.path.join(root_dir, "alf")
    env_vars = common.get_alf_snapshot_env_vars(root_dir)
    # Forward this process's command line to the child, forcing snapshot mode
    # off so the child plays directly instead of recursing into this launcher.
    # (Renamed from ``flags``, which shadowed the absl ``flags`` module.)
    play_flags = sys.argv[1:]
    play_flags.append('--nouse_alf_snapshot')
    args = ['python', '-m', 'alf.bin.play'] + play_flags
    try:
        if os.path.isdir(alf_repo):
            logging.info("=== Using an ALF snapshot at '%s' ===" % alf_repo)
        else:
            # Typo fixed: "update-to-date" -> "up-to-date".
            logging.info(
                "=== Didn't find a snapshot; using up-to-date ALF ===")
        subprocess.check_call(
            " ".join(args),
            env=env_vars,
            stdout=sys.stdout,
            stderr=sys.stdout,
            shell=True)
    except subprocess.CalledProcessError:
        # No need to output anything; the child already printed its error.
        pass
def main(_):
    # Dispatch: snapshot mode re-execs play in a subprocess using the ALF
    # snapshot stored in root_dir; otherwise play directly in this process.
    if FLAGS.use_alf_snapshot:
        launch_snapshot_play()
    else:
        play()
if __name__ == '__main__':
    # Register CLI flags before absl parses them; ``root_dir`` is mandatory.
    _define_flags()
    flags.mark_flag_as_required('root_dir')
    logging.set_verbosity(logging.INFO)
    app.run(main)
|
# Copyright 2010-2012 Institut Mines-Telecom
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on May 29, 2012
@author: Bilel Msekni
@contact: bilel.msekni@telecom-sudparis.eu
@author: Houssem Medhioub
@contact: houssem.medhioub@it-sudparis.eu
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""
import pyocni.pyocni_tools.config as config
import pyocni.junglers.managers.backendManager as backend_m
from pyocni.dataBakers.resource_dataBaker import ResourceDataBaker
from postMan.the_post_man import PostMan
from pyocni.pyocni_tools.config import return_code
from pyocni.junglers.pathJungler import PathManager
from pyocni.junglers.managers.linkManager import LinkManager
from pyocni.junglers.managers.resourceManager import ResourceManager
try:
import simplejson as json
except ImportError:
import json
# getting the Logger
logger = config.logger
#=======================================================================================================================
# MultiEntityManager
#=======================================================================================================================
class MultiEntityJungler(object):
    """
    Handles requests concerning multiple entities.

    Fixes applied relative to the original:
      * ``dict.has_key(k)`` (Python-2-only) replaced with ``k in dict``.
      * ``is 0`` / ``is not return_code[...]`` identity comparisons replaced
        with ``==`` / ``!=`` — identity on int values only works by accident
        of CPython small-int caching.
      * ``channel_get_filtered_entities`` no longer falls off the end
        (implicitly returning ``None``) when the nested lookup fails.
    """

    def __init__(self):
        # Delegate managers for resources, links and paths, plus DB helpers.
        self.manager_r = ResourceManager()
        self.manager_l = LinkManager()
        self.jungler_p = PathManager()
        self.rd_baker = ResourceDataBaker()
        self.PostMan = PostMan()

    def channel_post_multi_resources(self, jreq, req_path):
        """
        Identifies the post path's goal : create a resource instance or update a mixin collection

        Args:
            @param jreq: Body content of the post request
            @param req_path: Address to which this post request was sent
        """
        # Step[1]: detect the goal of the request.
        is_kind_loc = 'resources' in jreq or 'links' in jreq

        if is_kind_loc:
            # Step[2a]: this is a create-new-resources request.
            db_occi_ids_locs = self.rd_baker.bake_to_post_multi_resources_2a()
            # Step[3a]: look for the default attributes to complete the
            # attribute description of the resource.
            default_attributes = self.rd_baker.bake_to_get_default_attributes(req_path)
            if db_occi_ids_locs is None or default_attributes is None:
                return "An error has occurred, please check log for more details", return_code['Internal Server Error']
            else:
                # Step[4a]: ask the managers to create the new resources.
                if 'resources' in jreq:
                    logger.debug(
                        "===== Channel_post_multi_resources ==== : Post on kind path to create a new resource channeled")
                    new_resources, resp_code_r = self.manager_r.register_resources(
                        jreq['resources'], req_path, db_occi_ids_locs, default_attributes)
                else:
                    new_resources = list()
                    resp_code_r = return_code['OK, and location returned']
                if 'links' in jreq:
                    logger.debug(
                        "===== Channel_post_multi_resources ==== : Post on kind path to create a new link channeled")
                    new_links, resp_code_l = self.manager_l.register_links_explicit(
                        jreq['links'], req_path, db_occi_ids_locs, default_attributes)
                else:
                    new_links = list()
                    resp_code_l = return_code['OK, and location returned']

                if resp_code_r != return_code['OK, and location returned'] \
                        or resp_code_l != return_code['OK, and location returned']:
                    return "An error has occurred, please check log for more details", return_code['Bad Request']

                # Step[5a]: save the new resources.
                entities = new_resources + new_links
                self.PostMan.save_registered_docs_in_db(entities)
                logger.debug("===== Channel_post_multi_resources ==== : Finished (2a) with success")
                locations = [item['OCCI_Location'] for item in entities]
                backend_m.create_entities(entities)
                return locations, return_code['OK, and location returned']

        # Step[2b]: this is an associate-mixins-to-resources request.
        elif 'X-OCCI-Location' in jreq:
            # Step[3b]: get the necessary data from DB.
            nb_res, mix_id = self.rd_baker.bake_to_post_multi_resources_2b(req_path)
            if nb_res is None:
                return "An error has occurred, please check log for more details", return_code['Internal Server Error']
            elif nb_res == 0:
                return "An error has occurred, please check log for more details", return_code['Not Found']
            else:
                to_search_for = jreq['X-OCCI-Location']
                db_docs = self.rd_baker.bake_to_post_multi_resources_2b2(to_search_for)
                if db_docs == 0:
                    return "An error has occurred, please check log for more details", return_code['Not Found']
                elif db_docs is None:
                    return "An error has occurred, please check log for more details", return_code[
                        'Internal Server Error']
                else:
                    # Step[4b]: ask the managers to associate mixins to resources.
                    logger.debug(
                        "===== Channel_post_multi_resources ==== : Post on mixin path to associate a mixin channeled")
                    updated_entities, resp_code_e = associate_entities_to_a_mixin(mix_id, db_docs)
                    self.PostMan.save_updated_docs_in_db(updated_entities)
                    logger.debug("===== Channel_post_multi_resources ==== : Finished (2b) with success")
                    backend_m.update_entities(db_docs, updated_entities)
                    return "", return_code['OK']
        else:
            return "An error has occurred, please check log for more details", return_code['Bad Request']

    def channel_get_all_entities(self, req_path, jreq):
        """
        Retrieve all entities belonging to a kind or a mixin or get on a path

        Args:
            @param req_path: Address to which this post request was sent
            @param jreq: Data provided for filtering
        """
        # Step[1]: retrieve the kind/mixin from DB.
        res = self.rd_baker.bake_to_channel_get_all_entities(req_path)
        if res is None:
            return "An error has occurred, please check log for more details", return_code['Internal Server Error']
        elif res == 0:
            logger.warning("===== Channel_get_all_multi_entities ===== : This is a get on a path " + req_path)
            # Step[1b]: get on path to retrieve the entities under that path.
            var, resp_code = self.jungler_p.channel_get_on_path(req_path, jreq)
            return var, resp_code
        else:
            q = res.first()
            # Step[2]: retrieve the entities related to the kind/mixin.
            entities = self.rd_baker.bake_to_get_all_entities(q['value'][1], q['value'][0])
            if entities is None:
                return "An error has occurred, please check log for more details", return_code['Internal Server Error']
            else:
                logger.debug("===== Channel_get_all_entities ==== : Finished with success")
                return entities, return_code['OK']

    def channel_get_filtered_entities(self, req_path, terms):
        """
        Retrieve entities belonging to a kind or a mixin matching the terms specified or get entities on a path with filtering

        Args:
            @param req_path: Address to which this post request was sent
            @param terms: Terms to filter entities
        """
        # Step[1]: get all the entities related to kind/mixin or get on a
        # path with filtering.
        entities, ok = self.channel_get_all_entities(req_path, terms)
        if ok == return_code['OK']:
            # Note: we now need the resources OCCI descriptions so we go back
            # to the database.
            descriptions_res, descriptions_link = self.rd_baker.bake_to_get_filtered_entities(entities)
            if descriptions_res is None:
                return "An error has occurred, please check log for more details", return_code['Internal Server Error']
            else:
                # Step[2]: ask the managers to do the filtering.
                if 'resources' in terms:
                    logger.debug("===== Channel_get_filtered: Resources are sent to filter =====")
                    filtered_res, resp_code_r = self.manager_r.get_filtered_resources(
                        terms['resources'], descriptions_res)
                else:
                    logger.debug("===== Channel_get_filtered: No Resource filter =====")
                    filtered_res = list()
                    resp_code_r = return_code['OK']
                if 'links' in terms:
                    logger.debug("===== Channel_get_filtered: Links are sent to filter =====")
                    filtered_links, resp_code_l = self.manager_l.get_filtered_links(terms['links'], descriptions_link)
                else:
                    logger.debug("===== Channel_get_filtered: No Links filter =====")
                    filtered_links = list()
                    resp_code_l = return_code['OK']

                if resp_code_l != return_code['OK'] or resp_code_r != return_code['OK']:
                    return "An error has occurred, please check log for more details", return_code['Bad Request']
                result = filtered_res + filtered_links
                logger.debug("===== Channel_get_filtered_entities ==== : Finished with success")
                return result, return_code['OK']
        else:
            # BUG FIX: the original returned None here; propagate the error
            # message and code from channel_get_all_entities instead.
            return entities, ok

    def channel_put_multi(self, jreq, req_url):
        """
        Update the mixin collection of resources

        Args:
            @param jreq: OCCI_Locations of the resources
            @param req_url: URL of the request
        """
        # Step[1]: get the necessary data from DB.
        nb_res, mix_id = self.rd_baker.bake_to_post_multi_resources_2b(req_url)
        if nb_res is None:
            return "An error has occurred, please check log for more details", return_code['Internal Server Error']
        elif nb_res == 0:
            return "An error has occurred, please check log for more details", return_code['Not Found']
        else:
            to_search_for = jreq['X-OCCI-Location']
            db_docs = self.rd_baker.bake_to_post_multi_resources_2b2(to_search_for)
            if db_docs == 0:
                return "An error has occurred, please check log for more details", return_code['Not Found']
            elif db_docs is None:
                return "An error has occurred, please check log for more details", return_code[
                    'Internal Server Error']
            else:
                # Step[2]: ask the managers to associate mixins to resources.
                logger.debug(
                    "===== Channel_put_multi_resources ==== : Put on mixin path to associate a mixin channeled")
                updated_entities, resp_code_e = associate_entities_to_a_mixin(mix_id, db_docs)
                self.PostMan.save_updated_docs_in_db(updated_entities)
                logger.debug("===== Channel_put_multi_resources ==== : Finished (2b) with success")
                backend_m.update_entities(db_docs, updated_entities)
                return "", return_code['OK']

    def channel_delete_multi(self, jreq, req_url):
        """
        Remove the mixin from the resources

        Args:
            @param jreq: OCCI_Locations of the resources
            @param req_url: URL of the request
        """
        # Step[1]: get the necessary data from DB.
        nb_res, mix_id = self.rd_baker.bake_to_post_multi_resources_2b(req_url)
        if nb_res is None:
            return "An error has occurred, please check log for more details", return_code['Internal Server Error']
        elif nb_res == 0:
            return "An error has occurred, please check log for more details", return_code['Not Found']
        else:
            to_search_for = jreq['X-OCCI-Location']
            db_docs = self.rd_baker.bake_to_post_multi_resources_2b2(to_search_for)
            if db_docs == 0:
                return "An error has occurred, please check log for more details", return_code['Not Found']
            elif db_docs is None:
                return "An error has occurred, please check log for more details", return_code[
                    'Internal Server Error']
            else:
                logger.debug(" ===== Delete_multi_entities : Delete on mixin to Dissociate mixins channeled =====")
                updated_entities, resp_code_e = dissociate_entities_from_a_mixin(mix_id, db_docs)
                if resp_code_e != return_code['OK']:
                    return "An error has occurred, please check log for more details", return_code['Bad Request']
                self.PostMan.save_updated_docs_in_db(updated_entities)
                backend_m.update_entities(db_docs, updated_entities)
                return "", return_code['OK']

    def channel_trigger_actions(self, jBody, req_url, triggered_action):
        """
        Trigger action on a collection of resources related to a kind or mixin

        Args:
            @param jBody: Action provided
            @param req_url: URL of the request
            @param triggered_action: Action name
        """
        # Step[1]: get the necessary data.
        kind_ids, entities = self.rd_baker.bake_to_channel_trigger_actions(req_url)
        if kind_ids is None:
            return "An error has occurred, please check log for more details", return_code['Internal Server Error']
        if kind_ids == 0:
            # Typo fixed in message: "occured" -> "occurred".
            return "An error has occurred, please check log for more details", return_code['Not Found']
        else:
            # Step[2]: get the providers of the instances.
            providers = list()
            for item in kind_ids:
                provider = self.rd_baker.bake_to_get_provider(item)
                providers.append(provider)
            parameters = jBody['attributes'] if 'attributes' in jBody else None
            # Step[3]: ask the backend to trigger the action on the resources.
            backend_m.trigger_action_on_multi_resource(entities, providers, jBody['actions'][0], parameters)
            return "", return_code['OK']
#=======================================================================================================================
# Independent Functions
#=======================================================================================================================
def associate_entities_to_a_mixin(mix_id, db_docs):
    """
    Add a single mixin to entities

    Args:
        @param mix_id: OCCI ID of the mixin
        @param db_docs: documents of the entities already contained in the database
    @return: (updated docs, status code) — ('Not Found', empty list) when mix_id is None
    """
    if mix_id is not None:
        for doc in db_docs:
            # ``in`` replaces the Python-2-only ``dict.has_key``.
            if 'mixins' in doc['OCCI_Description']:
                mixins = doc['OCCI_Description']['mixins']
                # Append only if absent — membership test replaces the
                # original's index()/ValueError dance.
                if mix_id not in mixins:
                    mixins.append(mix_id)
                doc['OCCI_Description']['mixins'] = mixins
            else:
                doc['OCCI_Description']['mixins'] = [mix_id]
        logger.debug("Associate mixin : Mixin associated with success")
        return db_docs, return_code['OK']
    else:
        logger.debug("Associate mixin : Mixin description problem")
        return list(), return_code['Not Found']
def dissociate_entities_from_a_mixin(mix_id, db_docs):
    """
    Remove a single mixin from entities

    Args:
        @param mix_id: OCCI ID of the mixin
        @param db_docs: documents of the entities already contained in the database
    @return: (updated docs, status code) — ('Not Found', empty list) when mix_id is None
    """
    if mix_id is not None:
        for doc in db_docs:
            # ``in`` replaces the Python-2-only ``dict.has_key``.
            if 'mixins' in doc['OCCI_Description']:
                mixins = doc['OCCI_Description']['mixins']
                try:
                    mixins.remove(mix_id)
                    doc['OCCI_Description']['mixins'] = mixins
                except ValueError as e:
                    # BUG FIX: Exception.message does not exist on Python 3;
                    # str(e) is portable across 2 and 3.
                    logger.error('Diss a mixin: ' + str(e))
        logger.debug("Dissociate mixin : Mixin dissociated with success")
        return db_docs, return_code['OK']
    else:
        logger.debug("Dissociate mixin : Mixin description problem")
        return list(), return_code['Not Found']
import re
import unittest
from unittest.mock import MagicMock, Mock
from genie.libs.sdk.apis.nxos.aci.utils import (
copy_from_device, copy_to_device)
class TestUtilsApi(unittest.TestCase):
    """Unit tests for the NXOS/ACI copy_from_device / copy_to_device APIs."""

    @staticmethod
    def _mock_device(hostname=None):
        """Build a MagicMock NXOS/ACI device whose HTTP endpoints resolve to 127.0.0.1."""
        device = MagicMock()
        if hostname is not None:
            device.hostname = hostname
        device.os = 'nxos'
        device.platform = 'aci'
        device.via = 'cli'
        device.api.get_mgmt_src_ip_addresses = Mock(return_value=['127.0.0.1'])
        device.api.get_local_ip = Mock(return_value='127.0.0.1')
        device.execute = Mock()
        return device

    def test_copy_from_device_nxos_aci(self):
        device = self._mock_device(hostname='router')
        copy_from_device(device, local_path='/tmp/test.txt')
        # The API should shell out to curl, uploading to the local proxy.
        assert re.search(
            r'curl --upload-file /tmp/test.txt -u \w+:\w+ '
            r'http://127.0.0.1:\d+/router_test.txt',
            str(device.execute.call_args))

    def test_copy_to_device_nxos_aci(self):
        device = self._mock_device()
        copy_to_device(device, remote_path='/tmp/test.txt')
        # The API should shell out to curl, downloading from the local proxy.
        assert re.search(
            r'curl -u \w+:\w+ http://127.0.0.1:\d+//tmp/test.txt -O',
            str(device.execute.call_args))
"""Link regular expressions used by link_transformer_preprocessor module."""
import re
# Handle TW wikilink inner text
# Pre-compiled (used in hot paths); named groups: lang_code, word.
# Matches a bare TW rc:// URI anchored at end of string.
TW_RC_LINK_RE = re.compile(
    (
        r"rc:\/\/"
        r"(?P<lang_code>[^\[\]\(\)]+?)"
        r"\/tw\/dict\/bible\/(?:kt|names|other)\/"
        r"(?P<word>[^\[\]\(\)]+?)$"
    )
)
# Handle wiki style rc links.
# e.g., [[rc://*/tw/dict/bible/kt/foo.md]]
# NOTE(id:regex_transformation_order) Ensure this doesn't interfere with
# TW_WIKI_RC_SEE_LINK_RE. Negative look-behind regex won't work because
# negative look-behinds must be of fixed length and there is no
# guarantee of fixed-width string preceding the link regex since the
# preceding string could vary by language and by actual phrase, e.g.,
# (Veja: ...) or (See: ...) or (Blah blah blah: ...). Thus to ensure
# that we don't orphan the preceding text we need to run the
# TW_WIKI_RC_SEE_LINK_RE regex transformations first. The same is true
# for the TA and TN regexes below.
# Named group: word.
TW_WIKI_RC_LINK_RE = (
    r"\[\[rc:\/\/\*\/tw\/dict\/bible\/(?:kt|names|other)\/(?P<word>[^\]]+?)\]\]"
)
# TW prefixed rc wikilink regex
# (See: [[rc://en/tw/dict/bible/kt/reveal]])
TW_WIKI_PREFIXED_RC_LINK_RE = r"\((?P<prefix_text>.+?):* *\[\[rc://(?P<lang_code>[^\[\]]+?)\/tw\/dict\/bible\/(?:kt|names|other)\/(?P<word>[^\[\]]+?)\]\]\)*"
# TW prefixed rc wikilink with no close parens regex
# (See: [[rc://en/tw/dict/bible/kt/reveal]])
# E.g., The first link in (See: [[rc://en/tw/dict/bible/kt/justice]] and [[rc://en/tw/dict/bible/kt/lawofmoses]])
TW_WIKI_PREFIXED_RC_LINK_NO_CLOSE_PAREN_RE = r"\((?P<prefix_text>.+?):* *\[\[rc://(?P<lang_code>[^\[\]]+?)\/tw\/dict\/bible\/(?:kt|names|other)\/(?P<word>[^)\[\]]+?)\]\][^)]"
# TW markdown link regex
# e.g., [foo](../kt/foo.md) links.
# NOTE See id:regex_transformation_order above
TW_MARKDOWN_LINK_RE = r"\[(?P<link_text>[^\[\]\(\)]+?)\]\(\.+\/(?:kt|names|other)\/(?P<word>[^\[\]\(\)]+?)\.md\)"
# TA rc wikilink prepended by open paren and text regex
# e.g., (See: [[rc://en/ta/man/jit/translate-names]])
TA_WIKI_PREFIXED_RC_LINK_RE = r"\((?P<prefix_text>.+?):* *\[\[rc://(?P<lang_code>[^\[\]\(\)]+?)\/ta\/man\/.+?\/(?P<word>[^\[\]]+?)\]\]\)"
# There can be malformed TA wikilinks of this form. Ideally, they
# should be fixed in the source text.
# e.g., [[rc://en/ta/man/jit/figs-metaphor]] Notice the leading
# comma and space.
# NOTE See id:regex_transformation_order above
TA_WIKI_RC_LINK_RE = r",* *\[\[rc://(?P<lang_code>[^\[\]\(\)]+?)\/ta\/man\/\w+?\/(?P<word>[^\[\]]+?)\]\]\)*"
# TA markdown style links
# e.g., [*](rc://lang_code/ta/man/translate/*.md)
# e.g., Found in files for ephraim.md and honey.md in pt-br_tw directory.
# e.g. (See: [synecdoche](rc://pt-br/ta/man/translate/figs_synecdoche.md))
# There can be more than one link inside parens:
# e.g., (Veja também: [simile](rc://pt-br/ta/man/translate/figs_simile.md), [metáfora](rc://pt-br/ta/man/translate/figs_metaphor.md))
# e.g., (Veja: [Simile](rc://pt-br/ta/man/translate/figs_simile.md))
# Handles the following style link also (notice there is no file
# suffix which may be malformed and may need to be fixed in markdown
# source by translators):
# e.g., (See: [synecdoche](rc://pt-br/ta/man/translate/figs-synecdoche) )
TA_PREFIXED_MARKDOWN_LINK_RE = r"\(.+?:* *\[(?P<link_text>[^\[\]\(\)]*?)\] *\(rc:\/\/(?P<lang_code>[^\[\]\(\)]+?)\/ta\/man\/translate\/(?P<word>[^\[\]\(\)]+?)(.md)*\) *\),*.*"
# TA markdown https style see link regex
# e.g., (Veja: [sinédoque] (https://git.door43.org/Door43/en-ta-translate-vol2/src/master/content/figs_synecdoche.md))
# in ../../../working/temp/pt-br_tw/pt-br_tw/bible/names/naphtali.md
# e.g., (Veja: [eufemismo] (https://git.door43.org/Door43/en-ta-translate-vol2/src/master/content/figs_euphemism.md))
# e.g., (Veja: [metonímia] (https://git.door43.org/Door43/en-ta-translate-vol2/src/master/content/figs_metonymy.md))
# FIXME There still exist some links that are malformed such as this
# next example. We could catch these as well, but then they would never
# get fixed in the markdown source.
# e.g., (Veja: [metonímia} (https://git.door43.org/Door43/en-ta-translate- vol2/src/master/content/figs_metonymy.md)).
TA_PREFIXED_MARKDOWN_HTTPS_LINK_RE = r"\(.+?:* *\[(?P<link_text>.*?)\] *\(https:\/\/(?P<domain>.+?)\/.+?-ta-.*?\/(?P<work>.+?).md\)\),*.*"
# TA markdown https style link regex
# e.g., [Como Traduzir Nomes] (https://git.door43.org/Door43/en-ta-translate-vol1/src/master/content/translate_names.md)
# NOTE See id:regex_transformation_order above
TA_MARKDOWN_HTTPS_LINK_RE = r"\[(?P<link_text>.*?)\] *\(https:\/\/(?P<domain>.+?)\/.+?-ta-.*?\/(?P<work>.+?).md\),*.*"
# TN markdown style scripture link regex
# e.g., [Genesis 46: 33-34](rc://gu/tn/help/gen/46/33)
# e.g.,: [Jude 01:17-19](rc://en/tn/help/jud/01/17)
# e.g., These types of links are found in the 'Bible References'
# section of a translation word definition file. For example,
# ../../../working/temp/en_tw-wa/en_tw/bible/kt/kingofthejews.md.
# NOTE See id:regex_transformation_order above
# Named groups: scripture_ref, lang_code, resource_code, chapter_num,
# verse_ref. (?!obs) excludes OBS links, handled separately below.
TN_MARKDOWN_SCRIPTURE_LINK_RE = r"\[(?P<scripture_ref>.+?)\]\(rc:\/\/(?P<lang_code>.+?)\/tn\/help\/(?P<resource_code>(?!obs).+?)\/(?P<chapter_num>\d+?)\/(?P<verse_ref>.+?)\)"
# Generic markdown link; (?<!\\) skips escaped "\[" brackets.
MARKDOWN_LINK_RE = r"(?<!\\)\[(?P<link_text>.+?)\]\((?P<url>.+?)\)"
# WIKI_LINK_RE = r"\[\[(?:[^|\]]*\|)?([^\]]+)\]\]"
WIKI_LINK_RE = r"\[\[(?P<url>[^\]]+)\]\]"
# TN markdown relative file path scripture link regex
# e.g., ([Colossians 1:24](../../col/01/24.md))
# e.g., in intro to Colossians chapter 1
#
# NOTE See id:regex_transformation_order above
TN_MARKDOWN_RELATIVE_SCRIPTURE_LINK_RE = r"\(\[(?P<scripture_ref>.+?)\]\((\.\.\/)+(?P<resource_code>\w+?)\/(?P<chapter_num>\d+?)\/(?P<verse_ref>.+?).md\)\)"
# FIXME Handle ([Colossians 1:7](../01/07.md)) and
# [Colossians 2:8](../02/08.md) still show up, but
# this is because technically the link is malformed as it is missing
# the resource_code, i.e., the book_id: col.
# TN_MARKDOWN_RELATIVE_TO_CURRENT_BOOK_SCRIPTURE_LINK_RE = r"\[(?P<scripture_ref>.+?)\]\((?:\.\.\/)+(?P<chapter_num>.+?)\/(?P<verse_ref>.+?).md\)"
TN_MARKDOWN_RELATIVE_TO_CURRENT_BOOK_SCRIPTURE_LINK_RE = r"\(\[(?P<scripture_ref>.+?)\]\((?:\.\.\/)+(?P<chapter_num>\d+?)\/(?P<verse_ref>.+?).md\)\)"
# TN_MARKDOWN_RELATIVE_TO_CURRENT_CHAPTER_SCRIPTURE_LINK_RE = r"\(\[(?P<scripture_ref>.+?)\]\((?:\.\.\/)+(?P<chapter_num>\d+?)\/(?P<verse_ref>.+?).md\)\)"
# TN OBS markdown link regex
# NOTE Not currently used since TN bible reference and OBS bible
# reference sections are removed by remove_section_preprocessor.py
# FIXME OBS yet to be supported. We need to find out what URL(s) to
# use for TN OBS resources.
# [21:9](rc://gu/tn/help/obs/21/09)
# __[14:02](rc://en/tn/help/obs/14/02)__
TN_OBS_MARKDOWN_LINK_RE = r"\[(?P<link_text>.+?)\] *\(rc:\/\/(?P<lang_code>.+?)\/tn\/help\/obs\/(?P<chapter_num>.+?)\/(?P<verse_ref>.+?)\)"
# TN_VERSE_ID_REGEX = r"id=\"(?P<lang_code>.*?)-(?P<book_num>.*?)-tn-ch-(?P<chapter_num>.*?)-v-(?P<verse_ref>.*?)\""
|
# NOTE(review): distutils is deprecated (PEP 632) and removed in Python
# 3.12 — migrate to setuptools when this packaging config is next touched.
from distutils.core import setup

setup(
    name='qsubpy',
    version='0.1dev',
    packages=['qsubpy', ],
)
|
import sys
from PySide2.QtWidgets import QApplication, QListWidget
if __name__ == '__main__':
    # Minimal demo: a QListWidget populated with a few string items.
    app = QApplication(sys.argv)

    entries = ["ONE", "TWO", "THREE", "FOUR", "FIVE"]

    widget = QListWidget()
    widget.show()
    widget.addItems(entries)

    sys.exit(app.exec_())
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetProductResult',
    'AwaitableGetProductResult',
    'get_product',
    'get_product_output',
]
@pulumi.output_type
class GetProductResult:
    """Result payload for the `getProduct` invoke (generated code — do not hand-edit)."""

    def __init__(__self__, description=None, display_name=None, name=None, product_category=None, product_labels=None):
        # Generated defensive checks: each field, when present, must have the
        # primitive type the API schema declares.
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if display_name and not isinstance(display_name, str):
            raise TypeError("Expected argument 'display_name' to be a str")
        pulumi.set(__self__, "display_name", display_name)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if product_category and not isinstance(product_category, str):
            raise TypeError("Expected argument 'product_category' to be a str")
        pulumi.set(__self__, "product_category", product_category)
        if product_labels and not isinstance(product_labels, list):
            raise TypeError("Expected argument 'product_labels' to be a list")
        pulumi.set(__self__, "product_labels", product_labels)

    @property
    @pulumi.getter
    def description(self) -> str:
        """
        User-provided metadata to be stored with this product. Must be at most 4096 characters long.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        """
        The user-provided name for this Product. Must not be empty. Must be at most 4096 characters long.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The resource name of the product. Format is: `projects/PROJECT_ID/locations/LOC_ID/products/PRODUCT_ID`. This field is ignored when creating a product.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="productCategory")
    def product_category(self) -> str:
        """
        Immutable. The category for the product identified by the reference image. This should be one of "homegoods-v2", "apparel-v2", "toys-v2", "packagedgoods-v1" or "general-v1". The legacy categories "homegoods", "apparel", and "toys" are still supported, but these should not be used for new products.
        """
        return pulumi.get(self, "product_category")

    @property
    @pulumi.getter(name="productLabels")
    def product_labels(self) -> Sequence['outputs.KeyValueResponse']:
        """
        Key-value pairs that can be attached to a product. At query time, constraints can be specified based on the product_labels. Note that integer values can be provided as strings, e.g. "1199". Only strings with integer values can match a range-based restriction which is to be supported soon. Multiple values can be assigned to the same key. One product may have up to 500 product_labels. Notice that the total number of distinct product_labels over all products in one ProductSet cannot exceed 1M, otherwise the product search pipeline will refuse to work for that ProductSet.
        """
        return pulumi.get(self, "product_labels")
class AwaitableGetProductResult(GetProductResult):
    # Awaitable wrapper over the (already-resolved) result: __await__ yields
    # nothing and immediately returns a plain GetProductResult copy.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetProductResult(
            description=self.description,
            display_name=self.display_name,
            name=self.name,
            product_category=self.product_category,
            product_labels=self.product_labels)
def get_product(location: Optional[str] = None,
                product_id: Optional[str] = None,
                project: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProductResult:
    """
    Gets information associated with a Product. Possible errors: * Returns NOT_FOUND if the Product does not exist.
    """
    __args__ = dict()
    __args__['location'] = location
    __args__['productId'] = product_id
    __args__['project'] = project
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's version when the caller didn't pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('google-native:vision/v1:getProduct', __args__, opts=opts, typ=GetProductResult).value

    return AwaitableGetProductResult(
        description=__ret__.description,
        display_name=__ret__.display_name,
        name=__ret__.name,
        product_category=__ret__.product_category,
        product_labels=__ret__.product_labels)
# Output-typed variant: the decorator lifts get_product to accept/return
# pulumi Inputs/Outputs; the function body is intentionally empty.
@_utilities.lift_output_func(get_product)
def get_product_output(location: Optional[pulumi.Input[str]] = None,
                       product_id: Optional[pulumi.Input[str]] = None,
                       project: Optional[pulumi.Input[Optional[str]]] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetProductResult]:
    """
    Gets information associated with a Product. Possible errors: * Returns NOT_FOUND if the Product does not exist.
    """
    ...
|
import pandas as pd
import datetime
import time
import pytz
import numpy
import json
import mysql.connector
# ADMIN DATA
# NOTE(review): hard-coded admin credentials — load from env/secret store.
admin = {
    'user_name': 'IngenHouzs',
    'password': 'ILoveIndonesia'
}
def connect_db(machine):
    """Connect to the two MySQL databases on *machine* and cache the handles.

    Side effects: writes the module globals dtbs, dtbs_cursor,
    local_item_list, marker, clientside_dtbs and clientdtbs_cursor.
    (dtbs_cursor_codelistings and transaction_code_listings are declared
    global but never assigned here — presumably set by other code; verify.)

    @param machine: MySQL server host name or IP address.
    """
    global local_item_list
    global dropbox_items
    global dtbs
    global dtbs_cursor
    global dtbs_cursor_codelistings
    global transaction_code_listings
    global marker
    global clientside_dtbs
    global clientdtbs_cursor
    # NOTE(review): hard-coded root password — move to config/env.
    dtbs = mysql.connector.connect(
        host=machine,
        user='root',
        password='KonohaGakure123',
        database='database transaction general'
    )
    clientside_dtbs = mysql.connector.connect(
        host=machine,
        user='root',
        password='KonohaGakure123',
        database='client details database'
    )
    dtbs_cursor = dtbs.cursor()
    dtbs_cursor.execute('SELECT * FROM `item list`')
    # Transpose rows -> columns: local_item_list[i] is column i of `item list`.
    local_item_list = list(zip(*dtbs_cursor.fetchall()))
    marker = machine
    clientdtbs_cursor = clientside_dtbs.cursor()
# Connect to the local MySQL server at import time.
connect_db('127.0.0.1')
# Build "<col0>-<col1>" display strings, pairing each column-0 entry with the
# column-1 value in the same row.  NOTE(review): .index() finds only the
# first match, so this assumes column-0 values are unique — confirm.
dropbox_items = [(items + '-' + str(local_item_list[1][local_item_list[0].index(items)])) for items in local_item_list[0]]
|
import bpy, _cycles
import bmesh
import random
import math
import numpy as np
from mathutils import Vector, Euler
import os
import addon_utils
import string
import pickle
from bpy_extras.object_utils import world_to_camera_view
############################################################################################
# Adding Film object and Shader
############################################################################################
def add_film(film_path):
    """Import an image as a textured plane ("film") and wire its shader tree.

    The imported plane's material mixes a Principled BSDF (transmissive,
    glass-like, randomized IOR) with a Transparent BSDF, keyed by the image's
    alpha channel, so transparent film regions render transparent.

    @param film_path: path to the image file; the Blender object and material
        are named after the file's stem.
    @return: the newly created film object.
    """
    film_name = film_path.split('/')[-1]
    root_path = os.path.dirname(film_path)
    # BUG FIX: the original passed {"name": film_name, "name": film_name} —
    # a duplicate dict key; a single key is exactly equivalent.
    bpy.ops.import_image.to_plane(
        files=[{"name": film_name}], directory=root_path,
        align_axis='Z+', relative=False)
    label = film_name.split('.')[0]
    film = bpy.data.objects[label]
    select_object(film)
    film.data.materials.append(bpy.data.materials[label])
    mat = bpy.data.materials[label]
    film.active_material = mat

    nodes = mat.node_tree.nodes
    links = mat.node_tree.links
    texture_node = nodes['Image Texture']
    texture_node.extension = 'EXTEND'
    output_node = nodes['Material Output']

    # Transparent BSDF mixed with the Principled BSDF via the image alpha.
    trans_bsdf = nodes.new(type='ShaderNodeBsdfTransparent')
    mix_shader = nodes.new(type='ShaderNodeMixShader')
    bsdf_node = nodes['Principled BSDF']
    bsdf_node.inputs['Metallic'].default_value = 0.
    bsdf_node.inputs['Roughness'].default_value = 0.08
    bsdf_node.inputs['IOR'].default_value = random.uniform(1.1, 1.6)
    bsdf_node.inputs['Transmission'].default_value = 1.0
    bsdf_node.inputs['Specular'].default_value = 0.08
    bsdf_node.inputs['Clearcoat'].default_value = 0.03
    bsdf_node.inputs['Alpha'].default_value = 1.0
    texturecoord_node = nodes.new(type='ShaderNodeTexCoord')

    # Rebuild the link graph from scratch.
    for link in links:
        links.remove(link)
    links.new(texture_node.outputs['Color'], bsdf_node.inputs['Base Color'])
    # Alpha drives the mix between transparent and principled shading.
    links.new(texture_node.outputs['Alpha'], mix_shader.inputs['Fac'])
    links.new(trans_bsdf.outputs['BSDF'], mix_shader.inputs['Shader'])
    links.new(bsdf_node.outputs['BSDF'], mix_shader.inputs['Shader'])
    links.new(mix_shader.outputs['Shader'], output_node.inputs['Surface'])
    links.new(texture_node.inputs[0], texturecoord_node.outputs[2])
    return film
#############################################################################################
# Physics Simulation
#############################################################################################
def simulation_softbody(obj, frame=10, sub_num=12, choice_num=5):
    """Run a Blender soft-body simulation on ``obj`` to deform it randomly.

    The mesh is subdivided, a handful of randomly chosen vertices receive
    goal weights (so they act as soft anchors), a SOFT_BODY modifier is
    configured and baked, and frames are sampled until ``too_unwarp(obj)``
    accepts the deformation (or 50 attempts are exhausted). A subdivision
    surface modifier is then added for smooth rendering.

    :param obj: Blender mesh object to simulate.
    :param frame: initial frame at which the simulation is evaluated.
    :param sub_num: number of subdivision cuts applied before simulating.
    :param choice_num: how many random vertices receive goal weights.
    :return: True if an acceptable deformation was found, False otherwise.
    """
    select_object(obj)
    subdivide(obj, sub_num)
    bpy.ops.object.mode_set(mode="EDIT")
    label = obj.name
    mesh = bpy.data.meshes[label]
    bm = bmesh.from_edit_mesh(mesh)
    choice_verts = np.random.choice(bm.verts, choice_num)
    index_list = [v.index for v in choice_verts]
    bpy.ops.object.mode_set(mode="OBJECT")
    group = obj.vertex_groups.new(name='Group')
    control = obj.vertex_groups.new(name='Control')
    # BUGFIX: the original tested `if np.random.rand():`, which is truthy for
    # any non-zero float, so the else branch was effectively dead code. Use an
    # explicit coin flip, mirroring the threshold style of simulation_cloth.
    if np.random.rand() < 0.5:
        for i in index_list:
            group.add([i], random.choice([0.75, 0.85, 0.8, 0.9, 1]), 'REPLACE')
    else:
        group.add(index_list, random.choice([0.75, 0.85, 0.8, 0.9]), 'REPLACE')
    # The 'Control' group pins every vertex with full weight.
    control.add([v.index for v in mesh.vertices], 1., 'REPLACE')
    bpy.ops.object.modifier_add(type='SOFT_BODY')
    settings = bpy.context.object.modifiers["Softbody"].settings
    settings.vertex_group_goal = "Group"
    settings.goal_spring = 0.9
    settings.goal_default = 1
    settings.goal_max = 1
    settings.goal_min = 0.65
    settings.pull = 0.9
    settings.push = 0.9
    settings.damping = 0
    settings.bend = 10
    settings.spring_length = 100
    settings.use_stiff_quads = True
    settings.use_self_collision = True
    bpy.ops.ptcache.bake_all(bake=True)
    bpy.context.scene.frame_set(frame)
    count = 0
    v = True
    # Resample frames until the deformation check passes, giving up after 50.
    while not too_unwarp(obj):
        count += 1
        # BUGFIX: Scene.frame_set() requires an int frame; random.uniform
        # returned a float. Use randint over the same [10, 150] range.
        frame = random.randint(10, 150)
        bpy.context.scene.frame_set(frame)
        if count == 50:
            v = False
            break
    select_contour(obj)
    bpy.ops.object.modifier_add(type='SUBSURF')
    bpy.context.object.modifiers["Subdivision"].render_levels = 6
    bpy.context.object.modifiers["Subdivision"].quality = 6
    reset_camera(obj)
    return v
def simulation_cloth(obj, frame=10, sub_num=12, choice_num=5):
    """Attach a cloth simulation to ``obj`` and evaluate it at ``frame``.

    The mesh is subdivided, a few randomly picked vertices get pin weights
    in a 'Group' vertex group, a CLOTH modifier is configured, and the scene
    is advanced to ``frame`` before baking the point cache.

    :param obj: Blender mesh object to simulate.
    :param frame: frame at which the simulation is evaluated.
    :param sub_num: number of subdivision cuts applied before simulating.
    :param choice_num: how many random vertices receive pin weights.
    """
    select_object(obj)
    subdivide(obj, sub_num)
    bpy.ops.object.mode_set(mode="EDIT")
    mesh_name = obj.name
    edit_mesh = bmesh.from_edit_mesh(bpy.data.meshes[mesh_name])
    pinned = [vert.index for vert in np.random.choice(edit_mesh.verts, choice_num)]
    bpy.ops.object.mode_set(mode="OBJECT")
    pin_group = obj.vertex_groups.new(name='Group')
    # Either weight each pinned vertex independently (60% of runs) or give
    # the whole selection one shared weight.
    if np.random.rand() < 0.6:
        for idx in pinned:
            pin_group.add([idx], random.choice([0.6, 0.7, 0.8, 0.9, 1]), 'REPLACE')
    else:
        pin_group.add(pinned, random.choice([0.6, 0.7, 0.8, 0.9]), 'REPLACE')
    bpy.ops.object.modifier_add(type='CLOTH')
    cloth = bpy.context.object.modifiers["Cloth"].settings
    # Apply the fixed cloth preset in one data-driven pass.
    for attr, value in (
        ("quality", 15),
        ("mass", 0.4),
        ("tension_stiffness", 80),
        ("compression_stiffness", 80),
        ("shear_stiffness", 80),
        ("bending_stiffness", 150),
        ("tension_damping", 25),
        ("compression_damping", 25),
        ("shear_damping", 25),
        ("air_damping", 1),
        ("vertex_group_mass", "Group"),
    ):
        setattr(cloth, attr, value)
    bpy.context.scene.frame_set(frame)
    bpy.ops.ptcache.bake_all(bake=False)
    bpy.context.view_layer.update()
    return
|
class DeviceService(object):
    """Mutable value holder describing one device service record.

    Exposes explicit getter/setter accessors because existing callers use
    that interface; the underlying attributes remain plain and public.
    """
    def __init__(self):
        # Placeholder defaults; real values are installed via the setters.
        self.serviceId = "serviceId"
        self.serviceType = "serviceType"
        self.data = "data"
        self.eventTime = "eventTime"
    def getServiceId(self):
        """Return the service identifier."""
        return self.serviceId
    def setServiceId(self, serviceId):
        """Set the service identifier."""
        self.serviceId = serviceId
    def getServiceType(self):
        """Return the service type."""
        return self.serviceType
    def setServiceType(self, serviceType):
        """Set the service type."""
        self.serviceType = serviceType
    def getData(self):
        """Return the service payload."""
        return self.data
    def setData(self, data):
        """Set the service payload."""
        self.data = data
    def getEventTime(self):
        """Return the event timestamp."""
        return self.eventTime
    def setEventTime(self, eventTime):
        """Set the event timestamp."""
        self.eventTime = eventTime
|
import unittest
from collections import defaultdict
from unittest import mock
import torch
from reagent.core.types import PolicyGradientInput
from reagent.evaluation.evaluator import get_metrics_to_score
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.samplers.discrete_sampler import SoftmaxActionSampler
from reagent.models.dueling_q_network import DuelingQNetwork
from reagent.models.fully_connected_network import FloatFeatureFullyConnected
from reagent.training.parameters import PPOTrainerParameters
from reagent.training.ppo_trainer import PPOTrainer
from reagent.workflow.types import RewardOptions
class TestPPO(unittest.TestCase):
    """Unit tests for reagent's PPOTrainer: construction, per-trajectory loss
    computation, optimizer wiring/ordering, and the update-frequency logic of
    the trajectory buffer. Optimizers and model internals are mocked where
    only call patterns matter."""
    def setUp(self):
        # preparing various components for PPO trainer initialization
        self.batch_size = 3
        self.state_dim = 10
        self.action_dim = 2
        self.num_layers = 2
        self.sizes = [20 for _ in range(self.num_layers)]
        self.activations = ["relu" for _ in range(self.num_layers)]
        self.use_layer_norm = False
        self.softmax_temperature = 1
        self.actions = [str(i) for i in range(self.action_dim)]
        self.params = PPOTrainerParameters(actions=self.actions, normalize=False)
        self.reward_options = RewardOptions()
        self.metrics_to_score = get_metrics_to_score(
            self.reward_options.metric_reward_values
        )
        self.policy_network = DuelingQNetwork.make_fully_connected(
            state_dim=self.state_dim,
            action_dim=self.action_dim,
            layers=self.sizes,
            activations=self.activations,
        )
        self.sampler = SoftmaxActionSampler(temperature=self.softmax_temperature)
        self.policy = Policy(scorer=self.policy_network, sampler=self.sampler)
        self.value_network = FloatFeatureFullyConnected(
            state_dim=self.state_dim,
            output_dim=1,
            sizes=self.sizes,
            activations=self.activations,
            use_layer_norm=self.use_layer_norm,
        )
    def _construct_trainer(self, new_params=None, use_value_net=True):
        """Build a PPOTrainer from the setUp fixtures.

        :param new_params: optional PPOTrainerParameters overriding self.params.
        :param use_value_net: when False the trainer is built without a value net.
        """
        value_network = self.value_network if use_value_net else None
        params = new_params if new_params else self.params
        trainer = PPOTrainer(
            policy=self.policy, value_net=value_network, **params.asdict()
        )
        # Lightning optimizer plumbing is irrelevant here; stub it out.
        trainer.optimizers = mock.Mock(return_value=[0, 0])
        return trainer
    def test_init(self):
        """Construction picks mean-reduced MSE for the value loss and rejects
        invalid ppo_epsilon / normalize configurations."""
        trainer = self._construct_trainer()
        self.assertEqual(
            type(trainer.value_loss_fn), type(torch.nn.MSELoss(reduction="mean"))
        )
        # ppo_epsilon must lie in [0, 1]; both out-of-range values are rejected.
        with self.assertRaises(AssertionError):
            new_params = PPOTrainerParameters(ppo_epsilon=-1)
            self._construct_trainer(new_params)
        with self.assertRaises(AssertionError):
            new_params = PPOTrainerParameters(ppo_epsilon=2)
            self._construct_trainer(new_params)
        # normalize=True is not supported by this trainer configuration.
        with self.assertRaises(AssertionError):
            params = PPOTrainerParameters(actions=["1", "2"], normalize=True)
            trainer = self._construct_trainer(new_params=params)
    def test__trajectory_to_losses(self):
        """Loss dict holds ppo_loss, plus value_net_loss when a value net is
        present; a positive entropy weight lowers ppo_loss."""
        inp = PolicyGradientInput.input_prototype(
            batch_size=self.batch_size,
            action_dim=self.action_dim,
            state_dim=self.state_dim,
        )
        # Normalize + offset clamp min
        params = PPOTrainerParameters(
            actions=["1", "2"], normalize=True, offset_clamp_min=True
        )
        trainer = self._construct_trainer(new_params=params, use_value_net=False)
        losses = trainer._trajectory_to_losses(inp)
        self.assertEqual(len(losses), 1)
        self.assertTrue("ppo_loss" in losses)
        trainer = self._construct_trainer()
        losses = trainer._trajectory_to_losses(inp)
        self.assertEqual(len(losses), 2)
        self.assertTrue("ppo_loss" in losses and "value_net_loss" in losses)
        # entropy weight should always lower ppo_loss
        trainer.entropy_weight = 1.0
        entropy_losses = trainer._trajectory_to_losses(inp)
        self.assertTrue(entropy_losses["ppo_loss"] < losses["ppo_loss"])
    def test_configure_optimizers(self):
        # Ordering is value then policy
        trainer = self._construct_trainer()
        optimizers = trainer.configure_optimizers()
        # Optimizer 0 must own the value network's first-layer parameters.
        self.assertTrue(
            torch.all(
                torch.isclose(
                    optimizers[0]["optimizer"].param_groups[0]["params"][0],
                    list(trainer.value_net.fc.dnn[0].parameters())[0],
                )
            )
        )
        # Optimizer 1 must own the policy scorer's first-layer parameters.
        self.assertTrue(
            torch.all(
                torch.isclose(
                    optimizers[1]["optimizer"].param_groups[0]["params"][0],
                    list(trainer.scorer.shared_network.fc.dnn[0].parameters())[0],
                )
            )
        )
    def test_get_optimizers(self):
        # ordering covered in test_configure_optimizers
        trainer = self._construct_trainer()
        optimizers = trainer.get_optimizers()
        self.assertIsNotNone(optimizers[0])
        # Without a value net the value-optimizer slot must be empty.
        trainer = self._construct_trainer(use_value_net=False)
        optimizers = trainer.get_optimizers()
        self.assertIsNone(optimizers[0])
    def test_training_step(self):
        """update_model fires on matching batch indices and is skipped when
        update_freq has not been reached."""
        trainer = self._construct_trainer()
        inp = defaultdict(lambda: torch.ones(1, 5))
        trainer.update_model = mock.Mock()
        trainer.training_step(inp, batch_idx=1)
        trainer.update_model.assert_called_with()
        trainer.update_freq = 10
        trainer.update_model = mock.Mock()
        trainer.training_step(inp, batch_idx=1)
        trainer.update_model.assert_not_called()
    def test_update_model(self):
        """update_model consumes the trajectory buffer across update_epochs
        passes; _update_model backpropagates both losses and steps both
        optimizers."""
        trainer = self._construct_trainer()
        # can't update empty model
        with self.assertRaises(AssertionError):
            trainer.update_model()
        # _update_model called with permutation of traj_buffer contents update_epoch # times
        trainer = self._construct_trainer(
            new_params=PPOTrainerParameters(
                ppo_batch_size=1,
                update_epochs=2,
                update_freq=2,
                normalize=False,
            )
        )
        trainer.traj_buffer = [1, 2]
        trainer._update_model = mock.Mock()
        trainer.update_model()
        # Two epochs over two single-element batches -> four calls in any order.
        calls = [mock.call([1]), mock.call([2]), mock.call([1]), mock.call([2])]
        trainer._update_model.assert_has_calls(calls, any_order=True)
        # trainer empties buffer
        self.assertEqual(trainer.traj_buffer, [])
        # _update_model
        trainer = self._construct_trainer()
        value_net_opt_mock = mock.Mock()
        ppo_opt_mock = mock.Mock()
        trainer.get_optimizers = mock.Mock(
            return_value=[value_net_opt_mock, ppo_opt_mock]
        )
        trainer._trajectory_to_losses = mock.Mock(
            side_effect=[
                {"ppo_loss": torch.tensor(1), "value_net_loss": torch.tensor(2)},
                {"ppo_loss": torch.tensor(3), "value_net_loss": torch.tensor(4)},
            ]
        )
        trainer.manual_backward = mock.Mock()
        inp1 = PolicyGradientInput.input_prototype(
            batch_size=1, action_dim=1, state_dim=1
        )
        inp2 = PolicyGradientInput.input_prototype(
            batch_size=1, action_dim=1, state_dim=1
        )
        trainer._update_model([inp1, inp2])
        trainer._trajectory_to_losses.assert_has_calls(
            [mock.call(inp1), mock.call(inp2)]
        )
        value_net_opt_mock.zero_grad.assert_called()
        value_net_opt_mock.step.assert_called()
        ppo_opt_mock.zero_grad.assert_called()
        ppo_opt_mock.step.assert_called()
        # Summed losses per kind: ppo 1+3=? (values come from the mocked
        # side_effect above) -- presumably losses are accumulated before a
        # single backward per kind; verify against trainer implementation.
        trainer.manual_backward.assert_has_calls(
            [mock.call(torch.tensor(6)), mock.call(torch.tensor(4))]
        )
|
import unittest
from dxl.fs.path import Path
from dxl.fs.file import File, NotAFileError
from fs.memoryfs import MemoryFS
from fs.tempfs import TempFS
class TestFile(unittest.TestCase):
    """Behavioral tests for dxl.fs.File over in-memory and temp filesystems."""
    def test_exist(self):
        """exists() is True for a file that was touched on the filesystem."""
        backing = MemoryFS()
        backing.touch('test.txt')
        self.assertTrue(File('test.txt', backing).exists())
    def test_not_exist(self):
        """exists() is False when the path was never created."""
        backing = MemoryFS()
        self.assertFalse(File('test.txt', backing).exists())
    def test_non_file_error(self):
        """exists() raises NotAFileError when the path is a directory."""
        backing = MemoryFS()
        backing.makedir('test')
        not_a_file = File('test', backing)
        with self.assertRaises(NotAFileError):
            not_a_file.exists()
    def test_remove(self):
        """remove() deletes the underlying file from the filesystem."""
        with TempFS() as tfs:
            tfs.touch('test')
            target = File('test', tfs)
            self.assertTrue(tfs.exists('test'))
            target.remove()
            self.assertFalse(tfs.exists('test'))
#!/usr/bin/python3
import pytest
from brownie.test import strategy
from hypothesis import HealthCheck
class StateMachine:
    """Brownie/hypothesis state machine that round-trips liquidity through
    Curve pools via the MyCurveExchangeAdd / MyCurveExchangeRemove contracts.

    Each rule invests a randomly sized amount of a randomly chosen coin into
    a randomly chosen pool, then divests all received LP tokens back into
    the same coin, printing balances before and after for inspection.
    """
    # Hypothesis strategies: coin index (0..6), trade sizes per asset class
    # (~1 ETH, ~1000 USD in 6- and 18-decimal units, ~0.1 BTC), pool index (0..31).
    coin = strategy('uint16', max_value=6)
    valueEth = strategy('uint256', min_value=9 * 10 ** 17, max_value=11 * 10 ** 17)
    valueUSD6 = strategy('uint256', min_value=900 * 10 ** 6, max_value=1100 * 10 ** 6)
    valueUSD18 = strategy('uint256', min_value=900 * 10 ** 18, max_value=1100 * 10 ** 18)
    valueBTC = strategy('uint256', min_value=9 * 10 ** 6, max_value=11 * 10 ** 6)
    pool = strategy('uint16', max_value=31)
    def __init__(self, MyCurveExchangeAdd, MyCurveExchangeRemove, UniswapV2Router02, DAI, USDC, USDT, WETH, WBTC, accounts, Contract):
        """Record contract handles and pre-fund accounts[0] with every coin."""
        # Indices 0/1 are the zero address and the conventional 0xEeee...EEeE
        # native-ETH placeholder; indices 2+ are ERC20 contract handles.
        self.coins = [
            "0x0000000000000000000000000000000000000000",
            "0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE",
            USDC,
            DAI,
            USDT,
            WETH,
            WBTC
        ]
        # Curve pool (swap) contract addresses; index-aligned with self.tokens.
        self.pools = [
            "0xA2B47E3D5c44877cca798226B7B8118F9BFb7A56",
            "0x52EA46506B9CC5Ef470C5bf89f17Dc28bB35D85C",
            "0x06364f10B501e868329afBc005b3492902d6C763",
            "0x45F783CCE6B7FF23B2ab2D70e416cdb7D6055f51",
            "0x79a8C46DeA5aDa233ABaFFD40F3A0A2B1e5A4F27",
            "0xA5407eAE9Ba41422680e2e00537571bcC53efBfD",
            "0x93054188d876f558f4a66B2EF1d97d16eDf0895B",
            "0x7fC77b5c7614E1533320Ea6DDc2Eb61fa00A9714",
            "0x4CA9b3063Ec5866A4B82E437059D2C43d1be596F",
            "0xbEbc44782C7dB0a1A60Cb6fe97d0b483032FF1C7",
            "0x4f062658EaAF2C1ccf8C8e36D6824CDf41167956",
            "0x3eF6A01A0f81D6046290f3e2A8c5b843e738E604",
            "0x3E01dD8a5E1fb3481F0F589056b428Fc308AF0Fb",
            "0x0f9cb53Ebe405d49A0bbdBD291A65Ff571bC83e1",
            "0xE7a24EF0C5e95Ffb0f6684b813A78F2a3AD7D171",
            "0x8474DdbE98F5aA3179B3B3F5942D724aFcdec9f6",
            "0xC18cC39da8b11dA8c3541C598eE022258F9744da",
            "0xC25099792E9349C7DD09759744ea681C7de2cb66",
            "0x8038C01A0390a8c547446a0b2c18fc9aEFEcc10c",
            "0x7F55DDe206dbAD629C080068923b36fe9D6bDBeF",
            "0x071c661B4DeefB59E2a3DdB20Db036821eeE8F4b",
            "0xd81dA8D904b52208541Bade1bD6595D8a251F8dd",
            "0x890f4e345B1dAED0367A877a1612f86A1f86985f",
            "0x0Ce6a5fF5217e38315f87032CF90686C96627CAA",
            "0xc5424B857f758E906013F3555Dad202e4bdB4567",
            "0xDeBF20617708857ebe4F679508E7b7863a8A8EeE",
            "0xDC24316b9AE028F1497c275EB9192a3Ea0f67022",
            "0xEB16Ae0052ed37f479f7fe63849198Df1765a733",
            "0xA96A65c051bF88B4095Ee1f2451C2A9d43F53Ae2",
            "0x42d7025938bEc20B69cBae5A77421082407f053A",
            "0x2dded6Da1BF5DBdF597C45fcFaa3194e53EcfeAF",
            "0xF178C0b5Bb7e7aBF4e12A4838C7b7c5bA2C623c0"
        ]
        # Curve LP token addresses, one per pool in self.pools (same order).
        self.tokens = [
            "0x845838DF265Dcd2c412A1Dc9e959c7d08537f8a2",
            "0x9fC689CCaDa600B6DF723D9E47D84d76664a1F23",
            "0xD905e2eaeBe188fc92179b6350807D8bd91Db0D8",
            "0xdF5e0e81Dff6FAF3A7e52BA697820c5e32D806A8",
            "0x3B3Ac5386837Dc563660FB6a0937DFAa5924333B",
            "0xC25a3A3b969415c80451098fa907EC722572917F",
            "0x49849C98ae39Fff122806C06791Fa73784FB3675",
            "0x075b1bb99792c9E1041bA13afEf80C91a1e70fB3",
            "0xb19059ebb43466C323583928285a49f558E572Fd",
            "0x6c3F90f043a72FA612cbac8115EE7e52BDe6E490",
            "0xD2967f45c4f384DEEa880F807Be904762a3DeA07",
            "0x5B5CFE992AdAC0C9D48E05854B2d91C73a003858",
            "0x97E2768e8E73511cA874545DC5Ff8067eB19B787",
            "0x4f3E8F405CF5aFC05D68142F3783bDfE13811522",
            "0x6D65b498cb23deAba52db31c93Da9BFFb340FB8F",
            "0x1AEf73d49Dedc4b1778d0706583995958Dc862e6",
            "0xC2Ee6b0334C261ED60C72f6054450b61B8f18E35",
            "0x64eda51d3Ad40D56b9dFc5554E06F94e1Dd786Fd",
            "0x3a664Ab939FD8482048609f652f9a0B0677337B9",
            "0xDE5331AC4B3630f94853Ff322B66407e0D6331E8",
            "0x410e3E86ef427e30B9235497143881f717d93c2A",
            "0x2fE94ea3d5d4a175184081439753DE15AeF9d614",
            "0x94e131324b6054c0D789b190b2dAC504e4361b53",
            "0x194eBd173F6cDacE046C53eACcE9B953F28411d1",
            "0xA3D87FffcE63B53E0d54fAa1cc983B7eB0b74A9c",
            "0xFd2a8fA60Abd58Efe3EeE34dd494cD491dC14900",
            "0x06325440D014e39736583c165C2963BA99fAf14E",
            "0x02d341CcB60fAaf662bC0554d13778015d1b285C",
            "0xaA17A236F2bAdc98DDc0Cf999AbB47D47Fc0A6Cf",
            "0x7Eb40E450b9655f4B3cC4259BCC731c63ff55ae6",
            "0x5282a4eF67D9C33135340fB3289cc1711c13638C",
            "0xcee60cFa923170e4f8204AE08B4fA6A3F5656F3a",
        ]
        self.MyCurveExchangeAdd = MyCurveExchangeAdd
        self.MyCurveExchangeRemove = MyCurveExchangeRemove
        self.accounts = accounts
        self.Contract = Contract
        # Pre-fund accounts[0]: buy USDC/USDT/DAI/WBTC via Uniswap and wrap
        # some ETH into WETH so every coin index is spendable in rule_test.
        UniswapV2Router02.swapETHForExactTokens(40000 * 10 ** 6, [WETH, USDC], accounts[0], 2 ** 256 - 1, {"from":accounts[0], "value": 100 * 10 ** 18})
        UniswapV2Router02.swapETHForExactTokens(40000 * 10 ** 6, [WETH, USDT], accounts[0], 2 ** 256 - 1, {"from":accounts[0], "value": 100 * 10 ** 18})
        UniswapV2Router02.swapETHForExactTokens(40000 * 10 ** 18, [WETH, DAI], accounts[0], 2 ** 256 - 1, {"from":accounts[0], "value": 100 * 10 ** 18})
        WETH.deposit({"from":accounts[0], "value": 40 * 10 ** 18})
        UniswapV2Router02.swapETHForExactTokens(4 * 10 ** 8, [WETH, WBTC], accounts[0], 2 ** 256 - 1, {"from":accounts[0], "value": 200 * 10 ** 18})
    def rule_test(self, coin, valueEth, valueUSD6, valueUSD18, valueBTC, pool):
        """Invest values[coin] of the chosen coin into pools[pool], then divest
        all received LP tokens back into the same coin, printing balances."""
        # Per-coin trade size, index-aligned with self.coins.
        values = [
            valueEth,
            valueEth,
            valueUSD6,
            valueUSD18,
            valueUSD6,
            valueEth,
            valueBTC
        ]
        accounts = self.accounts
        # Wrap the pool's LP token address with a minimal ERC20-style ABI so
        # approve()/balanceOf() can be called on it.
        lpToken = self.Contract.from_abi("CrvLPToken", self.tokens[pool], [{"name":"Transfer","inputs":[{"type":"address","name":"_from","indexed":True},{"type":"address","name":"_to","indexed":True},{"type":"uint256","name":"_value","indexed":False}],"anonymous":False,"type":"event"},{"name":"Approval","inputs":[{"type":"address","name":"_owner","indexed":True},{"type":"address","name":"_spender","indexed":True},{"type":"uint256","name":"_value","indexed":False}],"anonymous":False,"type":"event"},{"outputs":[],"inputs":[{"type":"string","name":"_name"},{"type":"string","name":"_symbol"}],"stateMutability":"nonpayable","type":"constructor"},{"name":"decimals","outputs":[{"type":"uint256","name":""}],"inputs":[],"stateMutability":"view","type":"function","gas":261},{"name":"transfer","outputs":[{"type":"bool","name":""}],"inputs":[{"type":"address","name":"_to"},{"type":"uint256","name":"_value"}],"stateMutability":"nonpayable","type":"function","gas":74713},{"name":"transferFrom","outputs":[{"type":"bool","name":""}],"inputs":[{"type":"address","name":"_from"},{"type":"address","name":"_to"},{"type":"uint256","name":"_value"}],"stateMutability":"nonpayable","type":"function","gas":111355},{"name":"approve","outputs":[{"type":"bool","name":""}],"inputs":[{"type":"address","name":"_spender"},{"type":"uint256","name":"_value"}],"stateMutability":"nonpayable","type":"function","gas":37794},{"name":"increaseAllowance","outputs":[{"type":"bool","name":""}],"inputs":[{"type":"address","name":"_spender"},{"type":"uint256","name":"_added_value"}],"stateMutability":"nonpayable","type":"function","gas":39038},{"name":"decreaseAllowance","outputs":[{"type":"bool","name":""}],"inputs":[{"type":"address","name":"_spender"},{"type":"uint256","name":"_subtracted_value"}],"stateMutability":"nonpayable","type":"function","gas":39062},{"name":"mint","outputs":[{"type":"bool","name":""}],"inputs":[{"type":"address","name":"_to"},{"type":"uint256","name":"_value"}],"stateMutability":"nonpayable","type":"function","gas":75652},{"name":"burnFrom","outputs":[{"type":"bool","name":""}],"inputs":[{"type":"address","name":"_to"},{"type":"uint256","name":"_value"}],"stateMutability":"nonpayable","type":"function","gas":75670},{"name":"set_minter","outputs":[],"inputs":[{"type":"address","name":"_minter"}],"stateMutability":"nonpayable","type":"function","gas":36458},{"name":"set_name","outputs":[],"inputs":[{"type":"string","name":"_name"},{"type":"string","name":"_symbol"}],"stateMutability":"nonpayable","type":"function","gas":178219},{"name":"name","outputs":[{"type":"string","name":""}],"inputs":[],"stateMutability":"view","type":"function","gas":7763},{"name":"symbol","outputs":[{"type":"string","name":""}],"inputs":[],"stateMutability":"view","type":"function","gas":6816},{"name":"balanceOf","outputs":[{"type":"uint256","name":""}],"inputs":[{"type":"address","name":"arg0"}],"stateMutability":"view","type":"function","gas":1636},{"name":"allowance","outputs":[{"type":"uint256","name":""}],"inputs":[{"type":"address","name":"arg0"},{"type":"address","name":"arg1"}],"stateMutability":"view","type":"function","gas":1881},{"name":"totalSupply","outputs":[{"type":"uint256","name":""}],"inputs":[],"stateMutability":"view","type":"function","gas":1481},{"name":"minter","outputs":[{"type":"address","name":""}],"inputs":[],"stateMutability":"view","type":"function","gas":1511}])
        if coin > 1:
            # ERC20 path: grant allowance first; the small msg.value
            # (0.01 ETH) presumably covers exchange-side fees -- verify
            # against the MyCurveExchangeAdd contract.
            self.coins[coin].approve(self.MyCurveExchangeAdd, values[coin], {"from": accounts[0]})
            self.MyCurveExchangeAdd.investTokenForCrvPair(self.coins[coin], values[coin], self.pools[pool], 1, 2 ** 256 - 1, {"from":accounts[0], "value": 1 * 10 ** 16})
            print("Test pool " + str(pool) + "\ncoin " + str(coin))
            print("Token Amount: " + str(self.coins[coin].balanceOf(accounts[0])))
            print("CrvLPToken : " + str(lpToken.balanceOf(accounts[0])))
            # Divest the entire LP balance back into the original coin.
            lpToken.approve(self.MyCurveExchangeRemove, lpToken.balanceOf(accounts[0]), {"from":accounts[0]})
            self.MyCurveExchangeRemove.divestTokenForCrvPair(lpToken, lpToken.balanceOf(accounts[0]), self.coins[coin], 1, {"from":accounts[0], "value":1 * 10 ** 16})
            print("Token Amount: " + str(self.coins[coin].balanceOf(accounts[0])))
            print("CrvLPTokenAfterRemove : " + str(lpToken.balanceOf(accounts[0])))
        else:
            # Native-ETH path: the invested amount rides along in msg.value
            # on top of the 0.01 ETH fee component.
            self.MyCurveExchangeAdd.investTokenForCrvPair(self.coins[coin], values[coin], self.pools[pool], 1, 2 ** 256 - 1, {"from":accounts[0], "value": values[coin] + 1 * 10 ** 16})
            print("Test pool " + str(pool) + "\ncoin " + str(coin))
            print("Token Amount: " + str(accounts[0].balance()))
            print("CrvLPToken : " + str(lpToken.balanceOf(accounts[0])))
            lpToken.approve(self.MyCurveExchangeRemove, lpToken.balanceOf(accounts[0]), {"from":accounts[0]})
            self.MyCurveExchangeRemove.divestTokenForCrvPair(lpToken, lpToken.balanceOf(accounts[0]), self.coins[coin], 1, {"from":accounts[0], "value":1 * 10 ** 16})
            print("Token Amount: " + str(accounts[0].balance()))
            print("CrvLPTokenAfterRemove : " + str(lpToken.balanceOf(accounts[0])))
def test_main(MyCurveExchangeAdd, MyCurveExchangeRemove, UniswapV2Router02, DAI, USDC, USDT, WETH, WBTC, accounts, Contract, state_machine):
    """Drive the Curve add/remove-liquidity StateMachine via brownie's
    hypothesis-based state_machine runner, suppressing all health checks
    and capping the run at 20 examples."""
    # BUGFIX/compat: HealthCheck.all() was deprecated and removed in
    # hypothesis >= 6; list(HealthCheck) is the documented replacement and
    # also works on older releases.
    settings = {"suppress_health_check": list(HealthCheck), "max_examples": 20}
    state_machine(StateMachine, MyCurveExchangeAdd, MyCurveExchangeRemove, UniswapV2Router02, DAI, USDC, USDT, WETH, WBTC, accounts, Contract, settings=settings)
|
# -*-coding:utf-8-*-
# coding=utf-8
import random
import math
def neighbor_x(x=1, p=1, d=1):
    """Return a random neighbor of ``x`` within distance ``d``.

    :param x: current point -- a scalar for ``p == 1``, otherwise a sequence
        of length ``p``.
    :param p: dimensionality; 1, 2 and 3 are supported.
    :param d: neighborhood radius (each offset component is scaled by it).
    :return: a neighboring point with the same shape as ``x``.
    """
    if p == 1:
        # Signed uniform step in (-d, d).
        step = (-1) ** random.randint(0, 1) * random.random() * d
        return x + step
    elif p == 2:
        n_x = (-1) ** random.randint(0, 1) * random.random()
        # BUGFIX: the original called random.random(0, math.sqrt(...)), but
        # random.random() takes no arguments and that raised TypeError.
        # Scale the unit sample instead, mirroring the p == 3 branch, so the
        # offset stays within the unit disc before scaling by d.
        n_y = (-1) ** random.randint(0, 1) * random.random() * math.sqrt(1 - n_x ** 2)
        return [x[0] + n_x * d, x[1] + n_y * d]
    elif p == 3:
        # Components are constrained so the offset lies within the unit ball.
        n_x = (-1) ** random.randint(0, 1) * random.random()
        n_y = (-1) ** random.randint(0, 1) * random.random() * math.sqrt(1 - n_x ** 2)
        n_z = (-1) ** random.randint(0, 1) * random.random() * math.sqrt(1 - n_x ** 2 - n_y ** 2)
        return [x[0] + n_x * d, x[1] + n_y * d, x[2] + n_z * d]
    else:
        # Preserved behavior: higher dimensions terminate the program.
        print('The current program does not support higher dimensions temporarily')
        exit(1)
def iterative_inner(f=sum, x_0=None, g=None, t_0=100, iter_num=20):
    """Inner loop of simulated annealing at a fixed temperature.

    Repeatedly samples a feasible neighbor of the current point and accepts
    it according to the Metropolis criterion.

    :param f: objective (energy) function evaluated on a candidate point.
    :param x_0: initial point; a sequence whose length is the dimensionality.
    :param g: feasibility predicate; candidates are resampled until g(x) is truthy.
    :param t_0: temperature used in the acceptance probability.
    :param iter_num: number of inner iterations to perform.
    :return: the point held after the final iteration.
    """
    p = len(x_0)
    for _ in range(iter_num):
        f_i = f(x_0)
        # Sample a neighbor of the current point, resampling until feasible.
        x_1 = neighbor_x(x_0, p=p, d=0.1)
        while not g(x_1):
            x_1 = neighbor_x(x_0, p=p, d=0.1)
        f_j = f(x_1)
        print('***************' + str(x_1) + '****************\n'+'----'+str(f_j)+'----\n')
        delta_f = f_j - f_i
        # Metropolis criterion: always accept improvements; otherwise accept
        # with probability exp(-delta_f / t_0).
        if delta_f <= 0:
            A_ij = 1
        else:
            A_ij = math.exp(-delta_f / t_0)
        if A_ij > random.random():
            x_0 = x_1
    return x_0
# NOTE(review): leftover scratch notes from an earlier encoding-based
# annealing variant; kept for reference, not executed.
# x_0 = encode(x_0)
# x_1 = x_0[:rand_num]+turnbio(x_0[rand_num])+x_0[rand_num+1:]
# G_ij = 1; #Transfer probability
# A_ij = 1; #Accepted probability
# p_ij = G_ij*A_ij;
# Module version string.
__version__ = '0.3'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.