content stringlengths 5 1.05M |
|---|
"""
instabot example
Workflow:
Follow user's followers by username.
"""
import argparse
import os
import sys
import random
import time
sys.path.append(os.path.join(sys.path[0], "../"))
from instabot import Bot # noqa: E402
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument("-u", type=str, help="username")
parser.add_argument("-p", type=str, help="password")
parser.add_argument("-proxy", type=str, help="proxy")
args = parser.parse_args()

# Bot limits are set very high so the comment loop below is throttled only
# by the explicit sleep, not by instabot's internal rate limiting.
bot = Bot(
    filter_users=True,
    filter_private_users=False,
    filter_previously_followed=True,
    filter_business_accounts=True,
    filter_verified_accounts=True,
    max_followers_to_follow=1000000,
    max_following_to_follow=200000,
    follow_delay=1,
    max_follows_per_day=100000,
    max_comments_per_day=10000
)
bot.login(username=args.u, password=args.p, proxy=args.proxy)

media_id = bot.get_media_id_from_link("https://www.instagram.com/p/CJmd49VgYT7/?igshid=13g2yiy5zex8o")
user_id = bot.get_user_id_from_username("lu__psiloveu")
following = bot.get_user_following(user_id)

# Guard: random.randint(0, len(following) - 1) raised ValueError when the
# account followed nobody; bail out with a clear message instead.
if not following:
    sys.exit("Target user follows no accounts; nothing to mention.")

for i in range(1000):
    # random.choice replaces the manual randint/index selection.
    username = bot.get_username_from_user_id(random.choice(following))
    bot.comment(media_id, f"@{username}")
    print(f"Comment {i}: @{username}")
    time.sleep(10)

# for username in args.users:
#     bot.follow_followers(username)
|
from cipherkit.alphabets import ascii_basic
from cipherkit.alphabets import decimal
from cipherkit.alphabets import english
from cipherkit.alphabets import spanish
def test_alphabet_spanish():
    """The Spanish alphabet adds Ñ after N to the 26 ASCII uppercase letters."""
    assert spanish() == "ABCDEFGHIJKLMNÑOPQRSTUVWXYZ"
def test_alphabet_english():
    """The English alphabet is exactly the 26 ASCII uppercase letters."""
    assert english() == "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def test_alphabet_decimal():
    """The decimal alphabet is the ten digits in ascending order."""
    assert decimal() == "0123456789"
def test_alphabet_ascii_basic():
    """The basic ASCII alphabet is every printable character from space to ~."""
    assert ascii_basic() == " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~"
|
import shutil
import json
import bs4
import requests
from .constants import BASE_DIR, CONFIG
def download_response_sheet_json(url_to_response_sheet):
    """Download the response sheet HTML and cache it locally.

    Called when the response_sheet file is not present or the user has
    explicitly requested a re-download.

    Raises:
        requests.HTTPError: when the server answers with an error status.
    """
    # Fetch first, write second: the original opened the file for writing
    # before the request, which left a truncated/empty file behind whenever
    # the request failed.
    response = requests.get(url_to_response_sheet)
    response.raise_for_status()
    with open('./temp/response_sheet.html', 'wb') as response_sheet_file:
        response_sheet_file.write(response.content)
    # Keep a persistent copy so later runs don't have to re-download.
    shutil.copy("./temp/response_sheet.html", "./response_sheet/response_sheet.html")
def parse_type(question_soup):
    """Return the question type string ("MCQ" or "SA") for one question panel.

    The type lives in the first <tr> of the question's menu table; the value
    <td> is always the bold one, so we key on the class instead of position
    (row/column counts vary by question type).
    """
    type_cell = question_soup.find('table', class_="menu-tbl").find('tr').find('td', class_="bold")
    # str(...) instead of the unidiomatic .string.__str__() chain.
    return str(type_cell.string).strip()
def single_choice_question_handler(question_soup):
    """Parse one single-choice (SCQ) question panel.

    Returns:
        tuple: (question_id, question_data) where question_data holds the
        status, the four option values and the chosen option. 'answer_given'
        is present only when an option (1-4) was actually chosen, matching
        the original behaviour.
    """
    # Walk the menu table once instead of repeating the full
    # find/find('tbody')/find_all('tr') chain for every single field.
    rows = question_soup.find(class_="menu-tbl").find('tbody').find_all('tr')

    def cell(row_index):
        # The last <td> of a row holds the value; normalise to a stripped str.
        return str(rows[row_index].find_all('td')[-1].string).strip()

    question_id = cell(1)
    question_data = {"type": "SCQ"}
    # This can be Answered or Not Answered.
    question_data['status'] = cell(-2)
    # The options are all long integers; kept as strings since no arithmetic
    # is ever done on them.
    question_data['A'] = cell(2)
    question_data['B'] = cell(3)
    question_data['C'] = cell(4)
    question_data['D'] = cell(5)
    # "Integer" value of the chosen option; the mapping is 1 -> A, 2 -> B, ...
    question_data['chosen_option'] = cell(-1)
    option_map = {"1": 'A', "2": 'B', "3": 'C', "4": 'D'}
    option = question_data['chosen_option']
    if option in option_map:
        question_data['answer_given'] = question_data[option_map[option]]
    return question_id, question_data
def multiple_choice_question_handler():
    """Parse a multi-answer MCQ question panel. Not implemented yet."""
    raise NotImplementedError
def integer_choice_question_handler(question_soup):
    """Parse one integer-answer (INT) question panel.

    Row layout details are documented in single_choice_question_handler()
    and notes.md.

    Returns:
        tuple: (question_id, question_data)
    """
    # Walk each table once instead of re-querying the soup per field.
    menu_rows = question_soup.find(class_="menu-tbl").find('tbody').find_all('tr')
    question_id = str(menu_rows[1].find_all('td')[-1].string).strip()
    question_data = {"type": "INT"}
    question_data['status'] = str(menu_rows[-1].find_all('td')[-1].string).strip()
    answer_rows = question_soup.find(class_="questionRowTbl").find('tbody').find_all('tr')
    question_data['answer_given'] = str(answer_rows[-1].find_all('td')[-1].string).strip()
    return question_id, question_data
def section_handler(section_soup):
    """Route every question panel in a section to the matching handler.

    Questions within a section share one type, so a single probe decides the
    handler for the whole section. Returns a dict of question_id -> data;
    empty when the probed type is neither "MCQ" nor "SA".
    """
    questions = section_soup.find_all('div', class_="question-pnl")
    # NOTE(review): the probe reads questions[1], not questions[0] —
    # presumably the first panel is special; confirm against a real sheet.
    question_type = parse_type(questions[1])
    handlers = {
        "MCQ": single_choice_question_handler,
        "SA": integer_choice_question_handler,
    }
    parsed = {}
    handler = handlers.get(question_type)
    if handler is not None:
        for question in questions:
            question_id, question_data = handler(question)
            parsed[question_id] = question_data
    return parsed
def info_panel_handler(info_soup, language="ENG"):
    """Parse the candidate-info table into a dict and derive a shift code.

    Args:
        info_soup: soup of the info panel table; each row has a key <td>
            and a value <td>.
        language: language tag embedded into the shift code.

    Returns:
        dict: the raw key/value pairs plus a synthesized "shift_code" of the
        form YYYY-MM-DD-<shift>-<language>-<subject>.
    """
    return_data = {}
    for tr in info_soup.find_all('tr'):
        key, value = tr.find_all('td')
        return_data[str(key.string).strip()] = str(value.string).strip()

    # Shift Code Logic
    day, month, year = return_data["Test Date"].split('/')
    raw_subject = return_data["Subject"]
    # `in` tests replace the original str.find(...) != -1 checks.
    if 'TECH' in raw_subject:
        subject = "TECH"
    elif 'PL' in raw_subject and 'AR' in raw_subject:
        subject = "PLAR"
    elif 'PL' in raw_subject:
        subject = "PLAN"
    elif 'AR' in raw_subject:
        subject = "ARCH"
    else:
        subject = "MASTER"
    # A "3:00" start time marks the evening shift; anything else is morning.
    test_time = return_data["Test Time"]
    shift = "E" if "3:00" in test_time else "M"
    return_data["shift_code"] = "-".join((year, month, day, shift, language, subject))
    return return_data
def create_response_sheet_json(download=False, url=CONFIG['response_sheet_url']):
    """Parse the locally saved response sheet HTML into a nested dict.

    Args:
        download: when True, re-download the sheet before parsing.
        url: source URL, used only when download is True.

    Returns:
        tuple: (shift_code, response_sheet_content)
    """
    if download:  # Do this only if explicitly stated.
        download_response_sheet_json(url)
    # Parsing Logic Here
    print("[I] Parsing Response Sheet")
    with open(BASE_DIR / 'save_response_sheet_here' / 'response_sheet.html') as file:
        soup = bs4.BeautifulSoup(file.read(), features="html5lib")
    # We now successfully have a Soup object containing all the data.
    response_sheet_content = {}
    # Information Logic
    info_table = soup.find(class_="main-info-pnl")
    response_sheet_content['info'] = info_panel_handler(info_table)
    # Questions Logic
    sections = soup.find_all(class_="section-cntnr")
    response_sheet_content["physics-single"] = section_handler(sections[0])
    response_sheet_content["physics-integer"] = section_handler(sections[1])
    response_sheet_content["chemistry-single"] = section_handler(sections[2])
    response_sheet_content["chemistry-integer"] = section_handler(sections[3])
    try:
        response_sheet_content["maths-single"] = section_handler(sections[4])
        response_sheet_content["maths-integer"] = section_handler(sections[5])
    except IndexError:
        # Bug fix: `sections` is a list, so a missing section raises
        # IndexError; the original caught KeyError, which never fires here.
        # Some papers ship fewer sections (Planning has Maths-Apt-Planning,
        # Arch has Maths-Apt); the key names don't always reflect the
        # content — the final result is what matters.
        pass
    return response_sheet_content['info']["shift_code"], response_sheet_content
def main():
    """CLI entry point: parse the cached response sheet (no re-download)."""
    create_response_sheet_json()
if __name__ == "__main__":
    main()
|
def csv_parser(delimiter=','):
    """Coroutine-based CSV field scanner.

    Prime with next(), then .send() one character at a time; send a falsy
    value (e.g. None) to mark end of input. Each completed field comes back
    as the value of a later yield — the driver collects every truthy yield
    (see parse_csv below for the intended driver loop).

    Quoted fields may use either " or ' and may contain the delimiter;
    a doubled quote inside a quoted field encodes one literal quote.
    """
    field = []
    while True:
        # Emit the previously accumulated field (empty string on the first
        # pass) and receive the first character of the next field.
        char = (yield(''.join(field)))
        field = []
        leading_whitespace = []
        # Buffer leading spaces; they are kept only for unquoted fields.
        while char and char == ' ':
            leading_whitespace.append(char)
            char = (yield)
        if char == '"' or char == "'":
            # Quoted field: remember which quote character opened it.
            suround = char
            char = (yield)
            while True:
                if char == suround:
                    char = (yield)
                    if char != suround:
                        # Single closing quote -> field finished.
                        break
                    # Doubled quote: fall through, keep one literal quote.
                field.append(char)
                char = (yield)
            # Discard everything up to the next delimiter; a falsy char
            # means end of input, so emit the field immediately.
            while not char == delimiter:
                if not char:
                    (yield(''.join(field)))
                char = (yield)
        else:
            # Unquoted field: leading spaces are part of the value.
            field = leading_whitespace
            while char != delimiter:
                if not char:
                    # End of input: emit what we have, then suspend.
                    (yield(''.join(field)))
                field.append(char)
                char = (yield)
def parser(s):
    """Split one CSV line into a list of cell strings.

    Supports double-quoted cells (the delimiter may appear inside them) with
    embedded quotes escaped by doubling (""). Leading spaces are kept for
    unquoted cells and dropped for quoted ones. A trailing empty field after
    a final comma is not emitted (original behaviour, preserved).

    Fixes vs. the original: an index scan replaces list.pop(0), turning the
    accidental O(n^2) into O(n), and a quoted cell at the very end of the
    line no longer raises IndexError (the original popped past the end).
    """
    n = len(s)
    i = 0
    cells = []
    while i < n:
        cell = []
        leading = []
        c = s[i]
        i += 1
        # Buffer leading spaces; kept only if the cell turns out unquoted.
        while c == " ":
            leading.append(c)
            if i >= n:
                break
            c = s[i]
            i += 1
        if c == "\"":
            # Quoted cell: read until an unescaped closing quote;
            # "" inside the cell encodes a single literal quote.
            if i < n:
                c = s[i]
                i += 1
                while True:
                    if c == "\"":
                        if i >= n:
                            break  # closing quote at end of input
                        c = s[i]
                        i += 1
                        if c != "\"":
                            break  # single quote closed the cell
                    cell.append(c)
                    if i >= n:
                        break
                    c = s[i]
                    i += 1
        else:
            # Unquoted cell: everything up to the next comma, spaces included.
            cell += leading
            while c != ",":
                cell.append(c)
                if i >= n:
                    break
                c = s[i]
                i += 1
        cells.append("".join(cell))
    return cells
def parse_csv(csv_text):
    """Drive the csv_parser coroutine over csv_text and collect the fields."""
    gen = csv_parser()
    next(gen)  # prime the coroutine
    fields = []
    for ch in csv_text:
        emitted = gen.send(ch)
        if emitted:
            fields.append(emitted)
    # Signal end of input; the coroutine flushes any pending field.
    tail = gen.send(None)
    if tail:
        fields.append(tail)
    return fields
# Sample rows used while developing the parser; only the last assignment
# to `s` is actually parsed below.
s = '1997,Ford,E350,"Super, luxurious truck"'
s = 'Weronika,Zaborska,njkfdsv@dsgfk.sn,"running, sci-fi",new,Krakow,25'
s = 'Ryuichi,Akiyama,jkg@ljnsfd.fjn,music,guide,Tokyo,65'
s = 'Elena, 42 years old, is from Valencia and is interested in cooking, traveling.'
s = 'Elena,Martinez,emrt@lsofnbr.rt,"cooking, traveling",superhost,Valencia,42'
s = '"John ""Mo""",Smith,sfn@flkaei.km,biking and hiking,,"Seattle, WA",23'
print(list(" 1 2"))
# print(parse_csv(s))
# Column positions of the fields used in the bio sentence.
idx_fname = 0
idx_age = 6
idx_city = 5
idx_interests = 3
# Typo fix: "interseted" -> "interested", matching Parser.formatter below.
formatter = "%s, %s years old, is from %s and is interested in %s."
cells = parser(s)
print(formatter % (cells[idx_fname],
                   cells[idx_age],
                   cells[idx_city],
                   cells[idx_interests]))
# submission
import sys
class Parser:
    """Parses delimiter-separated rows and formats a short bio sentence."""

    key_fname = "first_name"
    key_lname = "last_name"
    key_email = "email"
    key_interests = "interests"
    key_notes = "notes"
    key_city = "city"
    key_age = "age"
    formatter = "%s, %s years old, is from %s and is interested in %s."
    # Column order of the expected input rows.
    keys = [key_fname, key_lname, key_email, key_interests, key_notes, key_city, key_age]

    def __init__(self, delimitor=","):
        # default delimitor is comma, but user may specify as needed
        self.delimitor = delimitor

    # Time complexity: O(n) — every character is read once via an index scan
    # (the original used list.pop(0), which was O(n) per pop).
    # Space complexity: O(n) for the cell buffers.
    def parseLine(self, line):
        """Split one row into a dict keyed by self.keys.

        Double-quoted cells may contain the delimiter; "" inside a quoted
        cell encodes one literal quote. Leading spaces are preserved for
        unquoted cells and dropped for quoted ones.

        Fixes vs. the original: only a trailing newline is stripped (the
        original `line[:-1]` chopped the last data character when the final
        stdin line had no "\n"), and a quoted cell at the end of the line no
        longer raises IndexError.
        """
        chars = line.rstrip("\n")
        n = len(chars)
        i = 0
        # storing delimitor-separated cells
        cells = []
        while i < n:
            c = chars[i]
            i += 1
            # word buffer
            cell = []
            # handle leading space
            leading = []
            while c == " ":
                leading.append(c)
                if i >= n:
                    break
                c = chars[i]
                i += 1
            # if char is quote, ignore delimitor until end quote
            if c == "\"":
                if i < n:
                    c = chars[i]
                    i += 1
                    while True:
                        if c == "\"":
                            if i >= n:
                                break  # close quote at end of line
                            c = chars[i]
                            i += 1
                            if c != "\"":
                                break  # quote ended
                        cell.append(c)
                        if i >= n:
                            break
                        c = chars[i]
                        i += 1
            else:
                # while char is not delimitor, add to buffer
                cell += leading
                while c != self.delimitor:
                    cell.append(c)
                    if i >= n:
                        break
                    c = chars[i]
                    i += 1
            # an end quote or a delimitor completes one cell
            cells.append("".join(cell))
        return {self.keys[k]: cells[k] for k in range(len(self.keys))}

    def getBio(self, line):
        """Format the bio sentence for one parsed row."""
        cells = self.parseLine(line)
        return self.formatter % (cells[self.key_fname],
                                 cells[self.key_age],
                                 cells[self.key_city],
                                 cells[self.key_interests])
# Read rows from stdin and print one bio sentence per row.
# NOTE(review): `parser` shadows the module-level parser() function above.
parser = Parser()
for line in sys.stdin:
    print(parser.getBio(line))
|
import django_filters
from django.db import transaction
from django.db.models import Q
from django.http import Http404
from django.utils.translation import ugettext_lazy as _
from rest_framework import exceptions, serializers, status, viewsets
from rest_framework.response import Response
from metarecord.models import Action, Function, Phase, Record
from ..utils import validate_uuid4
from .base import (
ClassificationRelationSerializer,
DetailSerializerMixin,
HexRelatedField,
StructuralElementSerializer
)
class RecordSerializer(StructuralElementSerializer):
    """Serializer for Record; related objects are exposed as hex UUIDs."""

    class Meta(StructuralElementSerializer.Meta):
        model = Record
        read_only_fields = ('index',)

    name = serializers.CharField(read_only=True, source='get_name')
    action = HexRelatedField(read_only=True)
    parent = HexRelatedField(read_only=True)
class ActionSerializer(StructuralElementSerializer):
    """Serializer for Action with its records nested inline."""

    class Meta(StructuralElementSerializer.Meta):
        model = Action
        read_only_fields = ('index',)

    name = serializers.CharField(read_only=True, source='get_name')
    phase = HexRelatedField(read_only=True)
    records = RecordSerializer(many=True)
class PhaseSerializer(StructuralElementSerializer):
    """Serializer for Phase with its actions (and their records) nested inline."""

    class Meta(StructuralElementSerializer.Meta):
        model = Phase
        read_only_fields = ('index',)

    name = serializers.CharField(read_only=True, source='get_name')
    function = HexRelatedField(read_only=True)
    actions = ActionSerializer(many=True)
class FunctionListSerializer(StructuralElementSerializer):
    """List serializer for Function: flat representation, plus nested
    creation of the whole phase/action/record tree on POST."""

    version = serializers.IntegerField(read_only=True)
    modified_by = serializers.SerializerMethodField()
    state = serializers.CharField(read_only=True)
    classification_code = serializers.ReadOnlyField(source='get_classification_code')
    classification_title = serializers.ReadOnlyField(source='get_name')
    # TODO these three are here to maintain backwards compatibility,
    # should be removed as soon as the UI doesn't need these anymore
    function_id = serializers.ReadOnlyField(source='get_classification_code')
    # there is also Function.name field which should be hidden for other than templates when this is removed
    name = serializers.ReadOnlyField(source='get_name')
    parent = serializers.SerializerMethodField()
    classification = ClassificationRelationSerializer()

    class Meta(StructuralElementSerializer.Meta):
        model = Function
        exclude = StructuralElementSerializer.Meta.exclude + ('index', 'is_template')

    def get_fields(self):
        """'phases' is writable (fully nested) only on create; otherwise it
        is a read-only list of hex UUIDs."""
        fields = super().get_fields()
        if self.context['view'].action == 'create':
            fields['phases'] = PhaseSerializer(many=True, required=False)
        else:
            fields['phases'] = HexRelatedField(many=True, read_only=True)
        return fields

    def _create_new_version(self, function_data):
        """Persist a Function plus its nested phases, actions and records.

        Index fields are assigned from 1 upward in input order at every
        level; created_by/modified_by are stamped with the requesting user.
        """
        user = self.context['request'].user
        user_data = {'created_by': user, 'modified_by': user}
        phase_data = function_data.pop('phases', [])
        function_data.update(user_data)
        function = Function.objects.create(**function_data)
        for index, phase_datum in enumerate(phase_data, 1):
            action_data = phase_datum.pop('actions', [])
            phase_datum.update(user_data)
            phase = Phase.objects.create(function=function, index=index, **phase_datum)
            for index, action_datum in enumerate(action_data, 1):
                record_data = action_datum.pop('records', [])
                action_datum.update(user_data)
                action = Action.objects.create(phase=phase, index=index, **action_datum)
                for index, record_datum in enumerate(record_data, 1):
                    record_datum.update(user_data)
                    Record.objects.create(action=action, index=index, **record_datum)
        return function

    def get_modified_by(self, obj):
        # _modified_by is presumably set by a queryset annotation or model
        # property elsewhere — confirm before relying on its semantics.
        return obj._modified_by or None

    def get_parent(self, obj):
        """Hex UUID of the function attached to the parent classification,
        or None when there is no parent or no such function."""
        if obj.classification and obj.classification.parent:
            parent_functions = (
                Function.objects
                .filter(classification__uuid=obj.classification.parent.uuid)
            )
            if parent_functions.exists():
                return parent_functions[0].uuid.hex
        return None

    def validate(self, data):
        """Cross-field validation: valid_from/valid_to ordering, and (on
        create) one function per classification plus classification
        permission to hold a function."""
        new_valid_from = data.get('valid_from')
        new_valid_to = data.get('valid_to')
        if new_valid_from and new_valid_to and new_valid_from > new_valid_to:
            raise exceptions.ValidationError(_('"valid_from" cannot be after "valid_to".'))
        if not self.instance:
            if Function.objects.filter(classification=data['classification']).exists():
                raise exceptions.ValidationError(
                    _('Classification %s already has a function.') % data['classification'].uuid.hex
                )
            if not data['classification'].function_allowed:
                raise exceptions.ValidationError(
                    _('Classification %s does not allow function creation.') % data['classification'].uuid.hex
                )
        return data

    @transaction.atomic
    def create(self, validated_data):
        """Create a new Function (requires CAN_EDIT) and snapshot its
        metadata version; everything runs in one transaction."""
        user = self.context['request'].user
        if not user.has_perm(Function.CAN_EDIT):
            raise exceptions.PermissionDenied(_('No permission to create.'))
        validated_data['modified_by'] = user
        new_function = self._create_new_version(validated_data)
        new_function.create_metadata_version()
        return new_function
class FunctionDetailSerializer(FunctionListSerializer):
    """Detail serializer: fully nested phases, version history, and both
    partial (state/validity) and full (new version) updates."""

    version_history = serializers.SerializerMethodField()

    def get_fields(self):
        """Always expose phases fully nested (unlike the list serializer)."""
        fields = super().get_fields()
        fields['phases'] = PhaseSerializer(many=True)
        return fields

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['classification'].required = False
        # Only PATCH requests may write the state field directly.
        if self.partial:
            self.fields['state'].read_only = False

    def validate(self, data):
        """PATCH: require at least one of state/valid_from/valid_to and
        check the state transition. Full update: forbid changing the
        classification (only the version may change)."""
        data = super().validate(data)
        if self.partial:
            if not any(field in data for field in ('state', 'valid_from', 'valid_to')):
                raise exceptions.ValidationError(_('"state", "valid_from" or "valid_to" required.'))
            new_state = data.get('state')
            if new_state:
                self.check_state_change(self.instance.state, new_state)
                # Leaving draft requires the function's attributes to pass
                # validation first.
                if self.instance.state == Function.DRAFT and new_state != Function.DRAFT:
                    errors = self.get_attribute_validation_errors(self.instance)
                    if errors:
                        raise exceptions.ValidationError(errors)
        else:
            classification = data['classification']
            if classification.uuid != self.instance.classification.uuid:
                raise exceptions.ValidationError(
                    _('Changing classification is not allowed. Only version can be changed.')
                )
        return data

    @transaction.atomic
    def update(self, instance, validated_data):
        """PATCH: update state/validity in place. Full update: create a new
        version of the function tree."""
        user = self.context['request'].user
        if self.partial:
            allowed_fields = {'state', 'valid_from', 'valid_to'}
            data = {field: validated_data[field] for field in allowed_fields if field in validated_data}
            if not data:
                return instance
            data['modified_by'] = user
            # ignore other fields than state, valid_from and valid_to
            # and do an actual update instead of a new version
            new_function = super().update(instance, data)
            new_function.create_metadata_version()
            return new_function
        if not user.has_perm(Function.CAN_EDIT):
            raise exceptions.PermissionDenied(_('No permission to edit.'))
        if instance.state in (Function.SENT_FOR_REVIEW, Function.WAITING_FOR_APPROVAL):
            raise exceptions.ValidationError(
                _('Cannot edit while in state "sent_for_review" or "waiting_for_approval"')
            )
        if not validated_data.get('classification'):
            validated_data['classification'] = instance.classification
        validated_data['modified_by'] = user
        new_function = self._create_new_version(validated_data)
        new_function.create_metadata_version()
        return new_function

    def check_state_change(self, old_state, new_state):
        """Validate the workflow transition and the caller's permission.

        Raises ValidationError for an illegal transition and
        PermissionDenied when the user lacks the permission tied to the
        target state (or, when reverting to draft, the state being left).
        """
        user = self.context['request'].user
        if old_state == new_state:
            return
        valid_changes = {
            Function.DRAFT: {Function.SENT_FOR_REVIEW},
            Function.SENT_FOR_REVIEW: {Function.WAITING_FOR_APPROVAL, Function.DRAFT},
            Function.WAITING_FOR_APPROVAL: {Function.APPROVED, Function.DRAFT},
            Function.APPROVED: {Function.DRAFT},
        }
        if new_state not in valid_changes[old_state]:
            raise exceptions.ValidationError({'state': [_('Invalid state change.')]})
        state_change_required_permissions = {
            Function.SENT_FOR_REVIEW: Function.CAN_EDIT,
            Function.WAITING_FOR_APPROVAL: Function.CAN_REVIEW,
            Function.APPROVED: Function.CAN_APPROVE,
        }
        # A revert to draft is gated by the permission of the state being left.
        relevant_state = new_state if new_state != Function.DRAFT else old_state
        required_permission = state_change_required_permissions[relevant_state]
        if not user.has_perm(required_permission):
            raise exceptions.PermissionDenied(_('No permission for the state change.'))

    def get_version_history(self, obj):
        """All versions visible to the requesting user, oldest first;
        modified_by is included only when the requester may see it."""
        request = self.context['request']
        functions = Function.objects.filter_for_user(request.user).filter(uuid=obj.uuid).order_by('version')
        ret = []
        for function in functions:
            version_data = {attr: getattr(function, attr) for attr in ('state', 'version', 'modified_at')}
            if not request or function.can_view_modified_by(request.user):
                version_data['modified_by'] = function.get_modified_by_display()
            ret.append(version_data)
        return ret
class FunctionFilterSet(django_filters.FilterSet):
    """Query-parameter filters for the Function list endpoint."""

    class Meta:
        model = Function
        fields = ('valid_at', 'version', 'classification_code', 'information_system')

    valid_at = django_filters.DateFilter(method='filter_valid_at')
    modified_at__lt = django_filters.DateTimeFilter(field_name='modified_at', lookup_expr='lt')
    modified_at__gt = django_filters.DateTimeFilter(field_name='modified_at', lookup_expr='gt')
    classification_code = django_filters.CharFilter(field_name='classification__code')
    information_system = django_filters.CharFilter(field_name='phases__actions__records__attributes__InformationSystem',
                                                   lookup_expr='icontains')

    def filter_valid_at(self, queryset, name, value):
        """Keep functions whose validity interval contains `value`."""
        # if neither date is set the function is considered not valid
        queryset = queryset.exclude(Q(valid_from__isnull=True) & Q(valid_to__isnull=True))
        # null value means there is no bound in that direction
        queryset = queryset.filter(
            (Q(valid_from__isnull=True) | Q(valid_from__lte=value)) &
            (Q(valid_to__isnull=True) | Q(valid_to__gte=value))
        )
        return queryset
class FunctionViewSet(DetailSerializerMixin, viewsets.ModelViewSet):
    """CRUD endpoint for non-template Functions, looked up by hex UUID."""

    queryset = Function.objects.filter(is_template=False)
    queryset = queryset.select_related('modified_by', 'classification').prefetch_related('phases')
    queryset = queryset.order_by('classification__code')
    serializer_class = FunctionListSerializer
    serializer_class_detail = FunctionDetailSerializer
    filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
    filterset_class = FunctionFilterSet
    lookup_field = 'uuid'

    def get_queryset(self):
        """Restrict to what the user may see; without an explicit ?version,
        show only the latest (approved, when ?state=approved) version."""
        queryset = self.queryset.filter_for_user(self.request.user)
        if 'version' in self.request.query_params:
            return queryset
        state = self.request.query_params.get('state')
        if state == 'approved':
            return queryset.latest_approved()
        return queryset.latest_version()

    def retrieve(self, request, *args, **kwargs):
        """Fetch one function; distinguish "not found" from "exists but the
        anonymous user may not see it" (401 vs 404)."""
        if not validate_uuid4(self.kwargs.get('uuid')):
            raise exceptions.ValidationError(_('Invalid UUID'))
        try:
            instance = self.get_object()
        except (Function.DoesNotExist, Http404):
            instance = None
        if not instance:
            filter_kwargs = {self.lookup_field: self.kwargs[self.lookup_field]}
            if 'version' in self.request.query_params:
                filter_kwargs = {**filter_kwargs, 'version': self.request.query_params['version']}
            qs = Function.objects.filter(**filter_kwargs)
            # When unauthenticated user is requesting object, the get_object will filter out functions
            # that are not approved. Here we are checking is there requested function with any state
            # in the database, if there are we return not authenticated. This was requested feature by
            # users and product owner to notify users that they should log in.
            if qs.exists():
                raise exceptions.NotAuthenticated
            raise exceptions.NotFound
        serializer = self.get_serializer(instance)
        return Response(serializer.data)

    def destroy(self, request, *args, **kwargs):
        """Delete a function; the model decides who may delete and in which
        state (draft only, per the error message)."""
        instance = self.get_object()
        user = request.user
        if not instance.can_user_delete(user):
            raise exceptions.PermissionDenied(_('No permission to delete or state is not "draft".'))
        instance.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
|
import logging
import multiprocessing
from multiprocessing.managers import (
BaseManager,
)
import tempfile
import pytest
from hvm.chains.ropsten import ROPSTEN_GENESIS_HEADER, ROPSTEN_NETWORK_ID
from hvm.db.atomic import (
AtomicDB,
)
from hvm.db.chain import (
ChainDB,
)
from helios.chains import (
get_chaindb_manager,
)
from helios.config import (
ChainConfig,
)
from helios.db.chain import ChainDBProxy
from helios.db.base import DBProxy
from helios.utils.ipc import (
wait_for_ipc,
kill_process_gracefully,
)
def serve_chaindb(manager):
    """Run the manager's server loop; blocks until the process is killed."""
    manager.get_server().serve_forever()
@pytest.fixture
def database_server_ipc_path():
    """Start a chaindb manager server in a child process; yield its IPC path.

    Seeds the database with one key/value pair and the Ropsten genesis
    header so client-side tests have known content to assert against.
    """
    core_db = AtomicDB()
    core_db[b'key-a'] = b'value-a'
    chaindb = ChainDB(core_db)
    # TODO: use a custom chain class only for testing.
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    with tempfile.TemporaryDirectory() as temp_dir:
        chain_config = ChainConfig(network_id=ROPSTEN_NETWORK_ID, max_peers=1, data_dir=temp_dir)
        manager = get_chaindb_manager(chain_config, core_db)
        chaindb_server_process = multiprocessing.Process(
            target=serve_chaindb,
            args=(manager,),
        )
        chaindb_server_process.start()
        # Block until the server has created its IPC socket.
        wait_for_ipc(chain_config.database_ipc_path)
        try:
            yield chain_config.database_ipc_path
        finally:
            # Always tear the server down, even when the test failed.
            kill_process_gracefully(chaindb_server_process, logging.getLogger())
@pytest.fixture
def manager(database_server_ipc_path):
    """Connect a client-side manager to the server's IPC socket."""
    class DBManager(BaseManager):
        pass

    # Register the same proxy types the server side exposes.
    DBManager.register('get_db', proxytype=DBProxy)
    DBManager.register('get_chaindb', proxytype=ChainDBProxy)

    client = DBManager(address=str(database_server_ipc_path))
    client.connect()
    return client
def test_chaindb_over_ipc_manager(manager):
    """The canonical head fetched over IPC matches the genesis header."""
    assert manager.get_chaindb().get_canonical_head() == ROPSTEN_GENESIS_HEADER
def test_db_over_ipc_manager(manager):
    """Key/value reads work over IPC; missing keys raise KeyError."""
    remote_db = manager.get_db()
    assert b'key-a' in remote_db
    assert remote_db[b'key-a'] == b'value-a'
    with pytest.raises(KeyError):
        remote_db[b'not-present']
|
import setuptools
# Long description for PyPI comes straight from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="behavior_mapper",
    version="1.2.1.dev1",
    author="Jason Summer",
    author_email="jasummer92@gmail.com",
    description="Clusters channel activities or steps according to the transactions offered within a given organization's channel",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/jasonsum/behavior_mapping",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Text Processing"
    ],
    packages=['behavior_mapper'],
    python_requires='>=3.8',
    license='MIT',
    # Bug fix: 'unittest' and 'glob' removed — they are standard-library
    # modules, not PyPI distributions, and listing them breaks pip install.
    install_requires=['pandas', 'nltk', 'gensim', 'numpy', 'scikit-learn'],
    # project_urls={'':'',},
)
import pyodbc
# NOTE(review): `connstr` is actually a pyodbc Connection object, not a
# string; the name is kept for any downstream code that references it.
connstr = pyodbc.connect(
    # Raw string so the backslash in SERVER=...\INTCCIPROD can never be
    # reinterpreted as an escape sequence.
    r'DRIVER={SQL Server};SERVER=CATL0DB728\INTCCIPROD;DATABASE=itt;Trusted_Connection=yes;'
)
cursor = connstr.cursor()
cursor.execute("SELECT u_server_name FROM LCM_SNOW")
# for row in cursor:
#     print(row)
# Column names of the result set, in SELECT order.
columns = [column[0] for column in cursor.description]
# Right-pad each item name with dots to column 20, then append the price.
for item, price in (("Test", "20$"), ("Pear", "99$"), ("Apple", "120$")):
    print(f"{item:.<20}{price}")
|
#--------------------------------------------------------------------------------
## Protein Pow(d)er
#--------------------------------------------------------------------------------
## This program looks for an optimal folding configuration for a protein.
#--------------------------------------------------------------------------------
## Authors: Eva van der Heide, Kaiden Sewradj and Wouter Vincken (BioTrio)
## License: unilicense
## Version: 1.0.0
## Version status: Complete
#--------------------------------------------------------------------------------
from code.classes.protein import Protein
from code.algorithms.depth_first import DepthFirst
from code.algorithms.greedy import Greedy, GreedyLookahead
from code.algorithms.hill_climber import HillClimber, HillClimber_Pull
from code.algorithms.randomize import Random
from code.algorithms.simulated_annealing import Simulated_Annealing, Simulated_Annealing_Pull
from code.visualisation.output import writecsv
from code.visualisation.visualize import visualize, bar, solution_count
import sys
import time
if __name__ == "__main__":
    # Expected CLI arguments, in order: data/file, protein id.
    # NOTE(review): when argv count != 3, `source`/`protein_id` are never
    # bound and the Protein(...) call below raises NameError — confirm the
    # intended argument handling.
    if len(sys.argv) == 3:
        source = sys.argv[1]
        protein_id = sys.argv[2]
    protein = Protein(source, protein_id)
    # Keep asking until a valid algorithm code is entered.
    while True:
        algor = input("Which algorithm do you want to run?\nr = random\ng = greedy\nl = greedy lookahead\nh = hill climber\np = hill climber (pull version)\ns = simulated annealing\nsp = simulated annealing (using pull hill climber)\nd = depth first\n")
        if algor in ['r', 'g', 'l', 'h', 'p', 's', 'sp', 'd']:
            break
        else:
            print("Please select a valid algorithm.")
    # Every algorithm except depth-first needs a run count.
    if algor != 'd':
        while True:
            runs = input("How often do you want to run this algorithm?\n")
            try:
                runs = int(runs)
                break
            except ValueError:
                print("Please give a positive integer.")
    # Simulated-annealing variants additionally need an initial temperature.
    if algor in ['s', 'sp']:
        while True:
            temp = input("What initial temperature do you want?\n")
            try:
                temp = int(temp)
                break
            except ValueError:
                print("Please give a positive integer.")
    # Hill climber and simulated annealing need a mutation count per run.
    if algor in ['h', 's']:
        while True:
            mutations = input("How many mutations do you want to make per run?\n")
            try:
                mutations = int(mutations)
                break
            except ValueError:
                print("Please give a positive integer.")
    if algor == 'r':
        # NOTE(review): the timing print and get_best() here are repeated
        # unconditionally after the elif chain, so 'r' reports twice.
        art= Random()
        start_time = time.time()
        art.run_random(protein, runs)
        print("Algoritm took %s seconds to run (without visualisation)" % (time.time() - start_time))
        best = art.get_best()
    elif algor == 'g':
        # NOTE(review): same duplication as the 'r' branch.
        art = Greedy(protein)
        start_time = time.time()
        art.run_greedy(protein, runs)
        print("Algoritm took %s seconds to run (without visualisation)" % (time.time() - start_time))
        best = art.get_best()
    elif algor == 'l':
        # Greedy lookahead also asks how far ahead to search (1-7).
        while True:
            lookahead = input("How many amino acids do you want to look ahead per placement?\n")
            try:
                lookahead = int(lookahead)
            except ValueError:
                print("Please give a positive integer.")
            else:
                if 1 <= lookahead <= 7:
                    break
                else:
                    print("Please give an integer in range of 1 - 7.")
        art = GreedyLookahead(protein, lookahead)
        start_time = time.time()
        art.run_greedy(protein, runs)
    elif algor == 'd':
        art = DepthFirst(protein)
        start_time = time.time()
        art.run_depthfirst()
    elif algor == 'h':
        art = HillClimber(protein, runs)
        start_time = time.time()
        art.hike(mutations)
    elif algor == 'p':
        art = HillClimber_Pull(protein, runs)
        start_time = time.time()
        art.hike()
    elif algor == 's':
        art = Simulated_Annealing(protein, temp, runs)
        start_time = time.time()
        art.hike(mutations)
    elif algor == 'sp':
        art = Simulated_Annealing_Pull(protein, temp, runs)
        start_time = time.time()
        art.hike()
    # Shared epilogue: report timing, write the csv, draw visualisations.
    print("Algoritm took %s seconds to run (without visualisation)" % (time.time() - start_time))
    best = art.get_best()
    writecsv(protein, best, source)
    solution_count(art)
    visualize(best)
    bar(art, algor)
    print("Program completed!")
# This script combines all csv files in the current folder
# It assumes all csv files in this folder have the same header/formats
import os
import pandas as pd
combined_csv_file = "combined_csv.csv"

# Accumulate every csv found under the current directory into one frame.
df = None
for root, dirs_list, files_list in os.walk('.'):
    for file_name in files_list:
        extension = os.path.splitext(file_name)[-1]
        if extension == '.csv' and file_name != combined_csv_file:
            # Join with the walk root so files in subfolders resolve
            # (a bare file_name only worked for the top-level folder).
            df_temp = pd.read_csv(os.path.join(root, file_name), index_col=False)
            # Bug fix: test the accumulator (df), not the chunk just read.
            # df_temp is never None, so the original always hit pd.concat
            # and crashed on the very first file with df still None.
            if df is None:
                df = df_temp
            else:
                df = pd.concat([df, df_temp], axis=0, ignore_index=True)

# Only write output when at least one csv was found.
if df is not None:
    df.to_csv(combined_csv_file, index=False)
#//
#//-----------------------------------------------------------------------------
#// Copyright 2007-2011 Mentor Graphics Corporation
#// Copyright 2007-2010 Cadence Design Systems, Inc.
#// Copyright 2010 Synopsys, Inc.
#// Copyright 2019 Tuomas Poikela
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//-----------------------------------------------------------------------------
from ..base.uvm_component import UVMComponent
class UVMScoreboard(UVMComponent):
    """
    The `UVMScoreboard` class should be used as the base class for
    user-defined scoreboards.
    Deriving from `UVMScoreboard` will allow you to distinguish scoreboards from
    other component types inheriting directly from `UVMComponent`. Such
    scoreboards will automatically inherit and benefit from features that may be
    added to `UVMScoreboard` in the future.
    """
    # Bug fix: the original assigned `type_name` as a local variable inside
    # __init__ (a no-op), so `UVMScoreboard.type_name` in get_type_name
    # raised AttributeError. It must be a class attribute.
    type_name = "uvm_scoreboard"
    # // Function: new
    # //
    # // Creates and initializes an instance of this class using the normal
    # // constructor arguments for `UVMComponent`: ~name~ is the name of the
    # // instance, and ~parent~ is the handle to the hierarchical parent, if any.
    def __init__(self, name, parent):
        UVMComponent.__init__(self, name, parent)
    def get_type_name(self):
        """Return the scoreboard's type name string."""
        return UVMScoreboard.type_name
|
import os
from PIL import Image, ImageOps
import click
import shutil
from typing import Optional
IMAGES_EXTS = ["jpg", "png"]
@click.command(name="reduce-image-size")
@click.argument("path-to-images")
@click.argument("new_width", default=1920)
@click.option("--quality", default=95)
@click.option("--grayscale", is_flag=True)
@click.option("--move-original")
def reduce_image_size_command(
    path_to_images: str,
    new_width: int,
    quality: int,
    grayscale: bool,
    move_original: Optional[str],
):
    """Recursively resize jpg/png images under PATH_TO_IMAGES.

    The longer side of each image is scaled to ``new_width`` (aspect ratio
    preserved) and the result is written next to the original with an ``m``
    suffix. With ``--grayscale`` the reduced copy is also converted to
    grayscale; with ``--move-original DIR`` the source file is moved into
    ``DIR`` (relative to its folder) afterwards.
    """
    for root, _, files in os.walk(path_to_images):
        for file in files:
            file_path = os.path.join(root, file)
            file_name, ext = os.path.splitext(file)
            ext = ext[1:]  # drop the leading dot
            if ext not in IMAGES_EXTS:
                continue
            image = Image.open(file_path)
            # Reduced copy gets an "m" suffix, e.g. photo.jpg -> photom.jpg.
            output_path = os.path.join(
                os.path.dirname(file_path), f"{file_name}m.{ext}"
            )
            width, height = image.size
            # Scale the longer side to new_width, keeping the aspect ratio.
            if width > height:
                new_size = (new_width, new_width * height // width)
            else:
                new_size = (new_width * width // height, new_width)
            # Bug fix: Image.DEFAULT_STRATEGY is not a resampling filter
            # (it is a zlib compression-strategy style constant, not a valid
            # resize argument). LANCZOS is the recommended filter for
            # high-quality downscaling.
            image = image.resize(new_size, Image.LANCZOS)
            image.save(output_path, optimize=True, quality=quality)
            if grayscale:
                gray_image = ImageOps.grayscale(image)
                gray_image.save(output_path)
            if move_original:
                original_dir_path = os.path.join(root, move_original)
                # exist_ok avoids the check-then-create race of the original.
                os.makedirs(original_dir_path, exist_ok=True)
                shutil.move(file_path, os.path.join(original_dir_path, file))
if __name__ == "__main__":
    reduce_image_size_command()
|
from fib_route import FibRoute
def test_constructor():
    # Constructing a route with a prefix and a next-hop list must not raise.
    FibRoute("1.0.0.0/8", ["nh1", "nh2"])
def test_property_prefix():
    # The prefix given to the constructor is exposed via the .prefix property.
    r = FibRoute("1.0.0.0/8", ["nh1", "nh2"])
    assert "1.0.0.0/8" == r.prefix
def test_str():
    # Next hops given out of order come back ordered in the string form —
    # presumably FibRoute sorts them internally.
    r = FibRoute("1.0.0.0/8", ["nh2", "nh1"])
    assert str(r) == "1.0.0.0/8 -> nh1, nh2"
    # A route with no next hops still renders the arrow.
    default_route = FibRoute("0.0.0.0/0", [])
    assert str(default_route) == "0.0.0.0/0 -> "
|
from sympy import (
Rational, Symbol, N, I, Abs, sqrt, exp, Float, sin,
cos, symbols)
from sympy.matrices import eye, Matrix, dotprodsimp
from sympy.core.singleton import S
from sympy.testing.pytest import raises, XFAIL
from sympy.matrices.matrices import NonSquareMatrixError, MatrixError
from sympy.simplify.simplify import simplify
from sympy.matrices.immutable import ImmutableMatrix
from sympy.testing.pytest import slow
from sympy.testing.matrices import allclose
def test_eigen():
    """Exercise eigenvals/eigenvects/left_eigenvects on identity, integer,
    symbolic and complex-entry matrices, including the simplify/dotprodsimp
    options and empty/non-square edge cases."""
    R = Rational
    # identity: single eigenvalue 1 with multiplicity 3
    M = Matrix.eye(3)
    assert M.eigenvals(multiple=False) == {S.One: 3}
    assert M.eigenvals(multiple=True) == [1, 1, 1]
    assert M.eigenvects() == (
        [(1, 3, [Matrix([1, 0, 0]),
                 Matrix([0, 1, 0]),
                 Matrix([0, 0, 1])])])
    assert M.left_eigenvects() == (
        [(1, 3, [Matrix([[1, 0, 0]]),
                 Matrix([[0, 1, 0]]),
                 Matrix([[0, 0, 1]])])])
    M = Matrix([[0, 1, 1],
                [1, 0, 0],
                [1, 1, 1]])
    assert M.eigenvals() == {2*S.One: 1, -S.One: 1, S.Zero: 1}
    assert M.eigenvects() == (
        [
            (-1, 1, [Matrix([-1, 1, 0])]),
            ( 0, 1, [Matrix([0, -1, 1])]),
            ( 2, 1, [Matrix([R(2, 3), R(1, 3), 1])])
        ])
    assert M.left_eigenvects() == (
        [
            (-1, 1, [Matrix([[-2, 1, 1]])]),
            (0, 1, [Matrix([[-1, -1, 1]])]),
            (2, 1, [Matrix([[1, 1, 1]])])
        ])
    # symbolic eigenvalues survive as-is
    a = Symbol('a')
    M = Matrix([[a, 0],
                [0, 1]])
    assert M.eigenvals() == {a: 1, S.One: 1}
    # defective matrix: one eigenvalue of algebraic multiplicity 2 but a
    # single eigenvector
    M = Matrix([[1, -1],
                [1, 3]])
    assert M.eigenvects() == ([(2, 2, [Matrix(2, 1, [-1, 1])])])
    assert M.left_eigenvects() == ([(2, 2, [Matrix([[1, 1]])])])
    # compare numerically (2 significant digits) against hand-derived
    # closed forms, since the exact expressions differ syntactically
    M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    a = R(15, 2)
    b = 3*33**R(1, 2)
    c = R(13, 2)
    d = (R(33, 8) + 3*b/8)
    e = (R(33, 8) - 3*b/8)
    def NS(e, n):
        # helper: numeric string with n significant digits
        return str(N(e, n))
    r = [
        (a - b/2, 1, [Matrix([(12 + 24/(c - b/2))/((c - b/2)*e) + 3/(c - b/2),
                              (6 + 12/(c - b/2))/e, 1])]),
        ( 0, 1, [Matrix([1, -2, 1])]),
        (a + b/2, 1, [Matrix([(12 + 24/(c + b/2))/((c + b/2)*d) + 3/(c + b/2),
                              (6 + 12/(c + b/2))/d, 1])]),
        ]
    r1 = [(NS(r[i][0], 2), NS(r[i][1], 2),
           [NS(j, 2) for j in r[i][2][0]]) for i in range(len(r))]
    r = M.eigenvects()
    r2 = [(NS(r[i][0], 2), NS(r[i][1], 2),
           [NS(j, 2) for j in r[i][2][0]]) for i in range(len(r))]
    assert sorted(r1) == sorted(r2)
    # Hermitian matrix with a real symbol
    eps = Symbol('eps', real=True)
    M = Matrix([[abs(eps), I*eps    ],
                [-I*eps,   abs(eps) ]])
    assert M.eigenvects() == (
        [
            ( 0, 1, [Matrix([[-I*eps/abs(eps)], [1]])]),
            ( 2*abs(eps), 1, [ Matrix([[I*eps/abs(eps)], [1]]) ] ),
        ])
    assert M.left_eigenvects() == (
        [
            (0, 1, [Matrix([[I*eps/Abs(eps), 1]])]),
            (2*Abs(eps), 1, [Matrix([[-I*eps/Abs(eps), 1]])])
        ])
    # simplify=True must clear the denominators of the eigenvector entries
    M = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])
    M._eigenvects = M.eigenvects(simplify=False)
    assert max(i.q for i in M._eigenvects[0][2][0]) > 1
    M._eigenvects = M.eigenvects(simplify=True)
    assert max(i.q for i in M._eigenvects[0][2][0]) == 1
    M = Matrix([[Rational(1, 4), 1], [1, 1]])
    assert M.eigenvects(simplify=True) == [
        (Rational(5, 8) - sqrt(73)/8, 1, [Matrix([[-sqrt(73)/8 - Rational(3, 8)], [1]])]),
        (Rational(5, 8) + sqrt(73)/8, 1, [Matrix([[Rational(-3, 8) + sqrt(73)/8], [1]])])]
    # with dotprodsimp enabled, the unsimplified entries keep their
    # fractional form
    with dotprodsimp(True):
        assert M.eigenvects(simplify=False) == [
            (Rational(5, 8) - sqrt(73)/8, 1, [Matrix([[-1/(-Rational(3, 8) + sqrt(73)/8)], [1]])]),
            (Rational(5, 8) + sqrt(73)/8, 1, [Matrix([[8/(3 + sqrt(73))], [1]])])]
    # issue 10719: empty matrix has no eigenvalues/eigenvectors
    assert Matrix([]).eigenvals() == {}
    assert Matrix([]).eigenvals(multiple=True) == []
    assert Matrix([]).eigenvects() == []
    # issue 15119: non-square matrices must raise, regardless of options
    raises(NonSquareMatrixError,
           lambda: Matrix([[1, 2], [0, 4], [0, 0]]).eigenvals())
    raises(NonSquareMatrixError,
           lambda: Matrix([[1, 0], [3, 4], [5, 6]]).eigenvals())
    raises(NonSquareMatrixError,
           lambda: Matrix([[1, 2, 3], [0, 5, 6]]).eigenvals())
    raises(NonSquareMatrixError,
           lambda: Matrix([[1, 0, 0], [4, 5, 0]]).eigenvals())
    raises(NonSquareMatrixError,
           lambda: Matrix([[1, 2, 3], [0, 5, 6]]).eigenvals(
               error_when_incomplete = False))
    raises(NonSquareMatrixError,
           lambda: Matrix([[1, 0, 0], [4, 5, 0]]).eigenvals(
               error_when_incomplete = False))
    # return type (dict vs list) is controlled by `multiple`, for both
    # boolean and callable `simplify` arguments
    m = Matrix([[1, 2], [3, 4]])
    assert isinstance(m.eigenvals(simplify=True, multiple=False), dict)
    assert isinstance(m.eigenvals(simplify=True, multiple=True), list)
    assert isinstance(m.eigenvals(simplify=lambda x: x, multiple=False), dict)
    assert isinstance(m.eigenvals(simplify=lambda x: x, multiple=True), list)
@slow
def test_eigen_slow():
    """Issue 15125: simplify=True must reduce the size of the eigenvalue
    expressions compared to no simplification at all."""
    from sympy.core.function import count_ops
    q = Symbol("q", positive = True)
    m = Matrix([[-2, exp(-q), 1], [exp(q), -2, 1], [1, 1, -2]])
    ops_unsimplified = count_ops(m.eigenvals(simplify=False))
    ops_identity_simplify = count_ops(m.eigenvals(simplify=lambda x: x))
    ops_simplified = count_ops(m.eigenvals(simplify=True))
    assert ops_unsimplified > ops_simplified
    assert ops_identity_simplify > ops_simplified
def test_float_eigenvals():
    """rational=True eigenvalues of a float matrix agree numerically with
    the exact symbolic eigenvalues."""
    m = Matrix([[1, .6, .6], [.6, .9, .9], [.9, .6, .6]])
    exact = [
        Rational(5, 4) - sqrt(385)/20,
        sqrt(385)/20 + Rational(5, 4),
        S.Zero]
    rational_evals = sorted(m.eigenvals(rational=True, multiple=True))
    numeric_evals = sorted(val.evalf() for val in exact)
    for got, expected in zip(rational_evals, numeric_evals):
        assert abs(got - expected) < 10**-9
@XFAIL
def test_eigen_vects():
    """Expected-failure regression for issue 5292: diagonalization of a
    matrix with eigenvalues 1 and I trips a bug in eigenvects()/roots()."""
    m = Matrix(2, 2, [1, 0, 0, I])
    raises(NotImplementedError, lambda: m.is_diagonalizable(True))
    # !!! bug because of eigenvects() or roots(x**2 + (-1 - I)*x + I, x)
    # see issue 5292
    assert not m.is_diagonalizable(True)
    raises(MatrixError, lambda: m.diagonalize(True))
    # this line is only reachable once the bug above is fixed
    (P, D) = m.diagonalize(True)
def test_issue_8240():
    """Eigenvalues of large triangular matrices (issue 8240)."""
    x, y = symbols('x y')
    n = 200
    # a 200x200 diagonal matrix with distinct symbolic entries
    diag_syms = [Symbol('x%s' % i) for i in range(n)]
    M = Matrix(n, n, lambda i, j: diag_syms[i] if i == j else 0)
    eigenvals = M.eigenvals()
    assert len(eigenvals) == n
    for sym in diag_syms:
        assert eigenvals[sym] == 1
    assert set(M.eigenvals(multiple=True)) == set(diag_syms)
    # triangular matrix with a repeated diagonal entry: multiplicities
    # must be reported correctly
    M = Matrix([[x, 0, 0], [1, y, 0], [2, 3, x]])
    eigenvals = M.eigenvals()
    assert eigenvals == {x: 2, y: 1}
    eigenvals = M.eigenvals(multiple=True)
    assert len(eigenvals) == 3
    assert eigenvals.count(x) == 2
    assert eigenvals.count(y) == 1
def test_eigenvals():
    """Basic eigenvals() plus the unfactorable-charpoly error path."""
    M = Matrix([[0, 1, 1],
                [1, 0, 0],
                [1, 1, 1]])
    expected = {2*S.One: 1, -S.One: 1, S.Zero: 1}
    assert M.eigenvals() == expected
    # if the characteristic polynomial cannot be factored, an error is raised
    unfactorable = Matrix([
        [3, 0, 0, 0, -3],
        [0, -3, -3, 0, 3],
        [0, 3, 0, 3, 0],
        [0, 0, 3, 0, 3],
        [3, 0, 0, 3, 0]])
    raises(MatrixError, lambda: unfactorable.eigenvals())
def test_eigenvects():
    """Every reported (value, mult, basis) triple satisfies M*v == val*v."""
    M = Matrix([[0, 1, 1],
                [1, 0, 0],
                [1, 1, 1]])
    for eigenval, multiplicity, basis in M.eigenvects():
        assert len(basis) == 1
        vec = basis[0]
        assert M*vec == eigenval*vec
def test_left_eigenvects():
    """Every reported left eigenvector satisfies v*M == val*v."""
    M = Matrix([[0, 1, 1],
                [1, 0, 0],
                [1, 1, 1]])
    for eigenval, multiplicity, basis in M.left_eigenvects():
        assert len(basis) == 1
        row_vec = basis[0]
        assert row_vec*M == eigenval*row_vec
@slow
def test_bidiagonalize():
    """Check bidiagonalize()/bidiagonal_decomposition() on the identity,
    random real and complex 2x2 matrices (mutable and immutable), and a
    float rectangular matrix.

    NOTE(review): the random inputs are unseeded, so each run tests
    different matrices — consider seeding for reproducibility.
    """
    M = Matrix([[1, 0, 0],
                [0, 1, 0],
                [0, 0, 1]])
    # the identity is its own bidiagonalization and decomposition
    assert M.bidiagonalize() == M
    assert M.bidiagonalize(upper=False) == M
    assert M.bidiagonalize() == M
    assert M.bidiagonal_decomposition() == (M, M, M)
    assert M.bidiagonal_decomposition(upper=False) == (M, M, M)
    assert M.bidiagonalize() == M
    import random
    #Real Tests
    for real_test in range(2):
        test_values = []
        row = 2
        col = 2
        for _ in range(row * col):
            value = random.randint(-1000000000, 1000000000)
            test_values = test_values + [value]
        # L -> Lower Bidiagonalization
        # M -> Mutable Matrix
        # N -> Immutable Matrix
        # 0 -> Bidiagonalized form
        # 1,2,3 -> Bidiagonal_decomposition matrices
        # 4 -> Product of 1 2 3
        M = Matrix(row, col, test_values)
        N = ImmutableMatrix(M)
        N1, N2, N3 = N.bidiagonal_decomposition()
        M1, M2, M3 = M.bidiagonal_decomposition()
        M0 = M.bidiagonalize()
        N0 = N.bidiagonalize()
        N4 = N1 * N2 * N3
        M4 = M1 * M2 * M3
        N2.simplify()
        N4.simplify()
        N0.simplify()
        M0.simplify()
        M2.simplify()
        M4.simplify()
        LM0 = M.bidiagonalize(upper=False)
        LM1, LM2, LM3 = M.bidiagonal_decomposition(upper=False)
        LN0 = N.bidiagonalize(upper=False)
        LN1, LN2, LN3 = N.bidiagonal_decomposition(upper=False)
        LN4 = LN1 * LN2 * LN3
        LM4 = LM1 * LM2 * LM3
        LN2.simplify()
        LN4.simplify()
        LN0.simplify()
        LM0.simplify()
        LM2.simplify()
        LM4.simplify()
        # the decomposition must reproduce the input, and its middle factor
        # must equal the bidiagonalized form (upper and lower variants)
        assert M == M4
        assert M2 == M0
        assert N == N4
        assert N2 == N0
        assert M == LM4
        assert LM2 == LM0
        assert N == LN4
        assert LN2 == LN0
    #Complex Tests
    for complex_test in range(2):
        test_values = []
        size = 2
        for _ in range(size * size):
            real = random.randint(-1000000000, 1000000000)
            comp = random.randint(-1000000000, 1000000000)
            value = real + comp * I
            test_values = test_values + [value]
        M = Matrix(size, size, test_values)
        N = ImmutableMatrix(M)
        # L -> Lower Bidiagonalization
        # M -> Mutable Matrix
        # N -> Immutable Matrix
        # 0 -> Bidiagonalized form
        # 1,2,3 -> Bidiagonal_decomposition matrices
        # 4 -> Product of 1 2 3
        N1, N2, N3 = N.bidiagonal_decomposition()
        M1, M2, M3 = M.bidiagonal_decomposition()
        M0 = M.bidiagonalize()
        N0 = N.bidiagonalize()
        N4 = N1 * N2 * N3
        M4 = M1 * M2 * M3
        N2.simplify()
        N4.simplify()
        N0.simplify()
        M0.simplify()
        M2.simplify()
        M4.simplify()
        LM0 = M.bidiagonalize(upper=False)
        LM1, LM2, LM3 = M.bidiagonal_decomposition(upper=False)
        LN0 = N.bidiagonalize(upper=False)
        LN1, LN2, LN3 = N.bidiagonal_decomposition(upper=False)
        LN4 = LN1 * LN2 * LN3
        LM4 = LM1 * LM2 * LM3
        LN2.simplify()
        LN4.simplify()
        LN0.simplify()
        LM0.simplify()
        LM2.simplify()
        LM4.simplify()
        assert M == M4
        assert M2 == M0
        assert N == N4
        assert N2 == N0
        assert M == LM4
        assert LM2 == LM0
        assert N == LN4
        assert LN2 == LN0
    # rectangular float matrix: middle factor equals the bidiagonalized
    # form, and the product reconstructs M to within float tolerance
    M = Matrix(18, 8, range(1, 145))
    M = M.applyfunc(lambda i: Float(i))
    assert M.bidiagonal_decomposition()[1] == M.bidiagonalize()
    assert M.bidiagonal_decomposition(upper=False)[1] == M.bidiagonalize(upper=False)
    a, b, c = M.bidiagonal_decomposition()
    diff = a * b * c - M
    assert abs(max(diff)) < 10**-12
def test_diagonalize():
    """diagonalize() on a rotation-like matrix, float matrices and a
    near-symmetric float matrix."""
    m = Matrix(2, 2, [0, -1, 1, 0])
    # this matrix has purely imaginary eigenvalues, so restricting to the
    # reals must fail
    raises(MatrixError, lambda: m.diagonalize(reals_only=True))
    P, D = m.diagonalize()
    assert D.is_diagonal()
    assert D == Matrix([[-I, 0], [0, I]])
    # make sure we use floats out if floats are passed in
    m = Matrix(2, 2, [0, .5, .5, 0])
    P, D = m.diagonalize()
    assert all(isinstance(entry, Float) for entry in D.values())
    assert all(isinstance(entry, Float) for entry in P.values())
    _, D2 = m.diagonalize(reals_only=True)
    assert D == D2
    m = Matrix(
        [[0, 1, 0, 0], [1, 0, 0, 0.002], [0.002, 0, 0, 1], [0, 0, 1, 0]])
    P, D = m.diagonalize()
    # numeric check of the defining relation P*D == m*P
    assert allclose(P*D, m*P)
def test_is_diagonalizable():
    """Symmetric symbolic matrices are diagonalizable; Jordan blocks are
    not; rotations are diagonalizable only over the complex numbers."""
    a, b, c = symbols('a b c')
    m = Matrix(2, 2, [a, c, c, b])
    assert m.is_symmetric()
    assert m.is_diagonalizable()
    assert not Matrix(2, 2, [1, 1, 0, 1]).is_diagonalizable()
    rotation = Matrix(2, 2, [0, -1, 1, 0])
    assert rotation.is_diagonalizable()
    assert not rotation.is_diagonalizable(reals_only=True)
def test_jordan_form():
    """jordan_form() on non-square input, matrices already in J.c.f.,
    block-structured matrices, 1x1 matrices, an unfactorable charpoly,
    and float input."""
    m = Matrix(3, 2, [-3, 1, -3, 20, 3, 10])
    raises(NonSquareMatrixError, lambda: m.jordan_form())
    # the next two tests test the cases where the old
    # algorithm failed due to the fact that the block structure can
    # *NOT* be determined from algebraic and geometric multiplicity alone
    # This can be seen most easily when one lets compute the J.c.f. of a matrix that
    # is in J.c.f already.
    m = Matrix(4, 4, [2, 1, 0, 0,
                0, 2, 1, 0,
                0, 0, 2, 0,
                0, 0, 0, 2
    ])
    P, J = m.jordan_form()
    assert m == J
    m = Matrix(4, 4, [2, 1, 0, 0,
                0, 2, 0, 0,
                0, 0, 2, 1,
                0, 0, 0, 2
    ])
    P, J = m.jordan_form()
    assert m == J
    A = Matrix([[ 2, 4, 1, 0],
                [-4, 2, 0, 1],
                [ 0, 0, 2, 4],
                [ 0, 0, -4, 2]])
    P, J = A.jordan_form()
    # defining property: A == P*J*P^-1
    assert simplify(P*J*P.inv()) == A
    assert Matrix(1, 1, [1]).jordan_form() == (Matrix([1]), Matrix([1]))
    assert Matrix(1, 1, [1]).jordan_form(calc_transform=False) == Matrix([1])
    # make sure if we cannot factor the characteristic polynomial, we raise an error
    m = Matrix([[3, 0, 0, 0, -3], [0, -3, -3, 0, 3], [0, 3, 0, 3, 0], [0, 0, 3, 0, 3], [3, 0, 0, 3, 0]])
    raises(MatrixError, lambda: m.jordan_form())
    # make sure that if the input has floats, the output does too
    m = Matrix([
        [ 0.6875, 0.125 + 0.1875*sqrt(3)],
        [0.125 + 0.1875*sqrt(3), 0.3125]])
    P, J = m.jordan_form()
    assert all(isinstance(x, Float) or x == 0 for x in P)
    assert all(isinstance(x, Float) or x == 0 for x in J)
def test_singular_values():
    """singular_values() ordering, symbolic entries, trig entries, and the
    transpose of a rectangular matrix."""
    x = Symbol('x', real=True)
    A = Matrix([[0, 1*I], [2, 0]])
    # when the singular values are sortable they come back decreasing
    assert A.singular_values() == [2, 1]
    A = eye(3)
    A[1, 1] = x
    A[2, 2] = 5
    # Abs(x) cannot be ordered against the numbers, so compare as a set
    assert set(A.singular_values()) == {5, 1, Abs(x)}
    A = Matrix([[sin(x), cos(x)], [-cos(x), sin(x)]])
    simplified = [sv.trigsimp() for sv in A.singular_values()]
    assert simplified == [S.One, S.One]
    A = Matrix([
        [2, 4],
        [1, 3],
        [0, 0],
        [0, 0]
    ])
    expected = [sqrt(sqrt(221) + 15), sqrt(15 - sqrt(221))]
    assert A.singular_values() == expected
    # the transpose gains one zero singular value per extra column
    assert A.T.singular_values() == expected + [0, 0]
def test___eq__():
    """Comparing a Matrix to a non-matrix object returns False rather than
    raising."""
    M = Matrix([[0, 1, 1],
                [1, 0, 0],
                [1, 1, 1]])
    assert (M == {}) is False
def test_definite():
    """Definiteness predicates on textbook examples (Strang, Mathematica
    docs), symbolic matrices and a numeric covariance-like matrix."""
    # Examples from Gilbert Strang, "Introduction to Linear Algebra"
    # Positive definite matrices
    m = Matrix([[2, -1, 0], [-1, 2, -1], [0, -1, 2]])
    assert m.is_positive_definite == True
    assert m.is_positive_semidefinite == True
    assert m.is_negative_definite == False
    assert m.is_negative_semidefinite == False
    assert m.is_indefinite == False
    m = Matrix([[5, 4], [4, 5]])
    assert m.is_positive_definite == True
    assert m.is_positive_semidefinite == True
    assert m.is_negative_definite == False
    assert m.is_negative_semidefinite == False
    assert m.is_indefinite == False
    # Positive semidefinite matrices
    m = Matrix([[2, -1, -1], [-1, 2, -1], [-1, -1, 2]])
    assert m.is_positive_definite == False
    assert m.is_positive_semidefinite == True
    assert m.is_negative_definite == False
    assert m.is_negative_semidefinite == False
    assert m.is_indefinite == False
    m = Matrix([[1, 2], [2, 4]])
    assert m.is_positive_definite == False
    assert m.is_positive_semidefinite == True
    assert m.is_negative_definite == False
    assert m.is_negative_semidefinite == False
    assert m.is_indefinite == False
    # Examples from Mathematica documentation
    # Non-hermitian positive definite matrices
    m = Matrix([[2, 3], [4, 8]])
    assert m.is_positive_definite == True
    assert m.is_positive_semidefinite == True
    assert m.is_negative_definite == False
    assert m.is_negative_semidefinite == False
    assert m.is_indefinite == False
    # Hermetian matrices
    m = Matrix([[1, 2*I], [-I, 4]])
    assert m.is_positive_definite == True
    assert m.is_positive_semidefinite == True
    assert m.is_negative_definite == False
    assert m.is_negative_semidefinite == False
    assert m.is_indefinite == False
    # Symbolic matrices examples
    a = Symbol('a', positive=True)
    b = Symbol('b', negative=True)
    m = Matrix([[a, 0, 0], [0, a, 0], [0, 0, a]])
    assert m.is_positive_definite == True
    assert m.is_positive_semidefinite == True
    assert m.is_negative_definite == False
    assert m.is_negative_semidefinite == False
    assert m.is_indefinite == False
    m = Matrix([[b, 0, 0], [0, b, 0], [0, 0, b]])
    assert m.is_positive_definite == False
    assert m.is_positive_semidefinite == False
    assert m.is_negative_definite == True
    assert m.is_negative_semidefinite == True
    assert m.is_indefinite == False
    # mixed-sign diagonal: indefinite
    m = Matrix([[a, 0], [0, b]])
    assert m.is_positive_definite == False
    assert m.is_positive_semidefinite == False
    assert m.is_negative_definite == False
    assert m.is_negative_semidefinite == False
    assert m.is_indefinite == True
    # numeric float matrix (covariance-like, symmetric positive definite)
    m = Matrix([
        [0.0228202735623867, 0.00518748979085398,
         -0.0743036351048907, -0.00709135324903921],
        [0.00518748979085398, 0.0349045359786350,
         0.0830317991056637, 0.00233147902806909],
        [-0.0743036351048907, 0.0830317991056637,
         1.15859676366277, 0.340359081555988],
        [-0.00709135324903921, 0.00233147902806909,
         0.340359081555988, 0.928147644848199]
    ])
    assert m.is_positive_definite == True
    assert m.is_positive_semidefinite == True
    assert m.is_indefinite == False
    # test for issue 19547: https://github.com/sympy/sympy/issues/19547
    m = Matrix([
        [0, 0, 0],
        [0, 1, 2],
        [0, 2, 1]
    ])
    assert not m.is_positive_definite
    assert not m.is_positive_semidefinite
|
import diy
# Build a master over the MPI communicator and register two blocks by gid.
world = diy.mpi.MPIComm()
master = diy.Master(world)
for gid in (0, 5):
    diy.add_my_block(master, gid)
print(master)
|
# title
# defines the title of the whole set of queries
# OPTIONAL, if not set, timestamp will be used
title = "General overview queries"
# description
# defines the textual and human-intended description of the purpose of these queries
# OPTIONAL, if not set, nothing will be used or displayed
description = "Queries extracted from google doc: https://docs.google.com/document/d/1aJnpoMIr2MUOLlGKk3ZvLTE5mEGk6fzaixunrMF4HhE/edit#heading=h.3i3qrymun2lk"
# output_destination
# defines where to save the results, input can be:
# * a local path to a folder
# * a URL for a google sheets document
# * a URL for a google folder
# NOTE: On windows, folders in a path use backslashes, in such a case it is mandatory to attach an 'r' in front of the quotes, e.g. r"C:\Users\sresch\.."
# In the other cases the 'r' is simply ignored; thus best would be to always leave it there.
# OPTIONAL, if not set, folder of executed script will be used
output_destination = r"https://drive.google.com/drive/folders/13v-SQyeene9-YpUtJPeb79pSqD4DE6rw"
# output_format
# defines the format in which the result data shall be saved (currently available: csv, tsv, xml, json, xlsx)
# OPTIONAL, if not set, csv will be used
output_format = ""
# summary_sample_limit
# defines how many rows shall be displayed in the summary
# OPTIONAL, if not set, 5 will be used
summary_sample_limit = 3
# cooldown_between_queries
# defines how many seconds should be waited between execution of individual queries in order to prevent exhaustion of Google API due to too many writes per time-interval
# OPTIONAL, if not set, 0 will be used
cooldown_between_queries = 10
# endpoint
# defines the SPARQL endpoint against which all the queries are run
# MANDATORY
endpoint = "https://virtuoso.parthenos.d4science.org/sparql"
# queries
# defines the set of queries to be run.
# MANDATORY
queries = [
{
"title" : "Q1 - ?subject-class ?predicate ?object-class" ,
"description" : "A complete overview/summary of all types of relations in the data." ,
"query" : """
SELECT ?st ?p ?ot ( COUNT( ?p ) AS ?pCount ) WHERE {
GRAPH ?g {
?s ?p ?o .
?s a ?st .
?o a ?ot
}
}
GROUP BY ?st ?p ?ot
ORDER BY DESC ( ?pCount )
"""
},
{
"title" : "Q2 - ?subject-class ?predicate" ,
"description" : "Reducing above query (Q1) to just combinations of subject-class and predicate." ,
"query" : """
SELECT ?st ?p COUNT(?p) AS ?pCount WHERE {
GRAPH ?g {
?s ?p ?o .
?s a ?st .
}
}
GROUP BY ?st ?p
ORDER BY DESC ( ?pCount )
"""
},
{
"title" : "Q3 - all used predicates + frequencies" ,
"description" : "" ,
"query" : """
SELECT ?p (COUNT(?p) as ?pCount) WHERE {
[] ?p []
}
GROUP BY ?p
ORDER BY DESC(?pCount)
"""
},
{
"title" : "Q4 - all used Subject types + frequencies" ,
"description" : "" ,
"query" : """
SELECT ?type (COUNT(?type) as ?typeCount) WHERE {
[] a ?type
}
GROUP BY ?type
ORDER BY DESC(?typeCount)
"""
},
{
"title" : "Q5 - just CIDOC-CRM types + frequencies" ,
"description" : "" ,
"query" : """
SELECT ?type (COUNT(?type) as ?typeCount) WHERE {
[] a ?type.
FILTER(STRSTARTS(STR(?type), "crm:"))
}
GROUP BY ?type
ORDER BY ?typeCount
"""
},
{
"title" : "Q5a - Why is PC14 an entity type? #11653" ,
"description" : "" ,
"query" : """
SELECT ?p ?ot ( COUNT( ?p ) as ?pCount ) WHERE {
graph ?g {
?s ?p ?o .
?s a <crm:PC14_carried_out_by> .
?o a ?ot
}
}
GROUP BY ?p ?ot
ORDER BY DESC ( ?pCount )
"""
},
{
"title" : "Q5a - Why is PC14 an entity type? #11653" ,
"description" : "" ,
"query" : """
SELECT ?g ?s ?p ?o WHERE {
graph ?g {
?s ?p ?o .
?s a <crm:PC14_carried_out_by> .
}
}
"""
},
{
"title" : "Q6 - just CIDOC-PE types + frequencies" ,
"description" : "" ,
"query" : """
SELECT ?type (COUNT(?type) as ?typeCount) WHERE {
[] a ?type.
FILTER(STRSTARTS(STR(?type), "pe:"))
}
GROUP BY ?type ORDER BY DESC(?typeCount)
"""
},
{
"title" : "Q6b - CIDOC-PE types with inheritance" ,
"description" : "Same as Q6a but with inference activated:" ,
"query" : """
DEFINE input:inference 'parthenos_rules'
SELECT ?type (COUNT(?type) as ?typeCount) WHERE {
[] a ?type.
FILTER(STRSTARTS(STR(?type), "pe:"))
}
GROUP BY ?type ORDER BY DESC(?typeCount)
"""
},
{
"title" : "Q7 Find out all datasets and calculate how many triples there are per graph" ,
"description" : "" ,
"query" : """
SELECT DISTINCT ?g (count(?p) as ?triples) WHERE {
GRAPH ?g { ?s ?p ?o }
}
GROUP BY ?g
ORDER BY DESC (?triples)
"""
},
{
"title" : "Q10 The number of nodes equals to the sum of distinct subjects and objects." ,
"description" : "" ,
"query" : """
SELECT (COUNT (DISTINCT ?node) AS ?vNum) WHERE {
{ ?node ?p ?obj } UNION
{ ?obj ?p ?node }
}
"""
},
{
"title" : "Q11 Number of single triples between two nodes" ,
"description" : "" ,
"query" : """
SELECT ?s ?o (COUNT (*) AS ?tNum) WHERE {
{ ?s ?p ?o } UNION
{ ?o ?q ?s }
}
GROUP BY ?s ?o ORDER BY DESC (?tNum)
"""
},
{
"title" : "Q12 - Return most connected entities (ignoring related graphs)" ,
"description" : "" ,
"query" : """
SELECT ?resource COUNT(*) AS ?countOfConnections WHERE {
{ ?resource ?pTo ?rTo } UNION
{ ?rFrom ?pFrom ?resource }
}
GROUP BY ?resource
ORDER BY DESC ( ?countOfConnections )
"""
},
{
"title" : "Q13 - Return most connected entities while differentiating between incoming and outgoing edges (ignoring related graphs)" ,
"description" : "" ,
"query" : """
SELECT
?resource
COUNT(?pFrom) AS ?countPredicates_FromResource
COUNT(?pTo) AS ?countPredicates_ToResource
WHERE {
{ ?resource ?pFrom ?resourceTo } UNION
{ ?resourceFrom ?pTo ?resource }
}
GROUP BY ?resource
ORDER BY DESC ( ?countPredicates_FromResource )
"""
},
{
"title" : "Q14 - Return most connected entities (including related graphs)" ,
"description" : "" ,
"query" : """
SELECT ?graph ?resource COUNT(*) AS ?countOfConnections WHERE {
GRAPH ?graph {
{ ?resource ?pTo ?resourceTo } UNION
{ ?resourceFrom ?pFrom ?resource }
}
}
GROUP BY ?graph ?resource
ORDER BY DESC (?countOfConnections) ?graph
"""
},
{
"title" : "Q15 - Return most connected entities while differentiating between incoming and outgoing edges (including related graphs)" ,
"description" : "" ,
"query" : """
SELECT ?graph ?resource COUNT(?pFrom) AS ?countPredicates_FromResource COUNT(?pTo) AS ?countPredicates_ToResource WHERE {
GRAPH ?graph {
{ ?resource ?pFrom ?resourceTo } UNION
{ ?resourceFrom ?pTo ?resource }
}
}
GROUP BY ?graph ?resource
ORDER BY DESC ( ?countPredicates_FromResource )
"""
},
{
"title" : "Q16 - Return identical triples and the number of graphs they are spread over" ,
"description" : "" ,
"query" : """
SELECT ?s ?p ?o COUNT(?g) AS ?count_graphs WHERE {
GRAPH ?g { ?s ?p ?o }
}
GROUP BY ?s ?p ?o
HAVING ( COUNT( ?g ) > 1)
ORDER BY DESC ( ?count_graphs )
"""
},
{
"title" : "Q17 - count of graphs grouped by their count of triples" ,
"description" : "Returns a meta-count, i.e. first the query counts all triples per graphs, resulting in ?triplesInGraphs and then it counts how many graphs have such a ?triplesInGraphs number. So it returns a compressed statistic about the size-distribution of graphs." ,
"query" : """
SELECT COUNT(?g) AS ?numberOfGraphs ?triplesInGraphs WHERE {
SELECT ?g COUNT(*) AS ?triplesInGraphs WHERE {
GRAPH ?g { ?s ?p ?o } .
}
GROUP BY ?g
}
GROUP BY ?triplesInGraphs
ORDER BY ?triplesInGraphs
"""
},
{
"title" : "Q18 Graphs per Provenance " ,
"description" : "" ,
"query" : """
SELECT ?source (COUNT(DISTINCT ?g) as ?gcnt) WHERE {
GRAPH ?g {?s ?p ?o .} .
GRAPH <http://www.d-net.research-infrastructures.eu/provenance/graph> {?g <http://www.d-net.research-infrastructures.eu/provenance/collectedFrom> ?api . ?api <http://www.d-net.research-infrastructures.eu/provenance/isApiOf> ?source.}
}
GROUP BY ?source
"""
},
]
# Notes on syntax of queries-set:
# * the set of queries is enclosed by '[' and ']'
# * individual queries are enclosed by '{' and '},'
# * All elements of a query (title, description, query) need to be defined using quotes as well as their contents, and both need to be separated by ':'
# * Individual elements of a query (title, description, query) need to be separated from each other using commas ','
# * The content of a query needs to be defined using triple quotes, e.g. """ SELECT * WHERE .... """
# * Any indentation (tabs or spaces) do not influence the queries-syntax, they are merely syntactic sugar.
|
class Solution:
    """Search for a target in a rotated, ascending array of distinct values."""

    def find_min(self, nums: list[int]) -> int:
        """Return the index of the smallest element (the rotation pivot).

        For an array that is not rotated, this returns len(nums).
        """
        lo, hi = 0, len(nums)
        while lo < hi:
            mid: int = lo + (hi - lo) // 2
            if nums[mid] >= nums[0]:
                lo = mid + 1
            else:
                hi = mid
        return lo

    def bi_search(self, nums: list[int], target: int, lo: int, hi: int) -> int:
        """Classic binary search on the sorted half-open range [lo, hi)."""
        while lo < hi:
            mid: int = lo + (hi - lo) // 2
            if nums[mid] < target:
                lo = mid + 1
            elif nums[mid] > target:
                hi = mid
            else:
                return mid
        return -1

    def search(self, nums: list[int], target: int) -> int:
        """Return the index of *target*, or -1 if absent."""
        if not nums:
            return -1
        pivot = self.find_min(nums)
        # The target lives in the left (pre-pivot) segment exactly when it is
        # at least as large as the first element; otherwise search the right.
        if target >= nums[0]:
            return self.bi_search(nums, target, 0, pivot)
        return self.bi_search(nums, target, pivot, len(nums))
"""
Generate a random permutation of a finite sequence
Shuffle an array
"""
import random
def shuffle_std(arr):
    """Shuffle an array using the standard library in-place.

    Like random.shuffle itself, this returns None; *arr* is mutated.
    """
    random.shuffle(arr)
def shuffle_fy(arr):
    """
    Fisher-Yates shuffle: generate a uniformly random permutation of a
    finite sequence, in place.
    Time: O(n)
    Space: O(1)
    Links:
        https://en.wikipedia.org/wiki/Fisher–Yates_shuffle
    Args:
        arr (sequence): sequence to be shuffled
    Returns:
        (sequence): the same sequence, shuffled
    """
    n = len(arr)
    for i in range(n):
        # pick a position from the not-yet-fixed suffix [i, n) and swap it in
        j = random.randrange(i, n)
        arr[i], arr[j] = arr[j], arr[i]
    return arr
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.utils.translation import ugettext_lazy as _
from django.core.validators import RegexValidator
# Create your models here.
class PersonalInfo(AbstractBaseUser):
    """Custom user model: authenticates by email, requires a username."""
    # Usernames may contain only letters and digits.
    alphanumeric = RegexValidator(r'^[0-9a-zA-Z]*$', message='Only alphanumeric characters are allowed.')
    username = models.CharField(unique=True, max_length=20, validators=[alphanumeric])
    email = models.EmailField(verbose_name='email address', unique=True, max_length=244)
    first_name = models.CharField(max_length=30, null=True, blank=True)
    last_name = models.CharField(max_length=50, null=True, blank=True)
    is_active = models.BooleanField(default=True, null=False)
    is_staff = models.BooleanField(default=False, null=False)
    father_name = models.CharField(_("Father Name"), max_length=20)
    street_name = models.CharField(_("Street Name"), max_length=20)
    city = models.CharField(_("City"), max_length=20)
    state = models.CharField(_("State"), max_length=20)
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def get_full_name(self):
        """Return "first last", skipping name parts that are unset.

        Bug fix: the original returned ``self.fullname`` — an attribute that
        was never assigned (AttributeError) — instead of the local variable,
        and it raised TypeError when either nullable name field was None.
        """
        return " ".join(part for part in (self.first_name, self.last_name) if part)
    def get_short_name(self):
        """Return the short display identifier for the user (the username)."""
        return self.username
    def __str__(self):
        return self.email
class Mobile(models.Model):
    """Abstract mixin adding a validated mobile-number field."""
    # 10 digits starting with 7, 8 or 9 (Indian mobile-number format).
    regex = RegexValidator(regex=r'^[789]\d{9}$', message="Invalid Mobile Number")
    mobile = models.CharField(_("mobile number"), validators=[regex], blank=True, null=True, max_length=10,
                              help_text="Enter a valid 10 digit mobile number.")
    is_mobile_verified = models.BooleanField(_("is mobile verified"), default=False, blank=False, null=False)
    def __str__(self):
        # NOTE(review): `mobile` is nullable, and __str__ must return a str —
        # this breaks for rows where mobile is NULL. TODO confirm intended.
        return self.mobile
    class Meta:
        abstract = True
class PreviousWork(models.Model):
    """Abstract mixin describing a prior job/engagement with a date range."""
    title = models.CharField(_("Title"), max_length=50)
    description = models.TextField(_("Description"))
    start_date = models.DateField(_("Start Date"))
    end_date = models.DateField(_("End Date"))
    class Meta:
        abstract = True
class TypesOfPosition(models.Model):
    """Concrete lookup table of position types (name + description)."""
    name = models.CharField(_("Name"), max_length=50)
    description = models.TextField(_("Description"))
    def __str__(self):
        return self.name
|
#!/usr/bin/env python
import os, sys, string, re, csv, xmlrpc.client, pickle, signal
import pandas as pd
import patients
import concept_finder
# path to temporary progress tracking file
progress_path = 'data/mimic/extract_concepts_progress'
# path to MIMIC-III's NOTEEVENTS.csv
noteevents_path = 'mimic-iii-clinical-database-1.4/NOTEEVENTS.csv'
# Translation table: hyphen, slash and newline become spaces; all other
# punctuation is deleted.
trans = str.maketrans('-/\n', '   ',
                      string.punctuation.replace('-', '').replace('/', ''))
def preprocess(text):
    """Split a clinical note into cleaned, lowercased sentence strings."""
    text = text.replace('\r\n', '\n')
    text = re.sub('\\[(.*?)\\]', '', text)  # remove deidentified parts
    text = re.sub('--|__|==', '', text)     # strip separator runs
    # sentence boundaries: ". ", ".\n", blank line, ": ", ":\n"
    raw_sentences = re.split('\. |\.\n|\n\n|: |:\n', text)
    cleaned = (chunk.strip().lower().translate(trans)
               for chunk in raw_sentences)
    return [sentence for sentence in cleaned if sentence]
# Map each hospital admission (HADM_ID) to the per-episode output csv path.
hadm_id2path = dict()
for ep in patients.episodes():
    ep_df = ep.get_info()
    if len(ep_df) == 0:
        continue  # skip episodes without info rows
    stay_df = ep.get_stay()
    # NOTE(review): keys are whatever type stay_df['HADM_ID'] yields; lookups
    # below use int(row['HADM_ID']) — confirm the types line up.
    hadm_id = stay_df['HADM_ID']
    hadm_id2path[hadm_id] = os.path.join(ep.patient.directory,
                                         'episode' + ep.number + '_noteconcepts.csv')
# Total row count of NOTEEVENTS.csv, used only for the progress display.
num_rows = 2083180
# use a file to track the progress since this takes some time
progf = open(progress_path, 'a+')
progf.seek(0)
try:
    # the progress file holds the index of the next note to process
    done_notes = int(progf.read())
except ValueError:
    done_notes = 0  # empty/corrupt progress file: start from the beginning
cf = concept_finder.concept_finder()
with open(noteevents_path, 'r') as f:
    csvr = csv.DictReader(f)
    for (i_note, row) in enumerate(csvr):
        if i_note < done_notes:
            continue # skip already done notes
        if i_note % 100 == 0:
            print(f'{i_note}/{num_rows}', flush=True, end='\r')
        # only notes belonging to a known admission are processed
        if not row['HADM_ID'] or int(row['HADM_ID']) not in hadm_id2path:
            continue
        sentences = preprocess(row['TEXT'])
        cuis = cf.extract_concepts(sentences)
        # Pause SIGINT (KeyboardInterrupt) while writing the data to avoid
        # corrupting anything. Almost all time should be spent in
        # cf.extract_concepts, but you never know.
        oldhandler = signal.signal(signal.SIGINT, signal.SIG_IGN)
        path = hadm_id2path[int(row['HADM_ID'])]
        f_existed = os.path.isfile(path)
        with open(path, 'a') as epf:
            writer = csv.DictWriter(epf,
                                    fieldnames=['CHARTDATE', 'CONCEPTS'])
            if not f_existed:
                writer.writeheader()  # write header only for a new file
            writer.writerow({
                'CHARTDATE': row['CHARTDATE'],
                'CONCEPTS': ' '.join(cuis)
            })
        # record that this note is finished; truncate then append-write
        # (mode 'a+' appends at the new end-of-file after the truncate)
        progf.truncate(0)
        progf.write(str(i_note + 1))
        # Resume SIGINT
        signal.signal(signal.SIGINT, oldhandler)
print(f'{num_rows}/{num_rows}')
|
import time
import threading
def calcSquare(numbers):
    """Print the square of every value in *numbers* (0.2 s pause per value)."""
    print("Calculating square numbers")
    for value in numbers:
        time.sleep(0.2)
        print("square:", value * value)
def calcCube(numbers):
    """Print the cube of every value in *numbers* (0.2 s pause per value)."""
    print("Calculating cube numbers")
    for value in numbers:
        time.sleep(0.2)
        print("cube:", value * value * value)
# Workload shared by both worker threads.
array = [2, 3, 8, 9]
start = time.time()

# Run the square and cube printers concurrently; each sleeps 0.2 s per item,
# so the threaded version finishes in roughly half the sequential time.
threads = [
    threading.Thread(target=calcSquare, args=(array,)),
    threading.Thread(target=calcCube, args=(array,)),
]
for thread in threads:
    thread.start()
for thread in threads:
    thread.join()
print("Done in ", time.time()-start, " seconds")
#########################################################
### There is no EBS Snapshot provider in CloudFormation #
### like in Terraform. Leaving this placeholder #
#########################################################
|
from pathlib import Path
import pytest
@pytest.fixture(scope="session")
def data_dir() -> Path:
    """Session fixture: the ``data/`` directory next to this test module."""
    return Path(__file__).parent / "data"
@pytest.fixture(scope="session")
def genome_fasta_dir(data_dir: Path) -> Path:
    """Session fixture: the genome FASTA directory inside the data directory."""
    return data_dir / "genome_fasta"
|
"""shared raw nodes that shared transformer act on"""
import pathlib
from dataclasses import dataclass
from typing import Union
from marshmallow import missing
from . import base_nodes
@dataclass
class RawNode(base_nodes.NodeBase):
    """Common base for all raw (parsed, not yet transformed) nodes."""
    pass
@dataclass
class ResourceDescription(RawNode, base_nodes.ResourceDescription):
    """Raw variant of the shared ResourceDescription node."""
    pass
@dataclass
class URI(RawNode, base_nodes.URI):
    """Raw variant of the shared URI node."""
    pass
@dataclass
class Dependencies(RawNode, base_nodes.Dependencies):
    """Raw dependencies specification."""
    # Dependency file given either as a raw URI node or a local path;
    # `missing` is the marshmallow sentinel for "not provided".
    file: Union[URI, pathlib.Path] = missing
@dataclass
class ImplicitInputShape(RawNode, base_nodes.ImplicitInputShape):
    """Raw variant of the shared ImplicitInputShape node."""
    pass
@dataclass
class ImplicitOutputShape(RawNode, base_nodes.ImplicitOutputShape):
    """Raw variant of the shared ImplicitOutputShape node."""
    pass
@dataclass
class ImportableModule(RawNode, base_nodes.ImportableModule):
    """Raw variant of the shared ImportableModule node."""
    pass
@dataclass
class ImportableSourceFile(RawNode, base_nodes.ImportableSourceFile):
    """Raw importable source file."""
    # Location of the source file, given as a raw URI node.
    source_file: URI = missing
# Either kind of importable source (module reference or source file).
ImportableSource = Union[ImportableModule, ImportableSourceFile]
|
# Explicit imports instead of the original wildcard import: `from mwapi
# import *` hides where these names come from and pollutes the namespace.
from mwapi import messages, services

print(messages)
print(services)
"""
light cone generator test
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2017, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from yt.units.yt_array import \
YTQuantity
from yt.utilities.on_demand_imports import \
_h5py as h5py
import numpy as np
import os
import shutil
import tempfile
from yt_astro_analysis.cosmological_observation.api import \
LightCone
from yt.testing import \
assert_equal, \
requires_module
from yt.utilities.answer_testing.framework import \
AnswerTestingTest, \
requires_sim
# Enzo simulation parameter file driving the light cone answer tests.
ETC = "enzo_tiny_cosmology/32Mpc_32.enzo"
# Reference unit quantities used to validate the units recorded in the
# projected light-cone stack.
_funits = {'density': YTQuantity(1, 'g/cm**3'),
           'temperature': YTQuantity(1, 'K'),
           'length': YTQuantity(1, 'cm')}
@requires_module("h5py")
class LightConeProjectionTest(AnswerTestingTest):
    """Answer test: build a light cone projection of one field and compare
    its (mean, nonzero-min, max) statistics against the stored answer."""
    _type_name = "LightConeProjection"
    _attrs = ()

    def __init__(self, parameter_file, simulation_type,
                 field, weight_field=None):
        self.parameter_file = parameter_file
        self.simulation_type = simulation_type
        self.ds = os.path.basename(self.parameter_file)
        self.field = field
        self.weight_field = weight_field

    @property
    def storage_name(self):
        """Unique answer-store key: <dataset>_<field>_<weight_field>."""
        return "_".join(
            (os.path.basename(self.parameter_file),
             self.field, str(self.weight_field)))

    def run(self):
        """Project the light cone in a temp dir, validate the stored units,
        and return np.array([mean, nonzero-min, max]) of the data."""
        # Set up in a temp dir so the LC/ output does not pollute the cwd.
        tmpdir = tempfile.mkdtemp()
        curdir = os.getcwd()
        os.chdir(tmpdir)
        lc = LightCone(
            self.parameter_file, self.simulation_type, 0., 0.1,
            observer_redshift=0.0, time_data=False)
        lc.calculate_light_cone_solution(
            seed=123456789, filename="LC/solution.txt")
        lc.project_light_cone(
            (600.0, "arcmin"), (60.0, "arcsec"), self.field,
            weight_field=self.weight_field, save_stack=True)
        dname = "%s_%s" % (self.field, self.weight_field)
        # BUG FIX: open explicitly read-only -- h5py.File without a mode is
        # deprecated (and historically defaulted to append).
        fh = h5py.File("LC/LightCone.h5", mode="r")
        data = fh[dname][()]
        units = fh[dname].attrs["units"]
        if self.weight_field is None:
            punits = _funits[self.field] * _funits['length']
        else:
            punits = _funits[self.field] * _funits[self.weight_field] * \
              _funits['length']
            wunits = fh['weight_field_%s' % self.weight_field].attrs['units']
            pwunits = _funits[self.weight_field] * _funits['length']
            assert wunits == str(pwunits.units)
        assert units == str(punits.units)
        fh.close()

        # clean up
        os.chdir(curdir)
        shutil.rmtree(tmpdir)

        mean = data.mean()
        mi = data[data.nonzero()].min()
        ma = data.max()
        return np.array([mean, mi, ma])

    def compare(self, new_result, old_result):
        """Exact comparison against the stored answer."""
        assert_equal(new_result, old_result, verbose=True)
@requires_sim(ETC, "Enzo")
def test_light_cone_projection():
    """Yield answer tests: an unweighted density projection and a
    density-weighted temperature projection of the tiny Enzo cosmology."""
    yield LightConeProjectionTest(ETC, "Enzo", 'density')
    yield LightConeProjectionTest(ETC, "Enzo", 'temperature',
                                  weight_field='density')
|
from engine import Decoration
from engine import Wall
from engine import Map2d
from engine import Player
def test_create_with_pattern():
    """A hex pattern string decodes into the expected grid of blocks."""
    pattern = "0B010F110A\n3CFF3E 3C"
    map2d = Map2d.create_with_pattern(pattern)
    grid = map2d.grid

    # First row: five walls whose hex codes decode to these type ids.
    for x, expected_type in enumerate([11, 1, 15, 17, 10]):
        block = grid.get_block(x, 0)
        assert isinstance(block, Wall)
        assert block.type_id == expected_type

    # Second row: decorations (solid and pass-through) plus one empty cell.
    for x, expected_type, solid in ((0, 60, True), (2, 62, False), (4, 60, True)):
        block = grid.get_block(x, 1)
        assert isinstance(block, Decoration)
        assert block.type_id == expected_type
        assert bool(block.is_solid) == solid
    assert grid.get_block(3, 1) is None
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import re
import sys
import subprocess
# pathlib is standard on Python 3; fall back to the pathlib2 backport on
# Python 2 (this module still targets both interpreters).
if sys.version_info >= (3, 0):
    import pathlib as pathlib
else:
    import pathlib2 as pathlib
import click_spinner
import yaml
from aiida_project import constants
def clone_git_repo_to_disk(github_url, location, branch=None):
    """
    Clone the git repository at github_url to location on disk.

    :param str github_url: URL to github repository
    :param str branch: Specific branch of the github repository
    :param str location: path to the location disk
    :raises Exception: if the ``git clone`` subprocess exits non-zero
    """
    # --single-branch keeps the clone small: only one branch is fetched.
    git_clone_args = ["git", "clone", "--single-branch"]
    if branch:
        # Option and value are kept as one token; this only works because the
        # list is joined into a single shell string below.
        git_clone_args.append("--branch {}".format(branch))
    git_clone_args.append("{}".format(github_url))
    git_clone_args.append("{}".format(location))
    # NOTE(review): shell=True with unquoted user-supplied url/location/branch
    # breaks on spaces and is shell-injection prone -- consider passing the
    # argument list with shell=False instead.
    git_clone_command = " ".join(git_clone_args)
    print("Cloning repository {} ...".format(github_url))
    with click_spinner.spinner():
        errcode, stdout, stderr = run_command(git_clone_command, shell=True)
    if errcode:
        raise Exception("Cloning the repository from GitHub failed. Used "
                        "command {}, STDERR={}"
                        .format(git_clone_command, stderr))
def build_source_url(username, repository):
    """
    Create valid GitHub url for a user's repository.

    :param str username: username of the repository owner
    :param str repository: name of the target repository
    """
    return 'https://github.com/{username}/{repository}'.format(
        username=username, repository=repository)
def run_command(command, shell=True, env=None):
    """Run a command through python subprocess.

    Returns a (returncode, stdout, stderr) tuple with decoded output.
    """
    process = subprocess.Popen(command, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, shell=shell, env=env)
    out, err = process.communicate()
    return (process.returncode, out.decode(), err.decode())
def assert_valid_aiida_version(aiida_version_string):
    """Verify that given aiida version is of type N.N.N(a/bN)."""
    # Canonical version format according to PEP440, taken from
    # https://www.python.org/dev/peps/pep-0440
    pep440_pattern = (r'^([1-9][0-9]*!)?(0|[1-9][0-9]*)'
                      r'(\.(0|[1-9][0-9]*))*((a|b|rc)(0|[1-9][0-9]*))?'
                      r'(\.post(0|[1-9][0-9]*))?(\.dev(0|[1-9][0-9]*))?$')
    return re.match(pep440_pattern, aiida_version_string) is not None
def assert_valid_package_def(package_definition):
    """
    Verify package definition is formatted as <username>/<repository>:<branch>

    :param str package_definition: String of the form
        <username>/<repositor>:<branchname> defining the source of a package
    """
    allowed = r"A-Za-z0-9_\.\\\-~"
    with_branch = r"^[{0}]+\/[{0}]+\:[{0}]+$".format(allowed)
    without_branch = r"^[{0}]+\/[{0}]+$".format(allowed)
    pattern = "|".join((with_branch, without_branch))
    return re.match(pattern, package_definition) is not None
def assert_package_is_source(package_definition):
    """Check if a defined package refers to a source repo."""
    # basically identical to assert_valid_package_def but this regex will
    # also match <username>/<repository>:<branchname>[extras]
    allowed = r"A-Za-z0-9_\.\\\-~"
    pattern = r"^[{0}]+\/[{0}]+\:[{0}]+$|^[{0}]+\/[{0}]+".format(allowed)
    return re.search(pattern, package_definition) is not None
def assert_package_has_extras(package):
    """Return True if the package string ends with an [extras] specifier."""
    has_extras = re.search(r"\[.*\]$", package)
    return has_extras is not None
def unpack_package_def(package_definition):
    """
    Split a package definition into its components.

    :param str package_definition: String of the form
        <username>/<repositor>:<branchname> defining the source of a package
    :returns: list containing [username, repository, branch] where branch
        is set to `None` for strings of the form <username>/<repository>
    :rtype: list

    (DOC FIX: the original docstring claimed a tuple was returned, but the
    function has always returned a list.)
    """
    # Splitting on "/" and ":" yields 2 or 3 fields; padding with None
    # guarantees a branch entry before truncating to exactly three items.
    return (re.split(r"[\/\:]", package_definition) + [None])[:3]
def unpack_raw_package_input(package):
    """
    Split the raw user package input into definition and extras.

    Raw input for source packages can be of the form
    username/repository:branch but could potentially also include
    additional extras definitions, i.e.
    aiidateam/aiida-core:develop[docs] which need to be removed before
    further processing of the string.

    :param str package: String of the form
        <username>/<repositor>:<branchname>[extras] defining the source of a
        package including possible extras of the package
    :returns: tuple (package_definition, extras) where extras is the
        bracketed specifier (e.g. "[docs]") or '' if none was given
    """
    extras_regex = r"\[.*\]"
    package_extras = re.search(extras_regex, package)
    if package_extras is None:
        return (package, '')
    # Strip the extras specifier from the definition before returning it.
    return (re.sub(extras_regex, '', package), package_extras.group(0))
def check_command_avail(command, test_version=True):
    """
    Test if a command is available in the current shell environment.

    :param str command: Command to test
    :param bool test_version: If `True` command --version will be checked
        instead of the plain command
    """
    # run command --version because some commands do not exit with
    # exitcode 0 when called without any arguments (i.e. git)
    command_to_check = "{} --version".format(command) if test_version else command
    errno, stdout, stderr = run_command(command_to_check, shell=True)
    if not errno:
        return True
    print("Failed! Command {} not found".format(command_to_check))
    return False
def load_project_spec():
    """Load config specs from .projects file.

    Returns the parsed YAML mapping, or an empty dict when no projects
    file exists yet.
    """
    # Note: Path.home() is the usual classmethod spelling; Path().home()
    # works identically.
    home = pathlib.Path().home()
    config_folder = home / constants.CONFIG_FOLDER
    projects_file = str(config_folder / constants.PROJECTS_FILE)
    # NOTE(review): FileNotFoundError is Python-3 only, but this module still
    # supports Python 2 via the pathlib2 import above -- on py2 a missing
    # file raises IOError and this handler would itself raise NameError.
    # Confirm whether py2 support is still required.
    try:
        with open(projects_file, 'r') as f:
            project_specs = yaml.safe_load(f)
    except FileNotFoundError:
        project_specs = {}
    return project_specs
def save_project_spec(project_spec):
    """Save project specification to .projects file.

    :param dict project_spec: project specification containing a
        'project_name' key, used as the storage key. The caller's dict is
        left unmodified.
    """
    home = pathlib.Path().home()
    config_folder = home / constants.CONFIG_FOLDER
    if not config_folder.exists():
        config_folder.mkdir()
    projects_file = str(config_folder / constants.PROJECTS_FILE)
    project_specs = load_project_spec()
    # BUG FIX: work on a shallow copy -- the original popped 'project_name'
    # out of the caller's dictionary as a hidden side effect.
    spec = dict(project_spec)
    project_name = spec.pop('project_name')
    project_specs.update({project_name: spec})
    with open(projects_file, 'w') as f:
        yaml.dump(project_specs, f, default_flow_style=False)
def project_name_exists(project_name):
    """Check if the project name is already in use."""
    # Membership on the dict itself tests its keys.
    return project_name in load_project_spec()
|
#!/usr/bin/env python
# coding: utf-8
# # Overview
#
# This is the __expert level__ version of [question 3](../novice/Q3.ipynb) from the novice level and [question 3](../intermediate/Q3.ipynb) from the intermediate level. Previously we focused on the frequency of the different types of lesion diagnosis and finding if there is a statistical difference between lesion types (regarding malignancy). This notebook focuses on clustering those same lesion diagnoses by looking at the images and using K-means. We want to see if the size of the clusters are similar to the frequencies found in the previous question. We do this to answer the following question: Does the clustering of lesion diagnosis align with the frequency chart from the beginner section?
#
# # Table of Content
#
# 1. [Setup](#setup_cell)
# 2. [Data Loading](#loading)
# 3. [Analysis](#analyze)
# 4. [Visualization](#viz_cell)
# 5. [Discussion](#discussion)
# # Import <a id="setup_cell"></a>
# In[1]:
import PIL
import cv2
import os
from os import listdir
from os.path import isfile, join
import json
import numpy as np
import seaborn as sns # pip install -U seaborn
from matplotlib import pyplot as plt
import pandas as pd
import glob
from IPython.display import display
from sklearn.cluster import KMeans
# from kmeans_pytorch import kmeans, kmeans_predict
# import torch
from tqdm import tqdm
# # Data loading <a id="loading"></a>
# In[22]:
# Collect image / segmentation / description paths; sorting makes the three
# lists line up positionally.
img_filepaths = glob.glob('../../sample_imgs/*.jp*')
seg_filepaths = glob.glob('../../sample_segs/*.png')
dsc_filepaths = glob.glob('../../sample_dscs/*')
img_filepaths = sorted(img_filepaths)
seg_filepaths = sorted(seg_filepaths)
dsc_filepaths = sorted(dsc_filepaths)
# Extract the numeric id embedded in each filename.
# NOTE(review): assumes names look like <prefix>_<prefix>_<number>... --
# confirm against the actual sample file naming scheme.
im_file_numbers = [str(i.split("_")[-1].split(".")[0]) for i in img_filepaths]
seg_file_numbers = [str(seg_filepaths[i].split("_")[2]) for i in range(len(seg_filepaths))]
des_file_numbers = [str(dsc_filepaths[i].split("_")[2]) for i in range(len(dsc_filepaths))]
all_files = [im_file_numbers, seg_file_numbers, des_file_numbers]
# Keep the shortest id list: only ids present in all three folders matter.
total_file_count = np.inf
total_files = []
for directory in all_files:
    if len(directory) < total_file_count:
        total_file_count = len(directory)
        total_files = directory
# Here we have to make sure all of the files are in the same order so we're using the same segmentations for the images
# In[23]:
def consistency_fix(total_files):
    """Keep only ids for which the image, segmentation and description file
    all exist on disk; returns the three filtered path lists.

    NOTE(review): relies on the module-level img_filepaths / seg_filepaths /
    dsc_filepaths globals and reconstructs filenames from the first entry of
    each list -- fragile if the naming scheme varies.
    """
    s = "_"
    images = []
    segs = []
    dscs = []
    # Template pieces taken from the first file of each kind.
    im = img_filepaths[0].split("_")
    j = seg_filepaths[0].split("_")
    k = dsc_filepaths[0].split("_")
    for i in total_files:
        im_file = s.join(im[:2]) + "_" + i + "." + im[-1].split(".")[-1]
        seg_file = s.join(j[:2]) + "_" + i + "_" + j[-1]
        des_file = s.join(k[:2]) + "_" + i
        # NOTE(review): bitwise & on bools works here but `and` is the
        # idiomatic (short-circuiting) operator.
        if isfile(im_file) & isfile(seg_file) & isfile(des_file):
            images.append(im_file)
            segs.append(seg_file)
            dscs.append(des_file)
        else:
            continue
    return images, segs, dscs
#while consistent != True:
fixed_img, fixed_seg, fixed_dsc = consistency_fix(total_files)
print(len(fixed_dsc))
print(len(fixed_img))
print(len(fixed_seg))

# In[25]:

# Sanity check: every image must carry the same id as its segmentation and
# its description file.
for i in range(len(fixed_img)):
    # BUG FIX: the original indexed element 0 on every iteration, so only
    # the first triple was ever actually checked.
    image_number = int(fixed_img[i].split("_")[-1].split(".")[0])
    segmentation_number = int(fixed_seg[i].split("_")[2])
    description_number = int(fixed_dsc[i].split("_")[2])
    if image_number != segmentation_number or image_number != description_number:
        print("Error in file order")
        break
img_filepaths = fixed_img
seg_filepaths = fixed_seg
dsc_filepaths = fixed_dsc
# Checks to make sure each image corresponds to its segmentation
# # Analysis <a id="analyze"></a>

# In[51]:

all_images = []  # flattened RGB vectors, one per sample
classes = []     # diagnosis label per sample, parallel to all_images
for i in tqdm(range(len(img_filepaths))):  # replace with length of sample_imgs
    # Grab original image and segmented version
    color = PIL.Image.open(img_filepaths[i])
    segged = PIL.Image.open(seg_filepaths[i])
    # Close the description file deterministically (the original leaked it).
    with open(dsc_filepaths[i]) as json_file:
        description = json.load(json_file)
    try:
        diag_class = description["meta"]["clinical"]['diagnosis']
        if diag_class is not None:
            classes.append(diag_class)
        else:
            classes.append("None")
    except KeyError:
        # BUG FIX: the original appended nothing on a missing key, letting
        # `classes` drift out of alignment with `all_images`; record the
        # same "None" placeholder used for null diagnoses.
        classes.append("None")
    # Try using different attributes for your classes
    #classes.append(description["meta"]["clinical"]['anatom_site_general'])
    #classes.append(description["meta"]["clinical"]['benign_malignant'])

    # Get blank background
    np_im = np.zeros((300,400))
    backtorgb = cv2.cvtColor(np.float32(np_im),cv2.COLOR_GRAY2RGB)
    blank_array = backtorgb * 255
    blank_array = blank_array.astype(np.uint8)
    sam = PIL.Image.fromarray(blank_array)

    # Copy original picture on blank background
    back_im = sam.copy()
    back_im.paste(color, (0, 0), segged)
    im_matrix = np.array(back_im)
    im_matrix = im_matrix.flatten()
    all_images.append(im_matrix)
# K means needs numpy arrays
# In[52]:
all_images = np.array(all_images) # HERE
# One cluster per distinct diagnosis label.
total_classes = len(set(classes))
print(len(classes))
# Performs K-means algorithm <br>
# K-means boils down to 5 steps: <br>
# 1. Randomly select centroids (centers of each cluster).
# 2. Calculate the distance of all data points to the centroids.
# 3. Assign data points to the closest cluster.
# 4. Find the new centroids of each cluster by taking the mean of all data points in the cluster.
# 5. Repeat steps 2,3 and 4 until all points converge or you reach your max iterations.
# In[6]:
# KMeans requires floating-point features.
all_images = all_images.astype('float64')
print("Starting KMeans")
# Sklearn version (no GPU)
kmeans = KMeans(init='k-means++', n_clusters=total_classes, max_iter=500)
kmeans.fit(all_images)
# We get the cluster predictions and all the different classes for the images
# In[53]:
print("Starting predictions")
pred_classes = kmeans.predict(all_images)
classes = np.array(classes)
# In[6]:
# Alternative GPU implementation (kmeans_pytorch), kept for reference:
#all_images = torch.from_numpy(all_images)
# set device
#if torch.cuda.is_available():
#    device = torch.device('cuda:0')
#else:
#    device = torch.device('cpu')
#
#print ("Running on: ", device)
#pred_classes, cluster_centers = kmeans(
#    X=all_images, num_clusters=total_classes, distance='euclidean', device=device
#)
#classes = classes.astype('float64')
#pred_classes = pred_classes.astype('float64')
#pred_classes =pred_classes.numpy()
print(type(all_images))
print("Done Clustering")
# ### Here we do a couple of things: <br>
# 1. We look at the number of clusters we have
# 2. Then find classes that have been assigned to that cluster
# 3. We then count the number of times that class has appeared

# In[8]:

print(len(classes))
print(len(pred_classes))
# For every cluster, count how many images of each diagnosis landed in it.
cluster_data = dict()
for cluster in range(total_classes):
    # Hoisted out of the inner loop: cluster membership does not depend on
    # the diagnosis being counted (the original recomputed np.where for
    # every diagnosis).
    members = list(classes[np.where(pred_classes == cluster)])
    mini_dic = dict()
    for diagnosis in np.unique(classes):
        mini_dic[diagnosis] = members.count(diagnosis)
    cluster_data[cluster] = mini_dic
# Our data is a dictionary of dictionaries, which isn't very easy to visualize. So we are going to turn it into a few lists

# In[14]:

all_diagnosis = classes
all_clusters = pred_classes
print("Done getting diagnosis counts")

# # Visualization <a id="viz_cell"><a/>
# ### It is hard to visualize clustering especially when working with high dimensional data like images. So you might have to get creative.
#
# All the features need to be numpy arrays for visualization

# In[15]:

print(len(all_clusters))
print(len(all_diagnosis))
all_diagnosis = np.array(all_diagnosis)
all_clusters = np.array(all_clusters)

# Here we used color to represent the clusters, count to represent the size of the clusters, and the x-axis for the classes. Usually we are not concerned with the exact class labels but in this case we want to know the number of different diagnoses in a cluster.

# In[48]:

print("Plotting")
# The duplicated colors/figure/len-print/bookkeeping block that used to sit
# here was removed: it created an extra unused figure, and cluster_sizes /
# counted_clusters are (re)initialised right before the counting loop below.
colors = plt.cm.jet(np.linspace(0,1,total_classes))
fig, ax = plt.subplots(figsize=(14,10))
def get_diagnosis(cluster, cluster_data, tried_diagnoses):
    """Return (diagnosis, count) for the most frequent diagnosis in the
    cluster, ignoring any diagnosis listed in tried_diagnoses."""
    counts_by_diagnosis = dict(cluster_data[cluster])
    for diagnosis in tried_diagnoses:
        counts_by_diagnosis.pop(diagnosis, None)
    # First key with the maximal count, in insertion order -- matches the
    # original list-based index lookup.
    dominant_diagnosis = max(counts_by_diagnosis, key=counts_by_diagnosis.get)
    return dominant_diagnosis, counts_by_diagnosis[dominant_diagnosis]
cluster_sizes = dict()  # dominant diagnosis -> size of its cluster
counted_clusters = 0
# Assign each cluster the most frequent diagnosis not already claimed by an
# earlier cluster.
for cluster in range(total_classes):
    while len(cluster_sizes) == counted_clusters:
        found_clusters = list(cluster_sizes.keys())
        diagnosis, count = get_diagnosis(cluster, cluster_data, found_clusters)
        if diagnosis not in found_clusters:
            cluster_sizes[diagnosis] = count
        else:
            # NOTE(review): get_diagnosis already excludes found_clusters,
            # so this branch looks unreachable (and would spin forever if it
            # were reached) -- confirm and consider removing.
            diagnosis, count = get_diagnosis(cluster, cluster_data, found_clusters)
    counted_clusters += 1
# Scatter the per-diagnosis counts of every cluster, one colour per cluster.
for cluster in np.unique(all_clusters):
    ax.scatter(list(cluster_data[cluster].keys()), list(cluster_data[cluster].values()), c = colors[cluster], label = "Cluster " + str(cluster + 1), s = 100)
plt.ylabel("Count")
plt.xlabel("Diagnosis")
plt.legend()
# NOTE(review): the three calls above are repeated verbatim here.
plt.ylabel("Count")
plt.xlabel("Diagnosis")
plt.legend()
plt.savefig("../expert_Q3_cluster.png")
#plt.imread("../expert_Q3_cluster.png")
# Now that we know the sizes of the cluster we want to see how well they represent the distribution. Here we load the data from question one.
# In[ ]:
plt.close()
frequency = pd.read_csv('../diagnosis_distribution.csv')
cluster_sizes = pd.DataFrame(list(zip(cluster_sizes.keys(), cluster_sizes.values())), columns= ["Diagnosis", "Count"] )
# Next we simply look at the difference of the two values obtained for each diagnosis and visualize it.
# In[49]:
difference = dict()
for diagnosis in cluster_sizes["Diagnosis"]:
    # NOTE(review): int() on a one-element Series works but is deprecated in
    # newer pandas; .item() would be the explicit spelling.
    cluster_info = int(cluster_sizes[cluster_sizes["Diagnosis"] == diagnosis]["Count"])
    d = frequency[frequency["Diagnosis"] == diagnosis]["Count"]
    print(d)
    try:
        frequency_info = int(d)
    except TypeError:
        # Diagnosis absent from the question-1 distribution: treat as zero.
        frequency_info = 0
    difference[diagnosis] = abs(cluster_info - frequency_info)
# In[61]:
ax = plt.gca()
plt.setp(ax.get_xticklabels(), rotation=30, horizontalalignment='right')
plt.bar(list(difference.keys()), list(difference.values()))
plt.savefig("../expert_Q3_error.png")
#plt.imread("../expert_Q3_error.png")
# # Discussion <a id=discussion></a>
# Did KMeans put the diagnoses in the appropriate clusters? <br>
# Does the amount of diagnoses look similar to the distribution in question 1 for novice? <br>
# Was the difference large or small?
# In[ ]:
# In[50]:
#get_ipython().system('jupyter nbconvert --to script Q3.ipynb')
# In[ ]:
|
'''
Disclaimer: this code is highly based on trpo_mpi at @openai/baselines and @openai/imitation
'''
import argparse
import os.path as osp
import logging
import numpy as np
import gym
import os
from mpi4py import MPI
from tqdm import tqdm
from baselines.gail import mlp_policy
from baselines.common import set_global_seeds, tf_util as U
from baselines.common.misc_util import boolean_flag
from baselines import logger
from baselines.gail.dataset.mujoco_dset import Mujoco_Dset
from baselines.gail.adversary import TransitionClassifier
def argsparser():
    """Build and parse the command-line arguments for GAIL training."""
    parser = argparse.ArgumentParser("Tensorflow Implementation of GAIL")
    # Environment Configuration
    parser.add_argument('--env_id', help='environment ID', default='Hopper-v1')
    parser.add_argument('--seed', help='RNG seed', type=int, default=0)
    parser.add_argument('--max_path_length', help='Max path length', type=int, default=1000)
    # parser.add_argument('--delay_freq', help='Delay frequency', type=int, default=10)
    parser.add_argument('--expert_path', type=str, default='dataset/hopper.npz')
    # Task Configuration
    parser.add_argument('--task', type=str, choices=['train', 'evaluate', 'sample'], default='train')
    # ------------------------------------------------------------------------------------------------------------------
    # Evaluate Configuration
    boolean_flag(parser, 'stochastic_policy', default=False, help='use stochastic/deterministic policy to evaluate')
    boolean_flag(parser, 'save_sample', default=False, help='save the trajectories or not')
    # ------------------------------------------------------------------------------------------------------------------
    # Train Configuration
    # Mujoco Dataset Configuration
    parser.add_argument('--traj_limitation', type=int, default=-1)
    parser.add_argument('--subsample_freq', type=int, default=20)
    # Optimization Configuration
    parser.add_argument('--timesteps_per_batch', help='number of timesteps in each batch', type=int, default=1000)
    parser.add_argument('--g_step', help='number of steps to train policy in each epoch', type=int, default=1)
    parser.add_argument('--d_step', help='number of steps to train discriminator in each epoch', type=int, default=5)
    # Network Configuration (Using MLP Policy)
    parser.add_argument('--policy_hidden_size', type=int, default=100)
    parser.add_argument('--adversary_hidden_size', type=int, default=100)
    boolean_flag(parser, 'gaussian_fixed_var', default=False, help='use the fixed var for each state')
    # Algorithms Configuration
    parser.add_argument('--algo', type=str, choices=['trpo', 'ppo'], default='trpo')
    boolean_flag(parser, 'obs_normalize', default=False, help='whether to perform obs normalization in the policy')
    parser.add_argument('--max_kl', type=float, default=0.01)
    parser.add_argument('--policy_entcoeff', help='entropy coefficiency of policy', type=float, default=0)
    parser.add_argument('--adversary_entcoeff', help='entropy coefficiency of discriminator', type=float, default=1e-3)
    # Training Configuration
    # BUG FIX: argparse applies `type` only to command-line strings, not to
    # defaults, so `default=2e3` / `default=1e4` left these attributes as
    # floats when the flags were omitted; use int literals instead.
    parser.add_argument('--num_epochs', help='Number of training epochs', type=int, default=2000)
    parser.add_argument('--evaluation_freq', help='Number of updates to evaluate', type=int, default=10)
    parser.add_argument('--log_dir', help='the directory to save log file', default='log')
    parser.add_argument('--load_model_path', help='if provided, load the model', type=str, default=None)
    parser.add_argument('--save_per_iter', help='save model every xx iterations', type=int, default=100)
    parser.add_argument('--checkpoint_dir', help='the directory to save model', default='checkpoint')
    # Behavior Cloning
    boolean_flag(parser, 'pretrained', default=False, help='Use BC to pretrain')
    parser.add_argument('--BC_max_iter', help='Max iteration for training BC', type=int, default=10000)
    # ------------------------------------------------------------------------------------------------------------------
    return parser.parse_args()
def get_task_name(args):
    """Build a descriptive run name from the parsed hyper-parameters."""
    parts = ["run_gail_env_" + args.env_id]
    if args.pretrained:
        parts.append("_with_pretrained")
    if args.obs_normalize:
        parts.append("_with_obs_normalize")
    # NOTE(review): an integer traj_limitation never equals np.inf, so this
    # suffix is always appended -- possibly `!= -1` was meant. Behavior is
    # preserved as-is.
    if args.traj_limitation != np.inf:
        parts.append("_traj_limitation_" + str(args.traj_limitation))
    parts.append("_subsample_freq_" + str(args.subsample_freq))
    parts.append("_g_step_" + str(args.g_step))
    parts.append("_d_step_" + str(args.d_step))
    parts.append("_policy_entcoeff_" + str(args.policy_entcoeff))
    parts.append("_adversary_entcoeff_" + str(args.adversary_entcoeff))
    parts.append("_timesteps_per_batch_" + str(args.timesteps_per_batch))
    parts.append("_gaussian_fixed_var_" + str(args.gaussian_fixed_var))
    parts.append("_seed_" + str(args.seed))
    return "".join(parts)
def main(args):
    """Entry point: build train/eval envs, configure logging and dispatch on
    args.task ('train' or 'evaluate'; 'sample' is not implemented)."""
    U.make_session(num_cpu=1).__enter__()
    set_global_seeds(args.seed)
    env = gym.make(args.env_id)
    # env = DelayRewardWrapper(env, args.delay_freq, args.max_path_length)
    eval_env = gym.make(args.env_id)
    logger.configure(os.path.join("log", "GAIL", args.env_id, "subsample_{}".format(args.subsample_freq),
                                  "traj_{}".format(args.traj_limitation), "seed_{}".format(args.seed)))
    # Policy factory shared by training and evaluation.
    def policy_fn(name, ob_space, ac_space, reuse=False):
        return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
                                    reuse=reuse, hid_size=args.policy_hidden_size, num_hid_layers=2,
                                    gaussian_fixed_var=args.gaussian_fixed_var, obs_normalize=args.obs_normalize)
    env.seed(args.seed)
    eval_env.seed(args.seed)
    gym.logger.setLevel(logging.WARN)
    task_name = get_task_name(args)
    args.checkpoint_dir = osp.join(args.checkpoint_dir, task_name)
    args.log_dir = osp.join(args.log_dir, "GAIL", task_name)
    if args.task == 'train':
        dataset = Mujoco_Dset(expert_path=args.expert_path, traj_limitation=args.traj_limitation, data_subsample_freq=args.subsample_freq)
        # The discriminator acts as the learned reward for TRPO.
        reward_giver = TransitionClassifier(env, args.adversary_hidden_size, entcoeff=args.adversary_entcoeff, obs_normalize=args.obs_normalize)
        train(env,
              eval_env,
              args.seed,
              policy_fn,
              reward_giver,
              dataset,
              args.algo,
              args.g_step,
              args.d_step,
              args.policy_entcoeff,
              args.save_per_iter,
              args.checkpoint_dir,
              args.log_dir,
              args.pretrained,
              args.BC_max_iter,
              args.num_epochs,
              args.evaluation_freq,
              args.timesteps_per_batch,
              task_name,
              )
    elif args.task == 'evaluate':
        runner(env,
               policy_fn,
               args.load_model_path,
               timesteps_per_batch=args.timesteps_per_batch,
               number_trajs=10,
               stochastic_policy=args.stochastic_policy,
               save=args.save_sample
               )
    else:
        raise NotImplementedError
    env.close()
def train(env, eval_env, seed, policy_fn, reward_giver, dataset, algo,
          g_step, d_step, policy_entcoeff, save_per_iter,
          checkpoint_dir, log_dir, pretrained, BC_max_iter, num_epochs, evaluation_freq, timesteps_per_batch,
          task_name=None):
    """Run GAIL training with the requested algorithm (only 'trpo' is
    implemented), optionally warm-starting the policy via behavior cloning."""
    pretrained_weight = None
    if pretrained and (BC_max_iter > 0):
        # Pretrain with behavior cloning
        from baselines.gail import behavior_clone
        pretrained_weight = behavior_clone.learn(env, policy_fn, dataset,
                                                 max_iters=BC_max_iter)
    if algo == 'trpo':
        from baselines.gail import trpo_mpi
        # Set up for MPI seed
        rank = MPI.COMM_WORLD.Get_rank()
        # Only rank 0 logs; the other workers are silenced.
        if rank != 0:
            logger.set_level(logger.DISABLED)
        # Distinct deterministic seed per MPI worker.
        workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
        set_global_seeds(workerseed)
        env.seed(workerseed)
        trpo_mpi.learn(env, eval_env, policy_fn, reward_giver, dataset, rank,
                       pretrained=pretrained, pretrained_weight=pretrained_weight,
                       g_step=g_step, d_step=d_step,
                       entcoeff=policy_entcoeff,
                       ckpt_dir=checkpoint_dir, log_dir=log_dir,
                       save_per_iter=save_per_iter,
                       timesteps_per_batch=timesteps_per_batch,
                       max_kl=0.01, cg_iters=10, cg_damping=0.1,
                       gamma=0.995, lam=0.97,
                       vf_iters=5, vf_stepsize=1e-3,
                       num_epochs=num_epochs,
                       evaluation_freq=evaluation_freq,
                       task_name=task_name)
    else:
        raise NotImplementedError
def runner(env, policy_func, load_model_path, timesteps_per_batch, number_trajs,
           stochastic_policy, save=False, reuse=False):
    """Roll out `number_trajs` episodes with the policy and return summary
    return statistics; optionally save the trajectories to an .npz file."""
    # Setup network
    # ----------------------------------------
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_func("pi", ob_space, ac_space, reuse=reuse)
    # NOTE(review): both U.initialize() and U.load_state(load_model_path)
    # are commented out, so the supplied checkpoint is never actually
    # loaded here -- confirm this is intended.
    # U.initialize()
    # Prepare for rollouts
    # ----------------------------------------
    # U.load_state(load_model_path)
    obs_list = []
    acs_list = []
    len_list = []
    ret_list = []
    for _ in tqdm(range(number_trajs)):
        traj = traj_1_generator(pi, env, timesteps_per_batch, stochastic=stochastic_policy)
        obs, acs, ep_len, ep_ret = traj['ob'], traj['ac'], traj['ep_len'], traj['ep_ret']
        obs_list.append(obs)
        acs_list.append(acs)
        len_list.append(ep_len)
        ret_list.append(ep_ret)
    if stochastic_policy:
        print('stochastic policy:')
    else:
        print('deterministic policy:')
    if save:
        filename = load_model_path.split('/')[-1] + '.' + env.spec.id
        np.savez(filename, obs=np.array(obs_list), acs=np.array(acs_list),
                 lens=np.array(len_list), rets=np.array(ret_list))
    output_infos = {"avg_return": np.mean(ret_list),
                    "std_return": np.std(ret_list),
                    "max_return": np.max(ret_list),
                    "min_return": np.min(ret_list),}
    return output_infos
# Sample one trajectory (until trajectory end)
def traj_1_generator(pi, env, horizon, stochastic):
    """Roll out a single episode (at most `horizon` steps) and return its
    observations, rewards, episode-start flags, actions, return and length."""
    ac = env.action_space.sample()  # not used, just so we have the datatype
    new = True  # marks if we're on first timestep of an episode
    ob = env.reset()

    ep_return = 0   # return accumulated over the current episode
    ep_length = 0   # number of steps taken in the current episode

    # History buffers, filled one entry per environment step.
    obs, rews, news, acs = [], [], [], []

    t = 0
    while True:
        ac, _vpred = pi.act(stochastic, ob)
        obs.append(ob)
        news.append(new)
        acs.append(ac)
        ob, rew, new, _ = env.step(ac)
        rews.append(rew)
        ep_return += rew
        ep_length += 1
        # Stop at episode end or once the horizon is exhausted.
        if new or t >= horizon:
            break
        t += 1

    return {"ob": np.array(obs), "rew": np.array(rews),
            "new": np.array(news), "ac": np.array(acs),
            "ep_ret": ep_return, "ep_len": ep_length}
if __name__ == '__main__':
    args = argsparser()
    # argparse leaves the 2e3 default as a float; coerce before use in range().
    args.num_epochs = int(args.num_epochs)
    # Derive the expert dataset path from the env id, e.g.
    # Hopper-v1 -> dataset/hopper.npz.
    # NOTE(review): only "-v1" suffixes are stripped here.
    args.expert_path = 'dataset/{}.npz'.format(args.env_id).lower().replace("-v1", "")  # set expert path
    main(args)
|
import numpy as np
import random
from cost_functions import *
from constants import *
import heapq
# TODO - tweak weights to existing cost functions
# (cost_function, weight) pairs summed by calculate_cost(); the weights tune
# the relative importance of each penalty when ranking candidate trajectories.
WEIGHTED_COST_FUNCTIONS = [
    (time_diff_cost,           1),   # deviation from the requested duration
    (s_diff_cost,              8),   # s coordinate differ from the goal cost
    (d_diff_cost,              7),   # d coordinate differ from the goal cost
    (collision_cost,          20),   # collisions cost (dominant weight)
    (buffer_cost,              1),   # getting close to other vehicles cost
    (exceeds_speed_limit_cost, 1),   # penalize exceeding the speed limit
    (efficiency_cost,          1),   # rewards high average speeds cost
    (total_accel_cost,         1),   # penalize total acceleration
    (max_accel_cost,           1),   # penalize peak acceleration
    (max_jerk_cost,            1),   # penalize peak jerk
    (total_jerk_cost,          1),   # penalize total jerk
    ]
def PTG(start_s, start_d, target_vehicle, delta, T, predictions):
    """
    Finds the best trajectory according to WEIGHTED_COST_FUNCTIONS (global).

    arguments:
     start_s - [s, s_dot, s_ddot]
     start_d - [d, d_dot, d_ddot]
     target_vehicle - id of leading vehicle (int) which can be used to retrieve
       that vehicle from the "predictions" dictionary. This is the vehicle that
       we are setting our trajectory relative to.
     delta - a length 6 array indicating the offset we are aiming for between us
       and the target_vehicle. So if at time 5 the target vehicle will be at
       [100, 10, 0, 0, 0, 0] and delta is [-10, 0, 0, 4, 0, 0], then our goal
       state for t = 5 will be [90, 10, 0, 4, 0, 0]. This would correspond to a
       goal of "follow 10 meters behind and 4 meters to the right of target vehicle"
     T - the desired time at which we will be at the goal (relative to now as t=0)
     predictions - dictionary of {v_id : vehicle }. Each vehicle has a method
       vehicle.state_in(time) which returns a length 6 array giving that vehicle's
       expected [s, s_dot, s_ddot, d, d_dot, d_ddot] state at that time.

    return:
     (best, others) where best is (s_coefficients, d_coefficients, t) of the
     lowest-cost trajectory and others contains the remaining candidates.
    """
    target = predictions[target_vehicle]
    # Generate alternative goals around T (+/- 4 half-second steps), each with
    # N_SAMPLES gaussian perturbations.
    all_goals = []
    timestep = 0.5
    t = T - 4 * timestep
    while t <= T + 4 * timestep:
        target_state = np.array(target.state_in(t)) + np.array(delta)
        goal_s = target_state[:3]
        goal_d = target_state[3:]
        goals = [(goal_s, goal_d, t)]
        for _ in range(N_SAMPLES):
            perturbed = perturb_goal(goal_s, goal_d)
            # filter all invalid perturbed goals (too far from the nominal goal)
            invalid = [(abs(goal_s[0] - s) * abs(goal_d[0] - d) > 10)
                       for s in perturbed[0] for d in perturbed[1]]
            if True in invalid: continue
            goals.append((perturbed[0], perturbed[1], t))
        all_goals += goals
        t += timestep

    # Find the best trajectory via a min-heap keyed on cost.
    # BUG FIX: heap entries were (cost, trajectory); on equal costs heapq falls
    # back to comparing trajectories, whose numpy-array coefficients raise
    # "truth value of an array is ambiguous". A monotonically increasing
    # tie-breaker guarantees comparison never reaches the trajectory.
    trajectories_heap = []
    for tie_breaker, goal in enumerate(all_goals):
        s_goal, d_goal, t = goal
        s_coefficients = JMT(start_s, s_goal, t)
        d_coefficients = JMT(start_d, d_goal, t)
        trajectory = tuple([s_coefficients, d_coefficients, t])
        cost = calculate_cost(trajectory, target_vehicle, delta, T,
                              predictions, WEIGHTED_COST_FUNCTIONS)
        heapq.heappush(trajectories_heap, (cost, tie_breaker, trajectory))
    best = heapq.heappop(trajectories_heap)
    others = [other[2] for other in trajectories_heap]
    print("Best cost : ")
    calculate_cost(best[2], target_vehicle, delta, T,
                   predictions, WEIGHTED_COST_FUNCTIONS, verbose=True)
    return best[2], others
def calculate_cost(trajectory, target_vehicle, delta, goal_t,
                   predictions, cost_functions_with_weights, verbose=False):
    """Weighted sum of every cost function applied to `trajectory`.

    Each entry of `cost_functions_with_weights` is a (function, weight) pair;
    with `verbose` set, the individual contributions are printed.
    """
    total = 0
    for cost_function, weight in cost_functions_with_weights:
        contribution = weight * cost_function(trajectory, target_vehicle,
                                              delta, goal_t, predictions)
        total += contribution
        if verbose:
            print("  cost for {:<40}: {:+4.2f} weight: {}".format(cost_function.__name__,
                                                                  contribution, weight))
    return total
def perturb_goal(goal_s, goal_d):
    """
    Returns a "perturbed" version of the goal: each component of the s and d
    goal vectors is resampled from a gaussian centered on it, with standard
    deviations taken from the SIGMA_S / SIGMA_D constants.
    """
    perturbed_s = [random.gauss(mu, sig) for mu, sig in zip(goal_s, SIGMA_S)]
    perturbed_d = [random.gauss(mu, sig) for mu, sig in zip(goal_d, SIGMA_D)]
    return (perturbed_s, perturbed_d)
def JMT(start, end, T):
    """
    Calculates the Jerk Minimizing Trajectory between `start` and `end`
    boundary conditions ([pos, vel, accel] each) over duration T.
    Returns the six coefficients of the quintic polynomial.
    """
    # The first three coefficients follow directly from the start state.
    a_0 = start[0]
    a_1 = start[1]
    a_2 = start[2] / 2.0

    # Contribution of the known coefficients at time T (value, slope, curvature).
    c_0 = a_0 + a_1 * T + a_2 * T ** 2
    c_1 = a_1 + 2 * a_2 * T
    c_2 = 2 * a_2

    # Solve the 3x3 linear system for the remaining coefficients a_3..a_5.
    A = np.array([
        [T ** 3,      T ** 4,       T ** 5],
        [3 * T ** 2,  4 * T ** 3,   5 * T ** 4],
        [6 * T,       12 * T ** 2,  20 * T ** 3],
    ])
    B = np.array([end[0] - c_0,
                  end[1] - c_1,
                  end[2] - c_2])
    tail = np.linalg.solve(A, B)

    return np.concatenate([np.array([a_0, a_1, a_2]), tail])
|
"""Components that apply forcing. See jax_cfd.base.forcings for forcing API."""
from typing import Callable
import gin
from jax_cfd.base import equations
from jax_cfd.base import forcings
from jax_cfd.base import grids
from jax_cfd.spectral import forcings as spectral_forcings
# Type aliases: a forcing function, and a factory that builds one.
ForcingFn = forcings.ForcingFn
ForcingModule = Callable[..., ForcingFn]
# Expose the spectral forcing constructors to gin configuration files.
gin.external_configurable(spectral_forcings.kolmogorov_forcing_fn)
gin.external_configurable(spectral_forcings.spectral_no_forcing)
def sum_forcings(*forces: ForcingFn) -> ForcingFn:
  """Combine several forcing functions into one that sums their outputs."""
  def combined(v):
    contributions = [force(v) for force in forces]
    return equations.sum_fields(*contributions)
  return combined
@gin.register
def filtered_linear_forcing(grid: grids.Grid,
                            scale: float,
                            lower_wavenumber: float = 0,
                            upper_wavenumber: float = 4) -> ForcingFn:
  """Gin-configurable wrapper around `forcings.filtered_linear_forcing`."""
  return forcings.filtered_linear_forcing(
      lower_wavenumber, upper_wavenumber, coefficient=scale, grid=grid)
@gin.register
def linear_forcing(grid: grids.Grid,
                   scale: float) -> ForcingFn:
  """Gin-configurable wrapper around `forcings.linear_forcing`."""
  force_fn = forcings.linear_forcing(grid, scale)
  return force_fn
@gin.register
def kolmogorov_forcing(grid: grids.Grid,
                       scale: float = 0,
                       wavenumber: int = 2,
                       linear_coefficient: float = 0,
                       swap_xy: bool = False) -> ForcingFn:
  """Kolmogorov forcing, optionally summed with a linear forcing term."""
  base = forcings.kolmogorov_forcing(grid, scale, wavenumber, swap_xy)
  if linear_coefficient == 0:
    return base
  extra = forcings.linear_forcing(grid, linear_coefficient)
  return forcings.sum_forcings(base, extra)
@gin.register
def taylor_green_forcing(grid: grids.Grid,
                         scale: float = 0,
                         wavenumber: int = 2,
                         linear_coefficient: float = 0) -> ForcingFn:
  """Taylor-Green forcing, optionally summed with a linear forcing term."""
  base = forcings.taylor_green_forcing(grid, scale, wavenumber)
  if linear_coefficient == 0:
    return base
  extra = forcings.linear_forcing(grid, linear_coefficient)
  return forcings.sum_forcings(base, extra)
@gin.register
def no_forcing(grid: grids.Grid) -> ForcingFn:
  """Gin-registered pass-through to `forcings.no_forcing` (zero forcing)."""
  force_fn = forcings.no_forcing(grid)
  return force_fn
|
"""FastAPI main module for the Clearboard application.
origins : string[],
url to whitelist and on which the fastapi server should listen (basicly the core address)
"""
import base64
import os
import shutil
from functools import lru_cache
from typing import List, Optional, Tuple

import cv2  # Import the OpenCV library
import numpy as np
from fastapi import FastAPI, File, Response, UploadFile, WebSocket, WebSocketDisconnect
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel

from . import black_n_white, color, config, contrast, coord_loader, parallax
app = FastAPI()
class ConnectionManager:
    """Class to monitor websocket communication.

    Tracks each active connection together with the room it belongs to.
    """

    def __init__(self):
        # Each entry is a (websocket, room_name) pair.
        # BUG FIX: was `List[(WebSocket, str)]` — subscripting List with a bare
        # tuple raises TypeError when the annotation is evaluated at runtime;
        # the correct spelling is List[Tuple[...]].
        self.active_connections: List[Tuple[WebSocket, str]] = []

    async def connect(self, websocket: WebSocket, room_name: str):
        """Accept the websocket sent by the front and register it under its room."""
        await websocket.accept()
        self.active_connections.append((websocket, room_name))

    def disconnect(self, websocket: WebSocket, room_name):
        """Forget a connection previously registered with connect()."""
        self.active_connections.remove((websocket, room_name))

    async def broadcast(self, message: str, room_name: str):
        """Send `message` to every client currently registered in `room_name`."""
        for connection in self.active_connections:
            if room_name == connection[1]:
                await connection[0].send_text(message)
class Coordinates(BaseModel):
    """given a specific room name, class to define the coordinates for cropping"""
    # Each inner list is an [x, y] corner, sent by the front-end as strings.
    coord: List[List[str]] = []
    room_name: str
class Process(BaseModel):
    """given a specific room name, class to define the image process used"""
    # Name of the filter to apply, e.g. "Color", "B&W", "Contrast", "original".
    process: str
    room_name: str
manager = ConnectionManager()
@lru_cache()
def get_settings():
    """Load application settings from the environment (cached singleton)."""
    return config.Settings()
# NOTE(review): `origins` is actually the whole Settings object, not just an
# origins list — the name is misleading; consider renaming.
origins = get_settings()
MEDIA_ROOT = origins.MEDIA_ROOT
ORIGINS = origins.ORIGINS.split(",")
async def send_message_true_broadcast(room_name):
    """Notify all the participants of a room that a new picture is available."""
    await manager.broadcast("true", room_name)
# Allow the configured front-end origins to call this API from the browser.
app.add_middleware(
    CORSMiddleware,
    allow_origins=ORIGINS,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.post("/picture")
async def post_picture(file: UploadFile = File(...)):
    """Receive an unprocessed image from the jitsi box (not the student UI).

    The room name is the file name minus its extension — assumes a 3-letter
    extension such as ".jpg" (TODO confirm).
    """
    if not file:
        return {"message": "error"}
    room_name = file.filename[:-4]
    path = f"{MEDIA_ROOT}/{room_name}"
    path_original_image = f"{path}/{file.filename}"
    print(room_name)
    if not os.path.exists(path):
        os.makedirs(path)
    with open(path_original_image, "wb") as original_image:
        shutil.copyfileobj(file.file, original_image)
    # BUG FIX: the broadcast used to happen before the file was written,
    # letting clients request a picture that did not yet exist on disk.
    await send_message_true_broadcast(room_name)
    return {"message": file.filename}
def image_to_base64(img: np.ndarray) -> bytes:
    """Given a numpy 2D array, returns a JPEG image in base64 format"""
    # imencode returns (success_flag, buffer); only the buffer is needed.
    jpeg_buffer = cv2.imencode(".jpg", img)[1]
    encoded = base64.b64encode(jpeg_buffer)
    return encoded.decode("utf-8")
@app.get("/process")
async def get_process(room_name: str, process: str):
    """Apply the requested filter to the room's image and return it base64-encoded."""
    base_dir = MEDIA_ROOT + "/" + room_name + "/"
    original_img_path = base_dir + room_name + ".jpg"
    img_cropped_path = base_dir + room_name + "cropped.jpg"
    coord_path = base_dir + "coord.txt"
    processed_img_path = base_dir + room_name + process + ".jpg"

    # Nothing to do if the room has no stored image.
    if not os.path.exists(os.path.abspath(original_img_path)):
        return None

    # Crop first when the room has saved coordinates.
    if os.path.exists(os.path.abspath(coord_path)):
        parallax.crop(
            original_img_path, coord_loader.get_coords(coord_path), img_cropped_path
        )
        img_to_process = img_cropped_path
    else:
        img_to_process = original_img_path

    # Dispatch on the requested filter; anything unknown (incl. "original")
    # returns the unfiltered image.
    filters = {
        "Color": color.whiteboard_enhance,
        "B&W": black_n_white.black_n_white,
        "Contrast": contrast.enhance_contrast,
    }
    if process in filters:
        filters[process](img_to_process, processed_img_path)
    else:
        processed_img_path = img_to_process

    volume = np.asarray(cv2.imread(processed_img_path))
    return Response(content=image_to_base64(volume))
@app.get("/original_photo")
async def photo(room_name: Optional[str] = None):
    """Request from front to get the stored, unprocessed image of a room."""
    original_img_path = MEDIA_ROOT + "/" + room_name + "/" + room_name + ".jpg"
    if not os.path.exists(os.path.abspath(original_img_path)):
        print("original image not found")
        return None
    frame = cv2.imread(original_img_path)
    return Response(content=image_to_base64(np.asarray(frame)))
@app.websocket("/ws/{room_name}/{id}")
async def websocket_endpoint(websocket: WebSocket, room_name: str):
    """Open a websocket for a client of `room_name` and hold it until disconnect.

    NOTE(review): the route declares an `{id}` path segment the handler never
    receives as a parameter — confirm this is intentional.
    """
    await manager.connect(websocket, room_name)
    try:
        while True:
            # Messages from the client are drained but ignored; the socket is
            # only used server->client via manager.broadcast().
            await websocket.receive_text()
    except WebSocketDisconnect:
        manager.disconnect(websocket, room_name)
@app.post("/coord")
async def post_coord(coordinates: Coordinates):
    """Store crop coordinates sent by the front-end and notify the room."""
    room_name = coordinates.room_name
    # Corners arrive as strings (possibly fractional); store rounded-down ints.
    parsed = []
    for corner in coordinates.coord:
        parsed.append([int(float(corner[0])), int(float(corner[1]))])
    coord_dir_path = MEDIA_ROOT + "/" + room_name
    if not os.path.exists(coord_dir_path):
        os.makedirs(coord_dir_path)
    coord_loader.save_coords(coord_dir_path + "/coord.txt", parsed)
    await send_message_true_broadcast(room_name)
|
# Finite Decks, Aces = reactive
# Reinforcement learning agent which plays against a delaer
import numpy as np
import matplotlib.pyplot as plt
from rl_tools import (
simulation,
scorecalc,
countcalc,
initializedrawpile,
actionupdate,
acecheck,
newcard,
twist,
)
# stage 1 of learning - learn to maximize score.
def learning(e, nodecks):
    """Stage 1 of learning: learn a Q-table that maximizes score.

    e -- exploration rate forwarded to `simulation`
    nodecks -- number of decks in each fresh draw pile
    """
    # State axes: hand value (0-21 plus bust values), action (stick/twist),
    # and a 6-way discretisation of the running card count
    # (divisions: c<-10, -10<=c<-3, -3<=c<=3, 3<c<=10, c>10).
    Qtable = np.zeros((34, 2, 6))
    Instances = np.zeros((34, 2, 6))  # occurrences of each state/action pair
    # More episodes means better learning.
    for episode in range(5000):
        drawpile = initializedrawpile(nodecks)
        # Play games (each ends on fold or bust) until the pile is empty.
        while any(drawpile != 0):
            Qtable, Instances, drawpile = simulation(Qtable, Instances, drawpile, e)
        if episode % 100 == 0:
            print(f"Finished training episode {episode}\n")
    return Qtable
# function to test the results of the Qtable on unseen data. No exploration.
# this test function includes playing against a dealer, and a calculation of the
# winnings of each game.
def test(Qtable, nodecks):
    """Evaluate `Qtable` greedily (no exploration) against a dealer.

    Plays hands until the draw pile is exhausted and returns
    (mean score, total winnings, initial true count of each completed hand).
    NOTE(review): the ace-recheck branch below still calls actionupdate with
    the module-level `e`, which can explore — confirm that is intended here.
    """
    # set up empty arrays
    testscore = np.asarray([])
    winnings = np.asarray([])
    initialcounts = np.asarray([])
    # most of the following is the same as simulation except no exploration and
    # at the end we play against dealer.
    drawpile = initializedrawpile(nodecks)
    while any(drawpile != 0):
        # record the true count before the hand is dealt
        initialcounts = np.append(
            initialcounts, countcalc(drawpile)
        )  # author note: "not where i want" — placement to revisit
        # receive first card
        card, drawpile = twist(drawpile)
        truecount = countcalc(drawpile)
        cardsinhand = np.array([0, card])
        # greedy action from the Q-table
        newaction = np.argmax(Qtable[sum(cardsinhand), :, truecount])
        # while they haven't folded or gone bust
        while (
            newaction == 1
            and (sum(cardsinhand) < 22 or 11 in cardsinhand)
            and any(drawpile != 0)
        ):
            if sum(cardsinhand) > 21:
                # if over 21 replace 11 with 1 for aces.
                cardsinhand = acecheck(sum(cardsinhand), cardsinhand)
                # now we have changed 11 to 1, find new action.
                newaction = actionupdate(Qtable, sum(cardsinhand), e, truecount)
            else:
                card, drawpile = newcard(newaction, drawpile)
                cardsinhand = np.append(cardsinhand, card)
                cardsinhand = acecheck(sum(cardsinhand), cardsinhand)
                truecount = countcalc(drawpile)
                # determine whether to stick or twist
                newaction = np.argmax(Qtable[sum(cardsinhand), :, truecount])
        if all(drawpile == 0):
            # pile ran out mid-hand: discard this hand's recorded count
            initialcounts = initialcounts[0:-1]
            break
        else:
            score = scorecalc(sum(cardsinhand), len(cardsinhand))
            testscore = np.append(testscore, score)
            # now player has played, dealer plays.
            dealerscore, drawpile = dealer(drawpile)
            # winningscalc function to work out winnings.
            winnings = np.append(winnings, winningscalc(score, dealerscore))
    return np.mean(testscore), sum(winnings), initialcounts
# policy for casino bot is to always twist for h<17 and fold for h>=17. Create
# function where input drawpile and outputs score obtained by dealer, and updated drawpile.
def dealer(drawpile):
    """Play the dealer's fixed policy (twist below 17, stick at 17+).

    Returns (dealer's score, updated drawpile).
    """
    # receive first card.
    card, drawpile = twist(drawpile)
    cardsinhand = np.asarray([card])
    newaction = 1
    # keep playing until the dealer sticks, busts, or the pile runs out
    while (
        newaction == 1
        and (sum(cardsinhand) < 22 or 11 in cardsinhand)
        and any(drawpile != 0)
    ):
        if sum(cardsinhand) > 21:
            # demote an ace from 11 to 1 before deciding again
            cardsinhand = acecheck(sum(cardsinhand), cardsinhand)
            newaction = dealeractioncalc(cardsinhand)
        else:
            card, drawpile = newcard(newaction, drawpile)
            # append the card that was drawn (to test for aces)
            cardsinhand = np.append(cardsinhand, card)
            newaction = dealeractioncalc(cardsinhand)
    score = scorecalc(sum(cardsinhand), len(cardsinhand))
    return score, drawpile
# dealer always folds if above 16
def dealeractioncalc(cardsinhand):
    """Dealer policy: stick (0) on 17 or more, otherwise twist (1)."""
    return 0 if sum(cardsinhand) >= 17 else 1
def winningscalc(score, dealerscore):
    """Payout of one hand: -1 lose, 0 push, 1 win, 1.5 for blackjack (1649)."""
    if score == 0:
        return -1    # player went bust
    if dealerscore > score:
        return -1    # dealer outscored the player
    if dealerscore == score:
        return 0     # push: stake returned
    if score == 1649:
        return 1.5   # blackjack (and dealer doesn't have one)
    return 1         # plain win: score > dealerscore
def plotwinnings(winnings, initialcounts):
    """Bar chart of average winnings per initial-count bin (bins 0-4).

    BUG FIX: all five tick labels used to be applied even when some bins had
    no samples, misaligning labels with bars (and raising ValueError in
    modern matplotlib when tick and label counts differ). Labels are now
    selected to match the bins actually present.
    """
    labels = ["C<-10", "-10<=C<-4", "-4<=C<=4", "4<C<=10", "C>10"]
    x = np.asarray([])
    y = np.asarray([])
    for i in range(5):
        # only plot bins that actually occurred in the data
        if len(initialcounts[initialcounts == i]) != 0:
            x = np.append(x, i)
            mean = np.mean(winnings[initialcounts == i])
            y = np.append(y, mean)
    fig, ax = plt.subplots()
    ax.bar(x, y)
    ax.set_xticks(x)
    ax.set_xticklabels([labels[int(i)] for i in x])
    ax.set_ylabel("Average Winnings")
    ax.set_title("Average winnings at different Counts")
if __name__ == "__main__":
    e = 0.1        # exploration rate (also read by test() via the global)
    nodecks = 6
    Qtable = learning(e, nodecks)
    print("Finished Qtable updates\n")
    # evaluate model over a number of episodes
    num_episodes = 3000
    winningsarray=np.zeros(num_episodes)
    testarray=np.zeros(num_episodes)
    # NOTE(review): score_tot / winnings_tot are never used below — dead vars?
    score_tot = 0; winnings_tot = 0
    for ep in range(num_episodes):
        # once model has learned how to maximize score, test it.
        testscore, winnings, _ = test(Qtable, nodecks)
        winningsarray[ep]=winnings
        testarray[ep]=testscore
        if ep % 100 == 0:
            print(f"Finished testing episode {ep}\n")
    # summary statistics over all test episodes
    avg_score = np.mean(testarray)
    avg_winnings = np.mean(winningsarray)
    std_winnings = np.std(winningsarray)
    sem_winnings = std_winnings / np.sqrt(num_episodes)
    print(f'''After testing:\n
    average score = {avg_score},
    average winnings = {avg_winnings},
    winnings standard error = {sem_winnings}\n''')
|
import logging
from df_engine.core.keywords import GLOBAL, LOCAL, RESPONSE, TRANSITIONS, PROCESSING
from df_engine.core import Context, Actor
import df_engine.labels as lbl
import df_engine.conditions as cnd
from examples import example_1_basics
logger = logging.getLogger(__name__)
def create_transitions():
    """Transition map shared by the script's nodes.

    FIX: the original listed `lbl.previous(): "previous"` twice; the entries
    were identical (same label factory, same destination), so the redundant
    one is removed.
    """
    return {
        ("left", "step_2"): "left",
        ("right", "step_2"): "right",
        lbl.previous(): "previous",
        lbl.to_start(): "start",
        lbl.forward(): "forward",
        lbl.backward(): "back",
        lbl.repeat(): "repeat",
        # fallback transition, always available
        lbl.to_fallback(): cnd.true(),
    }
def add_label_processing(ctx: Context, actor: Actor, *args, **kwargs) -> Context:
    """Processing step: prefix the node's response with the last transition label."""
    # Fall back to the upcoming node when nothing has processed it yet.
    processed_node = ctx.framework_states["actor"].get("processed_node", ctx.framework_states["actor"]["next_node"])
    processed_node.response = f"{ctx.last_label}: {processed_node.response}"
    ctx.framework_states["actor"]["processed_node"] = processed_node
    return ctx
def add_prefix(prefix):
    """Build a processing callback that prepends `prefix` to node responses."""
    def processing(ctx: Context, actor: Actor, *args, **kwargs) -> Context:
        actor_state = ctx.framework_states["actor"]
        # Fall back to the upcoming node when nothing has processed it yet.
        node = actor_state.get("processed_node", actor_state["next_node"])
        node.response = f"{prefix}: {node.response}"
        actor_state["processed_node"] = node
        return ctx
    return processing
# a dialog script: demonstrates PROCESSING precedence — a step-local entry
# overrides the flow-local entry at the same priority, which in turn
# overrides the GLOBAL one.
script = {
    "root": {
        "start": {RESPONSE: "", TRANSITIONS: {("flow", "step_0"): cnd.true()}},
        "fallback": {RESPONSE: "the end"},
    },
    GLOBAL: {PROCESSING: {1: add_prefix("l1_global"), 2: add_prefix("l2_global")}},
    "flow": {
        LOCAL: {PROCESSING: {2: add_prefix("l2_local"), 3: add_prefix("l3_local")}},
        "step_0": {RESPONSE: "first", TRANSITIONS: {lbl.forward(): cnd.true()}},
        "step_1": {
            PROCESSING: {1: add_prefix("l1_step_1")},
            RESPONSE: "second",
            TRANSITIONS: {lbl.forward(): cnd.true()},
        },
        "step_2": {
            PROCESSING: {2: add_prefix("l2_step_2")},
            RESPONSE: "third",
            TRANSITIONS: {lbl.forward(): cnd.true()},
        },
        "step_3": {
            PROCESSING: {3: add_prefix("l3_step_3")},
            RESPONSE: "fourth",
            TRANSITIONS: {lbl.forward(): cnd.true()},
        },
        "step_4": {PROCESSING: {4: add_prefix("l4_step_4")}, RESPONSE: "fifth", TRANSITIONS: {"step_0": cnd.true()}},
    },
}
actor = Actor(script, start_label=("root", "start"), fallback_label=("root", "fallback"))
# testing: expected responses show which processing entries won at each step
testing_dialog = [
    ("", "l3_local: l2_local: l1_global: first"),
    ("", "l3_local: l2_local: l1_step_1: second"),
    ("", "l3_local: l2_step_2: l1_global: third"),
    ("", "l3_step_3: l2_local: l1_global: fourth"),
    ("", "l4_step_4: l3_local: l2_local: l1_global: fifth"),
    ("", "l3_local: l2_local: l1_global: first"),
]
def run_test():
    """Replay `testing_dialog`, letting turn_handler assert each response."""
    ctx = {}
    for in_request, true_out_response in testing_dialog:
        _, ctx = example_1_basics.turn_handler(in_request, ctx, actor, true_out_response=true_out_response)
if __name__ == "__main__":
    logging.basicConfig(
        format="%(asctime)s-%(name)15s:%(lineno)3s:%(funcName)20s():%(levelname)s - %(message)s",
        level=logging.INFO,
    )
    # Swap the two lines below to run the scripted regression instead of
    # the interactive console session.
    # run_test()
    example_1_basics.run_interactive_mode(actor)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
import pgweb.core.models
class Migration(migrations.Migration):
    """Initial migration for pgweb core: countries, RSS feeds/items,
    languages, moderation notifications, organisations, user profiles
    and PostgreSQL version metadata."""

    dependencies = [
        ('auth', '0006_require_contenttypes_0002'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Country',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=100)),
                ('tld', models.CharField(max_length=3)),
            ],
            options={
                'ordering': ('name',),
                'db_table': 'countries',
                'verbose_name': 'Country',
                'verbose_name_plural': 'Countries',
            },
        ),
        migrations.CreateModel(
            name='ImportedRSSFeed',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('internalname', models.CharField(unique=True, max_length=32)),
                ('url', models.URLField()),
                ('purgepattern', models.CharField(help_text="NOTE! Pattern will be automatically anchored with ^ at the beginning, but you must lead with a slash in most cases - and don't forget to include the trailing $ in most cases", max_length=512, blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='ImportedRSSItem',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=100)),
                ('url', models.URLField()),
                ('posttime', models.DateTimeField()),
                ('feed', models.ForeignKey(to='core.ImportedRSSFeed', on_delete=models.CASCADE)),
            ],
        ),
        migrations.CreateModel(
            name='Language',
            fields=[
                # ISO 639 language identification; alpha3 is the natural key.
                ('alpha3', models.CharField(max_length=7, serialize=False, primary_key=True)),
                ('alpha3term', models.CharField(max_length=3, blank=True)),
                ('alpha2', models.CharField(max_length=2, blank=True)),
                ('name', models.CharField(max_length=100)),
                ('frenchname', models.CharField(max_length=100)),
            ],
            options={
                'ordering': ('name',),
            },
        ),
        migrations.CreateModel(
            name='ModerationNotification',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('objectid', models.IntegerField(db_index=True)),
                ('objecttype', models.CharField(max_length=100)),
                ('text', models.TextField()),
                ('author', models.CharField(max_length=100)),
                ('date', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ('-date',),
            },
        ),
        migrations.CreateModel(
            name='Organisation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=100)),
                ('approved', models.BooleanField(default=False)),
                ('address', models.TextField(blank=True)),
                ('url', models.URLField()),
                ('email', models.EmailField(max_length=254, blank=True)),
                ('phone', models.CharField(max_length=100, blank=True)),
                ('lastconfirmed', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'ordering': ('name',),
            },
        ),
        migrations.CreateModel(
            name='OrganisationType',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('typename', models.CharField(max_length=32)),
            ],
        ),
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('user', models.OneToOneField(primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
                ('sshkey', models.TextField(help_text='Paste one or more public keys in OpenSSH format, one per line.', verbose_name='SSH key', blank=True, validators=[pgweb.core.models.validate_sshkey])),
                ('lastmodified', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Version',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('tree', models.DecimalField(unique=True, max_digits=3, decimal_places=1)),
                ('latestminor', models.IntegerField(default=0, help_text="For testing versions, latestminor means latest beta/rc number. For other releases, it's the latest minor release number in the tree.")),
                ('reldate', models.DateField()),
                ('relnotes', models.CharField(max_length=32)),
                ('current', models.BooleanField(default=False)),
                ('supported', models.BooleanField(default=True)),
                ('testing', models.IntegerField(default=0, help_text='Testing level of this release. latestminor indicates beta/rc number', choices=[(0, 'Release'), (1, 'Release candidate'), (2, 'Beta'), (3, 'Alpha')])),
                ('docsloaded', models.DateTimeField(help_text='The timestamp of the latest docs load. Used to control indexing and info on developer docs.', null=True, blank=True)),
                ('firstreldate', models.DateField(help_text='The date of the .0 release in this tree')),
                ('eoldate', models.DateField(help_text='The final release date for this tree')),
            ],
            options={
                'ordering': ('-tree',),
            },
        ),
        # Many-to-many and FK fields added after both sides exist.
        migrations.AddField(
            model_name='organisation',
            name='managers',
            field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='organisation',
            name='orgtype',
            field=models.ForeignKey(verbose_name='Organisation type', to='core.OrganisationType', on_delete=models.CASCADE),
        ),
    ]
|
#!/sw/bin/python3.3
__author__ = 'Michael+Dan'
'Last Modified from Michael doron - 25.11.2014'
'''
TODO: Use os.path.join instead of concatenating path strings with '+'
(cross-OS compatibility, and it saves headaches).
Also, make it clearer how to run prediction only, and which option
combinations are valid or invalid.
TODO: Add "get top features" and training-set performance reporting to the
command-line supported options (via pipeline tasks).
TODO: Add an option for the user to choose which model (and parameters)
to use for predictions.
'''
from FeatureGen import featExt
from Model_trainer import trainClassifier
from FeatureGen import writeClassifiedFastas
import os
import time
import pandas as pd
import sklearn
from sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder
import numpy as np
profiler = None
##ADD OPT for classifier type , and if to use classifier tuning
import argparse


def _str2bool(value):
    """Parse a command-line boolean: 'true/1/yes/t/y' (any case) -> True.

    BUG FIX: these flags used `type=bool`, and `bool("False")` is True —
    any non-empty value parsed as True, so the flags could never be
    switched off from the command line.
    """
    return str(value).lower() in ('true', '1', 'yes', 't', 'y')


parser = argparse.ArgumentParser()
parser.add_argument('--trainingSetDir','-r',dest='trainingDir', type = str,help='The path to the training set fasta files')
parser.add_argument('--testingSetDir','-s',dest='testingDir', type = str,help='The path to the testing set fasta files')
parser.add_argument('--resultsDir','-rs',dest='resultsDir', type = str,help='The path to directory to write the results files')
parser.add_argument('--trainFeatures','-rf',dest='GetTrainingFeatures', type = _str2bool,default=True,help='Whether to extract the training set features')
parser.add_argument('--testFeatures','-sf',dest='GetTestFeatures', type = _str2bool,help='Whether to get the testing set features')
parser.add_argument('--classType','-ct',dest='classType', type = str,default='file',help='Defines the classname of each protein, by \'dir\', \'file\', or \'id\'."')
parser.add_argument('--outputTrainedModel','-om',dest='outputTrainedModel',default=False, type=_str2bool,help='Pickle (save) a trained model for future use (saved to directory of training set)')
parser.add_argument('--classifier','-c',dest='classifierType',default='forest',help='The type of the classifier')
def _require_accessible_dir(path, label):
    """Exit with a message unless `path` exists and is read/write/exec accessible."""
    if not os.path.exists(path):
        print('%s dir doesn\'t exist' % label)
        exit()
    if not (os.access(path, os.R_OK) and os.access(path, os.X_OK) and os.access(path, os.W_OK)):
        print('don\'t have permission to access %s dir' % label)
        exit()


def pipeline():
    """End-to-end ProFET run, driven by the module-level `parser`:

    1. optionally extract features from the training-set fasta directory,
    2. train a classifier on the extracted training features,
    3. optionally extract test-set features, predict labels, and write
       results (plus classified fastas when resultsDir is writable).
    """
    results = parser.parse_args()
    trainingDir = results.trainingDir
    testingDir = results.testingDir
    resultsDir = results.resultsDir
    GetTrainingFeatures = results.GetTrainingFeatures
    GetTestFeatures = results.GetTestFeatures
    classType = results.classType
    classifierType = results.classifierType
    outputTrainedModel = results.outputTrainedModel

    # Validate every directory the run will touch (was three copy-pasted blocks).
    if trainingDir:
        _require_accessible_dir(trainingDir, 'training')
    if testingDir:
        _require_accessible_dir(testingDir, 'testing')
    if resultsDir:
        _require_accessible_dir(resultsDir, 'results')

    print(profiler)

    if GetTrainingFeatures == True:
        print('Starting to extract features from training set')
        # If features were already extracted and saved, this call can be
        # skipped to avoid re-extraction (original author's note).
        featExt(directory=trainingDir, trainingSetFlag=True,
                classType=classType, normParams='.')
        print('Extracted training data features')

    # TODO: separate model training/prediction from feature extraction.
    if GetTestFeatures or outputTrainedModel:
        print('Training predictive model')
        model, lb_encoder = trainClassifier(filename=trainingDir + '/trainingSetFeatures.csv',
                                            normFlag=False, classifierType=classifierType,
                                            kbest=0, alpha=False, optimalFlag=False)
        print('Model trained')

    if GetTestFeatures == True:
        # TODO: if more than 4k seqs, predict in chunks.
        print()
        print('Extracting features from test set')
        print("trainingDir: ", trainingDir)
        featExt(directory=testingDir, trainingSetFlag=False, classType='dir',
                normParams=(trainingDir + '/trainingSetNormParams.csv'))
        print('Extracted test data features')
        # NOTE(review): DataFrame.from_csv is removed in modern pandas;
        # pd.read_csv(..., index_col=0) is the replacement — confirm before upgrading.
        dfTesting = pd.DataFrame.from_csv(testingDir + '/testingSetFeatures.csv')
        # Training features are loaded (2 rows) only for their column names,
        # to keep the test-set feature matrix consistent with the model.
        dfTraining = pd.io.parsers.read_csv(trainingDir + '/trainingSetFeatures.csv', nrows=2)

        # Keep only features known to the model; pad those the test set lacks
        # with zero columns.
        feature_cols = [col for col in dfTraining.columns if col not in ['classname', 'Id', 'proteinname']]
        common_cols = [col for col in feature_cols if col in dfTesting.columns]
        missing_cols = [col for col in feature_cols if col not in dfTesting.columns]
        dfTesting = dfTesting[common_cols]
        print("Orig dfTesting.shape:", dfTesting.shape)
        print("Missing_cols (in dfTesting: \n", missing_cols)
        print("len(dfTesting)", len(dfTesting), "len(dfTesting).columns", len(dfTesting.columns))
        for col in missing_cols:
            dfTesting[col] = pd.Series([0] * len(dfTesting))
        # NOTE(review): after padding, the column order is common+missing, not
        # the original training order; if the classifier is order-sensitive,
        # reindex with dfTesting[feature_cols] — confirm.
        print("dfTraining (shape) was:", dfTraining.shape)
        print("dfTesting shape (after padding features):", dfTesting.shape)
        print("Features matched")
        dfTesting.fillna(0, inplace=True)

        features = dfTesting.values
        print('Predicting labels')
        results = model.predict(features)
        labels = lb_encoder.inverse_transform(results)
        dfTesting['classname'] = labels
        df2 = dfTesting['classname']
        # BUG FIX: the results path used a Windows-only '\\' separator, which
        # on POSIX systems wrote a file literally named
        # "<dir>\PredictedTestSetResults.csv". Use '/' like every other path
        # in this function (matching the author's commented-out alternative).
        df2.to_csv(testingDir + '/PredictedTestSetResults.csv')
        print('Saved results to ' + testingDir + '/PredictedTestSetResults.csv')
        if os.access(resultsDir, os.F_OK) and os.access(resultsDir, os.W_OK):
            writeClassifiedFastas(classType, testingDir, resultsDir, df2)
        else:
            print("Classified fastas were not written - no access to %s" % resultsDir)
    profiler.dump_stats('profile.txt')
if __name__ == '__main__' :
    # Entry point: run the whole pipeline under a profiler.
    import cProfile
    # NOTE: ``profiler`` is intentionally a module-level global — pipeline()
    # calls profiler.dump_stats('profile.txt') on this same object.
    profiler = cProfile.Profile()
    # res holds pipeline()'s return value; currently unused.
    res = profiler.runcall(pipeline)
    print("Got Here")
|
import enum
import time
from pymodbus.constants import Endian
from pymodbus.payload import BinaryPayloadBuilder
from pymodbus.payload import BinaryPayloadDecoder
from pymodbus.client.sync import ModbusTcpClient
from pymodbus.client.sync import ModbusSerialClient
from pymodbus.register_read_message import ReadInputRegistersResponse
from pymodbus.register_read_message import ReadHoldingRegistersResponse
# Default Modbus communication parameters (overridable per instance).
RETRIES = 3  # read attempts before _read_*_registers gives up
TIMEOUT = 1  # client timeout, in seconds
UNIT = 1  # default Modbus unit (slave) id
class connectionType(enum.Enum):
    """Transport used to reach the meter."""

    RTU = enum.auto()  # serial line (ModbusSerialClient)
    TCP = enum.auto()  # network socket (ModbusTcpClient)
class registerType(enum.Enum):
    """Modbus register bank a value is read from."""

    INPUT = enum.auto()
    HOLDING = enum.auto()
class registerDataType(enum.Enum):
    """Wire encoding of a register value (only FLOAT32 is decoded here)."""

    BITS = enum.auto()
    UINT8 = enum.auto()
    UINT16 = enum.auto()
    UINT32 = enum.auto()
    UINT64 = enum.auto()
    INT8 = enum.auto()
    INT16 = enum.auto()
    INT32 = enum.auto()
    INT64 = enum.auto()
    FLOAT16 = enum.auto()
    FLOAT32 = enum.auto()
    STRING = enum.auto()
class SDM:
    """Base client for SDM-series power meters over Modbus RTU or TCP.

    Subclasses override ``model``/``baud`` and fill ``registers`` with
    entries of the form::

        name: (address, length, registerType, registerDataType,
               python value type, label, unit-or-options, batch number)

    ``batch`` groups registers that :meth:`read_all` fetches with a single
    bulk Modbus request.
    """

    model = "SDM"
    # Serial-line defaults; only used in RTU mode.
    stopbits = 1
    parity = "N"
    baud = 38400
    # Register map; empty on the base class, populated by subclasses.
    registers = {}

    def __init__(
        self, host=False, port=False,
        device=False, stopbits=False, parity=False, baud=False,
        timeout=TIMEOUT, retries=RETRIES, unit=UNIT,
        parent=False
    ):
        """Set up the Modbus connection.

        ``parent`` shares another meter's existing client/connection
        (e.g. several units on one bus); otherwise a truthy ``device``
        selects RTU mode and ``host``/``port`` select TCP mode.
        """
        if parent:
            # Reuse the parent's transport and connection settings.
            self.client = parent.client
            self.mode = parent.mode
            self.timeout = parent.timeout
            self.retries = parent.retries
            # NOTE(review): a falsy ``unit`` (0) silently inherits the
            # parent's unit id — confirm 0 is never a valid unit here.
            if unit:
                self.unit = unit
            else:
                self.unit = parent.unit
            if self.mode is connectionType.RTU:
                self.device = parent.device
                self.stopbits = parent.stopbits
                self.parity = parent.parity
                self.baud = parent.baud
            elif self.mode is connectionType.TCP:
                self.host = parent.host
                self.port = parent.port
            else:
                raise NotImplementedError(self.mode)
        else:
            self.host = host
            self.port = port
            self.device = device
            if stopbits:
                self.stopbits = stopbits
            # Parity must be one of N/E/O (case-insensitive).
            if (parity
                    and parity.upper() in ["N", "E", "O"]):
                self.parity = parity.upper()
            else:
                # NOTE(review): an unrecognised parity is stored as False and
                # passed straight to ModbusSerialClient below — confirm this
                # failure mode is intended rather than falling back to "N".
                self.parity = False
            if baud:
                self.baud = baud
            self.timeout = timeout
            self.retries = retries
            self.unit = unit
            if device:
                self.mode = connectionType.RTU
                self.client = ModbusSerialClient(
                    method="rtu",
                    port=self.device,
                    stopbits=self.stopbits,
                    parity=self.parity,
                    baudrate=self.baud,
                    timeout=self.timeout)
            else:
                self.mode = connectionType.TCP
                self.client = ModbusTcpClient(
                    host=self.host,
                    port=self.port,
                    timeout=self.timeout
                )

    def __repr__(self):
        """Human-readable connection summary, varying with the transport."""
        if self.mode == connectionType.RTU:
            return f"{self.model}({self.device}, {self.mode}: stopbits={self.stopbits}, parity={self.parity}, baud={self.baud}, timeout={self.timeout}, retries={self.retries}, unit={hex(self.unit)})"
        elif self.mode == connectionType.TCP:
            return f"{self.model}({self.host}:{self.port}, {self.mode}: timeout={self.timeout}, retries={self.retries}, unit={hex(self.unit)})"
        else:
            # Fallback mirrors the default object repr.
            return f"<{self.__class__.__module__}.{self.__class__.__name__} object at {hex(id(self))}>"

    def _read_input_registers(self, address, length):
        """Read ``length`` input registers at ``address`` with retries.

        Returns a big-endian BinaryPayloadDecoder over the raw registers,
        or None if every attempt failed.
        """
        for i in range(self.retries):
            # Reconnect and retry on a dropped connection.
            if not self.connected():
                self.connect()
                time.sleep(0.1)
                continue
            result = self.client.read_input_registers(address=address, count=length, unit=self.unit)
            # Error responses and short reads are treated as retryable.
            if not isinstance(result, ReadInputRegistersResponse):
                continue
            if len(result.registers) != length:
                continue
            return BinaryPayloadDecoder.fromRegisters(result.registers, byteorder=Endian.Big, wordorder=Endian.Big)
        return None

    def _read_holding_registers(self, address, length):
        """Read ``length`` holding registers at ``address`` with retries.

        Same retry/None contract as :meth:`_read_input_registers`.
        """
        for i in range(self.retries):
            if not self.connected():
                self.connect()
                time.sleep(0.1)
                continue
            result = self.client.read_holding_registers(address=address, count=length, unit=self.unit)
            if not isinstance(result, ReadHoldingRegistersResponse):
                continue
            if len(result.registers) != length:
                continue
            return BinaryPayloadDecoder.fromRegisters(result.registers, byteorder=Endian.Big, wordorder=Endian.Big)
        return None

    def _write_holding_register(self, address, value):
        """Write pre-encoded register values (a list) starting at ``address``."""
        return self.client.write_registers(address=address, values=value, unit=self.unit)

    def _encode_value(self, data, dtype):
        """Encode ``data`` into big-endian registers; only FLOAT32 is supported."""
        builder = BinaryPayloadBuilder(byteorder=Endian.Big, wordorder=Endian.Big)
        try:
            if dtype == registerDataType.FLOAT32:
                builder.add_32bit_float(data)
            else:
                raise NotImplementedError(dtype)
        except NotImplementedError:
            raise
        return builder.to_registers()

    def _decode_value(self, data, length, dtype, vtype):
        """Decode the next value from decoder ``data`` and coerce via ``vtype``.

        Only FLOAT32 is supported; ``length`` is currently unused here.
        """
        try:
            if dtype == registerDataType.FLOAT32:
                return vtype(data.decode_32bit_float())
            else:
                raise NotImplementedError(dtype)
        except NotImplementedError:
            raise
    def _read(self, value):
        """Read and decode a single register described by a map tuple."""
        address, length, rtype, dtype, vtype, label, fmt, batch = value
        try:
            if rtype == registerType.INPUT:
                return self._decode_value(self._read_input_registers(address, length), length, dtype, vtype)
            elif rtype == registerType.HOLDING:
                return self._decode_value(self._read_holding_registers(address, length), length, dtype, vtype)
            else:
                raise NotImplementedError(rtype)
        except NotImplementedError:
            raise

    def _read_all(self, values, rtype):
        """Bulk-read one batch of registers with a single Modbus request.

        Computes the [min, max) address span of ``values``, reads the whole
        span once, then walks the decoder forward, skipping the gaps between
        registers.  Returns ``{name: decoded value}`` (empty on read failure).
        """
        addr_min = False
        addr_max = False
        # Determine the address span covered by this batch.  False doubles
        # as "not yet set"; an address of 0 is still handled correctly by
        # the explicit ``is False`` checks.
        for k, v in values.items():
            v_addr = v[0]
            v_length = v[1]
            if addr_min is False:
                addr_min = v_addr
            if addr_max is False:
                addr_max = v_addr + v_length
            if v_addr < addr_min:
                addr_min = v_addr
            if (v_addr + v_length) > addr_max:
                addr_max = v_addr + v_length
        results = {}
        offset = addr_min
        length = addr_max - addr_min
        try:
            if rtype == registerType.INPUT:
                data = self._read_input_registers(offset, length)
            elif rtype == registerType.HOLDING:
                data = self._read_holding_registers(offset, length)
            else:
                raise NotImplementedError(rtype)
            if not data:
                return results
            # Walk the decoder in address order; relies on the register map
            # being declared in ascending address order (dicts preserve
            # insertion order).
            for k, v in values.items():
                address, length, rtype, dtype, vtype, label, fmt, batch = v
                if address > offset:
                    # Skip the unread gap before this register
                    # (2 bytes per 16-bit register).
                    skip_bytes = address - offset
                    offset += skip_bytes
                    data.skip_bytes(skip_bytes * 2)
                results[k] = self._decode_value(data, length, dtype, vtype)
                offset += length
        except NotImplementedError:
            raise
        return results

    def _write(self, value, data):
        """Encode ``data`` and write it to the holding register in ``value``."""
        address, length, rtype, dtype, vtype, label, fmt, batch = value
        try:
            if rtype == registerType.HOLDING:
                return self._write_holding_register(address, self._encode_value(data, dtype))
            else:
                raise NotImplementedError(rtype)
        except NotImplementedError:
            raise

    def connect(self):
        """Open the underlying Modbus connection; returns True on success."""
        return self.client.connect()

    def disconnect(self):
        """Close the underlying Modbus connection."""
        self.client.close()

    def connected(self):
        """Return True while the client socket is open."""
        return self.client.is_socket_open()

    def read(self, key):
        """Read one named register; raises KeyError for unknown names."""
        if key not in self.registers:
            raise KeyError(key)
        return self._read(self.registers[key])

    def write(self, key, data):
        """Write one named (holding) register; raises KeyError for unknown names."""
        if key not in self.registers:
            raise KeyError(key)
        return self._write(self.registers[key], data)

    def read_all(self, rtype=registerType.INPUT):
        """Read every register of ``rtype``, one bulk request per batch.

        Batches are numbered from 1; iteration stops at the first batch
        number with no registers.
        """
        registers = {k: v for k, v in self.registers.items() if (v[2] == rtype)}
        results = {}
        # NOTE(review): range(1, max(len(registers), 2)) caps the batch
        # numbers tried at len(registers) - 1, which holds as long as there
        # are fewer batches than registers — confirm for sparse maps.
        for batch in range(1, max(len(registers), 2)):
            register_batch = {k: v for k, v in registers.items() if (v[7] == batch)}
            if not register_batch:
                break
            results.update(self._read_all(register_batch, rtype))
        return results
class SDM72(SDM):
    """Register map for the SDM72 meter (default 9600 baud)."""

    def __init__(self, *args, **kwargs):
        self.model = "SDM72"
        self.baud = 9600
        super().__init__(*args, **kwargs)
        # (address, length, register type, data type, value type,
        #  label, unit-or-options, batch)
        self.registers = {
            "total_system_power": (0x0034, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total system power", "W", 1),
            "total_import_kwh": (0x0048, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Import KWh", "kWh", 1),
            "total_export_kwh": (0x004A, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Export KWh", "kWh", 1),
            "total_kwh": (0x0156, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total KWh", "kWh", 2),
            "resettable_total_active_energy": (0x0180, 2, registerType.INPUT, registerDataType.FLOAT32, float, "resettable total active energy", "kWh", 3),
            "resettable_import_active_energy": (0x0184, 2, registerType.INPUT, registerDataType.FLOAT32, float, "resettable import active energy", "kWh", 3),
            "resettable_export_active_energy": (0x0186, 2, registerType.INPUT, registerDataType.FLOAT32, float, "resettable export active energy", "kWh", 3),
            "total_import_active_power": (0x0500, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total import active power", "W", 4),
            "total_export_active_power": (0x0502, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total export active power", "W", 4),
            # Holding registers: device configuration.
            "network_parity_stop": (0x0012, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Network Parity Stop", [
                "N-1", "E-1", "O-1", "N-2"], 1),
            "modbus_address": (0x0014, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Modbus Address", "", 1),
            "password": (0x0018, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Password", "", 1),
        }
class SDM120(SDM):
    """Register map for the SDM120 single-phase meter (default 2400 baud)."""

    def __init__(self, *args, **kwargs):
        self.model = "SDM120"
        self.baud = 2400
        super().__init__(*args, **kwargs)
        # (address, length, register type, data type, value type,
        #  label, unit-or-options, batch)
        self.registers = {
            "voltage": (0x0000, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Voltage", "V", 1),
            "current": (0x0006, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Current", "A", 1),
            "power_active": (0x000c, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Power (Active)", "W", 1),
            "power_apparent": (0x0012, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Power (Apparent)", "VA", 1),
            "power_reactive": (0x0018, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Power (Reactive)", "VAr", 1),
            "power_factor": (0x001e, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Power Factor", "", 1),
            "phase_angle": (0x0024, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Phase Angle", "°", 1),
            "frequency": (0x0046, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Frequency", "Hz", 1),
            "import_energy_active": (0x0048, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Imported Energy (Active)", "kWh", 1),
            "export_energy_active": (0x004a, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Exported Energy (Active)", "kWh", 1),
            "import_energy_reactive": (0x004c, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Imported Energy (Reactive)", "kVArh", 1),
            "export_energy_reactive": (0x004e, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Exported Energy (Reactive)", "kVArh", 1),
            "total_demand_power_active": (0x0054, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Demand Power (Active)", "W", 2),
            "maximum_total_demand_power_active": (0x0056, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Maximum Total Demand Power (Active)", "W", 2),
            "import_demand_power_active": (0x0058, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Import Demand Power (Active)", "W", 2),
            "maximum_import_demand_power_active": (0x005a, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Maximum Import Demand Power (Active)", "W", 2),
            "export_demand_power_active": (0x005c, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Export Demand Power (Active)", "W", 2),
            "maximum_export_demand_power_active": (0x005e, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Maximum Export Demand Power (Active)", "W", 2),
            "total_demand_current": (0x0102, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Demand Current", "A", 3),
            "maximum_total_demand_current": (0x0108, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Maximum Total Demand Current", "A", 3),
            "total_energy_active": (0x0156, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Energy (Active)", "kWh", 4),
            "total_energy_reactive": (0x0158, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Energy (Reactive)", "kVArh", 4),
            # Holding registers: device configuration.
            "demand_time": (0x0000, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Demand Time", "s", 1),
            "demand_period": (0x0002, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Demand Period", "s", 1),
            "relay_pulse_width": (0x000c, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Relay Pulse Width", "ms", 1),
            "network_parity_stop": (0x0012, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Network Parity Stop", [
                "N-1", "E-1", "O-1", "N-2"], 1),
            "meter_id": (0x0014, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Meter ID", "", 1),
            "baud": (0x001c, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Baud Rate", [
                2400, 4800, 9600, -1, -1, 1200], 1),
            "p1_output_mode": (0x0056, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "P1 Output Mode", [
                0x0, "Import Energy (Active)", "Import + Export Energy (Active)", 0x3, "Export Energy (Active)",
                "Import Energy (Reactive)", "Import + Export Energy (Reactive)", 0x7, "Export Energy (Reactive)"], 2),
            "display_scroll_timing": (0xf900, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Display Scroll Timing", "s", 3),
            "p1_divisor": (0xf910, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "P1 Divisor", [
                "0.001kWh/imp", "0.01kWh/imp", "0.1kWh/imp", "1kWh/imp"], 3),
            "measurement_mode": (0xf920, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Measurement Mode", [
                0x0, "Total Imported", "Total Imported + Exported", "Total Imported - Exported"], 3),
            "indicator_mode": (0xf930, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Pulse/LED Indicator Mode", [
                "Import + Export Energy (Active)", "Import Energy (Active)", "Export Energy (Active)"], 3)
        }
class SDM230(SDM):
    """Register map for the SDM230 meter (default 2400 baud).

    Same layout as the SDM120 map except: no holding ``demand_time`` /
    ``demand_period`` entries, and 0xf930 is ``running_time`` here.
    """

    def __init__(self, *args, **kwargs):
        self.model = "SDM230"
        self.baud = 2400
        super().__init__(*args, **kwargs)
        # (address, length, register type, data type, value type,
        #  label, unit-or-options, batch)
        self.registers = {
            "voltage": (0x0000, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Voltage", "V", 1),
            "current": (0x0006, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Current", "A", 1),
            "power_active": (0x000c, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Power (Active)", "W", 1),
            "power_apparent": (0x0012, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Power (Apparent)", "VA", 1),
            "power_reactive": (0x0018, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Power (Reactive)", "VAr", 1),
            "power_factor": (0x001e, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Power Factor", "", 1),
            "phase_angle": (0x0024, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Phase Angle", "°", 1),
            "frequency": (0x0046, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Frequency", "Hz", 1),
            "import_energy_active": (0x0048, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Imported Energy (Active)", "kWh", 1),
            "export_energy_active": (0x004a, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Exported Energy (Active)", "kWh", 1),
            "import_energy_reactive": (0x004c, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Imported Energy (Reactive)", "kVArh", 1),
            "export_energy_reactive": (0x004e, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Exported Energy (Reactive)", "kVArh", 1),
            "total_demand_power_active": (0x0054, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Demand Power (Active)", "W", 2),
            "maximum_total_demand_power_active": (0x0056, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Maximum Total Demand Power (Active)", "W", 2),
            "import_demand_power_active": (0x0058, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Import Demand Power (Active)", "W", 2),
            "maximum_import_demand_power_active": (0x005a, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Maximum Import Demand Power (Active)", "W", 2),
            "export_demand_power_active": (0x005c, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Export Demand Power (Active)", "W", 2),
            "maximum_export_demand_power_active": (0x005e, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Maximum Export Demand Power (Active)", "W", 2),
            "total_demand_current": (0x0102, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Demand Current", "A", 3),
            "maximum_total_demand_current": (0x0108, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Maximum Total Demand Current", "A", 3),
            "total_energy_active": (0x0156, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Energy (Active)", "kWh", 4),
            "total_energy_reactive": (0x0158, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Energy (Reactive)", "kVArh", 4),
            # Holding registers: device configuration.
            "relay_pulse_width": (0x000c, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Relay Pulse Width", "ms", 1),
            "network_parity_stop": (0x0012, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Network Parity Stop", [
                "N-1", "E-1", "O-1", "N-2"], 1),
            "meter_id": (0x0014, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Meter ID", "", 1),
            "baud": (0x001c, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Baud Rate", [
                2400, 4800, 9600, -1, -1, 1200], 1),
            "p1_output_mode": (0x0056, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "P1 Output Mode", [
                0x0, "Import Energy (Active)", "Import + Export Energy (Active)", 0x3, "Export Energy (Active)",
                "Import Energy (Reactive)", "Import + Export Energy (Reactive)", 0x7, "Export Energy (Reactive)"], 2),
            "display_scroll_timing": (0xf900, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Display Scroll Timing", "s", 3),
            "p1_divisor": (0xf910, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "P1 Divisor", [
                "0.001kWh/imp", "0.01kWh/imp", "0.1kWh/imp", "1kWh/imp"], 3),
            "measurement_mode": (0xf920, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Measurement Mode", [
                0x0, "Total Imported", "Total Imported + Exported", "Total Imported - Exported"], 3),
            "running_time": (0xf930, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Running Time", "h", 3)
        }
class SDM630(SDM):
    """Register map for the SDM630 three-phase meter (default 9600 baud).

    Per-phase quantities are prefixed ``p1_``/``p2_``/``p3_``; ``p12_`` etc.
    denote line-to-line values.
    """

    def __init__(self, *args, **kwargs):
        self.model = "SDM630"
        self.baud = 9600
        super().__init__(*args, **kwargs)
        # (address, length, register type, data type, value type,
        #  label, unit-or-options, batch)
        self.registers = {
            "p1_voltage": (0x0000, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1 Voltage", "V", 1),
            "p2_voltage": (0x0002, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2 Voltage", "V", 1),
            "p3_voltage": (0x0004, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3 Voltage", "V", 1),
            "p1_current": (0x0006, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1 Current", "A", 1),
            "p2_current": (0x0008, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2 Current", "A", 1),
            "p3_current": (0x000a, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3 Current", "A", 1),
            "p1_power_active": (0x000c, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1 Power (Active)", "W", 1),
            "p2_power_active": (0x000e, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2 Power (Active)", "W", 1),
            "p3_power_active": (0x0010, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3 Power (Active)", "W", 1),
            "p1_power_apparent": (0x0012, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1 Power (Apparent)", "VA", 1),
            "p2_power_apparent": (0x0014, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2 Power (Apparent)", "VA", 1),
            "p3_power_apparent": (0x0016, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3 Power (Apparent)", "VA", 1),
            "p1_power_reactive": (0x0018, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1 Power (Reactive)", "VAr", 1),
            "p2_power_reactive": (0x001A, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2 Power (Reactive)", "VAr", 1),
            "p3_power_reactive": (0x001C, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3 Power (Reactive)", "VAr", 1),
            "p1_power_factor": (0x001e, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1 Power Factor", "", 1),
            "p2_power_factor": (0x0020, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2 Power Factor", "", 1),
            "p3_power_factor": (0x0022, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3 Power Factor", "", 1),
            "p1_phase_angle": (0x0024, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1 Phase Angle", "°", 1),
            "p2_phase_angle": (0x0026, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2 Phase Angle", "°", 1),
            "p3_phase_angle": (0x0028, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3 Phase Angle", "°", 1),
            "voltage_ln": (0x002a, 2, registerType.INPUT, registerDataType.FLOAT32, float, "L-N Voltage", "V", 1),
            "current_ln": (0x002e, 2, registerType.INPUT, registerDataType.FLOAT32, float, "L-N Current", "A", 1),
            "total_line_current": (0x0030, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Line Current", "A", 1),
            "total_power_active": (0x0034, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Power (Active)", "W", 1),
            "total_power_apparent": (0x0038, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Power (Apparent)", "VA", 1),
            "total_power_reactive": (0x003C, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Power (Reactive)", "VAr", 1),
            "total_power_factor": (0x003E, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Power Factor", "", 1),
            "total_phase_angle": (0x0042, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Phase Angle", "°", 1),
            "frequency": (0x0046, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Frequency", "Hz", 1),
            "import_energy_active": (0x0048, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Imported Energy (Active)", "kWh", 1),
            "export_energy_active": (0x004a, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Exported Energy (Active)", "kWh", 1),
            "import_energy_reactive": (0x004c, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Imported Energy (Reactive)", "kVArh", 1),
            "export_energy_reactive": (0x004e, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Exported Energy (Reactive)", "kVArh", 1),
            "total_energy_apparent": (0x0050, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Energy (Apparent)", "kVAh", 2),
            "total_current": (0x0052, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Current", "A", 2),
            "total_import_demand_power_active": (0x0054, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Import Demand Power (Active)", "W", 2),
            "maximum_import_demand_power_apparent": (0x0056, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Maximum Import Demand Power (Apparent)", "VA", 2),
            "import_demand_power_active": (0x0058, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Import Demand Power (Active)", "W", 2),
            "maximum_import_demand_power_active": (0x005a, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Maximum Import Demand Power (Active)", "W", 2),
            "export_demand_power_active": (0x005c, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Export Demand Power (Active)", "W", 2),
            "maximum_export_demand_power_active": (0x005e, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Maximum Export Demand Power (Active)", "W", 2),
            "total_demand_power_apparent": (0x0064, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Demand Power (Apparent)", "VA", 2),
            "maximum_demand_power_apparent": (0x0066, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Maximum System Power (Apparent)", "VA", 2),
            "neutral_demand_current": (0x0068, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Neutral Demand Current", "A", 2),
            "maximum_neutral_demand_current": (0x006a, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Maximum Neutral Demand Current", "A", 2),
            "p12_voltage": (0x00c8, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1-P2 Voltage", "V", 3),
            "p23_voltage": (0x00ca, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2-P3 Voltage", "V", 3),
            "p31_voltage": (0x00cc, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3-P1 Voltage", "V", 3),
            "voltage_ll": (0x00ce, 2, registerType.INPUT, registerDataType.FLOAT32, float, "L-L Voltage", "V", 3),
            "neutral_current": (0x00e0, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Neutral Current", "A", 3),
            "p1n_voltage_thd": (0x00ea, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1-N Voltage THD", "%", 3),
            "p2n_voltage_thd": (0x00ec, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2-N Voltage THD", "%", 3),
            "p3n_voltage_thd": (0x00ee, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3-N Voltage THD", "%", 3),
            "p1_current_thd": (0x00f0, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1 Current THD", "%", 3),
            "p2_current_thd": (0x00f2, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2 Current THD", "%", 3),
            "p3_current_thd": (0x00f4, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3 Current THD", "%", 3),
            "voltage_ln_thd": (0x00f8, 2, registerType.INPUT, registerDataType.FLOAT32, float, "L-N Voltage THD", "%", 3),
            "current_thd": (0x00fa, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Current THD", "%", 3),
            "total_pf": (0x00fe, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Power Factor", "", 3),
            "p1_demand_current": (0x0102, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1 Demand Current", "A", 3),
            "p2_demand_current": (0x0104, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2 Demand Current", "A", 3),
            "p3_demand_current": (0x0106, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3 Demand Current", "A", 3),
            "maximum_p1_demand_current": (0x0108, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Maximum P1 Demand Current", "A", 3),
            "maximum_p2_demand_current": (0x010a, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Maximum P2 Demand Current", "A", 3),
            "maximum_p3_demand_current": (0x010c, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Maximum P3 Demand Current", "A", 3),
            "p12_voltage_thd": (0x014e, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1-P2 Voltage THD", "%", 4),
            "p23_voltage_thd": (0x0150, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2-P3 Voltage THD", "%", 4),
            "p31_voltage_thd": (0x0152, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3-P1 Voltage THD", "%", 4),
            "voltage_ll_thd": (0x0154, 2, registerType.INPUT, registerDataType.FLOAT32, float, "L-L Voltage THD", "%", 4),
            "total_energy_active": (0x0156, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Energy (Active)", "kWh", 4),
            "total_energy_reactive": (0x0158, 2, registerType.INPUT, registerDataType.FLOAT32, float, "Total Energy (Reactive)", "kVArh", 4),
            "p1_demand_energy_active": (0x015a, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1 Import Energy (Active)", "kWh", 4),
            "p2_demand_energy_active": (0x015c, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2 Import Energy (Active)", "kWh", 4),
            "p3_demand_energy_active": (0x015e, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3 Import Energy (Active)", "kWh", 4),
            "p1_import_energy_active": (0x0160, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1 Import Energy (Active)", "kWh", 4),
            "p2_import_energy_active": (0x0162, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2 Import Energy (Active)", "kWh", 4),
            "p3_import_energy_active": (0x0164, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3 Import Energy (Active)", "kWh", 4),
            "p1_energy_active": (0x0166, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1 Total Energy (Active)", "kWh", 4),
            "p2_energy_active": (0x0168, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2 Total Energy (Active)", "kWh", 4),
            "p3_energy_active": (0x016a, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3 Total Energy (Active)", "kWh", 4),
            "p1_demand_energy_reactive": (0x016c, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1 Import Energy (Reactive)", "kVArh", 4),
            "p2_demand_energy_reactive": (0x016e, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2 Import Energy (Reactive)", "kVArh", 4),
            "p3_demand_energy_reactive": (0x0170, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3 Import Energy (Reactive)", "kVArh", 4),
            "p1_import_energy_reactive": (0x0172, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1 Import Energy (Reactive)", "kVArh", 4),
            "p2_import_energy_reactive": (0x0174, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2 Import Energy (Reactive)", "kVArh", 4),
            "p3_import_energy_reactive": (0x0176, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3 Import Energy (Reactive)", "kVArh", 4),
            "p1_energy_reactive": (0x0178, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P1 Total Energy (Reactive)", "kVArh", 4),
            "p2_energy_reactive": (0x017a, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P2 Total Energy (Reactive)", "kVArh", 4),
            "p3_energy_reactive": (0x017c, 2, registerType.INPUT, registerDataType.FLOAT32, float, "P3 Total Energy (Reactive)", "kVArh", 4),
            # Holding registers: device configuration.
            "demand_time": (0x0000, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Demand Time", "s", 1),
            "demand_period": (0x0002, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Demand Period", "s", 1),
            "system_voltage": (0x0006, 2, registerType.HOLDING, registerDataType.FLOAT32, float, "System Voltage", "V", 1),
            "system_current": (0x0008, 2, registerType.HOLDING, registerDataType.FLOAT32, float, "System Current", "A", 1),
            "system_type": (0x000a, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "System Type", [
                -1, "1P2W", "3P3W", "3P4W"], 1),
            "relay_pulse_width": (0x000c, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Relay Pulse Width", "ms", 1),
            "network_parity_stop": (0x0012, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Network Parity Stop", [
                "N-1", "E-1", "O-1", "N-2"], 1),
            "meter_id": (0x0014, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Meter ID", "", 1),
            "baud": (0x001c, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "Baud Rate", [
                2400, 4800, 9600, 19200, 38400], 1),
            "system_power": (0x0024, 2, registerType.HOLDING, registerDataType.FLOAT32, float, "System Power", "W", 1),
            "p1_divisor": (0xf910, 2, registerType.HOLDING, registerDataType.FLOAT32, int, "P1 Divisor", [
                "0.001kWh/imp", "0.01kWh/imp", "0.1kWh/imp", "1kWh/imp", "10kWh/imp", "100kWh/imp"], 2)
        }
|
from django.shortcuts import render
from tsuru_autoscale.instance import client
from tsuru_autoscale.event import client as eclient
def list(request, app_name=None):
    """Render the page listing all autoscale instances.

    ``app_name`` is accepted for URL compatibility but not used here.
    """
    raw_token = request.session.get('tsuru_token')
    token = raw_token.split(" ")[-1]  # drop the "bearer"-style prefix
    context = {"list": client.list(token).json()}
    return render(request, "instance/list.html", context)
def get(request, name):
    """Render one instance's detail page: the instance, its alarms,
    and every event belonging to those alarms."""
    token = request.session.get('tsuru_token').split(" ")[-1]
    instance = client.get(name, token).json()
    # A missing/empty alarm payload becomes an empty list.
    alarms = client.alarms_by_instance(name, token).json() or []
    # Flatten the per-alarm event lists, preserving alarm order.
    events = [
        event
        for alarm in alarms
        for event in eclient.list(alarm["name"], token).json()
    ]
    return render(request, "instance/get.html", {
        "item": instance,
        "alarms": alarms,
        "events": events,
    })
|
from pathlib import Path
from numpy.testing import assert_allclose, assert_equal
import pandas as pd
import pytest
from statsmodels.tsa.seasonal import MSTL
@pytest.fixture(scope="function")
def mstl_results():
    """Reference MSTL decomposition loaded from the results CSV."""
    results_dir = Path(__file__).parent.resolve()
    return pd.read_csv(results_dir / "results/mstl_test_results.csv")
@pytest.fixture(scope="function")
def data_pd():
    """Test series loaded as a DataFrame indexed by parsed 'ds' timestamps."""
    csv_path = Path(__file__).parent.resolve() / "results/mstl_elec_vic.csv"
    return pd.read_csv(csv_path, index_col=["ds"], parse_dates=["ds"])
@pytest.fixture(scope="function")
def data(data_pd):
    """The 'y' column of the test frame as a bare array."""
    series = data_pd["y"]
    return series.values
def test_return_pandas_series_when_input_pandas_and_len_periods_one(data_pd):
    """With pandas input and a single period, every component is a Series."""
    res = MSTL(endog=data_pd, periods=5).fit()
    for component in (res.trend, res.seasonal, res.resid, res.weights):
        assert isinstance(component, pd.Series)
def test_seasonal_is_datafame_when_input_pandas_and_multiple_periods(data_pd):
    """With multiple periods the seasonal component is a DataFrame."""
    result = MSTL(endog=data_pd, periods=(3, 5)).fit()
    assert isinstance(result.seasonal, pd.DataFrame)
@pytest.mark.parametrize(
    "data, periods, windows, expected",
    [
        (data, 3, None, 1),
        (data, (3, 6), None, 2),
        (data, (3, 6, 1e6), None, 2),
    ],
    indirect=["data"],
)
def test_number_of_seasonal_components(data, periods, windows, expected):
    """One seasonal column per usable period; an over-long period (1e6)
    adds no extra component."""
    res = MSTL(endog=data, periods=periods, windows=windows).fit()
    seasonal = res.seasonal
    if seasonal.ndim > 1:
        n_components = seasonal.shape[1]
    else:
        n_components = seasonal.ndim
    assert n_components == expected
@pytest.mark.parametrize(
    "periods, windows",
    [((3, 5), 1), (7, (3, 5))],
)
def test_raise_value_error_when_periods_and_windows_diff_lengths(
    periods, windows
):
    """Mismatched periods/windows lengths must raise a ValueError."""
    expected_msg = "Periods and windows must have same length"
    with pytest.raises(ValueError, match=expected_msg):
        MSTL(endog=[1, 2, 3, 4, 5], periods=periods, windows=windows)
@pytest.mark.parametrize(
    "data, lmbda",
    [(data, 0.1), (data, 1), (data, -3.0), (data, "auto")],
    indirect=["data"],
)
def test_fit_with_box_cox(data, lmbda):
    """Smoke test: fitting with a Box-Cox transform must not raise."""
    MSTL(endog=data, periods=(5, 6, 7), lmbda=lmbda).fit()
def test_auto_fit_with_box_cox(data):
    """lmbda="auto" should estimate and expose a float est_lmbda."""
    model = MSTL(endog=data, periods=(5, 6, 7), lmbda="auto")
    model.fit()
    assert hasattr(model, "est_lmbda")
    assert isinstance(model.est_lmbda, float)
def test_stl_kwargs_smoke(data):
    """Smoke test: every supported STL keyword is accepted by MSTL."""
    stl_kwargs = dict(
        period=12,
        seasonal=15,
        trend=17,
        low_pass=15,
        seasonal_deg=0,
        trend_deg=1,
        low_pass_deg=1,
        seasonal_jump=2,
        trend_jump=2,
        low_pass_jump=3,
        robust=False,
        inner_iter=3,
        outer_iter=3,
    )
    model = MSTL(
        endog=data, periods=(5, 6, 7), lmbda="auto", stl_kwargs=stl_kwargs
    )
    model.fit()
@pytest.mark.matplotlib
def test_plot(data, data_pd, close_figures):
    """Smoke test: plotting works for both ndarray and pandas input."""
    for endog in (data, data_pd):
        MSTL(endog=endog, periods=5).fit().plot()
def test_output_similar_to_R_implementation(data_pd, mstl_results):
    """The decomposition must match the reference R `mstl` output."""
    stl_kwargs = {
        "seasonal_deg": 0,
        "seasonal_jump": 1,
        "trend_jump": 1,
        "trend_deg": 1,
        "low_pass_jump": 1,
        "low_pass_deg": 1,
        "inner_iter": 2,
        "outer_iter": 0,
    }
    result = MSTL(
        endog=data_pd, periods=(24, 24 * 7), stl_kwargs=stl_kwargs
    ).fit()
    assert_allclose(result.observed, mstl_results["Data"])
    assert_allclose(result.trend, mstl_results["Trend"])
    assert_allclose(result.seasonal, mstl_results[["Seasonal24", "Seasonal168"]])
    assert_allclose(result.resid, mstl_results["Remainder"])
@pytest.mark.parametrize(
    "data, periods_ordered, windows_ordered, periods_not_ordered, "
    "windows_not_ordered",
    [
        (data, (12, 24, 24 * 7), (11, 15, 19), (12, 24 * 7, 24), (11, 19, 15)),
        (
            data,
            (12, 24, 24 * 7 * 1e6),
            (11, 15, 19),
            (12, 24 * 7 * 1e6, 24),
            (11, 19, 15),
        ),
        (data, (12, 24, 24 * 7), None, (12, 24 * 7, 24), None),
    ],
    indirect=["data"],
)
def test_output_invariant_to_period_order(
    data,
    periods_ordered,
    windows_ordered,
    periods_not_ordered,
    windows_not_ordered,
):
    """The decomposition must not depend on the order periods are given."""
    res_sorted = MSTL(
        endog=data, periods=periods_ordered, windows=windows_ordered
    ).fit()
    res_shuffled = MSTL(
        endog=data, periods=periods_not_ordered, windows=windows_not_ordered
    ).fit()
    for attr in ("observed", "trend", "seasonal", "resid"):
        assert_equal(getattr(res_sorted, attr), getattr(res_shuffled, attr))
|
"""
Support for Mailgun.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/mailgun/
"""
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_API_KEY, CONF_DOMAIN, CONF_WEBHOOK_ID
from homeassistant.helpers import config_entry_flow
# Component identifiers.
DOMAIN = 'mailgun'
API_PATH = '/api/{}'.format(DOMAIN)
DEPENDENCIES = ['webhook']
# Event fired on the Home Assistant bus for every inbound Mailgun message.
MESSAGE_RECEIVED = '{}_message_received'.format(DOMAIN)
CONF_SANDBOX = 'sandbox'
DEFAULT_SANDBOX = False
# Schema for the optional `mailgun:` block in configuration.yaml.
CONFIG_SCHEMA = vol.Schema({
    vol.Optional(DOMAIN): vol.Schema({
        vol.Required(CONF_API_KEY): cv.string,
        vol.Required(CONF_DOMAIN): cv.string,
        vol.Optional(CONF_SANDBOX, default=DEFAULT_SANDBOX): cv.boolean,
        vol.Optional(CONF_WEBHOOK_ID): cv.string,
    }),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Set up the Mailgun component.

    Stores the validated `mailgun:` config in hass.data when present;
    always reports success so config-entry setup can proceed.
    """
    if DOMAIN in config:
        hass.data[DOMAIN] = config[DOMAIN]
    return True
async def handle_webhook(hass, webhook_id, request):
    """Fire a MESSAGE_RECEIVED event for an inbound Mailgun webhook POST."""
    payload = dict(await request.post())
    # Tag the event with the webhook that delivered it.
    payload['webhook_id'] = webhook_id
    hass.bus.async_fire(MESSAGE_RECEIVED, payload)
async def async_setup_entry(hass, entry):
    """Configure based on config entry: register the inbound webhook."""
    webhook_id = entry.data[CONF_WEBHOOK_ID]
    hass.components.webhook.async_register(webhook_id, handle_webhook)
    return True
async def async_unload_entry(hass, entry):
    """Unload a config entry by deregistering its webhook."""
    webhook_id = entry.data[CONF_WEBHOOK_ID]
    hass.components.webhook.async_unregister(webhook_id)
    return True
# Register a minimal config flow that provisions the Mailgun webhook and
# shows these documentation links in the UI description.
config_entry_flow.register_webhook_flow(
    DOMAIN,
    'Mailgun Webhook',
    {
        'mailgun_url':
        'https://www.mailgun.com/blog/a-guide-to-using-mailguns-webhooks',
        'docs_url': 'https://www.home-assistant.io/components/mailgun/'
    }
)
|
import sys, getopt
import re
import pandas as pd
import os
from pathlib import Path
import urllib.request
from urllib.request import Request, urlopen
def SRARunTable():
    """Drive the workflow: read the SRA run table named by -i and, for each
    HMP_ row, download its taxonomy-analysis data into a per-repository
    directory under ~/Downloads/categories.
    """
    inputFile = read_argv(sys.argv[1:])
    print (inputFile)
    print ('Opening file & creating categories in Downloads/categories')
    df = pd.read_csv(inputFile, sep=',', low_memory=False)
    for i in df.index:
        # Whole row rendered as one whitespace-separated string; serves both
        # as the HMP_ filter target and as the output-file header.
        searchString = df.iloc[i,:].to_string(header=False, index=False)
        if re.search('HMP_', searchString):
            #store first element
            # NOTE(review): positional chained indexing; assumes the run
            # accession is the first column of the table -- confirm.
            runIdentifier = df.iloc[i][0]
            directory = df.iloc[i]['biospecimen_repository']
            #make a directory if it does not exist yet
            to_directory('categories/' + directory) #set dir to "Downloads"
            #download the website
            scrape_website(runIdentifier, searchString)
#obtain the oTaxAnalysisData object
#store file, first column is searchString
def scrape_website(id, header):
    """Download the SRA taxonomy-analysis blob for run *id* into ``<id>.txt``.

    Skips the download when the output file already exists (output is opened
    in append mode, so re-running would otherwise duplicate content). On
    success, the embedded ``oTaxAnalysisData`` JavaScript object is written
    out with *header* (whitespace-collapsed, tab-joined) as a leading ``#``
    comment line.

    Fixes: previously relied on ``file_exists()`` raising ZeroDivisionError
    as a "file missing" signal inside a bare ``except:``, and a second bare
    ``except:`` swallowed every download/parse error silently.

    Args:
        id: SRA run accession used in the URL and the output filename.
        header: free-text metadata row describing the run.
    """
    ws = 'https://trace.ncbi.nlm.nih.gov/Traces/sra/?run=' + id
    outFile = id + '.txt'
    if Path(outFile).is_file():
        print(f'The file {outFile} exists')
        return
    fileContent = ''
    print ("downloading " + id)
    try:
        req = Request(ws, headers={'User-Agent': 'XYZ/3.0'})
        response = urlopen(req, timeout=50).read()
        webpage = response.decode('utf-8')
        # Grab the embedded oTaxAnalysisData JS object literal.
        match = re.search(r"oTaxAnalysisData.*\}\}\,.*\n0]\;", webpage, re.DOTALL)
        if match is None:
            print ('no object')
            return
        fileContent = match[0]
        #clean up: strip the JS assignment wrapper, keep the object body
        fileContent = fileContent.replace('oTaxAnalysisData =', '')
        fileContent = fileContent.replace('0];', '')
        header = "\t".join(header.split())
        fileContent = '#' + header + "\n" + fileContent
        #save
        storeWebSite(outFile, fileContent)
    except Exception as err:
        # Narrowed from a bare `except:`; report what actually failed.
        print ('no object ({})'.format(err))
def file_exists(f):
    """Return True if *f* exists; otherwise raise ZeroDivisionError.

    NOTE(review): the ``return 1/0`` is a deliberate (if unusual) hack --
    callers wrap this in try/except and treat the raised ZeroDivisionError
    as the "file missing" signal, so changing it to ``return False`` would
    silently break that control flow. Refactor the callers first.
    """
    path = Path(f)
    if path.is_file():
        print(f'The file {f} exists')
        return True
    else:
        return 1/0
def storeWebSite(o, w):
    """Append text *w* to the file at path *o*, creating it if needed."""
    with open(o, 'a') as sink:
        sink.write(w)
def to_directory(targetDirectory):
    """Create ~/Downloads/<targetDirectory> if needed and chdir into it."""
    destination = Path.home() / "Downloads" / targetDirectory
    destination.mkdir(parents=True, exist_ok=True)
    os.chdir(destination)
def read_argv(argv):
    """Parse ``-i <inputfile>`` / ``--ifile <inputfile>`` from *argv*.

    Prints usage and exits on ``-h`` or on malformed arguments; returns the
    input-file path, or the empty string when none was supplied.
    """
    inputFile = ''
    usage = 'test.py -i <inputfile>'
    try:
        opts, _ = getopt.getopt(argv, "hi:", ["ifile="])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit()
        if opt in ("-i", "--ifile"):
            inputFile = arg
            print ('Input file is "', inputFile)
    return inputFile
#main
if __name__ == '__main__':
    # NOTE(review): this only parses/prints the -i argument; presumably it
    # was meant to call SRARunTable() to run the full workflow -- confirm.
    read_argv(sys.argv[1:])
|
"""
2018 Day 21
https://adventofcode.com/2018/day/21
"""
from typing import Iterator, Optional
# #ip 1
# seti 123 0 5
# bani 5 456 5
# eqri 5 72 5
# addr 5 1 1
# seti 0 0 1
# seti 0 9 5
# bori 5 65536 2
# seti 7571367 9 5
# bani 2 255 4
# addr 5 4 5
# bani 5 16777215 5
# muli 5 65899 5
# bani 5 16777215 5
# gtir 256 2 4
# addr 4 1 1
# addi 1 1 1
# seti 27 1 1
# seti 0 2 4
# addi 4 1 3
# muli 3 256 3
# gtrr 3 2 3
# addr 3 1 1
# addi 1 1 1
# seti 25 6 1
# addi 4 1 4
# seti 17 8 1
# setr 4 6 2
# seti 7 4 1
# eqrr 5 0 4
# addr 4 1 1
# seti 5 5 1
# PSEUDOCODE
# 0 reg5 = 123
# 1 reg5 = reg5 & 456
# 2 reg5 = 1 if reg5 == 72 else 0
# 3 reg1 = reg1 + reg5
# 4 goto 1
# 5 reg5 = 0
# 6 reg2 = reg5 | 65536
# 7 reg5 = 7571367
# 8 reg4 = reg2 & 255
# 9 reg5 = reg5 + reg4
# 10 reg5 = reg5 & 16777215
# 11 reg5 = reg5 * 65899
# 12 reg5 = reg5 & 16777215
# 13 reg4 = 1 if 256 > reg2 else 0
# 14 reg1 = reg1 + reg4
# 15 goto 17
# 16 goto 28
# 17 reg4 = 0
# 18 reg3 = reg4 + 1
# 19 reg3 = reg3 * 256
# 20 reg3 = 1 if reg3 > reg2 else 0
# 21 reg1 = reg1 + reg3
# 22 goto 24
# 23 goto 26
# 24 reg4 = reg4 + 1
# 25 goto 18
# 26 reg2 = reg4
# 27 goto 8
# 28 reg4 = 1 if reg5 == reg0 else 0
# 29 reg1 = reg1 + reg4
# 30 goto 6
# IF STATEMENTS
# 0 reg5 = 123
# 1 reg5 = reg5 & 456
# 3 if reg5 == 72:
# goto 6
# 4 goto 1
# 5 reg5 = 0
# 6 reg2 = reg5 | 65536
# 7 reg5 = 7571367
# 8 reg4 = reg2 & 255
# 9 reg5 = reg5 + reg4
# 10 reg5 = reg5 & 16777215
# 11 reg5 = reg5 * 65899
# 12 reg5 = reg5 & 16777215
# 14 if 256 > reg2:
# goto 28
# 17 reg4 = 0
# 18 reg3 = reg4 + 1
# 19 reg3 = reg3 * 256
# 21 if reg3 > reg2:
# goto 26
# 24 reg4 = reg4 + 1
# 25 goto 18
# 26 reg2 = reg4
# 27 goto 8
# 28 if reg5 == reg0:
# goto 31
# 30 goto 6
# SIMPLIFY ROUTINES
# 3 while 123 & 456 != 72:
# pass
# 5 reg5 = 0
# 6 reg2 = reg5 | 65536
# 7 reg5 = 7571367
# 8 reg4 = reg2 & 255
# 9 reg5 = (((reg5 + reg4) & 16777215) * 65899) & 16777215
# 14 if 256 > reg2:
# goto 28
# 17 reg4 = 0
# 21 if (reg4 + 1) * 256 > reg2:
# goto 26
# 24 reg4 = reg4 + 1
# 25 goto 21
# 26 reg2 = reg4
# 27 goto 8
# 28 if reg5 == reg0:
# goto 31
# 30 goto 6
# LOOPS
# while True:
# 6 reg2 = reg5 | 65536
# 7 reg5 = 7571367
# while True:
# 8 reg4 = reg2 & 255
# 9 reg5 = (((reg5 + reg4) & 16777215) * 65899) & 16777215
# 14 if 256 > reg2:
# 28 if reg5 == reg0:
# return
# else:
# 26 reg4 = reg4 /= 256
# CONCLUSION:
# A program for Part 1 will just return the first number seen (in reg5) at line 28 - the function
# is below. After seeing the description for part 2, I changed this from *return* to *yield* in
# order to be able to capture both the first and last value.
def activation_system() -> Iterator[int]:
    """Yield each value the device program compares against register 0.

    Direct translation of the reversed ELF-code hashing loop: every yielded
    value is a possible halting value for reg0 (first yield answers part 1).
    """
    hash_val = 0
    while True:
        word = hash_val | 0x10000
        hash_val = 7571367
        # Consume `word` one byte at a time, folding each byte into the
        # 24-bit hash; the final (sub-256) byte produces the yielded value.
        while word >= 256:
            hash_val = (((hash_val + (word & 0xFF)) & 0xFFFFFF) * 65899) & 0xFFFFFF
            word //= 256
        hash_val = (((hash_val + (word & 0xFF)) & 0xFFFFFF) * 65899) & 0xFFFFFF
        yield hash_val
def last_solution() -> Optional[int]:
    """Return the last value produced before the sequence first repeats.

    The generator is eventually periodic, so the value seen immediately
    before the first repeat is the answer to part 2 (None is unreachable
    in practice).
    """
    seen = set()
    previous = None
    for value in activation_system():
        if value in seen:
            return previous
        seen.add(value)
        previous = value
    return None
def main() -> None:
    """
    Calculate and output the solutions based on the real puzzle input.
    """
    part_one = next(activation_system())
    print(f"Part 1: {part_one}")
    print(f"Part 2: {last_solution()}")


if __name__ == "__main__":
    main()
|
import tensorflow as tf
import tf_metrics
from tensorflow import keras
class CategoricalTruePositives(keras.metrics.Metric):
    """Streaming count of categorical true positives.

    Accumulates, across batches, the (optionally sample-weighted) number of
    predictions whose argmax class equals the integer label.
    """
    def __init__(self, name='categorical_true_positives', **kwargs):
        super(CategoricalTruePositives, self).__init__(name=name, **kwargs)
        # Scalar accumulator; persists across update_state() calls.
        self.true_positives = self.add_weight(name='tp', initializer='zeros')
    def update_state(self, y_true, y_pred, sample_weight=None):
        # NOTE(review): tf.argmax with no axis reduces over axis 0 (the
        # batch axis); the commented-out variant below uses axis=1, which
        # suggests the class axis was intended here -- confirm against the
        # shapes of y_true/y_pred at the call site.
        y_pred = tf.argmax(y_pred)
        values = tf.equal(tf.cast(y_true, 'int32'), tf.cast(y_pred, 'int32'))
        values = tf.cast(values, 'float32')
        if sample_weight is not None:
            sample_weight = tf.cast(sample_weight, 'float32')
            values = tf.multiply(values, sample_weight)
        self.true_positives.assign_add(tf.reduce_sum(values))
    '''
    def update_state(self, y_true, y_pred, sample_weight=None):
        y_pred = tf.reshape(tf.argmax(y_pred, axis=1), shape=(-1, 1))
        values = tf.cast(y_true, 'int32') == tf.cast(y_pred, 'int32')
        values = tf.cast(values, 'float32')
        if sample_weight is not None:
            sample_weight = tf.cast(sample_weight, 'float32')
            values = tf.multiply(values, sample_weight)
        self.true_positives.assign_add(tf.reduce_sum(values))
    '''
    def result(self):
        # Current accumulated true-positive total.
        return self.true_positives
    def reset_states(self):
        # The state of the metric will be reset at the start of each epoch.
        self.true_positives.assign(0.)
def multi_class_recall(num_classes, average, weights, **kwargs):
    """Build an Estimator-API metric_fn computing multi-class recall.

    Wraps tf_metrics.recall (TF1 tf.metrics style) so the returned
    ``categorical_recall(labels, predictions)`` yields a single tensor whose
    value is refreshed by the update op on every evaluation step.

    Args:
        num_classes: total number of classes.
        average: averaging mode forwarded to tf_metrics.recall
            (e.g. 'micro'/'macro'/'weighted' -- confirm against tf_metrics).
        weights: per-class weights, indexable by class id.
        **kwargs: forwarded to tf_metrics.recall.
    """
    def categorical_recall(labels, predictions):
        # Effectively list(weights); kept as-is to preserve behavior.
        label_weights = [weights[idx] for idx, w in enumerate(weights)]
        # any tensorflow metric
        value, update_op = tf_metrics.recall(
            labels, predictions, num_classes,
            average=average, weights=tf.constant(label_weights), **kwargs)
        # find all variables created for this metric; the name scope of the
        # enclosing metric_fn ('categorical_recall') is path component [1]
        #print([len(i.name.split('/')) for i in tf.local_variables()])
        metric_vars = []
        for i in tf.local_variables():
            if len(i.name.split('/')) > 2 and 'categorical_recall' in i.name.split('/')[1]:
                metric_vars.append(i)
        # Add metric variables to GLOBAL_VARIABLES collection.
        # They will be initialized for new session.
        for v in metric_vars:
            tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, v)
        # force to update metric values
        with tf.control_dependencies([update_op]):
            value = tf.identity(value)
        return value
    return categorical_recall
|
from django.apps import AppConfig
class GwdapisConfig(AppConfig):
    """Django application configuration for the GWDapis app."""
    name = 'GWDapis'
|
# -*- coding: utf-8 -*-
"""
Methods for platform information.
@author: - Thomas McTavish
"""
import platform
def get_platform_info():
    """
    Retrieve platform information as a dict.
    Code borrowed from the file, ``launch.py`` from the Sumatra package.
    """
    arch_bits, arch_linkage = platform.architecture()
    return {
        'architecture_bits': arch_bits,
        'architecture_linkage': arch_linkage,
        'machine': platform.machine(),
        'network_name': platform.node(),
        'processor': platform.processor(),
        'release': platform.release(),
        'system_name': platform.system(),
        'version': platform.version(),
    }
|
from astboom.cli import cli
import sys
# Allow `python -m`/script execution: delegate to the click CLI entry point.
if __name__ == "__main__":
    sys.exit(cli(prog_name="astboom"))
|
from datetime import timedelta
from django.test import TestCase
from django.utils import timezone
from graphene.test import Client
from graphql_relay import to_global_id
from itdagene.app.career.models import Joblisting
from itdagene.app.company.models import Company
from itdagene.core.models import User
from itdagene.graphql.schema import schema
class TestJoblistings(TestCase):
    """GraphQL tests for joblisting visibility, node lookup, and search.

    A listing is "active" when its deadline lies in the future; only active
    listings (and their companies) should surface in connections and search,
    while expired listings must remain resolvable as nodes.
    """
    def setUp(self):
        # One superuser and one company that every test attaches listings to.
        User.objects.create(is_superuser=True)
        self.company = Company.objects.create()
        self.client = Client(schema)
        self.joblistings_query = """
        {
          joblistings(first:10){
            edges {
              node {
                id
              }
            }
          }
        }
        """
        self.node_query = """
        query ($id: ID!) {
          node(id: $id){
            __typename
            id
          }
        }
        """
        self.search_query = """
        query ($query: String!) {
          search(query: $query, types: [JOBLISTING]){
            __typename
            ... on Joblisting {
              id
            }
          }
        }
        """
        self.company_search_query = """
        query ($query: String!) {
          search(query: $query, types: [COMPANY_WITH_JOBLISTING]){
            __typename
            ... on Company {
              id
            }
          }
        }
        """
    def test_no_joblisting(self):
        """An empty database yields an empty joblistings connection."""
        executed = self.client.execute(self.joblistings_query)
        self.assertIsNone(executed.get("errors"))
        self.assertEqual(executed["data"]["joblistings"]["edges"], [])
    def test_inactive_joblisting_is_not_in_connection(self):
        """Listings whose deadline has passed are excluded."""
        Joblisting.objects.create(
            company=self.company, deadline=timezone.now() - timedelta(days=1)
        )
        executed = self.client.execute(self.joblistings_query)
        self.assertIsNone(executed.get("errors"))
        self.assertEqual(executed["data"]["joblistings"]["edges"], [])
    def test_active_joblisting_is_in_connection(self):
        """Listings with a future deadline are included."""
        Joblisting.objects.create(
            company=self.company, deadline=timezone.now() + timedelta(days=1)
        )
        executed = self.client.execute(self.joblistings_query)
        self.assertIsNone(executed.get("errors"))
        self.assertEqual(len(executed["data"]["joblistings"]["edges"]), 1)
    def test_inactive_joblisting_is_node(self):
        """ Ensure old joblisting urls are still valid """
        joblisting = Joblisting.objects.create(
            company=self.company, deadline=timezone.now() - timedelta(days=1)
        )
        global_id = to_global_id("Joblisting", joblisting.pk)
        executed = self.client.execute(
            self.node_query, variable_values={"id": global_id}
        )
        self.assertIsNone(executed.get("errors"))
        self.assertIsNotNone(executed["data"]["node"])
        self.assertEqual(
            executed["data"]["node"], {"id": global_id, "__typename": "Joblisting"}
        )
    def test_only_active_is_in_search(self):
        """Search by title returns only the listing with a future deadline."""
        title = "Title"
        active = Joblisting.objects.create(
            company=self.company,
            deadline=timezone.now() + timedelta(days=1),
            title=title,
        )
        Joblisting.objects.create(
            company=self.company,
            deadline=timezone.now() - timedelta(days=1),
            title=title,
        )
        global_id = to_global_id("Joblisting", active.pk)
        executed = self.client.execute(
            self.search_query, variable_values={"query": title}
        )
        expected = {"data": {"search": [{"id": global_id, "__typename": "Joblisting"}]}}
        self.assertIsNone(executed.get("errors"))
        self.assertEqual(executed, expected)
    def test_only_companies_with_joblistings_is_in_search(self):
        """Company search returns only companies holding an active listing."""
        name = "name"
        active_company = Company.objects.create(name=name)
        inactive_company = Company.objects.create(name=name)
        Joblisting.objects.create(
            company=active_company, deadline=timezone.now() + timedelta(days=1)
        )
        Joblisting.objects.create(
            company=inactive_company, deadline=timezone.now() - timedelta(days=1)
        )
        global_id = to_global_id("Company", active_company.pk)
        executed = self.client.execute(
            self.company_search_query, variable_values={"query": name}
        )
        expected = {"data": {"search": [{"id": global_id, "__typename": "Company"}]}}
        self.assertIsNone(executed.get("errors"))
        self.assertEqual(executed, expected)
|
from ozekilibsrest import Configuration, Message, MessageApi
# Connection settings for the local Ozeki SMS gateway's REST API.
configuration = Configuration(
    username="http_user",
    password="qwe123",
    api_url="http://127.0.0.1:9509/api"
)
# Three example messages to distinct recipients.
msg1 = Message(
    to_address="+3620111111",
    text="Hello world 1!"
)
msg2 = Message(
    to_address="+36202222222",
    text="Hello world 2!"
)
msg3 = Message(
    to_address="+36203333333",
    text="Hello world 3!"
)
# Submit all messages in one batch and print the gateway's response.
api = MessageApi(configuration)
result = api.send([msg1, msg2, msg3])
print(result)
from PyInstaller.utils.hooks import collect_data_files
import spacy
# add datas for spacy
datas = collect_data_files('spacy', False)
# append spacy data path
# NOTE(review): spacy.util.get_data_path() exists only in spaCy 1.x/2.x;
# confirm the pinned spaCy version before reusing this hook.
datas.append((spacy.util.get_data_path(), 'spacy/data'))
datas.extend(collect_data_files('thinc.neural', False))
# Cython/compiled submodules that PyInstaller's static analysis cannot see.
hiddenimports=['cymem', 'cymem.cymem', 'thinc.linalg', 'murmurhash', 'murmurhash.mrmr', 'spacy.strings',
'spacy.morphology', 'spacy.tokens.morphanalysis', 'spacy.lexeme', 'spacy.tokens', 'spacy.tokens.underscore', 'spacy.parts_of_speech', 'spacy.tokens._retokenize', 'spacy.syntax', 'spacy.syntax.stateclass',
'spacy.syntax.transition_system', 'spacy.syntax.nonproj', 'spacy.syntax.nn_parser', 'spacy.syntax.arc_eager', 'thinc.extra.search',
'spacy.syntax._beam_utils', 'spacy.syntax.ner', 'thinc.neural._classes.difference', 'srsly.msgpack.util', 'preshed',
'preshed.maps', 'thinc.neural', 'thinc.neural._aligned_alloc', 'thinc', 'thinc.neural._custom_kernels', 'blis',
'blis.py', 'spacy.vocab', 'spacy.lemmatizer', 'spacy._align', 'spacy.util',
'spacy.lang', 'spacy.syntax._parser_model', 'spacy.matcher._schemas', 'spacy.kb', 'en_core_web_sm', 'spacy.lang.en']
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code to run a pose estimation with a TFLite Movenet_multipose model."""
import os
import time
from typing import List
import cv2
from data import BodyPart
from data import KeyPoint
from data import Person
from data import Point
from data import Rectangle
import numpy as np
from tracker import BoundingBoxTracker
from tracker import KeypointTracker
from tracker import TrackerConfig
import utils
# pylint: disable=g-import-not-at-top
try:
# Import TFLite interpreter from tflite_runtime package if it's available.
from tflite_runtime.interpreter import Interpreter
except ImportError:
# If not, fallback to use the TFLite interpreter from the full TF package.
import tensorflow as tf
Interpreter = tf.lite.Interpreter
class MoveNetMultiPose(object):
  """A wrapper class for a MultiPose TFLite pose estimation model."""
  def __init__(self,
               model_name: str,
               tracker_type: str = 'bounding_box',
               input_size: int = 256) -> None:
    """Initialize a MultiPose pose estimation model.
    Args:
      model_name: Name of the TFLite multipose model.
      tracker_type: Type of Tracker('keypoint' or 'bounding_box')
      input_size: Size of the longer dimension of the input image.
    """
    # Append .tflite extension to model_name if there's no extension.
    _, ext = os.path.splitext(model_name)
    if not ext:
      model_name += '.tflite'
    # Store the input size parameter.
    self._input_size = input_size
    # Initialize the TFLite model.
    interpreter = Interpreter(model_path=model_name, num_threads=4)
    self._input_details = interpreter.get_input_details()
    self._output_details = interpreter.get_output_details()
    self._input_type = self._input_details[0]['dtype']
    self._input_height = interpreter.get_input_details()[0]['shape'][1]
    self._input_width = interpreter.get_input_details()[0]['shape'][2]
    self._interpreter = interpreter
    # Initialize a tracker.
    config = TrackerConfig()
    if tracker_type == 'keypoint':
      self._tracker = KeypointTracker(config)
    elif tracker_type == 'bounding_box':
      self._tracker = BoundingBoxTracker(config)
    else:
      # Unknown tracker type: warn and fall back to untracked detections.
      print('ERROR: Tracker type {0} not supported. No tracker will be used.'
            .format(tracker_type))
      self._tracker = None
  def detect(self,
             input_image: np.ndarray,
             detection_threshold: float = 0.11) -> List[Person]:
    """Run detection on an input image.
    Args:
      input_image: A [height, width, 3] RGB image. Note that height and width
        can be anything since the image will be immediately resized according to
        the needs of the model within this function.
      detection_threshold: minimum confidence score for an detected pose to be
        considered.
    Returns:
      A list of Person instances detected from the input image.
    """
    # A -1 in the width slot of the shape signature marks a dynamic-shape
    # model, which requires resize_tensor_input before each invocation.
    is_dynamic_shape_model = self._input_details[0]['shape_signature'][2] == -1
    # Resize and pad the image to keep the aspect ratio and fit the expected
    # size.
    if is_dynamic_shape_model:
      resized_image, _ = utils.keep_aspect_ratio_resizer(
          input_image, self._input_size)
      input_tensor = np.expand_dims(resized_image, axis=0)
      self._interpreter.resize_tensor_input(
          self._input_details[0]['index'], input_tensor.shape, strict=True)
    else:
      resized_image = cv2.resize(input_image,
                                 (self._input_width, self._input_height))
      input_tensor = np.expand_dims(resized_image, axis=0)
    self._interpreter.allocate_tensors()
    # Run inference with the MoveNet MultiPose model.
    self._interpreter.set_tensor(self._input_details[0]['index'],
                                 input_tensor.astype(self._input_type))
    self._interpreter.invoke()
    # Get the model output
    model_output = self._interpreter.get_tensor(
        self._output_details[0]['index'])
    image_height, image_width, _ = input_image.shape
    return self._postprocess(model_output, image_height, image_width,
                             detection_threshold)
  def _postprocess(self, keypoints_with_scores: np.ndarray, image_height: int,
                   image_width: int,
                   detection_threshold: float) -> List[Person]:
    """Returns a list "Person" corresponding to the input image.
    Note that coordinates are expressed in (x, y) format for drawing
    utilities.
    Args:
      keypoints_with_scores: Output of the MultiPose TFLite model.
      image_height: height of the image in pixels.
      image_width: width of the image in pixels.
      detection_threshold: minimum confidence score for an entity to be
        considered.
    Returns:
      A list of Person(keypoints, bounding_box, scores), each containing:
        * the coordinates of all keypoints of the detected entity;
        * the bounding boxes of the entity.
        * the confidence core of the entity.
    """
    # Per-instance layout used below: 17 keypoints x (y, x, score) occupy
    # indices 0-50, the bounding box (ymin, xmin, ymax, xmax) indices 51-54,
    # and the overall instance score index 55.
    _, num_instances, _ = keypoints_with_scores.shape
    list_persons = []
    for idx in range(num_instances):
      # Skip a detected pose if its confidence score is below the threshold
      person_score = keypoints_with_scores[0, idx, 55]
      if person_score < detection_threshold:
        continue
      # Extract the keypoint coordinates and scores
      kpts_y = keypoints_with_scores[0, idx, range(0, 51, 3)]
      kpts_x = keypoints_with_scores[0, idx, range(1, 51, 3)]
      scores = keypoints_with_scores[0, idx, range(2, 51, 3)]
      # Create the list of keypoints
      keypoints = []
      for i in range(scores.shape[0]):
        keypoints.append(
            KeyPoint(
                BodyPart(i),
                Point(
                    int(kpts_x[i] * image_width),
                    int(kpts_y[i] * image_height)), scores[i]))
      # Calculate the bounding box
      rect = [
          keypoints_with_scores[0, idx, 51], keypoints_with_scores[0, idx, 52],
          keypoints_with_scores[0, idx, 53], keypoints_with_scores[0, idx, 54]
      ]
      bounding_box = Rectangle(
          Point(int(rect[1] * image_width), int(rect[0] * image_height)),
          Point(int(rect[3] * image_width), int(rect[2] * image_height)))
      # Create a Person instance corresponding to the detected entity.
      list_persons.append(Person(keypoints, bounding_box, person_score))
    # Associate detections across frames when a tracker was configured.
    if self._tracker:
      list_persons = self._tracker.apply(list_persons, time.time() * 1000)
    return list_persons
|
import requests
import json
import os
def Wechat(msg, corpid, secret, agentid):
    """Send *msg* as a WeChat Work text message to user "admin".

    Fetches an access token with the corp id/secret, then posts the message
    through the agent identified by *agentid*. Failures are printed rather
    than raised (best-effort notification).
    """
    payload = json.dumps({
        "touser" : "admin",
        "msgtype" : "text",
        "agentid" : agentid,
        "text" : {
            "content" : msg
        },
        "safe":0,
        "enable_id_trans": 0,
        "enable_duplicate_check": 0,
        "duplicate_check_interval": 1800
    })
    token_url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={}&corpsecret={}".format(corpid, secret)
    try:
        token = requests.get(token_url, timeout=5).json()["access_token"]
    except Exception as e:
        print(e)
        return
    send_url = "https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={}".format(token)
    try:
        response = requests.post(send_url, data=payload, timeout=5)
        print(response.json()['errmsg'])
    except Exception as e:
        print(e)
def Telegram(msg, token, chat_id):
    """Send *msg* to a Telegram chat via the Bot API sendMessage method.

    Bug fixes versus the original: the URL template ended in `&text={}>`,
    appending a literal '>' to every message, and the message text was
    interpolated into the query string without URL-encoding, so spaces,
    '&', '#' or unicode truncated/corrupted the message. Posting a form
    body lets requests encode both fields correctly.

    Failures are printed rather than raised (best-effort notification).
    """
    url = "https://api.telegram.org/bot{}/sendMessage".format(token)
    try:
        requests.post(url, data={"chat_id": chat_id, "text": msg}, timeout=5)
        return
    except Exception as e:
        print(e)
        return
|
def get_keywords(js):
    """Extract keyword tuples for one film from a TMDB-style JSON dict.

    Args:
        js: decoded JSON dict; keywords are read from
            ``js['keywords'][0]['keywords']`` and the film name/id come
            from ``get_info(js)``.

    Returns:
        List of ``(film_name, film_id, keyword, keyword_id, weight)``
        tuples; whatever was collected before any missing/malformed data
        was encountered (possibly empty).
    """
    info = get_info(js)
    film_name = info[0]
    film_id = info[1]
    keyword_list = []
    try:
        keywords = js['keywords'][0]['keywords']
        # `is not None` instead of `!= None`; absent block short-circuits.
        if keywords is not None:
            for k in keywords:
                entry = (film_name, film_id, k['keyword'], k['id'], k['weight'])
                keyword_list.append(entry)
    except (KeyError, IndexError, TypeError):
        # Narrowed from a bare `except:`: only swallow the "data missing or
        # wrong shape" cases; keep partial results like the original did.
        pass
    return keyword_list
# -*- coding: utf-8 -*-
"""Console script for light_tester."""
import sys
import click
from .ledSolve import parseFile
click.disable_unicode_literals_warning = True
@click.command()
@click.option("--input", default=None, help="input URI (file or URL)")
def main(input=None):
    """Parse the file named by --input and report how many lights are on."""
    # Bug fix: the click-parsed option value was previously discarded and
    # overwritten with sys.argv[2], which broke `--input=<uri>` invocation
    # and raised IndexError whenever fewer than three argv entries existed.
    print("input", input)
    result = parseFile(input)
    print("There are ", result, "lights on")
    return 0
if __name__ == "__main__":
    sys.exit(main())
|
from functools import reduce
import warnings
import tensorflow as tf
from . import kernels
from ._settings import settings
from .quadrature import mvhermgauss
from numpy import pi as nppi
# Shared TF dtypes taken from the global gpflow settings.
int_type = settings.dtypes.int_type
float_type = settings.dtypes.float_type
class RBF(kernels.RBF):
    """Analytic kernel expectations for the RBF kernel under Gaussian inputs.

    Each eK* method integrates the kernel against q(x) = N(Xmu, Xcov)
    (TF1 graph-mode API: tf.cholesky / tf.matrix_* etc.).
    """
    def eKdiag(self, X, Xcov=None):
        """
        Also known as phi_0.
        :param X:
        :return: N
        """
        # <K_{x,x}> is constant for a stationary kernel, so Xcov is unused.
        return self.Kdiag(X)
    def eKxz(self, Z, Xmu, Xcov):
        """
        Also known as phi_1: <K_{x, Z}>_{q(x)}.
        :param Z: MxD inducing inputs
        :param Xmu: X mean (NxD)
        :param Xcov: NxDxD
        :return: NxM
        """
        # use only active dimensions
        Xcov = self._slice_cov(Xcov)
        Z, Xmu = self._slice(Z, Xmu)
        D = tf.shape(Xmu)[1]
        # Broadcast a scalar lengthscale to a D-vector when ARD is off.
        lengthscales = self.lengthscales if self.ARD else tf.zeros((D,), dtype=float_type) + self.lengthscales
        vec = tf.expand_dims(Xmu, 2) - tf.expand_dims(tf.transpose(Z), 0) # NxDxM
        chols = tf.cholesky(tf.expand_dims(tf.matrix_diag(lengthscales ** 2), 0) + Xcov)
        Lvec = tf.matrix_triangular_solve(chols, vec)
        q = tf.reduce_sum(Lvec ** 2, [1])
        chol_diags = tf.matrix_diag_part(chols) # N x D
        half_log_dets = tf.reduce_sum(tf.log(chol_diags), 1) - tf.reduce_sum(tf.log(lengthscales)) # N,
        return self.variance * tf.exp(-0.5 * q - tf.expand_dims(half_log_dets, 1))
    def exKxz_pairwise(self, Z, Xmu, Xcov):
        """
        <x_t K_{x_{t-1}, Z}>_q_{x_{t-1:t}}
        :param Z: MxD inducing inputs
        :param Xmu: X mean (N+1xD)
        :param Xcov: 2x(N+1)xDxD
        :return: NxMxD
        """
        with tf.control_dependencies([
            tf.assert_equal(tf.shape(Xmu)[1], tf.constant(self.input_dim, dtype=int_type),
                            message="Currently cannot handle slicing in exKxz."),
            tf.assert_equal(tf.shape(Xmu), tf.shape(Xcov)[1:3], name="assert_Xmu_Xcov_shape")
        ]):
            Xmu = tf.identity(Xmu)
        N = tf.shape(Xmu)[0] - 1
        D = tf.shape(Xmu)[1]
        # Split the stacked covariances: marginal cov and t/t-1 cross-cov.
        Xsigmb = tf.slice(Xcov, [0, 0, 0, 0], tf.stack([-1, N, -1, -1]))
        Xsigm = Xsigmb[0, :, :, :] # NxDxD
        Xsigmc = Xsigmb[1, :, :, :] # NxDxD
        Xmum = tf.slice(Xmu, [0, 0], tf.stack([N, -1]))
        Xmup = Xmu[1:, :]
        lengthscales = self.lengthscales if self.ARD else tf.zeros((D,), dtype=float_type) + self.lengthscales
        scalemat = tf.expand_dims(tf.matrix_diag(lengthscales ** 2.0), 0) + Xsigm # NxDxD
        det = tf.matrix_determinant(
            tf.expand_dims(tf.eye(tf.shape(Xmu)[1], dtype=float_type), 0) + tf.reshape(lengthscales ** -2.0, (1, 1, -1)) * Xsigm
        ) # N
        vec = tf.expand_dims(tf.transpose(Z), 0) - tf.expand_dims(Xmum, 2) # NxDxM
        smIvec = tf.matrix_solve(scalemat, vec) # NxDxM
        q = tf.reduce_sum(smIvec * vec, [1]) # NxM
        addvec = tf.matmul(smIvec, Xsigmc, transpose_a=True) + tf.expand_dims(Xmup, 1) # NxMxD
        return self.variance * addvec * tf.reshape(det ** -0.5, (N, 1, 1)) * tf.expand_dims(tf.exp(-0.5 * q), 2)
    def exKxz(self, Z, Xmu, Xcov):
        """
        It computes the expectation:
        <x_t K_{x_t, Z}>_q_{x_t}
        :param Z: MxD inducing inputs
        :param Xmu: X mean (NxD)
        :param Xcov: NxDxD
        :return: NxMxD
        """
        with tf.control_dependencies([
            tf.assert_equal(tf.shape(Xmu)[1], tf.constant(self.input_dim, dtype=int_type),
                            message="Currently cannot handle slicing in exKxz."),
            tf.assert_equal(tf.shape(Xmu), tf.shape(Xcov)[:2], name="assert_Xmu_Xcov_shape")
        ]):
            Xmu = tf.identity(Xmu)
        N = tf.shape(Xmu)[0]
        D = tf.shape(Xmu)[1]
        lengthscales = self.lengthscales if self.ARD else tf.zeros((D,), dtype=float_type) + self.lengthscales
        scalemat = tf.expand_dims(tf.matrix_diag(lengthscales ** 2.0), 0) + Xcov # NxDxD
        det = tf.matrix_determinant(
            tf.expand_dims(tf.eye(tf.shape(Xmu)[1], dtype=float_type), 0) + tf.reshape(lengthscales ** -2.0, (1, 1, -1)) * Xcov
        ) # N
        vec = tf.expand_dims(tf.transpose(Z), 0) - tf.expand_dims(Xmu, 2) # NxDxM
        smIvec = tf.matrix_solve(scalemat, vec) # NxDxM
        q = tf.reduce_sum(smIvec * vec, [1]) # NxM
        addvec = tf.matmul(smIvec, Xcov, transpose_a=True) + tf.expand_dims(Xmu, 1) # NxMxD
        return self.variance * addvec * tf.reshape(det ** -0.5, (N, 1, 1)) * tf.expand_dims(tf.exp(-0.5 * q), 2)
    def eKzxKxz(self, Z, Xmu, Xcov):
        """
        Also known as Phi_2.
        :param Z: MxD
        :param Xmu: X mean (NxD)
        :param Xcov: X covariance matrices (NxDxD)
        :return: NxMxM
        """
        # use only active dimensions
        Xcov = self._slice_cov(Xcov)
        Z, Xmu = self._slice(Z, Xmu)
        M = tf.shape(Z)[0]
        N = tf.shape(Xmu)[0]
        D = tf.shape(Xmu)[1]
        lengthscales = self.lengthscales if self.ARD else tf.zeros((D,), dtype=float_type) + self.lengthscales
        Kmms = tf.sqrt(self.K(Z, presliced=True)) / self.variance ** 0.5
        scalemat = tf.expand_dims(tf.eye(D, dtype=float_type), 0) + 2 * Xcov * tf.reshape(lengthscales ** -2.0, [1, 1, -1]) # NxDxD
        det = tf.matrix_determinant(scalemat)
        mat = Xcov + 0.5 * tf.expand_dims(tf.matrix_diag(lengthscales ** 2.0), 0) # NxDxD
        cm = tf.cholesky(mat) # NxDxD
        vec = 0.5 * (tf.reshape(tf.transpose(Z), [1, D, 1, M]) +
                     tf.reshape(tf.transpose(Z), [1, D, M, 1])) - tf.reshape(Xmu, [N, D, 1, 1]) # NxDxMxM
        svec = tf.reshape(vec, (N, D, M * M))
        ssmI_z = tf.matrix_triangular_solve(cm, svec) # NxDx(M*M)
        smI_z = tf.reshape(ssmI_z, (N, D, M, M)) # NxDxMxM
        fs = tf.reduce_sum(tf.square(smI_z), [1]) # NxMxM
        return self.variance ** 2.0 * tf.expand_dims(Kmms, 0) * tf.exp(-0.5 * fs) * tf.reshape(det ** -0.5, [N, 1, 1])
class Linear(kernels.Linear):
    """Analytic kernel expectations for the Linear kernel under Gaussian inputs.

    ARD (per-dimension variances) is not supported by these closed forms.
    """
    def eKdiag(self, X, Xcov):
        # <k(x,x)> = variance * (mu'mu + tr(Xcov)) per data point.
        if self.ARD:
            raise NotImplementedError
        # use only active dimensions
        X, _ = self._slice(X, None)
        Xcov = self._slice_cov(Xcov)
        return self.variance * (tf.reduce_sum(tf.square(X), 1) + tf.reduce_sum(tf.matrix_diag_part(Xcov), 1))
    def eKxz(self, Z, Xmu, Xcov):
        # Linear in x, so the expectation only involves the mean.
        if self.ARD:
            raise NotImplementedError
        # use only active dimensions
        Z, Xmu = self._slice(Z, Xmu)
        return self.variance * tf.matmul(Xmu, Z, transpose_b=True)
    def exKxz_pairwise(self, Z, Xmu, Xcov):
        # <x_t k(x_{t-1}, Z)> over consecutive time steps; uses the
        # cross-covariance slab Xcov[1] between t-1 and t.
        with tf.control_dependencies([
            tf.assert_equal(tf.shape(Xmu)[1], tf.constant(self.input_dim, int_type),
                            message="Currently cannot handle slicing in exKxz."),
            tf.assert_equal(tf.shape(Xmu), tf.shape(Xcov)[1:3], name="assert_Xmu_Xcov_shape")
        ]):
            Xmu = tf.identity(Xmu)
        N = tf.shape(Xmu)[0] - 1
        Xmum = Xmu[:-1, :]
        Xmup = Xmu[1:, :]
        op = tf.expand_dims(Xmum, 2) * tf.expand_dims(Xmup, 1) + Xcov[1, :-1, :, :] # NxDxD
        return self.variance * tf.matmul(tf.tile(tf.expand_dims(Z, 0), (N, 1, 1)), op)
    def exKxz(self, Z, Xmu, Xcov):
        # <x k(x, Z)> via the second moment mu mu' + Xcov.
        with tf.control_dependencies([
            tf.assert_equal(tf.shape(Xmu)[1], tf.constant(self.input_dim, int_type),
                            message="Currently cannot handle slicing in exKxz."),
            tf.assert_equal(tf.shape(Xmu), tf.shape(Xcov)[:2], name="assert_Xmu_Xcov_shape")
        ]):
            Xmu = tf.identity(Xmu)
        N = tf.shape(Xmu)[0]
        op = tf.expand_dims(Xmu, 2) * tf.expand_dims(Xmu, 1) + Xcov # NxDxD
        return self.variance * tf.matmul(tf.tile(tf.expand_dims(Z, 0), (N, 1, 1)), op)
    def eKzxKxz(self, Z, Xmu, Xcov):
        """
        exKxz
        :param Z: MxD
        :param Xmu: NxD
        :param Xcov: NxDxD
        :return:
        """
        # use only active dimensions
        Xcov = self._slice_cov(Xcov)
        Z, Xmu = self._slice(Z, Xmu)
        N = tf.shape(Xmu)[0]
        # Second moment of q(x); sandwiched between Z on both sides.
        mom2 = tf.expand_dims(Xmu, 1) * tf.expand_dims(Xmu, 2) + Xcov # NxDxD
        eZ = tf.tile(tf.expand_dims(Z, 0), (N, 1, 1)) # NxMxD
        return self.variance ** 2.0 * tf.matmul(tf.matmul(eZ, mom2), eZ, transpose_b=True)
class Add(kernels.Add):
    """
    Add
    This version of Add will call the corresponding kernel expectations for each of the summed kernels. This will be
    much better for kernels with analytically calculated kernel expectations. If quadrature is to be used, it's probably
    better to do quadrature on the summed kernel function using `gpflow.kernels.Add` instead.
    """
    def __init__(self, kern_list):
        # Registry of analytic cross-expectation implementations, keyed by
        # the (unordered) pair of kernel types they handle.
        self.crossexp_funcs = {frozenset([Linear, RBF]): self.Linear_RBF_eKxzKzx}
        # self.crossexp_funcs = {}
        kernels.Add.__init__(self, kern_list)
    def eKdiag(self, X, Xcov):
        # Expectation of a sum of kernels is the sum of the expectations.
        return reduce(tf.add, [k.eKdiag(X, Xcov) for k in self.kern_list])
    def eKxz(self, Z, Xmu, Xcov):
        return reduce(tf.add, [k.eKxz(Z, Xmu, Xcov) for k in self.kern_list])
    def exKxz_pairwise(self, Z, Xmu, Xcov):
        return reduce(tf.add, [k.exKxz_pairwise(Z, Xmu, Xcov) for k in self.kern_list])
    def exKxz(self, Z, Xmu, Xcov):
        return reduce(tf.add, [k.exKxz(Z, Xmu, Xcov) for k in self.kern_list])
    def eKzxKxz(self, Z, Xmu, Xcov):
        # Per-kernel ("diagonal") terms of the expanded square.
        all_sum = reduce(tf.add, [k.eKzxKxz(Z, Xmu, Xcov) for k in self.kern_list])
        if self.on_separate_dimensions and Xcov.get_shape().ndims == 2:
            # If we're on separate dimensions and the covariances are diagonal, we don't need Cov[Kzx1Kxz2].
            crossmeans = []
            eKxzs = [k.eKxz(Z, Xmu, Xcov) for k in self.kern_list]
            for i, Ka in enumerate(eKxzs):
                for Kb in eKxzs[i + 1:]:
                    op = Ka[:, None, :] * Kb[:, :, None]
                    # Symmetrise: the cross term appears in both orders.
                    ct = tf.transpose(op, [0, 2, 1]) + op
                    crossmeans.append(ct)
            crossmean = reduce(tf.add, crossmeans)
            return all_sum + crossmean
        else:
            # General case: full cross expectations for every kernel pair —
            # analytically when registered, otherwise via quadrature.
            crossexps = []
            for i, ka in enumerate(self.kern_list):
                for kb in self.kern_list[i + 1:]:
                    try:
                        crossexp_func = self.crossexp_funcs[frozenset([type(ka), type(kb)])]
                        crossexp = crossexp_func(ka, kb, Z, Xmu, Xcov)
                    except (KeyError, NotImplementedError) as e:
                        print(str(e))
                        crossexp = self.quad_eKzx1Kxz2(ka, kb, Z, Xmu, Xcov)
                    crossexps.append(crossexp)
            return all_sum + reduce(tf.add, crossexps)
    def Linear_RBF_eKxzKzx(self, Ka, Kb, Z, Xmu, Xcov):
        # Closed-form cross expectation between a Linear and an RBF kernel
        # (in either argument order).
        Xcov = self._slice_cov(Xcov)
        Z, Xmu = self._slice(Z, Xmu)
        lin, rbf = (Ka, Kb) if type(Ka) is Linear else (Kb, Ka)
        assert type(lin) is Linear, "%s is not %s" % (str(type(lin)), str(Linear))
        assert type(rbf) is RBF, "%s is not %s" % (str(type(rbf)), str(RBF))
        if lin.ARD or type(lin.active_dims) is not slice or type(rbf.active_dims) is not slice:
            raise NotImplementedError("Active dims and/or Linear ARD not implemented. Switching to quadrature.")
        D = tf.shape(Xmu)[1]
        M = tf.shape(Z)[0]
        N = tf.shape(Xmu)[0]
        lengthscales = rbf.lengthscales if rbf.ARD else tf.zeros((D,), dtype=float_type) + rbf.lengthscales
        lengthscales2 = lengthscales ** 2.0
        const = rbf.variance * lin.variance * tf.reduce_prod(lengthscales)
        gaussmat = Xcov + tf.matrix_diag(lengthscales2)[None, :, :] # NxDxD
        det = tf.matrix_determinant(gaussmat) ** -0.5 # N
        cgm = tf.cholesky(gaussmat) # NxDxD
        tcgm = tf.tile(cgm[:, None, :, :], [1, M, 1, 1])
        vecmin = Z[None, :, :] - Xmu[:, None, :] # NxMxD
        d = tf.matrix_triangular_solve(tcgm, vecmin[:, :, :, None]) # NxMxDx1
        exp = tf.exp(-0.5 * tf.reduce_sum(d ** 2.0, [2, 3])) # NxM
        # exp = tf.Print(exp, [tf.shape(exp)])
        vecplus = (Z[None, :, :, None] / lengthscales2[None, None, :, None] +
                   tf.matrix_solve(Xcov, Xmu[:, :, None])[:, None, :, :]) # NxMxDx1
        mean = tf.cholesky_solve(tcgm,
                                 tf.matmul(tf.tile(Xcov[:, None, :, :], [1, M, 1, 1]), vecplus)
                                 )[:, :, :, 0] * lengthscales2[None, None, :] # NxMxD
        a = tf.matmul(tf.tile(Z[None, :, :], [N, 1, 1]),
                      mean * exp[:, :, None] * det[:, None, None] * const, transpose_b=True)
        # Symmetrise in the two kernels by adding the transpose.
        return a + tf.transpose(a, [0, 2, 1])
    def quad_eKzx1Kxz2(self, Ka, Kb, Z, Xmu, Xcov):
        # Quadrature for Cov[(Kzx1 - eKzx1)(kxz2 - eKxz2)]
        self._check_quadrature()
        warnings.warn("gpflow.ekernels.Add: Using numerical quadrature for kernel expectation cross terms.")
        Xmu, Z = self._slice(Xmu, Z)
        Xcov = self._slice_cov(Xcov)
        N, M, HpowD = tf.shape(Xmu)[0], tf.shape(Z)[0], self.num_gauss_hermite_points ** self.input_dim
        xn, wn = mvhermgauss(self.num_gauss_hermite_points, self.input_dim)
        # transform points based on Gaussian parameters
        cholXcov = tf.cholesky(Xcov) # NxDxD
        Xt = tf.matmul(cholXcov, tf.tile(xn[None, :, :], (N, 1, 1)), transpose_b=True) # NxDxH**D
        X = 2.0 ** 0.5 * Xt + tf.expand_dims(Xmu, 2) # NxDxH**D
        Xr = tf.reshape(tf.transpose(X, [2, 0, 1]), (-1, self.input_dim)) # (H**D*N)xD
        cKa, cKb = [tf.reshape(
            k.K(tf.reshape(Xr, (-1, self.input_dim)), Z, presliced=False),
            (HpowD, N, M)
        ) - k.eKxz(Z, Xmu, Xcov)[None, :, :] for k in (Ka, Kb)] # Centred Kxz
        eKa, eKb = Ka.eKxz(Z, Xmu, Xcov), Kb.eKxz(Z, Xmu, Xcov)
        wr = wn * nppi ** (-self.input_dim * 0.5)
        cc = tf.reduce_sum(cKa[:, :, None, :] * cKb[:, :, :, None] * wr[:, None, None, None], 0)
        cm = eKa[:, None, :] * eKb[:, :, None]
        return cc + tf.transpose(cc, [0, 2, 1]) + cm + tf.transpose(cm, [0, 2, 1])
class Prod(kernels.Prod):
    """
    Kernel expectations for a product of kernels, computed as the product
    of the per-kernel expectations. Only supported when the kernels act on
    separate dimensions and Xcov is diagonal (both enforced below).
    """
    def eKdiag(self, Xmu, Xcov):
        if not self.on_separate_dimensions:
            raise NotImplementedError("Prod currently needs to be defined on separate dimensions.") # pragma: no cover
        with tf.control_dependencies([
            tf.assert_equal(tf.rank(Xcov), 2,
                            message="Prod currently only supports diagonal Xcov.", name="assert_Xcov_diag"),
        ]):
            return reduce(tf.multiply, [k.eKdiag(Xmu, Xcov) for k in self.kern_list])
    def eKxz(self, Z, Xmu, Xcov):
        if not self.on_separate_dimensions:
            raise NotImplementedError("Prod currently needs to be defined on separate dimensions.") # pragma: no cover
        with tf.control_dependencies([
            tf.assert_equal(tf.rank(Xcov), 2,
                            message="Prod currently only supports diagonal Xcov.", name="assert_Xcov_diag"),
        ]):
            return reduce(tf.multiply, [k.eKxz(Z, Xmu, Xcov) for k in self.kern_list])
    def eKzxKxz(self, Z, Xmu, Xcov):
        if not self.on_separate_dimensions:
            raise NotImplementedError("Prod currently needs to be defined on separate dimensions.") # pragma: no cover
        with tf.control_dependencies([
            tf.assert_equal(tf.rank(Xcov), 2,
                            message="Prod currently only supports diagonal Xcov.", name="assert_Xcov_diag"),
        ]):
            return reduce(tf.multiply, [k.eKzxKxz(Z, Xmu, Xcov) for k in self.kern_list])
|
def ejercicio11():
    """Read two integers and print their greatest common divisor (GCD).

    The original repeated-addition implementation was asymmetric between
    its two branches: in the numero2 > numero1 branch the inner loop
    compared against numero1 but updated numero2, so it could print a
    wrong result or never terminate. It also looped forever for zero or
    negative inputs. Replaced with the Euclidean algorithm, keeping the
    same prompts and output format.
    """
    numero1 = int(input("Escriba un numero: "))
    numero2 = int(input("Escriba otro numero: "))
    # Euclidean algorithm: gcd(a, b) == gcd(b, a mod b); terminates for
    # any pair of non-negative integers (abs() handles negative input).
    a, b = abs(numero1), abs(numero2)
    while b:
        a, b = b, a % b
    print("El mcd es ", a)
from .fbgemm import get_fbgemm_backend_config_dict
def validate_backend_config_dict(backend_config_dict):
    """Return True when the backend config declares quantization patterns."""
    required_key = "quant_patterns"
    return required_key in backend_config_dict
|
# Copyright 2020, Schuberg Philis B.V
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import itemgetter
from cosmicops.log import logging
from .host import CosmicHost
from .object import CosmicObject
from .storagepool import CosmicStoragePool
class CosmicCluster(CosmicObject):
    """A Cosmic cluster: gives access to its hosts and storage pools."""

    def get_all_hosts(self):
        """Return every host of this cluster as a list of CosmicHost objects."""
        host_data = self._ops.cs.listHosts(fetch_list=True, clusterid=self['id'], listall='true')
        return [CosmicHost(self._ops, data) for data in host_data]

    def get_storage_pools(self, scope=None):
        """Return the cluster's storage pools, optionally filtered by scope."""
        query = {'fetch_list': True, 'clusterid': self['id'], 'listall': 'true'}
        if scope is not None:
            query['scope'] = scope
        pool_data = self._ops.cs.listStoragePools(**query)
        return [CosmicStoragePool(self._ops, data) for data in pool_data]

    def find_migration_host(self, vm):
        """Pick a suitable target host for migrating `vm`, or None.

        Hosts are tried in ascending order of allocated memory; the VM's
        current host, disabled hosts, hosts that are not up and hosts
        without enough free memory are skipped.
        """
        candidates = sorted(self.get_all_hosts(), key=itemgetter('memoryallocated'))
        for candidate in candidates:
            if candidate['name'] == vm['hostname']:
                continue
            if candidate['resourcestate'] != 'Enabled':
                continue
            if candidate['state'] != 'Up':
                continue
            # Free memory in MiB (host figures are reported in bytes).
            free_memory = (candidate['memorytotal'] - candidate['memoryallocated']) / 1048576
            if 'instancename' not in vm:
                # System VMs carry no memory figure; look it up via the
                # service offering, falling back to 1024 MiB.
                service_offering = self._ops.get_service_offering(id=vm['serviceofferingid'], system=True)
                vm['memory'] = service_offering['memory'] if service_offering else 1024
            if free_memory < vm['memory']:
                logging.warning(f"Skipping '{candidate['name']}' as it does not have enough memory available")
                continue
            return candidate
        return None
|
################################################################################
#
# Copyright 2021-2022 Rocco Matano
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
from types import SimpleNamespace as _namespace
from .wtypes import *
from . import (
ref,
kernel,
raise_if,
raise_on_zero,
raise_on_err,
fun_fact,
WAIT_FAILED,
GWL_STYLE,
GWL_EXSTYLE,
INPUT_KEYBOARD,
KEYEVENTF_KEYUP,
MONITOR_DEFAULTTOPRIMARY,
SWP_NOSIZE,
SWP_NOZORDER,
GMEM_MOVEABLE,
CF_UNICODETEXT,
LR_DEFAULTSIZE,
SPI_GETNONCLIENTMETRICS,
SPI_SETNONCLIENTMETRICS,
SPI_GETWHEELSCROLLLINES,
SPI_SETWHEELSCROLLLINES,
SPI_GETWORKAREA,
SPIF_UPDATEINIFILE,
SPIF_SENDCHANGE,
)
from .ntdll import proc_path_from_pid
# Module handle for user32.dll; every wrapper below calls through it.
_usr = ctypes.WinDLL("user32.dll")
################################################################################
_GetWindowThreadProcessId = fun_fact(
    _usr.GetWindowThreadProcessId, (DWORD, HANDLE, PDWORD)
)
def GetWindowThreadProcessId(hwnd):
    # Return (thread id, process id) of the thread that created hwnd.
    pid = DWORD()
    tid = _GetWindowThreadProcessId(hwnd, ref(pid))
    return tid, pid.value
################################################################################
_GetWindowTextLength = fun_fact(
    _usr.GetWindowTextLengthW, (INT, HWND)
)
def GetWindowTextLength(hwnd):
    # Length of the window text in characters (without the terminator).
    return _GetWindowTextLength(hwnd)
################################################################################
_GetWindowText = fun_fact(_usr.GetWindowTextW, (INT, HWND, PWSTR, INT))
def GetWindowText(hwnd):
    # Return the window text as a str.
    slen = GetWindowTextLength(hwnd)
    buf = ctypes.create_unicode_buffer(slen + 1)
    res = _GetWindowText(hwnd, buf, slen + 1)
    # NOTE(review): raises when the text length changed between the two
    # API calls — that benign race is treated as an error here.
    raise_if(res != slen)
    return buf.value
################################################################################
_SetWindowText = fun_fact(_usr.SetWindowTextW, (BOOL, HWND, PWSTR))
def SetWindowText(hwnd, txt):
    # Set the window text; raises on API failure.
    raise_on_zero(_SetWindowText(hwnd, txt))
################################################################################
_GetClassName = fun_fact(
    _usr.GetClassNameW, (INT, HWND, PWSTR, INT)
)
def GetClassName(hwnd):
    # Return the window class name, doubling the buffer until it fits.
    size = 32
    while True:
        size *= 2
        buf = ctypes.create_unicode_buffer(size)
        res = _GetClassName(hwnd, buf, buf._length_)
        raise_on_zero(res)
        # res == size - 1 means the name may be truncated -> grow and retry.
        if res != size - 1:
            return buf.value
################################################################################
_GetWindowLong = fun_fact(_usr.GetWindowLongW, (LONG, HWND, INT))
def GetWindowLong(hwnd, idx):
    # Read a 32 bit window attribute (e.g. GWL_STYLE, GWL_EXSTYLE).
    return _GetWindowLong(hwnd, idx)
################################################################################
_GetWindowLongPtr = fun_fact(
    _usr.GetWindowLongPtrW, (LONG_PTR, HWND, INT)
)
def GetWindowLongPtr(hwnd, idx):
    # Read a pointer sized window attribute.
    return _GetWindowLongPtr(hwnd, idx)
################################################################################
_SetWindowLong = fun_fact(
    _usr.SetWindowLongW, (LONG, HWND, INT, LONG)
)
def SetWindowLong(hwnd, idx, value):
    # Write a 32 bit window attribute; returns the raw API result.
    return _SetWindowLong(hwnd, idx, value)
################################################################################
_SetWindowLongPtr = fun_fact(
    _usr.SetWindowLongPtrW, (LONG_PTR, HWND, INT, LONG_PTR)
)
def SetWindowLongPtr(hwnd, idx, value):
    # Write a pointer sized window attribute; returns the raw API result.
    return _SetWindowLongPtr(hwnd, idx, value)
################################################################################
# Callback type shared by all window enumeration APIs below.
_EnumWindowsCallback = ctypes.WINFUNCTYPE(
    BOOL,
    HWND,
    CallbackContextPtr
)
@_EnumWindowsCallback
def _EnumWndCb(hwnd, ctxt):
    # Trampoline: unpack the context and forward to the python callback.
    cbc = ctxt.contents
    res = cbc.callback(hwnd, cbc.context)
    # keep on enumerating if the callback fails to return a value
    return res if res is not None else True
################################################################################
_EnumWindows = fun_fact(
    _usr.EnumWindows, (BOOL, _EnumWindowsCallback, CallbackContextPtr)
)
def EnumWindows(callback, context):
    # Call callback(hwnd, context) for every top level window.
    cbc = CallbackContext(callback, context)
    _EnumWindows(_EnumWndCb, ref(cbc))
################################################################################
_EnumChildWindows = fun_fact(
    _usr.EnumChildWindows,
    (BOOL, HWND, _EnumWindowsCallback, CallbackContextPtr)
)
def EnumChildWindows(hwnd, callback, context):
    # Call callback(hwnd, context) for every child window of hwnd.
    cbc = CallbackContext(callback, context)
    _EnumChildWindows(hwnd, _EnumWndCb, ref(cbc))
################################################################################
_EnumThreadWindows = fun_fact(
    _usr.EnumThreadWindows,
    (BOOL, DWORD, _EnumWindowsCallback, CallbackContextPtr)
)
def EnumThreadWindows(tid, callback, context):
    # Call callback(hwnd, context) for every window owned by thread tid.
    cbc = CallbackContext(callback, context)
    _EnumThreadWindows(tid, _EnumWndCb, ref(cbc))
################################################################################
def _get_wnd_lst_cb(hwnd, wnd_lst):
    # Enumeration callback that appends a descriptive namespace record
    # for every window it is invoked with.
    _, pid = GetWindowThreadProcessId(hwnd)
    entry = _namespace(
        hwnd=hwnd,
        text=GetWindowText(hwnd),
        pid=pid,
        pname=proc_path_from_pid(pid),
        cls=GetClassName(hwnd),
        style=GetWindowLong(hwnd, GWL_STYLE),
        exstyle=GetWindowLong(hwnd, GWL_EXSTYLE)
    )
    wnd_lst.append(entry)
    return True
def get_window_list():
    """Snapshot of all top level windows as namespace records."""
    collected = []
    EnumWindows(_get_wnd_lst_cb, collected)
    return collected
def get_child_window_list(hwnd):
    """Snapshot of all child windows of hwnd as namespace records."""
    collected = []
    EnumChildWindows(hwnd, _get_wnd_lst_cb, collected)
    return collected
def get_thread_window_list(tid):
    """Snapshot of all windows owned by thread tid as namespace records."""
    collected = []
    EnumThreadWindows(tid, _get_wnd_lst_cb, collected)
    return collected
################################################################################
_WaitForInputIdle = fun_fact(
    _usr.WaitForInputIdle, (DWORD, HANDLE, DWORD)
)
def WaitForInputIdle(proc, timeout):
    # Wait (up to `timeout`) for the process; raises on WAIT_FAILED,
    # otherwise returns the raw wait result.
    res = _WaitForInputIdle(proc, timeout)
    raise_if(res == WAIT_FAILED)
    return res
################################################################################
_PostMessage = fun_fact(
    _usr.PostMessageW,
    (BOOL, HWND, UINT, UINT_PTR, LONG_PTR)
)
def PostMessage(hwnd, msg, wp, lp):
    # Queue a message without waiting for it to be processed.
    raise_on_zero(_PostMessage(hwnd, msg, wp, lp))
################################################################################
PostQuitMessage = fun_fact(_usr.PostQuitMessage, (None, INT))
################################################################################
_SendMessage = fun_fact(
    _usr.SendMessageW,
    (LONG_PTR, HWND, UINT, UINT_PTR, LONG_PTR)
)
def SendMessage(hwnd, msg, wp, lp):
    # Send a message synchronously and return the raw result.
    return _SendMessage(hwnd, msg, wp, lp)
################################################################################
_SendMessageTimeout = fun_fact(
    _usr.SendMessageTimeoutW, (
        LONG_PTR,
        HWND,
        UINT,
        UINT_PTR,
        LONG_PTR,
        UINT,
        UINT,
        PDWORD
    )
)
def SendMessageTimeout(hwnd, msg, wp, lp, flags, timeout):
    # Like SendMessage, but gives up after `timeout`; returns the DWORD
    # the API stores through its out parameter; raises on failure.
    result = DWORD()
    raise_on_zero(
        _SendMessageTimeout(
            hwnd,
            msg,
            wp,
            lp,
            flags,
            timeout,
            ref(result)
        )
    )
    return result.value
################################################################################
_GetWindow = fun_fact(_usr.GetWindow, (HWND, HWND, UINT))
def GetWindow(hwnd, cmd):
    # Retrieve a window related to hwnd as selected by cmd (GW_* value).
    return _GetWindow(hwnd, cmd)
################################################################################
_GetAsyncKeyState = fun_fact(_usr.GetAsyncKeyState, (SHORT, INT))
def GetAsyncKeyState(vkey):
    # Raw async key state for a virtual key code.
    return _GetAsyncKeyState(vkey)
################################################################################
_GetWindowRect = fun_fact(_usr.GetWindowRect, (BOOL, HWND, PRECT))
def GetWindowRect(hwnd):
    # Window rectangle; raises on API failure.
    rc = RECT()
    raise_on_zero(_GetWindowRect(hwnd, ref(rc)))
    return rc
################################################################################
_GetClientRect = fun_fact(_usr.GetClientRect, (BOOL, HWND, PRECT))
def GetClientRect(hwnd):
    # Client area rectangle; raises on API failure.
    rc = RECT()
    raise_on_zero(_GetClientRect(hwnd, ref(rc)))
    return rc
################################################################################
_AdjustWindowRectEx = fun_fact(
    _usr.AdjustWindowRectEx, (BOOL, PRECT, DWORD, BOOL, DWORD)
)
def AdjustWindowRectEx(rc, style, has_menu, exstyle):
    # Return an adjusted copy of rc for the given styles; the input rect
    # is left untouched.
    new_rect = rc.copy()
    raise_on_zero(_AdjustWindowRectEx(ref(new_rect), style, has_menu, exstyle))
    return new_rect
################################################################################
class WINDOWPLACEMENT(ctypes.Structure):
    # ctypes mirror of the win32 WINDOWPLACEMENT structure.
    _fields_ = (
        ("length", UINT),
        ("flags", UINT),
        ("showCmd", UINT),
        ("MinPosition", POINT),
        ("MaxPosition", POINT),
        ("NormalPosition", RECT),
    )
    def __init__(self, f=0, s=1, mi=(0, 0), ma=(0, 0), no=(0, 0, 0, 0)):
        # `length` must always hold the structure size.
        self.length = ctypes.sizeof(WINDOWPLACEMENT)
        self.flags = f
        self.showCmd = s
        self.MinPosition = mi
        self.MaxPosition = ma
        self.NormalPosition = no
    def __repr__(self):
        # Mirrors the constructor argument order.
        c = self.__class__.__name__
        l = self.length
        f = self.flags
        s = self.showCmd
        mi = f"({self.MinPosition.x}, {self.MinPosition.y})"
        ma = f"({self.MaxPosition.x}, {self.MaxPosition.y})"
        no = (
            f"({self.NormalPosition.left}, {self.NormalPosition.top}, " +
            f"{self.NormalPosition.right}, {self.NormalPosition.bottom})"
        )
        return f"{c}({l}, {f}, {s}, {mi}, {ma}, {no})"
PWINDOWPLACEMENT = POINTER(WINDOWPLACEMENT)
################################################################################
_GetWindowPlacement = fun_fact(
    _usr.GetWindowPlacement, (BOOL, HWND, PWINDOWPLACEMENT)
)
def GetWindowPlacement(hwnd):
    # Retrieve the WINDOWPLACEMENT of a window; raises on failure.
    wpt = WINDOWPLACEMENT()
    raise_on_zero(_GetWindowPlacement(hwnd, ref(wpt)))
    return wpt
################################################################################
_SetWindowPlacement = fun_fact(
    _usr.SetWindowPlacement, (BOOL, HWND, PWINDOWPLACEMENT)
)
def SetWindowPlacement(hwnd, wpt):
    # Apply a WINDOWPLACEMENT to a window; raises on failure.
    raise_on_zero(_SetWindowPlacement(hwnd, ref(wpt)))
################################################################################
_SetWindowPos = fun_fact(
    _usr.SetWindowPos, (BOOL, HWND, HWND, INT, INT, INT, INT, UINT)
)
def SetWindowPos(hwnd, ins_after, x, y, cx, cy, flags):
    # Change position, size and z-order in one call (see SWP_* flags).
    raise_on_zero(_SetWindowPos(hwnd, ins_after, x, y, cx, cy, flags))
################################################################################
_AttachThreadInput = fun_fact(
    _usr.AttachThreadInput, (BOOL, DWORD, DWORD, BOOL)
)
def AttachThreadInput(id_attach, id_attach_to, do_attach):
    # Attach or detach the input processing of two threads.
    raise_on_zero(_AttachThreadInput(id_attach, id_attach_to, do_attach))
################################################################################
_BringWindowToTop = fun_fact(_usr.BringWindowToTop, (BOOL, HWND))
def BringWindowToTop(hwnd):
    """Raise a window to the top of the z-order; raises on API failure."""
    raise_on_zero(_BringWindowToTop(hwnd))
def to_top_maybe_attach(hwnd):
    """Bring a window to the top, temporarily attaching our input queue
    to the window's thread when that thread is not our own."""
    wnd_tid, _ = GetWindowThreadProcessId(hwnd)
    own_tid = kernel.GetCurrentThreadId()
    foreign = wnd_tid != own_tid
    if foreign:
        AttachThreadInput(own_tid, wnd_tid, True)
    BringWindowToTop(hwnd)
    if foreign:
        AttachThreadInput(own_tid, wnd_tid, False)
################################################################################
_SetActiveWindow = fun_fact(_usr.SetActiveWindow, (HWND, HWND))
def SetActiveWindow(hwnd):
    # Activate a window; returns the raw API result (a window handle).
    return _SetActiveWindow(hwnd)
################################################################################
_MessageBox = fun_fact(
    _usr.MessageBoxW, (INT, HWND, PWSTR, PWSTR, UINT)
)
def MessageBox(hwnd, text, caption, flags):
    # Show a message box; raises when the API reports failure (zero),
    # otherwise returns the API result.
    res = _MessageBox(hwnd, text, caption, flags)
    raise_on_zero(res)
    return res
################################################################################
class MOUSEINPUT(ctypes.Structure):
    # ctypes mirror of win32 MOUSEINPUT.
    _fields_ = (
        ("dx", LONG),
        ("dy", LONG),
        ("mouseData", DWORD),
        ("dwFlags", DWORD),
        ("time", DWORD),
        ("dwExtraInfo", UINT_PTR),
    )
class KEYBDINPUT(ctypes.Structure):
    # ctypes mirror of win32 KEYBDINPUT.
    _fields_ = (
        ("wVk", WORD),
        ("wScan", WORD),
        ("dwFlags", DWORD),
        ("time", DWORD),
        ("dwExtraInfo", UINT_PTR),
    )
class HARDWAREINPUT(ctypes.Structure):
    # ctypes mirror of win32 HARDWAREINPUT.
    _fields_ = (
        ("uMsg", DWORD),
        ("wParamL", WORD),
        ("wParamH", WORD),
    )
class _DUMMY_INPUT_UNION(ctypes.Union):
    # Anonymous union inside INPUT: mouse, keyboard or hardware record.
    _fields_ = (
        ("mi", MOUSEINPUT),
        ("ki", KEYBDINPUT),
        ("hi", HARDWAREINPUT),
    )
class INPUT(ctypes.Structure):
    # ctypes mirror of win32 INPUT: `type` selects the union member.
    _anonymous_ = ("anon",)
    _fields_ = (
        ("type", DWORD),
        ("anon", _DUMMY_INPUT_UNION),
    )
    def copy(self):
        # Byte-wise copy of this record.
        other = INPUT()
        ctypes.memmove(ref(other), ref(self), ctypes.sizeof(INPUT))
        return other
    def as_keyup(self):
        # Derive the matching key-up event from a keyboard input record.
        if not self.type == INPUT_KEYBOARD:
            raise ValueError("not INPUT_KEYBOARD")
        up = self.copy()
        up.ki.dwFlags |= KEYEVENTF_KEYUP
        return up
PINPUT = POINTER(INPUT)
################################################################################
def kb_input(vk, scan, flags=0):
    """Build a keyboard INPUT record for a virtual key / scan code pair."""
    entry = INPUT()
    entry.type = INPUT_KEYBOARD
    # ctypes sub-structures share the parent's buffer, so mutating the
    # view updates `entry` in place.
    kbd = entry.ki
    kbd.wVk, kbd.wScan, kbd.dwFlags = vk, scan, flags
    return entry
################################################################################
_SendInput = fun_fact(_usr.SendInput, (UINT, UINT, PINPUT, INT))

def SendInput(inputs):
    """Synthesize input events.

    `inputs` is either a single INPUT record or a sequence of them.
    An empty sequence is a no-op; anything else raises TypeError.
    API failures are raised via raise_on_zero.
    """
    if isinstance(inputs, INPUT):
        num, ptr = 1, ref(inputs)
    else:
        try:
            num = len(inputs)
            if not num:
                return
            arr = (INPUT * num)(*inputs)
            ptr = ctypes.cast(arr, PINPUT)
        except Exception as e:
            # chain the original exception so its cause stays visible (B904)
            raise TypeError(f"expected INPUT or list of INPUTs: {e}") from e
    raise_on_zero(_SendInput(num, ptr, ctypes.sizeof(INPUT)))
################################################################################
_ExitWindowsEx = fun_fact(_usr.ExitWindowsEx, (BOOL, UINT, DWORD))
def ExitWindowsEx(flags, reason):
    # Log off / shut down / reboot depending on `flags`; raises on failure.
    raise_on_zero(_ExitWindowsEx(flags, reason))
################################################################################
_LockWorkStation = fun_fact(_usr.LockWorkStation, (BOOL,))
def LockWorkStation():
    # Lock the workstation; raises on API failure.
    raise_on_zero(_LockWorkStation())
################################################################################
_GetShellWindow = fun_fact(_usr.GetShellWindow, (HWND,))
def GetShellWindow():
    # Handle of the shell's desktop window.
    return _GetShellWindow()
################################################################################
_MonitorFromWindow = fun_fact(_usr.MonitorFromWindow, (HANDLE, HWND, DWORD))
def MonitorFromWindow(hwnd, flags=MONITOR_DEFAULTTOPRIMARY):
    # Monitor handle for the given window; by default falls back to the
    # primary monitor (MONITOR_DEFAULTTOPRIMARY).
    return _MonitorFromWindow(hwnd, flags)
################################################################################
class MONITORINFOEX(ctypes.Structure):
    # ctypes mirror of win32 MONITORINFOEXW.
    _fields_ = (
        ("cbSize", DWORD),
        ("rcMonitor", RECT),
        ("rcWork", RECT),
        ("dwFlags", DWORD),
        ("szDevice", WCHAR * 32),
    )
    def __init__(self):
        # cbSize has to be preset to the structure size for the API.
        self.cbSize = ctypes.sizeof(self)
PMONITORINFOEX = POINTER(MONITORINFOEX)
################################################################################
_GetMonitorInfo = fun_fact(_usr.GetMonitorInfoW, (BOOL, HANDLE, PMONITORINFOEX))
def GetMonitorInfo(hmon):
    # Monitor info (monitor and work-area rects, device name) for hmon.
    mi = MONITORINFOEX()
    raise_on_zero(_GetMonitorInfo(hmon, ref(mi)))
    return mi
################################################################################
def get_wnd_center(hwnd=None):
    """Center point of a window, or of the primary monitor for hwnd=None."""
    if hwnd is not None:
        return GetWindowRect(hwnd).center
    return GetMonitorInfo(MonitorFromWindow(None)).rcMonitor.center
################################################################################
def center_wnd(to_be_centered, center_on=None):
    """Move a window (size unchanged) so its center matches the center of
    `center_on`, or the primary monitor's center when none is given."""
    target_x, target_y = get_wnd_center(center_on)
    rc = GetWindowRect(to_be_centered)
    shift_x = target_x - (rc.left + rc.right) // 2
    shift_y = target_y - (rc.top + rc.bottom) // 2
    SetWindowPos(
        to_be_centered,
        None,
        rc.left + shift_x,
        rc.top + shift_y,
        0,
        0,
        SWP_NOSIZE | SWP_NOZORDER
    )
################################################################################
def start_centered(arglist):
    """Start a process and center the windows of its initial thread."""
    def _center_cb(hwnd, _):
        center_wnd(hwnd)
        return True
    with kernel.create_process(arglist) as pi:
        WaitForInputIdle(pi.hProcess, 10000)
        EnumThreadWindows(pi.dwThreadId, _center_cb, None)
################################################################################
_LoadCursor = fun_fact(_usr.LoadCursorW, (HANDLE, HANDLE, PWSTR))
_LoadIcon = fun_fact(_usr.LoadIconW, (HANDLE, HANDLE, PWSTR))

def _load_cursor_or_icon(api, hinst, cname):
    # Shared helper (was duplicated verbatim in LoadCursor and LoadIcon):
    # integer ids below 64K are resource ordinals and are smuggled through
    # the PWSTR parameter (MAKEINTRESOURCE style).
    if isinstance(cname, int) and cname < 2**16:
        cname = ctypes.cast(cname, PWSTR)
    res = api(hinst, cname)
    raise_on_zero(res)
    return res

def LoadCursor(hinst, cname):
    """Load a cursor by name or ordinal; raises on failure."""
    return _load_cursor_or_icon(_LoadCursor, hinst, cname)

def LoadIcon(hinst, cname):
    """Load an icon by name or ordinal; raises on failure."""
    return _load_cursor_or_icon(_LoadIcon, hinst, cname)
################################################################################
_DefWindowProc = fun_fact(
    _usr.DefWindowProcW, (LRESULT, HWND, UINT, WPARAM, LPARAM)
)
def DefWindowProc(hwnd, msg, wp, lp):
    # Forward a message to the default window procedure.
    return _DefWindowProc(hwnd, msg, wp, lp)
################################################################################
class CREATESTRUCT(ctypes.Structure):
    # ctypes mirror of win32 CREATESTRUCTW.
    _fields_ = (
        ("lpCreateParams", PVOID),
        ("hInstance", HANDLE),
        ("hMenu", HANDLE),
        ("hwndParent", HWND),
        ("cx", INT),
        ("cy", INT),
        ("x", INT),
        ("y", INT),
        ("style", LONG),
        ("lpszName", PWSTR),
        ("lpszClass", PWSTR),
        ("dwExStyle", DWORD),
    )
################################################################################
# Callback type for window procedures assigned to WNDCLASS.lpfnWndProc.
WNDPROC = ctypes.WINFUNCTYPE(
    LRESULT,
    HWND,
    UINT,
    WPARAM,
    LPARAM
)
class WNDCLASS(ctypes.Structure):
    # ctypes mirror of win32 WNDCLASSW.
    _fields_ = (
        ("style", UINT),
        ("lpfnWndProc", WNDPROC),
        ("cbClsExtra", INT),
        ("cbWndExtra", INT),
        ("hInstance", HANDLE),
        ("hIcon", HANDLE),
        ("hCursor", HANDLE),
        ("hbrBackground", HANDLE),
        ("lpszMenuName", PWSTR),
        ("lpszClassName", PWSTR),
    )
PWNDCLASS = POINTER(WNDCLASS)
################################################################################
class MSG(ctypes.Structure):
    # ctypes mirror of win32 MSG.
    _fields_ = (
        ("hWnd", HWND),
        ("message", UINT),
        ("wParam", WPARAM),
        ("lParam", LPARAM),
        ("time", DWORD),
        ("pt", POINT)
    )
PMSG = POINTER(MSG)
################################################################################
class PAINTSTRUCT(ctypes.Structure):
    # ctypes mirror of win32 PAINTSTRUCT.
    _fields_ = (
        ("hdc", HANDLE),
        ("fErase", BOOL),
        ("rcPaint", RECT),
        ("fRestore", BOOL),
        ("fIncUpdate", BOOL),
        ("rgbReserved", BYTE * 32),
    )
PPAINTSTRUCT = POINTER(PAINTSTRUCT)
################################################################################
_GetClassInfo = fun_fact(_usr.GetClassInfoW, (BOOL, HANDLE, PWSTR, PWNDCLASS))
def GetClassInfo(hinst, cname):
    # Look up the WNDCLASS registered under cname for module hinst.
    wclass = WNDCLASS()
    raise_on_zero(_GetClassInfo(hinst, cname, ref(wclass)))
    return wclass
################################################################################
_RegisterClass = fun_fact(_usr.RegisterClassW, (WORD, PWNDCLASS))
def RegisterClass(wclass):
    # Register a window class; returns the non-zero WORD (atom) result.
    res = _RegisterClass(ref(wclass))
    raise_on_zero(res)
    return res
################################################################################
_CreateWindowEx = fun_fact(
    _usr.CreateWindowExW, (
        HWND,
        DWORD,
        PWSTR,
        PWSTR,
        DWORD,
        INT,
        INT,
        INT,
        INT,
        HWND,
        HANDLE,
        HINSTANCE,
        PVOID
    )
)
def CreateWindowEx(
    ex_style,
    class_name,
    wnd_name,
    style,
    x,
    y,
    width,
    height,
    parent,
    menu,
    hinst,
    create_param
):
    # Create a window and return its handle; raises when creation fails
    # (the API returned NULL).
    hwnd = _CreateWindowEx(
        ex_style,
        class_name,
        wnd_name,
        style,
        x,
        y,
        width,
        height,
        parent,
        menu,
        hinst,
        create_param
    )
    raise_on_zero(hwnd)
    return hwnd
################################################################################
_GetMessage = fun_fact(_usr.GetMessageW, (BOOL, PMSG, HWND, UINT, UINT))
def GetMessage(hwnd=None, msg_min=0, msg_max=0):
    # Fetch the next message from the queue; raises on API error (-1) and
    # returns the filled MSG structure otherwise.
    msg = MSG()
    res = _GetMessage(ref(msg), hwnd, msg_min, msg_max)
    raise_if(res == -1)
    return msg
################################################################################
_TranslateMessage = fun_fact(_usr.TranslateMessage, (BOOL, PMSG))
def TranslateMessage(msg):
    # Translate a message (typical message-loop step); returns raw result.
    return _TranslateMessage(ref(msg))
################################################################################
_DispatchMessage = fun_fact(_usr.DispatchMessageW, (LRESULT, PMSG))
def DispatchMessage(msg):
    # Dispatch a message to its window procedure; returns raw result.
    return _DispatchMessage(ref(msg))
################################################################################
_ShowWindow = fun_fact(_usr.ShowWindow, (BOOL, HWND, INT))
def ShowWindow(hwnd, cmd):
    # Set the window's show state (SW_* command); result coerced to bool.
    return bool(_ShowWindow(hwnd, cmd))
################################################################################
_UpdateWindow = fun_fact(_usr.UpdateWindow, (BOOL, HWND))
def UpdateWindow(hwnd):
    # Update the window; raises on API failure.
    raise_on_zero(_UpdateWindow(hwnd))
################################################################################
_DestroyWindow = fun_fact(_usr.DestroyWindow, (BOOL, HWND))
def DestroyWindow(hwnd):
    # Destroy the window; raises on API failure.
    raise_on_zero(_DestroyWindow(hwnd))
################################################################################
IsWindow = fun_fact(_usr.IsWindow, (BOOL, HWND))
################################################################################
_GetDlgItem = fun_fact(_usr.GetDlgItem, (HWND, HWND, INT))
def GetDlgItem(hwnd, id):
    # Handle of the dialog control with the given id; raises if NULL.
    res = _GetDlgItem(hwnd, id)
    raise_on_zero(res)
    return res
################################################################################
SendDlgItemMessage = fun_fact(
    _usr.SendDlgItemMessageW, (LRESULT, HWND, INT, UINT, WPARAM, LPARAM)
)
################################################################################
_SetDlgItemText = fun_fact(
    _usr.SetDlgItemTextW, (BOOL, HWND, INT, PWSTR)
)
def SetDlgItemText(dlg, id, txt):
    # Set the text of a dialog control; raises on API failure.
    raise_on_zero(_SetDlgItemText(dlg, id, txt))
################################################################################
_GetDlgItemText = fun_fact(
    _usr.GetDlgItemTextW, (UINT, HWND, INT, PWSTR, INT)
)
def GetDlgItemText(dlg, id):
    """Return the text of a dialog control, growing the buffer until the
    whole text fits; raises on API error (via GetLastError)."""
    capacity = 128
    while True:
        capacity *= 2
        buf = ctypes.create_unicode_buffer(capacity)
        kernel.SetLastError(0)
        copied = _GetDlgItemText(dlg, id, buf, capacity)
        raise_on_err(kernel.GetLastError())
        if copied < capacity:
            return buf.value
################################################################################
_CheckRadioButton = fun_fact(
    _usr.CheckRadioButton, (BOOL, HWND, INT, INT, INT)
)
def CheckRadioButton(dlg, first, last, check):
    # Check button `check` in the radio group [first, last]; raises on failure.
    raise_on_zero(_CheckRadioButton(dlg, first, last, check))
################################################################################
_GetDlgCtrlID = fun_fact(_usr.GetDlgCtrlID, (INT, HWND))
def GetDlgCtrlID(hwnd):
    # Return the control ID of a dialog child window; raises on zero.
    res = _GetDlgCtrlID(hwnd)
    raise_on_zero(res)
    return res
################################################################################
# Exposed directly — the BOOL results carry state, not failure, so no raising.
EnableWindow = fun_fact(_usr.EnableWindow, (BOOL, HWND, BOOL))
################################################################################
SetForegroundWindow = fun_fact(_usr.SetForegroundWindow, (BOOL, HWND))
################################################################################
# Returns the previously focused HWND (may be NULL).
SetFocus = fun_fact(_usr.SetFocus, (HWND, HWND))
################################################################################
GetParent = fun_fact(_usr.GetParent, (HWND, HWND))
################################################################################
_InvalidateRect = fun_fact(_usr.InvalidateRect, (BOOL, HWND, PRECT, BOOL))
def InvalidateRect(hwnd, rc, erase):
    # `rc` may be None to invalidate the entire client area.
    prc = ref(rc) if rc is not None else None
    raise_on_zero(_InvalidateRect(hwnd, prc, erase))
################################################################################
WindowFromPoint = fun_fact(_usr.WindowFromPoint, (HWND, POINT))
################################################################################
_MoveWindow = fun_fact(
    _usr.MoveWindow, (
        BOOL,
        HWND,
        INT,
        INT,
        INT,
        INT,
        BOOL
    )
)
def MoveWindow(hwnd, x, y, width, height, repaint):
    # Move/resize a window; raises on failure.
    raise_on_zero(_MoveWindow(hwnd, x, y, width, height, repaint))
################################################################################
# Exposed directly: the INT result counts translated points, 0 is ambiguous.
MapWindowPoints = fun_fact(
    _usr.MapWindowPoints, (
        INT,
        HWND,
        HWND,
        PPOINT,
        UINT,
    )
)
################################################################################
_GetCursorPos = fun_fact(_usr.GetCursorPos, (BOOL, PPOINT))
def GetCursorPos():
    """Return the current cursor position in screen coordinates as a POINT."""
    pt = POINT()
    # bug fix: the wrapper used to call itself (GetCursorPos) recursively
    # instead of the underlying API stub _GetCursorPos, which recursed until
    # RecursionError on every call.
    raise_on_zero(_GetCursorPos(ref(pt)))
    return pt
################################################################################
_GetDC = fun_fact(_usr.GetDC, (HANDLE, HWND))
def GetDC(hwnd):
    # Return a device context for the client area; raises on NULL.
    res = _GetDC(hwnd)
    raise_on_zero(res)
    return res
################################################################################
_GetWindowDC = fun_fact(_usr.GetWindowDC, (HANDLE, HWND))
def GetWindowDC(hwnd):
    # Return a device context for the whole window; raises on NULL.
    res = _GetWindowDC(hwnd)
    raise_on_zero(res)
    return res
################################################################################
_ReleaseDC = fun_fact(_usr.ReleaseDC, (INT, HWND, HANDLE))
def ReleaseDC(hwnd, hdc):
    # Release a DC obtained via GetDC/GetWindowDC; raises on failure.
    raise_on_zero(_ReleaseDC(hwnd, hdc))
################################################################################
_SetTimer = fun_fact(_usr.SetTimer, (UINT_PTR, HWND, UINT_PTR, UINT, PVOID))
def SetTimer(hwnd, timer_id, period_ms):
    # No TIMERPROC is passed (None) — the timer posts WM_TIMER to `hwnd`.
    raise_on_zero(_SetTimer(hwnd, timer_id, period_ms, None))
################################################################################
_KillTimer = fun_fact(_usr.KillTimer, (BOOL, HWND, UINT_PTR))
def KillTimer(hwnd, timer_id):
    # Cancel a timer created by SetTimer; raises on failure.
    raise_on_zero(_KillTimer(hwnd, timer_id))
################################################################################
_CheckDlgButton = fun_fact(_usr.CheckDlgButton, (BOOL, HWND, INT, UINT))
def CheckDlgButton(dlg, id, check):
    # Set the check state of button `id`; raises on failure.
    raise_on_zero(_CheckDlgButton(dlg, id, check))
################################################################################
# Exposed directly: the UINT result is the check state, not an error code.
IsDlgButtonChecked = fun_fact(_usr.IsDlgButtonChecked, (UINT, HWND, INT))
################################################################################
_BeginPaint = fun_fact(_usr.BeginPaint, (HANDLE, HWND, PPAINTSTRUCT))
def BeginPaint(hwnd):
    # Return (hdc, PAINTSTRUCT); the PAINTSTRUCT must be handed back
    # to EndPaint when painting is done.
    ps = PAINTSTRUCT()
    hdc = _BeginPaint(hwnd, ref(ps))
    raise_on_zero(hdc)
    return hdc, ps
################################################################################
_EndPaint = fun_fact(_usr.EndPaint, (BOOL, HWND, PPAINTSTRUCT))
def EndPaint(hwnd, ps):
    # Close the paint session started by BeginPaint; raises on failure.
    raise_on_zero(_EndPaint(hwnd, ref(ps)))
################################################################################
_DrawText = fun_fact(_usr.DrawTextW, (INT, HANDLE, PWSTR, INT, PRECT, UINT))
def DrawText(hdc, txt, rc, fmt):
    # The text length is passed explicitly so embedded NULs are preserved.
    raise_on_zero(_DrawText(hdc, txt, len(txt), ref(rc), fmt))
################################################################################
_SetProp = fun_fact(_usr.SetPropW, (BOOL, HWND, PWSTR, HANDLE))
def SetProp(hwnd, name, data):
    # Attach a named HANDLE-sized value to a window; raises on failure.
    raise_on_zero(_SetProp(hwnd, name, data))
################################################################################
_GetProp = fun_fact(_usr.GetPropW, (HANDLE, HWND, PWSTR))
def GetProp(hwnd, name):
    # Return the property value; raises when the property is absent (NULL).
    data = _GetProp(hwnd, name)
    raise_on_zero(data)
    return data
def get_prop_def(hwnd, name, default=None):
    # Non-raising variant: return `default` when the property is absent
    # (NB: a stored value of 0 is indistinguishable from "absent" here).
    data = _GetProp(hwnd, name)
    return data or default
################################################################################
_RemoveProp = fun_fact(_usr.RemovePropW, (HANDLE, HWND, PWSTR))
def RemoveProp(hwnd, name):
    # Remove and return the property value; raises when it did not exist.
    data = _RemoveProp(hwnd, name)
    raise_on_zero(data)
    return data
################################################################################
_EnumPropsCallback = ctypes.WINFUNCTYPE(
    BOOL,
    HWND,
    PVOID, #cannot use PWSTR, since it can be string or atom
    HANDLE,
    CallbackContextPtr
)
@_EnumPropsCallback
def _EnumPropsCb(hwnd, name, data, ctxt):
    # Trampoline: unwrap the CallbackContext and forward to the user callback.
    cbc = ctxt.contents
    res = cbc.callback(hwnd, name, data, cbc.context)
    # keep on enumerating if the callback fails to return a value
    return res if res is not None else True
################################################################################
_EnumPropsEx = fun_fact(
    _usr.EnumPropsExW, (INT, HWND, _EnumPropsCallback, CallbackContextPtr)
)
def EnumPropsEx(hwnd, callback, context):
    # Enumerate all window properties, invoking `callback(hwnd, name, data,
    # context)` for each one; returning False from the callback stops early.
    cbc = CallbackContext(callback, context)
    _EnumPropsEx(hwnd, _EnumPropsCb, ref(cbc))
################################################################################
def get_prop_dict(hwnd):
    # Collect all properties of `hwnd` into a dict.  Atom-named properties
    # (pointer value < 0x10000) are keyed as "#<atom>", string names verbatim.
    props = {}
    @_EnumPropsCallback
    def collect(hwnd, name, data, not_used):
        # string or atom
        name = PWSTR(name).value if name >= 0x10000 else f"#{name}"
        props[name] = data
        return True
    _EnumPropsEx(hwnd, collect, None)
    return props
################################################################################
_OpenClipboard = fun_fact(_usr.OpenClipboard, (BOOL, HWND))
def OpenClipboard(hwnd):
    # Open the clipboard for examination/modification; raises on failure.
    raise_on_zero(_OpenClipboard(hwnd))
################################################################################
_EmptyClipboard = fun_fact(_usr.EmptyClipboard, (BOOL,))
def EmptyClipboard():
    # Clear the clipboard (requires it to be open); raises on failure.
    raise_on_zero(_EmptyClipboard())
################################################################################
_SetClipboardData = fun_fact(_usr.SetClipboardData, (HANDLE, UINT, HANDLE))
def SetClipboardData(fmt, hmem):
    # Place `hmem` on the clipboard in format `fmt`; raises on failure.
    res = _SetClipboardData(fmt, hmem)
    raise_on_zero(res)
    return res
################################################################################
_GetClipboardData = fun_fact(_usr.GetClipboardData, (HANDLE, UINT))
def GetClipboardData(fmt):
    # Return the clipboard handle for format `fmt`; raises when unavailable.
    res = _GetClipboardData(fmt)
    raise_on_zero(res)
    return res
################################################################################
# Exposed directly: zero just means "format not present".
IsClipboardFormatAvailable = fun_fact(
    _usr.IsClipboardFormatAvailable, (BOOL, UINT)
)
################################################################################
_CloseClipboard = fun_fact(_usr.CloseClipboard, (BOOL,))
def CloseClipboard():
    # Close the clipboard opened by OpenClipboard; raises on failure.
    raise_on_zero(_CloseClipboard())
################################################################################
_GetClipboardFormatName = fun_fact(
    _usr.GetClipboardFormatNameW, (DWORD, DWORD, PWSTR, DWORD)
)
def GetClipboardFormatName(fmt_atom):
    # Resolve a registered clipboard format atom to its name.
    # 1024 chars is assumed to be enough for any format name.
    bufsize = 1024
    buf = ctypes.create_unicode_buffer(bufsize)
    if _GetClipboardFormatName(fmt_atom, buf, bufsize) == 0:
        raise ctypes.WinError()
    return buf.value
################################################################################
EnumClipboardFormats = fun_fact(_usr.EnumClipboardFormats, (DWORD, DWORD))
################################################################################
def txt_to_clip(txt, hwnd=None):
    # Copy `txt` to the clipboard as CF_UNICODETEXT.  The text is copied
    # into a movable global memory block, whose ownership transfers to the
    # system once SetClipboardData succeeds.
    buf = ctypes.create_unicode_buffer(txt)
    size = ctypes.sizeof(buf)
    copied = False
    hcopy = kernel.GlobalAlloc(GMEM_MOVEABLE, size)
    try:
        ctypes.memmove(kernel.GlobalLock(hcopy), buf, size)
        kernel.GlobalUnlock(hcopy)
        OpenClipboard(hwnd)
        EmptyClipboard()
        SetClipboardData(CF_UNICODETEXT, hcopy)
        copied = True
        CloseClipboard()
    finally:
        # only free the block if ownership was NOT transferred to the system
        if not copied:
            kernel.GlobalFree(hcopy)
################################################################################
def txt_from_clip(hwnd=None):
    """Return the clipboard's CF_UNICODETEXT contents as a str.

    Raises EnvironmentError when no clipboard text is available.
    """
    if not IsClipboardFormatAvailable(CF_UNICODETEXT):
        raise EnvironmentError("no clipboard text available")
    OpenClipboard(hwnd)
    try:
        hmem = GetClipboardData(CF_UNICODETEXT)
        txt = ctypes.wstring_at(kernel.GlobalLock(hmem))
        kernel.GlobalUnlock(hmem)
    finally:
        # fix: always release the clipboard, even if reading the data raised;
        # previously a failure here left the clipboard locked for all apps
        # (also removed a stray trailing semicolon)
        CloseClipboard()
    return txt
################################################################################
GetSystemMetrics = fun_fact(_usr.GetSystemMetrics, (INT, INT))
################################################################################
_ScrollWindow = fun_fact(
    _usr.ScrollWindow, (BOOL, HWND, INT, INT, PRECT, PRECT)
)
def ScrollWindow(hwnd, x, y, scroll_rect=None, clip_rect=None):
    # Scroll the window's client area by (x, y); None rects mean "whole area".
    scroll_rect = ref(scroll_rect) if scroll_rect is not None else None
    clip_rect = ref(clip_rect) if clip_rect is not None else None
    raise_on_zero(_ScrollWindow(hwnd, x, y, scroll_rect, clip_rect))
################################################################################
_GetKeyNameText = fun_fact(_usr.GetKeyNameTextW, (INT, LONG, PWSTR, INT))
def GetKeyNameText(lparam, expect_empty=False):
    # Translate a keystroke LPARAM into a key name, doubling the buffer
    # while the result still fills it (ret == size - 1 implies truncation).
    size = ret = 32
    while ret >= size - 1:
        size *= 2
        key_name = ctypes.create_unicode_buffer(size)
        ret = _GetKeyNameText(lparam, key_name, size)
    # an empty result is an error unless the caller opted in
    raise_if(not ret and not expect_empty)
    return key_name.value
################################################################################
_CreateIconFromResourceEx = fun_fact(
    _usr.CreateIconFromResourceEx, (
        HANDLE, PVOID, DWORD, BOOL, DWORD, INT, INT, UINT
    )
)
def CreateIconFromResourceEx(
    data,
    cx=0,
    cy=0,
    is_icon=True,
    default_size=False
):
    # Create an icon (or cursor when is_icon=False) from resource bytes.
    # `cx`/`cy` of 0 plus default_size=True lets the system choose a size.
    res = _CreateIconFromResourceEx(
        data,
        len(data),
        is_icon,
        0x00030000,  # icon/cursor resource format version (per Win32 docs)
        cx,
        cy,
        LR_DEFAULTSIZE if default_size else 0
    )
    raise_on_zero(res)
    return res
################################################################################
class GUITHREADINFO(ctypes.Structure):
    # ctypes mirror of the Win32 GUITHREADINFO structure.
    _fields_ = (
        ("cbSize", DWORD),
        ("flags", DWORD),
        ("hwndActive", HWND),
        ("hwndFocus", HWND),
        ("hwndCapture", HWND),
        ("hwndMenuOwner", HWND),
        ("hwndMoveSize", HWND),
        ("hwndCaret", HWND),
        ("rcCaret", RECT),
    )
    def __init__(self):
        # cbSize must hold the structure size before the API call
        self.cbSize = ctypes.sizeof(self)
PGUITHREADINFO = POINTER(GUITHREADINFO)
_GetGUIThreadInfo = fun_fact(
    _usr.GetGUIThreadInfo, (BOOL, DWORD, PGUITHREADINFO)
)
def GetGUIThreadInfo(tid=0):
    # Return GUI state for thread `tid` (0 = the foreground thread).
    gti = GUITHREADINFO()
    raise_on_zero(_GetGUIThreadInfo(tid, ref(gti)))
    return gti
################################################################################
_SystemParametersInfo = fun_fact(
    _usr.SystemParametersInfoW, (BOOL, UINT, UINT, PVOID, UINT)
)
################################################################################
class NONCLIENTMETRICS(ctypes.Structure):
    # ctypes mirror of the Win32 NONCLIENTMETRICSW structure.
    _fields_ = (
        ("cbSize", UINT),
        ("iBorderWidth", INT),
        ("iScrollWidth", INT),
        ("iScrollHeight", INT),
        ("iCaptionWidth", INT),
        ("iCaptionHeight", INT),
        ("lfCaptionFont", LOGFONT),
        ("iSmCaptionWidth", INT),
        ("iSmCaptionHeight", INT),
        ("lfSmCaptionFont", LOGFONT),
        ("iMenuWidth", INT),
        ("iMenuHeight", INT),
        ("lfMenuFont", LOGFONT),
        ("lfStatusFont", LOGFONT),
        ("lfMessageFont", LOGFONT),
        ("iPaddedBorderWidth", INT),
    )
    def __init__(self):
        # cbSize must hold the structure size before the API call
        self.cbSize = ctypes.sizeof(self)
def get_non_client_metrics():
    # Return the current non-client (border/caption/menu/font) metrics.
    ncm = NONCLIENTMETRICS()
    raise_on_zero(
        _SystemParametersInfo(
            SPI_GETNONCLIENTMETRICS,
            ncm.cbSize,
            ref(ncm),
            0
        )
    )
    return ncm
def set_non_client_metrics(ncm, winini=SPIF_UPDATEINIFILE | SPIF_SENDCHANGE):
    # Apply modified non-client metrics; by default persists the change and
    # broadcasts WM_SETTINGCHANGE (see the SPIF_* flags in `winini`).
    ncm.cbSize = ctypes.sizeof(ncm)
    raise_on_zero(
        _SystemParametersInfo(
            SPI_SETNONCLIENTMETRICS,
            ncm.cbSize,
            ref(ncm),
            winini
        )
    )
################################################################################
def get_wheel_scroll_lines():
    # Return the number of lines scrolled per mouse-wheel notch.
    lines = UINT()
    raise_on_zero(
        _SystemParametersInfo(
            SPI_GETWHEELSCROLLLINES,
            0,
            ref(lines),
            0
        )
    )
    return lines.value
def set_wheel_scroll_lines(lines, winini=SPIF_UPDATEINIFILE | SPIF_SENDCHANGE):
    # Set the wheel-scroll line count; default flags persist + broadcast.
    raise_on_zero(
        _SystemParametersInfo(
            SPI_SETWHEELSCROLLLINES,
            lines,
            None,
            winini
        )
    )
################################################################################
def get_work_area():
    # Return the desktop work area (screen minus taskbar etc.) as a RECT.
    wa = RECT()
    raise_on_zero(_SystemParametersInfo(SPI_GETWORKAREA, 0, ref(wa), 0))
    return wa
################################################################################
class DLGTEMPLATE(ctypes.Structure):
    # ctypes mirror of the classic in-memory dialog template header.
    _pack_ = 2 # for correct length
    _fields_ = (
        ("style", DWORD),
        ("dwExtendedStyle", DWORD),
        ("cdit", WORD),
        ("x", SHORT),
        ("y", SHORT),
        ("cx", SHORT),
        ("cy", SHORT),
    )
################################################################################
class DLGTEMPLATEEX(ctypes.Structure):
    # ctypes mirror of the extended (DIALOGEX) dialog template header.
    _pack_ = 2 # for correct length
    _fields_ = (
        ("dlgVer", WORD),
        ("signature", WORD),
        ("helpID", DWORD),
        ("exStyle", DWORD),
        ("style", DWORD),
        ("cDlgItems", WORD),
        ("x", WORD),
        ("y", WORD),
        ("cx", WORD),
        ("cy", WORD),
    )
################################################################################
class DLGITEMTEMPLATE(ctypes.Structure):
    # ctypes mirror of the per-control entry in a dialog template.
    _pack_ = 2 # for correct length
    _fields_ = (
        ("style",DWORD),
        ("exstyle", DWORD),
        ("x", SHORT),
        ("y", SHORT),
        ("cx", SHORT),
        ("cy", SHORT),
        ("id", WORD ),
    )
################################################################################
class NMHDR(ctypes.Structure):
    # Common WM_NOTIFY header.
    _fields_ = (
        ("hwndFrom", HWND),
        ("idFrom", UINT_PTR),
        ("code", UINT),
    )
PNMHDR = POINTER(NMHDR)
# Project-local WM_NOTIFY code range for "ModelesS Dialog" notifications
# (the capitalization spells out the MSDN abbreviation; unrelated to
# Microsoft's MSDN).
MSDN_FIRST = 0xf060 # ModelesS Dialog Notification
MSDN_LAST = MSDN_FIRST + 50
MSDN_ACTIVATE = MSDN_FIRST + 1
class NM_MSD_ACTIVATE(ctypes.Structure):
    # Payload for MSDN_ACTIVATE: header plus the new activation state.
    _fields_ = (
        ("hdr", NMHDR),
        ("is_active", BOOL),
    )
MSDN_DESTROY = MSDN_FIRST + 2
# MSDN_DESTROY carries no extra payload, so the plain NMHDR is reused.
NM_MSD_DESTROY = NMHDR
################################################################################
# Callback signature for dialog procedures.
DLGPROC = ctypes.WINFUNCTYPE(
    INT_PTR,
    HWND,
    UINT,
    WPARAM,
    LPARAM
)
################################################################################
_DialogBoxIndirectParam = fun_fact(
    _usr.DialogBoxIndirectParamW, (INT_PTR, HANDLE, PVOID, HWND, DLGPROC, PVOID)
)
def DialogBoxIndirectParam(templ, parent, dlg_func, init_param, hinst=None):
    # Run a modal dialog from an in-memory template; returns the value the
    # dialog passed to EndDialog.  Since -1 is a valid result, errors are
    # detected via GetLastError instead of the return value.
    kernel.SetLastError(0)
    res = _DialogBoxIndirectParam(hinst, templ, parent, dlg_func, init_param)
    raise_on_err(kernel.GetLastError())
    return res
################################################################################
_CreateDialogIndirectParam = fun_fact(
    _usr.CreateDialogIndirectParamW, (
        HWND, HANDLE, PVOID, HWND, DLGPROC, PVOID
    )
)
def CreateDialogIndirectParam(templ, parent, dlg_func, init_param, hinst=None):
    # Create a modeless dialog from an in-memory template; raises on NULL.
    res = _CreateDialogIndirectParam(hinst, templ, parent, dlg_func, init_param)
    raise_on_zero(res)
    return res
################################################################################
_EndDialog = fun_fact(_usr.EndDialog, (BOOL, HWND, INT_PTR))
def EndDialog(hdlg, result):
    # Close a modal dialog, making DialogBox... return `result`.
    raise_on_zero(_EndDialog(hdlg, result))
################################################################################
|
n, m = map(int, input().split())
a = list(map(int, input().split()))
b = list(map(int, input().split()))
a.sort(reverse=True)
b.sort(reverse=True)
ans = -1
if a[0] == n*m and b[0] == n*m:
|
# coding: utf-8
import numpy as np
import networkx as nx
import random
import multiprocessing
import torch
import torch.nn as nn
import torch_geometric as tg
import torch.nn.functional as F
from torch.nn import init
# Position-aware Graph Neural Networks. For more information, please refer to https://arxiv.org/abs/1906.04817
# We modify and simplify the code of PGNN from https://github.com/JiaxuanYou/P-GNN, and include this method in our graph embedding project framework.
# Author: jhljx
# Email: jhljx8918@gmail.com
####################### Utility Function #####################
def single_source_shortest_path_length_range(graph, node_range, cutoff):
    """Map each node in `node_range` to its BFS shortest-path-length dict,
    with the search depth limited by `cutoff` (None = unbounded)."""
    return {
        src: nx.single_source_shortest_path_length(graph, src, cutoff)
        for src in node_range
    }
def merge_dicts(dicts):
    """Flatten an iterable of dicts into one; later dicts win on key clashes."""
    return {key: value for d in dicts for key, value in d.items()}
def all_pairs_shortest_path_length_parallel(graph, cutoff=None, num_workers=4):
    # Compute all-pairs shortest-path lengths by fanning the per-source BFS
    # out over a multiprocessing pool.  `cutoff` bounds the search depth
    # (None = exact distances).  Returns {node: {other: dist, ...}, ...}.
    nodes = list(graph.nodes)
    # shuffle so each worker's contiguous slice gets a similar workload mix
    random.shuffle(nodes)
    # shrink the pool on small graphs where process overhead would dominate
    if len(nodes) < 50:
        num_workers = int(num_workers / 4)
    elif len(nodes) < 400:
        num_workers = int(num_workers / 2)
    pool = multiprocessing.Pool(processes=num_workers)
    # one async task per worker, each covering an equal slice of the nodes
    results = [pool.apply_async(single_source_shortest_path_length_range,
            args=(graph, nodes[int(len(nodes)/num_workers*i):int(len(nodes)/num_workers*(i+1))], cutoff)) for i in range(num_workers)]
    output = [p.get() for p in results]
    dists_dict = merge_dicts(output)
    pool.close()
    pool.join()
    return dists_dict
# approximate == -1 means exact shortest path (time consuming), approximate > 0 means shortest path with cut-off
def precompute_dist_data(edge_indices, num_nodes, approximate):
    '''
    Precompute pairwise node "proximity" matrices from edge indices.

    Here dist is 1/real_dist, higher actually means closer, 0 means disconnected.
    `edge_indices` is a [2, edge_num] tensor, or a list of such tensors
    (one per timestamp); the return mirrors that shape with [n, n] arrays.
    `approximate` > 0 caps the BFS depth; otherwise exact distances are used.
    :return: np.ndarray [num_nodes, num_nodes], or a list of them
    '''
    if isinstance(edge_indices, list):
        is_list = True
        timestamp_num = len(edge_indices)
    else: # tensor
        is_list = False
        timestamp_num = 1
    node_dist_list = []
    for i in range(timestamp_num):
        graph = nx.Graph()
        edge_index = edge_indices[i] if is_list else edge_indices
        assert edge_index.shape[0] == 2
        # transpose to [edge_num, 2] pairs for networkx
        edge_arr = edge_index.transpose(0, 1).cpu().numpy()
        graph.add_edges_from(edge_arr) # [edge_num, 2]
        # make sure isolated nodes are present too
        graph.add_nodes_from(np.arange(num_nodes))
        # print('graph nodes: ', len(graph.nodes()))
        ##################
        # This block is quite memory consuming especially on large graphs
        n = num_nodes
        dists_array = np.zeros((n, n))
        # dists_dict = nx.all_pairs_shortest_path_length(graph,cutoff=approximate if approximate>0 else None)
        # dists_dict = {c[0]: c[1] for c in dists_dict}
        dists_dict = all_pairs_shortest_path_length_parallel(graph, cutoff=approximate if approximate > 0 else None)
        # NOTE(review): the loop below rebinds `i`, shadowing the timestamp
        # index above — harmless because the outer `for` reassigns `i` each
        # iteration, but easy to misread.  The enumerate counters are unused;
        # indexing is done with the node labels themselves.
        for i, node_i in enumerate(graph.nodes()):
            shortest_dist = dists_dict[node_i]
            for j, node_j in enumerate(graph.nodes()):
                dist = shortest_dist.get(node_j, -1)
                if dist != -1:
                    # store 1/(hops+1): 1.0 on the diagonal, smaller when farther
                    dists_array[node_i, node_j] = 1 / (dist + 1)
        # dist_tensor = torch.tensor(dists_array)
        node_dist_list.append(dists_array)
        #################
    if is_list:
        return node_dist_list
    return node_dist_list[0]
def get_random_anchorset(n, c=0.5):
    """Draw log2(n) tiers of random anchor sets.

    Tier i contributes int(c * log2(n)) subsets of size n / 2**(i+1),
    each sampled without replacement from range(n).
    """
    num_tiers = int(np.log2(n))
    sets_per_tier = int(c * num_tiers)
    anchor_sets = []
    for tier in range(num_tiers):
        size = int(n / np.exp2(tier + 1))
        anchor_sets.extend(
            np.random.choice(n, size=size, replace=False)
            for _ in range(sets_per_tier)
        )
    return anchor_sets
# consider multiple timestamps
def get_dist_max(anchorset_list, node_dist_list, device):
    """For every node, find the closest anchor (largest 1/dist) in each set.

    Parameters
    ----------
    anchorset_list : list of 1-D index arrays, one per anchor set
    node_dist_list : np.ndarray [n, n], or a list of such arrays (per timestamp)
    device : torch device for the output tensors

    Returns
    -------
    (dist_max, dist_argmax) tensors of shape [n, num_anchor_sets]; when
    `node_dist_list` is a list, two parallel lists of such tensors.
    The argmax is the position *within* the anchor set, not a node id.
    """
    anchor_set_num = len(anchorset_list)
    if isinstance(node_dist_list, list):
        is_list = True
        timestamp = len(node_dist_list)
    else:
        is_list = False
        timestamp = 1
    dist_max_list = []
    dist_argmax_list = []
    # renamed the outer index to `t` — the original reused `i`, which was
    # then shadowed by the inner anchor-set loop
    for t in range(timestamp):
        node_dist = node_dist_list[t] if is_list else node_dist_list  # array
        dist_max = torch.zeros((node_dist.shape[0], anchor_set_num), device=device)
        dist_argmax = torch.zeros((node_dist.shape[0], anchor_set_num), device=device).long()
        for i in range(anchor_set_num):
            anchor_ids = anchorset_list[i]
            dist_temp = node_dist[:, anchor_ids]
            dist_max_temp, dist_argmax_temp = np.max(dist_temp, axis=1), np.argmax(dist_temp, axis=1)
            dist_max[:, i] = torch.from_numpy(dist_max_temp)
            dist_argmax[:, i] = torch.from_numpy(dist_argmax_temp)
        dist_max_list.append(dist_max)
        dist_argmax_list.append(dist_argmax)
    if is_list:
        return dist_max_list, dist_argmax_list
    # bug fix: the non-list path returned dist_max_list[0] twice, silently
    # dropping the argmax tensor
    return dist_max_list[0], dist_argmax_list[0]
# Select anchor sets
# element of dist_mat_list is np.ndarray
def preselect_anchor(node_num, node_dist_list, device):
    """Sample anchor sets (with c=1) and reduce the distance data against them.

    Returns the (dist_max, dist_argmax) pair produced by get_dist_max.
    """
    anchor_sets = get_random_anchorset(node_num, c=1)
    return get_dist_max(anchor_sets, node_dist_list, device)
####################### Basic Ops #############################
# Non linearity
class Nonlinear(nn.Module):
    """Two-layer MLP (Linear -> ReLU -> Linear).

    Weights are Xavier-uniform initialized with the ReLU gain; biases, when
    present, are zeroed.
    """
    def __init__(self, input_dim, hidden_dim, output_dim, bias=True):
        super(Nonlinear, self).__init__()
        self.linear1 = nn.Linear(input_dim, hidden_dim, bias=bias)
        self.linear2 = nn.Linear(hidden_dim, output_dim, bias=bias)
        self.act = nn.ReLU()
        self.reset_parameters()
    def reset_parameters(self):
        """Re-initialize all Linear submodules in place."""
        gain = nn.init.calculate_gain('relu')
        for module in self.modules():
            if not isinstance(module, nn.Linear):
                continue
            module.weight.data = init.xavier_uniform_(module.weight.data, gain=gain)
            if module.bias is not None:
                module.bias.data = init.constant_(module.bias.data, 0.0)
    def forward(self, x):
        return self.linear2(self.act(self.linear1(x)))
# PGNN layer, only pick closest node for message passing
class PGNN_layer(nn.Module):
    """P-GNN layer: aggregate messages from anchor sets into per-node
    position scores and structure features.

    `dists_max` holds each node's proximity (1/dist) to its closest anchor
    per anchor set; `dists_argmax` holds that anchor's index within the set.
    """
    def __init__(self, input_dim, output_dim, dist_trainable=True, bias=True):
        super(PGNN_layer, self).__init__()
        self.input_dim = input_dim
        self.dist_trainable = dist_trainable
        if self.dist_trainable:
            # learnable scalar transform applied to each proximity value
            self.dist_compute = Nonlinear(1, output_dim, 1, bias=bias)
        # combines [message, self] feature pairs, hence input_dim*2
        self.linear_hidden = nn.Linear(input_dim*2, output_dim, bias=bias)
        self.linear_out_position = nn.Linear(output_dim, 1, bias=bias)
        self.act = nn.ReLU()
        self.reset_parameters()
    def reset_parameters(self):
        # Xavier-uniform weights (ReLU gain), zero biases on all Linears.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                m.weight.data = init.xavier_uniform_(m.weight.data, gain=nn.init.calculate_gain('relu'))
                if m.bias is not None:
                    m.bias.data = init.constant_(m.bias.data, 0.0)
    def forward(self, feature, dists_max, dists_argmax):
        """Return (out_position [n, anchor_set_num], out_structure [n, output_dim])."""
        if self.dist_trainable:
            # NOTE(review): bare .squeeze() drops ALL size-1 dims — this would
            # also remove the anchor dimension if anchor_set_num == 1.
            dists_max = self.dist_compute(dists_max.unsqueeze(-1)).squeeze() # [n, anchor_set_num]
        # gather the feature of each node's chosen anchor per anchor set
        subset_features = feature[dists_argmax.flatten(), :] # [n, anchor_set_num, input_dim]
        subset_features = subset_features.reshape((dists_argmax.shape[0], dists_argmax.shape[1], feature.shape[1])) # [n, anchor_set_num, input_dim]
        # weight anchor features by proximity
        messages = subset_features * dists_max.unsqueeze(-1) # [n, anchor_set_num, input_dim]
        self_feature = feature.unsqueeze(1).repeat(1, dists_max.shape[1], 1) # [n, anchor_set_num, input_dim]
        messages = torch.cat((messages, self_feature), dim=-1) # [n, anchor_set_num, 2 * input_dim]
        messages = self.linear_hidden(messages).squeeze() # [n, anchor_set_num, output_dim]
        messages = self.act(messages) # [n, anchor_set_num, output_dim]
        # one position score per anchor set, mean-pooled structure features
        out_position = self.linear_out_position(messages).squeeze(-1) # [n, anchor_set_num]
        out_structure = torch.mean(messages, dim=1) # [n, output_dim]
        return out_position, out_structure
# Position-aware graph neural network class
class PGNN(torch.nn.Module):
    """Position-aware Graph Neural Network (stack of PGNN_layer blocks).

    When inputs are lists, each list element is treated as one timestamp and
    processed independently with shared weights.
    Output: L2-normalized position embeddings [n, anchor_set_num].
    """
    input_dim: int
    feature_dim: int
    hidden_dim: int
    output_dim: int
    feature_pre: bool
    layer_num: int
    dropout: float
    bias: bool
    method_name: str
    def __init__(self, input_dim, feature_dim, hidden_dim, output_dim, feature_pre=True, layer_num=2, dropout=0.5, bias=True):
        super(PGNN, self).__init__()
        self.input_dim = input_dim
        self.feature_dim = feature_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.feature_pre = feature_pre
        self.layer_num = layer_num
        self.dropout = dropout
        self.bias = bias
        self.method_name = 'PGNN'
        # with a single layer, conv_first is also the output layer
        if layer_num == 1:
            hidden_dim = output_dim
        if feature_pre:
            # optional linear pre-projection of raw node features
            self.linear_pre = nn.Linear(input_dim, feature_dim, bias=bias)
            self.conv_first = PGNN_layer(feature_dim, hidden_dim, bias=bias)
        else:
            self.conv_first = PGNN_layer(input_dim, hidden_dim, bias=bias)
        if layer_num > 1:
            # layer_num - 2 middle layers; conv_out is the final one
            self.conv_hidden = nn.ModuleList([PGNN_layer(hidden_dim, hidden_dim, bias=bias) for i in range(layer_num - 2)])
            self.conv_out = PGNN_layer(hidden_dim, output_dim, bias=bias)
    def forward(self, x, dists_max, dists_argmax):
        # list inputs = multiple timestamps; run the same network on each
        if isinstance(x, list):
            timestamp_num = len(x)
            output_list = []
            for i in range(timestamp_num):
                output_list.append(self.pgnn(x[i], dists_max[i], dists_argmax[i]))
            return output_list
        return self.pgnn(x, dists_max, dists_argmax)
    def pgnn(self, x, dists_max, dists_argmax):
        # single-timestamp forward pass
        if self.feature_pre:
            x = self.linear_pre(x)
        x_position, x = self.conv_first(x, dists_max, dists_argmax)
        if self.layer_num == 1:
            return x_position
        # x = F.relu(x) # Note: optional!
        x = F.dropout(x, self.dropout, training=self.training)
        for i in range(self.layer_num-2):
            _, x = self.conv_hidden[i](x, dists_max, dists_argmax)
            # x = F.relu(x) # Note: optional!
            x = F.dropout(x, self.dropout, training=self.training)
        x_position, x = self.conv_out(x, dists_max, dists_argmax)
        # unit-normalize each node's position vector
        x_position = F.normalize(x_position, p=2, dim=-1)
        return x_position
|
###!/usr/bin/env python
""" Top line for Unix systems. Comment out for Windows """
import os # needed for file access.
import sys # needed for sys functions.
# Marker substrings that identify interesting lines in the game's chat log.
lootTag = 'You have looted '  # precedes the item name in a loot message
sellTag = 'll give you '  # fragment of a vendor sell-price message
lootDB = {} # blank dictionary: item name -> {'source': [names], 'sellTo': str, 'buyFrom': str}
def logParse(fname):
    """Parse a chat log: copy loot/sell lines into 'DL_<fname>' and
    accumulate looted items (with their sources) into the module-level
    lootDB. Prints usage help when `fname` does not exist."""
    if os.path.exists(fname):
        fDL = open('DL_'+fname, 'w')
        print('Drop Log opened')
        flag = True
        'Read in each line'
        with open(fname, 'r', errors='ignore') as f:
            while flag:
                rline = f.readline()
                if rline == '':
                    # empty string (not '\n') means end of file
                    break
                else:
                    # Find the channel string
                    L = lootTag in rline # is loot tag?
                    S = sellTag in rline # is sell tag?
                    # Write tagged line to the appropriate file
                    if L:
                        j = rline.find(lootTag) # get index of tag
                        j += len(lootTag)
                        fDL.write(rline[j:])
                        # parse to database:
                        # the loot line looks like "... <article> <item> from <source>{'|.}"
                        k = rline.find(' ',j) #skip over 1 or 2 char article ("a"/"an")
                        l = rline.find('from',k)
                        # end may be ' or .
                        E = "'" in rline[j:]
                        #find the end
                        if E:
                            m = rline.find("'",j)
                        else:
                            m = rline.find(".",j)
                        # item name sits between the article and " from"
                        key = rline[k+1:l-1]
                        a = dict(source=[rline[l+5:m]], sellTo='', buyFrom='') #make source a list
                        # Check if key already exists. Check source list if it does, Add if not.
                        if not bool(lootDB): # automatically add if empty.
                            lootDB[key] = a
                        elif key in lootDB:
                            # Check to see if a new source was found.
                            if a['source'][0] not in lootDB[key]['source']:
                                # then add it to the list.
                                lootDB[key]['source'].append(a['source'][0])
                        else:
                            # Add to database.
                            lootDB[key] = a
                    if S:
                        j = rline.find(sellTag) # get index of tag
                        j += len(sellTag)
                        fDL.write(rline[j:])
        fDL.close()
        print('Drop Log closed')
    else:
        print("File '%s' not found." % fname)
        print("Usage: dropLogParse.py <filename>")
if __name__ == "__main__":
    # execute only if run as a script
    # Pass the second arg to the function. The first arg is the script name.
    logParse(sys.argv[1])
    # Print database: one row per (item, source) pair
    for key in sorted(lootDB):
        for x in sorted(lootDB[key]['source']):
            print(key, ' ', x, ' ',lootDB[key]['sellTo'], ' ',lootDB[key]['buyFrom'])
    # Save to CSV file named 'CSV_<input filename>'
    if os.path.exists(sys.argv[1]):
        fCSV = open('CSV_'+sys.argv[1], 'w')
        print('CSV file opened')
        for key in sorted(lootDB):
            d = len(lootDB[key]['source']) # get the size of the list
            if d == 1:
                fCSV.write(key+','+lootDB[key]['source'][0]+','+lootDB[key]['sellTo']+','+lootDB[key]['buyFrom']+'\n')
            else:
                # multiple sources: emit them as one quoted, newline-separated cell
                sl = '"'
                for x in sorted(lootDB[key]['source']):
                    sl += x+'\n'
                sl = sl[:-1]+'"' # strip off last CR and add closing "
                fCSV.write(key+','+sl+','+lootDB[key]['sellTo']+','+lootDB[key]['buyFrom']+'\n')
        fCSV.close()
        print('CSV file Log closed')
    else:
        print("File '%s' not found." % sys.argv[1])
        print("Usage: dropLogParse.py <filename>")
|
import pygame
from Color import Color
from itertools import repeat
class Level:
    """Tile map loaded from a text file.

    Map legend: 'P' = player start, 'E' = enemy spawn, '1' = floor tile.
    Each character covers one block_size cell (100x100 px).
    """
    def __init__(self, filename):
        self.block_size = (self.w, self.h) = (100, 100)
        self.level = []
        # desired player position relative to the visible viewport
        self.screen_player_offset = (100, 300)
        self.player_position = (0, 0)
        self.enemies = []
        self.floor = []
        self.screen_shake = False
        # fix: close the level file deterministically (it was left open)
        with open(filename, 'r') as f:
            for line in f:
                self.level.append(line)
        if len(self.level):
            self.screen = pygame.Surface(
                (len(self.level[0]) * self.w, len(self.level) * self.h)
            )
            self.screen.fill(Color.gray_7)
            # scan the map characters into world positions
            for j, row in enumerate(self.level):
                for i, cell in enumerate(row):
                    pos = (i * self.w, j * self.h)
                    if cell == 'P':
                        self.player_position = pos
                    if cell == 'E':
                        self.enemies.append(pos)
                    if cell == '1':
                        self.floor.append(pos)
            self.rect = self.screen.get_rect()
            # keep a pristine copy of the rendered level for later restores
            self.master = pygame.Surface((self.rect.width, self.rect.height))
            self.master.blit(self.screen, (0, 0), self.rect)
    def get_full_screen(self):
        # restore the working surface from the pristine master copy
        self.screen.blit(self.master, (0, 0), self.rect)
        return self.screen
    def get_player_starting_position(self):
        return self.player_position
    def get_enemies(self):
        return self.enemies
    def get_floor(self):
        return self.floor
    def get_screen(self):
        return self.screen
    def get_rect(self, dim, player):
        '''
        Return the portion of the level where the player is currently visible
        '''
        (ox, oy) = self.screen_player_offset
        (px, py) = player.get_position()
        (dx, dy) = dim
        # clamp the viewport so it never leaves the level surface
        rx = min(max(px - ox, 0), self.rect.width - dx)
        ry = min(max(py - oy, 0), self.rect.height - dy)
        return pygame.Rect(rx, ry, dx, dy)
    def shake(self):
        """Yield (dx, dy) screen offsets for a horizontal shake effect.

        Ramps 0 -> 30 px and back three times with alternating direction,
        then yields (0, 0) forever.
        """
        direction = -1
        for _ in range(3):
            for x in range(0, 30, 10):
                yield (x * direction, 0)
            # bug fix: the down-ramp needs a negative step —
            # range(30, 0, 10) is empty, so the shake never ramped back down
            for x in range(30, 0, -10):
                yield (x * direction, 0)
            direction *= -1
        while True:
            yield (0, 0)
class Floor(pygame.sprite.Sprite):
    """A static floor tile rendered as a solid dark-gray rectangle."""
    def __init__(self, gravity, position, size):
        super().__init__()
        self.image = pygame.Surface(size)
        self.image.fill(Color.gray_9)
        self.rect = self.image.get_rect()
        self.rect.x, self.rect.y = position
        self.gravity = gravity
    def get_position(self):
        """Return the tile's top-left corner as an (x, y) tuple."""
        return (self.rect.x, self.rect.y)
    def update(self):
        """Per-frame update hook; floors are static, so nothing to do."""
"""Simulate the generative process of LDA and generate corpus based on it
"""
import numpy as np
from scipy.sparse import coo_matrix
from scipy.stats import poisson
from sklearn.utils import check_random_state
from six.moves import xrange
class LdaSampleGenerator(object):
    """Generate LDA samples

    Parameters
    ----------
    n_topics : int
        Number of topics

    n_words : int
        Number of words in corpus

    min_doc_size : int
        Min word count in a document

    mean_doc_size : int
        Mean word count in a document

    doc_topic_prior : double
        Uniform Dirichlet prior of a document

    topic_word_prior : double
        Uniform Dirichlet prior of a topic

    random_state : int, RandomState instance or None
        Seed / generator for reproducible sampling

    Attributes
    ----------
    topic_word_distr_ : array, [n_topics, n_words]
        Topic word distribution.
    """

    def __init__(self, n_topics, n_words, min_doc_size,
                 mean_doc_size, doc_topic_prior,
                 topic_word_prior, random_state=None):
        self.n_topics = n_topics
        self.n_words = n_words
        self.min_doc_size = min_doc_size
        self.mean_doc_size = mean_doc_size
        self.doc_topic_prior = doc_topic_prior
        self.topic_word_prior = topic_word_prior
        self.random_state = random_state
        self.random_state_ = check_random_state(self.random_state)

        # hidden variables
        self.topic_word_prior_ = np.repeat(topic_word_prior, n_words)
        self.doc_topic_prior_ = np.repeat(self.doc_topic_prior, n_topics)
        # one word distribution per topic: (n_topics, n_words)
        self.topic_word_distr_ = self.random_state_.dirichlet(
            self.topic_word_prior_, n_topics)

    def generate_documents(self, n_docs):
        """Generate Random doc-words Matrix

        Parameters
        ----------
        n_docs : int
            number of documents

        Return
        ------
        docs_distr : array, [n_docs, n_topics]
            per-document topic distribution
        doc_word_mtx : sparse matrix, [n_docs, n_words]
            document words matrix
        """
        rs = self.random_state_
        n_topics = self.n_topics
        n_words = self.n_words
        # document lengths: shifted Poisson so every doc has >= min_doc_size words
        docs_size = poisson.rvs(mu=(self.mean_doc_size - self.min_doc_size),
                                size=n_docs, random_state=rs)
        docs_size += self.min_doc_size

        # NOTE: mirrors self.doc_topic_prior_ computed in __init__
        doc_prior = np.repeat(self.doc_topic_prior, n_topics)
        # (n_docs, n_topics)
        docs_distr = rs.dirichlet(doc_prior, n_docs)

        rows = []
        cols = []
        # `range` replaces six's xrange (file already uses Python 3 features);
        # list.extend replaces repeated np.append, which re-copied the whole
        # array on every document (O(n^2) in total corpus size).
        for i in range(n_docs):
            # mixture of topic word distributions for this document
            word_dist = np.dot(self.topic_word_distr_.T, docs_distr[i, :])
            word_idx = rs.choice(n_words, docs_size[i], p=word_dist, replace=True)
            rows.extend([i] * docs_size[i])
            cols.extend(word_idx)
        data = np.ones(len(rows))
        doc_word_mtx = coo_matrix((data, (rows, cols)),
                                  shape=(n_docs, n_words)).tocsr()
        return docs_distr, doc_word_mtx
|
from picostack.process_spawn import invoke
from picostack.textwrap_util import wrap_multiline
class VmBuilder(object):
    """Builds an Ubuntu JeOS guest image with `vmbuilder` for KVM/libvirt."""

    def get_build_jeos_call(self):
        """Return the shell command line used to build the JeOS image."""
        return wrap_multiline('''
        sudo vmbuilder kvm ubuntu --suite quantal --flavour virtual
        --arch i386 -o --libvirt qemu:///system
        --bridge %(bridge_interface)s
        --addpkg linux-image-generic
        ''' % {
            'bridge_interface': 'br0',
        })

    def build_jeos(self):
        """Run the build, echoing the command and its output."""
        # fix: print-statement syntax is Python-2-only; parenthesized
        # single-argument print() calls behave identically on 2 and 3
        print('Building ubuntu JeOS..')
        command = self.get_build_jeos_call()
        print('"%s"' % command)
        print(invoke(command))
|
# local_blast_zum.py
#
# Run on Python3
# Created by Alice on 2018-06-26.
#
import argparse
from os import listdir, remove, makedirs
from os.path import exists, isdir, isfile, join
from statistics import mean
from Bio.Blast.Applications import NcbiblastnCommandline
import xml.etree.ElementTree as ET
from Bio.Blast import NCBIXML
from match_db import Match_db
def main():
    """BLAST .fa input file(s) against a local nematode database.

    Writes a tab-separated best-hit row per read to temp_out2.txt, then
    summarizes the rows into per-taxon statistics and saves them to
    args.output.
    """
    args = get_arguments()
    result_file = "temp_out.xml"
    database_path = "~/nematode_ref/nematodeDB"
    # "~/Documents/larvkult_1508/nematodeDB"
    if isdir(args.input):
        # Use [-1] for the extension (the old [1] raised IndexError on
        # extensionless names and rejected multi-dot names), and join the
        # directory so the paths are valid regardless of the cwd.
        files = [join(args.input, fa_file) for fa_file in listdir(args.input)
                 if isfile(join(args.input, fa_file)) and
                 fa_file.split('.')[-1] == "fa"]
    elif isfile(args.input) and args.input.split(".")[-1] == "fa":
        files = [args.input]
    else:
        raise NameError('Input file or directory does not exist or is not .fa format')
    if args.verbose:
        print("\n---- Loaded input files ----")
        print(*files, sep='\n')
    result_file_2 = "temp_out2.txt"
    hits = {}
    with open(result_file_2, "w") as results:
        for fa_file in files:
            if args.verbose:
                print("\nBlasting file: {}".format(fa_file))
            blastn_cmd = NcbiblastnCommandline(query=fa_file, db=database_path,
                                               max_target_seqs=10, gapopen=2,
                                               gapextend=3, outfmt="5",
                                               out=result_file)
            stdout, stderr = blastn_cmd()
            print(stdout)
            # Parse the XML BLAST report.
            tree = ET.parse(result_file)
            root = tree.getroot()
            for read in root.iter('Iteration'):
                if int(read.find('Iteration_iter-num').text) % 100 == 0:
                    print("Read number: " + read.find('Iteration_iter-num').text)
                read_name = read.find('Iteration_query-def').text
                read_length = float(read.find('Iteration_query-len').text)
                matches = {}
                for hit in read.iter('Hit'):
                    hit_name = hit.find('Hit_def').text
                    hit_len = float(hit.find('Hit_len').text)
                    for hsp in hit.iter('Hsp'):
                        match_len = float(hsp.find('Hsp_identity').text)
                        # Identity fraction relative to the shorter sequence.
                        if hit_len < read_length:
                            perc_match = round(match_len / hit_len, 4)
                        else:
                            perc_match = round(match_len / read_length, 4)
                        identity = float(hsp.find('Hsp_identity').text)
                        e_val = hsp.find('Hsp_evalue').text
                        alg_len = hsp.find('Hsp_align-len').text
                        pid = (identity / float(alg_len)) * 100
                        gaps = int(hsp.find('Hsp_gaps').text)
                        missmatch = int(float(alg_len) - gaps - identity)
                        matches[hit_name] = [read_name, hit_name.split(" ")[0],
                                             hit_name, str(pid), e_val,
                                             str(alg_len), "0", "0",
                                             str(missmatch), "0", str(gaps),
                                             str(perc_match * 100)]
                if matches:
                    # Compare match percentages numerically; the old code
                    # compared them as strings, so "9.0" beat "85.0".
                    best_hit = max(matches,
                                   key=lambda k: float(matches[k][-1]))
                    string_2_write = "\t".join(matches[best_hit]) + "\n"
                else:
                    # Placeholder row so every read appears in the output.
                    string_2_write = (read_name +
                                      "\tshortname\tlongname\t0.0\t1\t0\t0\t0\t0\t0\t0\t0\n")
                results.write(string_2_write)
    hits = summarize_blast_results(result_file_2, hits, args.pid, args.eval)
    save_2_file(hits, args.output, args.verbose)
def get_arguments():
    """Parse command-line options; ensure the output name ends in .txt."""
    parser = argparse.ArgumentParser()
    parser.add_argument("input", help="the input fasta files")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="print more info")
    parser.add_argument("-o", "--output", required=True,
                        help="name of output file")
    parser.add_argument("--pid", type=float, default=60.0,
                        help="Threshold of percentage identity of hits")
    parser.add_argument("--eval", type=float, default=1e-10,
                        help="Threshold of e-val of hits")
    parsed = parser.parse_args()
    # Append the extension unless the last dot-separated piece is 'txt'.
    if parsed.output.split(".")[-1] != 'txt':
        parsed.output += ".txt"
    return parsed
def summarize_blast_results(results_file, hits, perc_id_thresh, e_val_thresh):
    """Aggregate per-read BLAST rows into per-taxon Match_db records.

    Rows with alignment length < 150, identity below perc_id_thresh, or
    e-value above e_val_thresh are pooled under the 'None' pseudo-taxon.

    Returns the (mutated) `hits` dict mapping short name -> Match_db.
    """
    # The `with` block closes the file; the old explicit close() inside it
    # was redundant.
    with open(results_file) as res_file:
        for line in res_file:
            query_res = line.split('\t')
            short_name = query_res[1]
            perc_id = float(query_res[3])
            e_val = float(query_res[4])
            alg_len = int(query_res[5])
            if alg_len < 150 or perc_id < perc_id_thresh or e_val > e_val_thresh:
                # Below threshold: count the read under the 'None' bucket.
                short_name = 'None'
                query_res[2] = 'None'
            if short_name in hits:
                hits[short_name].add_read(read=query_res[0],
                                          pid=perc_id, alg_len=int(query_res[5]),
                                          e_val=e_val, missmatch=int(query_res[8]),
                                          gaps=int(query_res[10]),
                                          gaps_o=int(query_res[9]))
            else:
                hits[short_name] = Match_db(sn=short_name, name=query_res[2],
                                            pid=perc_id, alg_len=int(query_res[5]),
                                            e_val=e_val, missmatch=int(query_res[8]),
                                            gaps=int(query_res[10]),
                                            gaps_o=int(query_res[9]),
                                            read=query_res[0])
    return hits
def save_2_file(hits, output_file, verbosity):
    """Write one tab-separated summary row per taxon, sorted by read count
    (descending)."""
    rows = [[m.name, m.count, mean(m.pid), mean(m.e_val), mean(m.alg_len),
             mean(m.missmatch), mean(m.gaps), mean(m.gap_openings)]
            for m in hits.values()]
    rows.sort(key=lambda row: row[1], reverse=True)
    out_lines = ["\t".join(map(str, row)) + "\n" for row in rows]
    if verbosity:
        print("\n---- Top 10 hits ----")
        print(*out_lines[:10], sep='')
        print("\nSaving results to: {}".format(output_file))
    handle = open(output_file, "w")
    handle.writelines(out_lines)
    handle.close()
main()
|
from collections import defaultdict
from . import builtin
from .. import options as opts
from .path import buildpath, relname, within_directory
from .file_types import FileList, make_file_list, static_file
from ..backends.make import writer as make
from ..backends.ninja import writer as ninja
from ..build_inputs import build_input, Edge
from ..file_types import *
from ..iterutils import first, flatten, iterate
from ..objutils import convert_each, convert_one
from ..path import Path
from ..shell import posix as pshell
build_input('compile_options')(lambda build_inputs, env: defaultdict(list))
class BaseCompile(Edge):
    """Base build-graph edge for compilation-like steps.

    Subclasses must set ``self.compiler``, ``self.file`` and
    ``self.user_options`` before calling ``__init__`` here, since the
    output name and option list are derived from them.
    """

    desc_verb = 'compile'

    def __init__(self, context, name, internal_options, directory=None,
                 extra_deps=None, description=None):
        build = context.build
        if name is None:
            # No explicit name: derive it from the input file, optionally
            # placing it under `directory`.
            name = self.compiler.default_name(self.file, self)
            if directory:
                name = within_directory(Path(name), directory).suffix
        else:
            name = relname(context, name)

        # Let the compiler inject extra options based on the output name.
        extra_options = self.compiler.pre_output(context, name, self)
        self._internal_options = opts.option_list(
            internal_options, extra_options
        )

        output = self.compiler.output_file(name, self)
        primary = first(output)
        options = self.options
        compiler = self.compiler
        # post_output may rewrite what callers see as the public output.
        public_output = compiler.post_output(context, options, output, self)
        primary.post_install = compiler.post_install(options, output, self)
        super().__init__(build, output, public_output, extra_deps, description)

    @property
    def options(self):
        # Internally-computed options first, then user-supplied ones.
        return self._internal_options + self.user_options

    def flags(self, global_options=None):
        # Translate the merged option list into command-line flags.
        return self.compiler.flags(self.options, global_options,
                                   self.raw_output)

    @staticmethod
    def convert_args(context, kwargs):
        # Normalize the `directory` kwarg into a build-tree path.
        convert_one(kwargs, 'directory', lambda x: buildpath(context, x, True))
        return kwargs
class Compile(BaseCompile):
    """Edge for compiling a source or header, with include dirs, libs,
    packages and (optionally) a precompiled header."""

    def __init__(self, context, name, *, includes, include_deps, pch, libs,
                 packages, options, lang=None, directory=None, extra_deps=None,
                 description=None):
        self.includes = includes
        self.include_deps = include_deps
        self.packages = packages
        self.user_options = options

        # Build the internal option list in a fixed order: include dirs,
        # then libs, then package options, then the PCH.
        internal_options = opts.option_list(opts.include_dir(i)
                                            for i in self.includes)

        # Don't bother handling forward_opts from libs now, since the only
        # languages that need libs during compilation don't support static
        # linking anyway.
        if self.compiler.needs_libs:
            self.libs = libs
            internal_options.extend(opts.lib(i) for i in self.libs)
        if self.compiler.needs_package_options:
            internal_options.collect(i.compile_options(self.compiler)
                                     for i in self.packages)

        self.pch = pch
        if self.pch:
            if not self.compiler.accepts_pch:
                raise TypeError('pch not supported for this compiler')
            internal_options.append(opts.pch(self.pch))

        super().__init__(context, name, internal_options, directory,
                         extra_deps, description)

    @staticmethod
    def convert_args(context, lang, kwargs):
        def pch(file, **kwargs):
            # Compile a PCH from a header passed directly as `pch`.
            return context['precompiled_header'](file, file, **kwargs)

        includes = kwargs.get('includes')
        # Include entries that are code files or generated outputs become
        # extra dependencies of the compile step.
        kwargs['include_deps'] = [
            i for i in iterate(includes)
            if isinstance(i, CodeFile) or getattr(i, 'creator', None)
        ]
        convert_each(kwargs, 'includes', context['header_directory'])
        convert_each(kwargs, 'libs', context['library'], lang=lang)
        convert_each(kwargs, 'packages', context['package'], lang=lang)
        kwargs['options'] = pshell.listify(kwargs.get('options'),
                                           type=opts.option_list)
        # The PCH inherits this compile's includes/packages/options so both
        # are built consistently.
        convert_one(kwargs, 'pch', pch, includes=includes,
                    packages=kwargs['packages'], options=kwargs['options'],
                    lang=lang)
        kwargs = BaseCompile.convert_args(context, kwargs)
        return kwargs

    def add_extra_options(self, options):
        self._internal_options.extend(options)
        # PCH files should always be built with the same options as files using
        # them, so forward the extra options onto the PCH if it exists.
        if self.pch and hasattr(self.pch.creator, 'add_extra_options'):
            self.pch.creator.add_extra_options(options)
class CompileSource(Compile):
    """Edge that compiles a single source file into an object file."""

    def __init__(self, context, name, file, *, lang=None, **kwargs):
        builder_lang = lang or getattr(file, 'lang', None)
        if builder_lang is None:
            raise ValueError('unable to determine language for file {!r}'
                             .format(file.path))
        self.file = file
        self.compiler = context.env.builder(builder_lang).compiler
        super().__init__(context, name, **kwargs)

    @classmethod
    def convert_args(cls, context, file, kwargs):
        lang = kwargs.get('lang')
        if lang is None:
            # Check if the input file needs to be forwarded on to
            # `generated_source`.
            guessed_file = context['auto_file'](file)
            if getattr(guessed_file, 'lang', None) is None:
                raise ValueError('unable to determine language for file {!r}'
                                 .format(guessed_file.path))
            builder = context.env.builder(guessed_file.lang)
            if hasattr(builder, 'compiler'):
                # This builder supports compilation; no need to forward to
                # `generated_source`.
                file = context['source_file'](file)
            else:
                # Pop off the `directory` argument and pass it to
                # `generated_source`. This puts the intermediate source file in
                # the `directory`, and then our final object file will
                # automatically go there as well without needing the
                # `directory` itself.
                file = context['generated_source'](
                    guessed_file, directory=kwargs.pop('directory', None)
                )
        else:
            file = context['source_file'](file, lang=lang)
        return file, super().convert_args(context, lang or file.lang, kwargs)
class CompileHeader(Compile):
    """Edge that compiles a header file into a precompiled header."""

    desc_verb = 'compile-header'

    def __init__(self, context, name, file, *, source, lang=None, **kwargs):
        pch_lang = lang or getattr(file, 'lang', None)
        if pch_lang is None:
            raise ValueError('unable to determine language for file {!r}'
                             .format(file.path))
        self.file = file
        self.pch_source = source
        self.compiler = context.env.builder(pch_lang).pch_compiler
        super().__init__(context, name, **kwargs)

    @classmethod
    def convert_args(cls, context, file, kwargs):
        requested_lang = kwargs.get('lang')
        header = context['header_file'](file, lang=requested_lang)
        # Fall back to the header's detected language when none was given.
        effective_lang = requested_lang or header.lang
        convert_one(kwargs, 'source', context['source_file'],
                    lang=effective_lang)
        return header, super().convert_args(context, effective_lang, kwargs)
class GenerateSource(BaseCompile):
    """Edge that transpiles a source file into another language."""

    desc_verb = 'generate'

    def __init__(self, context, name, file, *, options, lang=None,
                 directory=None, extra_deps=None, description=None):
        source_lang = lang or getattr(file, 'lang', None)
        if source_lang is None:
            raise ValueError('unable to determine language for file {!r}'
                             .format(file.path))
        self.file = file
        self.user_options = options
        self.compiler = context.env.builder(source_lang).transpiler
        # No internal options for transpilation.
        super().__init__(context, name, None, directory, extra_deps,
                         description)

    @classmethod
    def convert_args(cls, context, file, kwargs):
        src = context['auto_file'](file, lang=kwargs.get('lang'))
        kwargs['options'] = pshell.listify(kwargs.get('options'),
                                           type=opts.option_list)
        return src, super().convert_args(context, kwargs)
@builtin.function()
@builtin.type(ObjectFile, extra_in_type=type(None))
def object_file(context, name=None, file=None, **kwargs):
    """Return an object file: a pre-built one (when `file` is None) or the
    result of compiling `file`."""
    if file is None:
        # Reference an existing object file on disk.
        if name is None:
            raise TypeError('expected name')
        dist = kwargs.pop('dist', True)
        params = [('format', context.env.target_platform.object_format),
                  ('lang', context.build['project']['lang'])]
        return static_file(context, ObjectFile, name, dist, params, kwargs)
    src, kwargs = CompileSource.convert_args(context, file, kwargs)
    edge = CompileSource(context, name, src, **kwargs)
    return edge.public_output
@builtin.function()
@builtin.type(FileList, in_type=object)
def object_files(context, files, **kwargs):
    """Build a FileList of object files, one per input source."""
    @builtin.type(ObjectFile, extra_in_type=CodeFile)
    def make_object_file(file, **kwargs):
        src, converted = CompileSource.convert_args(context, file, kwargs)
        return CompileSource(context, None, src, **converted).public_output

    return make_file_list(context, make_object_file, files, **kwargs)
@builtin.function()
@builtin.type(PrecompiledHeader, extra_in_type=type(None))
def precompiled_header(context, name=None, file=None, **kwargs):
    """Return a precompiled header: a pre-built one (when `file` is None)
    or the result of compiling `file`."""
    if file is None:
        # Reference an existing PCH on disk.
        if name is None:
            raise TypeError('expected name')
        dist = kwargs.pop('dist', True)
        params = [('lang', context.build['project']['lang'])]
        return static_file(context, PrecompiledHeader, name, dist, params,
                           kwargs)
    header, kwargs = CompileHeader.convert_args(context, file, kwargs)
    return CompileHeader(context, name, header, **kwargs).public_output
@builtin.function()
@builtin.type(CodeFile, short_circuit=False, first_optional=True)
def generated_source(context, name, file, **kwargs):
    """Transpile `file` into a generated source file."""
    src, kwargs = GenerateSource.convert_args(context, file, kwargs)
    edge = GenerateSource(context, name, src, **kwargs)
    return edge.public_output
@builtin.function()
@builtin.type(FileList, in_type=object)
def generated_sources(context, files, **kwargs):
    """Transpile each input file; returns a FileList of generated sources."""
    return make_file_list(context, context['generated_source'], files,
                          **kwargs)
@builtin.function()
def global_options(context, options, lang):
    """Append compile options to the global per-language option lists."""
    listified = pshell.listify(options, type=opts.option_list)
    for language in iterate(lang):
        context.build['compile_options'][language].extend(listified)
def _get_flags(backend, rule, build_inputs, buildfile):
    """Compute backend variables and command kwargs for a compile rule,
    merging the global per-language options with the rule's own flags."""
    variables = {}
    cmd_kwargs = {}
    compiler = rule.compiler
    if hasattr(compiler, 'flags_var'):
        gopts = build_inputs['compile_options'][compiler.lang]
        global_cflags, cflags = backend.flags_vars(
            compiler.flags_var,
            compiler.global_flags + compiler.flags(gopts, mode='global'),
            buildfile
        )
        cmd_kwargs['flags'] = cflags
        rule_flags = rule.flags(gopts)
        if rule_flags:
            # Rule-specific flags extend the global ones.
            variables[cflags] = [global_cflags] + rule_flags
    return variables, cmd_kwargs
@make.rule_handler(CompileSource, CompileHeader, GenerateSource)
def make_compile(rule, build_inputs, buildfile, env):
    """Emit a Make rule for a compile/transpile edge."""
    compiler = rule.compiler
    variables, cmd_kwargs = _get_flags(make, rule, build_inputs, buildfile)

    # Choose how outputs are referenced in the recipe: $@ when the compiler
    # names all outputs itself, otherwise positional recipe parameters.
    output_params = []
    if compiler.num_outputs == 'all':
        output_vars = make.qvar('@')
    else:
        output_vars = []
        for i in range(compiler.num_outputs):
            v = make.var(str(i + 1))
            output_vars.append(v)
            output_params.append(rule.output[i])

    # Define the shared recipe variable once per compiler.
    recipename = make.var('RULE_{}'.format(compiler.rule_name.upper()))
    if not buildfile.has_variable(recipename):
        recipe_extra = []
        # Only GCC-style depfiles are supported by Make.
        if compiler.deps_flavor == 'gcc':
            depfixer = env.tool('depfixer')
            cmd_kwargs['deps'] = deps = first(output_vars) + '.d'
            recipe_extra = [make.Silent(depfixer(deps))]
        buildfile.define(recipename, [compiler(
            make.qvar('<'), output_vars, **cmd_kwargs
        )] + recipe_extra)

    # Collect the prerequisites: PCH source, input file, PCH, tracked
    # includes, libs, and package dependencies.
    deps = []
    if getattr(rule, 'pch_source', None):
        deps.append(rule.pch_source)
    deps.append(rule.file)
    if getattr(rule, 'pch', None):
        deps.append(rule.pch)
    deps.extend(getattr(rule, 'include_deps', []))
    if getattr(rule, 'libs', None):
        deps.extend(rule.libs)
    deps.extend(flatten(i.deps for i in getattr(rule, 'packages', [])))

    if compiler.deps_flavor == 'gcc':
        # Include the generated .d file so header changes trigger rebuilds.
        depfile = rule.output[0].path.addext('.d')
        build_inputs.add_target(File(depfile))
        buildfile.include(depfile, optional=True)

    make.multitarget_rule(
        build_inputs, buildfile,
        targets=rule.output,
        deps=deps + rule.extra_deps,
        order_only=make.directory_deps(rule.output),
        recipe=make.Call(recipename, *output_params),
        variables=variables
    )
@ninja.rule_handler(CompileSource, CompileHeader, GenerateSource)
def ninja_compile(rule, build_inputs, buildfile, env):
    """Emit a Ninja build statement for a compile/transpile edge."""
    compiler = rule.compiler
    variables, cmd_kwargs = _get_flags(ninja, rule, build_inputs, buildfile)
    if rule.description:
        variables['description'] = rule.description

    # Pick the variable(s) used to name outputs in the command line.
    if compiler.num_outputs == 'all':
        output_vars = ninja.var('out')
    elif compiler.num_outputs == 1:
        output_vars = ninja.var('output')
        variables[output_vars] = rule.output[0]
    else:
        output_vars = []
        for i in range(compiler.num_outputs):
            v = ninja.var('output{}'.format(i + 1))
            output_vars.append(v)
            variables[v] = rule.output[i]

    # Define the shared rule once per compiler, wiring up dep parsing.
    if not buildfile.has_rule(compiler.rule_name):
        depfile = None
        deps = None
        if compiler.deps_flavor == 'gcc':
            deps = 'gcc'
            cmd_kwargs['deps'] = depfile = ninja.var('out') + '.d'
        elif compiler.deps_flavor == 'msvc':
            deps = 'msvc'
            cmd_kwargs['deps'] = True
        desc = rule.desc_verb + ' => ' + first(output_vars)
        buildfile.rule(name=compiler.rule_name, command=compiler(
            ninja.var('in'), output_vars, **cmd_kwargs
        ), depfile=depfile, deps=deps, description=desc)

    # A PCH compile uses its source as input and the header as an implicit
    # dependency.
    inputs = [rule.file]
    implicit_deps = []
    if getattr(rule, 'pch', None):
        implicit_deps.append(rule.pch)
    if getattr(rule, 'pch_source', None):
        inputs = [rule.pch_source]
        implicit_deps.append(rule.file)
    implicit_deps.extend(getattr(rule, 'include_deps', []))
    if getattr(rule, 'libs', None):
        implicit_deps.extend(rule.libs)
    implicit_deps.extend(flatten(
        i.deps for i in getattr(rule, 'packages', [])
    ))

    # Ninja doesn't support multiple outputs and deps-parsing at the same time,
    # so just use the first output and set up an alias if necessary. Aliases
    # aren't perfect, since the build can get out of sync if you delete the
    # "alias" file, but it's close enough.
    if compiler.deps_flavor in ('gcc', 'msvc') and len(rule.output) > 1:
        output = rule.output[0]
        buildfile.build(
            output=rule.output[1:],
            rule='phony',
            inputs=rule.output[0]
        )
    else:
        output = rule.output

    buildfile.build(
        output=output,
        rule=compiler.rule_name,
        inputs=inputs,
        implicit=implicit_deps + rule.extra_deps,
        variables=variables
    )
# The msbuild backend is optional; register its handlers only when it is
# importable on this platform.
try:
    from ..backends.msbuild import writer as msbuild

    @msbuild.rule_handler(CompileSource, CompileHeader)
    def msbuild_compile(rule, build_inputs, solution, env):
        # MSBuild does compilation and linking in one unit; see link.py.
        pass

    @msbuild.rule_handler(GenerateSource)
    def msbuild_generate_source(rule, build_inputs, solution, env):
        raise ValueError('msbuild backend does not currently support ' +
                         "'generated_source'")  # pragma: no cover
except ImportError:  # pragma: no cover
    pass
|
"""
Binary Search Tree and Tree node
"""
from typing import Optional
class Node:
    """A binary-tree node: an integer payload plus left/right child links."""

    def __init__(self, data: int):
        self.data = data
        # Children start out absent.
        self.left: Optional["Node"] = None
        self.right: Optional["Node"] = None

    def __repr__(self):
        return "{} {}".format(self.__class__, self.data)
class BST:
    """Binary search tree.

    Data less than that of a node goes in the left subtree and data greater
    than that of a node goes in the right subtree, recursively.  No two
    nodes may hold the same data: duplicate inserts are ignored.

    h: height of tree, n: total number of nodes;
    in the worst case the height of a binary search tree becomes n.
    """

    def __init__(self):
        self.root: Optional[Node] = None

    def insert(self, data: int):
        """Insert `data`, ignoring duplicates.  O(h), iterative.

        Returns self so calls can be chained.

        Bug fix: the old version, on a duplicate key, overwrote the node's
        right child with a fresh leaf, silently dropping that subtree.
        """
        if self.root is None:
            self.root = Node(data)
            return self
        cur = self.root
        while cur.data != data:
            if data < cur.data:
                if cur.left is None:
                    cur.left = Node(data)
                    return self
                cur = cur.left
            else:
                if cur.right is None:
                    cur.right = Node(data)
                    return self
                cur = cur.right
        # Duplicate key: no-op.
        return self

    def search(self, data: int) -> str:
        """Return "Found" if data is in the tree, else "Not Found". O(h)."""
        cur = self.root
        while cur and cur.data != data:
            cur = cur.left if data < cur.data else cur.right
        return "Found" if cur else "Not Found"

    @staticmethod
    def min_value_node(node) -> int:
        """Return the smallest value in the subtree at `node`, or -1 when
        the subtree is empty."""
        if node is None:
            return -1
        while node.left:
            node = node.left
        return node.data

    def delete(self, root, data: int):
        """Delete `data` from the subtree rooted at `root`. O(h).

        Returns the (possibly new) subtree root. 3 cases:
        1. Node has no children: remove it (falls into case 3, left=None).
        2. Node has a right child: copy the inorder successor's value into
           the node and delete the successor from the right subtree.
        3. Node has only a left child: replace the node with that child.
        """
        if root is None:
            return None
        if data < root.data:
            root.left = self.delete(root.left, data)
        elif data > root.data:
            root.right = self.delete(root.right, data)
        elif root.right:  # data == root.data, successor exists on the right
            root.data = self.min_value_node(root.right)
            root.right = self.delete(root.right, root.data)
        else:  # data == root.data and root.right is None
            return root.left
        return root
def inorder(root: Optional[Node]):
    """Print the tree's values in ascending (inorder) order, space-separated."""
    if root is None:
        return
    inorder(root.left)
    print(root.data, end=" ")
    inorder(root.right)
if __name__ == "__main__":
    # Demo: build a small tree, then show traversal, search and deletion.
    bst = BST()
    bst.insert(50).insert(30).insert(20).insert(40).insert(70).insert(60).insert(80)
    inorder(bst.root)
    print()
    print(bst.search(20))
    print("Deleting 50")
    bst.delete(bst.root, 50)
    inorder(bst.root)
    print()
|
from . import additional
from . import attachments
from .base import BaseModel
from .community import Community
from .events.community.events_list import Event as BotEvent
from .message import Action
from .message import Message
from .user import User
|
import os
from torchvision import datasets
def download_cifar10(path=None, train=True, transform=None):
    """Download the CIFAR10 dataset.

    Args:
        path: Path where the dataset will be downloaded. Defaults to None,
            in which case data is downloaded into a 'cifar10' directory
            next to this file.  (The old signature required this argument
            even though the docstring promised a default.)
        train: If True, download training data else test data.
            Defaults to True.
        transform: Data transformations to be applied on the data.
            Defaults to None.

    Returns:
        Downloaded dataset.
    """
    if not path:
        path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            'cifar10')
    return datasets.CIFAR10(
        path, train=train, download=True, transform=transform
    )
|
import sys, os
import commands
import time
import multiprocessing
import random
import numpy as np
# Process class to run the Bayenv test phase in paralell
class RunInProcess(multiprocessing.Process):
    """Worker process that runs the Bayenv test phase on its slice of SNPs.

    Python 2 only (print statements and the `commands` module).  Reads
    two-line SNP records from `testdata`, writes each to a per-worker temp
    file, runs `cmd` on it, and accumulates results under results/.

    NOTE(review): every worker opens the same "errors.out" for writing,
    so concurrent workers race on it — confirm this is intended.
    """

    def __init__(self, cmd, thread_id, testdata, testsize):
        multiprocessing.Process.__init__(self)
        self.cmd = cmd              # command template with %s slots for (marker_file, outfile)
        self.thread_id = thread_id  # worker index; namespaces temp/output files
        self.testdata = testdata    # path to this worker's SNP input file
        self.testsize = testsize    # total SNP count, used for the ETA estimate

    def run(self):
        errors = open("errors.out", 'w')
        outfile = "results/bf_results_t" + str(self.thread_id)
        k = 0
        dataset = open(self.testdata, 'r')
        thread_id = str(self.thread_id)
        cmd = self.cmd
        data_size = self.testsize
        # NOTE(review): `while dataset:` is always true; the loop exits via
        # the break on an empty readline() at EOF.
        while dataset:
            k += 1
            line1 = dataset.readline()
            if line1 == '':
                break
            l = line1.split("\t")
            # First tab field is the marker name; the rest is data line 1.
            marker_file = l.pop(0) + "-" + str(self.thread_id)
            FILE = open(marker_file, 'w')
            line1 = "\t".join(l)
            line2 = dataset.readline()
            FILE.write(line1 + line2)
            FILE.close()
            print "BAYENV: process " + thread_id + " is processing " + marker_file + " (" + str(k) + ")...",
            start_test = time.time()
            sys.stdout.flush()
            failure, output = commands.getstatusoutput(cmd % (marker_file, outfile))
            elapsed = (time.time() - start_test)
            # Rough ETA in minutes from the average per-SNP runtime.
            remaining = ((data_size-k)*elapsed)/60
            print "done. %f sec to complete. Estimated time remaining: %f minutes" % (elapsed, remaining)
            if failure:
                # Log the failing locus, clean up its temp files, and move on.
                print output
                errors.write(output)
                error = "Could not test locus: " + marker_file +"\n"\
                        + line1 + line2
                errors.write(error)
                os.remove(marker_file)
                os.remove(marker_file + ".freqs")
                continue
            os.remove(marker_file)
            os.remove(marker_file + ".freqs")
        errors.close()
        dataset.close()
############# Running BAYENV with multiprocessing ###############
def test_all_snps_multip(testdata, cmds, testsize):
procs = []
start = time.time()
for i in range(len(cmds)):
proc = RunInProcess(cmds[i], i, testdata, testsize)
procs.append(proc)
proc.start()
for p in procs:
p.join()
print "Elapsed time: %s" % (time.time()-start)
#Calculating covariance matrices every 500 iterations - Bayenv 2.0
def compute_null_model_bayenv2(num_pops, iterations, snpfile):
    """Run bayenv2 to estimate the null-model covariance matrices.

    Writes the exact command used to covar-cmd.txt, bayenv2's output to
    covars.txt (shell redirect), and the wall-clock time to t_usage.out.
    Python 2 only (print statements and the `commands` module).
    """
    print "#############################################"
    time_usage = open("t_usage.out", 'w')
    # Random MCMC seed; printed so runs can be reproduced.
    rand_seed = str(int(random.uniform(1,99999)))
    print "Random seed = " + rand_seed
    cmd_str = open("covar-cmd.txt", "wb")
    cmd = "bayenv2 -i " + snpfile + " -p " + str(num_pops) + " -k " + str(iterations) \
        + " -r " + str(rand_seed) + " > covars.txt"
    print cmd
    cmd_str.write(cmd)
    cmd_str.close()
    print "BAYENV calculating the covariance matrices...",
    start_test = time.time()
    sys.stdout.flush()
    failure, output = commands.getstatusoutput(cmd)
    elapsed = (time.time() - start_test)
    usage = "done. %f sec to complete" % elapsed
    print usage
    time_usage.write(usage)
    time_usage.close()
#Separates the covariance matrices from bayenv2 output and
#writes one mean co-variance matrix
def write_mean_covar_bayenv2():
    """Average the covariance matrices in covars.txt and write the mean.

    Reads bayenv2 output from "covars.txt" (skipping the 15 header lines),
    splits it into matrices at each "VAR-COVAR" marker line, averages them
    element-wise, then appends the result to "covar-cmd.txt" and writes it
    to "mean_covar.txt".

    Fixes two bugs in the old version: the first marker pushed an *empty*
    matrix into the list, and the *last* matrix was never flushed after the
    loop, so the array was ragged and the mean was wrong.
    """
    covar_lists = []
    cov = []
    with open("covars.txt", 'r') as covars:
        # Skip the 15-line header.
        for _ in range(15):
            covars.readline()
        for line in covars:
            if "VAR-COVAR" in line:
                if cov:  # flush the previous matrix, if any
                    covar_lists.append(cov)
                cov = []
            elif line == "\n":
                continue
            else:
                cov.append(line.strip("\t\n").split("\t"))
    if cov:  # flush the final matrix (previously lost)
        covar_lists.append(cov)

    matrix_mean = np.average(np.array(covar_lists, np.float64), axis=0)
    covar_string = ""
    # `row` instead of the old loop variable `list`, which shadowed the
    # builtin.
    for row in matrix_mean:
        for elem in row:
            covar_string += str(elem) + "\t"
        covar_string += "\n"

    with open("covar-cmd.txt", "a") as cmd_file:
        cmd_file.write(covar_string)
    with open("mean_covar.txt", 'w') as covar_file:
        covar_file.write(covar_string)
|
#!/usr/bin/env python3
'''
@file: text_gio.py
@auth: Sprax Lines
@date: 2020.11.22
DNA pattern matching functions and some text file utilities.
Written with Python version >= 3.8.5
'''
import argparse
import errno
# import fnmatch
import glob
import ntpath
import os
import os.path
import pickle
import random
# import re
import sys
import time
from pdb import set_trace
from typing import Deque, Dict, List, Set, Tuple
# from pprint import pprint
def cwd():
    '''current working directory'''
    # The old dirname(realpath('.')) returned the *parent* of the current
    # directory, since dirname strips the last path component.
    return os.getcwd()
def path_base(path):
    '''
    Return just the basename (no parent path) for Unix- or Windows-style
    paths, e.g. 'C:\\tmp/some\\file.txt' => 'file.txt'.
    '''
    parent, base = ntpath.split(path)
    if base:
        return base
    # Trailing separator: take the last component of the parent instead.
    return ntpath.basename(parent)
def path_name(path):
    '''
    Return just the basename without parent path or file extension,
    for Unix- or Windows-style paths.
    Logic: 'C:\\tmp/some\\file.txt' => 'file'
    '''
    # Inline basename extraction (same logic as path_base above).
    head, tail = ntpath.split(path)
    base = tail or ntpath.basename(head)
    return os.path.splitext(base)[0]
def get_abs_path(path):
    '''
    Return the absolute form of `path` (unchanged if it is already
    absolute).
    '''
    return os.path.abspath(path)
def make_abs_path(dirpath, filepath):
    '''
    Join `filepath` onto `dirpath` and return the absolute result as a
    string suitable as an argument to open().
    '''
    joined = os.path.join(dirpath, filepath)
    return os.path.abspath(joined)
def print_stdout_stderr(text):
    ''' print text to stdout and stderr '''
    for label, stream in (("sys.stdout: ", sys.stdout),
                          ("sys.stderr: ", sys.stderr)):
        print(label, text, file=stream)
def open_out_file(file_spec, label='text'):
    '''
    Return a file handle open for writing (to be closed by the caller),
    or None when file_spec is falsy.  '-' and 'stdout' map to sys.stdout;
    if the target cannot be created (ENOENT), a warning is printed and
    sys.stdout is returned instead.
    '''
    if not file_spec:
        return None
    if file_spec in ('-', 'stdout'):
        return sys.stdout
    try:
        return open(file_spec, 'w')
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
        print("IOError opening {} file [{}]:".format(label, file_spec), ex)
        return sys.stdout
def read_lines(file_spec, charset='utf8'):
    '''Yield every line of a text file, right-stripped, as an iter of str.'''
    with open(file_spec, 'r', encoding=charset) as handle:
        for raw in handle:
            yield raw.rstrip()
def read_text_lines(file_spec, charset='utf8'):
    '''Yield only the non-empty (stripped) lines of a text file as str.'''
    with open(file_spec, 'r', encoding=charset) as handle:
        for raw in handle:
            stripped = raw.strip()
            if stripped:
                yield stripped
def pickle_file(in_path, out_path, data_struct, data_adder, charset='utf8'):
    '''
    Read each non-empty line of in_path into data_struct via data_adder,
    then pickle data_struct to out_path.
    Returns (number of lines added, len(data_struct)).
    '''
    count = 0
    # Inlined non-empty-line reading (same logic as read_text_lines above).
    with open(in_path, 'r', encoding=charset) as src:
        for raw in src:
            line = raw.strip()
            if line:
                data_adder(data_struct, line)
                count += 1
    with open(out_path, 'wb') as dst:
        pickle.dump(data_struct, dst)
    return (count, len(data_struct))
def pickle_word_list(in_path, out_path, word_set=None, adder=set.add, charset='utf8'):
    '''
    Read single words/strings (one per line) from in_path and pickle them
    to out_path as a set.  Returns (lines read, resulting set size).
    '''
    # Fresh set per call; a mutable default would be shared across calls.
    target = set() if word_set is None else word_set
    return pickle_file(in_path, out_path, target, adder, charset)
def read_file(file_spec, charset='us-ascii'):  # Not: 'utf8'
    '''
    Return the entire contents of the file as one str.
    '''
    with open(file_spec, 'r', encoding=charset) as handle:
        return handle.read()
def read_file_eafp(file_spec, charset='us-ascii'):  # Not: 'utf8'
    '''
    Return the file's contents, or None if it does not exist.
    EAFP style: try the open and handle ENOENT, rather than pre-checking.
    '''
    try:
        with open(file_spec, 'r', encoding=charset) as src:
            return src.read()
    except IOError as ex:
        if ex.errno != errno.ENOENT:
            raise
        print("WARNING: {} does not exist".format(file_spec))
        return None
def read_text_file(file_spec):
    '''
    Return all contents of the file as one str, decoding as UTF-8 when
    possible and falling back to ISO-8859-1.
    '''
    # Inlined reads (same logic as read_file above) so the fallback is
    # self-contained.
    try:
        with open(file_spec, 'r', encoding='utf-8') as src:
            return src.read()
    except UnicodeDecodeError:
        with open(file_spec, 'r', encoding='iso-8859-1') as src:
            return src.read()
def glob_files(dir_path: str, end_pat: str, recursive=False, verbose=0):
    '''
    Return an iterator over (relative) paths of plain files in dir_path
    whose names end with end_pat, using glob (*nix-style matching).
    Recursion is OFF by default (the flag is currently unused).
    '''
    pattern = "{}/*{}".format(dir_path, end_pat)
    if verbose > 3:
        print("find_file_paths: glob_pat(%s)" % pattern)
    return filter(os.path.isfile, glob.iglob(pattern))
def scan_files(dir_path: str, end_pat: str) -> Dict:
    '''
    Return the names (only, no paths) of entries in dir_path whose names
    end with end_pat, via os.scandir.
    '''
    names = []
    for entry in os.scandir(dir_path):
        if entry.name.endswith(end_pat):
            names.append(entry.name)
    return names
def name_seq_map_from_dir(dna_dir: str,
                          end_pat: str,
                          charset: str = 'us-ascii') -> Dict:
    '''
    Map each matching file's base name (no dir, no extension) to its
    full text contents.
    TODO: separate file finding from loading.
    '''
    pattern = dna_dir + "/*" + end_pat
    mapping = {}
    for path in glob.iglob(pattern):
        if os.path.isfile(path):
            mapping[path_name(path)] = read_file(path, charset)
    return mapping
def load_dna_map(dna_dir: str,
                 file_pat: str,
                 charset: str,
                 verbose: int = 1) -> Dict:
    '''
    Load NCBI-named Proteins as DNA sequences (acgt) from raw text files.
    Uses glob rather than scandir for the simplicity of one flat data
    directory, not a tree of subdirectories.
    '''
    found = glob_files(dna_dir, file_pat, recursive=False, verbose=verbose)
    if verbose > 3:
        print("glob DNA files:", list(found))
    return name_seq_map_from_dir(dna_dir, file_pat, charset)
def find_proteins(name_acgt_map: Dict,
                  acgt_str: str,
                  max_find: int = 1,
                  verbose: int = 0) -> int:
    '''
    Count proteins whose sequence contains the search string acgt_str,
    stopping after max_find matches; max_find == 0 searches all proteins.
    With max_find == 1 the proteins are searched in random order,
    otherwise in sorted key order.

    NOTE: str.find uses CPython's fast search (Boyer-Moore-Horspool
    style), expected to run in sub-linear time for packed strings with a
    small alphabet.  @see: http://effbot.org/zone/stringlib.htm
    An equivalent SQLite query would use INSTR(acgt_text, ?) with an
    optional LIMIT when max_find > 0.
    '''
    keys = list(name_acgt_map)
    if max_find == 1:
        random.shuffle(keys)
    else:
        keys.sort()
    hits = 0
    for key in keys:
        pos = name_acgt_map[key].find(acgt_str)
        if pos < 0:
            if verbose > 2:
                print("%s excludes %s" % (key, acgt_str))
            continue
        hits += 1
        if verbose > 1:
            print("%s CONTAINS %s at %d" % (key, acgt_str, pos))
        if 0 < max_find <= hits:
            break
    return hits
def unit_test(opt):
    '''
    Exercise load_dna_map and find_proteins once with the parsed options,
    timing the search and printing a summary line.
    '''
    verbosity = opt.verbose
    protein_map = load_dna_map(opt.dna_dir, opt.file_pat, opt.charset, verbosity)
    start = time.perf_counter()
    num_found = find_proteins(protein_map, opt.acgt_str, opt.max_find, verbosity)
    elapsed = time.perf_counter() - start
    print("END unit_test: Found %d / %d matches for %s in %7.4g seconds."
          % (num_found, opt.max_find, opt.acgt_str, elapsed))
###############################################################################
def main():
    '''
    Test driver for finding DNA fragments in sequenced proteins.
    Parses command-line options and runs unit_test once; with
    -verbose > 5 it only dumps the parsed options and exits.
    '''
    dflt_acgt_str = "tattatttttatat"
    dflt_max_find = 1
    arg_parser = argparse.ArgumentParser(
        # usage='%(prog)s [options]',
        description="Test driver for DNA alignment/Protein search")
    arg_parser.add_argument('acgt_str', type=str, nargs='?',
                            default=dflt_acgt_str,
                            help=('DNA search string. Example: catattaggaatttt. '
                                  'Default: %s.' % dflt_acgt_str
                                  ))
    arg_parser.add_argument('max_find', type=int, nargs='?',
                            default=dflt_max_find,
                            help=('Maximum matches to find and show. '
                                  ' 0 means no limit. '
                                  ' Default: %d (stop at first match)'
                                  % dflt_max_find
                                  ))
    arg_parser.add_argument('-c', '--charset', dest='charset', type=str,
                            default='us-ascii',
                            # default='iso-8859-1',
                            help='charset encoding of input text')
    arg_parser.add_argument('-d', '--dna_dir', dest='dna_dir', type=str,
                            default='DNA',
                            help='path to dir containing DNA sequence files')
    arg_parser.add_argument('-file_pat', type=str,
                            default='.raw',
                            help='Pattern matching DNA sequence file names')
    arg_parser.add_argument('-name_order', action='store_true',
                            help=("Find and show matches in name-sorted order v. "
                                  "random (maybe parallelized) order. "
                                  "NOT IMPLEMENTED"))
    arg_parser.add_argument('-out_file', type=str, nargs='?', const='-',
                            help='output file for search results (default: None)')
    arg_parser.add_argument('-repr', action='store_true',
                            help='output repr of data, not raw data')
    arg_parser.add_argument('-verbose', type=int, nargs='?', const=1, default=1,
                            help='verbosity of output (default: 1)')
    cli_args = arg_parser.parse_args()
    if cli_args.verbose > 5:
        # Diagnostic mode: show options and module doc, then quit.
        print("outfile: <{}>".format(cli_args.out_file))
        print("args:", cli_args)
        print(__doc__)
        exit(0)
    unit_test(cli_args)


if __name__ == '__main__':
    main()
|
from django.conf import settings
from django_statsd.clients import statsd
from lib.geoip import GeoIP
import mkt
class RegionMiddleware(object):
    """Figure out the user's region and store it in a cookie."""

    def __init__(self):
        # GeoIP client configured from Django settings; used for IP lookups.
        self.geoip = GeoIP(settings)

    def region_from_request(self, request):
        """Resolve a region from the request's IP address via GeoIP.

        Falls back to RESTOFWORLD when the looked-up code is not a
        known region.
        """
        ip_reg = self.geoip.lookup(request.META.get('REMOTE_ADDR'))
        return mkt.regions.REGIONS_DICT.get(ip_reg, mkt.regions.RESTOFWORLD)

    def process_request(self, request):
        """Attach a region to the request.

        Resolution order: explicit ?region= query parameter, then GeoIP,
        then Accept-Language / forced language heuristics, finally
        RESTOFWORLD. Non-API requests always get RESTOFWORLD.
        """
        regions = mkt.regions.REGION_LOOKUP
        user_region = restofworld = mkt.regions.RESTOFWORLD
        if not getattr(request, 'API', False):
            # Region detection only matters for API traffic.
            request.REGION = restofworld
            mkt.regions.set_region(restofworld)
            return
        # ?region= -> geoip -> lang
        # NOTE(review): request.REQUEST is removed in newer Django
        # versions — presumably this project pins an older Django; confirm.
        url_region = request.REQUEST.get('region')
        if url_region in regions:
            statsd.incr('z.regions.middleware.source.url')
            user_region = regions[url_region]
        else:
            user_region = self.region_from_request(request)
            # If the above fails, let's try `Accept-Language`.
            if user_region == restofworld:
                statsd.incr('z.regions.middleware.source.accept-lang')
                if request.LANG == settings.LANGUAGE_CODE:
                    # Skip the first (default) choice when LANG is the default.
                    choices = mkt.regions.REGIONS_CHOICES[1:]
                else:
                    choices = mkt.regions.REGIONS_CHOICES
                if request.LANG:
                    for name, region in choices:
                        if name.lower() in request.LANG.lower():
                            user_region = region
                            break
                # All else failed, try to match against our forced Language.
                if user_region == mkt.regions.RESTOFWORLD:
                    # Try to find a suitable region.
                    for name, region in choices:
                        if region.default_language == request.LANG:
                            user_region = region
                            break
                accept_language = request.META.get('HTTP_ACCEPT_LANGUAGE')
                if (user_region == mkt.regions.US
                        and accept_language is not None
                        and not accept_language.startswith('en')):
                    # Let us default to restofworld if it's not English.
                    user_region = mkt.regions.RESTOFWORLD
            else:
                statsd.incr('z.regions.middleware.source.geoip')
        # Only update the user's region if it changed.
        amo_user = getattr(request, 'amo_user', None)
        if amo_user and amo_user.region != user_region.slug:
            amo_user.region = user_region.slug
            amo_user.save()
        request.REGION = user_region
        mkt.regions.set_region(user_region)
|
from main import BotClient
import os
# Instantiate the bot client and run it with the token from the TOKEN
# environment variable.
# NOTE(review): os.getenv("TOKEN") returns None when the variable is
# unset — presumably BotClient.run fails in that case; confirm.
bot = BotClient()
bot.run(os.getenv("TOKEN"))
|
from enum import Enum
# Closed set of media kinds; member values are preserved exactly
# (Film == 0, Series == 1) for compatibility with any stored data.
ContentType = Enum("ContentType", {"Film": 0, "Series": 1})
class Content:
    """A single catalogue entry: a named piece of content with its type and date."""

    def __init__(self, name, content_type, date):
        # Fields are stored as-is; no validation is performed.
        self.name = name
        self.content_type = content_type
        self.date = date

    def __str__(self) -> str:
        # Same rendering as before, via str.format attribute access.
        return "name='{0.name}'; type={0.content_type}; date={0.date}".format(self)
|
import hazelcast
from hazelcast.serialization.api import IdentifiedDataSerializable
class Student(IdentifiedDataSerializable):
    """Hazelcast-serializable value type carrying a student's id, name and GPA."""

    # Identifiers matched by the factory registered with the client.
    FACTORY_ID = 1
    CLASS_ID = 1

    def __init__(self, id=None, name=None, gpa=None):
        self.id = id
        self.name = name
        self.gpa = gpa

    def __repr__(self):
        return "Student(id=%s, name=%s, gpa=%s)" % (self.id, self.name, self.gpa)

    def get_factory_id(self):
        return Student.FACTORY_ID

    def get_class_id(self):
        return Student.CLASS_ID

    def read_data(self, object_data_input):
        # Field order must mirror write_data exactly.
        self.id = object_data_input.read_int()
        self.name = object_data_input.read_string()
        self.gpa = object_data_input.read_float()

    def write_data(self, object_data_output):
        object_data_output.write_int(self.id)
        object_data_output.write_string(self.name)
        object_data_output.write_float(self.gpa)
# Connect to the cluster, registering the Student factory so the client
# can deserialize Student values (runs at import; requires a live cluster).
client = hazelcast.HazelcastClient(
    data_serializable_factories={Student.FACTORY_ID: {Student.CLASS_ID: Student}}
)
my_map = client.get_map("map")
student = Student(1, "John Doe", 3.0)
my_map.put("student1", student)
# get() returns a future; .result() blocks until the value arrives.
returned_student = my_map.get("student1").result()
print("Received:", returned_student)
client.shutdown()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020-2022 INRAE
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
"""Perform the evaluation of models from TFRecords"""
import argparse
import logging
import sys
import tensorflow as tf
from decloud.core import system
from decloud.models.model_factory import ModelFactory
from decloud.models.tfrecord import TFRecords
from decloud.models import metrics
from decloud.models.utils import get_available_gpus
def main(args):
    """Evaluate a model over one or more TFRecords test datasets.

    Parses CLI arguments from *args*, builds the model (optionally loading
    SavedModel weights), then runs model.evaluate on each test dataset
    under the selected tf.distribute strategy.
    """
    # Application parameters parsing
    parser = argparse.ArgumentParser(description="Saved model evaluation")
    parser.add_argument("--savedmodel", help="SavedModel path. Mandatory for trained deep learning models.")
    parser.add_argument("--model", required=True, help="Model name")
    parser.add_argument("--test_records", nargs='+', default=[], help="Set of folders containing shards and .pkl files")
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--strategy', default='mirrored',
                        const='mirrored',
                        nargs='?',
                        choices=['mirrored', 'singlecpu'],
                        help='tf.distribute strategy')

    # No CLI arguments at all: show usage instead of failing later.
    if len(sys.argv) == 1:
        parser.print_help()
        parser.exit()
    params = parser.parse_args(args)

    # Logging
    system.basic_logging_init()

    # Check SavedModel
    if not params.savedmodel:
        # Deterministic models (e.g. gapfilling) need no weights.
        logging.warning("No SavedModel provided! Are you using a deterministic model?")
    elif not system.is_dir(params.savedmodel):
        logging.fatal("SavedModel directory %s doesn't exist, exiting.", params.savedmodel)
        system.terminate()

    # Strategy
    # For model evaluation we restrain strategies to "singlecpu" and "mirrored"
    n_workers = 1
    if params.strategy == "mirrored":
        strategy = tf.distribute.MirroredStrategy()
        # Get number of GPUs
        n_workers = len(get_available_gpus())
        if n_workers == 0:
            logging.error("No GPU device found. At least one GPU is required! "
                          "Did you set correctly the CUDA_VISIBLE_DEVICES environment variable?")
            system.terminate()
    elif params.strategy == "singlecpu":
        strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
    else:
        logging.error("Please provide a supported tf.distribute strategy.")
        system.terminate()

    # Datasets
    if not params.test_records:
        logging.error("Please provide at least one directory containing TFRecords files.")
        system.terminate()
    tfrecord_test_array = [TFRecords(rep) for rep in params.test_records]

    # Shape of the first dataset
    # NOTE(review): assumes all test datasets share the first one's shape —
    # confirm when mixing record sets.
    dataset_shapes = tfrecord_test_array[0].output_shape

    # Model
    model = ModelFactory.get_model(params.model, dataset_shapes=dataset_shapes)

    # List of tf.dataset
    tf_ds_test = [tfrecord.read(batch_size=params.batch_size,
                                target_keys=model.model_output_keys,
                                n_workers=n_workers,
                                drop_remainder=False) for tfrecord in tfrecord_test_array]

    with strategy.scope():
        # Create the model
        model.create_network()
        if params.savedmodel:
            # Load the SavedModel if provided (the model can be deterministic e.g. gapfilling)
            logging.info("Loading model weight from \"{}\"".format(params.savedmodel))
            model.load_weights(params.savedmodel)
        # Metrics
        metrics_list = [metrics.MeanSquaredError(), metrics.StructuralSimilarity(), metrics.PSNR(),
                        metrics.SpectralAngle()]
        model.compile(metrics={out_key: metrics_list for out_key in model.model_output_keys})
        model.summary()

        # Validation on multiple datasets
        for tf_ds in tf_ds_test:
            model.evaluate(tf_ds)


if __name__ == "__main__":
    system.run_and_terminate(main)
|
def test_edit_group(app):
    """Log in as admin, rename the first group, then log out (app fixture)."""
    app.session.login(username="admin", password="secret")
    app.group.edit_first_group(group_name="11111")
    app.session.logout()
from unittest import TestCase
class TestWeightedDirGraph(TestCase):
    """Unit tests for pyavia's weighted directed graph (WtDirgraph)."""

    def test___init__(self):
        """Exercise link assignment, directionality, deletion and the
        forwards-dict constructor."""
        from pyavia import WtDirgraph, g_link
        wdg = WtDirgraph()
        # Test basic functions.
        wdg['a':'b'] = 'somevalue'
        self.assertIn('a', wdg)  # Both keys created by link assignment.
        self.assertIn('b', wdg)
        self.assertEqual(wdg['a':'b'], 'somevalue')
        with self.assertRaises(KeyError):
            print(wdg['b':'a'])  # Reverse should not exist.
        # Test reverse link.
        wdg['b':'a'] = 1.23
        self.assertEqual(wdg['b':'a'], 1.23)
        self.assertNotEqual(wdg['a':'b'], wdg['b':'a'])
        # Test heterogeneous and multiple keys.
        wdg['a':3.14159] = (22, 7)
        wdg[456:True] = 'Yes'
        with self.assertRaises(KeyError):
            wdg[1:2:3] = 4  # Invalid kind of slice index.
        self.assertNotEqual(wdg['a':'b'], wdg['a':3.14159])
        # Test key deletion and contains.
        del wdg['a':'b']  # Specific x -> y
        self.assertNotIn(g_link('a', 'b'), wdg)
        self.assertIn(g_link('b', 'a'), wdg)  # Reverse should not be deleted.
        del wdg[456]  # Entire x-key.
        with self.assertRaises(KeyError):
            del wdg[3.14159, 'a']  # Reverse should not exist.
            del wdg[456, True]  # Should already be gone.
        # Can't set path to nowhere.
        with self.assertRaises(KeyError):
            wdg['a':'a'] = 666
        # Test construction with forwards dict.
        wdg = WtDirgraph({'a': {'b': 2, 'c': 5}, 'c': {'a': 4}})
        self.assertEqual(wdg['c':'a'], 4)
        with self.assertRaises(KeyError):
            print(wdg['b':'a'])

    def test_trace(self):
        """Exercise path tracing: simple links, missing paths, accumulated
        path sums, and cache side-effects on repeated traces."""
        from pyavia import WtDirgraph
        wdg = WtDirgraph()
        wdg[1:2] = 0.5
        wdg[1:3] = 0.2
        wdg[1:4] = 5
        wdg[2:7] = 1
        wdg[2:8] = 3.14159
        wdg[7:-1] = -2
        # Simple paths should be lists with two nodes.
        self.assertEqual(wdg.trace(2, 7), [2, 7])
        # Path to nowhere is invalid.
        with self.assertRaises(KeyError):
            wdg.trace(4, 4)
        # Even simple paths should not be reversible.
        self.assertEqual(wdg.trace(7, 2), None)
        # Check complex forward path.
        path, path_sum = wdg.trace(1, -1, op=lambda x, y: x + y)
        self.assertEqual(path, [1, 2, 7, -1])
        self.assertEqual(path_sum, -0.5)
        # Forward path check (#2 check side-effects of caching).
        path, path_sum = wdg.trace(1, -1, op=lambda x, y: x + y)
        self.assertEqual(path, [1, 2, 7, -1])
        self.assertEqual(path_sum, -0.5)
        # No reverse path should exist.
        path, path_sum = wdg.trace(-1, 1, op=lambda x, y: x + y)
        self.assertIsNone(path)
        self.assertIsNone(path_sum)
        # Add reverse path and confirm it now exists and is different.
        wdg[-1:3] = 5
        wdg[3:1] = 7
        path, path_sum = wdg.trace(-1, 1, op=lambda x, y: x + y)
        self.assertEqual(path, [-1, 3, 1])
        self.assertEqual(path_sum, 12)
        # Forward path check (#3 check side-effects of caching reverse).
        path, path_sum = wdg.trace(1, -1, op=lambda x, y: x + y)
        self.assertEqual(path, [1, 2, 7, -1])
        self.assertEqual(path_sum, -0.5)
        # Reverse path check (#2 check side-effects of caching and fwd path).
        path, path_sum = wdg.trace(-1, 1, op=lambda x, y: x + y)
        self.assertEqual(path, [-1, 3, 1])
        self.assertEqual(path_sum, 12)
|
from kids_math.utils import valid_answer
from kids_math.gifs import PeterRabbitGif, FrozenGif
# from kids_math.img import Images
def greater_than_less_than(first_number, second_number):
    """Quiz: ask whether first_number is >, < or = to second_number.

    The child gets the initial try plus up to five retries; a correct
    answer returns a reward gif, otherwise a consolation gif.
    """
    gifs = PeterRabbitGif()
    acceptable = ('=', '>', '<')
    # Work out the expected symbol for this pair of numbers.
    if first_number > second_number:
        result = '>'
    elif first_number < second_number:
        result = '<'
    else:
        result = '='
    response = input("""Is the first number greater than (>), less than (<), or equal to (=) the second number?
[enter either >, <, or =]: """).strip()
    wrong_input = "Try again: Your answer must either be >, <, or ="
    response = valid_answer(response, acceptable, wrong_input)
    wrong_response = "Sorry! {} is not {} to {}. Try again...".format(first_number, response, second_number)
    # ask five more times if need be
    for _attempt in range(5):
        if response == result:
            print('\nCORRECT!!! GREAT WORK!!!')
            return gifs.wink()
        response = input(wrong_response)
        # check for valid answer
        response = valid_answer(response, acceptable, wrong_input)
    print("Sorry, either try again or ask for help.")
    return gifs.nope()
def add_to_five(number):
    """Quiz: ask the user for the value of 5 + number, with up to five retries.

    :param number: the addend; must be an int or float, otherwise the
        question is skipped and the consolation gif is returned.
    :return: a gif from FrozenGif reflecting success or failure.
    """
    gifs = FrozenGif()
    acceptable = (int, float)
    if type(number) not in acceptable:
        # Non-numeric addend: same consolation path as running out of tries.
        print("Sorry, either try again or ask for help.")
        return gifs.olaf_heart()

    answer = 5 + number

    def _ask_int(prompt):
        # Keep prompting until a whole number is typed; the original bare
        # int(input(...)) crashed with ValueError on input like "abc".
        while True:
            try:
                return int(input(prompt))
            except ValueError:
                print("Try again: your answer must be a whole number.")

    response = _ask_int("Enter the answer to 5 + {}? ".format(number))
    if response == answer:
        print('\n CORRECT!!! GREAT WORK!!!')
        return gifs.walking()
    for _retry in range(5):
        # Rebuild the message so it names the latest wrong answer (the
        # original formatted it once and kept showing a stale value).
        wrong_response = "Sorry! 5 + {} is not equal to {}. Try again...".format(number, response)
        response = _ask_int(wrong_response)
        # Every prompted answer is now checked; the original never
        # checked the final retry's answer.
        if response == answer:
            print('\n CORRECT!!! GREAT WORK!!!')
            return gifs.walking()
    print("Sorry, either try again or ask for help.")
    return gifs.olaf_heart()
# def rotational_symmetry():
# """Choose whether or not the object has rotational symmetry."""
#
# response = input("""Your goal is to choose whether or not the image that will be shown has rotational symmetry by
# entering either "YES" or "NO". Press [return] or [enter] to begin. """)
#
# if response.lower() in ('yes', 'no'):
#
# return Images.display_img(Images.STAR)
|
{
'targets': [
{
'target_name': 'riskjs',
'sources': [
'src/RiskJS.cpp',
'src/CVaRHistorical.cpp',
'src/CVaRMonteCarlo.cpp',
'src/CVaRVarianceCovariance.cpp',
'src/compute_returns_eigen.cpp',
'src/instrument.cpp',
'src/path.cpp',
'src/pca.cpp',
'src/portfolio.cpp',
'src/ptf_var.cpp',
'src/rng.cpp',
'src/var_model.cpp'
],
'include_dirs': [
'include',
'include/eigen3'
],
'conditions': [
['OS=="win"', {
'msvs_settings': {
'VCCLCompilerTool': {
'ExceptionHandling': 1,
'AdditionalOptions': [
'/GR', '/EHsc', '/wd4003', '/wd4018', '/wd4506', '/wd4800'
]
}
}
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
'cflags': [ '-std=c++11' ],
'cflags_cc!': [ '-fno-rtti', '-fno-exceptions' ]
}],
['OS=="mac"', {
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'GCC_ENABLE_CPP_RTTI': 'YES',
'OTHER_CPLUSPLUSFLAGS': [ '-std=c++11', '-stdlib=libc++' ],
'OTHER_LDFLAGS': [ '-stdlib=libc++' ],
'MACOSX_DEPLOYMENT_TARGET': '10.7'
}
}]
]
}
]
}
|
#
# Copyright 2020 Two Sigma Open Source, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime as dt
import pathlib
import typing
from uberjob._util import repr_helper
from uberjob._value_store import ValueStore
from uberjob.stores._file_store import get_modified_time
class PathSource(ValueStore):
    """
    A :class:`~uberjob.ValueStore` that returns the path itself from ``read`` rather than actually reading any data.

    :param path: The input path.
    :param required: When true, ``get_modified_time`` will raise an exception when the path is missing rather than
        return ``None``.
    """

    def __init__(self, path: typing.Union[str, pathlib.Path], *, required: bool = True):
        self.path = path
        self.required = required

    def read(self) -> typing.Union[str, pathlib.Path]:
        """
        Get the path.

        When ``required`` is false, this will raise an exception if the file does not exist.
        """
        if not self.required:
            # Existence must be enforced here, because get_modified_time
            # tolerates a missing path when required is false.
            self._get_modified_time(required=True)
        return self.path

    def write(self, value):
        """Not implemented."""
        raise NotImplementedError()

    def get_modified_time(self) -> typing.Optional[dt.datetime]:
        """
        Get the modified time of the file.

        If it does not exist or is inaccessible, ``None`` will be returned if ``required`` is false and an exception
        will be raised otherwise.
        """
        return self._get_modified_time(self.required)

    def _get_modified_time(self, required):
        # Shared implementation: optionally escalate a missing path to an error.
        modified_time = get_modified_time(self.path)
        if required and modified_time is None:
            raise IOError(
                f"Failed to get modified time of required source path {self.path!r}."
            )
        return modified_time

    def __repr__(self):
        return repr_helper(
            self, self.path, required=self.required, defaults={"required": True}
        )
|
import yaml
from tests.base.io_test import BaseIOTest
from tests.base.value_error import BaseValueErrorTest
from tests.base.folder_test import BaseFolderTest
from tests.base.hash_test import BaseHashTest
from queenbee.plugin import Plugin
ASSET_FOLDER = 'tests/assets/plugins'
class TestIO(BaseIOTest):
    # Run the shared IO test suite against the Plugin schema assets.
    klass = Plugin
    asset_folder = ASSET_FOLDER
class TestValueError(BaseValueErrorTest):
    # Run the shared value-error test suite against the Plugin schema assets.
    klass = Plugin
    asset_folder = ASSET_FOLDER
class TestFolder(BaseFolderTest):
    # Run the shared folder test suite against the Plugin schema assets.
    klass = Plugin
    asset_folder = ASSET_FOLDER
class TestHash(BaseHashTest):
    # Run the shared hash test suite against the Plugin schema assets.
    klass = Plugin
    asset_folder = ASSET_FOLDER
|
import schedule
import settings
from .poll_pull_requests import poll_pull_requests as poll_pull_requests
from .poll_read_issue_comments import poll_read_issue_comments
from .poll_issue_close_stale import poll_issue_close_stale
def schedule_jobs(api):
    """Register the three polling tasks with their configured intervals.

    poll_issue_close_stale is also invoked once immediately so it is
    guaranteed to run at least once within its interval.
    """
    polling_plan = [
        (settings.PULL_REQUEST_POLLING_INTERVAL_SECONDS, poll_pull_requests),
        (settings.ISSUE_COMMENT_POLLING_INTERVAL_SECONDS, poll_read_issue_comments),
        (settings.ISSUE_CLOSE_STALE_INTERVAL_SECONDS, poll_issue_close_stale),
    ]
    for interval, task in polling_plan:
        # task=task binds early, avoiding the late-binding closure pitfall.
        schedule.every(interval).seconds.do(lambda task=task: task(api))
    # Call manually the first time, so that we are guaranteed this will run
    # at least once in the interval...
    poll_issue_close_stale(api)
|
import os
from os import walk
import sys
# Read the four expected CLI arguments; fall back to the defaults when
# fewer are supplied.  The original bare `except:` also swallowed
# KeyboardInterrupt/SystemExit — only a missing argv entry is expected here.
try:
    rename_file = sys.argv[1]
    photo_attack_folder = sys.argv[2]
    video_attack_folder = sys.argv[3]
    real_folder = sys.argv[4]
except IndexError:
    rename_file = 'Test.txt'
    photo_attack_folder = 'self_created/photo_attack'
    video_attack_folder = 'self_created/video_attack'
    real_folder = 'self_created/real'
'''
Rename files based on Dev.txt and Test.txt. These two text files contain OULU-NPU protocol 1
file names. If the file name ends with 4, and 5 it is a video attack and if it ends with 2
and 3 it is a photo attack. If it ends with 1 it is a real sample.
To use this file, run python adjust_new_data_names.py A B C D
A:
Test.txt if you want to rename files based on test protocol of OULU NPU
Dev.txt if you want to rename files based on dev protocol of OULU NPU
B:
The address of the folder containing photo attack videos
C:
The address of the folder containing video attack videos
D:
The address of the folder containing real sample videos
'''
def rename_files(path, file_names):
    """Rename the i-th .mp4 file found under path to file_names[i] + '.mp4'.

    Files are collected with os.walk; only .mp4 files consume a name, so
    unrelated files no longer shift the pairing (the original indexed the
    full listing, mis-pairing names when non-mp4 files were present).
    Each file is renamed in place in its own directory (the original
    joined subdirectory file names onto the top-level path).
    """
    mp4_entries = []
    for dirpath, dirnames, filenames in walk(path):
        for filename in filenames:
            if filename.split('.')[-1] == 'mp4':
                mp4_entries.append((dirpath, filename))
    # zip stops cleanly if the counts differ instead of raising IndexError.
    for (dirpath, old_name), new_name in zip(mp4_entries, file_names):
        os.rename(os.path.join(dirpath, old_name),
                  os.path.join(dirpath, new_name + '.mp4'))
def get_file_names(path):
    """Parse an OULU-NPU protocol file and split sample names by category.

    Each line looks like '+1,1_1_36_1': '+1' marks a real (live) sample;
    otherwise a trailing _4/_5 marks a video attack and _2/_3 a photo
    attack.  Returns (live, fake_video, fake_photo) lists containing the
    first 8 characters of each file name.
    """
    with open(path) as f:
        lines = f.readlines()
    live_file_names = []
    fake_photo_file_names = []
    fake_video_file_names = []
    for line in lines:
        # strip() removes the newline before classification; the original
        # compared against '5\n'/'4\n', mis-classifying the file's last
        # line (which has no trailing newline) as a photo attack.
        x = line.strip().split(',')
        if len(x) < 2:
            continue  # skip blank or malformed lines (original crashed here)
        if x[0] == '+1':
            live_file_names.append(str(x[1][:8]))
        elif x[1].split('_')[-1] in ('4', '5'):
            fake_video_file_names.append(str(x[1][:8]))
        else:
            fake_photo_file_names.append(str(x[1][:8]))
    return live_file_names, fake_video_file_names, fake_photo_file_names
# Script entry: classify the names listed in the protocol file, then rename
# the .mp4 files in each attack/real folder accordingly (runs at import).
live_file_names, fake_video_file_names,fake_photo_file_names = get_file_names(rename_file)
rename_files(photo_attack_folder, fake_photo_file_names)
rename_files(video_attack_folder, fake_video_file_names)
rename_files(real_folder, live_file_names)
from http.server import BaseHTTPRequestHandler, HTTPServer
import re
import socket
from threading import Thread
import unittest
import os
# Third-party imports...
from unittest.mock import MagicMock, Mock
from pywink.api import *
from pywink.api import WinkApiInterface
from pywink.devices.sensor import WinkSensor
from pywink.devices.hub import WinkHub
from pywink.devices.piggy_bank import WinkPorkfolioBalanceSensor, WinkPorkfolioNose
from pywink.devices.key import WinkKey
from pywink.devices.remote import WinkRemote
from pywink.devices.powerstrip import WinkPowerStrip, WinkPowerStripOutlet
from pywink.devices.light_bulb import WinkLightBulb
from pywink.devices.binary_switch import WinkBinarySwitch
from pywink.devices.lock import WinkLock
from pywink.devices.eggtray import WinkEggtray
from pywink.devices.garage_door import WinkGarageDoor
from pywink.devices.shade import WinkShade
from pywink.devices.siren import WinkSiren
from pywink.devices.fan import WinkFan, WinkGeZwaveFan
from pywink.devices.thermostat import WinkThermostat
from pywink.devices.button import WinkButton
from pywink.devices.gang import WinkGang
from pywink.devices.smoke_detector import WinkSmokeDetector, WinkSmokeSeverity, WinkCoDetector, WinkCoSeverity
from pywink.devices.camera import WinkCanaryCamera
from pywink.devices.air_conditioner import WinkAirConditioner
from pywink.devices.propane_tank import WinkPropaneTank
from pywink.devices.scene import WinkScene
from pywink.devices.robot import WinkRobot
from pywink.devices.water_heater import WinkWaterHeater
USERS_ME_WINK_DEVICES = {}
GROUPS = {}
class ApiTests(unittest.TestCase):
    def setUp(self):
        """Load every canned API response from disk into the module-level
        device/group fixtures, then start the mock HTTP server."""
        global USERS_ME_WINK_DEVICES, GROUPS
        super(ApiTests, self).setUp()
        # Device responses: one JSON file per device.
        all_devices = os.listdir('{}/devices/api_responses/'.format(os.path.dirname(__file__)))
        device_list = []
        for json_file in all_devices:
            if os.path.isfile('{}/devices/api_responses/{}'.format(os.path.dirname(__file__), json_file)):
                _json_file = open('{}/devices/api_responses/{}'.format(os.path.dirname(__file__), json_file))
                device_list.append(json.load(_json_file))
                _json_file.close()
        USERS_ME_WINK_DEVICES["data"] = device_list
        # Group responses live in the groups/ subdirectory.
        all_devices = os.listdir('{}/devices/api_responses/groups'.format(os.path.dirname(__file__)))
        device_list = []
        for json_file in all_devices:
            if os.path.isfile('{}/devices/api_responses/groups/{}'.format(os.path.dirname(__file__), json_file)):
                _json_file = open('{}/devices/api_responses/groups/{}'.format(os.path.dirname(__file__), json_file))
                device_list.append(json.load(_json_file))
                _json_file.close()
        GROUPS["data"] = device_list
        self.port = get_free_port()
        start_mock_server(self.port)
        self.api_interface = MockApiInterface()
    def test_local_control_enabled_by_default(self):
        """The module-level ALLOW_LOCAL_CONTROL flag defaults to True."""
        self.assertTrue(ALLOW_LOCAL_CONTROL)
    def test_that_disable_local_control_works(self):
        """disable_local_control() flips the module flag to False."""
        # Re-import to read the flag's current module-level value.
        from pywink.api import ALLOW_LOCAL_CONTROL
        disable_local_control()
        self.assertFalse(ALLOW_LOCAL_CONTROL)
    def test_set_user_agent(self):
        """set_user_agent() updates the shared API headers."""
        from pywink.api import API_HEADERS
        set_user_agent("THIS IS A TEST")
        self.assertEqual("THIS IS A TEST", API_HEADERS["User-Agent"])
def test_set_bearer_token(self):
from pywink.api import API_HEADERS, LOCAL_API_HEADERS
set_bearer_token("THIS IS A TEST")
self.assertEqual("Bearer THIS IS A TEST", API_HEADERS["Authorization"])
    def test_get_authorization_url(self):
        """The OAuth2 authorize URL embeds the client id and redirect URI."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        url = get_authorization_url("TEST", "127.0.0.1")
        comparison_url = "%s/oauth2/authorize?client_id=TEST&redirect_uri=127.0.0.1" % ("http://localhost:" + str(self.port))
        self.assertEqual(comparison_url, url)
def test_bad_status_codes(self):
try:
WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port) + "/401/"
wink_api_fetch()
except Exception as e:
self.assertTrue(type(e), WinkAPIException)
try:
WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port) + "/404/"
wink_api_fetch()
except Exception as e:
self.assertTrue(type(e), WinkAPIException)
    def test_get_subscription_key(self):
        """After fetching devices, a PubNub subscription key is available."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        get_all_devices()
        self.assertIsNotNone(get_subscription_key())
    def test_get_all_devices_from_api(self):
        """Every canned device deserializes to the expected pywink class,
        and the total device count matches the fixture set."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_all_devices()
        # 77 devices in the api_responses fixture directory.
        self.assertEqual(len(devices), 77)
        lights = get_light_bulbs()
        for light in lights:
            self.assertTrue(isinstance(light, WinkLightBulb))
        sensors = get_sensors()
        sensors.extend(get_door_bells())
        for sensor in sensors:
            self.assertTrue(isinstance(sensor, WinkSensor))
        smoke_detectors = get_smoke_and_co_detectors()
        for device in smoke_detectors:
            self.assertTrue(isinstance(device, WinkSmokeDetector) or isinstance(device, WinkSmokeSeverity) or
                            isinstance(device, WinkCoDetector) or isinstance(device, WinkCoSeverity))
        keys = get_keys()
        for key in keys:
            self.assertTrue(isinstance(key, WinkKey))
        switches = get_switches()
        for switch in switches:
            self.assertTrue(isinstance(switch, WinkBinarySwitch))
        locks = get_locks()
        for lock in locks:
            self.assertTrue(isinstance(lock, WinkLock))
        eggtrays = get_eggtrays()
        for eggtray in eggtrays:
            self.assertTrue(isinstance(eggtray, WinkEggtray))
        garage_doors = get_garage_doors()
        for garage_door in garage_doors:
            self.assertTrue(isinstance(garage_door, WinkGarageDoor))
        # A powerstrip expands to the strip itself plus its outlets.
        powerstrip = get_powerstrips()
        self.assertEqual(len(powerstrip), 3)
        for device in powerstrip:
            self.assertTrue(isinstance(device, WinkPowerStrip) or isinstance(device, WinkPowerStripOutlet))
        shades = get_shades()
        for shade in shades:
            self.assertTrue(isinstance(shade, WinkShade))
        sirens = get_sirens()
        for siren in sirens:
            self.assertTrue(isinstance(siren, WinkSiren))
        keys = get_keys()
        for key in keys:
            self.assertTrue(isinstance(key, WinkKey))
        # The porkfolio expands to a balance sensor plus the nose light.
        porkfolio = get_piggy_banks()
        self.assertEqual(len(porkfolio), 2)
        for device in porkfolio:
            self.assertTrue(isinstance(device, WinkPorkfolioBalanceSensor) or isinstance(device, WinkPorkfolioNose))
        thermostats = get_thermostats()
        for thermostat in thermostats:
            self.assertTrue(isinstance(thermostat, WinkThermostat))
        hubs = get_hubs()
        for hub in hubs:
            self.assertTrue(isinstance(hub, WinkHub))
        fans = get_fans()
        for fan in fans:
            self.assertTrue(isinstance(fan, WinkFan) or isinstance(fan, WinkGeZwaveFan))
        buttons = get_buttons()
        for button in buttons:
            self.assertTrue(isinstance(button, WinkButton))
        acs = get_air_conditioners()
        for ac in acs:
            self.assertTrue(isinstance(ac, WinkAirConditioner))
        propane_tanks = get_propane_tanks()
        for tank in propane_tanks:
            self.assertTrue(isinstance(tank, WinkPropaneTank))
        water_heaters = get_water_heaters()
        for water_heater in water_heaters:
            self.assertTrue(isinstance(water_heater, WinkWaterHeater))
    def test_get_sensor_and_binary_switch_updated_states_from_api(self):
        """Read-only sensors keep their state after update_state(); boolean
        switches toggle; the porkfolio nose accepts a color value."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        # Devices whose state is read-only from the API's point of view.
        sensor_types = [WinkSensor, WinkHub, WinkPorkfolioBalanceSensor, WinkKey, WinkRemote,
                        WinkGang, WinkSmokeDetector, WinkSmokeSeverity,
                        WinkCoDetector, WinkCoSeverity, WinkButton, WinkRobot]
        # No way to validate scene is activated, so skipping.
        skip_types = [WinkPowerStripOutlet, WinkCanaryCamera, WinkScene]
        devices = get_all_devices()
        old_states = {}
        for device in devices:
            if type(device) in skip_types:
                continue
            device.api_interface = self.api_interface
            if type(device) in sensor_types:
                # Keyed by id+name because several sensors share an object id.
                old_states[device.object_id() + device.name()] = device.state()
            elif isinstance(device, WinkPorkfolioNose):
                device.set_state("FFFF00")
            elif device.state() is False or device.state() is True:
                old_states[device.object_id()] = device.state()
                device.set_state(not device.state())
            device.update_state()
        for device in devices:
            if type(device) in skip_types:
                continue
            if isinstance(device, WinkPorkfolioNose):
                self.assertEqual(device.state(), "FFFF00")
            elif type(device) in sensor_types:
                self.assertEqual(device.state(), old_states.get(device.object_id() + device.name()))
            elif device.object_id() in old_states:
                self.assertEqual(not device.state(), old_states.get(device.object_id()))
    def test_get_light_bulbs_updated_states_from_api(self):
        """Setting power/brightness/color on each bulb type round-trips
        through the mock API."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_light_bulbs()
        old_states = {}
        # Set states
        for device in devices:
            device.api_interface = self.api_interface
            # Test xy color and powered
            if device.supports_xy_color():
                old_states[device.object_id()] = device.state()
                device.set_state(not device.state(), color_xy=[0.5, 0.5])
            # Test HSB and powered
            elif device.supports_hue_saturation():
                old_states[device.object_id()] = device.state()
                device.set_state(not device.state(), 0.5, color_hue_saturation=[0.5, 0.5])
            # Test temperature and powered
            elif not device.supports_hue_saturation() and device.supports_temperature():
                old_states[device.object_id()] = device.state()
                device.set_state(not device.state(), 0.5, color_kelvin=2500)
            # Test Brightness and powered
            else:
                old_states[device.object_id()] = device.state()
                device.set_state(not device.state(), 0.5)
        # Check states
        for device in devices:
            # Test xy color and power
            if device.supports_xy_color():
                self.assertEqual([not old_states.get(device.object_id()), [0.5, 0.5]], [device.state(), device.color_xy()])
            # Test HSB and powered
            elif device.supports_hue_saturation():
                self.assertEqual([old_states.get(device.object_id()), 0.5, [0.5, 0.5]],
                                 [not device.state(), device.brightness(), [device.color_saturation(), device.color_hue()]])
            # Test temperature and powered
            elif not device.supports_hue_saturation() and device.supports_temperature():
                self.assertEqual([not old_states.get(device.object_id()), 0.5, 2500], [device.state(), device.brightness(), device.color_temperature_kelvin()])
            # Test Brightness and powered
            else:
                self.assertEqual([old_states.get(device.object_id()), 0.5], [not device.state(), device.brightness()])
    def test_get_switch_group_updated_state_from_api(self):
        """Turning a switch group off must be reflected after update_state."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_binary_switch_groups()
        for device in devices:
            device.api_interface = self.api_interface
            # The Mock API only changes the "powered" true_count and false_count
            device.set_state(False)
            device.update_state()
        for device in devices:
            self.assertFalse(device.state())
    def test_get_light_group_updated_state_from_api(self):
        """Turning a light group on must be reflected after update_state."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_light_groups()
        for device in devices:
            device.api_interface = self.api_interface
            # The Mock API only changes the "powered" true_count and false_count
            device.set_state(True)
            device.update_state()
        for device in devices:
            self.assertTrue(device.state())
    def test_get_shade_group_updated_state_from_api(self):
        """Setting a shade group fully open must update the position average."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_shade_groups()
        for device in devices:
            device.api_interface = self.api_interface
            # The Mock API only changes the "position" average
            device.set_state(1.0)
            device.update_state()
        for device in devices:
            self.assertEqual(device.state(), 1.0)
    def test_all_devices_local_control_id_is_not_decimal(self):
        """Every local-control id must be a whole number (no fractional part)."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_all_devices()
        for device in devices:
            if device.local_id() is not None:
                # int() would raise if the id had a fractional string form;
                # equality of the two conversions proves it is integral.
                _temp = float(device.local_id())
                _temp2 = int(device.local_id())
                self.assertEqual(_temp, _temp2)
    def test_local_control_get_state_is_being_called(self):
        """update_state on a light bulb must route through local_get_state."""
        mock_api_object = Mock()
        mock_api_object.local_get_state = MagicMock()
        mock_api_object.get_device_state = MagicMock()
        devices = get_light_bulbs()
        devices[0].api_interface = mock_api_object
        devices[0].update_state()
        mock_api_object.local_get_state.assert_called_with(devices[0])
    def test_local_control_set_state_is_being_called(self):
        """set_state on a light bulb must route through local_set_state."""
        def Any(cls):
            # Wildcard matcher: compares equal to anything, so
            # assert_called_with only pins the positional device argument.
            class Any(cls):
                def __eq__(self, other):
                    return True
            return Any()
        mock_api_object = Mock()
        mock_api_object.local_set_state = MagicMock()
        mock_api_object.set_device_state = MagicMock()
        devices = get_light_bulbs()
        devices[0].api_interface = mock_api_object
        devices[0].set_state(True)
        mock_api_object.local_set_state.assert_called_with(devices[0], Any(str))
    def test_local_control_get_state_is_not_being_called(self):
        """Devices without local control (piggy banks) must use the cloud API."""
        mock_api_object = Mock()
        mock_api_object.local_get_state = MagicMock()
        mock_api_object.get_device_state = MagicMock()
        devices = get_piggy_banks()
        devices[0].api_interface = mock_api_object
        devices[0].update_state()
        mock_api_object.get_device_state.assert_called_with(devices[0])
    def test_local_control_set_state_is_not_being_called(self):
        """Thermostat writes must use the cloud API, not local control."""
        def Any(cls):
            # Wildcard matcher: compares equal to anything, so
            # assert_called_with only pins the positional device argument.
            class Any(cls):
                def __eq__(self, other):
                    return True
            return Any()
        mock_api_object = Mock()
        mock_api_object.local_set_state = MagicMock()
        mock_api_object.set_device_state = MagicMock()
        devices = get_thermostats()
        devices[0].api_interface = mock_api_object
        devices[0].set_operation_mode("auto")
        mock_api_object.set_device_state.assert_called_with(devices[0], Any(str))
    def test_get_shade_updated_states_from_api(self):
        """Setting a shade fully open must be reflected after update_state."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_shades()
        for device in devices:
            device.api_interface = self.api_interface
            device.set_state(1.0)
            device.update_state()
        for device in devices:
            self.assertEqual(1.0, device.state())
    def test_get_garage_door_updated_states_from_api(self):
        """Opening a garage door must be reflected after update_state."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_garage_doors()
        for device in devices:
            device.api_interface = self.api_interface
            device.set_state(1)
            device.update_state()
        for device in devices:
            self.assertEqual(1, device.state())
    def test_get_powerstrip_outlets_updated_states_from_api(self):
        """Toggling each outlet (skipping the parent strip) must invert its state."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        skip_types = [WinkPowerStrip]
        devices = get_powerstrips()
        old_states = {}
        for device in devices:
            if type(device) in skip_types:
                continue
            device.api_interface = self.api_interface
            # Only toggle outlets that report a boolean power state.
            if device.state() is False or device.state() is True:
                old_states[device.object_id()] = device.state()
                device.set_state(not device.state())
                device.update_state()
        for device in devices:
            if device.object_id() in old_states:
                self.assertEqual(not device.state(), old_states.get(device.object_id()))
    def test_get_siren_updated_states_from_api(self):
        """Write every siren setting, then verify the mock echoes each one back."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_sirens()
        old_states = {}
        for device in devices:
            device.api_interface = self.api_interface
            old_states[device.object_id()] = device.state()
            device.set_state(not device.state())
            device.set_mode("strobe")
            device.set_auto_shutoff(120)
            device.set_siren_volume("medium")
            device.set_chime_volume("medium")
            device.set_siren_sound("test_sound")
            device.set_chime("test_sound", 10)
            device.set_chime_strobe_enabled(True)
            device.set_siren_strobe_enabled(False)
            device.update_state()
            self.assertEqual(not device.state(), old_states.get(device.object_id()))
            self.assertEqual(device.mode(), "strobe")
            self.assertEqual(device.auto_shutoff(), 120)
            self.assertEqual(device.siren_volume(), "medium")
            self.assertEqual(device.chime_volume(), "medium")
            self.assertEqual(device.chime_mode(), "test_sound")
            self.assertEqual(device.siren_sound(), "test_sound")
            self.assertTrue(device.chime_strobe_enabled())
            self.assertFalse(device.strobe_enabled())
            self.assertEqual(device.chime_cycles(), 10)
    def test_get_lock_updated_states_from_api(self):
        """Write every lock setting, then verify the mock echoes each one back."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_locks()
        old_states = {}
        for device in devices:
            device.api_interface = self.api_interface
            old_states[device.object_id()] = device.state()
            device.set_state(not device.state())
            device.set_alarm_sensitivity(0.22)
            device.set_alarm_mode("alert")
            device.set_alarm_state(False)
            device.set_vacation_mode(True)
            device.set_beeper_mode(True)
            device.update_state()
            self.assertEqual(not device.state(), old_states.get(device.object_id()))
            self.assertEqual(device.alarm_mode(), "alert")
            self.assertFalse(device.alarm_enabled())
            self.assertTrue(device.vacation_mode_enabled())
            self.assertTrue(device.beeper_enabled())
    def test_get_air_conditioner_updated_states_from_api(self):
        """Write AC mode/setpoint/schedule/fan speed and verify them back."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_air_conditioners()
        # NOTE(review): old_states is populated but never read in this test.
        old_states = {}
        for device in devices:
            device.api_interface = self.api_interface
            old_states[device.object_id()] = device.state()
            device.set_operation_mode("cool_only")
            device.set_temperature(70)
            device.set_schedule_enabled(False)
            device.set_ac_fan_speed(0.5)
        # NOTE(review): unlike sibling tests, no update_state() is called here —
        # presumably set_* updates the local reading; confirm against the mock.
        for device in devices:
            self.assertEqual(device.state(), "cool_only")
            self.assertEqual(70, device.current_max_set_point())
            self.assertFalse(device.schedule_enabled())
            self.assertEqual(0.5, device.current_fan_speed())
    def test_get_thermostat_updated_states_from_api(self):
        """Write thermostat mode/away/fan/setpoints and verify them back."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_thermostats()
        # NOTE(review): old_states is populated but never read in this test.
        old_states = {}
        for device in devices:
            device.api_interface = self.api_interface
            old_states[device.object_id()] = device.state()
            # One fixture device is driven to "off" to also cover is_on().
            if device.name() == "Home Hallway Thermostat":
                device.set_operation_mode("off")
            else:
                device.set_operation_mode("auto")
            device.set_away(True)
            if device.has_fan():
                device.set_fan_mode("auto")
            device.set_temperature(10, 50)
        for device in devices:
            if device.name() == "Home Hallway Thermostat":
                self.assertFalse(device.is_on())
            else:
                self.assertEqual(device.current_hvac_mode(), "auto")
            self.assertTrue(device.away())
            if device.has_fan():
                self.assertEqual(device.current_fan_mode(), "auto")
            self.assertEqual(10, device.current_min_set_point())
            self.assertEqual(50, device.current_max_set_point())
    def test_get_water_heater_updated_states_from_api(self):
        """Write water-heater mode/setpoint/vacation and verify them back."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_water_heaters()
        # NOTE(review): old_states is populated but never read in this test.
        old_states = {}
        for device in devices:
            device.api_interface = self.api_interface
            old_states[device.object_id()] = device.state()
            device.set_operation_mode("heat_pump")
            device.set_temperature(70)
            device.set_vacation_mode(True)
        for device in devices:
            self.assertEqual(device.state(), "heat_pump")
            self.assertEqual(70, device.current_set_point())
            self.assertTrue(device.vacation_mode_enabled())
    def test_get_camera_updated_states_from_api(self):
        """Canary cameras only: write mode/privacy and verify them back."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_cameras()
        # NOTE(review): old_states is never used in this test.
        old_states = {}
        for device in devices:
            if isinstance(device, WinkCanaryCamera):
                device.api_interface = self.api_interface
                device.set_mode("away")
                device.set_privacy(True)
                device.update_state()
        for device in devices:
            if isinstance(device, WinkCanaryCamera):
                self.assertEqual(device.state(), "away")
                self.assertTrue(device.private())
    def test_get_fan_updated_states_from_api(self):
        """Write fan speed/direction/timer and verify them back; GE Z-Wave
        fans take a different speed vocabulary than the rest."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_fans()
        # NOTE(review): old_states is never used in this test.
        old_states = {}
        for device in devices:
            device.api_interface = self.api_interface
            if isinstance(device, WinkGeZwaveFan):
                device.set_state(True, "high")
            else:
                device.set_state(True, "auto")
                device.set_fan_direction("reverse")
                device.set_fan_timer(300)
            device.update_state()
        for device in devices:
            if isinstance(device, WinkGeZwaveFan):
                self.assertEqual(device.current_fan_speed(), "high")
            else:
                self.assertEqual(device.current_fan_speed(), "auto")
                self.assertEqual(device.current_fan_direction(), "reverse")
                self.assertEqual(device.current_timer(), 300)
    def test_get_propane_tank_updated_states_from_api(self):
        """Write the tare weight and verify it is echoed back."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_propane_tanks()
        # NOTE(review): old_states is never used in this test.
        old_states = {}
        for device in devices:
            device.api_interface = self.api_interface
            device.set_tare(5.0)
            device.update_state()
            self.assertEqual(device.tare(), 5.0)
    def test_set_all_device_names(self):
        """Renaming every device must stick after update_state."""
        WinkApiInterface.BASE_URL = "http://localhost:" + str(self.port)
        devices = get_all_devices()
        # NOTE(review): old_states is never used in this test.
        old_states = {}
        for device in devices:
            device.api_interface = self.api_interface
            device.set_name("TEST_NAME")
            device.update_state()
        for device in devices:
            # startswith: some device types append a suffix (e.g. outlet index).
            self.assertTrue(device.name().startswith("TEST_NAME"))
class MockServerRequestHandler(BaseHTTPRequestHandler):
    """Minimal HTTP handler standing in for the Wink API.

    GET paths are matched against the regexes below; error patterns are
    checked first so e.g. /401/users/me/wink_devices yields 401, not 200.
    """
    USERS_ME_WINK_DEVICES_PATTERN = re.compile(r'/users/me/wink_devices')
    BAD_STATUS_PATTERN = re.compile(r'/401/')
    NOT_FOUND_PATTERN = re.compile(r'/404/')
    REFRESH_TOKEN_PATTERN = re.compile(r'/oauth2/token')
    DEVICE_SPECIFIC_PATTERN = re.compile(r'/*/[0-9]*')
    GROUPS_PATTERN = re.compile(r'/groups')

    def do_GET(self):
        """Serve canned device/group fixtures, or a canned error status."""
        if re.search(self.BAD_STATUS_PATTERN, self.path):
            # Add response status code.
            self.send_response(requests.codes.unauthorized)
            # Add response headers.
            self.send_header('Content-Type', 'application/json; charset=utf-8')
            self.end_headers()
            return
        elif re.search(self.NOT_FOUND_PATTERN, self.path):
            # Add response status code.
            self.send_response(requests.codes.not_found)
            # Add response headers.
            self.send_header('Content-Type', 'application/json; charset=utf-8')
            self.end_headers()
            return
        elif re.search(self.USERS_ME_WINK_DEVICES_PATTERN, self.path):
            # Add response status code.
            self.send_response(requests.codes.ok)
            # Add response headers.
            self.send_header('Content-Type', 'application/json; charset=utf-8')
            self.end_headers()
            # Add response content.
            response_content = json.dumps(USERS_ME_WINK_DEVICES)
            self.wfile.write(response_content.encode('utf-8'))
            return
        elif re.search(self.GROUPS_PATTERN, self.path):
            # Add response status code.
            self.send_response(requests.codes.ok)
            # Add response headers.
            self.send_header('Content-Type', 'application/json; charset=utf-8')
            self.end_headers()
            # Add response content.
            response_content = json.dumps(GROUPS)
            self.wfile.write(response_content.encode('utf-8'))
            return
def get_free_port():
    """Return an ephemeral TCP port number that was free at call time.

    Binds to port 0 so the OS picks a free port, records it, and releases
    the socket again (note: the port could be reused by another process
    before the caller binds it).
    """
    with socket.socket(socket.AF_INET, type=socket.SOCK_STREAM) as probe:
        probe.bind(('localhost', 0))
        _address, free_port = probe.getsockname()
    return free_port
def start_mock_server(port):
    """Serve MockServerRequestHandler on localhost:*port* in a background thread.

    The thread is a daemon so it never blocks interpreter shutdown.
    """
    mock_server = HTTPServer(('localhost', port), MockServerRequestHandler)
    mock_server_thread = Thread(target=mock_server.serve_forever)
    # Thread.setDaemon() is deprecated (since Python 3.10); assign the
    # attribute directly instead.
    mock_server_thread.daemon = True
    mock_server_thread.start()
class MockApiInterface():
    """In-process stand-in for the Wink API interface.

    Instead of issuing HTTP requests it mutates the module-level
    USERS_ME_WINK_DEVICES / GROUPS fixtures in place and returns the
    affected device dict wrapped in ``{"data": ...}``, mimicking the
    real API's response shape.
    """

    def set_device_state(self, device, state, id_override=None, type_override=None):
        """Apply *state* to the matching fixture entry and return it.

        :type device: WinkDevice
        """
        object_id = id_override or device.object_id()
        device_object_type = device.object_type()
        object_type = type_override or device_object_type
        return_dict = {}
        if "name" in str(state):
            # Rename request: update the matching device (and group) fixture.
            for dict_device in USERS_ME_WINK_DEVICES.get('data'):
                _object_id = dict_device.get("object_id")
                if _object_id == object_id:
                    if device_object_type == "outlet":
                        index = device.index()
                        set_state = state["outlets"][index]["name"]
                        dict_device["outlets"][index]["name"] = set_state
                        return_dict["data"] = dict_device
                    else:
                        dict_device["name"] = state.get("name")
            for dict_device in GROUPS.get('data'):
                _object_id = dict_device.get("object_id")
                if _object_id == object_id:
                    dict_device["name"] = state.get("name")
        elif object_type != "group":
            # Regular device: copy the desired state into last_reading.
            for dict_device in USERS_ME_WINK_DEVICES.get('data'):
                _object_id = dict_device.get("object_id")
                if _object_id == object_id:
                    if device_object_type == "powerstrip":
                        # A powerstrip write fans out to both outlets.
                        set_state = state["outlets"][0]["desired_state"]["powered"]
                        dict_device["outlets"][0]["last_reading"]["powered"] = set_state
                        dict_device["outlets"][1]["last_reading"]["powered"] = set_state
                        return_dict["data"] = dict_device
                    elif device_object_type == "outlet":
                        index = device.index()
                        set_state = state["outlets"][index]["desired_state"]["powered"]
                        dict_device["outlets"][index]["last_reading"]["powered"] = set_state
                        return_dict["data"] = dict_device
                    else:
                        # nose_color/tare live at the top level of the device
                        # dict; everything else goes through desired_state.
                        if "nose_color" in state:
                            dict_device["nose_color"] = state.get("nose_color")
                        elif "tare" in state:
                            dict_device["tare"] = state.get("tare")
                        else:
                            for key, value in state.get('desired_state').items():
                                dict_device["last_reading"][key] = value
                        return_dict["data"] = dict_device
        else:
            # Group: only the powered true/false counts or position average change.
            for dict_device in GROUPS.get('data'):
                _object_id = dict_device.get("object_id")
                if _object_id == object_id:
                    set_state = state["desired_state"].get("powered")
                    if set_state is not None:
                        if set_state:
                            dict_device["reading_aggregation"]["powered"]["true_count"] = 1
                            dict_device["reading_aggregation"]["powered"]["false_count"] = 0
                        else:
                            dict_device["reading_aggregation"]["powered"]["true_count"] = 0
                            dict_device["reading_aggregation"]["powered"]["false_count"] = 1
                        return_dict["data"] = dict_device
                    else:
                        set_state = state["desired_state"].get("position")
                        dict_device["reading_aggregation"]["position"]["average"] = set_state
                        return_dict["data"] = dict_device
        return return_dict

    def local_set_state(self, device, state, id_override=None, type_override=None):
        """Local-control path delegates to the same fixture mutation."""
        return self.set_device_state(device, state, id_override, type_override)

    def get_device_state(self, device, id_override=None, type_override=None):
        """Return the matching fixture entry wrapped in {"data": ...}.

        :type device: WinkDevice
        """
        object_id = id_override or device.object_id()
        return_dict = {}
        # Fix: the original loop reused the name ``device``, shadowing the
        # parameter; renamed for clarity and consistency with set_device_state.
        for dict_device in USERS_ME_WINK_DEVICES.get('data'):
            _object_id = dict_device.get("object_id")
            if _object_id == object_id:
                return_dict["data"] = dict_device
        return return_dict

    def local_get_state(self, device, id_override=None, type_override=None):
        """Local-control path delegates to the same fixture lookup."""
        return self.get_device_state(device, id_override, type_override)
|
import math
from io import BytesIO
import qrcode
from reportlab.pdfbase.pdfmetrics import getAscent
from reportlab.pdfgen.canvas import Canvas
from lib.qrcrc import calc_crc
def gen_qr(data):
    """Render *data* as a black-on-white QR image (version 1, level-L ECC,
    1-pixel boxes, no border)."""
    options = dict(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=1,
        border=0,
    )
    code = qrcode.QRCode(**options)
    code.clear()
    code.add_data(data)
    return code.make_image(fill_color="black", back_color="white")
def mm(x):
    """Convert a length in millimetres to ReportLab points."""
    from reportlab.lib.units import mm as mm_unit
    return x * mm_unit
def generate(page_width_mm: float, page_height_mm: float,
             base_size_mm: float = 10, margin_x_mm: float = 2, margin_y_mm: float = 2, stride_x_mm: float = 0, stride_y_mm: float = 0,
             prefix: str = "", suffix: str = "", digits: int = 5, start: int = 0, pages: int = 1,
             generate_label: bool = True, label_spacing_mm: float = 0.4, font_size: int = 5) -> BytesIO:
    """Render pages of sequentially numbered QR labels into an in-memory PDF.

    Each label encodes ``V1.<prefix><num><suffix>.<crc>`` and optionally
    prints the human-readable text beneath the code. Tiles are laid out
    left-to-right, top-to-bottom; a new page starts when a row no longer
    fits, and generation stops after *pages* pages.

    Returns a BytesIO positioned at the start of the PDF data.
    """
    f = BytesIO()
    page_width = mm(page_width_mm)
    page_height = mm(page_height_mm)
    canvas = Canvas(f, pagesize=(page_width, page_height))
    font = "Helvetica", font_size
    # Vertical room reserved under each QR code for its text label.
    label_height = getAscent(*font) + mm(label_spacing_mm)
    tile_base_size = mm(base_size_mm)
    tile_margin_x = mm(margin_x_mm)
    tile_margin_y = mm(margin_y_mm)
    tile_width = tile_base_size + tile_margin_x
    tile_height = tile_base_size + tile_margin_y
    if generate_label:
        tile_height += label_height
    # A stride smaller than the tile would overlap tiles; clamp it up.
    stride_x = max(mm(stride_x_mm), tile_width)
    stride_y = max(mm(stride_y_mm), tile_height)
    num = start
    page = 0
    # PDF origin is bottom-left, so rows are walked downward from the top.
    pos_x = tile_margin_x
    pos_y = page_height - tile_margin_y
    while True:
        txt = f"{prefix}{num:0{digits}}{suffix}"
        # Encoded payload carries a version tag and a CRC for scan validation.
        data_txt = f"V1.{txt}.{calc_crc(txt)}"
        num += 1
        img = gen_qr(data_txt)
        canvas.drawInlineImage(img, pos_x, pos_y - tile_base_size, width=tile_base_size, height=tile_base_size)
        if generate_label:
            # Centre the label under the QR tile.
            label_size = canvas.stringWidth(txt, *font)
            text_obj = canvas.beginText(
                pos_x + tile_base_size / 2 - label_size / 2,
                pos_y - tile_base_size - label_height)
            text_obj.setFont(*font)
            text_obj.textLines(txt)
            canvas.drawText(text_obj)
        pos_x += stride_x
        if pos_x + tile_width > page_width:
            # Row full: wrap to the next row.
            pos_x = tile_margin_x
            pos_y -= stride_y
            if pos_y - tile_height < tile_margin_y:
                # Page full: emit it and start a new one (or finish).
                canvas.showPage()
                page += 1
                pos_y = page_height - tile_margin_y
                if page == pages:
                    break
    canvas.save()
    f.seek(0)
    return f
# Public API of this module.
__all__ = [
    "generate",
]
|
import uuid
def get_uuid_unicode():
    """Return a freshly generated UUID4 as a text string.

    Works on both Python 2 (``unicode``) and Python 3 (``str``): the
    NameError raised on Python 3 for the undefined ``unicode`` builtin
    selects the ``str`` fallback.
    """
    new_uuid = uuid.uuid4()
    try:
        text_type = unicode
    except NameError:
        text_type = str
    return text_type(new_uuid)
class NotAuthorizedException(Exception):
    """Raised when the caller lacks permission for the requested operation."""
    pass
|
from dacite import from_dict
from sqlalchemy.sql.functions import user
from src.database.models.account import Account
from src.database.models.transaction import Transaction
from src.models.response.razorpayx import PayoutsPayload
from src.models.response.razorpay import PaymentsPayload
from src.database.models.transaction import Transaction
from src.utils.transactions import get_all_transactions
from src.routes.websocket import ClientWebsocketEndpoint
from starlette.websockets import WebSocket, WebSocketState
from starlette.responses import JSONResponse
async def razorpayx_webhook(request):
    """Handle a RazorpayX payout webhook.

    Records the payout as a "send" transaction; on a processed payout also
    deducts the amount from the user's balance and pushes the fresh balance
    and transaction list over the user's websocket if one is connected.
    """
    response = await request.json()
    print(response, "do")
    data = from_dict(data_class=PayoutsPayload, data=response)
    user_id = data.payload.payout.entity.notes["user_id"]
    upi = data.payload.payout.entity.notes["upi_id"]
    # Event names look like "payout.processed"; the suffix is the status.
    status = data.event.split(".")[1]
    # Gateway amounts arrive in paise; convert to rupees once.
    amount = data.payload.payout.entity.amount / 100
    await Transaction.create(
        razorpay_tid=data.payload.payout.entity.id,
        amount=amount,
        user_id=user_id,
        type="send",
        fund_account_id=data.payload.payout.entity.fund_account_id,
        upi=upi,
        status=status,
    )
    if status == "processed":
        account = await Account.get_by_user_id(user_id)
        await Account.update_by_user_id(
            user_id, balance=float(account.balance) - amount
        )
        print(f"Deducted {amount} from {user_id}")
        # send to websocket here
        websocket = ClientWebsocketEndpoint.user_socket_map.get(user_id)
        if websocket and websocket.client_state == WebSocketState.CONNECTED:
            transactions = await get_all_transactions(user_id)
            account = await Account.get_by_user_id(user_id)
            websocket_response = {
                "user_id": user_id,
                "transactions": transactions,
                "balance": float(account.balance)
            }
            await websocket.send_json(websocket_response)
            return JSONResponse({"Success": "Success"}, status_code=200)
        else:
            return JSONResponse({"Error": "Websocket is already closed"}, status_code=500)
    # Bug fix: non-"processed" events previously fell through and returned
    # None (a server error); acknowledge them so the gateway does not retry.
    return JSONResponse({"Success": "Success"}, status_code=200)
async def razorpay_webhook(request):
    """Handle a Razorpay payment webhook.

    Records the payment as a "receive" transaction; on an authorized payment
    also credits the user's balance and pushes the fresh balance and
    transaction list over the user's websocket if one is connected.
    """
    response = await request.json()
    print(response, "get")
    data: PaymentsPayload = from_dict(data_class=PaymentsPayload, data=response)
    user_id = data.payload.payment.entity.notes.user_id
    upi = data.payload.payment.entity.notes.upi_id
    # Event names look like "payment.authorized"; the suffix is the status.
    status = data.event.split(".")[1]
    # Gateway amounts arrive in paise; convert to rupees once.
    amount = data.payload.payment.entity.amount / 100
    await Transaction.create(
        razorpay_tid=data.payload.payment.entity.id,
        amount=amount,
        user_id=user_id,
        type="receive",
        fund_account_id=None,
        upi=upi,
        status=status,
    )
    if status == "authorized":
        account = await Account.get_by_user_id(user_id)
        await Account.update_by_user_id(
            user_id, balance=float(account.balance) + amount
        )
        # Bug fix: the log previously used integer division (// 100) and so
        # could report a different amount than was actually credited.
        print(f"Credited {amount} to {user_id}")
        # send to websocket here
        websocket = ClientWebsocketEndpoint.user_socket_map.get(user_id)
        if websocket and websocket.client_state == WebSocketState.CONNECTED:
            transactions = await get_all_transactions(user_id)
            account = await Account.get_by_user_id(user_id)
            websocket_response = {
                "user_id": user_id,
                "transactions": transactions,
                "balance": float(account.balance)
            }
            await websocket.send_json(websocket_response)
            return JSONResponse({"Success": "Success"}, status_code=200)
        else:
            return JSONResponse({"Error": "Websocket is already closed"}, status_code=500)
    # Bug fix: non-"authorized" events previously fell through and returned
    # None (a server error); acknowledge them so the gateway does not retry.
    return JSONResponse({"Success": "Success"}, status_code=200)
|
# Copyright 1996-2019 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Referee supervisor controller for the Robot Wrestling Tournament."""
import math
from controller import Supervisor
class Referee (Supervisor):
    """Supervisor that referees a 3-minute wrestling match: runs the clock
    display, tracks each robot's ring coverage, counts knock-down time and
    announces the winner."""

    def init(self):
        """One-time setup (called explicitly after construction — this is
        deliberately not ``__init__``)."""
        self.digit = [[0] * 10 for i in range(3)]  # create an array of size [3][10] filled in with zeros
        for j in range(3):
            for i in range(10):
                self.digit[j][i] = self.getDevice("digit " + str(j) + str(i))
        self.currentDigit = [0, 0, 0]  # 0:00
        self.robot = [0] * 2
        self.robot[0] = self.getFromDef("WRESTLER_RED")
        self.robot[1] = self.getFromDef("WRESTLER_BLUE")
        # Per-robot min/max position seen so far; their spread gives coverage.
        self.min = [[0] * 3 for i in range(2)]
        self.max = [[0] * 3 for i in range(2)]
        for i in range(2):
            self.min[i] = self.robot[i].getPosition()
            self.max[i] = self.robot[i].getPosition()
        self.coverage = [0] * 2
        self.koCount = [0] * 2  # accumulated "down" time in ms per robot
        self.indicator = [0] * 2
        self.indicator[0] = self.getDevice("red indicator")
        self.indicator[1] = self.getDevice("blue indicator")

    def displayTime(self, minutes, seconds):
        """Show M:SS on the 3-digit display by moving digit meshes in/out of view."""
        for j in range(3):
            self.digit[j][self.currentDigit[j]].setPosition(1000)  # far away, not visible
        self.currentDigit[0] = minutes
        self.currentDigit[1] = seconds // 10
        self.currentDigit[2] = seconds % 10
        for j in range(3):
            self.digit[j][self.currentDigit[j]].setPosition(0)  # visible

    def run(self):
        """Main refereeing loop: step the simulation, update clock, coverage
        and KO counters until time runs out or a robot is KO'd."""
        matchDuration = 3 * 60 * 1000  # a match lasts 3 minutes
        timeStep = int(self.getBasicTimeStep())  # retrieves the WorldInfo.basicTimeTime (ms) from the world file
        time = 0
        seconds = -1
        ko = -1
        while True:
            # Only re-evaluate every 200 ms of simulated time.
            if time % 200 == 0:
                s = int(time / 1000) % 60
                if seconds != s:
                    seconds = s
                    minutes = int(time / 60000)
                    self.displayTime(minutes, seconds)
                box = [0] * 3
                for i in range(2):
                    position = self.robot[i].getPosition()
                    if abs(position[0]) > 1 or abs(position[1]) > 1:  # outside of the ring
                        continue
                    coverage = 0
                    for j in range(3):
                        if position[j] < self.min[i][j]:
                            self.min[i][j] = position[j]
                        elif position[j] > self.max[i][j]:
                            self.max[i][j] = position[j]
                        box[j] = self.max[i][j] - self.min[i][j]
                        coverage += box[j] * box[j]
                    coverage = math.sqrt(coverage)
                    self.coverage[i] = coverage
                    self.indicator[i].setPosition(self.coverage[i] / 7)
                    if position[1] < 0.75:  # low position threshold
                        self.koCount[i] = self.koCount[i] + 200
                        if self.koCount[i] > 10000:  # 10 seconds
                            ko = i
                    else:
                        self.koCount[i] = 0
                if self.koCount[0] > self.koCount[1]:
                    print("\fred KO: %d" % (10 - self.koCount[0] // 1000))
                elif self.koCount[1] > self.koCount[0]:
                    print("\fblue KO: %d" % (10 - self.koCount[1] // 1000))
                # print("\fred: %1.3f - blue: %1.3f" % (self.coverage[0], self.coverage[1]))
            if self.step(timeStep) == -1 or time > matchDuration or ko != -1:
                break
            time += timeStep
        if ko == 0:
            print("Wrestler red is KO. Wrestler blue wins!")
        elif ko == 1:
            print("Wrestler blue is KO. Wrestler red wins!")
        elif self.coverage[0] >= self.coverage[1]:  # in case of coverage equality, red wins
            # Bug fix: result messages previously misspelled "Wrestler".
            print("Wrestler red wins: %s >= %s" % (self.coverage[0], self.coverage[1]))
        else:
            print("Wrestler blue wins: %s > %s" % (self.coverage[1], self.coverage[0]))
# create the referee instance and run main loop
referee = Referee()
referee.init()  # explicit setup: Referee uses init(), not __init__
referee.run()
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class TaskOrchestrationItem(Model):
    """TaskOrchestrationItem.

    :param item_type:
    :type item_type: object
    """

    # msrest serialization map: Python attribute -> wire key and type.
    _attribute_map = {
        'item_type': {'key': 'itemType', 'type': 'object'}
    }

    def __init__(self, item_type=None):
        """Initialize with an optional item_type payload."""
        super(TaskOrchestrationItem, self).__init__()
        self.item_type = item_type
|
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'notebooks instances rollback' command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.notebooks import instances as instance_util
from googlecloudsdk.api_lib.notebooks import util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.notebooks import flags
DETAILED_HELP = {
'DESCRIPTION':
"""
Request for rolling back notebook instances.
""",
'EXAMPLES':
"""
To rollback an instance, run:
$ {command} example-instance target-snapshot=projects/example-project/global/snapshots/aorlbjvpavvf --location=us-central1-a
""",
}
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Rollback(base.Command):
    """Request for rolling back instances."""

    @staticmethod
    def Args(parser):
        """Register rollback flags for this command."""
        flags.AddRollbackInstanceFlags(parser)

    def Run(self, args):
        release_track = self.ReleaseTrack()
        client = util.GetClient(release_track)
        messages = util.GetMessages(release_track)
        instance_service = client.projects_locations_instances
        # NOTE(review): if target_snapshot is not specified, Run falls through
        # and returns None — presumably the flag is required; confirm in
        # flags.AddRollbackInstanceFlags.
        if args.IsSpecified('target_snapshot'):
            operation = instance_service.Rollback(
                instance_util.CreateInstanceRollbackRequest(args, messages))
            # Waits on (or returns) the long-running operation per --async.
            return instance_util.HandleLRO(
                operation,
                args,
                instance_service,
                release_track,
                operation_type=instance_util.OperationType.ROLLBACK)


Rollback.detailed_help = DETAILED_HELP
|
# win32traceutil like utility for Pythonwin
import _thread
import win32trace, win32event, win32api
from pywin.framework import winout
# Singleton trace window, created lazily by MakeOutputWindow().
outputWindow = None
def CollectorThread(stopEvent, file):
    """Background thread: pump win32trace output into *file* until *stopEvent*
    is signalled; always tears down the trace reader on exit."""
    win32trace.InitRead()
    handle = win32trace.GetHandle()
    # Run this thread at a lower priority to the main message-loop (and printing output)
    # thread can keep up
    import win32process

    win32process.SetThreadPriority(
        win32api.GetCurrentThread(), win32process.THREAD_PRIORITY_BELOW_NORMAL
    )
    try:
        while 1:
            # Wait on either new trace data (handle) or the stop event.
            rc = win32event.WaitForMultipleObjects(
                (handle, stopEvent), 0, win32event.INFINITE
            )
            if rc == win32event.WAIT_OBJECT_0:
                # About the only char we can't live with is \0!
                file.write(win32trace.read().replace("\0", "<null>"))
            else:
                # Stop event
                break
    finally:
        win32trace.TermRead()
        print("Thread dieing")
class WindowOutput(winout.WindowOutput):
    """Output window that spawns a collector thread to pump win32trace
    output into itself, and stops the thread when closed."""

    def __init__(self, *args):
        winout.WindowOutput.__init__(*(self,) + args)
        # Auto-reset event used to tell the collector thread to shut down.
        self.hStopThread = win32event.CreateEvent(None, 0, 0, None)
        _thread.start_new(CollectorThread, (self.hStopThread, self))

    def _StopThread(self):
        win32event.SetEvent(self.hStopThread)
        self.hStopThread = None

    def Close(self):
        self._StopThread()
        winout.WindowOutput.Close(self)

    # def OnViewDestroy(self, frame):
    #     return winout.WindowOutput.OnViewDestroy(self, frame)
    # def Create(self, title=None, style = None):
    #     rc = winout.WindowOutput.Create(self, title, style)
    #     return rc
    # Bug fix: the final `return rc` above was left uncommented, which is a
    # `return` outside any function — a SyntaxError at class scope.
def MakeOutputWindow():
    """Create (once) and return the global trace-collector output window."""
    # Note that it will not show until the first string written or
    # you pass bShow = 1
    global outputWindow
    if outputWindow is None:
        title = "Python Trace Collector"
        # queueingFlag doesn't matter, as all output will come from new thread
        outputWindow = WindowOutput(title, title)
        # Let people know what this does!
        msg = """\
# This window will display output from any programs that import win32traceutil
# win32com servers registered with '--debug' are in this category.
"""
        outputWindow.write(msg)
    # force existing window open
    outputWindow.write("")
    return outputWindow
# Allow running standalone to open the trace-collector window.
if __name__ == "__main__":
    MakeOutputWindow()
|
import time
from math import fabs
import putil.timer
from putil.testing import UtilTest
class TestTimer(UtilTest):
    """Exercises putil.timer.Timer step timing and Accumulator statistics."""

    def setUp(self):
        # Scripted sleep durations, consumed one per simulated operation;
        # exhausting an iterator (StopIteration) ends the driving loop.
        self.op1_times = iter([.01, .02])
        self.a1 = putil.timer.Accumulator()
        self.op2_step1_times = iter([.005, .015, .005, .005])
        self.op2_step2_times = iter([.01, .02, .01, .01])
        self.a2 = putil.timer.Accumulator()

    def test_found_caller(self):
        """A timer created at import time is named after its defining module."""
        import importable.create_timer
        t = importable.create_timer.t
        self.assertEqual('timing.putil.test.importable.create_timer', t.logger.name)

    def test_time_event(self):
        """Two completed steps yield three recorded time points."""
        t = putil.timer.Timer()
        time.sleep(0.01)
        t.complete_step('pause')
        time.sleep(0.02)
        t.complete_step()
        self.assertEqual(3, len(t.times))

    def one_step_operation(self):
        """Run one scripted single-step operation into accumulator a1."""
        t = putil.timer.Timer()
        # Bug fix: iterator.next() is Python-2-only; the builtin next() works
        # on both Python 2.6+ and Python 3.
        time.sleep(next(self.op1_times))
        t.complete_step()
        self.a1.add(t)

    def test_stats_one_step(self):
        """Average and standard deviation over the two scripted runs."""
        try:
            while True:
                self.one_step_operation()
        except StopIteration:
            pass
        self.assertEqual(2, self.a1.get_count())
        self.assertAlmostEqual(self.a1.get_average(), 0.015, places=2)
        self.assertTrue(fabs(self.a1.get_average() - 0.015) < .002)
        self.assertAlmostEqual(self.a1.get_standard_deviation(), 0.005, places=2)

    def two_step_operation(self):
        """Run one scripted two-step operation into accumulator a2."""
        t = putil.timer.Timer()
        time.sleep(next(self.op2_step1_times))
        t.complete_step('one')
        time.sleep(next(self.op2_step2_times))
        t.complete_step('two')
        self.a2.add(t)

    def test_stats_two_steps(self):
        """Per-step and overall counts/averages over the four scripted runs."""
        try:
            while True:
                self.two_step_operation()
        except StopIteration:
            pass
        self.assertEqual(8, self.a2.get_count())
        self.assertEqual(4, self.a2.get_count("one"))
        self.assertEqual(4, self.a2.get_count("two"))
        self.assertAlmostEqual(self.a2.get_average(), 0.01, places=2)
        self.assertAlmostEqual(self.a2.get_average("one"), 0.008, places=2)
        self.assertAlmostEqual(self.a2.get_average("two"), 0.013, places=2)
        self.assertNotEqual(0, self.a2.get_standard_deviation())
|
# coding=utf-8
# Copyright 2018 The Google Flax Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Dict
import numpy as np
import flax.linen as nn
import jax
import jax.numpy as jnp
from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_flax_utils import FlaxPreTrainedModel, gelu
from ...utils import logging
from .configuration_bert import BertConfig
# Module-level logger plus the doc-reference constants consumed by the
# docstring decorators (add_start_docstrings*) applied below.
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
# Boilerplate prepended to the model class docstring by @add_start_docstrings.
BERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.FlaxPreTrainedModel`. Check the superclass documentation for the
generic methods the library implements for all its model (such as downloading, saving and converting weights from
PyTorch models)
This model is also a Flax Linen `flax.nn.Module
<https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html>`__ subclass. Use it as a regular Flax
Module and refer to the Flax documentation for all matter related to general usage and behavior.
Finally, this model supports inherent JAX features such as:
- `Just-In-Time (JIT) compilation <https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit>`__
- `Automatic Differentiation <https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation>`__
- `Vectorization <https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap>`__
- `Parallelization <https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap>`__
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
# Argument documentation injected into __call__ via
# @add_start_docstrings_to_model_forward; "{0}" is filled with the shape text.
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`numpy.ndarray` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`numpy.ndarray` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`numpy.ndarray` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`__
position_ids (:obj:`numpy.ndarray` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
class FlaxBertLayerNorm(nn.Module):
    """
    Layer normalization (https://arxiv.org/abs/1607.06450) applied over the
    last axis of the input data.
    """

    epsilon: float = 1e-6
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    bias: bool = True  # If True, bias (beta) is added.
    scale: bool = True  # If True, multiply by scale (gamma). When the next layer is linear
    # (also e.g. nn.relu), this can be disabled since the scaling will be
    # done by the next layer.
    bias_init: jnp.ndarray = nn.initializers.zeros
    scale_init: jnp.ndarray = nn.initializers.ones

    @nn.compact
    def __call__(self, x):
        """Normalize ``x`` along its final axis.

        Each example is normalized independently (per-example mean ~0 and
        standard deviation ~1), unlike batch normalization which works
        across the batch.

        Args:
            x: the inputs

        Returns:
            Normalized inputs with the same shape as ``x``.
        """
        num_features = x.shape[-1]
        # E[x] and E[x^2] along the feature axis; var = E[x^2] - (E[x])^2.
        ex = jnp.mean(x, axis=-1, keepdims=True)
        ex2 = jnp.mean(jax.lax.square(x), axis=-1, keepdims=True)
        variance = ex2 - jax.lax.square(ex)
        inv_std = jax.lax.rsqrt(variance + self.epsilon)
        if self.scale:
            gamma = jnp.asarray(self.param("gamma", self.scale_init, (num_features,)), self.dtype)
            inv_std = inv_std * gamma
        normalized = (x - ex) * inv_std
        if self.bias:
            beta = jnp.asarray(self.param("beta", self.bias_init, (num_features,)), self.dtype)
            normalized = normalized + beta
        return normalized
class FlaxBertEmbedding(nn.Module):
    """
    Embedding lookup whose parameter is named ``weight`` (the PyTorch
    convention) rather than Flax's default ``embedding``, so PyTorch
    checkpoints map one-to-one.
    """

    vocab_size: int
    hidden_size: int
    emb_init: Callable[..., np.ndarray] = nn.initializers.normal(stddev=0.1)

    @nn.compact
    def __call__(self, inputs):
        # One row of the table per input id.
        table = self.param("weight", self.emb_init, (self.vocab_size, self.hidden_size))
        return jnp.take(table, inputs, axis=0)
class FlaxBertEmbeddings(nn.Module):
    """Sum word, position and token-type embeddings, then layer-normalize."""

    vocab_size: int
    hidden_size: int
    type_vocab_size: int
    max_length: int

    @nn.compact
    def __call__(self, input_ids, token_type_ids, position_ids, attention_mask):
        # Each id tensor is forced to int32 and at least 2-D before lookup.
        word_emb = FlaxBertEmbedding(self.vocab_size, self.hidden_size, name="word_embeddings")(
            jnp.atleast_2d(input_ids.astype("i4"))
        )
        pos_emb = FlaxBertEmbedding(self.max_length, self.hidden_size, name="position_embeddings")(
            jnp.atleast_2d(position_ids.astype("i4"))
        )
        type_emb = FlaxBertEmbedding(self.type_vocab_size, self.hidden_size, name="token_type_embeddings")(
            jnp.atleast_2d(token_type_ids.astype("i4"))
        )
        # Position embeddings are broadcast up to the word-embedding shape
        # before the three terms are summed.
        combined = word_emb + jnp.broadcast_to(pos_emb, word_emb.shape) + type_emb
        return FlaxBertLayerNorm(name="layer_norm")(combined)
class FlaxBertAttention(nn.Module):
    """Self-attention sublayer with residual connection and layer norm."""

    num_heads: int
    head_size: int

    @nn.compact
    def __call__(self, hidden_state, attention_mask):
        # The mask arrives as (*batch_sizes, kv_length); Flax wants it
        # broadcastable against attn_weights of shape
        # (*batch_sizes, num_heads, q_length, kv_length), so insert the
        # two singleton axes.
        broadcast_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
        attn = nn.attention.SelfAttention(
            num_heads=self.num_heads, qkv_features=self.head_size, name="self"
        )(hidden_state, broadcast_mask)
        # Residual add followed by layer normalization.
        return FlaxBertLayerNorm(name="layer_norm")(attn + hidden_state)
class FlaxBertIntermediate(nn.Module):
    """Position-wise feed-forward expansion with GELU activation."""

    output_size: int

    @nn.compact
    def __call__(self, hidden_state):
        # TODO: Add ACT2FN reference to change activation function
        projected = nn.Dense(features=self.output_size, name="dense")(hidden_state)
        return gelu(projected)
class FlaxBertOutput(nn.Module):
    """Project the feed-forward output back down, add the residual, normalize."""

    @nn.compact
    def __call__(self, intermediate_output, attention_output):
        # Project back to the attention output width.
        projected = nn.Dense(attention_output.shape[-1], name="dense")(intermediate_output)
        return FlaxBertLayerNorm(name="layer_norm")(projected + attention_output)
class FlaxBertLayer(nn.Module):
    """One transformer encoder layer: attention -> intermediate -> output."""

    num_heads: int
    head_size: int
    intermediate_size: int

    @nn.compact
    def __call__(self, hidden_state, attention_mask):
        attn_out = FlaxBertAttention(self.num_heads, self.head_size, name="attention")(
            hidden_state, attention_mask
        )
        inter_out = FlaxBertIntermediate(self.intermediate_size, name="intermediate")(attn_out)
        return FlaxBertOutput(name="output")(inter_out, attn_out)
class FlaxBertLayerCollection(nn.Module):
    """
    Stores N BertLayer(s)
    """

    num_layers: int
    num_heads: int
    head_size: int
    intermediate_size: int

    @nn.compact
    def __call__(self, inputs, attention_mask):
        assert self.num_layers > 0, f"num_layers should be >= 1, got ({self.num_layers})"
        # Feed the output of each encoder layer into the next one; layers
        # are named "0", "1", ... to match checkpoint parameter paths.
        hidden = inputs
        for layer_idx in range(self.num_layers):
            hidden = FlaxBertLayer(
                self.num_heads, self.head_size, self.intermediate_size, name=f"{layer_idx}"
            )(hidden, attention_mask)
        return hidden
class FlaxBertEncoder(nn.Module):
    """Thin wrapper naming the stacked layer collection "layer"."""

    num_layers: int
    num_heads: int
    head_size: int
    intermediate_size: int

    @nn.compact
    def __call__(self, hidden_state, attention_mask):
        stack = FlaxBertLayerCollection(
            self.num_layers, self.num_heads, self.head_size, self.intermediate_size, name="layer"
        )
        return stack(hidden_state, attention_mask)
class FlaxBertPooler(nn.Module):
    """Pool a sequence by passing its first ([CLS]) token through a tanh dense layer."""

    @nn.compact
    def __call__(self, hidden_state):
        first_token = hidden_state[:, 0]
        pooled = nn.Dense(hidden_state.shape[-1], name="dense")(first_token)
        return jax.lax.tanh(pooled)
class FlaxBertModule(nn.Module):
    """Full BERT trunk: embeddings -> encoder stack -> pooler."""

    vocab_size: int
    hidden_size: int
    type_vocab_size: int
    max_length: int
    num_encoder_layers: int
    num_heads: int
    head_size: int
    intermediate_size: int

    @nn.compact
    def __call__(self, input_ids, attention_mask, token_type_ids, position_ids):
        # Token/position/segment embeddings.
        hidden = FlaxBertEmbeddings(
            self.vocab_size, self.hidden_size, self.type_vocab_size, self.max_length, name="embeddings"
        )(input_ids, token_type_ids, position_ids, attention_mask)
        # N stacked encoder layers.
        hidden = FlaxBertEncoder(
            self.num_encoder_layers, self.num_heads, self.head_size, self.intermediate_size, name="encoder"
        )(hidden, attention_mask)
        # Return both the per-token hidden states and the pooled [CLS] vector.
        return hidden, FlaxBertPooler(name="pooler")(hidden)
@add_start_docstrings(
    "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class FlaxBertModel(FlaxPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    """

    model_class = FlaxBertModule
    config_class = BertConfig
    base_model_prefix = "bert"

    @staticmethod
    def convert_from_pytorch(pt_state: Dict, config: BertConfig) -> Dict:
        """Rename and reshape a PyTorch BERT state dict into the Flax layout.

        Args:
            pt_state: mapping of PyTorch parameter names to tensors.
            config: model config; only ``num_attention_heads`` and
                ``hidden_size`` are read, for head-splitting reshapes.

        Returns:
            A new dict with Flax-style keys and tensor layouts.
        """
        jax_state = dict(pt_state)
        # Need to change some parameters name to match Flax names so that we don't have to fork any layer
        for key, tensor in pt_state.items():
            # Key parts
            key_parts = set(key.split("."))
            # Every dense layer has "kernel" parameters instead of "weight"
            if "dense.weight" in key:
                del jax_state[key]
                key = key.replace("weight", "kernel")
                jax_state[key] = tensor
            # SelfAttention needs also to replace "weight" by "kernel"
            if {"query", "key", "value"} & key_parts:
                # Flax SelfAttention decomposes the heads (num_head, size // num_heads)
                if "bias" in key:
                    jax_state[key] = tensor.reshape((config.num_attention_heads, -1))
                elif "weight" in key:
                    # BUGFIX: the original wrote ``elif "weight":`` — a truthy
                    # string literal, not a membership test — so the branch
                    # matched unconditionally. Within q/k/v keys only
                    # bias/weight occur, so behavior was accidentally right;
                    # the explicit test makes the intent real.
                    del jax_state[key]
                    key = key.replace("weight", "kernel")
                    tensor = tensor.reshape((config.num_attention_heads, -1, config.hidden_size)).transpose((2, 0, 1))
                    jax_state[key] = tensor
            # SelfAttention output is not a separate layer, remove one nesting
            if "attention.output.dense" in key:
                del jax_state[key]
                key = key.replace("attention.output.dense", "attention.self.out")
                jax_state[key] = tensor
            # SelfAttention output is not a separate layer, remove nesting on layer norm
            if "attention.output.LayerNorm" in key:
                del jax_state[key]
                key = key.replace("attention.output.LayerNorm", "attention.LayerNorm")
                jax_state[key] = tensor
            # There are some transposed parameters w.r.t their PyTorch counterpart
            if "intermediate.dense.kernel" in key or "output.dense.kernel" in key:
                jax_state[key] = tensor.T
            # Self Attention output projection needs to be transposed
            if "out.kernel" in key:
                jax_state[key] = tensor.reshape((config.hidden_size, config.num_attention_heads, -1)).transpose(
                    1, 2, 0
                )
            # Pooler needs to transpose its kernel
            if "pooler.dense.kernel" in key:
                jax_state[key] = tensor.T
            # Handle LayerNorm conversion
            if "LayerNorm" in key:
                del jax_state[key]
                # Replace LayerNorm by layer_norm
                new_key = key.replace("LayerNorm", "layer_norm")
                if "weight" in key:
                    new_key = new_key.replace("weight", "gamma")
                elif "bias" in key:
                    new_key = new_key.replace("bias", "beta")
                jax_state[new_key] = tensor
        return jax_state

    def __init__(self, config: BertConfig, state: dict, seed: int = 0, **kwargs):
        """Build the Flax module from *config* and hand off to the base class."""
        model = FlaxBertModule(
            vocab_size=config.vocab_size,
            hidden_size=config.hidden_size,
            type_vocab_size=config.type_vocab_size,
            max_length=config.max_position_embeddings,
            num_encoder_layers=config.num_hidden_layers,
            num_heads=config.num_attention_heads,
            head_size=config.hidden_size,
            intermediate_size=config.intermediate_size,
        )
        super().__init__(config, model, state, seed)

    @property
    def module(self) -> nn.Module:
        # The underlying Flax module held by the pretrained-model wrapper.
        return self._module

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def __call__(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None):
        if token_type_ids is None:
            # BUGFIX: default segment ids must be 0 ("sentence A"); the
            # original used ones_like, which marked every token as sentence B.
            token_type_ids = jnp.zeros_like(input_ids)
        if position_ids is None:
            # 0..seq_len-1, broadcast over the batch inside the module.
            position_ids = jnp.arange(jnp.atleast_2d(input_ids).shape[-1])
        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)
        return self.model.apply(
            {"params": self.params},
            jnp.array(input_ids, dtype="i4"),
            jnp.array(attention_mask, dtype="i4"),
            jnp.array(token_type_ids, dtype="i4"),
            jnp.array(position_ids, dtype="i4"),
        )
|
import enum
from datetime import datetime
from ipaddress import IPv4Address, IPv6Address
from typing import Callable, Iterator, Union
from pydantic import BaseModel, Field
# Public API of this module: the two CEF field models.
__all__ = (
    'MandatoryFields',
    'ExtensionFields',
)
# Uppercase, colon-separated MAC address, e.g. "AA:BB:CC:DD:EE:FF".
MAC_REGEX = r'^([A-F0-9]{2}:){5}[A-F0-9]{2}$'
# Hostname: alphanumeric first character, then alphanumerics, dots and
# dashes. The (?!\n) lookahead stops '$' from matching just before a
# trailing newline, rejecting values that end with '\n'.
HOSTNAME_REGEX = r'^[A-Za-z0-9][A-Za-z0-9\.\-]*(?!\n)$'
class DateTime(datetime):
    """``datetime`` subclass used as a pydantic field type.

    Accepts a ``datetime`` instance, a numeric epoch timestamp (int/float,
    or a digits-only string), or a string in one of ``DATETIME_FORMATS``,
    and returns the value normalized to ``str``/``int``/``float``.
    """

    # Accepted textual timestamp layouts, tried in declaration order.
    DATETIME_FORMATS = (
        '%b %d %H:%M:%S.%f %Z%z',
        '%b %d %H:%M:%S %Z%z',
        '%b %d %H:%M:%S.%f',
        '%b %d %H:%M:%S',
        '%b %d %Y %H:%M:%S.%f %Z%z',
        '%b %d %Y %H:%M:%S %Z%z',
        '%b %d %Y %H:%M:%S.%f',
        '%b %d %Y %H:%M:%S',
    )

    @classmethod
    def __get_validators__(cls) -> Iterator[Callable]:
        # pydantic (v1) hook: yield the validators to run for this type.
        yield cls.validate_dt_formats

    @classmethod
    def validate_dt_formats(
        cls,
        value: Union[datetime, int, float, str]
    ) -> Union[int, float, str]:
        """Validate *value* and return it in serializable form.

        Raises:
            ValueError: numeric string out of timestamp range, or text that
                matches none of ``DATETIME_FORMATS``.
            TypeError: unsupported input type.
        """
        if isinstance(value, datetime):
            # Render datetimes with the full '%b %d %Y %H:%M:%S.%f %Z%z' layout.
            return value.strftime(cls.DATETIME_FORMATS[4])
        elif isinstance(value, (int, float)):
            # Probe only: fromtimestamp() raises if the value is out of range.
            datetime.fromtimestamp(value)
            return value
        elif isinstance(value, str):
            if value.isdigit():
                try:
                    datetime.fromtimestamp(float(value))
                except Exception as ex:
                    # Chain the original error so the root cause is preserved
                    # in the traceback (the original raise dropped it).
                    raise ValueError(str(ex)) from ex
                return value
            else:
                errors = []
                for dt_format in cls.DATETIME_FORMATS:
                    try:
                        datetime.strptime(value, dt_format)
                    except ValueError as ex:
                        errors.append(str(ex))
                    else:
                        # First matching format wins; return the text as-is.
                        return value
                raise ValueError('\n '.join(errors))
        raise TypeError(f'Datetime came of type: {type(value)} '
                        f'expected: str, int, float')
class SeverityInts(enum.IntEnum):
    """Valid integer severities for a CEF event, 0 (lowest) to 10 (highest)."""
    LOW_0 = 0
    LOW_1 = 1
    LOW_2 = 2
    LOW_3 = 3
    MEDIUM_1 = 4
    MEDIUM_2 = 5
    MEDIUM_3 = 6
    HIGH_1 = 7
    HIGH_2 = 8
    VERY_HIGH_1 = 9
    VERY_HIGH_2 = 10
class SeverityStrings(enum.Enum):
    """Valid textual severities for a CEF event."""
    Unknown = 'Unknown'
    Low = 'Low'
    Medium = 'Medium'
    High = 'High'
    Very_High = 'Very-High'
class MandatoryFields(BaseModel):
    """The seven header fields required by every CEF event.

    Field names are PascalCase on purpose: they mirror the CEF header
    field names rather than Python naming conventions.
    """
    Version: int
    DeviceVendor: str
    DeviceProduct: str
    DeviceVersion: str
    DeviceEventClassID: Union[int, str]
    Name: str
    Severity: Union[SeverityInts, SeverityStrings]
class ExtensionFields(BaseModel):
    """Optional CEF extension-dictionary fields.

    Field names are the short CEF extension keys (hence the terse,
    non-PEP8 naming); length limits and regexes follow the per-key
    constraints encoded below. All fields default to None (absent).
    Note: ``in_`` carries a trailing underscore only because ``in`` is a
    Python keyword.
    """
    act: str = Field(default=None, max_length=63)
    app: str = Field(default=None, max_length=31)
    # Custom IPv6 address slots 1-4, each with a label field.
    c6a1: IPv6Address = Field(default=None)
    c6a1Label: str = Field(default=None, max_length=1023)
    c6a2: IPv6Address = Field(default=None)
    c6a2Label: str = Field(default=None, max_length=1023)
    c6a3: IPv6Address = Field(default=None)
    c6a3Label: str = Field(default=None, max_length=1023)
    c6a4: IPv6Address = Field(default=None)
    c6a4Label: str = Field(default=None, max_length=1023)
    cat: str = Field(default=None, max_length=1023)
    # Custom floating-point slots 1-4, each with a label field.
    cfp1: float = Field(default=None)
    cfp1Label: str = Field(default=None, max_length=1023)
    cfp2: float = Field(default=None)
    cfp2Label: str = Field(default=None, max_length=1023)
    cfp3: float = Field(default=None)
    cfp3Label: str = Field(default=None, max_length=1023)
    cfp4: float = Field(default=None)
    cfp4Label: str = Field(default=None, max_length=1023)
    # Custom integer slots 1-3, each with a label field.
    cn1: int = Field(default=None)
    cn1Label: str = Field(default=None, max_length=1023)
    cn2: int = Field(default=None)
    cn2Label: str = Field(default=None, max_length=1023)
    cn3: int = Field(default=None)
    cn3Label: str = Field(default=None, max_length=1023)
    cnt: int = Field(default=None)
    # Custom string slots 1-6, each with a label field.
    cs1: str = Field(default=None, max_length=4000)
    cs1Label: str = Field(default=None, max_length=1023)
    cs2: str = Field(default=None, max_length=4000)
    cs2Label: str = Field(default=None, max_length=1023)
    cs3: str = Field(default=None, max_length=4000)
    cs3Label: str = Field(default=None, max_length=1023)
    cs4: str = Field(default=None, max_length=4000)
    cs4Label: str = Field(default=None, max_length=1023)
    cs5: str = Field(default=None, max_length=4000)
    cs5Label: str = Field(default=None, max_length=1023)
    cs6: str = Field(default=None, max_length=4000)
    cs6Label: str = Field(default=None, max_length=1023)
    # Destination ("d"-prefixed) fields.
    destinationDnsDomain: str = Field(
        default=None, max_length=255, regex=HOSTNAME_REGEX,
    )
    destinationServiceName: str = Field(default=None, max_length=1023)
    destinationTranslatedAddress: IPv4Address = Field(default=None)
    destinationTranslatedPort: int = Field(default=None, gt=0, le=65535)
    # Device fields.
    deviceCustomDate1: DateTime = Field(default=None)
    deviceCustomDate1Label: str = Field(default=None, max_length=1023)
    deviceCustomDate2: DateTime = Field(default=None)
    deviceCustomDate2Label: str = Field(default=None, max_length=1023)
    deviceDirection: int = Field(default=None, ge=0, le=1)
    deviceDnsDomain: str = Field(
        default=None, max_length=255, regex=HOSTNAME_REGEX,
    )
    deviceExternalId: str = Field(default=None, max_length=255)
    deviceFacility: str = Field(default=None, max_length=1023)
    deviceInboundInterface: str = Field(default=None, max_length=128)
    deviceNtDomain: str = Field(default=None, max_length=255)
    deviceOutboundInterface: str = Field(default=None, max_length=128)
    devicePayloadId: str = Field(default=None, max_length=128)
    deviceProcessName: str = Field(default=None, max_length=1023)
    deviceTranslatedAddress: IPv4Address = Field(default=None)
    dhost: str = Field(default=None, max_length=255, regex=HOSTNAME_REGEX)
    dmac: str = Field(default=None, regex=MAC_REGEX)
    dntdom: str = Field(default=None, max_length=255)
    dpid: int = Field(default=None)
    dpriv: int = Field(default=None)
    dproc: str = Field(default=None, max_length=1023)
    dpt: int = Field(default=None, gt=0, le=65535)
    dst: IPv4Address = Field(default=None)
    dtz: str = Field(default=None, max_length=255)
    duid: str = Field(default=None, max_length=1023)
    duser: str = Field(default=None, max_length=1023)
    dvc: IPv4Address = Field(default=None)
    dvchost: str = Field(default=None, max_length=100, regex=HOSTNAME_REGEX)
    dvcmac: str = Field(default=None, regex=MAC_REGEX)
    dvcpid: int = Field(default=None)
    end: DateTime = Field(default=None)
    externalId: str = Field(default=None, max_length=40)
    # File fields (current and "old" variants).
    fileCreateTime: DateTime = Field(default=None)
    fileHash: str = Field(default=None, max_length=255)
    fileId: str = Field(default=None, max_length=1023)
    fileModificationTime: DateTime = Field(default=None)
    filePath: str = Field(default=None, max_length=1023)
    filePermission: str = Field(default=None, max_length=1023)
    fileType: str = Field(default=None, max_length=1023)
    flexDate1: DateTime = Field(default=None)
    flexDate1Label: str = Field(default=None, max_length=128)
    flexString1: str = Field(default=None, max_length=1023)
    flexString1Label: str = Field(default=None, max_length=128)
    flexString2: str = Field(default=None, max_length=1023)
    flexString2Label: str = Field(default=None, max_length=128)
    fname: str = Field(default=None, max_length=1023)
    fsize: int = Field(default=None)
    in_: int = Field(default=None)
    msg: str = Field(default=None, max_length=1023)
    oldFileCreateTime: DateTime = Field(default=None)
    oldFileHash: str = Field(default=None, max_length=255)
    oldFileId: str = Field(default=None, max_length=1023)
    oldFileModificationTime: DateTime = Field(default=None)
    oldFileName: str = Field(default=None, max_length=1023)
    oldFilePath: str = Field(default=None, max_length=1023)
    oldFilePermission: str = Field(default=None, max_length=1023)
    oldFileSize: int = Field(default=None)
    oldFileType: str = Field(default=None, max_length=1023)
    out: int = Field(default=None)
    outcome: str = Field(default=None, max_length=63)
    proto: str = Field(default=None, max_length=31)
    reason: str = Field(default=None, max_length=1023)
    # Request fields.
    request: str = Field(default=None, max_length=1023)
    requestClientApplication: str = Field(default=None, max_length=1023)
    requestContext: str = Field(default=None, max_length=2048)
    requestCookies: str = Field(default=None, max_length=1023)
    requestMethod: str = Field(default=None, max_length=1023)
    rt: DateTime = Field(default=None)
    # Source ("s"-prefixed) fields.
    shost: str = Field(default=None, max_length=1023, regex=HOSTNAME_REGEX)
    smac: str = Field(default=None, regex=MAC_REGEX)
    sntdom: str = Field(default=None, max_length=255)
    sourceDnsDomain: str = Field(
        default=None, max_length=255, regex=HOSTNAME_REGEX,
    )
    sourceServiceName: str = Field(default=None, max_length=1023)
    sourceTranslatedAddress: IPv4Address = Field(default=None)
    sourceTranslatedPort: int = Field(default=None, gt=0, le=65535)
    spid: int = Field(default=None)
    spriv: str = Field(default=None, max_length=1023)
    sproc: str = Field(default=None, max_length=1023)
    spt: int = Field(default=None, gt=0, le=65535)
    src: IPv4Address = Field(default=None)
    start: DateTime = Field(default=None)
    suid: str = Field(default=None, max_length=1023)
    suser: str = Field(default=None, max_length=1023)
    # Shadows the builtin name on purpose: the CEF key is literally "type".
    type: int = Field(default=None)
|
from typing import Any
class State:
"""Basic implementation of a state object"""
_state = {}
def set_prop(self, prop: str, value: Any) -> None:
self._state[prop] = value
def get_prop(self, prop: str) -> Any:
return self._state.get(prop, None)
state = State() |
"""Init Control"""
from .keycontroller import * |
class FileResult:
    """A file-level result: a filename together with its content.

    The repr truncates content longer than 20 characters so large
    payloads stay readable in logs and interactive sessions.
    """

    def __init__(self, filename, content):
        self.filename = filename
        self.content = content

    def __repr__(self):
        limit = 20
        preview = self.content if len(self.content) < limit else f'{self.content[:limit]}...'
        return f'FileResult({self.filename!r}, {preview!r})'
class LineObject(FileResult):
    """A FileResult pinned to a specific line number within the file."""

    def __init__(self, filename, line_no, content):
        super().__init__(filename, content)
        self.line_no = line_no

    def __repr__(self):
        limit = 20
        preview = self.content if len(self.content) < limit else f'{self.content[:limit]}...'
        return f'LineObject({self.filename!r}, {self.line_no}, {preview!r})'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.