Dataset schema (one column per field; ⌀ marks nullable columns):

| Column | Dtype | Range / Values |
|---|---|---|
| hexsha | string | lengths 40–40 |
| size | int64 | 5–2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3–248 |
| max_stars_repo_name | string | lengths 5–125 |
| max_stars_repo_head_hexsha | string | lengths 40–78 |
| max_stars_repo_licenses | list | lengths 1–10 |
| max_stars_count | int64 ⌀ | 1–191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | lengths 24–24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | lengths 24–24 |
| max_issues_repo_path | string | lengths 3–248 |
| max_issues_repo_name | string | lengths 5–125 |
| max_issues_repo_head_hexsha | string | lengths 40–78 |
| max_issues_repo_licenses | list | lengths 1–10 |
| max_issues_count | int64 ⌀ | 1–67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | lengths 24–24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | lengths 24–24 |
| max_forks_repo_path | string | lengths 3–248 |
| max_forks_repo_name | string | lengths 5–125 |
| max_forks_repo_head_hexsha | string | lengths 40–78 |
| max_forks_repo_licenses | list | lengths 1–10 |
| max_forks_count | int64 ⌀ | 1–105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | lengths 24–24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | lengths 24–24 |
| content | string | lengths 5–2.06M |
| avg_line_length | float64 | 1–1.02M |
| max_line_length | int64 | 3–1.03M |
| alphanum_fraction | float64 | 0–1 |
| count_classes | int64 | 0–1.6M |
| score_classes | float64 | 0–1 |
| count_generators | int64 | 0–651k |
| score_generators | float64 | 0–1 |
| count_decorators | int64 | 0–990k |
| score_decorators | float64 | 0–1 |
| count_async_functions | int64 | 0–235k |
| score_async_functions | float64 | 0–1 |
| count_documentation | int64 | 0–1.04M |
| score_documentation | float64 | 0–1 |
a8a8041933930261f3a31462ef960ecbd49c2f5c | 461 | py | Python | demo/worlddata/migrations/0005_foods_hunger.py | MarsZone/DreamLand | 87455f421c1ba09cb6efd5fc0882fbc2a29ea1a5 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | demo/worlddata/migrations/0005_foods_hunger.py | MarsZone/DreamLand | 87455f421c1ba09cb6efd5fc0882fbc2a29ea1a5 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | demo/worlddata/migrations/0005_foods_hunger.py | MarsZone/DreamLand | 87455f421c1ba09cb6efd5fc0882fbc2a29ea1a5 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-07-06 08:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('worlddata', '0004_auto_20170628_1556'),
]
operations = [
migrations.AddField(
model_name='foods',
name='hunger',
field=models.IntegerField(blank=True, default=0),
),
]
| 21.952381 | 61 | 0.618221 | 303 | 0.657267 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.264642 |
a8a830fc1bf61dcb27f3d46222c49301867517cd | 1,216 | py | Python | woodstock/auth_backends.py | allink/woodstock | afafecb7c4454f96e51c051044ed8ed74853c048 | [
"BSD-3-Clause"
] | null | null | null | woodstock/auth_backends.py | allink/woodstock | afafecb7c4454f96e51c051044ed8ed74853c048 | [
"BSD-3-Clause"
] | null | null | null | woodstock/auth_backends.py | allink/woodstock | afafecb7c4454f96e51c051044ed8ed74853c048 | [
"BSD-3-Clause"
] | null | null | null | from woodstock.models import Invitee, Participant
from woodstock import settings
from django.core.exceptions import ObjectDoesNotExist
class PersonBackend(object):
supports_object_permissions = False
supports_anonymous_user = False
supports_inactive_user = False
def authenticate(self, username=None, password=None, user=None):
if user:
return user
try:
if settings.USERNAME_FIELD:
get_filter = {settings.USERNAME_FIELD: username}
user = self.model.objects.get(**get_filter)
if user.check_password(password):
return user
else:
return self.model.objects.get(password=password)
except ObjectDoesNotExist:
return None
def get_user(self, user_id):
try:
return self.model.objects.get(pk=user_id)
except ObjectDoesNotExist:
return None
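# Usage sketch (an assumption, not part of this module): the concrete backends
# below are wired into Django through the settings, e.g.
#
#   AUTHENTICATION_BACKENDS = (
#       'woodstock.auth_backends.InviteeBackend',
#       'woodstock.auth_backends.ParticipantBackend',
#   )
#
# after which django.contrib.auth.authenticate() tries each backend in order.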
class InviteeBackend(PersonBackend):
"""
Authenticates against woodstock.models.Invitee.
"""
model = Invitee
class ParticipantBackend(PersonBackend):
"""
Authenticates against woodstock.models.Participant.
"""
model = Participant
| 27.636364 | 68 | 0.649671 | 1,072 | 0.881579 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.106908 |
a8a98856c914486131c71af74b19873a5711b4de | 198 | py | Python | Python_Orientacao_Objeto/teste2.py | carlosrjhoe/Python | 2df531b23ebbabc40539b09bb2bbdbe1ed1c2164 | [
"MIT"
] | null | null | null | Python_Orientacao_Objeto/teste2.py | carlosrjhoe/Python | 2df531b23ebbabc40539b09bb2bbdbe1ed1c2164 | [
"MIT"
] | null | null | null | Python_Orientacao_Objeto/teste2.py | carlosrjhoe/Python | 2df531b23ebbabc40539b09bb2bbdbe1ed1c2164 | [
"MIT"
] | null | null | null | def soma(x,y):
return print(x + y)
def sub(x,y):
return print(x - y)
def mult(x,y):
return print(x * y)
def div(x,y):
return print(x / y)
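# Note: each function above returns None, because print() itself returns None;
# the printed output is the only observable result.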
soma(3,8)
sub(10,5)
mult(3,9)
div(15,7) | 11.647059 | 23 | 0.555556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a8aa4492ae02a02a4154bc910275a7278a6dce85 | 5,846 | py | Python | Greedy/HuffmanCompression.py | Jh123x/COMP-550-Algorithms-and-Analysis | 1bc0b7fb8c48dc52cf89704557795aa87290e88b | [
"MIT"
] | null | null | null | Greedy/HuffmanCompression.py | Jh123x/COMP-550-Algorithms-and-Analysis | 1bc0b7fb8c48dc52cf89704557795aa87290e88b | [
"MIT"
] | null | null | null | Greedy/HuffmanCompression.py | Jh123x/COMP-550-Algorithms-and-Analysis | 1bc0b7fb8c48dc52cf89704557795aa87290e88b | [
"MIT"
] | null | null | null | from math import log2
import time
from typing import Optional
from heapq import heapify, heappush, heappop
class Node(object):
def __init__(self, freq: int, value: Optional[str] = None, left: 'Node' = None, right: 'Node' = None) -> None:
"""Node object for representing the huffman tree"""
# Check if it is a valid node
if None in (left, right) and value is None:
raise ValueError("Either nodes or value must be defined")
if None not in (left, right) and value is not None:
raise ValueError(
f"Cannot define both left or right and value. Value: {value}, Left: {left}, Right: {right}")
self.value = value
self.freq = freq
self.left = left
self.right = right
def merge(self, other: 'Node') -> 'Node':
"""Merge other node with current node"""
return Node(self.freq + other.freq, left=self, right=other)
def get_compressed_string(self, letter: str) -> Optional[str]:
"""Get the compressed string based on the tree"""
# Leaf node
if self.value is not None:
return None if letter != self.value else ''
# Not a leaf
lresult = self.left.get_compressed_string(letter)
if lresult is not None:
return "0" + lresult
rresult = self.right.get_compressed_string(letter)
if rresult is not None:
return "1" + rresult
return None
def __eq__(self, __o: object) -> bool:
if type(__o) != type(self):
return False
        return self.freq == __o.freq and self.value == __o.value
def __lt__(self, __o: object) -> bool:
if type(__o) != type(self):
raise ValueError(f"Cannot compare {type(self)} with {type(__o)}")
return self.freq < __o.freq
def __repr__(self) -> str:
# Leaf
if self.value is not None:
return f"\tLeaf Node: '{self.value}': {self.freq}\n"
# Node
return f"""Node {self.freq}:\n{self.left}{self.right}"""
def get_frequency(text: str) -> dict:
"""Get word frequency from text"""
freq_dict = {}
for letter in text:
freq_dict[letter] = freq_dict.get(letter, 0) + 1
return freq_dict
def huffman_tree(text: str) -> Node:
"""Text data compression"""
frequency = get_frequency(text)
if len(frequency) == 0:
return None
# Min heap with key (freq, value)
min_heap = list(
map(
lambda x: Node(x[1], x[0]),
frequency.items()
)
)
heapify(min_heap)
# Merge nodes
while len(min_heap) > 1:
min1 = heappop(min_heap)
min2 = heappop(min_heap)
new_node = min1.merge(min2)
heappush(min_heap, new_node)
return min_heap[0]
def huffman_compression_dict(text: str) -> dict[str, str]:
"""Compressions using huffman tree"""
# Get tree
tree = huffman_tree(text)
# Get set of letters
letter_set = set(text)
# Encoding dict
encoding_d = {}
# Get encoding
for letter in letter_set:
result = tree.get_compressed_string(letter)
if result is None:
raise ValueError(f"Value not found in huffman tree {letter}")
encoding_d[letter] = result
return encoding_d
def conventional_compression_dict(text: str) -> dict[str, str]:
"""Compress the string using bits to represent them"""
letters = list(set(text))
# Get number of bits required to represent each character
bits_required = log2(len(letters))
if int(bits_required) != bits_required:
bits_required = int(bits_required) + 1
# Make encoding dict
encode_d = {}
# Assign bits to index
for index, letter in enumerate(letters):
encoded_val = bin(index)[2:]
# Padding
if len(encoded_val) < bits_required:
encoded_val = "0" * \
(bits_required - len(encoded_val)) + encoded_val
encode_d[letter] = encoded_val
return encode_d
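# Example (illustrative): 20 distinct characters give log2(20) ~= 4.32, rounded
# up to 5 bits per character, so the indices 0..19 map to '00000' .. '10011'.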
def compress(compress_dict: dict[str, str], text: str) -> str:
"""Compression routine"""
# Compression
acc = []
for letter in text:
acc.append(compress_dict[letter])
return ''.join(acc)
def decompress(compress_dict: dict[str, str], encoded_text: str) -> str:
"""Decompression routine"""
# Invert dictionary
decompress_dict = dict(map(lambda x: x[::-1], compress_dict.items()))
    index = 0
    length = 1
    buffer = []
    while index < len(encoded_text):
        # Bail out if no code matches the remaining bits
        if index + length > len(encoded_text):
            raise ValueError(f"Error decoding: {buffer}, {encoded_text[index:]}")
        # Grow the window one bit at a time until it matches a code
        curr = encoded_text[index: index + length]
        if curr not in decompress_dict:
            length += 1
            continue
        buffer.append(decompress_dict[curr])
        index += length
        length = 1
    return ''.join(buffer)
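# Why the scan above terminates (illustrative): every letter is a leaf of the
# Huffman tree, so no code is a prefix of another.  With codes such as
# {'a': '0', 'b': '10', 'c': '11'}, the input '010110' decodes unambiguously
# to 'abca' by growing the window until a code matches.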
if __name__ == "__main__":
text = "Hello world! This is the power of huffman encoding."
print(f"Text to encode: {text}\n")
start_time = time.time_ns()
huffman_dict = huffman_compression_dict(text)
huffman = compress(huffman_dict, text)
huffman_time = time.time_ns() - start_time
start_time = time.time_ns()
conventional_dict = conventional_compression_dict(text)
conven = compress(conventional_dict, text)
conven_time = time.time_ns() - start_time
print(
f"Length of huffman compression result = {len(huffman)} Time taken: {huffman_time}ns")
print(
f"Length of conventional compression result = {len(conven)} Time taken: {conven_time}ns")
print(f"Bits saved: {len(conven) - len(huffman)}")
print(
f"Huffman decompress to text string: {decompress(huffman_dict, huffman)}")
print(
f"Conventional decompress to text string: {decompress(conventional_dict, conven)}")
| 28.378641 | 114 | 0.614608 | 1,918 | 0.328088 | 0 | 0 | 0 | 0 | 0 | 0 | 1,502 | 0.256928 |
a8aaa18961285e591a10879305e552c35655d72a | 929 | py | Python | practice11.py | ikramulkayes/Python_season2 | d057460d07c5d2d218ecd52e08c1d355add44df2 | [
"MIT"
] | null | null | null | practice11.py | ikramulkayes/Python_season2 | d057460d07c5d2d218ecd52e08c1d355add44df2 | [
"MIT"
] | null | null | null | practice11.py | ikramulkayes/Python_season2 | d057460d07c5d2d218ecd52e08c1d355add44df2 | [
"MIT"
] | null | null | null |
import turtle as t
import time
import os #connected with practice10.py
path = 'F:\\Github\\Python_season2'
os.chdir(path)
from practice10 import Snake
from practice13 import Food
from practice14 import Score
screen = t.Screen()
screen.setup(width=600,height=600)
screen.bgcolor("black")
screen.title("My snake")
screen.tracer(0)
snake = Snake()
food = Food()
score = Score()
screen.listen()
screen.onkey(snake.up,"w")
screen.onkey(snake.down,"s")
screen.onkey(snake.right,"d")
screen.onkey(snake.left,"a")
game_on_off = True
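# Main game loop: advance the snake, redraw, then check food, wall and tail
# collisions on every tick.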
while game_on_off:
snake.movesnake()
screen.update()
time.sleep(0.1)
if snake.distancefromfood(food) < 15:
food.refresh()
snake.extendsnake()
score.increase()
if snake.distancefromwall():
game_on_off = False
score.gameover()
if snake.hitwithtail():
game_on_off = False
score.gameover()
screen.exitonclick()
| 19.765957 | 47 | 0.682454 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.092573 |
a8ab1507e7422c2e6e8504b138f80c7c058d5661 | 118 | py | Python | Maths/__init__.py | NextLmabda/PyLambda | 5fb91062c4f9c493fcd3637c2aa4d786f8c387d0 | [
"MIT"
] | null | null | null | Maths/__init__.py | NextLmabda/PyLambda | 5fb91062c4f9c493fcd3637c2aa4d786f8c387d0 | [
"MIT"
] | null | null | null | Maths/__init__.py | NextLmabda/PyLambda | 5fb91062c4f9c493fcd3637c2aa4d786f8c387d0 | [
"MIT"
] | null | null | null | print('Omolewa is teaching a class')
print('Lanre is still making changes')
print('Omolewa has made a change too')
| 29.5 | 39 | 0.737288 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.771186 |
a8ad80bc38cd16df91c7f7717982ec487fa5e8c1 | 126 | py | Python | preprocessing_danych/dataset_config.py | MaciejKonieczka/gazspot-prediction | 55177d7b7ae39a01a2820cd79f0e74897418e522 | [
"MIT"
] | null | null | null | preprocessing_danych/dataset_config.py | MaciejKonieczka/gazspot-prediction | 55177d7b7ae39a01a2820cd79f0e74897418e522 | [
"MIT"
] | null | null | null | preprocessing_danych/dataset_config.py | MaciejKonieczka/gazspot-prediction | 55177d7b7ae39a01a2820cd79f0e74897418e522 | [
"MIT"
] | null | null | null | test_index = ("2019-05-01", "2020-04-30")
train_index = ("2016-01-02", "2019-04-30")
val_index = ("2018-10-01", "2019-04-30")
| 31.5 | 42 | 0.619048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.571429 |
a8ada55f883fca697cc8b38ca817ca0714f36859 | 4,079 | py | Python | deployutils/apps/django/backends/jwt_session_store.py | knivets/djaodjin-deployutils | e3ce90152f1187dd1cd3d63aa14a1aaff924248f | [
"BSD-2-Clause"
] | null | null | null | deployutils/apps/django/backends/jwt_session_store.py | knivets/djaodjin-deployutils | e3ce90152f1187dd1cd3d63aa14a1aaff924248f | [
"BSD-2-Clause"
] | null | null | null | deployutils/apps/django/backends/jwt_session_store.py | knivets/djaodjin-deployutils | e3ce90152f1187dd1cd3d63aa14a1aaff924248f | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2018, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Session Store for JWT tokens.
"""
from __future__ import absolute_import
import logging, json
from django.contrib.sessions.backends.signed_cookies import SessionStore \
as SessionBase
from django.contrib.auth import (BACKEND_SESSION_KEY, HASH_SESSION_KEY,
SESSION_KEY)
from jwt import encode, decode
from .... import crypt
from .. import settings
from .auth import ProxyUserBackend
LOGGER = logging.getLogger(__name__)
class SessionStore(SessionBase):
def __init__(self, session_key=None):
super(SessionStore, self).__init__(session_key=session_key)
@property
def data(self):
return self._session
@staticmethod
def prepare(session_data={}, #pylint: disable=dangerous-default-value
passphrase=None):
"""
Returns *session_dict* as a base64 encrypted json string.
"""
if passphrase is None:
passphrase = settings.DJAODJIN_SECRET_KEY
        serialized = json.dumps(session_data, indent=2, cls=crypt.JSONEncoder)
return encode({'payload': serialized}, passphrase)
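    # Round-trip sketch (hypothetical values): prepare({'username': 'alice'})
    # returns a JWT signed with DJAODJIN_SECRET_KEY; load() below verifies the
    # signature and rebuilds the same dict, plus the django.contrib.auth keys.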
def load(self):
"""
We load the data from the key itself instead of fetching from
some external data store. Opposite of _get_session_key(),
raises BadSignature if signature fails.
"""
session_data = {}
try:
session_text = decode(self.session_key,
settings.DJAODJIN_SECRET_KEY)
LOGGER.debug("session text: %s<%s>",
session_text, session_text.__class__)
session_data = json.loads(session_text.get('payload'))
# We have been able to decode the session data, let's
# create Users and session keys expected by Django
# contrib.auth backend.
if 'username' in session_data:
backend = ProxyUserBackend()
backend.create_user(session_data)
user = backend.authenticate(session_data['username'])
session_data[SESSION_KEY] = user.id
session_data[BACKEND_SESSION_KEY] = "%s.%s" % (
backend.__class__.__module__, backend.__class__.__name__)
session_data[HASH_SESSION_KEY] = user.get_session_auth_hash()
        except Exception:
return {}
return session_data
def _get_session_key(self):
"""
Most session backends don't need to override this method, but we do,
because instead of generating a random string, we want to actually
generate a secure url-safe Base64-encoded string of data as our
session key.
"""
session_cache = getattr(self, '_session_cache', {})
return self.prepare(session_cache)
| 38.847619 | 78 | 0.68963 | 2,296 | 0.562883 | 0 | 0 | 487 | 0.119392 | 0 | 0 | 2,148 | 0.5266 |
a8adb47f47c90b72537625f613641ed8ee04bd96 | 4,143 | py | Python | gnumber/game.py | robin-pt/learning_note_pyGuessNumber | 9692991c603aad14329a835305e59c2c1ff76874 | [
"Apache-2.0"
] | null | null | null | gnumber/game.py | robin-pt/learning_note_pyGuessNumber | 9692991c603aad14329a835305e59c2c1ff76874 | [
"Apache-2.0"
] | null | null | null | gnumber/game.py | robin-pt/learning_note_pyGuessNumber | 9692991c603aad14329a835305e59c2c1ff76874 | [
"Apache-2.0"
] | null | null | null | """ handle game's room """
import uuid
from django_redis import get_redis_connection
from . import rules
def roomExist(roomID):
""" retun true if room is exist """
conn = get_redis_connection("games")
result = conn.exists(roomID)
del conn
return result
def create(userID):
""" create game's room, return room's id """
roomID = uuid.uuid4().hex
while roomExist(roomID):
roomID = uuid.uuid4().hex
conn = get_redis_connection("games")
conn.hset(roomID, userID, 0)
conn.expire(roomID, 300)
del conn
return roomID
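# Data layout sketch (inferred from the calls in this module): the "games"
# redis connection holds one hash per room mapping userID -> score, e.g.
#   HGETALL <roomID>  =>  {'user1': 0, 'user2': 3}
# while the separate "answer" connection maps roomID -> generated answer.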
def close(roomID):
""" using id close room """
if roomExist(roomID):
conn = get_redis_connection("games")
try:
if conn.delete(roomID):
print("close room {}".format(roomID))
finally:
del conn
def listRoom():
""" list current room """
conn = get_redis_connection("games")
result = conn.keys("*")
del conn
return result
def roomMemberCounter(roomID):
""" return amount of room's user """
if roomExist(roomID):
conn = get_redis_connection("games")
result = 0
try:
result = conn.hlen(roomID)
finally:
del conn
return int(result)
else:
return False
def startGame(roomID):
""" insert answer into room """
if not roomExist(roomID):
return False
status, response = rules.genQuestion(4)
if status is False:
return False
conn = get_redis_connection("answer")
result = conn.set(roomID, response)
conn.expire(roomID, 300)
del conn
return result
def endGame(roomID):
""" ending of game """
if not roomExist(roomID):
return False
conn = get_redis_connection("answer")
conn.delete(roomID)
del conn
def getAnswer(roomID):
""" get room's answer """
if not roomExist(roomID):
return False
conn = get_redis_connection("answer")
result = conn.get(roomID)
del conn
return result
def checkMatch(roomID, inputNumber):
""" return input result """
if not roomExist(roomID):
return False
if not isinstance(inputNumber, str):
return False
answer = getAnswer(roomID)
return rules.isMatchAnswer(str(inputNumber), str(answer))
def userExistInRoom(roomID, user):
""" check user is exist in room """
conn = get_redis_connection("games")
result = conn.hexists(roomID, user)
del conn
return result
def userJoin(roomID, user):
""" join user into room """
membersNumber = roomMemberCounter(roomID)
if not membersNumber:
return (False, "room is not exist.")
if membersNumber >= 4:
return (False, "room is full.")
conn = get_redis_connection("games")
conn.hset(roomID, user, 0)
del conn
return (True, "")
def userQuit(roomID, user):
""" user quit from room """
    if not roomExist(roomID):
return False
if not userExistInRoom(roomID, user):
return False
conn = get_redis_connection("games")
conn.hdel(roomID, user)
del conn
    if roomMemberCounter(roomID) == 0:
close(roomID)
return True
def getUsersInRoom(roomID):
""" get all user and their result """
if not roomExist(roomID):
return False
conn = get_redis_connection("games")
result = conn.hgetall(roomID)
del conn
return result
def getUserResult(roomID, user):
""" get user's result """
if not roomExist(roomID):
return False
if not userExistInRoom(roomID, user):
return False
conn = get_redis_connection("games")
result = conn.hget(roomID, user)
del conn
return result
def userIncre(roomID, user):
""" increment user's result """
if not roomExist(roomID):
return False
if not userExistInRoom(roomID, user):
return False
conn = get_redis_connection("games")
result = (user, conn.hincrby(roomID, user, 1))
del conn
return result
| 26.388535 | 62 | 0.603427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 636 | 0.153512 |
a8ae061feeba3985c2e2f3b0364a2da97ebf2e9e | 211 | py | Python | muscles/neo4j/read_py2neo.py | Spanarchian/measure_group | 45eb1f2420cda02611e454f9cd9e568ccba9a4e9 | [
"MIT"
] | null | null | null | muscles/neo4j/read_py2neo.py | Spanarchian/measure_group | 45eb1f2420cda02611e454f9cd9e568ccba9a4e9 | [
"MIT"
] | null | null | null | muscles/neo4j/read_py2neo.py | Spanarchian/measure_group | 45eb1f2420cda02611e454f9cd9e568ccba9a4e9 | [
"MIT"
] | null | null | null |
from py2neo import Graph
grapher = Graph("bolt://localhost:7687", auth=("neo4j", "changeme"))
x = grapher.run("MATCH (a :Person) RETURN a.name, a.city, a.age").to_data_frame()
print(f"To_data_frame() :\n{x}")
| 30.142857 | 81 | 0.682464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.535545 |
a8ae7294dfc6a8d068a24cb267a4767b51987e41 | 5,687 | py | Python | core/levels/thunder/a2finance/a2finance.py | wwu-pdx/web-ctf | 82774af40f613e99568d44cd83ea564e4ee68711 | [
"MIT"
] | null | null | null | core/levels/thunder/a2finance/a2finance.py | wwu-pdx/web-ctf | 82774af40f613e99568d44cd83ea564e4ee68711 | [
"MIT"
] | null | null | null | core/levels/thunder/a2finance/a2finance.py | wwu-pdx/web-ctf | 82774af40f613e99568d44cd83ea564e4ee68711 | [
"MIT"
] | null | null | null | import random
import os
import subprocess
import shutil
from google.cloud import storage, logging as glogging
from core.framework import levels
from core.framework.cloudhelpers import deployments, iam, gcstorage, ssh_keys
LEVEL_PATH = 'thunder/a2finance'
RESOURCE_PREFIX = 'a2'
LOG_NAME = 'transactions'
def create():
print("Level initialization started for: " + LEVEL_PATH)
# Create randomized nonce name to avoid namespace conflicts
nonce = str(random.randint(100000000000, 999999999999))
bucket_name = f'{RESOURCE_PREFIX}-bucket-{nonce}'
# Create ssh key
ssh_private_key, ssh_public_key = ssh_keys.generate_ssh_keypair()
ssh_username = "clouduser"
try:
# Construct git repo
repo_path = os.path.dirname(os.getcwd()) + "/temp-repository-" + nonce
create_repo_files(repo_path, ssh_private_key)
print("Level initialization finished for: " + LEVEL_PATH)
# Insert deployment
config_template_args = {'nonce': nonce,
'ssh_public_key': ssh_public_key,
'ssh_username': ssh_username}
template_files = [
'core/framework/templates/bucket_acl.jinja',
'core/framework/templates/ubuntu_vm.jinja',
'core/framework/templates/service_account.jinja',
'core/framework/templates/iam_policy.jinja']
deployments.insert(LEVEL_PATH, template_files=template_files,
config_template_args=config_template_args)
print("Level setup started for: " + LEVEL_PATH)
# Upload repository to bucket
gcstorage.upload_directory_recursive(repo_path, bucket_name)
# Create logs
secret_name = create_logs()
# Create service account key file
sa_key = iam.generate_service_account_key(f'{RESOURCE_PREFIX}-access')
print(f'Level creation complete for: {LEVEL_PATH}')
start_message = (
f'Use the compromised service account credentials stored in {RESOURCE_PREFIX}-access.json to find the credit card number of {secret_name}, '
'which is hidden somewhere in the GCP project')
levels.write_start_info(
LEVEL_PATH, start_message, file_name=f'{RESOURCE_PREFIX}-access.json', file_content=sa_key)
print(
f'Instruction for the level can be accessed at thunder-ctf.cloud/levels/{LEVEL_PATH}.html')
finally:
# If there is an error, make sure to delete the temporary repository before exiting
if os.path.exists(repo_path):
shutil.rmtree(repo_path)
def create_repo_files(repo_path, ssh_private_key):
cwd = os.getcwd()
os.makedirs(repo_path + '/function')
os.chdir(repo_path)
# Make dummy cloud function files
with open(repo_path+'/function/requirements.txt', 'w+') as f:
f.write('')
with open(repo_path+'/function/main.py', 'w+') as f:
f.write('')
# Add ssh key file
with open(repo_path+'/ssh_key', 'w+') as f:
f.write(ssh_private_key)
os.chmod('ssh_key', 0o700)
# Add files in first commit, then delete key in second
subprocess.call(['git', 'init', '--q'])
p = subprocess.Popen(['git', 'add', '*'])
p.communicate()
subprocess.call(['git', 'commit', '-q', '-m', 'added initial files', ])
os.remove('ssh_key')
p = subprocess.Popen(['git', 'add', '*'])
p.communicate()
subprocess.call(
['git', 'commit', '-q', '-m', 'Oops. Deleted accidental key upload'])
# Reset working directory
os.chdir(cwd)
def create_logs():
# Load list of framework names
with open(f'core/levels/{LEVEL_PATH}/first-names.txt') as f:
first_names = f.read().split('\n')
with open(f'core/levels/{LEVEL_PATH}/last-names.txt') as f:
last_names = f.read().split('\n')
# Randomly determine a name associated with the secret
secret_name = (first_names[random.randint(0, 199)] + '_' +
last_names[random.randint(0, 299)])
# Randomly determine an index of logging of the secret transaction
secret_position = random.randint(0, 99)
logger = glogging.Client().logger(LOG_NAME)
for i in range(0, 100):
# On secret index, log the transaction with the secret as the credit card number of the struct
if i == secret_position:
logger.log_struct(
{'name': secret_name,
'transaction-total': f'${random.randint(1,300)}.{random.randint(0,9)}{random.randint(0,9)}',
'credit-card-number': levels.make_secret(LEVEL_PATH, 16)})
else:
# For the other entities, determine a random name
name = (first_names[random.randint(0, 199)] + '_' +
last_names[random.randint(0, 299)])
# If the name is not equal to the secret name, log the transaction with a random credit card number
if not name == secret_name:
logger.log_struct(
{'name': name,
'transaction-total': f'${random.randint(1,150)}.{random.randint(1,99)}',
'credit-card-number': str(random.randint(1000000000000000, 9999999999999999))})
return secret_name.replace('_', ' ')
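# Player-side sketch (an assumption, not part of the level code): the planted
# entry can be recovered by filtering the project's logs, e.g.
#   gcloud logging read 'logName:transactions' --format=json
# and searching the returned payloads for the target name.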
def destroy():
print('Level tear-down started for: ' + LEVEL_PATH)
# Delete logs
client = glogging.Client()
if len([entry for entry in client.list_entries(filter_=f'logName:{LOG_NAME}')]) > 0:
logger = client.logger(LOG_NAME)
logger.delete()
# Delete starting files
levels.delete_start_files()
print('Level tear-down finished for: ' + LEVEL_PATH)
# Delete deployment
deployments.delete()
| 40.913669 | 152 | 0.641639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,286 | 0.401969 |
a8aef546c4ef288dfde3666fee31117f886756ad | 3,712 | py | Python | train.py | R4j4n/Travelling-salesman-problem-using-Genetic-Algorimthm | fe30575497d4851eb781225c66bd68ee22203d47 | [
"MIT"
] | 4 | 2020-11-19T12:41:33.000Z | 2021-02-24T03:08:32.000Z | train.py | R4j4n/Travelling-salesman-problem-using-Genetic-Algorimthm | fe30575497d4851eb781225c66bd68ee22203d47 | [
"MIT"
] | null | null | null | train.py | R4j4n/Travelling-salesman-problem-using-Genetic-Algorimthm | fe30575497d4851eb781225c66bd68ee22203d47 | [
"MIT"
] | 2 | 2020-11-24T01:01:53.000Z | 2021-02-18T16:10:04.000Z | import numpy as np
from environment import Environment
#~~~~~~~~CREATING THE BOTS~~~~~~~~~
class Route():
def __init__(self, dnaLength):
# dnaLength = num of planets........
self.dnaLength = dnaLength
self.dna = list()
self.distance = 0
# Initialize the random DNA
for i in range(self.dnaLength - 1):
rnd = np.random.randint(1, self.dnaLength)
while rnd in self.dna:
rnd = np.random.randint(1, self.dnaLength)
self.dna.append(rnd)
self.dna.append(0)
# Building The crossover method
def mix(self, dna1, dna2):
self.dna = dna1.copy()
for i in range(self.dnaLength - 1):
if np.random.rand() <= 0.5:
previous = self.dna[i]
inx = self.dna.index(dna2[i])
self.dna[inx] = previous
self.dna[i] = dna2[i]
# Random Partial Mutations:
# MUTATION 1
for i in range(self.dnaLength - 1 ):
if np.random.rand() <= 0.1:
previous = self.dna[i]
rnd = np.random.randint(1, self.dnaLength)
inx = self.dna.index(rnd)
self.dna[inx] = previous
self.dna[i] = rnd
# MUTATION : 2
elif np.random.rand() <= 0.1:
rnd = np.random.randint(1, self.dnaLength)
prevInx = self.dna.index(rnd)
self.dna.insert(i, rnd)
if i>= prevInx:
self.dna.pop(prevInx)
else:
self.dna.pop(prevInx + 1 )
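# Worked example of the swap-based crossover above (illustrative): with
# dna1 = [2, 1, 3, 0] and dna2 = [1, 3, 2, 0], copying dna2[0] = 1 into
# position 0 first swaps it with the 1 already present, giving [1, 2, 3, 0],
# so the child remains a valid permutation of the planets at every step.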
populationSize = 50
mutationRate = 0.1
nSelected = 5
env = Environment()
dnaLength = len(env.planets)
population = list()
# CREATING THE POPULATION
for i in range(populationSize):
route = Route(dnaLength)
population.append(route)
# Starting the main loop
generation = 0
bestDist = np.inf
while True:
generation += 1
# Evaluate the population(Fitness_Function)
for route in population:
env.reset()
for i in range(dnaLength):
action = route.dna[i]
# FITNESS
route.distance += env.step(action ,'none')
# Sorting The population
    sortedPop = sorted(population, key=lambda x: x.distance)
population.clear()
if sortedPop[0].distance < bestDist:
bestDist = sortedPop[0].distance
# Adding best previous bot to the population:
for i in range(nSelected):
best = sortedPop[i]
best.distance = 0
population.append(best)
# Fillinf the rest of the population:
left = populationSize - nSelected
for i in range(left):
newRoute = Route(dnaLength)
if np.random.rand() <= mutationRate:
population.append(newRoute)
else:
inx1 = np.random.randint(0 , nSelected)
inx2 = np.random.randint(0 , nSelected)
while inx1 == inx2:
inx2 = np.random.randint(0 , nSelected)
dna1 = sortedPop[inx1].dna
dna2 = sortedPop[inx2].dna
newRoute.mix(dna1,dna2)
population.append(newRoute)
# Displaying The Result:
env.reset()
for i in range(dnaLength):
action = sortedPop[0].dna[i]
_ = env.step(action , 'normal')
if generation % 100 == 0:
env.reset()
for i in range(dnaLength):
action = sortedPop[0].dna[i]
_ = env.step(action, 'beautiful')
print('Generation' + str(generation)+ 'Shortest distance: {:.2f}'.format(bestDist)+ 'light years')
| 24.421053 | 106 | 0.53556 | 1,598 | 0.430496 | 0 | 0 | 0 | 0 | 0 | 0 | 500 | 0.134698 |
a8afaad49a2d75bdb21b22f68fd79476052c0537 | 7,002 | py | Python | PerFrameObjectDetector.py | SunBangjie/smartphone_pairing | 633f80961be1a213e82077d2e5fd08f0cdf2453b | [
"MIT"
] | null | null | null | PerFrameObjectDetector.py | SunBangjie/smartphone_pairing | 633f80961be1a213e82077d2e5fd08f0cdf2453b | [
"MIT"
] | null | null | null | PerFrameObjectDetector.py | SunBangjie/smartphone_pairing | 633f80961be1a213e82077d2e5fd08f0cdf2453b | [
"MIT"
] | null | null | null | import cv2
import os
from os import listdir, makedirs
from os.path import isfile, join, exists
import numpy as np
import time
import math
DEBUG = True
FACTOR = 2
RESO_X = int(576 / FACTOR)
RESO_Y = int(640 / FACTOR)
CONF_VAL = 0
THRESHOLD = 0
UPPER_BOUND = 230
LOWER_BOUND = 150
def get_file_index(filename):
index = int(filename.split('.')[0])
return index
def create_windows():
cv2.namedWindow("RGB", cv2.WINDOW_NORMAL)
cv2.namedWindow("Depth", cv2.WINDOW_NORMAL)
cv2.resizeWindow("RGB", RESO_X, RESO_Y)
cv2.resizeWindow("Depth", RESO_X, RESO_Y)
def load_yolo(model_folder):
# load the COCO class labels our YOLO model was trained on
labelsPath = model_folder + "coco.names"
LABELS = open(labelsPath).read().strip().split("\n")
weightsPath = model_folder + "yolov3-spp.weights"
configPath = model_folder + "yolov3-spp.cfg"
print("[INFO] loading YOLO from disk...")
if DEBUG:
print("label: {}\nweights: {}\nconfig: {}".format(
labelsPath, weightsPath, configPath))
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
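    # Note: with OpenCV >= 4.5.4, getUnconnectedOutLayers() returns a flat
    # array, so the comprehension above would need ln[i - 1] instead.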
return net, ln, LABELS
def process_frame(frame, net, ln, LABELS):
# get frame height and width
(H, W) = frame.shape[:2]
# construct a blob from the input frame and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes
# and associated probabilities
blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
start_time = time.time()
layerOutputs = net.forward(ln)
duration = time.time() - start_time
if DEBUG:
print("[INFO] processed within {}s".format(round(duration, 2)))
# initialize our lists of detected bounding boxes, confidences,
# and class IDs, respectively
boxes = []
confidences = []
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability)
# of the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
# filter out weak predictions by ensuring the detected
# probability is greater than the minimum probability
if confidence > CONF_VAL and LABELS[classID] == "cell phone":
# scale the bounding box coordinates back relative to
# the size of the image, keeping in mind that YOLO
# actually returns the center (x, y)-coordinates of
# the bounding box followed by the boxes' width and
# height
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top
# and and left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
# update our list of bounding box coordinates and confidences
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
return boxes, confidences
def detect_object(experiment_name, save_images=False):
rgb_folder = "Experiment_Frames/" + experiment_name + "/rgb_frames/"
depth_folder = "Experiment_Frames/" + experiment_name + "/depth_frames/"
model_folder = "yolo-coco/"
output_folder = "Experiment_Output/" + experiment_name + "/"
# make the folders if not exist
if not exists(rgb_folder):
makedirs(rgb_folder)
if not exists(depth_folder):
makedirs(depth_folder)
if not exists(output_folder):
makedirs(output_folder)
if not exists(output_folder + 'depth/'):
makedirs(output_folder + 'depth/')
if not exists(output_folder + 'rgb/'):
makedirs(output_folder + 'rgb/')
# load rgb images
print("[INFO] loading rgb images from disk...")
img_files = [f for f in listdir(rgb_folder) if isfile(join(rgb_folder, f))]
img_files = sorted(img_files, key=get_file_index)
# load image net
net, ln, LABELS = load_yolo(model_folder)
out_file = open(output_folder + "/" + "positions.txt", "w")
# process each frame
for img_file in img_files:
if DEBUG:
print("[INFO] processing image {}".format(img_file))
# read rgb frame
frame = cv2.imread(rgb_folder + "/" + img_file, cv2.IMREAD_COLOR)
# read depth frame
depth = cv2.imread(depth_folder + "/" + img_file)
# rotate 90 degree for phone images
# frame = cv.rotate(frame, rotateCode=cv.ROTATE_90_CLOCKWISE)
# process using YOLO
boxes, confidences = process_frame(frame, net, ln, LABELS)
# suppress boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, CONF_VAL, THRESHOLD)
# ensure at least one detection exists
if len(idxs) > 0:
# get first box
i = idxs.flatten()[0]
# extract the bounding box coordinates
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
# draw a bounding box rectangle and label on the frame
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
cv2.rectangle(depth, (x, y), (x + w, y + h), (255, 0, 0), 2)
if save_images:
# display and save image
cv2.imshow("RGB", frame)
cv2.imwrite(output_folder +
"rgb/" + img_file, frame)
cv2.imshow("Depth", depth)
cv2.imwrite(output_folder +
"depth/" + img_file, depth)
# get centroid of the bouding box
centroid_x = x + int(w / 2)
centroid_y = y + int(h / 2)
# get average depth within the bounding box
            depth_pixels = depth[y: y+h, x: x+w, 0]  # numpy images index as [row(y), col(x)]
depth_pixels = depth_pixels.flatten()
mask = (depth_pixels > LOWER_BOUND) & (depth_pixels < UPPER_BOUND)
depth_pixels = depth_pixels[mask]
pixel_mean = np.mean(depth_pixels)
# save timestamp and position
if not math.isnan(pixel_mean):
timestamp = img_file.split('.')[0]
out_file.write("{},{},{},{}\n".format(
timestamp, centroid_x, centroid_y, round(pixel_mean, 4)
))
if DEBUG:
print("point is ({}, {}, {})".format(
centroid_x, centroid_y, round(pixel_mean, 4)))
key = cv2.waitKey(50)
if key != -1:
cv2.destroyAllWindows()
break
out_file.close()
| 35.363636 | 79 | 0.59126 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,988 | 0.283919 |
a8b12924de45222461a00e3c63814bfcde7cffa6 | 25,138 | py | Python | example/nsls2id.py | NSLS-II/aphla | ceb5410dc836a8fb16321b6dc5e10d442be765c5 | [
"BSD-3-Clause"
] | null | null | null | example/nsls2id.py | NSLS-II/aphla | ceb5410dc836a8fb16321b6dc5e10d442be765c5 | [
"BSD-3-Clause"
] | 1 | 2020-02-17T18:56:18.000Z | 2020-02-20T17:06:20.000Z | example/nsls2id.py | NSLS-II/aphla | ceb5410dc836a8fb16321b6dc5e10d442be765c5 | [
"BSD-3-Clause"
] | 1 | 2021-03-08T16:07:11.000Z | 2021-03-08T16:07:11.000Z | """
NSLS-II insertion device commissioning/operation
copyright (C) 2014, Yongjun Li, Yoshi Hidaka, Lingyun Yang
"""
import aphla as ap
import itertools
import numpy as np
import re
import h5py
import time
from datetime import datetime
_params = {
"dw100g1c08u":
{"unitsys": "phy",
"gap": (119.0, 147.0, 30, 0.1),
"cch": ("cch0", "cch1", "cch2", "cch3", "cch4", "cch5"),
"background": {"gap": 147.0},
"Imin": 2.0, # mA
"Tmin": 2.0, # hour
"timeout": 150, },
"dw100g1c08d":
{"unitsys": "phy",
"gap": (119.0, 147.0, 30, 0.1),
"cch": ("cch0", "cch1", "cch2", "cch3", "cch4", "cch5"),
"background": {"gap": 147.0},
"Imin": 2.0, # mA
"Tmin": 2.0, # hour
"timeout": 150, },
"dw100g1c18u":
{"unitsys": "phy",
"gap": (119.0, 147.0, 30, 0.1),
"cch": ("cch0", "cch1", "cch2", "cch3", "cch4", "cch5"),
"background": {"gap": 147.0},
"Imin": 2.0, # mA
"Tmin": 2.0, # hour
"timeout": 150, },
"dw100g1c18d":
{"unitsys": "phy",
"gap": (119.0, 147.0, 30, 0.1),
"cch": ("cch0", "cch1", "cch2", "cch3", "cch4", "cch5"),
"background": {"gap": 147.0},
"Imin": 2.0, # mA
"Tmin": 2.0, # hour
"timeout": 150, },
"dw100g1c28u":
{"unitsys": "phy",
"gap": (119.0, 147.0, 30, 0.1),
"cch": ("cch0", "cch1", "cch2", "cch3", "cch4", "cch5"),
"background": {"gap": 147.0},
"Imin": 2.0, # mA
"Tmin": 2.0, # hour
"timeout": 150, },
"dw100g1c28d":
{"unitsys": "phy",
"gap": (119.0, 147.0, 30, 0.1),
"cch": ("cch0", "cch1", "cch2", "cch3", "cch4", "cch5"),
"background": {"gap": 147.0},
"Imin": 2.0, # mA
"Tmin": 2.0, # hour
"timeout": 150, },
}
def getBrho(E_GeV):
"""
"""
import scipy.constants as const
m_e_GeV = const.m_e*const.c*const.c/const.e/1e9
gamma = E_GeV / m_e_GeV
Brho = const.m_e * const.c * gamma / const.e # magnetic rigidity [T*m]
return Brho
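# Quick check (illustrative): for E_GeV = 3.0 (NSLS-II) this gives
# Brho ~= 3e9 / 2.998e8 ~= 10.007 T*m, consistent with Brho ~ E/(e*c)
# in the ultrarelativistic limit.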
def putPar(ID, parList, **kwargs):
"""
Put (write) a set of parameters (list) on an ID while the hardware
itself (motor control) checks whether the target state is reached or not.
inputs:
ID: aphla ID instance
parList: 2d parameter list in the format of [name, value, tolerance]
[['gap',15,1e-4],['phase',12,1e-4]]
timeout: Maximum time the motor control should wait for each "put"
in the unit of seconds.
verbose: integer larger means more details.
throw: raise exception if True, otherwise return False
returns: True if success, otherwise False
"""
timeout = kwargs.get("timeout", _params[ID.name].get("timeout", 150))
unitsys = kwargs.get("unitsys", _params[ID.name].get("unitsys", 'phy'))
throw = kwargs.get("throw", True)
verbose = kwargs.get("verbose", 0)
agree = True
for par in parList:
ID.put(par[0], par[1], timeout=timeout, unitsys=unitsys, trig=1)
p0 = ID.get(par[0], unitsys=unitsys)
if abs(p0-par[1]) <= par[2]:
continue
# error handling
agree = False
if verbose:
print 'For "{0}" of {1}:'.format(par[0], ID.name)
print 'Target SP = {0:.9g}, Current RB = {1:.9g}, Tol = {2:.9g}'.\
format(par[1], p0, par[2])
if throw:
raise RuntimeError('Failed to set device within tolerance.')
else:
break
return agree
def createCorrectorField(ID):
return [(ID, fld) for fld in _params[ID.name].get("cch", [])]
def createParList(ID, parScale):
"""
    create parameter list based on the parameter range, spaced type
parRange: 2d parameter range in the format of
[[name, spacedType, start, end, step, tolerance],...]
example:[['gap','log',150,15,21,0.1]]
scan table will cover 15~150 with 21 steps, tolerance is 0.1,
spacedType: log or linear
return parameter list for communicating with hardware, table for data
archive
"""
nlist,vlist,tlist = [],[],[] #name, value and tolerance list
for fld, scale in parScale:
if not _params[ID.name].get(fld, None): continue
nlist.append(fld)
vmin, vmax, vstep, vtol = _params[ID.name][fld]
if scale == 'linear':
vlist.append(list(np.linspace(vmin, vmax, int(vstep))))
elif scale == 'log':
if vmin<=0 or vmax<=0:
raise RuntimeError('negative boundary can not be spaced Logarithmically')
else:
vlist.append(list(np.logspace(np.log10(vmin),np.log10(vmax),int(vstep))))
else:
            raise RuntimeError('unknown spaced pattern: %s' % scale)
tlist.append(vtol)
valueList = itertools.product(*vlist)
parList = []
for v in valueList:
tmp = []
for i,n in enumerate(nlist):
tmp.append([n,v[i],tlist[i]])
parList.append(tmp)
valueList = itertools.product(*vlist)
table = np.array([vi for vi in valueList])
return parList, nlist, table
def putParHardCheck(ID, parList, timeout=30, throw=True, unitsys='phy'):
'''
Put (write) a set of parameters (list) on an ID while the hardware
itself (motor control) checks whether the target state is reached or not.
ID: aphla ID instance
parList: 2d parameter list in the format of [name, value, tolerance]
[['gap',15,1e-4],['phase',12,1e-4]]
timeout: Maximum time the motor control should wait for each "put"
in the unit of seconds.
return: True if success, otherwise throws an exception.
'''
agree = True
for par in parList:
ID.put(par[0], par[1], timeout=timeout, unitsys=unitsys, trig=1)
# raw unit for "gap" = [um]
# raw unit for "phase" = [um?]
p0 = ID.get(par[0], unitsys=unitsys)
if abs(p0-par[1]) <= par[2]: # TODO: readback & setpoint unit may be different! Check it!
continue # print "Agree: ", p0, par[1], "eps=", par[2]
# error handling
agree = False
print 'For "{0}" of {1}:'.format(par[0], ID.name)
print 'Target Setpoint = {0:.9g}, Current Readback = {1:.9g}, Tolerance = {2:.9g}'.format(
par[1], p0, par[2])
if throw:
raise RuntimeError('Failed to set device within tolerance.')
else:
break
return agree
# <codecell>
def putParSoftCheck(ID, parList, timeout=30, online=False):
'''
Put (write) a set of parameters (list) on an ID while this function
checks whether the target state is reached or not through readbacks
for given tolerances.
ID: aphla ID instance
parList: 2d parameter list in the format of [name, value, tolerance]
[['gap',15,1e-4],['phase',12,1e-4]]
timeout: Maximum time the motor control should wait for each "put"
in the unit of seconds.
return: True if success, otherwise throws an exception.
'''
if not online: return True # TODO: To be reomved once we are allowed to move ID motors
for par in parList:
t0 = datetime.now()
converged = False
try:
ID.put(par[0], par[1], unitsys=None) # raw unit
except:
print 'Failed to set the setpoint for {0} to {1}'.format(par[0], par[1])
raise
# TODO: remove hardcoding
ap.caput("SR:C28-ID:G1{DW100:2}ManG:Go_.PROC", 1, wait=False)
while not converged:
p0 = ID.get(par[0], unitsys=None)
if abs(p0-par[1]) <= par[2]: # TODO: readback & setpoint unit may be different! Check it!
# print "Agree: ", p0, par[1], "eps=", par[2]
converged = True
break
t1 = datetime.now()
if (t1-t0).total_seconds() > timeout:
break
time.sleep(0.5)
if not converged:
raise RuntimeError("timeout at setting {0}={1} (epsilon={2})".format(par[0], par[1], par[2]))
return True
def putBackground(ID, **kwargs):
"""
put ID to passive status,
gap to max, phase to 0 if apply, all correction cch to zeros
"""
gapMin, gapMax, gapStep, gapTol = kwargs.get("gap",
_params[ID.name]["gap"])
phaseMin, phaseMax, phaseStep, phaseTol = \
kwargs.get("phase", _params[ID.name].get("phase", (None, None, None, None)))
zeroPhase = 0.0
timeout = kwargs.get("timeout", 150)
throw = kwargs.get("throw", True)
unitsys = kwargs.get("unitsys", 'phy')
verbose = kwargs.get("verbose", 0)
flds = ID.fields()
parList = []
if 'gap' in flds:
parList.append(['gap',gapMax,gapTol])
if 'phase' in flds:
parList.append(['phase',zeroPhase,phaseTol])
if putPar(ID, parList, timeout=timeout,
throw=throw, unitsys=unitsys, verbose=verbose):
# put correcting coils to zeros
for i in range(len(ID.cch)):
ID.put('cch'+str(i), 0.0, unitsys=None)
return True
else:
return False
def checkBeam(Imin=2, Tmin=2, online=False):
"""
check beam life time and current
if beam lifetime is less than Tmin [hr], 2hrs by default,
or current is less then Imin [mA], 2mA by default
return False, otherwise True
"""
tau, Ib = ap.getLifetimeCurrent()
if Ib < Imin:
print 'Beam current too low ({0} < {1})'.format(Ib, Imin)
return False
if tau < Tmin:
print 'Beam lifetime too short ({0} < {1})'.format(tau, Tmin)
return False
return True
def checkGapPhase(ID, **kwargs):
"""
check ID gap, phase
return True if success, otherwise False
"""
gapMin, gapMax, gapStep, gapTol = kwargs.get("gap",
_params[ID.name]["gap"])
phaseMin, phaseMax, phaseStep, phaseTol = \
kwargs.get("phase", _params[ID.name].get("phase", (None, None, None, None)))
timeout = kwargs.get("timeout", 150)
throw = kwargs.get("throw", True)
unitsys = kwargs.get("unitsys", _params[ID.name]["unitsys"])
verbose = kwargs.get("verbose", 0)
gapStep = kwargs.get("gapStep", gapStep)
phaseStep = kwargs.get("phaseStep", phaseStep)
flds = ID.fields()
if 'gap' in flds:
for gap in np.linspace(gapMin, gapMax, gapStep):
gapList = [['gap',gap, gapTol]]
gapStatus = putPar(ID,gapList,timeout=timeout,
throw=throw,unitsys=unitsys,verbose=verbose)
if not gapStatus:
return False
if 'phase' in flds:
for phase in np.linspace(phaseMin,phaseMax,phaseStep):
phaseList = [['phase',phase,phaseTol]]
phaseStatus = putPar(ID,phaseList,timeout=timeout,
throw=throw,unitsys=unitsys,verbose=verbose)
if not phaseStatus:
return False
return True
def switchFeedback(fftable = "off"):
"""
switchFeedback("on") or "off"
"""
if fftable not in ["on", "off"]:
raise RuntimeError("invalid feed forward table state: ('on'|'off')")
for dw in ap.getGroupMembers(["DW",], op="union"):
if "gap" not in dw.fields():
print "WARNING: no 'gap' field in {0}".format(dw.name)
continue
pv = dw.pv(field="gap", handle="setpoint")[0]
m = re.match(r"([^\{\}]+)\{(.+)\}", pv)
if not m:
print "WARNING: inconsistent naming '{0}'".format(pv)
pvffwd = "{0}{{{1}}}MPS:Lookup_.INPA".format(m.group(1), m.group(2))
pvffwd_pref = "{0}{{{1}-Mtr:Gap}}.RBV ".format(m.group(1), m.group(2))
pvffwd_val = {"on": pvffwd_pref + "CP NM",
"off": pvffwd_pref + "NPP N"}
print "set {0}='{1}'".format(pvffwd, pvffwd_val[fftable])
ap.caput(pvffwd, pvffwd_val[fftable])
# fast/slow co
# all ID feed forward
# weixing Bunch by Bunch
def initFile(ID, fieldList, parTable):
"""initilize file name with path, save parameter table to hdf5"""
fileName = ap.outputFileName("ID", ID.name+"_")
fid = h5py.File(fileName)
grp = fid.require_group(ID.name)
grp.attrs["__FORMAT__"] = 1
# setup parameters
subg = grp.require_group("parameters")
subg["scanTable"] = parTable #
subg["scanTable"].attrs["columns"] = fieldList
#for p in nameList:
# subg["scanTable"].attrs[p] = []
bkg = _params[ID.name]["background"]
# like one row of scanTable, same columns
subg["background"] = [bkg[fld] for fld in fieldList]
subg["background"].attrs["columns"] = fieldList
# timestamp ISO "2007-03-01 13:00:00"
subg["minCurrent"] = _params[ID.name]["Imin"]
subg["minCurrent"].attrs["unit"] = "mA"
subg["minLifetime"] = _params[ID.name]["Tmin"]
subg["minLifetime"].attrs["unit"] = "hr"
fid.close()
return fileName
def chooseBpmCor(ID, userBpm=False):
    """
    choose bpm and corrector fields
    """
    bpms = ap.getElements('BPM')
    if userBpm:
        bpms += ap.getElements('UBPM')
    bpmFields = []
    for bpm in bpms:
        bpmFields.append([bpm, 'x'])
        bpmFields.append([bpm, 'y'])
    corFields = []
    for i in range(len(ID.cch)):
        corFields.append([ID, 'cch%i' % i])
    return bpmFields, corFields
def saveToDB(fileName):
print "save to file (Guobao's DB)"
pass
def measBackground(ID, output, iiter):
"""measure the background and return saved group name"""
    if not putBackground(ID):
print "Failed at setting {0} to background mode".format(ID)
return None
# create background subgroup with index
fid = h5py.File(output)
prefix = "iter_"
iterindex = max([int(g[len(prefix):]) for g in fid[ID.name].keys()
if g.startswith(prefix)] + [-1]) + 1
bkgGroup = "iter_{0:04d}".format(iterindex)
grp = fid[ID.name].create_group(bkgGroup)
orb0 = ap.getOrbit(spos=True)
grp["orbit"] = orb0
tau, I = ap.getLifetimeCurrent()
grp["lifetime"] = tau
grp["current"] = I
grp.attrs["iter"] = iiter
fid.close()
return bkgGroup
def virtKicks2FldInt(virtK1, virtK2, idLen, idKickOffset1, idKickOffset2, E_GeV):
"""
Calculate the 1st and 2nd field integrals of an insertion device (ID)
from the given upstream/downstream virtual kicks.
Parameters
----------
virtK1, virtK2 : float
Virtual kick values [rad] at the upsteam and downstream of the ID,
respectively.
idLen : float
Length of the ID [m].
idKickOffset1, idKickOffset2 : float
Position offset [m] of virtual kicks with respect to the undulator
extremeties. `idKickOffset1` == 0 means that the upstream virtual kick
is exactly located at the upstream entrance of the ID. If `idKickOffset1`
is a positive value, then the virtual kick is inside of the ID by the
amount `idKickOffset1`. If negative, the virtual kick is outside of the
ID by the absolute value of `idKickOffset1`. The same is true for the
downstream side.
E_GeV : float
Electron beam energy [GeV].
Returns
-------
I1 : float
First field integral [G*m].
I2 : float
Second field integral [G*(m^2)].
"""
Brho = getBrho(E_GeV) # magnetic rigidity [T*m]
common = Brho * 1e4
I1 = common * (virtK1 + virtK2) # [G*m]
I2 = common * ((idLen-idKickOffset1)*virtK1 + idKickOffset2*virtK2) # [G*(m^2)]
return I1, I2
# <codecell>
def fldInt2VirtKicks(I1, I2, idLen, idKickOffset1, idKickOffset2, E_GeV):
"""
Calculate upstream/downstream virtual kicks from the given 1st and 2nd field
integrals of an insertion device (ID).
Parameters
----------
I1 : float
First field integral [G*m].
I2 : float
Second field integral [G*(m^2)].
idLen : float
Length of the ID [m].
idKickOffset1, idKickOffset2 : float
Position offset [m] of virtual kicks with respect to the undulator
extremeties. `idKickOffset1` == 0 means that the upstream virtual kick
is exactly located at the upstream entrance of the ID. If `idKickOffset1`
is a positive value, then the virtual kick is inside of the ID by the
amount `idKickOffset1`. If negative, the virtual kick is outside of the
ID by the absolute value of `idKickOffset1`. The same is true for the
downstream side.
E_GeV : float
Electron beam energy [GeV].
Returns
-------
virtK1, virtK2 : float
        Virtual kick values [rad] at the upstream and downstream of the ID,
respectively.
"""
Brho = getBrho(E_GeV) # magnetic rigidity [T*m]
common = 1e-4 / Brho / (idLen-idKickOffset1-idKickOffset2)
virtK1 = common * (I2 - I1 * idKickOffset2) # [rad]
virtK2 = common * (I1 * (idLen-idKickOffset1) - I2) # [rad]
return virtK1, virtK2
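# Consistency sketch (illustrative numbers): the two conversions above are
# inverses of each other, e.g.
#   K1, K2 = fldInt2VirtKicks(50.0, 100.0, 3.4, 0.0, 0.0, 3.0)
#   virtKicks2FldInt(K1, K2, 3.4, 0.0, 0.0, 3.0)  # -> (50.0, 100.0)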
def save1DFeedFowardTable(filepath, table, fmt='%.16e'):
"""
Save a valid 1-D Stepped Feedforward table (NSLS-II format) to a text file.
"""
np.savetxt(filepath, table, fmt=fmt, delimiter=', ', newline='\n')
def get1DFeedForwardTable(centers, half_widths, dI_array,
I0_array=None, fmt='%.16e'):
"""
Get a valid 1-D Stepped Feedforward table (NSLS-II format)
"""
if I0_array is None:
I_array = dI_array
else:
I_array = I0_array + dI_array
table = np.hstack((np.array(centers).reshape((-1,1)),
np.array(half_widths).reshape((-1,1)),
I_array))
return table
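# Resulting layout, one row per lookup bin (from the hstack above):
#   [center, half_width, I_cor1, ..., I_corN]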
def getZeroed1DFeedForwardTable(parDict, nIDCor):
"""
Get a valid 1-D Stepped Feedforward table (NSLS-II format) with
all ID correctors being set to zero for all the entire range of
ID property specified in "parDict".
"""
try:
scanVectors = parDict['vectors']
bkgList = parDict['bkgTable'].flatten().tolist()
assert len(scanVectors) == len(bkgList) == 1
except:
print 'len(scanVectors) = {0:d}'.format(len(scanVectors))
print 'len(bkgList) = {0:d}'.format(len(bkgList))
print 'This function is only for 1D feedforward table.'
raise RuntimeError(('Lengths of "scanVectors" and "bkgList" must be 1.'))
array = scanVectors[0] + [bkgList[0]]
minVal, maxVal = np.min(array), np.max(array)
centers = [(minVal + maxVal) / 2.0]
half_widths = [(maxVal - minVal) / 2.0 * 1.01] # Extra margin of 1% added
dI_array = np.array([0.0]*nIDCor).reshape((1,-1))
return get1DFeedForwardTable(centers, half_widths, dI_array,
I0_array=None, fmt='%.16e')
def create1DFeedForwardTable(centers, half_widths, dI_array, I0_array=None):
"""
Create a valid 1-D Stepped Feedforward table (NSLS-II format)
"""
if I0_array is None:
I_array = dI_array
else:
I_array = I0_array + dI_array
table = np.hstack((np.array(centers).reshape((-1,1)),
np.array(half_widths).reshape((-1,1)),
I_array))
return table
def calc1DFeedForwardColumns(
ID_filepath, n_interp_pts=None, interp_step_size=None, step_size_unit=None,
cor_inds_ignored=None, bpm_inds_ignored=None, nsv=None):
"""
"""
# TODO: Make sure all the units are correct in the generated table
# Gap & interval are in microns => [um]
# Currents in ppm of 10 Amps => [10uA]
compIterInds = getCompletedIterIndexes(ID_filepath)
nCompletedIter = len(compIterInds)
f = h5py.File(ID_filepath, 'r')
ID_name = f.keys()[0]
grp = f[ID_name]
meas_state_1d_array = grp['parameters']['scanTable'].value
state_unitsymb = grp['parameters']['scanTable'].attrs['unit'] # TODO: need unit conversion
nIter, ndim = meas_state_1d_array.shape
if ndim != 1:
f.close()
raise NotImplementedError('Only 1-D scan has been implemented.')
if nCompletedIter != nIter:
print '# of completed scan states:', nCompletedIter
print '# of requested scan states:', nIter
f.close()
raise RuntimeError('You have not scanned all specified states.')
meas_state_1d_array = meas_state_1d_array.flatten()
state_min = np.min(meas_state_1d_array)
state_max = np.max(meas_state_1d_array)
if (n_interp_pts is not None) and (interp_step_size is not None):
f.close()
raise ValueError(('You can only specify either one of "n_interp_pts" '
'or "interp_step_size", not both.'))
elif n_interp_pts is not None:
interp_state_1d_array = np.linspace(state_min, state_max, n_interp_pts)
elif interp_step_size is not None:
interp_state_1d_array = np.arange(state_min, state_max, interp_step_size)
if interp_state_1d_array[-1] != state_max:
interp_state_1d_array = np.array(interp_state_1d_array.tolist()+
[state_max])
else:
interp_state_1d_array = meas_state_1d_array
M_list = [None]*nIter
diff_orb_list = [None]*nIter
for k in grp.keys():
if k.startswith('iter_'):
iIter = grp[k].attrs['iteration']
M_list[iIter] = grp[k]['orm']['m'].value
orb = grp[k]['orbit'].value
bkgGroup = grp[k].attrs['background']
orb0 = grp[bkgGroup]['orbit'].value[:,:-1] # Ignore s-pos column
diff_orb_list[iIter] = orb - orb0
f.close()
interp_state_1d_array = np.sort(interp_state_1d_array)
center_list = interp_state_1d_array.tolist()
half_width_list = (np.diff(interp_state_1d_array)/2.0).tolist()
half_width_list.append(center_list[-1]-center_list[-2]-half_width_list[-1])
dI_list = []
for M, diff_orb in zip(M_list, diff_orb_list):
TF = np.ones(diff_orb.shape)
if bpm_inds_ignored is not None:
for i in bpm_inds_ignored:
TF[i,:] = 0
TF = TF.astype(bool)
diff_orb_trunc = diff_orb[TF].reshape((-1,2))
# Reverse sign to get desired orbit change
dObs = (-1.0)*diff_orb_trunc.T.flatten().reshape((-1,1))
TF = np.ones(M.shape)
if cor_inds_ignored is not None:
for i in cor_inds_ignored:
TF[:,i] = 0
if bpm_inds_ignored is not None:
nBPM = M.shape[0]/2
try:
assert nBPM*2 == M.shape[0]
except:
raise ValueError('Number of rows for response matrix must be 2*nBPM.')
for i in bpm_inds_ignored:
TF[i ,:] = 0
TF[i+nBPM,:] = 0
TF = TF.astype(bool)
M_trunc = M[TF].reshape((dObs.size,-1))
U, sv, V = np.linalg.svd(M_trunc, full_matrices=0, compute_uv=1)
S_inv = np.linalg.inv(np.diag(sv))
if nsv is not None:
S_inv[nsv:, nsv:] = 0.0
dI = V.T.dot(S_inv.dot(U.T.dot(dObs))).flatten().tolist()
if cor_inds_ignored is not None:
for i in cor_inds_ignored:
# Set 0 Amp for unused correctors
dI.insert(i, 0.0)
dI_list.append(dI)
dI_array = np.array(dI_list)
nCor = dI_array.shape[1]
interp_dI_array = np.zeros((interp_state_1d_array.size, nCor))
for i in range(nCor):
interp_dI_array[:,i] = np.interp(
interp_state_1d_array, meas_state_1d_array, dI_array[:,i])
return {'centers': np.array(center_list),
'half_widths': np.array(half_width_list),
'raw_dIs': dI_array, 'interp_dIs': interp_dI_array}
#----------------------------------------------------------------------
def getCompletedIterIndexes(ID_filepath):
"""
"""
f = h5py.File(ID_filepath, 'r')
ID_name = f.keys()[0]
grp = f[ID_name]
completed_iter_indexes = []
for k in grp.keys():
if k.startswith('iter_') and grp[k].attrs.has_key('completed'):
            # h5py attribute reads return the value itself (cf. the same read
            # without '.value' in calc1DFeedForwardColumns above)
            completed_iter_indexes.append(grp[k].attrs['iteration'])
f.close()
if completed_iter_indexes != []:
if not np.all(np.diff(completed_iter_indexes) == 1):
raise RuntimeError(
'List of completed iteration indexes has some missing indexes.')
if np.min(completed_iter_indexes) != 0:
raise RuntimeError('List of completed iteration indexes does not start from 0.')
return completed_iter_indexes
if __name__ == '__main__':
ID_filepath = '/epics/data/aphla/SR/2014_09/ID/dw100g1c08u_2014_09_24_142644.hdf5'
d = calc1DFeedForwardColumns(ID_filepath, interp_step_size=1.0,
cor_inds_ignored=[2,3],
bpm_inds_ignored=None, nsv=None)
table = create1DFeedForwardTable(d['centers'], d['half_widths'],
d['interp_dIs'])
save1DFeedFowardTable('test_ff.txt', table, fmt='%.16e')
| 33.383798 | 105 | 0.584533 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,484 | 0.377277 |

# ---- File: Tests/DimensionService.py (repo: lapstue/tm1py, license: MIT) ----
import configparser
import unittest
from pathlib import Path
from TM1py.Objects import Dimension, Hierarchy, Element
from TM1py.Objects import ElementAttribute
from TM1py.Services import TM1Service
class TestDimensionService(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
Establishes a connection to TM1 and creates objects to use across all tests
"""
# Connection to TM1
cls.config = configparser.ConfigParser()
cls.config.read(Path(__file__).parent.joinpath('config.ini'))
cls.tm1 = TM1Service(**cls.config['tm1srv01'])
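        # Hedged sketch of the config.ini read above (section name matches the
        # lookup; the keys/values are illustrative assumptions, not from the
        # repo):
        #
        #   [tm1srv01]
        #   address=localhost
        #   port=8001
        #   user=admin
        #   password=apple
        #   ssl=True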
cls.prefix = "TM1py_Tests_Dimension_"
cls.dimension_name = cls.prefix + "Some_Dimension"
cls.hierarchy_name = cls.dimension_name
cls.dimension_name_with_multi_hierarchy = cls.prefix + "Dimension_With_Multiple_Hierarchies"
@classmethod
def setUp(cls):
cls.create_dimension()
@classmethod
def tearDown(cls):
cls.delete_dimensions()
@classmethod
def create_dimension(cls):
root_element = Element(name='Root', element_type='Consolidated')
elements = [root_element]
edges = {}
for i in range(1, 1001):
element_name = "Element {}".format(i)
elements.append(Element(name=element_name, element_type='Numeric'))
edges[('Root', element_name)] = i
element_attributes = [
ElementAttribute(name='Name Long', attribute_type='Alias'),
ElementAttribute(name='Name Short', attribute_type='Alias')]
h = Hierarchy(
name=cls.dimension_name,
dimension_name=cls.dimension_name,
elements=elements,
edges=edges,
element_attributes=element_attributes)
d = Dimension(name=cls.dimension_name, hierarchies=[h])
cls.tm1.dimensions.create(d)
@classmethod
def create_dimension_with_multiple_hierarchies(cls):
dimension = Dimension(cls.dimension_name_with_multi_hierarchy)
dimension.add_hierarchy(
Hierarchy(
name="Hierarchy1",
dimension_name=dimension.name,
elements=[Element("Elem1", "Numeric"), Element("Elem2", "Numeric"), Element("Elem3", "Numeric")]))
dimension.add_hierarchy(
Hierarchy(
name="Hierarchy2",
dimension_name=dimension.name,
elements=[Element("Elem1", "Numeric"), Element("Elem2", "Numeric"), Element("Elem3", "Numeric")]))
dimension.add_hierarchy(
Hierarchy(
name="Hierarchy3",
dimension_name=dimension.name,
elements=[Element("Elem1", "Numeric"), Element("Elem2", "Numeric"), Element("Elem3", "Numeric")]))
cls.tm1.dimensions.create(dimension)
@classmethod
def delete_dimensions(cls):
cls.tm1.dimensions.delete(cls.dimension_name)
if cls.tm1.dimensions.exists(cls.dimension_name_with_multi_hierarchy):
cls.tm1.dimensions.delete(cls.dimension_name_with_multi_hierarchy)
def test_get_dimension(self):
d = self.tm1.dimensions.get(dimension_name=self.dimension_name)
self.assertIsInstance(d, Dimension)
self.assertEqual(d.name, self.dimension_name)
h = d.hierarchies[0]
self.assertIsInstance(h, Hierarchy)
self.assertEqual(h.name, self.dimension_name)
self.assertEqual(len(h.elements), 1001)
self.assertEqual(len(h.edges), 1000)
def test_dimension__get__(self):
d = self.tm1.dimensions.get(dimension_name=self.dimension_name)
h = d[self.dimension_name]
self.assertIsInstance(h, Hierarchy)
self.assertEqual(h.name, self.dimension_name)
self.assertEqual(len(h.elements), 1001)
self.assertEqual(len(h.edges), 1000)
def test_dimension__contains__(self):
d = self.tm1.dimensions.get(dimension_name=self.dimension_name)
self.assertIn(self.dimension_name, d)
def test_dimension__iter__(self):
d = self.tm1.dimensions.get(dimension_name=self.dimension_name)
first_hierarchy = next(h for h in d)
self.assertIsInstance(first_hierarchy, Hierarchy)
self.assertEqual(first_hierarchy.name, self.dimension_name)
self.assertEqual(len(first_hierarchy.elements), 1001)
self.assertEqual(len(first_hierarchy.edges), 1000)
def test_dimension__len__(self):
d = self.tm1.dimensions.get(dimension_name=self.dimension_name)
self.assertEqual(len(d), 1)
def test_update_dimension(self):
# get dimension from tm1
d = self.tm1.dimensions.get(dimension_name=self.dimension_name)
# create element objects
elements = [Element(name='e1', element_type='Consolidated'),
Element(name='e2', element_type='Numeric'),
Element(name='e3', element_type='Numeric'),
Element(name='e4', element_type='Numeric')]
# create edge objects
edges = {
('e1', 'e2'): 1,
('e1', 'e3'): 1,
('e1', 'e4'): 1}
# create the element_attributes objects
element_attributes = [ElementAttribute(name='Name Long', attribute_type='Alias'),
ElementAttribute(name='Name Short', attribute_type='Alias'),
ElementAttribute(name='Currency', attribute_type='String')]
# create hierarchy object
hierarchy = Hierarchy(name=self.dimension_name, dimension_name=self.dimension_name, elements=elements,
element_attributes=element_attributes, edges=edges)
# replace existing hierarchy with new hierarchy
d.remove_hierarchy(self.dimension_name)
d.add_hierarchy(hierarchy)
# update dimension in TM1
self.tm1.dimensions.update(d)
# Test
dimension = self.tm1.dimensions.get(self.dimension_name)
self.assertEqual(len(dimension.hierarchies[0].elements), len(elements))
def test_update_dimension_remove_hierarchy(self):
self.create_dimension_with_multiple_hierarchies()
dimension = self.tm1.dimensions.get(self.dimension_name_with_multi_hierarchy)
self.assertEqual(dimension.hierarchy_names, ['Hierarchy1', 'Hierarchy2', 'Hierarchy3', 'Leaves'])
dimension.remove_hierarchy('Hierarchy2')
dimension.remove_hierarchy('Hierarchy3')
self.tm1.dimensions.update(dimension)
dimension = self.tm1.dimensions.get(self.dimension_name_with_multi_hierarchy)
self.assertEqual(dimension.hierarchy_names, ['Hierarchy1', 'Leaves'])
def test_get_all_names(self):
self.assertIn(self.dimension_name, self.tm1.dimensions.get_all_names())
def test_get_number_of_dimensions(self):
number_of_dimensions = self.tm1.dimensions.get_number_of_dimensions()
self.assertIsInstance(number_of_dimensions, int)
def test_execute_mdx(self):
mdx = "{TM1SubsetAll(" + self.dimension_name + ")}"
elements = self.tm1.dimensions.execute_mdx(self.dimension_name, mdx)
self.assertEqual(len(elements), 1001)
mdx = "{ Tm1FilterByLevel ( {TM1SubsetAll(" + self.dimension_name + ")}, 0) }"
elements = self.tm1.dimensions.execute_mdx(self.dimension_name, mdx)
self.assertEqual(len(elements), 1000)
for element in elements:
self.assertTrue(element.startswith("Element"))
def test_hierarchy_names(self):
# create dimension with two Hierarchies
self.create_dimension_with_multiple_hierarchies()
dimension = self.tm1.dimensions.get(dimension_name=self.dimension_name_with_multi_hierarchy)
self.assertEqual(
set(dimension.hierarchy_names),
{"Leaves", "Hierarchy1", "Hierarchy2", "Hierarchy3"})
dimension.remove_hierarchy("Hierarchy1")
self.assertEqual(
set(dimension.hierarchy_names),
{"Leaves", "Hierarchy2", "Hierarchy3"})
def test_remove_leaves_hierarchy(self):
# create dimension with two Hierarchies
self.create_dimension_with_multiple_hierarchies()
dimension = self.tm1.dimensions.get(dimension_name=self.dimension_name_with_multi_hierarchy)
try:
dimension.remove_hierarchy("LEAVES")
raise Exception("Did not throw expected Exception")
except ValueError:
pass
def test_remove_hierarchy(self):
# create dimension with two Hierarchies
self.create_dimension_with_multiple_hierarchies()
dimension = self.tm1.dimensions.get(dimension_name=self.dimension_name_with_multi_hierarchy)
self.assertEqual(len(dimension.hierarchies), 4)
self.assertIn("Hierarchy1", dimension)
self.assertIn("Hierarchy2", dimension)
self.assertIn("Hierarchy3", dimension)
self.assertIn("Leaves", dimension)
dimension.remove_hierarchy("Hierarchy1")
self.tm1.dimensions.update(dimension)
dimension = self.tm1.dimensions.get(dimension_name=self.dimension_name_with_multi_hierarchy)
self.assertEqual(len(dimension.hierarchies), 3)
self.assertNotIn("Hierarchy1", dimension)
self.assertIn("Hierarchy2", dimension)
self.assertIn("Hierarchy3", dimension)
self.assertIn("Leaves", dimension)
dimension.remove_hierarchy("H i e r a r c h y 3".upper())
self.tm1.dimensions.update(dimension)
dimension = self.tm1.dimensions.get(dimension_name=self.dimension_name_with_multi_hierarchy)
self.assertEqual(len(dimension.hierarchies), 2)
self.assertNotIn("Hierarchy1", dimension)
self.assertIn("Hierarchy2", dimension)
self.assertNotIn("Hierarchy3", dimension)
self.assertIn("Leaves", dimension)
def test_rename_dimension(self):
original_dimension_name = self.prefix + "Original_Dimension"
renamed_dimension_name = self.prefix + "Renamed_Dimension"
# if dimensions exist in TM1.. delete them
for dim_name in (original_dimension_name, renamed_dimension_name):
if self.tm1.dimensions.exists(dim_name):
self.tm1.dimensions.delete(dimension_name=dim_name)
# create dimension
original_dimension = Dimension(original_dimension_name)
hierarchy = Hierarchy(name=original_dimension_name, dimension_name=original_dimension_name)
hierarchy.add_element(element_name="Total", element_type="Consolidated")
hierarchy.add_element(element_name="Elem1", element_type="Numeric")
hierarchy.add_element(element_name="Elem2", element_type="Numeric")
hierarchy.add_element(element_name="Elem3", element_type="Numeric")
hierarchy.add_edge(parent="Total", component="Elem1", weight=1)
hierarchy.add_edge(parent="Total", component="Elem2", weight=1)
hierarchy.add_edge(parent="Total", component="Elem3", weight=1)
original_dimension.add_hierarchy(hierarchy)
self.tm1.dimensions.create(original_dimension)
# rename
renamed_dimension = self.tm1.dimensions.get(original_dimension.name)
renamed_dimension.name = renamed_dimension_name
self.tm1.dimensions.create(renamed_dimension)
# challenge equality of dimensions
summary1 = self.tm1.dimensions.hierarchies.get_hierarchy_summary(
dimension_name=original_dimension_name,
hierarchy_name=original_dimension_name)
summary2 = self.tm1.dimensions.hierarchies.get_hierarchy_summary(
dimension_name=renamed_dimension_name,
hierarchy_name=renamed_dimension_name)
self.assertEqual(summary1, summary2)
# delete
for dim_name in (original_dimension_name, renamed_dimension_name):
self.tm1.dimensions.delete(dimension_name=dim_name)
@classmethod
def tearDownClass(cls):
cls.tm1.logout()
if __name__ == '__main__':
unittest.main()

# ---- File: tests/test_version.py (repo: datatags/pyCraft, license: Apache-2.0) ----
from distutils.version import StrictVersion as SV
import unittest
import minecraft
class VersionTest(unittest.TestCase):
def test_module_version_is_a_valid_pep_386_strict_version(self):
SV(minecraft.__version__)
def test_protocol_version_is_an_int(self):
for version in minecraft.SUPPORTED_PROTOCOL_VERSIONS:
self.assertTrue(type(version) is int)

# ---- File: td3/Twin_Delayed_DDPG/policy_network.py (repo: Gregory-Eales/ml-reimplementations, license: MIT) ----
import torch
from torch.nn import functional as F
from torch import optim
from torch import nn
class PolicyNetwork(torch.nn.Module):
def __init__(self, in_dim, out_dim, alpha=0.01):
super(PolicyNetwork, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.l1 = nn.Linear(in_dim, 128)
self.l2 = nn.Linear(128, 128)
self.l3 = nn.Linear(128, 64)
self.l4 = nn.Linear(64, out_dim)
self.relu = nn.LeakyReLU()
self.optimizer = torch.optim.Adam(lr=alpha, params=self.parameters())
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu:0')
self.to(self.device)
def forward(self, x):
out = torch.Tensor(x).reshape(-1, self.in_dim)
out = self.l1(out)
out = self.relu(out)
out = self.l2(out)
out = self.relu(out)
out = self.l3(out)
out = self.relu(out)
out = self.l4(out)
out = torch.tanh(out)
return out
def loss(self, q):
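        # Deterministic policy-gradient actor objective: `q` is expected to
        # hold critic values Q(s, pi(s)), so minimizing -mean(q) ascends Q.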
return -torch.sum(q)/q.shape[0]
def optimize(self, q):
torch.cuda.empty_cache()
self.optimizer.zero_grad()
loss = self.loss(q)
loss.backward(retain_graph=True)
self.optimizer.step()
return -loss.detach().numpy()
def main():
pn = PolicyNetwork(in_dim=3, out_dim=1)
x = torch.ones(10, 3)
print(pn.forward(x))
if __name__ == "__main__":
main()

# ---- File: MoodyBeatsRecommenderAPI/api/serializers.py (repo: labs12-music-stream-selector/DS, license: MIT) ----
from rest_framework import serializers
from songs.models import (
Song,
Tag,
NewVideo,
NewComment,
NewVideoTag,
NewVideoStats,
NewVideoCorrectMood,
)
class NewVideoStatsSerializer(serializers.ModelSerializer):
new_video = serializers.StringRelatedField()
class Meta:
model = NewVideoStats
fields = [
'video_id',
'video_view_count',
'video_like_count',
'new_video',
]
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = Tag
fields = '__all__'
class SongSerializer(serializers.ModelSerializer):
api_tags = TagSerializer(many=True)
class Meta:
model = Song
fields = (
'id',
#'slug',
'songs',
'mood',
'api_tags',
'video_id',
#'song_embed_code',
'recommendation_one',
'recommendation_one_link',
'recommendation_two',
'recommendation_two_link',
'recommendation_three',
'recommendation_three_link',
'recommendation_four',
'recommendation_four_link',
'recommendation_five',
'recommendation_five_link',
)
read_only_fields = (
'id',
#'slug',
'songs',
'mood',
'api_tags',
'video_id',
#'song_embed_code',
'recommendation_one',
'recommendation_one_link',
'recommendation_two',
'recommendation_two_link',
'recommendation_three',
'recommendation_three_link',
'recommendation_four',
'recommendation_four_link',
'recommendation_five',
'recommendation_five_link',
)
#lookup_field = 'slug'
class NewVideoTagSerializer(serializers.ModelSerializer):
class Meta:
model = NewVideoTag
fields = [
'topics',
]
class NewVideoSerializer(serializers.ModelSerializer):
new_video_tags = NewVideoTagSerializer(many=True)
class Meta:
model = NewVideo
fields = [
'video_title',
'video_id',
'moods',
'new_video_tags',
]
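# Hedged sketch of the JSON NewVideoSerializer might emit (field values are
# invented; the actual shapes depend on the model definitions):
#
#   {
#     "video_title": "Some Track",
#     "video_id": "abc123xyz",
#     "moods": "happy",
#     "new_video_tags": [{"topics": "pop"}]
#   }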
class NewVideoDetailSerializer(serializers.ModelSerializer):
new_video_tags = NewVideoTagSerializer(many=True)
class Meta:
model = NewVideo
depth=3
fields = [
'video_id',
'video_title',
'moods',
'new_video_tags',
]
class NewVideoCorrectMoodSerializer(serializers.ModelSerializer):
class Meta:
model = NewVideoCorrectMood
fields = [
'video_id',
'video_title',
'correct_moods',
]
class NewCommentSerializer(serializers.ModelSerializer):
class Meta:
model = NewComment
fields = '__all__'
"""
class NewVideoStatsDetailSerializer(serializers.ModelSerializer):
new_video_stats = NewVideoStatsSerializer(many=True)
class Meta:
model = NewVideoStats
fields = [
'new_video_stats',
]
"""
| 17.652778 | 65 | 0.712038 | 2,147 | 0.844611 | 0 | 0 | 0 | 0 | 0 | 0 | 1,033 | 0.406373 |
a8b4ca0aa758fa6b9fa0e302dfe53d2794b1c6f8 | 2,107 | py | Python | lab/migrations/0003_auto_20191202_1534.py | jtdub/prod2lab | 054c922f731ad377b83714194ef806325f79336a | [
"MIT"
] | 11 | 2019-11-20T02:05:30.000Z | 2021-08-22T13:15:14.000Z | lab/migrations/0003_auto_20191202_1534.py | jtdub/prod2lab | 054c922f731ad377b83714194ef806325f79336a | [
"MIT"
] | 12 | 2019-11-20T02:07:54.000Z | 2019-12-11T14:57:59.000Z | lab/migrations/0003_auto_20191202_1534.py | jtdub/prod2lab | 054c922f731ad377b83714194ef806325f79336a | [
"MIT"
] | 2 | 2019-11-20T02:05:33.000Z | 2019-11-28T01:29:20.000Z | # Generated by Django 2.2.8 on 2019-12-02 15:34
from django.db import migrations, models
import django.db.models.deletion
from lab.models import OperatingSystem
def initial_os_data(app, schema_editor):
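    # Note: this data migration imports OperatingSystem directly from
    # lab.models (top of file); Django's docs recommend fetching the
    # historical model instead, e.g. app.get_model('lab', 'OperatingSystem'),
    # so the migration keeps working if the model changes later.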
data = [
{"os": "ios", "os_type": "cisco_ios", "term_length": "terminal length 0", "fetch_config": "show running-config"},
{"os": "iosxr", "os_type": "cisco_xr", "term_length": "terminal length 0", "fetch_config": "show running-config"},
{"os": "iosxe", "os_type": "cisco_xe", "term_length": "terminal length 0", "fetch_config": "show running-config"},
{"os": "nxos", "os_type": "cisco_nxos", "term_length": "terminal length 0", "fetch_config": "show running-config"},
{"os": "eos", "os_type": "arista_eos", "term_length": "terminal length 0", "fetch_config": "show running-config"},
{"os": "edgeos", "os_type": "vyos", "term_length": "terminal length 0", "fetch_config": "show configuration"},
]
for item in data:
OperatingSystem.objects.create(
name=item['os'],
netmiko_type=item['os_type'],
terminal_length_cmd=item['term_length'],
fetch_config_cmd=item['fetch_config']
)
class Migration(migrations.Migration):
dependencies = [
('lab', '0002_auto_20191120_1951'),
]
operations = [
migrations.CreateModel(
name='OperatingSystem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('netmiko_type', models.CharField(max_length=255)),
('terminal_length_cmd', models.CharField(max_length=255)),
('fetch_config_cmd', models.CharField(max_length=255)),
],
),
migrations.AlterField(
model_name='device',
name='os_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lab.OperatingSystem'),
),
migrations.RunPython(initial_os_data),
]

# ---- File: org.cohorte.eclipse.runner.basic/files/test/cohorte/composer/top/criteria/distance/configuration.py (repo: isandlaTech/cohorte-devtools, license: Apache-2.0) ----
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Top Composer: Group by configuration
:author: Thomas Calmant
:license: Apache Software License 2.0
:version: 3.0.0
..
This file is part of Cohorte.
Cohorte is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Cohorte is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Cohorte. If not, see <http://www.gnu.org/licenses/>.
"""
# iPOPO Decorators
from pelix.ipopo.decorators import ComponentFactory, Requires, Provides, \
Instantiate
# Composer
import cohorte
import cohorte.composer
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (3, 0, 0)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
@ComponentFactory()
@Provides(cohorte.composer.SERVICE_TOP_CRITERION_DISTANCE)
@Requires('_configuration', cohorte.SERVICE_CONFIGURATION_READER)
@Instantiate('cohorte-composer-criterion-distance-configuration')
class ConfigurationCriterion(object):
"""
Groups components by configuration
"""
def __init__(self):
"""
Sets up members
"""
self._configuration = None
def _get_isolate_node(self, isolate_name):
"""
Reads the configuration of the given isolate and returns the specified
node name, or None
:param isolate_name: Name of an isolate
:return: A node name or None
"""
try:
# Read the configuration, without logging file errors
config = self._configuration.read("{0}.js".format(isolate_name),
False)
# Return the indicated node
return config.get('node')
except IOError:
# Ignore I/O error: the isolate has no specific configuration
pass
def group(self, components, groups):
"""
Groups components according to their implementation language
:param components: List of components to group
:param groups: Dictionary of current groups
:return: A tuple:
* Dictionary of grouped components (group -> components)
        * Set of components that haven't been grouped
"""
nodes = {}
isolate_nodes = {}
remaining = set(components)
for component in components:
node = None
if component.node:
# Explicit node
node = component.node
elif component.isolate:
# Explicit isolate
try:
node = isolate_nodes[component.isolate]
except KeyError:
# Look for the node associated to the isolate
node = self._get_isolate_node(component.isolate)
# Store the information
isolate_nodes[component.isolate] = node
if node:
# Found a node
nodes.setdefault(node, set()).add(component)
remaining.remove(component)
# Return the result
return nodes, remaining
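# Hedged sketch of group() in action, assuming component objects exposing
# .node and .isolate attributes (names invented):
#   comp_a.node = "node-1"                     -> grouped under "node-1"
#   comp_b.node = None, comp_b.isolate = None  -> left ungrouped
#   criterion.group([comp_a, comp_b], {}) == ({"node-1": {comp_a}}, {comp_b})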

# ---- File: hask3/lang/lazylist.py (repo: mvaled/hask, license: BSD-2-Clause) ----
# 'Sequence' moved to collections.abc (removed from 'collections' in Python
# 3.10); this module already requires Python 3.6+ since it uses f-strings.
from collections.abc import Sequence
from hask3.hack import objectify
from hask3.lang.type_system import Typeclass
from hask3.lang.type_system import Hask
from hask3.lang.typeclasses import Show
from hask3.lang.typeclasses import Eq
from hask3.lang.typeclasses import Ord
from hask3.lang.syntax import Syntax
from hask3.lang.syntax import instance
from hask3.lang.syntax import sig
from hask3.lang.syntax import H
# LT, EQ, GT = -1, 0, 1
try:
from __builtin__ import cmp
except ImportError:
def cmp(a, b):
if a == b:
return 0
elif a < b:
return -1
else:
return 1
class Enum(Typeclass):
"""
Class Enum defines operations on sequentially ordered types.
The enumFrom... methods are used in translation of arithmetic sequences.
Instances of Enum may be derived for any enumeration type (types whose
constructors have no fields). The nullary constructors are assumed to be
numbered left-to-right by fromEnum from 0 through n-1.
Attributes:
- ``toEnum``
- ``fromEnum``
- ``succ``
- ``pred``
- ``enumFrom``
- ``enumFromThen``
- ``enumFrom``
- ``enumFromThenTo``
- ``EnumFromTo``
Minimal complete definition:
- ``toEnum``
- ``fromEnum``
"""
@classmethod
def make_instance(typeclass, cls, toEnum, fromEnum):
from hask3.lang.type_system import build_instance
def succ(a):
return toEnum(fromEnum(a) + 1)
def pred(a):
return toEnum(fromEnum(a) - 1)
def enumFromThen(start, second):
pointer = fromEnum(start)
step = fromEnum(second) - pointer
while True:
yield toEnum(pointer)
pointer += step
def enumFrom(start):
return enumFromThen(start, succ(start))
def enumFromThenTo(start, second, end):
if start == end:
yield start
return
elif (second >= start > end) or (second <= start < end):
return
pointer, stop = fromEnum(start), fromEnum(end)
step = fromEnum(second) - pointer
while (start < end and pointer <= stop) or \
(start > end and pointer >= stop):
yield toEnum(pointer)
pointer += step
def enumFromTo(start, end):
second = succ(start) if start < end else pred(start)
return enumFromThenTo(start, second, end)
attrs = {"toEnum": toEnum, "fromEnum": fromEnum, "succ": succ, "pred":
pred, "enumFromThen": enumFromThen, "enumFrom": enumFrom,
"enumFromThenTo": enumFromThenTo, "enumFromTo": enumFromTo}
build_instance(Enum, cls, attrs)
@sig(H/ "a" >> int)
def fromEnum(a):
"""``fromEnum :: a -> int``
Convert to an int.
"""
return Enum[a].toEnum(a)
@sig(H/ "a" >> "a")
def succ(a):
"""``succ :: a -> a``
the successor of a value. For numeric types, succ adds 1.
"""
return Enum[a].succ(a)
@sig(H/ "a" >> "a")
def pred(a):
"""
pred :: a -> a
the predecessor of a value. For numeric types, pred subtracts 1.
"""
return Enum[a].pred(a)
@sig(H/ "a" >> "a" >> ["a"])
def enumFromThen(start, second):
"""``enumFromThen :: a -> a -> [a]``
Used in translation of ``[n, n_, ...]``.
"""
return L[Enum[start].enumFromThen(start, second)]
@sig(H/ "a" >> ["a"])
def enumFrom(start):
"""``enumFrom :: a -> [a]``
Used in translation of L[n, ...]
"""
return L[Enum[start].enumFrom(start)]
@sig(H/ "a" >> "a" >> "a" >> ["a"])
def enumFromThenTo(start, second, end):
"""``enumFromThenTo :: a -> a -> a -> [a]``
Used in translation of ``L[n, n_, ..., m]``.
"""
return L[Enum[start].enumFromThenTo(start, second, end)]
@sig(H/ "a" >> "a" >> ["a"])
def enumFromTo(start, end):
"""``enumFromTo :: a -> a -> [a]``
Used in translation of L[n, ..., m]
"""
return L[Enum[start].enumFromTo(start, end)]
instance(Enum, int).where(fromEnum=int, toEnum=int)
instance(Enum, bool).where(fromEnum=int, toEnum=bool)
instance(Enum, str).where(fromEnum=ord, toEnum=chr)
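# Hedged usage sketch for the Enum helpers above, relying only on the int and
# str instances just declared (results shown as the lazy lists they denote):
#
#   enumFromTo(1, 5)        # L[1, 2, 3, 4, 5]
#   enumFromThen("a", "c")  # L['a', 'c', 'e', ...] -- lazy, infinite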
class List(Sequence, Hask):
"""Statically typed lazy sequence datatype.
See `L`:obj: for more information.
"""
def __init__(self, head=None, tail=None):
from itertools import chain
from hask3.lang.type_system import typeof
from hask3.lang.hindley_milner import unify
if head is not None:
count = len(head)
if count > 0:
fst = head[0]
i = 1
while i < count:
unify(typeof(fst), typeof(head[i]))
i += 1
self.__head = list(head)
else:
self.__head = []
self.__is_evaluated = tail is None
self.__tail = chain([] if self.__is_evaluated else tail)
def __type__(self):
from hask3.lang.type_system import typeof
from hask3.lang.hindley_milner import TypeVariable, ListType
if len(self.__head) == 0:
if self.__is_evaluated:
return ListType(TypeVariable())
else:
self.__next()
return self.__type__()
else:
return ListType(typeof(self[0]))
def __next(self):
"""Evaluate the next element of the tail, and add it to the head."""
from hask3.lang.type_system import typeof
from hask3.lang.hindley_milner import unify
if self.__is_evaluated:
raise StopIteration
else:
try:
next_iter = next(self.__tail)
if len(self.__head) > 0:
unify(typeof(self[0]), typeof(next_iter))
self.__head.append(next_iter)
except StopIteration:
self.__is_evaluated = True
def __evaluate(self):
"""Evaluate the entire List."""
while not self.__is_evaluated:
self.__next()
def __rxor__(self, item):
"""``^`` is the ``cons`` operator (equivalent to ``:`` in Haskell)."""
from hask3.lang.type_system import typeof
from hask3.lang.hindley_milner import ListType, unify
unify(self.__type__(), ListType(typeof(item)))
if self.__is_evaluated:
return List(head=[item] + self.__head)
return List(head=[item] + self.__head, tail=self.__tail)
def __add__(self, other):
"""``(+) :: [a] -> [a] -> [a]``
``+`` is the list concatenation operator, equivalent to ``++`` in
Haskell and + for Python lists
"""
from itertools import chain
from hask3.lang.type_system import typeof
from hask3.lang.hindley_milner import unify
unify(self.__type__(), typeof(other))
if self.__is_evaluated and other.__is_evaluated:
return List(head=self.__head + other.__head)
elif self.__is_evaluated and not other.__is_evaluated:
return List(head=self.__head + other.__head, tail=other.__tail)
else:
return List(head=self.__head, tail=chain(self.__tail, other))
def __str__(self):
from hask3.lang.typeclasses import show
body = ", ".join(map(show, self.__head))
if self.__is_evaluated:
if len(self.__head) <= 1:
body = f'[{body}]'
suffix = ''
else:
suffix = ' ...'
return f"L[{body}{suffix}]"
def __cmp__(self, other):
if self.__is_evaluated and other.__is_evaluated:
return cmp(self.__head, other.__head)
elif len(self.__head) >= len(other.__head):
# check the evaluated heads
heads = zip(self.__head[:len(other.__head)], other.__head)
heads_comp = ((cmp(h1, h2) for h1, h2 in heads))
for comp in heads_comp:
if comp != 0:
return comp
# evaluate the shorter-headed list until it is the same size
while len(self.__head) > len(other.__head):
if other.__is_evaluated:
return 1
other.__next()
comp = cmp(self.__head[len(other.__head)-1], other.__head[-1])
if comp != 0:
return comp
# evaluate the tails, checking each time
while not self.__is_evaluated or not other.__is_evaluated:
if not self.__is_evaluated:
self.__next()
if not other.__is_evaluated:
other.__next()
len_comp = cmp(len(self.__head), len(other.__head))
if len_comp != 0:
return len_comp
if len(self.__head) > 0:
value_comp = cmp(self.__head[-1], other.__head[-1])
if value_comp != 0:
return value_comp
elif len(other.__head) > len(self.__head):
return -other.__cmp__(self)
return 0
def __eq__(self, other):
return self.__cmp__(other) == 0
def __lt__(self, other):
return self.__cmp__(other) == -1
def __gt__(self, other):
return self.__cmp__(other) == 1
def __le__(self, other):
comp = self.__cmp__(other)
return comp in (-1, 0)
def __ge__(self, other):
comp = self.__cmp__(other)
return comp in (1, 0)
def __len__(self):
self.__evaluate()
return len(self.__head)
def __iter__(self):
for item in self.__head:
yield item
for item in self.__tail:
self.__head.append(item)
yield item
def count(self, x):
from hask3.lang.type_system import typeof
from hask3.lang.hindley_milner import ListType, unify
unify(self.__type__(), ListType(typeof(x)))
self.__evaluate()
return self.__head.count(x)
def index(self, x):
from hask3.lang.type_system import typeof
from hask3.lang.hindley_milner import ListType, unify
unify(self.__type__(), ListType(typeof(x)))
self.__evaluate()
return self.__head.index(x)
def __contains__(self, x):
from hask3.hack import isin
from hask3.lang.type_system import typeof
from hask3.lang.hindley_milner import ListType, unify
unify(self.__type__(), ListType(typeof(x)))
return isin(x, iter(self))
def __getitem__(self, ix):
is_slice = isinstance(ix, slice)
if is_slice:
i = ix.start if ix.stop is None else ix.stop
else:
i = ix
# make sure that the list is evaluated enough to do the indexing, but
# not any more than necessary
# if index is negative, evaluate the entire list
if i is None:
# In Python 3, `None >= 0` is a TypeError, but in Python 2 returns
# False. So let's go negative in any case...
i = -1
if i >= 0:
while (i+1) > len(self.__head):
try:
self.__next()
except StopIteration:
break
else:
self.__evaluate()
if is_slice:
if ix.stop is None and not self.__is_evaluated:
return List(head=self.__head[ix], tail=self.__tail)
else:
return List(head=self.__head[ix])
else:
return self.__head[i]
# Basic typeclass instances for list
instance(Show, List).where(
show = List.__str__
)
instance(Eq, List).where(
eq = List.__eq__
)
instance(Ord, List).where(
lt = List.__lt__,
gt = List.__gt__,
le = List.__le__,
ge = List.__ge__
)
@objectify
class L(Syntax):
"""``L`` is for comprehensions and lazy creation of Haskell-style lists.
To create a new List, just wrap an interable in ``L[ ]``.
List comprehensions can be used with any instance of Enum, including the
built-in types int, float, and char.
There are four basic list comprehension patterns::
>>> L[1, ...]
# list from 1 to infinity, counting by ones
>>> L[1, 3, ...]
# list from 1 to infinity, counting by twos
>>> L[1, ..., 20]
# list from 1 to 20 (inclusive), counting by ones
>>> L[1, 5, ..., 20]
# list from 1 to 20 (inclusive), counting by fours
There is a semantic problem because differences between Python sequences
and Haskell List. Because of that ``L[1, 2]`` will pass a tuple to
`__getitem__` magic, but ``L[1]`` will not. To avoid, as much as
possible, this issue related with two phrases with equivalent denotations,
singular elements will be converted to lists. The logic to test if a
given value is singular is whether is not an instance of
`~collections.Sequence`:class: or a string. For example:
>>> from hask3 import L
>>> L[1] == L[[1]]
True
"""
invalid_syntax_message = "Invalid input to list constructor"
def __getitem__(self, lst):
        from collections.abc import Sequence  # see the import note at the top of this module
from hask3.hack import isin, is_iterator
if isinstance(lst, tuple) and len(lst) < 5 and isin(Ellipsis, lst):
# L[x, ...]
if len(lst) == 2 and lst[1] is Ellipsis:
return enumFrom(lst[0])
# L[x, y, ...]
elif len(lst) == 3 and lst[2] is Ellipsis:
return enumFromThen(lst[0], lst[1])
# L[x, ..., y]
elif len(lst) == 3 and lst[1] is Ellipsis:
return enumFromTo(lst[0], lst[2])
# L[x, y, ..., z]
elif len(lst) == 4 and lst[2] is Ellipsis:
return enumFromThenTo(lst[0], lst[1], lst[3])
else:
raise SyntaxError("Invalid list comprehension: %s" % str(lst))
elif is_iterator(lst) or isinstance(lst, List):
return List(tail=lst)
elif isinstance(lst, Sequence) and not isinstance(lst, str):
return List(head=list(lst))
else:
return List(head=[lst])
del Sequence, objectify
del Typeclass, Hask, Show, Eq, Ord
del Syntax, instance, sig, H

# ---- File: code/test_dropout.py (repo: lisa-1010/smart-tutor, license: MIT) ----
# coding: utf-8
# # Using Dropout
# Let's see how we can use dropout for early stopping
from concept_dependency_graph import ConceptDependencyGraph
import data_generator as dg
from student import *
import simple_mdp as sm
import dynamics_model_class as dmc
import numpy as np
import dataset_utils
import tensorflow as tf
import tflearn
import copy
import time
def main():
n_concepts = 4
use_student2 = True
student2_str = '2' if use_student2 else ''
learn_prob = 0.5
lp_str = '-lp{}'.format(int(learn_prob*100)) if not use_student2 else ''
n_students = 100000
seqlen = 7
filter_mastery = True
filter_str = '' if not filter_mastery else '-filtered'
policy = 'random'
filename = 'test{}-n{}-l{}{}-{}{}.pickle'.format(student2_str, n_students, seqlen,
lp_str, policy, filter_str)
#concept_tree = sm.create_custom_dependency()
concept_tree = ConceptDependencyGraph()
concept_tree.init_default_tree(n_concepts)
if not use_student2:
test_student = Student(n=n_concepts,p_trans_satisfied=learn_prob, p_trans_not_satisfied=0.0, p_get_ex_correct_if_concepts_learned=1.0)
else:
test_student = Student2(n_concepts)
print(filename)
# load toy data
data = dataset_utils.load_data(filename='{}{}'.format(dg.SYN_DATA_DIR, filename))
print('Average posttest: {}'.format(sm.expected_reward(data)))
print('Percent of full posttest score: {}'.format(sm.percent_complete(data)))
print('Percent of all seen: {}'.format(sm.percent_all_seen(data)))
input_data_, output_mask_, target_data_ = dataset_utils.preprocess_data_for_rnn(data)
train_data = (input_data_[:,:,:], output_mask_[:,:,:], target_data_[:,:,:])
print(input_data_.shape)
print(output_mask_.shape)
print(target_data_.shape)
# test_model hidden=16
# test_model_mid hidden=10
# test_model_small hidden=5
# test_model_tiny hidden=3
model_id = "test2_model_small"
dropouts = np.array([1.0])
n_dropouts = dropouts.shape[0]
total_epochs = 14
reps = 20
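    # ExtractCallback below snapshots tflearn's TrainingState at the end of
    # every epoch so global/validation losses can be read back after training.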
class ExtractCallback(tflearn.callbacks.Callback):
def __init__(self):
self.tstates = []
def on_epoch_end(self, training_state):
self.tstates.append(copy.copy(training_state))
def test_dropout_losses():
losses = np.zeros((n_dropouts,reps,total_epochs))
val_losses = np.zeros((n_dropouts, reps,total_epochs))
for d in range(n_dropouts):
dropout = dropouts[d]
for r in range(reps):
print('----------------------------------------')
print('---------- Dropout {:3.1f} Rep {:2d} ----------'.format(dropout, r+1))
print('----------------------------------------')
ecall = ExtractCallback()
dmodel = dmc.DynamicsModel(model_id=model_id, timesteps=seqlen, dropout=dropout, load_checkpoint=False)
dmodel.train(train_data, n_epoch=total_epochs, callbacks=ecall, shuffle=False, load_checkpoint=False)
losses[d,r,:] = np.array([s.global_loss for s in ecall.tstates])
val_losses[d,r,:] = np.array([s.val_loss for s in ecall.tstates])
return losses, val_losses
losses, val_losses = test_dropout_losses()
np.savez("dropoutput",dropouts=dropouts, losses=losses, vals=val_losses)
if __name__ == '__main__':
starttime = time.time()
np.random.seed()
main()
endtime = time.time()
print('Time elapsed {}s'.format(endtime-starttime))

# ---- File: deep_disfluency/corpus/tree_pos_map_writer.py (repo: askender/deep_disfluency, license: MIT) ----
# -*- coding: utf-8 -*-
from collections import defaultdict
import re
from nltk import tree
from swda import CorpusReader
from tree_pos_map import TreeMapCorpus
from tree_pos_map import POSMapCorpus
possibleMistranscription = [("its", "it's"),
("Its", "It's"),
("it's", "its"),
("It's", "Its"),
("whose", "who's"),
("Whose", "Who's"),
("who's", "whose"),
("Who's", "Whose"),
("you're", "your"),
("You're", "Your"),
("your", "you're"),
("Your", "You're"),
("their", "they're"),
("Their", "They're"),
("they're", "their"),
("They're", "Their"),
("programme", "program"),
("program", "programme"),
("centre", "center"),
("center", "centre"),
("travelling", "traveling"),
("traveling", "travelling"),
("colouring", "coloring"),
("coloring", "colouring")]
class TreeMapWriter:
"""Object which writes mappings from the words in utterances
to the nodes of the corresponding trees in a treebank
"""
def __init__(self, corpus_path="../swda",
metadata_path="swda-metadata.csv",
target_folder_path="Maps",
ranges=None,
errorLog=None):
print "started TreeMapWriting"
self.write_to_file(corpus_path,
metadata_path,
target_folder_path,
ranges,
errorLog)
def write_to_file(self, corpus_path,
metadata_path,
target_folder_path,
ranges,
errorLog):
"""Writes files to a target folder with the mappings
from words in utterances to tree nodes in trees.
"""
if errorLog:
errorLog = open(errorLog, 'w')
corpus = CorpusReader(corpus_path, metadata_path)
# Iterate through all transcripts
incorrectTrees = 0
folder = None
corpus_file = None
for trans in corpus.iter_transcripts():
# print "iterating",trans.conversation_no
if not trans.has_pos():
continue
# print "has pos"
if ranges and not trans.conversation_no in ranges:
continue
# print "in range"
# just look at transcripts WITH trees as compliment to the
# below models
if not trans.has_trees():
continue
end = trans.swda_filename.rfind("/")
start = trans.swda_filename.rfind("/", 0, end)
c_folder = trans.swda_filename[start + 1:end]
if c_folder != folder:
# for now splitting the maps by folder
folder = c_folder
if corpus_file:
corpus_file.close()
corpus_file = open(target_folder_path +
"/Tree_map_{0}.csv.text".format(folder), 'w')
wordTreeMapList = TreeMapCorpus(False, errorLog)
print "new map for folder", folder
translist = trans.utterances
translength = len(translist)
count = 0
# iterating through transcript utterance by utterance
# create list of tuples i.e. map from word to the index(ices)
# (possibly multiple or null) of the relevant leaf/ves
# of a given tree i.e. utt.tree[0].leaves[0] would be a pair (0,0))
while count < translength:
utt = trans.utterances[count]
words = utt.text_words()
wordTreeMap = [] # [((word), (List of LeafIndices))]
forwardtrack = 0
backtrack = 0
continued = False
# print "\n COUNT" + str(count)
# print utt.damsl_act_tag()
if len(utt.trees) == 0 or utt.damsl_act_tag() == "x":
wordTreeMap.append((utt, [])) # just dummy value
# errormessage = "WARNING: NO TREE for file/utt: " +\
# str(utt.swda_filename) + " " + utt.caller + "." + \
# str(utt.utterance_index) + "." + \
#str(utt.subutterance_index) + " " + utt.text
# print(errormessage)
count += 1
continue
# raw_input()
# indices for which tree and leaf we're at:
i = 0 # tree
j = 0 # leaf
# initialise pairs of trees and ptb pairs
trees = []
for l in range(0, len(utt.trees)):
trees.append(
(utt.ptb_treenumbers[l], count, l, utt.trees[l]))
# print "TREES = "
# for tree in trees:
# print tree
origtrees = list(trees)
origcount = count
# overcoming the problem of previous utterances contributing
# to the tree at this utterance, we need to add the words from
# the previous utt add in all the words from previous utterance
# with a dialogue act tag/or the same tree?
# check that the last tree in the previous utterance
# is the same as the previous one
previousUttSame = trans.previous_utt_same_speaker(utt)
# print previousUttSame
lastTreeMap = None
if previousUttSame:
# print "search for previous full act utt
# for " + str(utt.swda_filename) +
# str(utt.transcript_index)
lastTreeMap = wordTreeMapList.get_treemap(
trans,
previousUttSame)
if ((not lastTreeMap) or (len(lastTreeMap) == 0) or
(len(lastTreeMap) == 1 and lastTreeMap[0][1] == [])):
# print "no last tree map, backwards searching"
while previousUttSame and \
((not lastTreeMap) or (len(lastTreeMap) == 0) or
(len(lastTreeMap) == 1 and lastTreeMap[0][1] == [])):
previousUttSame = trans.previous_utt_same_speaker(
previousUttSame) # go back one more
lastTreeMap = wordTreeMapList.get_treemap(trans,
previousUttSame)
if previousUttSame:
pass
# print previousUttSame.transcript_index
if not lastTreeMap:
pass
# print "no last treemap found for:"
# print utt.swda_filename
# print utt.transcript_index
if lastTreeMap and \
(utt.damsl_act_tag() == "+" or
(len(lastTreeMap.treebank_numbers) > 0
and lastTreeMap.treebank_numbers[-1] ==
utt.ptb_treenumbers[0])):
continued = True
# might have to backtrack
# now checking for wrong trees
lastPTB = lastTreeMap.treebank_numbers
lastIndexes = lastTreeMap.transcript_numbers
lastTreesTemp = lastTreeMap.get_trees(trans)
lastTrees = []
for i in range(0, len(lastPTB)):
lastTrees.append([lastPTB[i], lastIndexes[i][0],
lastIndexes[i][1], lastTreesTemp[i]])
if not (lastPTB[-1] == utt.ptb_treenumbers[0]):
# print "not same, need to correct!"
# print words
# print trees
# print "last one"
# print previousUttSame.text_words()
# print lastTrees
if utt.ptb_treenumbers[0] - lastPTB[-1] > 1:
# backtrack and redo the antecedent
count = count - (count - lastIndexes[-1][0])
utt = previousUttSame
words = utt.text_words()
mytrees = []
for i in range(0, len(lastTrees) - 1):
mytrees.append(lastTrees[i])
trees = mytrees + [origtrees[0]]
# print "\n(1)backtrack to with new trees:"
backtrack = 1
# print utt.transcript_index
# print words
# print trees
# raw_input()
# alternately, this utt's tree may be further back
                        # than its antecedent's, rare mistake
elif utt.ptb_treenumbers[0] < lastTrees[-1][0]:
# continue with this utterance and trees
# (if there are any), but replace its first
# tree with its antecdents last one
forwardtrack = 1
trees = [lastTrees[-1]] + origtrees[1:]
# print "\n(2)replacing first one to lasttreemap's:"
# print words
# print trees
# raw_input()
if backtrack != 1: # we should have no match
found_treemap = False
# resetting
# for t in wordTreeMapList.keys():
# print t
# print wordTreeMapList[t]
for t in range(len(lastTreeMap) - 1, -1, -1):
# print lastTreeMap[t][1]
# if there is a leafIndices for the
# word being looked at, gets last mapped one
if len(lastTreeMap[t][1]) > 0:
# print "last treemapping of last
# caller utterance =
# " + str(lastTreeMap[t][1][-1])
j = lastTreeMap[t][1][-1][1] + 1
found_treemap = True
# print "found last mapping, j -1 = " + str(j-1)
# raw_input()
break
if not found_treemap:
pass
# print "NO matched last TREEMAP found for \
# previous Utt Same Speaker of " + \
# str(trans.swda_filename) + " " + \
# str(utt.transcript_index)
# print lastTreeMap
# for tmap in wordTreeMapList.keys():
# print tmap
# print wordTreeMapList[tmap]
# raw_input()
possibleComment = False # can have comments, flag
mistranscribe = False
LeafIndices = [] # possibly empty list of leaf indices
word = words[0]
# loop until no more words left to be matched in utterance
while len(words) > 0:
# print "top WORD:" + word
if not mistranscribe:
wordtest = re.sub(r"[\.\,\?\"\!]", "", word)
wordtest = wordtest.replace("(", "").replace(")", "")
match = False
LeafIndices = [] # possibly empty list of leaf indices
if (possibleComment
or word[0:1] in ["{", "}", "-"]
or word in ["/", ".", ",", "]"]
or wordtest == ""
or any([x in word for x in ["<", ">", "*", "[", "+", "]]",
"...", "#", "="]])):
# no tree equivalent for {D } type annotations
if (word[0:1] == "-" or
any([x in word for x in
["*", "<<", "<+", "[[", "<"]])) \
and not possibleComment:
possibleComment = True
if possibleComment:
#print("match COMMENT!:" + word)
# raw_input()
LeafIndices = []
match = True
#wordTreeMap.append((word, LeafIndices))
if any([x in word for x in [">>", "]]", ">"]]) or \
word[0] == "-": # turn off comment
possibleComment = False
#del words[0]
# LeadIndices will be null here
wordTreeMap.append((word, LeafIndices))
LeafIndices = []
match = True
# print "match annotation!:" + word
del words[0] # word is consumed, should always be one
if len(words) > 0:
word = words[0]
wordtest = re.sub(r"[\.\,\?\/\)\(\"\!]", "", word)
wordtest = wordtest.replace("(", "")
wordtest = wordtest.replace(")", "")
else:
break
continue
# carry on to next word without updating indices?
else:
while i < len(trees):
# print "i number of trees :" + str(len(utt.trees))
# print "i tree number :" + str(i)
# print "i loop word :" + word
tree = trees[i][3]
# print "looking at ptb number " + str(trees[i][0])
# print "looking at index number " \
#+ str(trees[i][1])+","+str(trees[i][2])
while j < len(tree.leaves()):
leaf = tree.leaves()[j]
# print "j number of leaves : " \
#+ str(len(tree.leaves()))
# print "j loop word : " + word
# print "j loop wordtest : " + wordtest
# print "j leaf : " + str(j) + " " + leaf
breaker = False
# exact match
if wordtest == leaf or word == leaf:
LeafIndices.append((i, j))
wordTreeMap.append((word, LeafIndices))
# print("match!:" + word + " " + \
# str(utt.swda_filename) + " " + \
# utt.caller + "." + \
# str(utt.utterance_index) + \
# "." + str(utt.subutterance_index))
del words[0] # word is consumed
if len(words) > 0:
word = words[0] # next word
wordtest = re.sub(
r"[\.\,\?\/\)\(\"\!]", "", word)
wordtest = wordtest.replace("(", "")
wordtest = wordtest.replace(")", "")
LeafIndices = []
j += 1 # increment loop to next leaf
match = True
breaker = True
# raw_input()
break
elif leaf in wordtest or \
leaf in word and not leaf == ",":
testleaf = leaf
LeafIndices.append((i, j))
j += 1
for k in range(j, j + 3): # 3 beyond
if (k >= len(tree.leaves())):
j = 0
i += 1
#breaker = True
breaker = True
break # got to next tree
if (testleaf + tree.leaves()[k]) \
in wordtest or (testleaf +
tree.leaves()[k])\
in word:
testleaf += tree.leaves()[k]
LeafIndices.append((i, k))
j += 1
# concatenation
if testleaf == wordtest or \
testleaf == word: # word matched
wordTreeMap.append((word,
LeafIndices))
del words[0] # remove word
# print "match!:" + word +\
#str(utt.swda_filename) + " "\
# + utt.caller + "." + \
# str(utt.utterance_index) +\
# "." + \
# str(utt.subutterance_index))
if len(words) > 0:
word = words[0]
wordtest = re.sub(
r"[\.\,\?\/\)\(\"\!]",
"", word)
wordtest = wordtest.\
replace("(", "")
wordtest = wordtest.\
replace(")", "")
# reinitialise leaves
LeafIndices = []
j = k + 1
match = True
breaker = True
# raw_input()
break
else:
# otherwise go on
j += 1
if breaker:
break
if match:
break
if j >= len(tree.leaves()):
j = 0
i += 1
if match:
break
# could not match word! try mistranscriptions first:
if not match:
if not mistranscribe: # one final stab at matching!
mistranscribe = True
for pair in possibleMistranscription:
if pair[0] == wordtest:
wordtest = pair[1]
if len(wordTreeMap) > 0:
if len(wordTreeMap[-1][1]) > 0:
i = wordTreeMap[-1][1][-1][0]
j = wordTreeMap[-1][1][-1][1]
else:
# go back to beginning of
# tree search
i = 0
j = 0
else:
i = 0 # go back to beginning
j = 0
break # matched
elif continued:
# possible lack of matching up of words in
# previous utterance same caller and same
# tree// not always within same tree!!
errormessage = "Possible bad start for \
CONTINUED UTT ''" + words[0] + "'' in file/utt: "\
+ str(utt.swda_filename) + "\n " + utt.caller + \
"." + str(utt.utterance_index) + "." + \
str(utt.subutterance_index) + \
"POSSIBLE COMMENT = " + str(possibleComment)
# print errormessage
if not errorLog is None:
errorLog.write(errormessage + "\n")
# raw_input()
if backtrack == 1:
backtrack += 1
elif backtrack == 2:
# i.e. we've done two loops and
# still haven't found it, try the other way
count = origcount
utt = trans.utterances[count]
words = utt.text_words()
word = words[0]
trees = [lastTrees[-1]] + origtrees[1:]
# print "\nSECOND PASS(2)replacing \
# first one to lasttreemap's:"
# print words
# print trees
backtrack += 1
# mistranscribe = False #TODO perhaps needed
wordTreeMap = []
# switch to forward track this is
# the only time we want to try
# from the previous mapped leaf in the
# other tree
foundTreemap = False
for t in range(len(lastTreeMap) - 1, -1, -1):
# backwards iteration through words
# print lastTreeMap[t][1]
if len(lastTreeMap[t][1]) > 0:
# print "last treemapping of last \
# caller utterance = " + \
# str(lastTreeMap[t][1][-1])
j = lastTreeMap[t][1][-1][1] + 1
foundTreemap = True
# print "found last mapping, j = " \
#+ str(j)
# raw_input()
# break when last tree
# mapped word from this caller is found
break
if not foundTreemap:
# print "NO matched last TREEMAP found\
# for previous Utt Same Speaker of " + \
# str(utt.swda_filename) + " " + \
# utt.caller + "." + \
# str(utt.utterance_index) + "." +\
# str(utt.subutterance_index)
j = 0
# for tmap in wordTreeMapList.keys():
# print tmap
# print wordTreeMapList[tmap]
# raw_input()
i = 0 # go back to first tree
continue
elif forwardtrack == 1:
forwardtrack += 1
elif forwardtrack == 2:
count = count - (count - lastIndexes[-1][0])
utt = previousUttSame
words = utt.text_words()
word = words[0]
mytrees = []
for i in range(0, len(lastTrees) - 1):
mytrees.append(lastTrees[i])
trees = mytrees + [origtrees[0]]
# print "\nSECOND PASS(1)backtrack to \
# with new trees:"
# print utt.transcript_index
# print words
# print trees
forwardtrack += 1
# mistranscribe = False #TODO maybe needed
wordTreeMap = []
# raw_input()
elif forwardtrack == 3 or backtrack == 3:
# if this hasn't worked reset to old trees
# print "trying final reset"
count = origcount
utt = trans.utterances[count]
words = utt.text_words()
word = words[0]
trees = origtrees
forwardtrack = 0
backtrack = 0
# mistranscribe = False #TODO maybe needed
wordTreeMap = []
# raw_input()
else:
pass
# print "resetting search"
# raw_input()
# unless forward tracking now,
# just go back to beginning
i = 0 # go back to beginning of tree search
j = 0
else:
mistranscribe = False
LeafIndices = []
wordTreeMap.append((word, LeafIndices))
errormessage = "WARNING: 440 no/partial tree \
mapping for ''" + words[0] + "'' in file/utt: "\
+ str(utt.swda_filename) + " \n" + utt.caller\
+ "." + str(utt.utterance_index) + "." + \
str(utt.subutterance_index) + \
"POSSIBLE COMMENT = " + str(possibleComment)
# print utt.text_words()
del words[0] # remove word
# for trip in wordTreeMap:
# print "t",trip
if len(words) > 0:
word = words[0]
wordtest = re.sub(r"[\.\,\?\/\)\(\"\!]", "",
word)
wordtest = wordtest.replace("(", "")
wordtest = wordtest.replace(")", "")
# print errormessage
if errorLog:
errorLog.write("possible wrong tree mapping:"
+ errormessage + "\n")
raw_input()
# end of while loop (words)
mytreenumbers = []
for treemap in trees:
# the whole list but the tree
mytreenumbers.append(treemap[:-1])
if not len(utt.text_words()) == len(wordTreeMap):
print "ERROR. uneven lengths!"
print utt.text_words()
print wordTreeMap
print trans.swda_filename
print utt.transcript_index
raw_input()
count += 1
continue
# add the treemap
wordTreeMapList.append(trans.conversation_no,
utt.transcript_index,
tuple(mytreenumbers),
tuple(wordTreeMap))
count += 1
# rewrite after each transcript
filedict = defaultdict(str)
for key in wordTreeMapList.keys():
csv_string = '"' + str(list(wordTreeMapList[key])) + '"'
mytreenumbers = wordTreeMapList[key].transcript_numbers
myptbnumbers = wordTreeMapList[key].treebank_numbers
tree_list_string = '"'
for i in range(0, len(mytreenumbers)):
treemap = [myptbnumbers[i]] + mytreenumbers[i]
tree_list_string += str(treemap) + ";"
tree_list_string = tree_list_string[:-1] + '"'
filename = '"' + key[0:key.rfind(':')] + '"'
transindex = key[key.rfind(':') + 1:]
filedict[int(transindex)] = filename \
+ "\t" + transindex + '\t' + csv_string + "\t" \
+ tree_list_string + "\n"
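            # Hedged sketch of one emitted row (tab-separated; all values
            # invented). For a key like "4617:12" this writes:
            #   "4617"<TAB>12<TAB>"[('so', [(0, 0)]), ('{F', [])]"<TAB>"[1234, 12, 0]"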
for key in sorted(filedict.keys()):
corpus_file.write(filedict[key])
wordTreeMapList = TreeMapCorpus(False, errorLog) # reset each time
print "\n" + str(incorrectTrees) + " incorrect trees"
corpus_file.close()
if not errorLog is None:
errorLog.close()
class POSMapWriter:
"""Object which writes mappings from the words in utterances
to the corresponding POS tags.
"""
def __init__(self, corpus_path="../swda",
metadata_path="swda-metadata.csv",
target_folder_path="Maps",
ranges=None,
errorLog=None):
print "started MapWriting"
self.write_to_file(corpus_path,
metadata_path,
target_folder_path,
ranges,
errorLog)
def write_to_file(self, corpus_path,
metadata_path,
target_folder_path,
ranges,
errorLog):
"""Writes files to a target folder with the mappings
from words in utterances to corresponding POS tags.
"""
if errorLog:
errorLog = open(errorLog, 'w')
corpus = CorpusReader(corpus_path, metadata_path)
folder = None
corpus_file = None
for trans in corpus.iter_transcripts():
# print "iterating",trans.conversation_no
if not trans.has_pos():
continue
# print "has pos"
            if ranges and trans.conversation_no not in ranges:
continue
# print "in range"
            # just look at transcripts WITHOUT trees as complement to the
            # above models
if trans.has_trees():
continue
end = trans.swda_filename.rfind("/")
start = trans.swda_filename.rfind("/", 0, end)
c_folder = trans.swda_filename[start + 1:end]
if c_folder != folder:
# for now splitting the maps by folder
folder = c_folder
if corpus_file:
corpus_file.close()
corpus_file = open(target_folder_path +
"/POS_map_{0}.csv.text".format(folder), 'w')
wordPOSMapList = POSMapCorpus(False, errorLog)
print "new map for folder", folder
translist = trans.utterances
translength = len(translist)
count = 0
# iterating through transcript utterance by utterance
while count < translength:
utt = trans.utterances[count]
words = utt.text_words()
wordPOSMap = []
if len(utt.pos) == 0: # no POS
wordPOSMap.append((utt, [])) # just dummy value
wordPOSMapList.append(trans.conversation_no,
utt.transcript_index,
list(wordPOSMap))
errormessage = "WARNING: NO POS for file/utt: " +\
str(utt.swda_filename) + " " + utt.caller + "." + \
str(utt.utterance_index) + "." + \
str(utt.subutterance_index) + " " + utt.text
# print errormessage
# raw_input()
else:
# indices for which POS we're at
j = 0
possibleComment = False # can have comments, flag
mistranscribe = False
word = words[0]
# loop until no more words left to be matched in utterance
while len(words) > 0:
word = words[0]
# print "top WORD:" + word
if not mistranscribe:
wordtest = re.sub(r"[\.\,\?\/\)\(\"\!\\]", "",
word)
wordtest = wordtest.replace("(", "").\
replace(")", "").replace("/", "")
match = False
POSIndices = []
if (possibleComment
or word[0:1] in ["{", "}", "-"]
or word in ["/", ".", ",", "]"]
or wordtest == ""
or any([x in word for x in
["<", ">", "*", "[", "+", "]]",
"...", "#", "="]])):
# no tree equivalent for {D } type annotations
if (word[0:1] == "-" or
any([x in word for x in
["*", "<<", "<+", "[[", "<"]])) \
and not possibleComment:
possibleComment = True
if possibleComment:
# print "match COMMENT!:" + word
# raw_input()
POSIndices = []
match = True
if (any([x in word for x in [">>", "]]", "))",
">"]]) or
word[0] == "-") \
and not word == "->":
# turn off comment
possibleComment = False
if (">>" in word or "]]" in word or "))"
in word or ">" in word and
not word == "->"): # turn off comment
possibleComment = False
#del words[0]
wordPOSMap.append((word, POSIndices))
POSIndices = []
match = True
# print "match annotation!:" + word
del words[0] # word is consumed
if len(words) > 0:
word = words[0]
wordtest = re.sub(r"[\.\,\?\/\)\(\"\!\\]",
"", word)
wordtest = wordtest.replace("(", "")
wordtest = wordtest.replace(")", "")
else:
break
continue # carry on to next word
else:
myPOS = utt.regularize_pos_lemmas()
while j < len(myPOS):
pos = myPOS[j][0] # pair of (word,POS)
# print "j number of pos : " + str(len(myPOS))
# print "j loop word : " + word
# print "j loop wordtest : " + wordtest
# print "j pos : " + str(j) + " " + str(pos)
# raw_input()
breaker = False
if wordtest == pos or word == pos: # exact match
POSIndices.append(j)
wordPOSMap.append((word, POSIndices))
# print "match!:" + word + " in file/utt: "\
# + str(utt.swda_filename) + \
# str(utt.transcript_index))
del words[0] # word is consumed
if len(words) > 0:
word = words[0] # next word
wordtest = re.sub(
r"[\.\,\?\/\)\(\"\!\\]",
"", word)
wordtest = wordtest.replace("(", "").\
replace(")", "").replace("/", "")
POSIndices = []
j += 1 # increment lead number
match = True
breaker = True
# raw_input()
break
elif (pos in wordtest or pos in word) \
and not pos in [",", "."]:
# substring relation
testpos = pos
POSIndices.append(j)
j += 1
if wordtest[-1] == "-" and \
pos == wordtest[0:-1]:
wordPOSMap.append((word, POSIndices))
del words[0] # remove word
# print "match!:" + word + " in \
# file/utt: " + str(utt.swda_filename) \
#+ str(utt.transcript_index)
if len(words) > 0:
word = words[0]
wordtest = re.sub(
r"[\.\,\?\/\)\(\"\!\\]",
"", word)
wordtest = wordtest.\
replace("(", "").\
replace(")", "").\
replace("/", "")
POSIndices = []
match = True
breaker = True
break
for k in range(j, j + 3):
if (k >= len(myPOS)):
breaker = True
break
if (testpos + myPOS[k][0]) in wordtest\
or (testpos + myPOS[k][0]) in word:
testpos += myPOS[k][0]
POSIndices.append(k)
j += 1
# concatenation
if testpos == wordtest or \
testpos == word: # matched
wordPOSMap.append((word,
POSIndices))
del words[0] # remove word
# print "match!:" +\
# word + " in file/utt: " + \
# str(utt.swda_filename) +\
# str(utt.transcript_index))
if len(words) > 0:
word = words[0]
wordtest = re.sub(
r"[\.\,\?\/\)\(\"\!\\]",
"", word)
wordtest = wordtest.\
replace("(", "")
wordtest = wordtest.\
replace(")", "")
POSIndices = []
j = k + 1
match = True
breaker = True
break
else:
j += 1 # otherwise go on
if breaker:
break
if match:
break
                    # could not match word! Could be mistranscription
if not match:
# print "false checking other options"
# print j
# print word
# print wordtest
if not mistranscribe:
mistranscribe = True
for pair in possibleMistranscription:
if pair[0] == wordtest:
wordtest = pair[1]
break # matched
if wordtest[-1] == "-": # partial words
wordtest = wordtest[0:-1]
if "'" in wordtest:
wordtest = wordtest.replace("'", "")
if len(wordPOSMap) > 0:
found = False
for n in range(
len(wordPOSMap) - 1, -1, -1):
if len(wordPOSMap[n][1]) > 0:
j = wordPOSMap[n][1][-1] + 1
# print j
found = True
break
if not found:
# if not possible go back to
# the beginning!
j = 0
else:
j = 0
# print j
else:
mistranscribe = False
wordPOSMap.append((word, POSIndices))
errormessage = "WARNING: no/partial POS \
mapping for ''" + words[0] + "'' in file/utt:"\
+ str(utt.swda_filename) + "-" + \
str(utt.transcript_index) + \
"POSSIBLE COMMENT = " + \
str(possibleComment)
del words[0] # remove word
if len(words) > 0:
word = words[0]
wordtest = re.sub(r"[\.\,\?\/\)\(\"\!\\]",
"", word)
wordtest = wordtest.replace("(", "").\
replace(")", "").replace("/", "")
# print errormessage
if errorLog:
errorLog.write("possible wrong POS : " +
errormessage + "\n")
# raw_input()
# end of while loop (words)
                if len(wordPOSMap) != len(utt.text_words()):
                    print "Error"
                    print "Length mismatch in file/utt: " + \
                        str(utt.swda_filename) + " " + str(utt.transcript_index)
print utt.text_words()
print wordPOSMap
raw_input()
wordPOSMapList.append(trans.conversation_no,
str(utt.transcript_index),
list(wordPOSMap))
# print "\nadded POSmap " + str(trans.swda_filename) + \
#"." + str(utt.transcript_index) + "\n"
csv_string = '"' + str(wordPOSMap) + '"'
corpus_file.write('"' + str(utt.conversation_no) +
'"\t' + str(utt.transcript_index) +
'\t' + csv_string + "\n")
count += 1
corpus_file.close()
if errorLog:
errorLog.close()
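# Usage sketch (grounded in the POSMapWriter constructor defaults above;
# constructing the object immediately writes the map files):
#   POSMapWriter(corpus_path="../swda", metadata_path="swda-metadata.csv",
#                target_folder_path="Maps", ranges=None, errorLog=None)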
if __name__ == '__main__':
t = TreeMapWriter()
| 53.393013 | 86 | 0.329598 | 47,452 | 0.97023 | 0 | 0 | 0 | 0 | 0 | 0 | 10,177 | 0.208085 |
a8b79a3e7277f7dbfdec484f93d81238ae19f3b6 | 2,650 | py | Python | account_ux/wizards/account_invoice_tax_wizard.py | odoo-mastercore/odoo-argentina | 58cdfe8610bae42f69ddb9d652a28eb3245f6a04 | [
"MIT"
] | 1 | 2021-01-25T15:57:58.000Z | 2021-01-25T15:57:58.000Z | account_ux/wizards/account_invoice_tax_wizard.py | odoo-mastercore/odoo-argentina | 58cdfe8610bae42f69ddb9d652a28eb3245f6a04 | [
"MIT"
] | null | null | null | account_ux/wizards/account_invoice_tax_wizard.py | odoo-mastercore/odoo-argentina | 58cdfe8610bae42f69ddb9d652a28eb3245f6a04 | [
"MIT"
] | 2 | 2020-10-17T16:36:02.000Z | 2021-01-24T10:20:05.000Z | ##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import models, fields, api
class AccountInvoiceTaxWizard(models.TransientModel):
_name = 'account.invoice.tax.wizard'
_description = 'Account Invoice Tax Wizard'
@api.model
def _get_invoice(self):
return self._context.get('active_id', False)
tax_id = fields.Many2one(
'account.tax',
'Tax',
required=True,
)
name = fields.Char(
'Tax Description',
required=True,
)
amount = fields.Float(
digits='Account',
required=True,
)
move_id = fields.Many2one(
'account.move',
'Invoice',
default=_get_invoice,
)
base = fields.Float(
digits='Account',
help='Not stored, only used to suggest amount',
)
account_analytic_id = fields.Many2one(
'account.analytic.account',
'Analytic Account',
)
invoice_type = fields.Selection(
related='move_id.type',
string='Invoice Type',
)
invoice_company_id = fields.Many2one(
'res.company',
'Company',
related='move_id.company_id',
)
@api.onchange('move_id')
def onchange_invoice(self):
self.base = self.move_id.amount_untaxed
@api.onchange('tax_id')
def onchange_tax(self):
res = self.tax_id.compute_all(self.base)
self.name = res.get('taxes', False) and res['taxes'][0].get(
'name', False) or False
@api.onchange('base', 'tax_id')
def onchange_base(self):
res = self.tax_id.compute_all(self.base)
self.amount = res.get('taxes', False) and res['taxes'][0].get(
'amount', False) or False
def confirm(self):
self.ensure_one()
if not self.move_id or not self.tax_id:
return False
invoice = self.move_id
res = self.tax_id.compute_all(self.base)
tax = res['taxes'][0]
val = {
'move_id': invoice.id,
'name': self.name,
'tax_id': self.tax_id.id,
'amount': self.amount,
'manual': True,
'sequence': 99,
'account_analytic_id': self.account_analytic_id.id,
'account_id': invoice.type in ('out_invoice', 'in_invoice') and (
tax['account_id'] or False) or (
tax['refund_account_id'] or False),
}
self.env['account.invoice.tax'].create(val)
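    # Usage sketch (hypothetical window action): the wizard is meant to be
    # opened from an invoice form view, so that ``active_id`` in the
    # context points at the current account.move (see ``_get_invoice``).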
| 30.113636 | 78 | 0.54566 | 2,363 | 0.891698 | 0 | 0 | 618 | 0.233208 | 0 | 0 | 780 | 0.29434 |
a8b831c04a0da0348f5f1df321ba4849d4c19f2a | 7,428 | py | Python | dslogparser/dslogparser.py | brettle/dslogparser | 81c822fbbe487ed64fd5da54b8e2d39f7a713382 | [
"MIT"
] | null | null | null | dslogparser/dslogparser.py | brettle/dslogparser | 81c822fbbe487ed64fd5da54b8e2d39f7a713382 | [
"MIT"
] | null | null | null | dslogparser/dslogparser.py | brettle/dslogparser | 81c822fbbe487ed64fd5da54b8e2d39f7a713382 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Parse the FRC Driver Station logs, which are packed binary data
# Notes on comparison to DSLog-Parse:
# D-P has packet_loss as a *signed* integer, which makes no sense. Unsigned looks sensible.
import datetime
import math
import re
import struct
import bitstring
MAX_INT64 = 2**63 - 1
DSLOG_TIMESTEP = 0.020
def read_timestamp(strm):
# Time stamp: int64, uint64
b1 = strm.read(8)
b2 = strm.read(8)
if not b1 or not b2:
return None
sec = struct.unpack('>q', b1)[0]
    frac = struct.unpack('>Q', b2)[0]  # unsigned fraction of a second
    # LabVIEW-style epoch: 1904-01-01 UTC
    dt = datetime.datetime(1904, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
    dt += datetime.timedelta(seconds=(sec + float(frac) / MAX_INT64))
return dt
class DSLogParser():
def __init__(self, input_file):
self.strm = open(input_file, 'rb')
self.record_time_offset = datetime.timedelta(seconds=DSLOG_TIMESTEP)
self.curr_time = None
self.read_header()
return
def close(self):
self.strm.close()
return
def read_records(self):
if self.version != 3:
raise Exception("Unknown file version number {}".format(self.version))
while True:
r = self.read_record_v3()
if r is None:
break
yield r
return
def read_header(self):
self.version = struct.unpack('>i', self.strm.read(4))[0]
if self.version != 3:
raise Exception("Unknown file version number {}".format(self.version))
self.curr_time = read_timestamp(self.strm)
return
def read_record_v3(self):
data_bytes = self.strm.read(10)
if not data_bytes or len(data_bytes) < 10:
return None
pdp_bytes = self.strm.read(25)
if not pdp_bytes or len(pdp_bytes) < 25:
# should not happen!!
raise EOFError("No data for PDP. Unexpected end of file.")
res = {'time': self.curr_time}
res.update(self.parse_data_v3(data_bytes))
res.update(self.parse_pdp_v3(pdp_bytes))
self.curr_time += self.record_time_offset
return res
@staticmethod
def shifted_float(raw_value, shift_right):
return raw_value / (2.0**shift_right)
@staticmethod
def unpack_bits(raw_value):
'''Unpack and invert the bits in a byte'''
status_bits = bitstring.Bits(bytes=raw_value)
# invert them all
return [not b for b in status_bits]
@staticmethod
def uint_from_bytes(bytes, offset, size_in_bits):
'''Pull out an unsigned int from an array of bytes, with arbitrary bit start and length'''
first_byte = math.floor(offset / 8)
num_bytes = math.ceil(size_in_bits / 8)
if num_bytes == 1:
uint = struct.unpack_from('>B', bytes, first_byte)[0]
elif num_bytes == 2:
uint = struct.unpack_from('>H', bytes, first_byte)[0]
else:
            # not needed here, and the general case is harder
            raise NotImplementedError('only 1- and 2-byte reads are supported')
# Need to mask off the incorrect high bits and then shift right to get rid of the incorrect low bits
left_bitshift = offset - first_byte * 8
right_bitshift = num_bytes * 8 - size_in_bits - left_bitshift
return (uint & (0xFFFF >> left_bitshift)) >> right_bitshift
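    # Worked example (illustrative values): for offset=8, size_in_bits=10
    # the field starts on a byte boundary, so first_byte=1, num_bytes=2,
    # left_bitshift=0 and right_bitshift=6; the big-endian 16-bit read is
    # masked with 0xFFFF >> 0 and then shifted right by 6 to drop the bits
    # that belong to the next packed field.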
def parse_data_v3(self, data_bytes):
raw_values = struct.unpack('>BBHBcBBH', data_bytes)
status_bits = self.unpack_bits(raw_values[4])
res = {
'round_trip_time': self.shifted_float(raw_values[0], 1),
'packet_loss': 0.04 * raw_values[1], # not shifted
'voltage': self.shifted_float(raw_values[2], 8),
'rio_cpu': 0.01 * self.shifted_float(raw_values[3], 1),
'can_usage': 0.01 * self.shifted_float(raw_values[5], 1),
'wifi_db': self.shifted_float(raw_values[6], 1),
'bandwidth': self.shifted_float(raw_values[7], 8),
'robot_disabled': status_bits[7],
'robot_auto': status_bits[6],
'robot_tele': status_bits[5],
'ds_disabled': status_bits[4],
'ds_auto': status_bits[3],
'ds_tele': status_bits[2],
'watchdog': status_bits[1],
'brownout': status_bits[0],
}
return res
def parse_pdp_v3(self, pdp_bytes):
# from CD post https://www.chiefdelphi.com/forums/showpost.php?p=1556451&postcount=11
# pdp_offsets = (8, 18, 28, 38, 52, 62, 72, 82, 92, 102, 116, 126, 136, 146, 156, 166)
# from DSLog-Reader
# these make more sense in terms of defining a packing scheme, so stick with them
# looks like this is a 64-bit int holding 6 10-bit numbers and they ignore the extra 4 bits
pdp_offsets = (8, 18, 28, 38, 48, 58,
72, 82, 92, 102, 112, 122,
136, 146, 156, 166)
vals = []
for offset in pdp_offsets:
vals.append(self.shifted_float(self.uint_from_bytes(pdp_bytes, offset, 10), 3))
total_i = 0.0
for i in vals:
total_i += i
# the scaling on R, V and T are almost certainly not correct
# need to find a reference for those values
res = {
'pdp_id': self.uint_from_bytes(pdp_bytes, 0, 8),
'pdp_currents': vals,
'pdp_resistance': self.uint_from_bytes(pdp_bytes, 176, 8),
'pdp_voltage': self.uint_from_bytes(pdp_bytes, 184, 8),
'pdp_temp': self.uint_from_bytes(pdp_bytes, 192, 8),
'pdp_total_current': total_i,
}
return res
class DSEventParser():
def __init__(self, input_file):
self.strm = open(input_file, 'rb')
self.version = None
self.start_time = None
self.read_header()
return
def close(self):
self.strm.close()
return
def read_records(self):
if self.version != 3:
raise Exception("Unknown file version number {}".format(self.version))
while True:
r = self.read_record_v3()
if r is None:
break
yield r
return
def read_header(self):
self.version = struct.unpack('>i', self.strm.read(4))[0]
if self.version != 3:
raise Exception("Unknown file version number {}".format(self.version))
self.start_time = read_timestamp(self.strm) # file starttime
return
def read_record_v3(self):
t = read_timestamp(self.strm)
if t is None:
return None
msg_len = struct.unpack('>i', self.strm.read(4))[0]
msg = struct.unpack('%ds' % msg_len, self.strm.read(msg_len))[0]
msg = msg.decode('ascii', "backslashreplace")
return {'time': t, 'message': msg}
@staticmethod
def find_match_info(filename):
rdr = DSEventParser(filename)
try:
for rec in rdr.read_records():
m = re.match(r'FMS Connected:\s+(?P<match>.*),\s+Field Time:\s+(?P<time>[0-9/ :]*)', rec['message'])
if m:
return {'match_name': m.group('match'),
'field_time': datetime.datetime.strptime(m.group('time'), '%y/%m/%d %H:%M:%S')}
finally:
rdr.close()
return None
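# Usage sketch (hypothetical file names; both classes expect the binary
# .dslog/.dsevents files written by the FRC Driver Station):
#   parser = DSLogParser('2018_03_10 14_23_00 Sat.dslog')
#   for rec in parser.read_records():
#       print(rec['time'], rec['voltage'], rec['pdp_total_current'])
#   parser.close()
#   info = DSEventParser.find_match_info('2018_03_10 14_23_00 Sat.dsevents')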
| 32.436681 | 116 | 0.585757 | 6,644 | 0.894453 | 556 | 0.074852 | 1,699 | 0.228729 | 0 | 0 | 1,757 | 0.236537 |
a8b9201f03e684e775e72ea44750cbcfe85144ef | 1,177 | py | Python | app/define/views.py | alejandro-mosso/language-service | 014377243d82174a3a4aeae398d1eb9100a079ac | [
"MIT"
] | null | null | null | app/define/views.py | alejandro-mosso/language-service | 014377243d82174a3a4aeae398d1eb9100a079ac | [
"MIT"
] | null | null | null | app/define/views.py | alejandro-mosso/language-service | 014377243d82174a3a4aeae398d1eb9100a079ac | [
"MIT"
] | null | null | null | from rest_framework.views import APIView
from rest_framework import status
from django.http import JsonResponse
from .services import DefineService
from .serializers import DefineSerializer
class DefineApiView(APIView):
service = DefineService()
"""
$ curl "http://localhost:9000/define?word=Hello"
"""
def get(self, request, format=None):
        serializer = DefineSerializer(data=request.GET)
        if serializer.is_valid():
            definition = self.service.meaning(word=serializer.validated_data['word'])
response = JsonResponse(content_type='application/json',
data=definition,
safe=False)
response.status_code = status.HTTP_200_OK
response['Access-Control-Allow-Origin'] = '*'
response['Access-Control-Allow-Headers'] = 'Origin, X-Requested-With, Content-Type, Accept'
return response
else:
return JsonResponse(content_type='application/json',
data={'error': 'Invalid request'},
safe=False,
status=status.HTTP_400_BAD_REQUEST)
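# Response sketch (illustrative; the success body is whatever
# DefineService.meaning returns, rendered as JSON):
#   $ curl "http://localhost:9000/define?word=Hello"
#   -> 200 with the definition payload, or 400 {"error": "Invalid request"}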
| 37.967742 | 103 | 0.596432 | 984 | 0.836024 | 0 | 0 | 0 | 0 | 0 | 0 | 240 | 0.203908 |
a8bb48e45e4b0f9fb98a1e00d99c968055bb32df | 4,004 | py | Python | tango_shared/settings.py | doismellburning/tango-shared-core | f0f35a2d068ecd6bf02f663e0d9ba21b36e5b22b | [
"MIT"
] | null | null | null | tango_shared/settings.py | doismellburning/tango-shared-core | f0f35a2d068ecd6bf02f663e0d9ba21b36e5b22b | [
"MIT"
] | null | null | null | tango_shared/settings.py | doismellburning/tango-shared-core | f0f35a2d068ecd6bf02f663e0d9ba21b36e5b22b | [
"MIT"
] | null | null | null | import django.conf.global_settings as DEFAULT_SETTINGS
### Tango-unique settings
# Thumbnail aliases determine the default image sizes for easy-thumbnails
THUMBNAIL_ALIASES = {
'': {
'thumb': {'size': (50, 50), 'autocrop': True, 'crop': 'smart', 'upscale': True},
'one_col': {'size': (60, 60), 'autocrop': True, 'crop': 'smart', 'upscale': True},
't_80': {'size': (80, 80), 'autocrop': True, 'crop': 'smart', 'upscale': True},
'two_col': {'size': (140, 140), 'autocrop': True,},
'two_col_crop': {'size': (140, 140), 'autocrop': True, 'crop': 'smart', 'upscale': True},
'three_col': {'size': (220, 220), 'autocrop': True,},
'three_col_crop': {'size': (220, 220), 'autocrop': True, 'crop': 'smart', 'upscale': True},
'three_col_uncrop': {'size': (220, 660), 'autocrop': True},
't_180': {'size': (180, 180), 'autocrop': True, 'crop': 'smart', 'upscale': True},
't_180t': {'size': (180, 240), 'autocrop': True, 'crop': '0,-10', 'upscale': True},
't_180u': {'size': (180, 240), 'autocrop': True},
't_360': {'size': (360, 360), 'autocrop': True, 'crop': 'smart', 'upscale': True},
't_360u': {'size': (360, 540), 'autocrop': True},
't_420': {'size': (420, 420), 'autocrop': True, 'crop': 'scale'},
't_420u': {'size': (420, 420), 'autocrop': True},
't_540': {'size': (540, 540), 'autocrop': True, 'crop': 'scale'},
't_540u': {'size': (540, 540), 'autocrop': True},
't_640': {'size': (640, 640), 'autocrop': True, 'crop': 'scale'},
't_720': {'size': (720, 720), 'autocrop': True, 'crop': 'scale'},
't_960': {'size': (960, 960), 'autocrop': True, 'crop': 'scale'},
},
}
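# Example template usage with easy-thumbnails (``person.photo`` is a
# hypothetical ImageField; the alias name selects a size defined above):
#   {% load thumbnail %}
#   <img src="{{ person.photo|thumbnail_url:'two_col_crop' }}" alt="">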
# sets default pagination
PAGINATE_BY = 25
# Google analytics GA code
GOOGLE_ANALYTICS_ID = ''
# Project name
PROJECT_NAME = 'tango'
# if set to false, RESTRICT_CONTENT_TO_SITE will allow
# sites/projects to share content.
# If true, content will be limited to the current site.
RESTRICT_CONTENT_TO_SITE = True
# If your site is a news source, set to True.
# This will attach the name of your organization to articles
# as well as add extra fields news organizations need,
# including options to mark content as
# opinion/editorial, dateline, and noting another source as
# the origin of the content.
NEWS_SOURCE = True
# Comment moderation settings
# Number of days after publication until comments close:
COMMENTS_CLOSE_AFTER = 30
# Number of days after publication until comments require moderation:
COMMENTS_MOD_AFTER = 15
# Tango apps; these should be added to the project's INSTALLED_APPS.
TANGO_APPS = (
'tango_capo',
'tango_shared',
'user_profiles',
'articles',
'autotagger',
'contact_manager',
'happenings',
'photos',
'video',
'typogrify',
'voting',
'easy_thumbnails',
)
### Django settings...
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# Adds Context processors you'll want.
TEMPLATE_CONTEXT_PROCESSORS = DEFAULT_SETTINGS.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
'tango_shared.context_processors.site_processor',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
| 35.122807 | 101 | 0.64985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,526 | 0.630869 |
a8bbe7e825f46ffee0807079debe846ac5d0afca | 1,374 | py | Python | examples/mini_site/steps/stale_steps.py | xrg/behave_manners | 93cb570e04af870f3401048d26dd7f0e8350759e | [
"BSD-2-Clause"
] | 3 | 2020-02-02T11:11:44.000Z | 2021-11-17T22:21:18.000Z | examples/mini_site/steps/stale_steps.py | xrg/behave_manners | 93cb570e04af870f3401048d26dd7f0e8350759e | [
"BSD-2-Clause"
] | null | null | null | examples/mini_site/steps/stale_steps.py | xrg/behave_manners | 93cb570e04af870f3401048d26dd7f0e8350759e | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: UTF-8 -*-
from __future__ import print_function
from behave import given, when, then, step
from behave_manners.pagelems import DOMScope
from behave_manners.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
@when(u'I click to have the dropdown visible')
def click_dropdown1(context):
if not context.cur_element['input'].owns:
context.cur_element.click()
context.cur_element._scope.wait_all('short', welem=context.cur_element._remote)
print("Owns: %s" % context.cur_element['input'].owns)
context.cur_overlays = context.cur_element['overlays']
@when(u'I click again to hide the dropdown')
def click_hide_dropdown(context):
input_elem = context.cur_element['input']
if input_elem.owns:
input_elem.send_keys(Keys.ESCAPE)
assert not input_elem.owns
@when(u'I click again to show the dropdown')
def click_dropdown2(context):
context.cur_element['input'].send_keys('o')
context.cur_element._scope.wait_all('short', welem=context.cur_element._remote)
assert context.cur_element['input'].owns, "Did not present overlay"
@then(u'the previous dropdown component resolves')
def check_resolve_dropdown1(context):
print("Cur dropdown %s" % context.cur_element['input'].owns)
print("Cur overlays %s" % context.cur_overlays.is_displayed())
raise AssertionError
| 33.512195 | 87 | 0.749636 | 0 | 0 | 0 | 0 | 1,109 | 0.807132 | 0 | 0 | 317 | 0.230713 |
a8bcb5a8054da5895bcfc5e72fd283ac76f8d6a9 | 4,418 | py | Python | django_sendfile/utils.py | ParikhKadam/django-sendfile2 | 6145876dae2ffb838a249a0090fc20897b868981 | [
"BSD-3-Clause"
] | 46 | 2018-05-15T14:41:25.000Z | 2022-03-24T08:10:13.000Z | django_sendfile/utils.py | ParikhKadam/django-sendfile2 | 6145876dae2ffb838a249a0090fc20897b868981 | [
"BSD-3-Clause"
] | 45 | 2018-03-20T16:55:48.000Z | 2022-03-04T02:34:30.000Z | django_sendfile/utils.py | ParikhKadam/django-sendfile2 | 6145876dae2ffb838a249a0090fc20897b868981 | [
"BSD-3-Clause"
] | 15 | 2018-04-05T17:32:09.000Z | 2022-03-06T13:36:09.000Z | from functools import lru_cache
from importlib import import_module
from mimetypes import guess_type
from pathlib import Path, PurePath
from urllib.parse import quote, quote_plus
import logging
import unicodedata
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
logger = logging.getLogger(__name__)
@lru_cache(maxsize=None)
def _get_sendfile():
backend = getattr(settings, 'SENDFILE_BACKEND', None)
if not backend:
raise ImproperlyConfigured('You must specify a value for SENDFILE_BACKEND')
module = import_module(backend)
return module.sendfile
def _convert_file_to_url(path):
try:
url_root = PurePath(getattr(settings, "SENDFILE_URL", None))
except TypeError:
return path
path_root = PurePath(settings.SENDFILE_ROOT)
path_obj = PurePath(path)
relpath = path_obj.relative_to(path_root)
# Python 3.5: Path.resolve() has no `strict` kwarg, so use pathmod from an
# already instantiated Path object
url = relpath._flavour.pathmod.normpath(str(url_root / relpath))
return quote(str(url))
def _sanitize_path(filepath):
try:
path_root = Path(getattr(settings, 'SENDFILE_ROOT', None))
except TypeError:
raise ImproperlyConfigured('You must specify a value for SENDFILE_ROOT')
filepath_obj = Path(filepath)
# get absolute path
# Python 3.5: Path.resolve() has no `strict` kwarg, so use pathmod from an
# already instantiated Path object
filepath_abs = Path(filepath_obj._flavour.pathmod.normpath(str(path_root / filepath_obj)))
# if filepath_abs is not relative to path_root, relative_to throws an error
try:
filepath_abs.relative_to(path_root)
except ValueError:
        raise Http404('{} is not under SENDFILE_ROOT {}'.format(filepath_abs, path_root))
return filepath_abs
def sendfile(request, filename, attachment=False, attachment_filename=None,
mimetype=None, encoding=None):
"""
Create a response to send file using backend configured in ``SENDFILE_BACKEND``
``filename`` is the absolute path to the file to send.
If ``attachment`` is ``True`` the ``Content-Disposition`` header will be set accordingly.
This will typically prompt the user to download the file, rather
than view it. But even if ``False``, the user may still be prompted, depending
on the browser capabilities and configuration.
The ``Content-Disposition`` filename depends on the value of ``attachment_filename``:
``None`` (default): Same as ``filename``
``False``: No ``Content-Disposition`` filename
``String``: Value used as filename
If neither ``mimetype`` or ``encoding`` are specified, then they will be guessed via the
filename (using the standard Python mimetypes module)
"""
filepath_obj = _sanitize_path(filename)
    logger.debug("filename '%s' requested -> filepath '%s' obtained",
                 filename, filepath_obj)
_sendfile = _get_sendfile()
if not filepath_obj.exists():
raise Http404('"%s" does not exist' % filepath_obj)
guessed_mimetype, guessed_encoding = guess_type(str(filepath_obj))
if mimetype is None:
if guessed_mimetype:
mimetype = guessed_mimetype
else:
mimetype = 'application/octet-stream'
response = _sendfile(request, filepath_obj, mimetype=mimetype)
# Suggest to view (inline) or download (attachment) the file
parts = ['attachment' if attachment else 'inline']
if attachment_filename is None:
attachment_filename = filepath_obj.name
if attachment_filename:
attachment_filename = str(attachment_filename)
ascii_filename = unicodedata.normalize('NFKD', attachment_filename)
ascii_filename = ascii_filename.encode('ascii', 'ignore').decode()
parts.append('filename="%s"' % ascii_filename)
if ascii_filename != attachment_filename:
quoted_filename = quote_plus(attachment_filename)
parts.append('filename*=UTF-8\'\'%s' % quoted_filename)
response['Content-Disposition'] = '; '.join(parts)
response['Content-length'] = filepath_obj.stat().st_size
response['Content-Type'] = mimetype
if not encoding:
encoding = guessed_encoding
if encoding:
response['Content-Encoding'] = encoding
return response
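# Usage sketch (hypothetical view, imports elided; assumes SENDFILE_BACKEND
# and SENDFILE_ROOT are configured and ``doc.file.path`` lives under
# SENDFILE_ROOT):
#   def download(request, pk):
#       doc = get_object_or_404(Document, pk=pk, owner=request.user)
#       return sendfile(request, doc.file.path, attachment=True)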
| 34.248062 | 94 | 0.704391 | 0 | 0 | 0 | 0 | 270 | 0.061114 | 0 | 0 | 1,643 | 0.371888 |
a8bcfe7dd530695d5f14c79ff05aca1766647425 | 1,632 | py | Python | core/cmake_gens/tmake_cmake_generator_mac.py | tomken/tmake | ee4f2c709a3e021fb04427bd7ae72f99d2714176 | [
"MIT"
] | 2 | 2020-01-11T04:26:27.000Z | 2022-01-10T06:35:22.000Z | core/cmake_gens/tmake_cmake_generator_mac.py | tomken/tmake | ee4f2c709a3e021fb04427bd7ae72f99d2714176 | [
"MIT"
] | null | null | null | core/cmake_gens/tmake_cmake_generator_mac.py | tomken/tmake | ee4f2c709a3e021fb04427bd7ae72f99d2714176 | [
"MIT"
] | 1 | 2020-04-25T01:06:39.000Z | 2020-04-25T01:06:39.000Z | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""cmake generator mac file"""
import os
import core
from core.info.tmake_builtin import tmake_path
from .tmake_cmake_generator import CMakeGenerator, CMakeSourceItem
PLUGIN_VERSION = "1.0.0"
CMAKE_IOS_RESOURCE_TEMPLATE = """
set({} {})
set_source_files_properties(${{{}}} PROPERTIES MACOSX_PACKAGE_LOCATION {})
"""
class CMakeGeneratorOSX(CMakeGenerator):
"""cmake generator cmake class for OSX"""
def generate(self):
self.info.global_c_flags += " -g "
self.info.global_cxx_flags += " -g "
self.info.global_defines.append("PLATFORM_MAC")
for app in self.info.apps:
app.link_style = core.CXX_LIBRARY_LINK_STYLE_MACOSX_BUNDLE
if app.plist:
plist_path = tmake_path(app.plist)
app.properties["MACOSX_BUNDLE_INFO_PLIST"] = plist_path
CMakeGenerator.generate(self)
def create_build_vars(self):
arch = 'i386' if self.arch == core.TARGET_CPU_X86 else 'x86_64'
self.info.build_vars["CMAKE_OSX_ARCHITECTURES"] = arch
self.info.build_vars["CMAKE_C_COMPILER"] = 'clang'
self.info.build_vars["CMAKE_CXX_COMPILER"] = 'clang++'
def generate_module_common(self, module):
module.include_dirs.append("/usr/local/include")
module.lib_dirs.append("/usr/local/lib")
return CMakeGenerator.generate_module_common(self, module)
def generate_global_common(self):
self.info.global_defines.append("PLATFORM_MAC")
return ""
def cmake_plugin_init(arch):
"""cmake plugin entry"""
return CMakeGeneratorOSX(arch)
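# Entry-point sketch (how the host build system is presumably wired; the
# arch constant comes from tmake's own ``core`` module):
#   gen = cmake_plugin_init(core.TARGET_CPU_X86)
#   gen.generate()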
| 32 | 74 | 0.683824 | 1,173 | 0.71875 | 0 | 0 | 0 | 0 | 0 | 0 | 433 | 0.265319 |
a8be0db32e967cb0153586a373f4da4226c176be | 517 | py | Python | students/models.py | estudeplus/perfil | 58b847aa226b885ca6a7a128035f09de2322519f | [
"MIT"
] | null | null | null | students/models.py | estudeplus/perfil | 58b847aa226b885ca6a7a128035f09de2322519f | [
"MIT"
] | 21 | 2019-05-11T18:01:10.000Z | 2022-02-10T11:22:01.000Z | students/models.py | estudeplus/perfil | 58b847aa226b885ca6a7a128035f09de2322519f | [
"MIT"
] | null | null | null | from django.db import models
class Student(models.Model):
name = models.CharField(max_length=100)
student_id = models.CharField(max_length=10)
email = models.EmailField(max_length=254)
password = models.CharField(max_length=15)
def __str__(self):
return self.name
class InstitutionalEmail(models.Model):
address_email = models.CharField(max_length=50)
    title_email = models.CharField(max_length=20, default='Assunto do email', editable=False)  # default text is Portuguese for "Email subject"
body_email = models.TextField()
| 32.3125 | 93 | 0.742747 | 484 | 0.93617 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.034816 |
a8c0597a44875c6084c1bcd00b54ef0ec72bcfc2 | 1,087 | py | Python | essentials/oo-programming/device_log.py | iomegak12/intel-training-usecase-1 | 0d1ab6f6076f46f7fbb290ceb41d6b851da1af3a | [
"MIT"
] | null | null | null | essentials/oo-programming/device_log.py | iomegak12/intel-training-usecase-1 | 0d1ab6f6076f46f7fbb290ceb41d6b851da1af3a | [
"MIT"
] | null | null | null | essentials/oo-programming/device_log.py | iomegak12/intel-training-usecase-1 | 0d1ab6f6076f46f7fbb290ceb41d6b851da1af3a | [
"MIT"
] | null | null | null | class DeviceLog:
def __init__(self, deviceId, deviceName, temperature, location, recordDate):
self.deviceId = deviceId
self.deviceName = deviceName
self.temperature = temperature
self.location = location
self.recordDate = recordDate
def getStatus(self):
if self.temperature is None:
raise Exception('Invalid Temperature Value Specified!')
if self.temperature < 18:
status = 'COLD'
elif self.temperature >= 18 and self.temperature < 25:
status = 'WARM'
else:
status = 'HOT'
return status
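    # Threshold sketch (derived from the branches above):
    #   temperature=17 -> 'COLD', 24 -> 'WARM' (18 <= t < 25), 30 -> 'HOT'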
def __str__(self):
return '%s, %s, %s, %s, %s' % (self.deviceId, self.deviceName,
self.temperature, self.location, self.recordDate)
try:
device_log_object = DeviceLog(
'D1001', 'Device-X93984', 24, 'Bangalore', '2022-01-01')
print(device_log_object)
print('Status : %s' % device_log_object.getStatus())
except Exception as error:
print('Error Occurred, Details : %s ' % str(error))
| 31.057143 | 88 | 0.600736 | 809 | 0.74425 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.150874 |
a8c0d365f8b2270c4c27edb145a6a68f64309eca | 2,644 | py | Python | tigerline/management/commands/load_counties.py | adamfast/geodjango-tigerline | f071a49ec4c50955f4f5bcb40633a75fd53b9a3a | [
"BSD-3-Clause"
] | 11 | 2015-03-10T23:06:28.000Z | 2021-09-15T10:29:39.000Z | tigerline/management/commands/load_counties.py | adamfast/geodjango-tigerline | f071a49ec4c50955f4f5bcb40633a75fd53b9a3a | [
"BSD-3-Clause"
] | 2 | 2016-06-15T04:04:20.000Z | 2017-08-17T17:07:17.000Z | tigerline/management/commands/load_counties.py | adamfast/geodjango-tigerline | f071a49ec4c50955f4f5bcb40633a75fd53b9a3a | [
"BSD-3-Clause"
] | 7 | 2015-04-14T21:34:30.000Z | 2018-07-24T06:30:18.000Z | import datetime
import os
import sys
from django.core.management.base import BaseCommand
from django.conf import settings
try:
from django.contrib.gis.utils import LayerMapping
except ImportError:
print("gdal is required")
sys.exit(1)
from tigerline.models import County
def county_import(county_shp, year):
if year == "2010":
county_mapping = {
'state_fips_code': 'STATEFP10',
'fips_code': 'COUNTYFP10',
'county_identifier': 'GEOID10',
'name': 'NAME10',
'name_and_description': 'NAMELSAD10',
'legal_statistical_description': 'LSAD10',
'fips_55_class_code': 'CLASSFP10',
'feature_class_code': 'MTFCC10',
'functional_status': 'FUNCSTAT10',
'mpoly': 'POLYGON',
}
else:
county_mapping = {
'state_fips_code': 'STATEFP',
'fips_code': 'COUNTYFP',
'county_identifier': 'GEOID',
'name': 'NAME',
'name_and_description': 'NAMELSAD',
'legal_statistical_description': 'LSAD',
'fips_55_class_code': 'CLASSFP',
'feature_class_code': 'MTFCC',
'functional_status': 'FUNCSTAT',
'mpoly': 'POLYGON',
}
lm = LayerMapping(County, county_shp, county_mapping, encoding='LATIN1')
lm.save(verbose=True)
class Command(BaseCommand):
help = 'Installs the 2010-2016 tigerline files for counties'
def add_arguments(self, parser):
parser.add_argument('--path', default='', dest='path',
help='The directory where the county data is stored.'
)
def handle(self, *args, **kwargs):
path = kwargs['path']
# With DEBUG on this will DIE.
settings.DEBUG = False
# figure out which path we want to use.
years = ["2016", "2015", "2014", "2013", "2012", "2011", "2010"]
directories = [('tl_%s_us_county' % year, year) for year in years]
tiger_file = ""
for (directory, year) in directories:
if year == "2010":
directory = directory + "10"
if os.path.exists(os.path.join(path, directory)):
print('Found %s files.' % year)
                tiger_file = os.path.join(path, directory, directory + ".shp")
break
if not tiger_file:
print('Could not find files.')
exit()
print("Start Counties: %s" % datetime.datetime.now())
county_import(tiger_file, year)
print("End Counties: %s" % datetime.datetime.now())
| 31.855422 | 85 | 0.57829 | 1,226 | 0.463691 | 0 | 0 | 0 | 0 | 0 | 0 | 910 | 0.344175 |
a8c0f73f7ea21f51a4d844bc06568bc871e02184 | 14,318 | py | Python | nitorch/tools/registration/utils.py | balbasty/nitorch | d30c3125a8a66ea1434f2b39ed03338afd9724b4 | [
"MIT"
] | 46 | 2020-07-31T10:14:05.000Z | 2022-03-24T12:51:46.000Z | nitorch/tools/registration/utils.py | balbasty/nitorch | d30c3125a8a66ea1434f2b39ed03338afd9724b4 | [
"MIT"
] | 36 | 2020-10-06T19:01:38.000Z | 2022-02-03T18:07:35.000Z | nitorch/tools/registration/utils.py | balbasty/nitorch | d30c3125a8a66ea1434f2b39ed03338afd9724b4 | [
"MIT"
] | 6 | 2021-01-05T14:59:05.000Z | 2021-11-18T18:26:45.000Z | """Utility functions for registration algorithms.
Some (jg, jhj, affine_grid_backward) should maybe be moved to the `spatial`
module (?).
"""
from nitorch.core import py, utils, linalg
from nitorch import spatial
import torch
from . import optim as optm
def defaults_velocity(prm=None):
if prm is None:
prm = dict()
# values from SPM shoot
prm.setdefault('absolute', 1e-4)
prm.setdefault('membrane', 1e-3)
prm.setdefault('bending', 0.2)
prm.setdefault('lame', (0.05, 0.2))
prm.setdefault('voxel_size', 1.)
return prm
def defaults_template(prm=None):
if prm is None:
prm = dict()
# values from SPM shoot
prm.setdefault('absolute', 1e-4)
prm.setdefault('membrane', 0.08)
prm.setdefault('bending', 0.8)
prm.setdefault('voxel_size', 1.)
return prm
def loadf(x):
"""Load data from disk if needed"""
return x.fdata() if hasattr(x, 'fdata') else x
def savef(x, parent):
"""Save data to disk if needed"""
if hasattr(parent, 'fdata'):
parent[...] = x
else:
parent.copy_(x)
def smart_pull(image, grid, **kwargs):
"""spatial.grid_pull that accepts None grid"""
if image is None or grid is None:
return image
return spatial.grid_pull(image, grid, **kwargs)
def smart_push(image, grid, **kwargs):
"""spatial.grid_push that accepts None grid"""
if image is None or grid is None:
return image
return spatial.grid_push(image, grid, **kwargs)
def smart_exp(vel, **kwargs):
"""spatial.exp that accepts None vel"""
if vel is not None:
vel = spatial.exp(vel, **kwargs)
return vel
def smart_pull_grid(vel, grid, type='disp', *args, **kwargs):
"""Interpolate a velocity/grid/displacement field.
Notes
-----
Defaults differ from grid_pull:
- bound -> dft
- extrapolate -> True
Parameters
----------
vel : ([batch], *spatial, ndim) tensor
Velocity
grid : ([batch], *spatial, ndim) tensor
Transformation field
kwargs : dict
Options to ``grid_pull``
Returns
-------
pulled_vel : ([batch], *spatial, ndim) tensor
Velocity
"""
if grid is None or vel is None:
return vel
kwargs.setdefault('bound', 'dft')
kwargs.setdefault('extrapolate', True)
dim = vel.shape[-1]
if type == 'grid':
id = spatial.identity_grid(vel.shape[-dim-1:-1], **utils.backend(vel))
vel = vel - id
vel = utils.movedim(vel, -1, -dim-1)
vel_no_batch = vel.dim() == dim + 1
grid_no_batch = grid.dim() == dim + 1
if vel_no_batch:
vel = vel[None]
if grid_no_batch:
grid = grid[None]
vel = spatial.grid_pull(vel, grid, *args, **kwargs)
vel = utils.movedim(vel, -dim-1, -1)
if vel_no_batch:
vel = vel[0]
if type == 'grid':
id = spatial.identity_grid(vel.shape[-dim-1:-1], **utils.backend(vel))
vel += id
return vel
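# Shape sketch (illustrative): resampling a 3D displacement v of shape
# (X, Y, Z, 3) at a sampling grid phi of the same shape,
#   smart_pull_grid(v, phi)  ->  (X, Y, Z, 3)
# i.e. the field is moved channel-first, pulled, and moved back.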
def smart_pull_jac(jac, grid, *args, **kwargs):
"""Interpolate a jacobian field.
Notes
-----
Defaults differ from grid_pull:
- bound -> dft
- extrapolate -> True
Parameters
----------
jac : ([batch], *spatial_in, ndim, ndim) tensor
Jacobian field
grid : ([batch], *spatial_out, ndim) tensor
Transformation field
kwargs : dict
Options to ``grid_pull``
Returns
-------
    pulled_jac : ([batch], *spatial_out, ndim, ndim) tensor
Jacobian field
"""
if grid is None or jac is None:
return jac
kwargs.setdefault('bound', 'dft')
kwargs.setdefault('extrapolate', True)
dim = jac.shape[-1]
jac = jac.reshape([*jac.shape[:-2], dim*dim]) # collapse matrix
jac = utils.movedim(jac, -1, -dim - 1)
jac_no_batch = jac.dim() == dim + 1
grid_no_batch = grid.dim() == dim + 1
if jac_no_batch:
jac = jac[None]
if grid_no_batch:
grid = grid[None]
jac = spatial.grid_pull(jac, grid, *args, **kwargs)
jac = utils.movedim(jac, -dim - 1, -1)
jac = jac.reshape([*jac.shape[:-1], dim, dim])
if jac_no_batch:
jac = jac[0]
return jac
def smart_push_grid(vel, grid, *args, **kwargs):
"""Push a velocity/grid/displacement field.
Notes
-----
Defaults differ from grid_push:
- bound -> dft
- extrapolate -> True
Parameters
----------
vel : ([batch], *spatial, ndim) tensor
Velocity
grid : ([batch], *spatial, ndim) tensor
Transformation field
kwargs : dict
        Options to ``grid_push``
    Returns
    -------
    pushed_vel : ([batch], *spatial, ndim) tensor
Velocity
"""
if grid is None or vel is None:
return vel
kwargs.setdefault('bound', 'dft')
kwargs.setdefault('extrapolate', True)
dim = vel.shape[-1]
vel = utils.movedim(vel, -1, -dim-1)
vel_no_batch = vel.dim() == dim + 1
grid_no_batch = grid.dim() == dim + 1
if vel_no_batch:
vel = vel[None]
if grid_no_batch:
grid = grid[None]
vel = spatial.grid_push(vel, grid, *args, **kwargs)
vel = utils.movedim(vel, -dim-1, -1)
if vel_no_batch and grid_no_batch:
vel = vel[0]
return vel
def make_optim_grid(optim, lr=None, sub_iter=None, kernel=None, **prm):
"""Prepare optimizer for displacement/velocity"""
correct_keys = ('absolute', 'membrane', 'bending', 'lame',
'factor', 'voxel_size')
prm = {k: prm[k] for k in prm if k in correct_keys}
optim = (optm.GradientDescent() if optim == 'gd' else
optm.Momentum() if optim == 'momentum' else
optm.Nesterov() if optim == 'nesterov' else
optm.OGM() if optim == 'ogm' else
optm.GridCG(max_iter=sub_iter, **prm) if optim == 'cg' else
optm.GridRelax(max_iter=sub_iter, **prm) if optim == 'relax' else
optm.GridJacobi(max_iter=sub_iter, **prm) if optim == 'jacobi' else
optm.GridNesterov(max_iter=sub_iter, **prm) if optim.startswith('gnnesterov') else
optim)
if lr:
optim.lr = lr
if kernel is not None and hasattr(optim, 'preconditioner'):
optim.preconditioner = lambda x: spatial.greens_apply(x, kernel)
return optim
def make_optim_field(optim, lr=None, sub_iter=None, kernel=None, **prm):
"""Prepare optimizer for vector field"""
correct_keys = ('absolute', 'membrane', 'bending', 'factor', 'voxel_size')
prm = {k: prm[k] for k in prm if k in correct_keys}
optim = (optm.GradientDescent() if optim == 'gd' else
optm.Momentum() if optim == 'momentum' else
optm.Nesterov() if optim == 'nesterov' else
optm.OGM() if optim == 'ogm' else
optm.FieldCG(max_iter=sub_iter, **prm) if optim == 'cg' else
optm.FieldRelax(max_iter=sub_iter, **prm) if optim == 'relax' else
optim)
if lr:
optim.lr = lr
if kernel is not None and hasattr(optim, 'preconditioner'):
optim.preconditioner = lambda x: spatial.greens_apply(x, kernel)
return optim
def make_iteroptim_grid(optim, lr=None, ls=None, max_iter=None, sub_iter=None,
kernel=None, **prm):
"""Prepare iterative optimizer for displacement/velocity"""
if optim == 'lbfgs':
optim = optm.LBFGS(max_iter=max_iter)
else:
optim = make_optim_grid(optim, lr=lr, sub_iter=sub_iter,
kernel=kernel, **prm)
if not hasattr(optim, 'iter'):
optim = optm.IterateOptim(optim, max_iter=max_iter, ls=ls)
return optim
def make_optim_affine(optim, lr=None):
"""Prepare optimizer for affine matrices"""
optim = (optm.GradientDescent() if optim == 'gd' else
optm.Momentum() if optim == 'momentum' else
optm.Nesterov() if optim == 'nesterov' else
optm.OGM() if optim == 'ogm' else
optm.GaussNewton() if optim == 'gn' else
optim)
if lr:
optim.lr = lr
return optim
def make_iteroptim_affine(optim, lr=None, ls=None, max_iter=None):
"""Prepare iterative optimizer for displacement/velocity"""
if optim == 'lbfgs':
optim = optm.LBFGS(max_iter=max_iter)
else:
optim = make_optim_affine(optim, lr=lr)
if not hasattr(optim, 'iter'):
optim = optm.IterateOptim(optim, max_iter=max_iter, ls=ls)
return optim
@torch.jit.script
def _affine_grid_backward_g(grid, grad):
# type: (Tensor, Tensor) -> Tensor
dim = grid.shape[-1]
g = torch.empty([grad.shape[0], dim, dim+1], dtype=grad.dtype, device=grad.device)
for i in range(dim):
g[..., i, -1] = grad[..., i].sum(1, dtype=torch.double).to(g.dtype)
for j in range(dim):
g[..., i, j] = (grad[..., i] * grid[..., j]).sum(1, dtype=torch.double).to(g.dtype)
return g
@torch.jit.script
def _affine_grid_backward_gh(grid, grad, hess):
# type: (Tensor, Tensor, Tensor) -> Tuple[Tensor, Tensor]
dim = grid.shape[-1]
g = torch.zeros([grad.shape[0], dim, dim+1], dtype=grad.dtype, device=grad.device)
h = torch.zeros([hess.shape[0], dim, dim+1, dim, dim+1], dtype=grad.dtype, device=grad.device)
basecount = dim - 1
for i in range(dim):
basecount = basecount + i * (dim-i)
for j in range(dim+1):
if j == dim:
g[..., i, j] = (grad[..., i]).sum(1)
else:
g[..., i, j] = (grad[..., i] * grid[..., j]).sum(1)
for k in range(dim):
idx = k
if k < i:
continue
elif k != i:
idx = basecount + (k - i)
for l in range(dim+1):
if l == dim and j == dim:
h[..., i, j, k, l] = h[..., k, j, i, l] = hess[..., idx].sum(1)
elif l == dim:
h[..., i, j, k, l] = h[..., k, j, i, l] = (hess[..., idx] * grid[..., j]).sum(1)
elif j == dim:
h[..., i, j, k, l] = h[..., k, j, i, l] = (hess[..., idx] * grid[..., l]).sum(1)
else:
h[..., i, j, k, l] = h[..., k, j, i, l] = (hess[..., idx] * grid[..., j] * grid[..., l]).sum(1)
return g, h
def affine_grid_backward(*grad_hess, grid=None):
"""Converts ∇ wrt dense displacement into ∇ wrt affine matrix
g = affine_grid_backward(g, [grid=None])
g, h = affine_grid_backward(g, h, [grid=None])
Parameters
----------
grad : (..., *spatial, dim) tensor
Gradient with respect to a dense displacement.
hess : (..., *spatial, dim*(dim+1)//2) tensor, optional
Hessian with respect to a dense displacement.
grid : (*spatial, dim) tensor, optional
Pre-computed identity grid
Returns
-------
grad : (..., dim, dim+1) tensor
Gradient with respect to an affine matrix
hess : (..., dim, dim+1, dim, dim+1) tensor, optional
Hessian with respect to an affine matrix
"""
has_hess = len(grad_hess) > 1
grad, *hess = grad_hess
hess = hess.pop(0) if hess else None
del grad_hess
dim = grad.shape[-1]
shape = grad.shape[-dim-1:-1]
batch = grad.shape[:-dim-1]
nvox = py.prod(shape)
if grid is None:
grid = spatial.identity_grid(shape, **utils.backend(grad))
grid = grid.reshape([1, nvox, dim])
grad = grad.reshape([-1, nvox, dim])
if hess is not None:
hess = hess.reshape([-1, nvox, dim*(dim+1)//2])
grad, hess = _affine_grid_backward_gh(grid, grad, hess)
hess = hess.reshape([*batch, dim, dim+1, dim, dim+1])
else:
grad = _affine_grid_backward_g(grid, grad)
grad = grad.reshape([*batch, dim, dim+1])
return (grad, hess) if has_hess else grad
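# Usage sketch (illustrative): given a dense gradient g of shape
# (*spatial, dim) and a sparse Hessian h of shape (*spatial, dim*(dim+1)//2),
#   g_aff = affine_grid_backward(g)            # (dim, dim+1)
#   g_aff, h_aff = affine_grid_backward(g, h)  # plus (dim, dim+1, dim, dim+1)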
def jg(jac, grad, dim=None):
"""Jacobian-gradient product: J*g
Parameters
----------
jac : (..., K, *spatial, D)
grad : (..., K, *spatial)
Returns
-------
new_grad : (..., *spatial, D)
"""
if grad is None:
return None
dim = dim or (grad.dim() - 1)
grad = utils.movedim(grad, -dim-1, -1)
jac = utils.movedim(jac, -dim-2, -1)
grad = linalg.matvec(jac, grad)
return grad
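# Shape sketch (illustrative, no batch dimension): with K channels and
# D=3 spatial dims,
#   jac : (K, X, Y, Z, 3), grad : (K, X, Y, Z)  ->  jg(jac, grad) : (X, Y, Z, 3)
# the channel dimension K is contracted by the matrix-vector product.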
def jhj(jac, hess, dim=0):
"""Jacobian-Hessian product: J*H*J', where H is symmetric and stored sparse
The Hessian can be symmetric (K*(K+1)//2), diagonal (K) or
a scaled identity (1).
Parameters
----------
jac : (..., K, *spatial, D)
hess : (..., 1|K|K*(K+1)//2, *spatial)
Returns
-------
new_hess : (..., *spatial, D*(D+1)//2)
"""
if hess is None:
return None
dim = dim or (hess.dim() - 1)
hess = utils.fast_movedim(hess, -dim-1, -1)
jac = utils.fast_movedim(jac, -dim-2, -1)
@torch.jit.script
def _jhj(jac, hess):
# type: (Tensor, Tensor) -> Tensor
K = jac.shape[-1]
D = jac.shape[-2]
D2 = D*(D+1)//2
if hess.shape[-1] == 1:
hess = hess.expand(list(hess.shape[:-1]) + [K])
is_diag = hess.shape[-1] == K
out = hess.new_zeros(list(jac.shape[:-2]) + [D2])
dacc = 0
for d in range(D):
doffset = (d+1)*D - dacc # offdiagonal offset
dacc += d+1
# diagonal of output
hacc = 0
for k in range(K):
hoffset = (k+1)*K - hacc
hacc += k+1
out[..., d] += hess[..., k] * jac[..., d, k].square()
if not is_diag:
for i, l in enumerate(range(k+1, K)):
out[..., d] += 2 * hess[..., i+hoffset] * jac[..., d, k] * jac[..., d, l]
# off diagonal of output
for j, e in enumerate(range(d+1, D)):
hacc = 0
for k in range(K):
hoffset = (k+1)*K - hacc
hacc += k+1
out[..., j+doffset] += hess[..., k] * jac[..., d, k] * jac[..., e, k]
if not is_diag:
for i, l in enumerate(range(k+1, K)):
out[..., j+doffset] += hess[..., i+hoffset] * (jac[..., d, k] * jac[..., e, l] + jac[..., d, l] * jac[..., e, k])
return out
return _jhj(jac, hess) | 31.330416 | 141 | 0.547912 | 0 | 0 | 0 | 0 | 3,340 | 0.233208 | 0 | 0 | 3,990 | 0.278592 |
a8c1d8a918972ec6d7de78e07073b8569d788358 | 5,014 | py | Python | utils/The_Alchemist_Code/conceptcard.py | heboric/ouro | 5209a9946b39874ed02c2602baf39fd299de609b | [
"Apache-2.0"
] | 3 | 2019-04-25T22:24:22.000Z | 2019-11-21T08:39:33.000Z | utils/The_Alchemist_Code/conceptcard.py | heboric/ouro | 5209a9946b39874ed02c2602baf39fd299de609b | [
"Apache-2.0"
] | null | null | null | utils/The_Alchemist_Code/conceptcard.py | heboric/ouro | 5209a9946b39874ed02c2602baf39fd299de609b | [
"Apache-2.0"
] | 3 | 2018-08-13T17:49:34.000Z | 2019-02-14T22:30:09.000Z | from ._main import DIRS,LinkDB,ConvertFields,StrBuff,StrCondition,Embed, Rarity,GitLabLink
import copy
def Conceptcard(iname, page):
#get card dir
card=DIRS['Conceptcard'][iname]
#create basic embed
embed= Embed(
title='', #page name
url=LinkDB('card',iname,'',True,'jp') #page link
)
embed.set_author(name=card['name'],url=embed.url)#, url=LinkDB('card',iname))
embed.set_thumbnail(url=GitLabLink('/ConceptCardIcon/%s.png'%iname))#'http://cdn.alchemistcodedb.com/images/cards/icons/{}.png'.format(iname))
embed.set_image(url=GitLabLink('/ConceptCard/%s.png'%iname))#'http://cdn.alchemistcodedb.com/images/cards/artworks/{}.png'.format(iname))
while page:
if page=='main':
embed.ConvertFields(main(card))
embed.title='main'
break
break
return embed
def main(card):
    try:
        unit = DIRS['Unit']['UN_V2_' + card['iname'].rsplit('_', 2)[1]]['name']
    except (KeyError, IndexError):
        unit = 'None'
fields = [
{'name': 'Unit', 'value': unit, 'inline':True},
{'name': 'Rarity', 'value': Rarity(card['rare'],4), 'inline':True},
{'name': 'Enhance Cost', 'value': str(card['en_cost']), 'inline':True},
{'name': 'Enhance EXP', 'value': str(card['en_exp']), 'inline':True},
]
#Weapon Type ~ Tag
if 'trust_reward' in card:
fields.append({'name': 'Trust Reward(s)', 'value': '```\t%s```'%'\n\t'.join(
[
'%s%s (%s)'%('%sx'%reward['reward_num'] if reward['reward_num']>1 else '', DIRS[reward['reward_type']][reward['iname']]['name'],reward['reward_type'].replace('Artifact','Gear'))
for reward in DIRS['Conceptcardtrustreward'][card['trust_reward']]['rewards']
]
), 'inline': False})
#effects
for i,effect in enumerate(card['effects']):
value=[]
#cnds_iname
if 'cnds_iname' in effect:
value.append(
'__**Condition(s):**__\n'+CardConditions(effect['cnds_iname'])
)
#abil_iname
if 'abil_iname' in effect:
value.append('__**Vision Ability:**__\n'+
DIRS['Ability'][effect['abil_iname']]['name']
)
#skin
if 'skin' in effect:
value.append('__**Skin:**__\n'+DIRS['Artifact'][effect['skin']]['name'])
#statusup_skill ~ from equipping
if 'statusup_skill' in effect:
value.append('__**Stats:**__\n'+
StrBuff(DIRS['Skill'][effect['statusup_skill']]['target_buff_iname'],2,2)
)
#####
# LV 30: stats (x per level)
# Limit Break: x per level
# Max Limit Break: total stats (lv 40, all limit breaks, + mlb bonus)
#card_skill
buffs=[]
if 'card_skill' in effect:
buffs.append(DIRS['Skill'][effect['card_skill']]['target_buff_iname'])
value.append(
'__**LV 30 Stats:**__ %s'%(
StrBuff(DIRS['Skill'][effect['card_skill']]['target_buff_iname'],30,40)
)
)
#add_card_skill_buff_awake
if 'add_card_skill_buff_awake' in effect:
buffs.append(effect['add_card_skill_buff_awake'])
value.append('__**Limit Break Stats:**__\n1:%s\n5:%s'%(StrBuff(effect['add_card_skill_buff_awake'],1,5),StrBuff(effect['add_card_skill_buff_awake'],5,5))
)
#add_card_skill_buff_lvmax
if 'add_card_skill_buff_lvmax' in effect:
buffs.append(effect['add_card_skill_buff_lvmax'])
value.append('__**MLB Bonus Stats:**__\n'+
StrBuff(effect['add_card_skill_buff_lvmax'],2,2)
)
#total stats
if buffs:
value.append('__**Max Stats:**__\n'+
StrBuff(add_buffs(buffs),2,2)
)
for buff in buffs:
buff=DIRS['Buff'][buff]
if 'un_group' in buff:
value.append('__**Boosted Units:**__\n%s'%(', '.join([
DIRS['Unit'][unit]['name']
for unit in DIRS['Unitgroup'][buff['un_group']]['units']
])))
break
#elif costum target
fields.append({'name': 'Effect '+str(i+1), 'value': '\n'.join(value), 'inline':False})
#lore
fields.append({'name': 'Description', 'value': card['expr'], 'inline': False})
return fields
def CardConditions(iname):
conds=DIRS['Conceptcardconditions'][iname]
ret=[]
#unit group
if 'unit_group' in conds:
ret.append('Units: '+', '.join([
DIRS['Unit'][uiname]['name']
for uiname in DIRS['Unitgroup'][conds['unit_group']]['units']
]))
#job group
if 'job_group' in conds:
ret.append('Jobs: '+', '.join([
DIRS['Job'][jiname]['name']
for jiname in DIRS['Jobgroup'][conds['job_group']]['jobs']
]))
#element
if 'conditions_elements' in conds:
ret.append('Element: '+', '.join(conds['conditions_elements']))
#birth
if 'birth' in conds:
ret.append('Birth: '+', '.join(conds['birth']))
return '\n'.join(ret)
def add_buffs(buffs):
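    # Merge several Buff entries (inames or already-resolved dicts) into one
    # synthetic buff by summing value_ini/value_max/value_one per
    # (calc, type) pair.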
#check type
buffs=[
copy.deepcopy(DIRS['Buff'][buff]) if type(buff)==str else copy.deepcopy(buff)
for buff in buffs
]
#add effects up
types={'Scale':{},'Add':{},'Fixed':{}}
for buff in buffs:
for stat in buff['buffs']:
typ=types[stat['calc']]
if stat['type'] not in typ:
typ[stat['type']]=stat
else:
typ[stat['type']]['value_ini']+=stat['value_ini']
typ[stat['type']]['value_max']+=stat['value_max']
typ[stat['type']]['value_one']+=stat['value_one']
#revert back
return ({'buffs':[
buff
for typ,items in types.items()
for key,buff in items.items()
]}) | 31.734177 | 182 | 0.64639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,285 | 0.455724 |
a8c1f516c23fafb626557728f3570858f9201939 | 8,397 | py | Python | xdagtool/xdu_test.py | rulerson/xdagtool | 1eb7fb352dd7cadcd6611279720210e04a5ec3f6 | [
"MIT"
] | null | null | null | xdagtool/xdu_test.py | rulerson/xdagtool | 1eb7fb352dd7cadcd6611279720210e04a5ec3f6 | [
"MIT"
] | null | null | null | xdagtool/xdu_test.py | rulerson/xdagtool | 1eb7fb352dd7cadcd6611279720210e04a5ec3f6 | [
"MIT"
] | null | null | null | import uuid, os
from pathlib import Path
from xdagtool import xdu
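# These tests exercise xdu.path_copy (single files, nested files, empty and
# populated directory trees) and xdu.paths_copy (selective copying of a
# path list, where missing source entries are simply skipped).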
class TestPathCopy:
def test_file_copy1(self, tmp_path: Path, capsys):
# file a
srcpath = tmp_path / 'srcpath-{}'.format(uuid.uuid4())
destpath = tmp_path / 'destpath-{}'.format(uuid.uuid4())
with capsys.disabled():
print(tmp_path)
srcpath.mkdir(parents=True)
destpath.mkdir(parents=True)
tmpfname = 'a-{}'.format(uuid.uuid4())
tmp_txt = 'text-{}'.format(uuid.uuid4())
srcfile_path = srcpath / tmpfname
srcfile_path.write_text(tmp_txt)
destfile_path = destpath / tmpfname
xdu.path_copy(srcfile_path, destfile_path)
assert destfile_path.exists()
assert destfile_path.read_text() == tmp_txt
def test_file_copy2(self, tmp_path: Path, capsys):
# file a/b/c
srcpath = tmp_path / 'd1'
destpath = tmp_path / 'd2'
srcpath.mkdir(parents=True)
destpath.mkdir(parents=True)
tmpfile = Path('c') / 'b' / 'a-{}'.format(uuid.uuid4())
tmp_txt = 'text-{}'.format(uuid.uuid4())
srcfile_path = srcpath / tmpfile
srcfile_path.parent.mkdir(parents=True, exist_ok=True)
srcfile_path.write_text(tmp_txt)
destfile_path = destpath / tmpfile
xdu.path_copy(srcfile_path, destfile_path)
assert destfile_path.exists()
assert destfile_path.read_text() == tmp_txt
# @pytest.mark.skip(reason='temp')
def test_dir_copy1(self, tmp_path: Path, capsys):
# dir srcpath/1.dat 2.dat
srcpath = tmp_path / 'd1'
destpath = tmp_path / 'd2'
srcpath.mkdir(parents=True)
# src dir file list
tmp_txt1 = 'text-{}'.format(uuid.uuid4())
srcfile1 = srcpath / 'a-{}'.format(uuid.uuid4())
srcfile1.write_text(tmp_txt1)
tmp_txt2 = 'text-{}'.format(uuid.uuid4())
srcfile2 = srcpath / 'a-{}'.format(uuid.uuid4())
srcfile2.write_text(tmp_txt2)
# go
xdu.path_copy(srcpath, destpath)
# assert
assert destpath.exists()
destfile1 = destpath / srcfile1.name
assert destfile1.exists()
assert destfile1.read_text() == tmp_txt1
destfile2 = destpath / srcfile2.name
assert destfile2.exists()
assert destfile2.read_text() == tmp_txt2
def test_dir_copy2(self, tmp_path: Path, capsys):
# dir srcpath/ empty
srcpath = tmp_path / 'd1'
destpath = tmp_path / 'd2'
srcpath.mkdir(parents=True)
# src dir is empty
# go
xdu.path_copy(srcpath, destpath)
# assert
assert destpath.exists()
assert len(os.listdir(destpath)) == 0
def test_dir_copy3(self, tmp_path: Path, capsys):
# dir srcpath/b/c/ empty
srcpath = tmp_path / 'd1' / 'b' / 'c'
destpath = tmp_path / 'd2' / 'b' / 'c'
srcpath.mkdir(parents=True)
# src dir is empty
# go
xdu.path_copy(srcpath, destpath)
# assert
destdir = tmp_path / 'd2'
assert destdir.exists()
destdir = tmp_path / 'd2' / 'b'
assert destdir.exists()
destdir = tmp_path / 'd2' / 'b' / 'c'
assert destdir.exists()
assert len(os.listdir(destdir)) == 0
def test_dir_copy4(self, tmp_path: Path, capsys):
# dir srcpath/b/c/ 1.dat 2.dat
srcpath = tmp_path / 'd1' / 'b' / 'c'
destpath = tmp_path / 'd2' / 'b' / 'c'
srcpath.mkdir(parents=True)
# src dir files
tmp_txt1 = 'text-{}'.format(uuid.uuid4())
srcfile1 = srcpath / 'a-{}'.format(uuid.uuid4())
srcfile1.write_text(tmp_txt1)
tmp_txt2 = 'text-{}'.format(uuid.uuid4())
srcfile2 = srcpath / 'a-{}'.format(uuid.uuid4())
srcfile2.write_text(tmp_txt2)
# go
xdu.path_copy(srcpath, destpath)
# assert
destdir = tmp_path / 'd2'
assert destdir.exists()
destdir = tmp_path / 'd2' / 'b'
assert destdir.exists()
destdir = tmp_path / 'd2' / 'b' / 'c'
assert destdir.exists()
assert destpath.exists()
destfile1 = destpath / srcfile1.name
assert destfile1.exists()
assert destfile1.read_text() == tmp_txt1
destfile2 = destpath / srcfile2.name
assert destfile2.exists()
assert destfile2.read_text() == tmp_txt2
def test_dir_copy5(self, tmp_path: Path, capsys):
# dir srcpath/b/
# c1/d1/1.dat 2.dat # dir1
# c2/1.dat 2.dat # dir2
# c3/ # dir3
srcpath = tmp_path / 'd1' / 'b'
destpath = tmp_path / 'd2' / 'b'
srcpath.mkdir(parents=True)
# src dir initialize
# dir1
dir1 = srcpath / 'c1' / 'd1'
dir1.mkdir(parents=True)
dir1_tmp_txt1 = 'text-{}'.format(uuid.uuid4())
dir1_srcfile1 = dir1 / 'a-{}'.format(uuid.uuid4())
dir1_srcfile1.write_text(dir1_tmp_txt1)
dir1_tmp_txt2 = 'text-{}'.format(uuid.uuid4())
dir1_srcfile2 = dir1 / 'a-{}'.format(uuid.uuid4())
dir1_srcfile2.write_text(dir1_tmp_txt2)
# dir2
dir2 = srcpath / 'c2'
dir2.mkdir(parents=True)
dir2_tmp_txt1 = 'text-{}'.format(uuid.uuid4())
dir2_srcfile1 = dir2 / 'a-{}'.format(uuid.uuid4())
dir2_srcfile1.write_text(dir2_tmp_txt1)
dir2_tmp_txt2 = 'text-{}'.format(uuid.uuid4())
dir2_srcfile2 = dir2 / 'a-{}'.format(uuid.uuid4())
dir2_srcfile2.write_text(dir2_tmp_txt2)
# dir3
dir3 = srcpath / 'c3'
dir3.mkdir(parents=True)
# go
xdu.path_copy(srcpath, destpath)
# assert
assert destpath.exists()
# dir1
destdir = destpath / 'c1' / 'd1'
assert destdir.exists()
destfile1 = destdir / dir1_srcfile1.name
assert destfile1.exists()
assert destfile1.read_text() == dir1_tmp_txt1
# dir2
destdir = destpath / 'c2'
assert destdir.exists()
# dir3
destdir = destpath / 'c3'
assert destdir.exists()
class TestPathsCopy:
def test_paths_copy1(self, tmp_path: Path, shared_datadir: Path, capsys):
assert (shared_datadir / 'storage').exists()
xdu.paths_copy(str(shared_datadir / 'storage'), str(tmp_path / 'testdata'), ['01/75/f4/18.dat',
'01/75/f4/32.dat',
'01/75/f4/32.dat',
'01/75/f4/40.dat',
'01/75/f4/42.dat', # not exist
'01/75/f4/sums.dat',
'01/75/f7',
'01/75/f8', # not exist
'01/75/f9',
'empty1',
'empty2/empty3',])
assert (tmp_path / 'testdata/01/75/f4/40.dat').exists()
assert (tmp_path / 'testdata/01/75/f4/32.dat').exists()
assert (tmp_path / 'testdata/01/75/f4/sums.dat').exists()
assert (tmp_path / 'testdata/01/75/f7/01.dat').exists()
assert (tmp_path / 'testdata/01/75/f9/sums.dat').exists()
assert (tmp_path / 'testdata/empty1').exists()
assert (tmp_path / 'testdata/empty2/empty3').exists() and (tmp_path / 'testdata/empty2/empty3').is_dir()
assert not (tmp_path / 'testdata/01/75/f4/39.dat').exists()
assert not (tmp_path / 'testdata/01/75/fb').exists()
assert (not (tmp_path / 'storage/01/75/f4/42.dat').exists()) and (not (tmp_path / 'testdata/01/75/f4/42.dat').exists())
assert (not (tmp_path / 'storage/01/75/f8').exists()) and (not (tmp_path / 'testdata/01/75/f8').exists())
| 33.722892 | 127 | 0.519709 | 8,325 | 0.991426 | 0 | 0 | 0 | 0 | 0 | 0 | 1,365 | 0.162558 |
a8c4518ebd49ebea7566e47563f73c0667eeaa48 | 2,194 | py | Python | rnaseq.py | albertoriva/pipelines | cb7d89c44883c3ed43526b2e7079bcb233b5afd0 | [
"MIT"
] | null | null | null | rnaseq.py | albertoriva/pipelines | cb7d89c44883c3ed43526b2e7079bcb233b5afd0 | [
"MIT"
] | null | null | null | rnaseq.py | albertoriva/pipelines | cb7d89c44883c3ed43526b2e7079bcb233b5afd0 | [
"MIT"
] | null | null | null | # DibigActor
# (c) 2015, A. Riva, DiBiG
# RNAseq processing pipeline, v2.0
from SampleCollection import SampleCollection
from Logger import Logger
from Director import Director
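# ACT (the DibigActor driving this script) is provided by the framework that executes this pipeline file.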
D = Director(ACT)
## Initialization
ACT.loadConfiguration(ACT.Arguments[0])
ACT.bt2idx = ACT.getConf("bt2idx")
ACT.staridx = ACT.getConf("staridx")
ACT.rsemidx = ACT.getConf("rsemidx")
ACT.reference = ACT.checkPath(ACT.getConf("reference"))
ACT.genesdb = ACT.checkPath(ACT.getConf("genesdb"))
ACT.cufflinksMask = ACT.checkPath(ACT.getConf("cufflinksMask"))
ACT.fdr = ACT.getConf("fdr")
ACT.fc = float(ACT.getConf("fc"))
ACT.erccdb = ACT.getConf("ERCCdb")
ACT.bamcovd = ACT.getConf("bamcovd", default=5)
## A few additional things we need
ACT.fastqCountsPreTrim = "fastq-counts-pretrim.csv" # Number of input reads before trimming
ACT.fastqCounts = "fastq-counts.csv" # Number of input reads after trimming
ACT.beforeCounts = "beforeCounts.csv" # Number of aligned reads before dedup
ACT.afterCounts = "afterCounts.csv" # Number of aligned reads after dedup
ACT.genomeCounts = "genomeCounts.csv" # Number of aligned reads in genome BAM files
## Now define the pipeline
D.setSteps(ACT.getConf("steps"))
D.step('rnasamples')
D.step('fastqcount.1', outfile=ACT.fastqCountsPreTrim, propname="fastqCountsPre", delay=True)
D.step('trim')
D.step('fastqcount.2', outfile=ACT.fastqCounts, propname="fastqCounts", delay=True)
D.step('startx')
#D.step('tophat')
#D.step('cufflinks')
#D.step('cuffdiff')
D.step('merge', indexBAM=True, bamkey='genomebam', byCondition=True)
D.step('bamcount.1', outfile=ACT.afterCounts, propname='afterCounts', delay=True)
D.step('bamcount.2', outfile=ACT.genomeCounts, propname='genomeCounts', source='genomebam', delay=True)
D.step('bamcov')
D.step('kallisto')
D.step('rsemquant')
D.step('rsemdiff')
D.step('bamtowig', countsfile=ACT.genomeCounts, bamkey='genomebam')
D.step('hub', kind='rnaseq')
D.startAt(ACT.getConf("startat"))
D.stopAt(ACT.getConf("stopAt"))
D.showSteps()
## Action starts here
ACT.script(ACT.title, "RNAseq - Alignment and differential expression analysis")
ACT.begin(timestamp=False, copyConf=True)
ACT.initFiles()
D.RunScript()
ACT.cleanup()
| 32.746269 | 103 | 0.746126 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 924 | 0.421149 |
a8c4ef65d4167eae76cb55b333d7586f25c61d43 | 169 | py | Python | base/NeighborResult.py | Holly-Jiang/QCTSA | b90136b9df18fc21ae53b431f1e5e0c6ef786fae | [
"MIT"
] | null | null | null | base/NeighborResult.py | Holly-Jiang/QCTSA | b90136b9df18fc21ae53b431f1e5e0c6ef786fae | [
"MIT"
] | null | null | null | base/NeighborResult.py | Holly-Jiang/QCTSA | b90136b9df18fc21ae53b431f1e5e0c6ef786fae | [
"MIT"
] | null | null | null | class NeighborResult:
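    """Container for neighborhood-search state: solutions found, the chosen path, current index, and currently solved gates."""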
def __init__(self):
self.solutions = []
self.choose_path = []
self.current_num = 0
self.curr_solved_gates = []
| 24.142857 | 35 | 0.585799 | 168 | 0.994083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a8c6a442acbe32317e2a13205457bfa40d84b691 | 5,098 | py | Python | src/models/sequence_predictor_model.py | rajatdiptabiswas/NN_compression | 1a2650ad897bcc1f32f3b63d0a6477b8f6be6e29 | [
"MIT"
] | 211 | 2017-04-05T15:37:37.000Z | 2022-03-03T16:17:02.000Z | src/models/sequence_predictor_model.py | Benjamin-Etheredge/NN_compression | 1ded955ac6069299a8fbe37e14373f9e173ad912 | [
"MIT"
] | 14 | 2017-08-31T02:40:29.000Z | 2020-04-29T06:26:19.000Z | src/models/sequence_predictor_model.py | Benjamin-Etheredge/NN_compression | 1ded955ac6069299a8fbe37e14373f9e173ad912 | [
"MIT"
] | 45 | 2017-07-08T10:19:21.000Z | 2021-05-15T07:35:35.000Z | from __future__ import print_function
import os
import time
import tensorflow as tf
import numpy as np
import sys
from zoneout_wrapper import ZoneoutWrapper
class SequencePredictor():
def add_placeholders(self):
"""Generates placeholder variables to represent the input tensors
"""
self.inputs_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.max_length), name="x")
self.labels_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.max_length), name="y")
self.dropout_placeholder = tf.placeholder(tf.float32)
def create_feed_dict(self, inputs_batch, labels_batch=None, initial_state=None, keep_prob=1.0):
"""Creates the feed_dict for the model.
NOTE: You do not have to do anything here.
"""
feed_dict = {
self.inputs_placeholder: inputs_batch,
self.dropout_placeholder: keep_prob,
}
if labels_batch is not None:
feed_dict[self.labels_placeholder] = labels_batch
if initial_state is not None:
feed_dict[self.in_state] = initial_state
return feed_dict
def add_embedding(self):
""" Creates one-hot encoding for the input. No embedding is used as of now
"""
embedding = tf.one_hot(self.inputs_placeholder, self.config.num_classes)
return embedding
def add_prediction_op(self):
""" Get the input from the embedding layer
"""
x = self.add_embedding()
""" Create a RNN first & define a placeholder for the initial state
"""
if self.config.model_type == "gru":
cell = tf.nn.rnn_cell.GRUCell(self.config.hidden_size)
elif self.config.model_type == "rnn":
cell = tf.nn.rnn_cell.BasicRNNCell(self.config.hidden_size)
else:
raise Exception("Unsuppoprted model type...")
if self.config.regularization == "dropout":
cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.dropout_placeholder)
elif self.config.regularization == "zoneout":
cell = ZoneoutWrapper(cell, zoneout_prob=self.dropout_placeholder)
cell = tf.nn.rnn_cell.MultiRNNCell([cell] * self.config.num_layers, state_is_tuple=False)
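        # With state_is_tuple=False the multi-layer state is one concatenated tensor, which lets it round-trip through the single in_state placeholder below.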
batch_size = tf.shape(x)[0]
dynamic_max_length = tf.shape(x)[1]
zero_state = cell.zero_state(batch_size, tf.float32)
self.in_state = tf.placeholder_with_default(zero_state, [None, cell.state_size])
""" First find the sequence length and then use it to run the model
"""
#length = tf.reduce_sum(tf.reduce_max(tf.sign(x), 2), 1)
output, self.out_state = tf.nn.dynamic_rnn(cell, x, initial_state=self.in_state)
output = tf.reshape(output, shape=[-1, self.config.hidden_size])
""" Pass it through a linear + Softmax layer to get the predictions
"""
xavier_init = tf.contrib.layers.xavier_initializer()
W = tf.get_variable("W", shape=[self.config.hidden_size, self.config.num_classes], initializer=xavier_init )
b1 = tf.get_variable("b1", shape=[self.config.num_classes], initializer=xavier_init )
preds = tf.add(tf.matmul(output,W),b1)
preds = tf.reshape(preds, shape=[batch_size,dynamic_max_length, self.config.num_classes])
return preds
def add_loss_op(self, preds):
loss = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels_placeholder, logits=preds) )
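        # Dividing by ln(2) below converts the mean cross-entropy from nats to bits, i.e. bits per input symbol.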
scaled_loss = loss/np.log(2)
        tf.summary.scalar('loss', scaled_loss)
return scaled_loss
def add_training_op(self, loss):
"""Sets up the training Ops.
"""
global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
optimizer = tf.train.AdamOptimizer(self.config.lr)
train_op = optimizer.minimize(loss, global_step=global_step)
return global_step, train_op
def loss_on_batch(self, sess, inputs_batch, labels_batch, initial_state=None):
feed = self.create_feed_dict(inputs_batch=inputs_batch, labels_batch=labels_batch, initial_state=initial_state, keep_prob=1.0)
loss, out_state = sess.run([self.loss,self.out_state], feed_dict=feed)
return loss, out_state
def train_on_batch(self, sess, inputs_batch, labels_batch, initial_state=None, dropout=1.0):
feed = self.create_feed_dict(inputs_batch=inputs_batch, labels_batch=labels_batch, initial_state=initial_state, keep_prob=dropout)
_, loss,out_state,_step, summary = sess.run([self.train_op, self.loss, self.out_state, self.global_step, self.merged_summaries], feed_dict=feed)
return loss, out_state, _step, summary
def build(self):
self.add_placeholders()
self.pred = self.add_prediction_op()
self.loss = self.add_loss_op(self.pred)
self.global_step, self.train_op = self.add_training_op(self.loss)
self.merged_summaries = tf.summary.merge_all()
def __init__(self, config):
self.config = config
self.build()
| 42.840336 | 152 | 0.678501 | 4,938 | 0.968615 | 0 | 0 | 0 | 0 | 0 | 0 | 740 | 0.145155 |
a8c77d5a50c3e402e1747bc1dedba092e6e2fa50 | 4,131 | py | Python | machine/EventCase.py | technosvitman/sm_gene | 243d060fec7c642ce74843776a016ded13a68855 | [
"MIT"
] | null | null | null | machine/EventCase.py | technosvitman/sm_gene | 243d060fec7c642ce74843776a016ded13a68855 | [
"MIT"
] | 9 | 2021-08-11T07:25:53.000Z | 2021-09-01T11:10:33.000Z | machine/EventCase.py | technosvitman/sm_gene | 243d060fec7c642ce74843776a016ded13a68855 | [
"MIT"
] | null | null | null |
'''
@brief this class reflects an action decision regarding a condition
'''
class EventAction():
'''
@brief build event action
@param cond the conditions to perform the action
@param to the target state if any
@param job the job to do if any
'''
def __init__(self, cond="", to="", job=""):
self.__to = to
self.__job = job
self.__cond = cond
'''
@brief get action state target
@return state name
'''
def getState(self) :
return self.__to
'''
@brief has transition condition
@return true if not empty
'''
def hasCond(self) :
return ( self.__cond != "" )
'''
@brief get action conditions
@return condition
'''
def getCond(self) :
return self.__cond
'''
@brief get action job
@return job
'''
def getJob(self) :
return self.__job
'''
    @brief string representation for the event action
@return the string
'''
def __str__(self):
return "Act( %s, %s, %s )"%(self.__to, self.__job, self.__cond)
'''
@brief this class reflects the output switch on a received event, regarding its condition and the action to perform
'''
class EventCase():
'''
@brief build event case
@param event the event title
'''
def __init__(self, event):
self.__event = event
self.__acts = []
'''
@brief get iterator
'''
def __iter__(self):
return iter(self.__acts)
'''
@brief equality implementation
@param other the other element to compare with
'''
def __eq__(self, other):
if isinstance(other, str):
return self.__event == other
if not isinstance(other, EventCase):
return False
if self.__event != other.getEvent():
return False
return True
'''
@brief get action event
@return event
'''
def getEvent(self) :
return self.__event
'''
@brief add action
@param act the new action
'''
def addAct(self, act) :
if act not in self.__acts:
self.__acts.append(act)
'''
    @brief string representation for the event case
@return the string
'''
def __str__(self):
output = "Event( %s ) { "%self.__event
if len(self.__acts):
output += "\n"
for act in self.__acts:
output += "%s\n"%str(act)
return output + "}"
'''
@brief this class stores all event cases for a state
'''
class EventCaseList():
'''
@brief build event case list
'''
def __init__(self):
self.__events = []
'''
@brief get iterator
'''
def __iter__(self):
return iter(self.__events)
'''
@brief append from StateAction
@param act the state action
'''
def append(self, act):
for cond in act.getConds():
evt = None
a = EventAction(cond=cond.getCond(),\
to=act.getState(),\
job=act.getJob())
for e in self.__events:
if e == cond.getEvent():
evt = e
break
if not evt:
evt = EventCase(cond.getEvent())
self.__events.append(evt)
evt.addAct(a)
'''
@brief append from State
@param state the state
'''
def appendState(self, state):
for act in state.getActions():
self.append(act)
'''
    @brief string representation for the event case list
@return the string
'''
def __str__(self):
output = "{ "
if len(self.__events):
output += "\n"
for e in self.__events:
output += "%s\n"%str(e)
return output + "}" | 25.189024 | 107 | 0.487775 | 3,861 | 0.934641 | 0 | 0 | 0 | 0 | 0 | 0 | 1,683 | 0.407407 |
a8c7974a6483483179cdfc40e57d4d46564982cf | 1,995 | py | Python | tests/test_utils.py | initOS/dob-lib | a07ea11fb40d7bdc1714b96da52fbc89ed37fb7b | [
"Apache-2.0"
] | null | null | null | tests/test_utils.py | initOS/dob-lib | a07ea11fb40d7bdc1714b96da52fbc89ed37fb7b | [
"Apache-2.0"
] | null | null | null | tests/test_utils.py | initOS/dob-lib | a07ea11fb40d7bdc1714b96da52fbc89ed37fb7b | [
"Apache-2.0"
] | null | null | null | # © 2021 Florian Kantelberg (initOS GmbH)
# License Apache-2.0 (http://www.apache.org/licenses/).
import argparse
from unittest.mock import patch
import pytest
from doblib import utils
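# Covers doblib.utils: merge semantics, Version ordering, the default parser, config-file discovery, and call().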
def test_merge():
assert utils.merge([1, 2], [3, 4]) == [1, 2, 3, 4]
assert utils.merge({1, 2}, {3, 4}) == {1, 2, 3, 4}
assert utils.merge({3, 4}, {1, 2}) == {1, 2, 3, 4}
assert utils.merge([1, 2], {3, 4}) == {3, 4}
assert utils.merge({1: 2}, {1: 3}) == {1: 3}
assert utils.merge({1: 2}, {2: 3}) == {1: 2, 2: 3}
assert utils.merge({1: {2: 3}}, {1: {3: 4}}) == {1: {2: 3, 3: 4}}
assert utils.merge({1: {2: 3}}, {1: {3: 4}}, replace={1}) == {1: {3: 4}}
def test_keyboard_interrupt():
with pytest.raises(KeyboardInterrupt):
utils.raise_keyboard_interrupt()
def test_version():
ver = utils.Version("1.2.3")
assert ver == "1.2.3"
assert utils.Version(ver) == ver
assert utils.Version() == ()
assert utils.Version(1) == (1,)
assert utils.Version((1, 2, 3)) == (1, 2, 3)
assert utils.Version(None) == ()
ver = utils.Version("1.2.3")
assert str(ver) == "1.2.3"
assert ver == (1, 2, 3)
assert ver < 2
assert ver > "1.2"
assert ver <= (1, 2, 3)
assert ver >= (1, 2, 2)
def test_default_parser():
parser = utils.default_parser("test")
assert isinstance(parser, argparse.ArgumentParser)
@patch("os.path.isfile")
def test_config_file(mock):
found = []
mock.side_effect = lambda file: file in found
assert utils.get_config_file() is None
found = ["odoo.local.yaml"]
assert utils.get_config_file() == "odoo.local.yaml"
found = ["odoo.project.yaml"]
assert utils.get_config_file() == "odoo.project.yaml"
found = ["odoo.project.yaml", "odoo.local.yaml"]
assert utils.get_config_file() == "odoo.local.yaml"
def test_call():
output = utils.call("ls")
assert isinstance(output, str) and output
output = utils.call("ls", pipe=False)
assert output == 0
| 27.708333 | 76 | 0.595489 | 0 | 0 | 0 | 0 | 450 | 0.225451 | 0 | 0 | 285 | 0.142786 |
a8c9e7922225b107156958f1d399ae85adb6a1dc | 1,721 | py | Python | quedex_api/keys.py | mchalapuk/python-api | 370e3536da69de4b26c241fb9ff32aa0f1f895f3 | [
"Apache-2.0"
] | 17 | 2017-07-13T12:06:01.000Z | 2022-02-07T01:37:36.000Z | quedex_api/keys.py | mchalapuk/python-api | 370e3536da69de4b26c241fb9ff32aa0f1f895f3 | [
"Apache-2.0"
] | 17 | 2017-07-13T09:09:40.000Z | 2022-02-11T03:38:06.000Z | quedex_api/keys.py | mchalapuk/python-api | 370e3536da69de4b26c241fb9ff32aa0f1f895f3 | [
"Apache-2.0"
] | 10 | 2017-07-12T17:56:02.000Z | 2020-02-21T12:58:32.000Z |
quedex_public_key = """-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1
mQENBFlPvjsBCACr/UfHzXAezskLqcq9NiiaNFDDT5A+biC8VrOglB0ZSQOYRira
NgQ2Cp8Jd+XU77F+J1012BjB5y87Z+hdnwBDsqF7CjkjeQzsE3PSvm9I+E3cneqx
UcinRaUD1wfwVytbg9Q9rpqQ7CTjVWY1UPYjs6dAo1WAp/ux/VTeOFbpO0R3D7if
ZGY1QeISRpLWiMpcG2YCOALnuazABVCNXLhVqa8Y7tt2I+cI0uE9tBf41gjGPPtd
KdASPVz1plpOEl2dOpmy8jICqcSzUsT4Sy8vAqW3U1HF+TA2QGRcrrUItL4GjxNL
lcL8wh7mclsjRe5Q5dYnrACC9NWS6vSp/eAPABEBAAG0G1F1ZWRleCA8Y29udGFj
dEBxdWVkZXgubmV0PokBOAQTAQIAIgUCWU++OwIbAwYLCQgHAwIGFQgCCQoLBBYC
AwECHgECF4AACgkQzsLQUmv6vk9Rlwf+LiJA37dhDdGFU/fexNRHZWTUh2TdqBsv
MiNtarf+HlZIioWMCzlHmb3bolVrfFUNUh/GGlPENtlaSmFGuPhMlFcNDGYM+I7k
ufhM95jxmtzy97NYMeMx4xjnaBAu8kFsvi80BR/05ZhCHqyI3K9NpYoXBfsyzss+
j/jX1NHayzMmXNdqQ5JjzuICZj0EY9ryLP/jPAZ6DS9LVwi9Vr2JzZheCx5Q77Ud
HuGTOBu3Azor2f4n4ccELs7lgU7uGrt1cK/oiML9UDmqjelunzTFU/5Q0tp7C3Qm
1wymd+PYTvvX/5htnLar1nIuYmmvtCZb1zyuzPzJWWtCcFFsiV9kerkBDQRZT747
AQgAn/9rwQn4yM7pVYO9rTxNveI60lgp4RLKi1WYpAy6gxwUp3zLE/h59rykb7qM
9QArABsMEGGKt9urq7hjsKu8bM+hVTcAuoDre5qNFEfhZuPBS9LF7wQWDikORZxR
Mk5WIiCt3U2soQ4Lismw1bLDX8uqkv3GFtR+IaKzuwYBEVPwuZ15EOt9G83JR3uV
MKqeUtFW9+z5WEAh2JLU6C357sftJIJeWDEgF2TPtQOzc8isI8rpIFNyl6x1Aiy6
LaSWmOI3d9EQ8SH4LxCXtAgWvnIoPL0JsP5/FWzt6qJR4teu+A2xwG7001va+DUc
34AbSV9Ogqa519OfbKK6HDyFIQARAQABiQEfBBgBAgAJBQJZT747AhsMAAoJEM7C
0FJr+r5PtEUH/0KmXQWbm4qXxOnaXrk+CKLDBxtfY6BaoJ6ekdGfqVMd8YM+UGnL
6d49vex4O80uIhIDSex446gKVlhdwOjIlUFmTCtMgGOa06G2T4sx8a9y2UYK45hN
rj9aVfhJ8nn9yuPj7cBNtLEYJ4VkRKxJO9XX8cfhUsomhB3DQDbOLfikYqfmupm6
mYX84CO/DD8JAXx4qt9Rg+5AUQegq26iZ/og1ZjYZ/tvBjrc45u23XCWvgVHbGhb
wWCNjZijaY1VnTwTe6uZv1AqovZpprqZKWImN5myaJI3AJU2W2FCbI0ezfoVEVO4
zMipOYZzRziJeCz1gX9geNseLvfJ8EtZRKU=
=e4C9
-----END PGP PUBLIC KEY BLOCK-----"""
| 53.78125 | 64 | 0.933178 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,699 | 0.987217 |
a8ca1caaa92e342ad9f0cb6b39795b9f66f38f5b | 1,918 | py | Python | main.py | Habejota/Osys2 | ba0ee692e19c007e5b219b7b4ad8bb1304a660f1 | [
"MIT"
] | null | null | null | main.py | Habejota/Osys2 | ba0ee692e19c007e5b219b7b4ad8bb1304a660f1 | [
"MIT"
] | null | null | null | main.py | Habejota/Osys2 | ba0ee692e19c007e5b219b7b4ad8bb1304a660f1 | [
"MIT"
] | null | null | null | # Importing dependencies
from os import system, chdir, mkdir, rmdir
from time import sleep
from socket import gethostname, gethostbyname
# Information
__version__ = "1.0"
__tag_version__ = "Osys2 Beta"
# Ethernet and Socket informations
hostname = gethostname()
host = gethostbyname(hostname)
# OSys2 modules and functions
def clearDisplay():
system("cls")
# Applications
# Kernel
class kernel:
def __init__(self):
clearDisplay()
try:
self.db = open("home\config.log").read()
except FileNotFoundError:
try:
mkdir("home")
except FileExistsError:
a = open("home\config.log", "wt+")
print("Welcome to Osystem"), sleep(2)
print("")
print("here you can simule Linux in your Windows NT")
print("Create your accont to use this sub-desktop")
nsm = str(input("Username: ")).strip()
a.write(nsm)
a.close()
del nsm
else:
a = open("home\config.log", "wt+")
print("Welcome to Osystem"), sleep(2)
print("")
print("here you can simule Linux in your Windows NT")
print("Create your accont to use this sub-desktop")
nsm = str(input("Username: ")).strip()
a.write(nsm)
a.close()
del nsm
self.db = open("home\config.log").read()
clearDisplay()
            self.initial_message()
self.command_prompt()
def command_prompt(self):
while True:
cmd: str = input("{}@{}:$ ".format(hostname, self.db))
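            # Command dispatch is not implemented yet: input is read each iteration and discarded.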
    def initial_message(self):
print("Welcome to Osystem Beta: {} - (Gnu\Linux {})".format(__version__, __tag_version__))
print("")
# Running
kernel()
| 27.014085 | 98 | 0.535454 | 1,487 | 0.773673 | 0 | 0 | 0 | 0 | 0 | 0 | 552 | 0.287201 |
a8ca966f580953fa3afc118449b17258788e105f | 9,349 | py | Python | src/mybot_pkg/scripts/imu_pub.py | leytpapas/thesis_project | eeac775c13d035c2d68a03e5b448617a70c65072 | [
"MIT"
] | null | null | null | src/mybot_pkg/scripts/imu_pub.py | leytpapas/thesis_project | eeac775c13d035c2d68a03e5b448617a70c65072 | [
"MIT"
] | null | null | null | src/mybot_pkg/scripts/imu_pub.py | leytpapas/thesis_project | eeac775c13d035c2d68a03e5b448617a70c65072 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import rospy
import sys
from sensor_msgs.msg import Imu, Temperature
from geometry_msgs.msg import Pose
from tf.transformations import quaternion_from_euler
from math import radians
import serial
import subprocess
class Imu_unit(object):
def __init__(self, imu_topic=rospy.get_param('/imu_topic'), rate=rospy.get_param('/rate'),debug=False):
rospy.init_node('imu_pub', anonymous=True)
# self.serial_port = 'ls /dev/ttyUSB*'
self.serial_port = 'ls /dev/ttyACM*'
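        # Auto-detect the serial device by globbing /dev/ttyACM*; assumes a single USB-serial IMU is attached.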
p = subprocess.Popen(self.serial_port,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.port, err = p.communicate()
self.port = self.port.decode("utf-8").rstrip()
self.ser = serial.Serial(
port=self.port,
baudrate=115200,
# parity=serial.PARITY_NONE,
# stopbits=serial.STOPBITS_ONE,
# bytesize=serial.EIGHTBITS,
timeout=1
)
self.debug = debug
self.ser.flush()
self.data = {
"yaw": None,
"pitch": None,
"roll": None,
"ax": None,
"ay": None,
"az": None,
"gx": None,
"gy": None,
"gz": None
}
self.rate = rospy.Rate(rate)
self.pub = rospy.Publisher(imu_topic+"/data", Imu, queue_size=1)
self.temp_pub = rospy.Publisher(imu_topic+"/temp", Temperature, queue_size=1)
rospy.loginfo("Robot Imu Publisher Started...")
self.seq = 0
# # Factors for unit conversions
self.acc_fact = 1
self.gyr_fact = 1
self.time_present = rospy.Time.now()
self.time_past = rospy.Time.now()
self.accel_deadzone = 8
self.gyro_deadzone = 1
self.accel_x_offset = self.accel_y_offset = self.accel_z_offset = 0
self.rot_x_offset = self.rot_y_offset = self.rot_z_offset = 0
self.gyro_x_past = self.gyro_y_past = self.gyro_z_past = 0
self.gyro_x_present = self.gyro_y_present = self.gyro_z_present = 0
self.gyro_x_calli = self.gyro_y_calli = self.gyro_z_calli = 0
self.rot_x = self.rot_y = self.rot_z = 0
self.angle_x = self.angle_y = self.angle_z = 0
# self.callibrate(100)
self.Q = []
self.G = []
def serial_read(self):
while True:
try:
if self.ser.in_waiting > 0:
# line = self.ser.readline()
# print(line.decode('utf-8'))
line = self.ser.readline().decode('utf-8').rstrip()
values = line.split(",")
for v in values:
key,val = v.split(":")
self.data[key] = float(val)
self.gyro_x_past = self.gyro_x_present
self.gyro_y_past = self.gyro_y_present
self.gyro_z_past = self.gyro_z_present
self.time_past = self.time_present
self.time_present = rospy.Time.now()
return True
            except Exception:
print(line)
return False
def calculate_angle(self):
        # Trapezoidal integration per axis: each axis uses its own present/past gyro rate and calibration offset.
        self.angle_x = self.angle_x + ((self.time_present - self.time_past).to_sec() * (self.gyro_x_present + self.gyro_x_past - 2 * self.gyro_x_calli)) * 0.00000382
        self.angle_y = self.angle_y + ((self.time_present - self.time_past).to_sec() * (self.gyro_y_present + self.gyro_y_past - 2 * self.gyro_y_calli)) * 0.00000382
        self.angle_z = self.angle_z + ((self.time_present - self.time_past).to_sec() * (self.gyro_z_present + self.gyro_z_past - 2 * self.gyro_z_calli)) * 0.00000382
return self.angle_x, self.angle_y, self.angle_z
# def callibrate(self, times):
# while not self.serial_read() and sum(1 for key,value in self.data.items() if value is not None )!=7: pass
# print(self.data)
# Ax, Ay, Az, self.gyro_x_present,self.gyro_y_present,self.gyro_z_present, tempC = self.data["ax"],self.data["ay"],self.data["az"],self.data["gx"],self.data["gy"],self.data["gz"],self.data["T"]
# ax_offset =- Ax/8
# ay_offset =- Ay/8
# az_offset = (16384-Az)/8
# gx_offset =- self.gyro_x_present/4
# gy_offset =- self.gyro_y_present/4
# gz_offset =- self.gyro_z_present/4
# print("Starting Calibration. Please wait a few seconds")
# while True:
# ready = 0
# self.accel_x_offset = ax_offset
# self.accel_y_offset = ay_offset
# self.accel_z_offset = az_offset
# self.rot_x_offset = gx_offset
# self.rot_y_offset = gy_offset
# self.rot_z_offset = gz_offset
# # self.enablePrint()
# while not self.serial_read() and sum(1 for key,value in self.data.items() if value is not None)!=7: pass
# Ax, Ay, Az, self.gyro_x_present,self.gyro_y_present,self.gyro_z_present, tempC = self.data["ax"]-ax_offset,self.data["ay"]-ay_offset,self.data["az"]-az_offset,self.data["gx"]-gx_offset,self.data["gy"]-gy_offset,self.data["gz"]-gz_offset,self.data["T"]
# # print(self.data)
# # Ax, Ay, Az, self.gyro_x_present,self.gyro_y_present,self.gyro_z_present, tempC, angle = self.fetch_data()
# # self.blockPrint()
# # print ("rot_x=%.2f" %self.gyro_x_present, u'\u00b0'+ "/s", "\trot_y=%.2f" %self.gyro_y_present, u'\u00b0'+ "/s", "\trot_z=%.2f" %self.gyro_z_present, u'\u00b0'+ "/s", "\tAx=%.2f g" %Ax, "\tAy=%.2f g" %Ay, "\tAz=%.2f g" %Az)
# if abs(Ax)<=self.accel_deadzone:
# ready+=1
# else:
# ax_offset = ax_offset - Ax/self.accel_deadzone
# if abs(Ay)<=self.accel_deadzone:
# ready+=1
# else:
# ay_offset = ay_offset - Ay/self.accel_deadzone
# if abs(16384 - Az)<=self.accel_deadzone:
# ready+=1
# else:
# az_offset = az_offset - (16384 - Az)/self.accel_deadzone
# if abs(self.gyro_x_present) <= self.gyro_deadzone:
# ready += 1
# else:
# gx_offset=gx_offset - self.gyro_x_present/(self.gyro_deadzone+1)
# if abs(self.gyro_z_present) <= self.gyro_deadzone:
# ready += 1
# else:
# gz_offset = gz_offset - self.gyro_z_present/(self.gyro_deadzone+1)
# if abs(self.gyro_z_present) <= self.gyro_deadzone:
# ready += 1
# else:
# gz_offset = gz_offset - self.gyro_z_present/(self.gyro_deadzone+1)
# if ready == 6: break
# # time.sleep(0.002)
# # sleep(.5)
# # self.gyro_x_calli = self.gyro_x_calli // times
# # self.gyro_y_calli = self.gyro_y_calli // times
# # self.gyro_z_calli = self.gyro_z_calli // times
# print("Calibration ended")
def to_quaternion(self, yaw, pitch, roll): # yaw (Z), pitch (Y), roll (X)
        # Build a geometry_msgs Pose just to reuse its orientation field; tf handles the Euler-to-quaternion math.
pose = Pose()
quaternion = quaternion_from_euler(roll, pitch, yaw)
# print(quaternion)
pose.orientation.x = quaternion[0]
pose.orientation.y = quaternion[1]
pose.orientation.z = quaternion[2]
pose.orientation.w = quaternion[3]
return pose.orientation
def step(self):
msg = Imu()
msg.header.frame_id = "imu_link"
msg.header.stamp = rospy.Time.now()
msg.header.seq = self.seq
msg.orientation_covariance[0] = -1
        while not (self.serial_read() and all(value is not None for value in self.data.values())):
            pass
print("YAW", self.data["yaw"])
Ax, Ay, Az = self.data["ax"]/16384, self.data["ay"]/16384, self.data["az"]/16384
self.gyro_x_present,self.gyro_y_present,self.gyro_z_present = self.data["gx"]/131, self.data["gy"]/131, self.data["gz"]/131
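        # Scale factors consistent with an MPU-6050 at default ranges: 16384 LSB/g (accel), 131 LSB/(deg/s) (gyro).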
# x,y,z = Ax, Ay, Az
msg.linear_acceleration.x = Ax / self.acc_fact
msg.linear_acceleration.y = Ay / self.acc_fact
msg.linear_acceleration.z = Az / self.acc_fact
msg.linear_acceleration_covariance[0] = -1
# x,y,z = self.gyro_x_present,self.gyro_y_present, self.gyro_z_present
msg.angular_velocity.x = self.gyro_x_present / self.gyr_fact
msg.angular_velocity.y = self.gyro_y_present / self.gyr_fact
msg.angular_velocity.z = self.gyro_z_present / self.gyr_fact
msg.orientation = self.to_quaternion(self.data["yaw"], self.data["pitch"], self.data["roll"])
# msg_temp = Temperature()
# msg_temp.header.frame_id = "imu_link"
# msg_temp.header.stamp = rospy.Time.now()
# msg_temp.header.seq = self.seq
# msg_temp.temperature = tempC
# self.temp_pub.publish(msg_temp)
self.pub.publish(msg)
self.seq += 1
def spin(self):
while not rospy.is_shutdown():
if self.serial_read():
self.step()
self.rate.sleep()
if __name__ == '__main__':
robot_sense = Imu_unit()
try:
robot_sense.spin()
except rospy.ROSInterruptException:
pass | 42.303167 | 265 | 0.579099 | 8,964 | 0.958819 | 0 | 0 | 0 | 0 | 0 | 0 | 3,825 | 0.409135 |
a8caad8563cecc8db84796425abde40bb649818d | 385 | py | Python | Timbuchalka/Section-4/conditions.py | Advik-B/Learn-Python | 66ac57259764e8f2c3c6513a8de6c106800d8abe | [
"MIT"
] | 6 | 2021-07-26T14:21:25.000Z | 2021-07-26T14:32:01.000Z | Timbuchalka/Section-4/conditions.py | Advik-B/Learn-Python | 66ac57259764e8f2c3c6513a8de6c106800d8abe | [
"MIT"
] | 2 | 2021-12-10T10:25:19.000Z | 2021-12-10T10:27:15.000Z | Timbuchalka/Section-4/conditions.py | Advik-B/Learn-Python | 66ac57259764e8f2c3c6513a8de6c106800d8abe | [
"MIT"
] | null | null | null | age = int(input("How old are you ?"))
#if age >= 16 and age <= 65:
#if 16 <= age <= 65:
if age in range(16,66):
print ("Have a good day at work.")
elif age > 100 or age <= 0:
print ("Nice Try. This program is not dumb.")
endkey = input ("Press enter to exit")
else:
print (f"Enjoy your free time, you need to work for us after {65 - age} years.")
print ("-"*80)
| 20.263158 | 84 | 0.592208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.587013 |
a8cc30af5264812a9ba73581c7d77ae45adcd522 | 2,847 | py | Python | agents/fundamentalist.py | anthonyozerov/market-des | 178f89cfa9d83407c7622ce3cc7a74c467916362 | [
"MIT"
] | null | null | null | agents/fundamentalist.py | anthonyozerov/market-des | 178f89cfa9d83407c7622ce3cc7a74c467916362 | [
"MIT"
] | null | null | null | agents/fundamentalist.py | anthonyozerov/market-des | 178f89cfa9d83407c7622ce3cc7a74c467916362 | [
"MIT"
] | null | null | null | import sys
sys.path.append('..')
from agent import Agent
from probability import Probability
import copy
import data
from random import sample
import numpy as np
class Trader(Agent):
def init_params(self,params):
self.next_trades = [0, 0, 0, 0]
self.belief_state =[ [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0] ]
self.initial_hand = [0, 0, 0, 0]
self.sell_expectations = np.empty([0,data.m+1])
self.buy_expectations = np.empty([0,data.m+1])
self.color = 'skyblue'
self.r = params['r'] if 'r' in params else 1.5
#this overwrites the consider function of the parent Agent class
def consider(self):
if all( [v == 0 for v in self.initial_hand] ):
self.initial_hand = copy.copy(self.inventory)
self.belief_state[self.number] = copy.copy(self.inventory)
assets = data.assets
for asset in assets:
assetno = asset.assetno
j = self.next_trades[assetno]
while j < len(asset.trades):
trade = asset.trades[j]
buyerno = trade.buyer.number
sellerno = trade.seller.number
volume = trade.volume
self.belief_state[sellerno][assetno] = self.belief_state[sellerno][assetno] - volume if self.belief_state[sellerno][assetno] > 0 else 0
self.belief_state[buyerno][assetno] = self.belief_state[buyerno][assetno] + volume
j += 1
self.next_trades[assetno] = j
total_state = [0, 0, 0, 0]
for belief_list in self.belief_state:
for i in range(4):
total_state[i] = total_state[i] + belief_list[i]
### for now, I implemented the likelihoods using total_state, but we can just use the self.intial_hand as Alejandra mentioned by changing it below
likelihoods = Probability.model_probabilities(total_state)
values = [Probability.expected_value_v2(likelihoods, self.inventory, card_index, self.r) for card_index in range(0, 4)]
buy_values = [values[i][0] for i in range(4)]
sell_values = [values[i][1] for i in range(4)]
self.save_expectations([buy_values,sell_values])
for order in self.orders:
if order.buy == True:
if order.price > values[order.assetno][0]:
order.deleted = True
else:
if order.price < values[order.assetno][1]:
order.deleted = True
#print(likelihoods)
#print(values)
i = sample
assetno = sample(range(0,data.m),1)[0]
return self.get_order(assetno, values[assetno])
#to implement: making an order based on the price series seen
#(for example, see parent Agent class)
| 37.96 | 155 | 0.5915 | 2,683 | 0.942396 | 0 | 0 | 0 | 0 | 0 | 0 | 363 | 0.127503 |
a8cce89c7aef767b4e56ec6d4bce5ee004bdf34e | 124 | py | Python | examples/src/python/misc/__init__.py | ajorgensen/heron | 6430c51a4a6030e93018e0ed40e5936a64317636 | [
"Apache-2.0"
] | 2 | 2016-07-04T07:10:31.000Z | 2018-03-28T16:59:02.000Z | examples/src/python/misc/__init__.py | ajorgensen/heron | 6430c51a4a6030e93018e0ed40e5936a64317636 | [
"Apache-2.0"
] | null | null | null | examples/src/python/misc/__init__.py | ajorgensen/heron | 6430c51a4a6030e93018e0ed40e5936a64317636 | [
"Apache-2.0"
] | 1 | 2017-06-05T17:55:45.000Z | 2017-06-05T17:55:45.000Z | """Miscellaneous example topology-related modules"""
__all__ = ['test_task_hook']
from .test_task_hook import TestTaskHook
| 24.8 | 52 | 0.798387 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.548387 |
a8ce6fc717b4afcfe60ede52c71a36ecb551cce6 | 747 | py | Python | src/banditplot.py | schmit/human_interaction | e08be9bf45cb0b9de40cb3bdbcfa5f029173c1c1 | [
"MIT"
] | 4 | 2017-03-05T21:14:53.000Z | 2020-12-01T10:32:16.000Z | src/banditplot.py | schmit/human_interaction | e08be9bf45cb0b9de40cb3bdbcfa5f029173c1c1 | [
"MIT"
] | null | null | null | src/banditplot.py | schmit/human_interaction | e08be9bf45cb0b9de40cb3bdbcfa5f029173c1c1 | [
"MIT"
] | 1 | 2021-01-16T06:14:30.000Z | 2021-01-16T06:14:30.000Z | import numpy as np
import toyplot as tp
from banditutil import create_running_ema
def selection_emas(simulation_output, alpha=0.99):
k = simulation_output["k"]
rema = create_running_ema(alpha, initial=1/k)
return [rema((a == i for a in simulation_output["selection"]), return_list=True)
for i in range(k)]
def plot_scores(out, axes):
for i, score in enumerate(zip(*out["score"])):
T = len(score)
axes.plot(score)
axes.text(T-80, score[-1]+(-1)**i*0.1, "{:.3f}".format(score[-1]), style={"font-size":"14px"})
def plot_selection(out, axes, alpha=0.99):
remas = selection_emas(out, alpha)
for i, selection in enumerate(remas):
T = len(selection)
axes.plot(selection)
| 28.730769 | 102 | 0.645248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.06158 |
a8cfa6b65da313be457d7b1e85c1b549c3d118ce | 806 | py | Python | tests/test_reducer.py | nclarey/pyg-base | a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862 | [
"MIT"
] | null | null | null | tests/test_reducer.py | nclarey/pyg-base | a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862 | [
"MIT"
] | null | null | null | tests/test_reducer.py | nclarey/pyg-base | a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862 | [
"MIT"
] | 1 | 2022-01-03T21:56:14.000Z | 2022-01-03T21:56:14.000Z | from pyg_base import reducer, reducing, dictable
import pytest
from operator import add, mul
from functools import reduce
def test_reducer():
assert reducer(add, [1,2,3,4]) == 10
assert reducer(mul, [1,2,3,4]) == 24
assert reducer(add, [1]) == 1
assert reducer(add, []) is None
with pytest.raises(TypeError):
reduce(add, [])
def test_reducing():
from operator import mul
assert reducing(mul)([1,2,3,4]) == 24
assert reducing(mul)(6,4) == 24
assert reducing('__add__')([1,2,3,4]) == 10
assert reducing('__add__')(6,4) == 10
d = dictable(a = [1,2,3,5,4])
assert reducing('inc')(d, dict(a=1))
f = lambda a, b, c: a+b+c
assert reducing(f)([1,2,3,4,5], c = 0) == 15
assert reducing(f)([1,2,3,4,5], c = 1) == 19
| 26 | 48 | 0.580645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.028536 |
a8d1e944180d38f03c0bd79f64abd10f90aee5c1 | 1,978 | py | Python | newsapp/scraper/parser.py | atahanyorganci/dailyspokesman | e8c980e8ce63a2bf0596c9126b580f1429efe05b | [
"MIT"
] | null | null | null | newsapp/scraper/parser.py | atahanyorganci/dailyspokesman | e8c980e8ce63a2bf0596c9126b580f1429efe05b | [
"MIT"
] | 3 | 2020-01-06T11:42:15.000Z | 2021-06-19T13:11:02.000Z | newsapp/scraper/parser.py | atahanyorganci/dailyspokesman | e8c980e8ce63a2bf0596c9126b580f1429efe05b | [
"MIT"
] | null | null | null | from datetime import datetime
from typing import Any
from yarl import URL
from newsapp.config import Config
from newsapp.models.article import Article
from newsapp.scraper import NewsItem, ScrapeError, scrape
def get_category_url(category: str) -> URL:
category_path = Config.CATEGORIES[category]["url"]
return Config.SCRAPER_BASE_URL / category_path
def parse_category_page(category: str) -> set[NewsItem]:
category_url = get_category_url(category)
category_page = scrape(category_url)
urls: set[URL] = set()
for comp in category_page.findAll("div", {"class": "news-item"}):
url = URL(comp.a["href"])
slug = url.path.strip("/").split("/")[-1]
serial_no = int(slug.split("-")[-1])
# URL paths containing "?_szc_galeri" are not valid articles
if "?_szc_galeri" in url.path:
continue
# Check if Article with given serial_no exists
if Article.query.filter_by(serial_no=int(serial_no)).first():
continue
urls.add(NewsItem(url=url, slug=slug, serial_no=serial_no, category=category))
return urls
def parse_news_item(news_item: NewsItem) -> dict[str, Any]:
page = scrape(news_item.url)
article = page.find("article")
if article is None:
raise ScrapeError(f"{news_item} doesn't contain article element.")
parsed = {}
# Article content
parsed["title"] = article.h1.text
parsed["subtitle"] = article.h2.text
parsed["content"] = " ".join([p.text for p in article.findAll("p")])
# Link info
parsed["url"] = str(news_item.url)
parsed["category"] = news_item.category
parsed["serial_no"] = news_item.serial_no
# Parse article date
meta_date = article.findAll("span", {"class": "content-meta-date"})[-1]
date_str = meta_date.time["datetime"]
parsed["date"] = datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%S%z")
return parsed
| 32.42623 | 87 | 0.646613 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 399 | 0.201719 |
a8d250efd6232e3b2ad01e4163fc3cd5e0932a48 | 1,066 | py | Python | programmingz3/code/subterm_simplify.py | Z3Prover/doc | 3b5c4275b6556df4a95aafca605fd42ae600f9a8 | [
"MIT"
] | 21 | 2018-10-01T08:10:17.000Z | 2022-02-09T17:07:31.000Z | programmingz3/code/subterm_simplify.py | Z3Prover/doc | 3b5c4275b6556df4a95aafca605fd42ae600f9a8 | [
"MIT"
] | 3 | 2018-10-01T12:29:25.000Z | 2019-12-03T11:21:42.000Z | programmingz3/code/subterm_simplify.py | Z3Prover/doc | 3b5c4275b6556df4a95aafca605fd42ae600f9a8 | [
"MIT"
] | 15 | 2018-10-01T12:19:25.000Z | 2022-02-23T10:44:25.000Z | from z3 import *
H = Int('H')
s = Solver()
t = 4 + 4 * (((H - 1) / 2) / 2)
s.add(H % 4 == 0)
s.check()
m = s.model()
def subterms(t):
seen = {}
def subterms_rec(t):
if is_app(t):
for ch in t.children():
if ch in seen:
continue
seen[ch] = True
yield ch
for sub in subterms_rec(ch):
yield sub
return { s for s in subterms_rec(t) }
def are_equal(s, t1, t2):
s.push()
s.add(t1 != t2)
r = s.check()
s.pop()
return r == unsat
def simplify(slv, mdl, t):
subs = subterms(t)
values = { s : mdl.eval(s) for s in subs }
values[t] = mdl.eval(t)
def simplify_rec(t):
subs = subterms(t)
for s in subs:
if s.sort().eq(t.sort()) and values[s].eq(values[t]) and are_equal(slv, s, t):
return simplify_rec(s)
chs = [simplify_rec(ch) for ch in t.children()]
return t.decl()(chs)
return simplify_rec(t)
print(t, "-->", simplify(s, m, t))
| 24.227273 | 90 | 0.482176 | 0 | 0 | 347 | 0.325516 | 0 | 0 | 0 | 0 | 8 | 0.007505 |
a8d2869f1b9f1c0b78363d4c3113427c11c89cfc | 8,982 | py | Python | packages/robot/odahuflow/robot/libraries/sdk_wrapper.py | odahu/odahuflow | 58c3220a266a61bb893cf79c4b994569e3445097 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | packages/robot/odahuflow/robot/libraries/sdk_wrapper.py | odahu/odahuflow | 58c3220a266a61bb893cf79c4b994569e3445097 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | packages/robot/odahuflow/robot/libraries/sdk_wrapper.py | odahu/odahuflow | 58c3220a266a61bb893cf79c4b994569e3445097 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # for class Model
import json
from odahuflow.sdk import config
from odahuflow.sdk.clients.api_aggregated import parse_resources_file_with_one_item
from odahuflow.sdk.clients.configuration import ConfigurationClient
from odahuflow.sdk.clients.connection import ConnectionClient
from odahuflow.sdk.clients.deployment import ModelDeploymentClient
from odahuflow.sdk.clients.model import ModelClient
from odahuflow.sdk.clients.packaging import ModelPackagingClient
from odahuflow.sdk.clients.packaging_integration import PackagingIntegrationClient
from odahuflow.sdk.clients.route import ModelRouteClient
from odahuflow.sdk.clients.toolchain_integration import ToolchainIntegrationClient
from odahuflow.sdk.clients.training import ModelTrainingClient
from odahuflow.sdk.clients.batch_service import BatchInferenceServiceClient
from odahuflow.sdk.clients.batch_job import BatchInferenceJobClient
from odahuflow.sdk.clients.api import EntityAlreadyExists
from odahuflow.sdk.clients.user_info import UserInfoClient
class Login:
@staticmethod
def reload_config():
config._INI_FILE_TRIED_TO_BE_LOADED = False
config.reinitialize_variables()
class Configuration:
@staticmethod
def config_get(**kwargs):
return ConfigurationClient(**kwargs).get()
class Connection:
@staticmethod
def connection_get():
return ConnectionClient().get_all()
@staticmethod
def connection_get_id(conn_id: str):
return ConnectionClient().get(conn_id)
@staticmethod
def connection_get_id_decrypted(conn_id: str):
return ConnectionClient().get_decrypted(conn_id)
@staticmethod
def connection_put(payload_file):
api_object = parse_resources_file_with_one_item(payload_file).resource
return ConnectionClient().edit(api_object)
@staticmethod
def connection_post(payload_file):
api_object = parse_resources_file_with_one_item(payload_file).resource
return ConnectionClient().create(api_object)
@staticmethod
def connection_delete(conn_id: str):
return ConnectionClient().delete(conn_id)
class ModelDeployment:
@staticmethod
def deployment_get():
return ModelDeploymentClient().get_all()
@staticmethod
def deployment_get_id(dep_id: str):
return ModelDeploymentClient().get(dep_id)
@staticmethod
def deployment_put(payload_file, image=None):
api_object = parse_resources_file_with_one_item(payload_file).resource
if image:
api_object.spec.image = image
return ModelDeploymentClient().edit(api_object)
@staticmethod
def deployment_post(payload_file, *, id_=None, image=None):
api_object = parse_resources_file_with_one_item(payload_file).resource
if id_:
api_object.id = id_
if image:
api_object.spec.image = image
return ModelDeploymentClient().create(api_object)
@staticmethod
def deployment_delete(dep_id: str):
return ModelDeploymentClient().delete(dep_id)
@staticmethod
def deployment_get_default_route(dep_id: str):
return ModelDeploymentClient().get_default_route(dep_id)
class ModelPackaging:
@staticmethod
def packaging_get():
return ModelPackagingClient().get_all()
@staticmethod
def packaging_get_id(pack_id: str):
return ModelPackagingClient().get(pack_id)
@staticmethod
def packaging_put(payload_file, artifact_name=None):
api_object = parse_resources_file_with_one_item(payload_file).resource
if artifact_name:
api_object.spec.artifact_name = artifact_name
return ModelPackagingClient().edit(api_object)
@staticmethod
def packaging_post(payload_file, artifact_name=None):
api_object = parse_resources_file_with_one_item(payload_file).resource
if artifact_name:
api_object.spec.artifact_name = artifact_name
return ModelPackagingClient().create(api_object)
@staticmethod
def packaging_delete(pack_id: str):
return ModelPackagingClient().delete(pack_id)
@staticmethod
def packaging_get_log(pack_id):
log_generator = ModelPackagingClient().log(pack_id, follow=False)
# logs_list will be list of log lines
logs_list = list(log_generator)
text = "\n".join(logs_list)
return text
class ModelTraining:
@staticmethod
def training_get():
return ModelTrainingClient().get_all()
@staticmethod
def training_get_id(train_id: str):
return ModelTrainingClient().get(train_id)
@staticmethod
def training_put(payload_file):
api_object = parse_resources_file_with_one_item(payload_file).resource
return ModelTrainingClient().edit(api_object)
@staticmethod
def training_post(payload_file):
api_object = parse_resources_file_with_one_item(payload_file).resource
return ModelTrainingClient().create(api_object)
@staticmethod
def training_delete(train_id: str):
return ModelTrainingClient().delete(train_id)
@staticmethod
def training_get_log(train_id):
log_generator = ModelTrainingClient().log(train_id, follow=False)
# logs_list will be list of log lines
logs_list = list(log_generator)
text = "\n".join(logs_list)
return text
class ModelRoute:
@staticmethod
def route_get():
return ModelRouteClient().get_all()
@staticmethod
def route_get_id(route_id: str):
return ModelRouteClient().get(route_id)
@staticmethod
def route_put(payload_file):
api_object = parse_resources_file_with_one_item(payload_file).resource
return ModelRouteClient().edit(api_object)
@staticmethod
def route_post(payload_file):
api_object = parse_resources_file_with_one_item(payload_file).resource
return ModelRouteClient().create(api_object)
@staticmethod
def route_delete(route_id: str):
return ModelRouteClient().delete(route_id)
class Packager:
@staticmethod
def packager_get():
return PackagingIntegrationClient().get_all()
@staticmethod
def packager_get_id(pi_id: str):
return PackagingIntegrationClient().get(pi_id)
@staticmethod
def packager_put(payload_file):
api_object = parse_resources_file_with_one_item(payload_file).resource
return PackagingIntegrationClient().edit(api_object)
@staticmethod
def packager_post(payload_file):
api_object = parse_resources_file_with_one_item(payload_file).resource
return PackagingIntegrationClient().create(api_object)
@staticmethod
def packager_delete(pi_id: str):
return PackagingIntegrationClient().delete(pi_id)
class Toolchain:
@staticmethod
def toolchain_get():
return ToolchainIntegrationClient().get_all()
@staticmethod
def toolchain_get_id(ti_id: str):
return ToolchainIntegrationClient().get(ti_id)
@staticmethod
def toolchain_put(payload_file):
api_object = parse_resources_file_with_one_item(payload_file).resource
return ToolchainIntegrationClient().edit(api_object)
@staticmethod
def toolchain_post(payload_file):
api_object = parse_resources_file_with_one_item(payload_file).resource
return ToolchainIntegrationClient().create(api_object)
@staticmethod
def toolchain_delete(ti_id: str):
return ToolchainIntegrationClient().delete(ti_id)
class UserInfo:
@staticmethod
def user_info_get():
return UserInfoClient().get()
class Model:
@staticmethod
def model_get(base_url, model_route=None, model_deployment=None, url_prefix=None, **kwargs):
return ModelClient(
base_url,
model_route=model_route,
model_deployment=model_deployment,
url_prefix=url_prefix,
token=config.API_TOKEN
).info()
@staticmethod
def model_post(base_url, model_route=None, model_deployment=None, url_prefix=None, json_input=None, **kwargs):
return ModelClient(
base_url,
model_route=model_route,
model_deployment=model_deployment,
url_prefix=url_prefix,
token=config.API_TOKEN
).invoke(**json.loads(json_input))
class InferenceService:
@staticmethod
def service_post(payload_file):
api_object = parse_resources_file_with_one_item(payload_file).resource
try:
BatchInferenceServiceClient().create(api_object)
except EntityAlreadyExists:
pass
class InferenceJob:
@staticmethod
def job_post(payload_file):
api_object = parse_resources_file_with_one_item(payload_file).resource
en = BatchInferenceJobClient().create(api_object)
return en.id
@staticmethod
def job_get_id(id_: str):
return BatchInferenceJobClient().get(id_)
| 29.352941 | 114 | 0.725006 | 7,934 | 0.883322 | 0 | 0 | 7,423 | 0.826431 | 0 | 0 | 99 | 0.011022 |
a8d39e7bc43d7332ddc41fbd99b4d1baa9f1a728 | 3,398 | py | Python | src/lh/service_directives.py | Plazmaz/LiquidHoney | eb983a3223b01e19a360132f9661d4c68cd3ae8c | [
"MIT"
] | 20 | 2019-03-24T19:27:59.000Z | 2021-06-11T18:03:02.000Z | src/lh/service_directives.py | Plazmaz/LiquidHoney | eb983a3223b01e19a360132f9661d4c68cd3ae8c | [
"MIT"
] | null | null | null | src/lh/service_directives.py | Plazmaz/LiquidHoney | eb983a3223b01e19a360132f9661d4c68cd3ae8c | [
"MIT"
] | 1 | 2020-04-10T17:12:37.000Z | 2020-04-10T17:12:37.000Z | from abc import ABC
class ProbeConfig(object):
def __init__(self):
self.directives = {}
def add_directive(self, directive):
name = directive.keyword
if name not in self.directives:
self.directives[name] = []
self.directives[name].append(directive)
def get_directives(self, name):
return self.directives.get(name)
def has_directive(self, name):
return name in self.directives
def get_directive(self, name):
return self.directives.get(name)[0]
def __str__(self):
return ' '.join([s for s in self.directives])
class Directive(ABC):
"""
Represents a directive type.
See https://nmap.org/book/vscan-fileformat.html
"""
def __init__(self, keyword, param_count, raw):
self.keyword = keyword
self.raw = raw
self.parameters = raw.split(" ", param_count)[1:]
def validate(self):
pass
class Exclude(Directive):
"""
This line tells nmap what ports identified by the probe are found on
(only once per section)
"""
def __init__(self, raw):
super().__init__('exclude', 1, raw)
# This will need to be parsed into proper port format later
self.ports = self.parameters[0]
class Probe(Directive):
"""
This directive describes what nmap will send to fingerprint this service
"""
def __init__(self, raw):
super().__init__('probe', 3, raw)
self.protocol = self.parameters[0]
self.probename = self.parameters[1]
self.probestring = self.parameters[2]
def validate(self):
assert self.protocol == 'TCP' or self.protocol == 'UDP', \
'Invalid protocol {} found, expected "UDP" or "TCP"'.format(self.protocol)
class Match(Directive):
"""
This directive describes the response nmap is expecting to recieve for a service
"""
def __init__(self, raw):
super().__init__('match', 2, raw)
self.service = self.parameters[0]
self.raw_pattern = self.parameters[1]
self.pattern = None
self.flags = []
self.version_info = []
class SoftMatch(Match):
"""
Similar to match, but after a softmap, nmap will only send probes matching the given service.
This is intended to eventually lead to a 'hard' match that will provide more version info
"""
def __init__(self, raw):
super().__init__(raw)
self.service = self.parameters[0]
self.raw_pattern = self.parameters[1]
self.keyword = 'softmatch'
class Ports(Directive):
"""
This line tells nmap what ports identified by the probe are found on
(only once per section)
"""
def __init__(self, raw):
super().__init__('ports', 1, raw)
# This will need to be parsed into proper port format later
self.ports = self.parameters[0]
class SslPorts(Ports):
"""
Same as Ports, but wrapped in ssl
"""
def __init__(self, raw):
super().__init__(raw)
self.keyword = 'sslports'
class Rarity(Directive):
"""
Determines how frequently a probe returns useful results. The higher the number, the rarer the probe is
https://nmap.org/book/vscan-technique.html#vscan-selection-and-rarity
"""
def __init__(self, raw):
super().__init__('rarity', 1, raw)
self.rarity = int(self.parameters[0])
| 26.546875 | 107 | 0.629488 | 3,351 | 0.986168 | 0 | 0 | 0 | 0 | 0 | 0 | 1,194 | 0.351383 |
a8d46736754468d2c17428db5889acddd766302d | 14,224 | py | Python | Dnaml.py | williamdlees/BioTools | 4aeb0e620c11437b746bee87023343db08f80b70 | [
"MIT"
] | null | null | null | Dnaml.py | williamdlees/BioTools | 4aeb0e620c11437b746bee87023343db08f80b70 | [
"MIT"
] | null | null | null | Dnaml.py | williamdlees/BioTools | 4aeb0e620c11437b746bee87023343db08f80b70 | [
"MIT"
] | null | null | null | # Copyright (c) 2015 William Lees
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = 'William Lees'
__docformat__ = "restructuredtext en"
from Bio import AlignIO, SeqIO
from Bio import Phylo
from StringIO import StringIO
from Alignment import Alignment
from AnalyseCDR import AnalyseCDR
import os
import subprocess
import copy
import re
import sys
class Dnaml:
"""A class to manage preparation of files for dnaml, invocation, and parsing of results. At the moment this is
restricted to the use of dnaml for ancestral reconstruction, based on an existing tree."""
def __init__(self):
pass
def run_dnaml(self, seq_align, ptree, seqpattern, cdrfile, wdir, rep, tag=""):
"""Run dnaml. Arguments are:
seq_align: the input nt sequences (MultipleSequenceAlignment)
ptree: phylogenetic tree (Bio.Phylo)
seqpattern: A list of sequence number directives, in the format accepted by Alignment.set_position_numbers
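        cdrfile: optional CDR definition file used for the summary CDR/FR change tree (may be None)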
        wdir: the name of a directory that run_dnaml should use. This must exist already.
rep: a function that takes a string as an argument. This will be called should an error or warning be
necessary (may be called multiple times in one invocation).
tag: an optional tag to prefix filenames with
Sequences in seq_align must be the same length, must start on a codon boundary, and be an integral number
        of codons in length. The first sequence must be the ancestral sequence or outgroup. Exactly the same sequence
        names must occur in the alignment and the tree. Sequence name format is pretty flexible (sequences are
        mapped to names acceptable to dnaml and remapped after dnaml has run).
"""
root_id = seq_align[0].id
        # Translate clade names to something safe (dnaml/PHYLIP expects short fixed-width names, so everything is mapped to serial ids)
namedict = {}
serial = 1
for seq in seq_align:
namedict[seq.id] = "N%09d" % serial
seq.id = namedict[seq.id]
serial += 1
qtree = copy.deepcopy(ptree)
for clade in qtree.get_terminals():
if clade.name and clade.name in namedict:
clade.name = namedict[clade.name]
# Root the tree on the first record
first = "N%09d" % 1
try:
qtree.root_with_outgroup(qtree.find_clades(name=re.escape(first)).next())
except:
raise ValueError("Error: root sequence not found in tree.")
try:
inv_dict = {v: k for k, v in namedict.items()}
ptree.root_with_outgroup(ptree.find_clades(name=re.escape(inv_dict[first])))
Phylo.write(ptree, wdir + "/" + "input_treefile.new", "newick", plain=False)
except:
raise ValueError("Error rooting trees: check for corrupt tree file or duplicated sequences.")
# Write the sequences, in PHYLIP format (real PHYLIP format, as used by PHYLIP!)
with open(wdir + "/" + "infile", "w") as f:
f.write(" %d %d\n" % (len(seq_align), len(seq_align[0])))
for seq in seq_align:
f.write("%10s%s\n" % (seq.id, seq.seq.upper()))
# Write the tree file
Phylo.write(qtree, wdir + "/" + "intree", "newick")
if os.path.exists(wdir + "/" + "outfile"):
os.remove(wdir + "/" + "outfile")
if os.path.exists(wdir + "/" + "outtree"):
os.remove(wdir + "/" + "outtree")
# The path to the module may reference either a .py or a .pyc file...
ctlfile = os.path.abspath(__file__).replace(".pyc", ".ctl") if ".pyc" in os.path.abspath(__file__) \
else os.path.abspath(__file__).replace(".py", ".ctl")
# Check for dnaml in the current directory
dnamlfile = os.path.abspath(__file__).replace("Dnaml.pyc", "dnaml") if ".pyc" in os.path.abspath(__file__) \
else os.path.abspath(__file__).replace("Dnaml.py", "dnaml")
if not os.path.exists(dnamlfile):
dnamlfile = "dnaml" # must be on the path somewhere
with open(wdir + "/" + "dnaml.txt", "w") as o, open(ctlfile, "r") as i:
subprocess.call(dnamlfile, cwd=wdir, stdin = i, stdout=o)
if not os.path.isfile(wdir + "/" + "outfile"):
rep("No output returned by dnaml: please check the logs for the issue.")
return None
if os.path.isfile(wdir + "/" + "outfile.txt"):
os.remove(wdir + "/" + "outfile.txt")
os.rename(wdir + "/" + "outfile", wdir + "/" + "outfile.txt")
intseqs = self.__parse_outfile(wdir + "/" + "outfile.txt")
if not intseqs:
rep("Unexpected output returned by dnaml: please check the logs for the issue.")
return None
# Custom sort function to put the root record first, then others supplied by the user, then intermediate nodes
def key_ids(rec):
if rec.id == "N%09d" % 1:
return 'a__' + rec.id
elif 'node_' in rec.id:
return 'z__' + "%04d" % (int)(rec.id.split("_")[1])
else:
return 'l__' + rec.id
labelled_tree = Phylo.read(wdir + "/" + "outtree", "newick")
intseqs.seek(0)
int_seqs = Alignment(file_name=intseqs, format="fasta")
int_seqs.sort(key=key_ids)
intseqs.seek(0)
int_aas = Alignment()
int_aas.read_nt(intseqs, "fasta")
int_aas.sort(key=key_ids)
int_aas.set_position_numbers(position_numbers = seqpattern)
# Put back the original names in all our collections
for seq in int_seqs:
if seq.id in inv_dict:
seq.id = inv_dict[seq.id]
seq.name = ""
seq.description = ""
for seq in int_aas:
if seq.id in inv_dict:
seq.id = inv_dict[seq.id]
seq.name = ""
seq.description = ""
nodeid = 1
for clade in labelled_tree.find_clades(order="preorder"):
if clade.name is None:
clade.name = "node_%d" % nodeid # This relies on our traversal using the same order as dnaml
nodeid += 1
else:
if clade.name in inv_dict:
clade.name = inv_dict[clade.name]
# Now we need to map the labelling of the nodes in the labelled tree to the nodes in the original tree
self.__map_names(ptree, labelled_tree)
Phylo.write(ptree, wdir + "/" + tag + "intermediates_treefile.new", "newick", plain=False)
cladenames = []
new_int_aas = Alignment()
for clade in ptree.find_clades():
if clade.name is not None:
cladenames.append(clade.name)
for rec in int_aas:
if rec.id in cladenames:
new_int_aas.append(rec)
int_aas = new_int_aas
int_aas.set_position_numbers(position_numbers = seqpattern)
copy_tree = copy.deepcopy(ptree)
# Calculate AA diffs between each node and its parent, and write to the tree
labels = {}
def diffkey(diff):
return int_aas.index_of(diff[1:-1])
for clade in ptree.find_clades():
if clade.name is not None:
parent = self.__get_parent(ptree, clade)
if parent is None:
path = ptree.get_path(clade)
if len(path) == 1 and clade.name != first:
fname = inv_dict[first]
parent = ptree.find_clades(name = re.escape(fname)).next()
if parent is not None and parent.name is not None:
diffs = list(int_aas.seqdiff(clade.name, parent.name))
diffs.sort(key = diffkey)
diffs = "+".join(diffs)
if "node_" in clade.name:
labels[clade.name] = diffs
else:
labels[clade.name] = str(clade.name) + " " + diffs
for clade in ptree.find_clades():
if clade.name is not None and clade.name in labels:
clade.name = labels[clade.name]
Phylo.write(ptree, wdir + "/" + tag + "annotated_treefile.new", "newick", plain=False)
# Now write a tree with summary CDR/FR total changes
if cdrfile is not None:
ptree = copy.deepcopy(copy_tree)
acdr = AnalyseCDR(int_aas, file_name=cdrfile)
labels = {}
for clade in ptree.find_clades():
if clade.name is not None:
parent = self.__get_parent(ptree, clade)
if parent is None:
path = ptree.get_path(clade)
if len(path) == 1 and clade.name != first:
fname = inv_dict[first]
parent = ptree.find_clades(name = re.escape(fname)).next()
if parent is not None and parent.name is not None:
diffs = acdr.category_diff(clade.name, parent.name)
if "node_" in clade.name:
labels[clade.name] = diffs
else:
labels[clade.name] = str(clade.name) + " " + diffs
for clade in ptree.find_clades():
if clade.name is not None and clade.name in labels:
clade.name = labels[clade.name]
Phylo.write(ptree, wdir + "/" + tag + "annotated_treefile_sum.new", "newick", plain=False)
# And write a tree with counts of total AA changes
ptree = copy.deepcopy(copy_tree)
labels = {}
for clade in ptree.find_clades():
if clade.name is not None:
parent = self.__get_parent(ptree, clade)
if parent is None:
path = ptree.get_path(clade)
if len(path) == 1 and clade.name != first:
fname = inv_dict[first]
parent = ptree.find_clades(name = re.escape(fname)).next()
if parent is not None and parent.name is not None:
diffs = list(int_aas.seqdiff(clade.name, parent.name))
if "node_" in clade.name:
labels[clade.name] = str(len(diffs)) if len(diffs) > 0 else ""
else:
labels[clade.name] = str(clade.name) + (" " + str(len(diffs)) if len(diffs) > 0 else "")
for clade in ptree.find_clades():
if clade.name is not None and clade.name in labels:
clade.name = labels[clade.name]
Phylo.write(ptree, wdir + "/" + tag + "annotated_treefile_tot.new", "newick", plain=False)
f = open(wdir + "/" + tag + "aa_alignment.txt", "w")
f.write(int_aas.report(100))
f.close()
f = open(wdir + "/" + tag + "nt_alignment.txt", "w")
f.write(int_seqs.report(100))
f.close()
for rec in int_aas:
rec.description = ""
AlignIO.write(int_aas, wdir + "/" + tag + "aa_alignment.fa", "fasta")
AlignIO.write(int_seqs, wdir + "/" + tag + "nt_alignment.fa", "fasta")
return int_aas
def __parse_outfile(self, filename):
"""Internal method to parse the dnaml output file created after ancestral reconstruction."""
#Fish out the tree with node labels, and the ancestral sequences
seqs = {}
with open(filename, "r") as f:
for line in f:
if "Reconstructed sequence" in line or not line:
break
if not line:
return
for line in f:
if len(line) > 10:
id = line[:11].replace(" ", "")
if 'N' not in id:
id = "node_" + id
seq = line[11:].strip().replace(" ", "")
seqs[id] = seqs.get(id, "") + seq
intseqs = StringIO()
for id,seq in seqs.iteritems():
intseqs.write(">%s\n" % id)
intseqs.write("%s\n" % seq)
return intseqs
def __get_parent(self, tree, child_clade):
"""Internal method to find the parent of a clade"""
node_path = tree.get_path(child_clade)
if len(node_path) > 1:
return node_path[-2]
else:
return None
def __map_names(self, ptree, reftree):
"""Map the names of intermediate nodes across from reftree to ptree"""
for clade in ptree.find_clades(order = 'postorder'):
if clade.name is None:
childname = clade.clades[0].name
if childname is not None:
refchild = reftree.find_clades(name=re.escape(childname))
refp = self.__get_parent(reftree, refchild.next())
if refp is not None:
clade.name = refp.name
elif clade != ptree.clade:
clade.name = reftree.root.name
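
# Hedged usage sketch (not part of the original module; the file names, sequence
# numbering pattern and reporter below are illustrative assumptions only):
#
#   align = AlignIO.read("clone_aln.phy", "phylip")   # first record = outgroup/ancestor
#   tree = Phylo.read("clone_tree.new", "newick")
#   aas = Dnaml().run_dnaml(align, tree, seqpattern=["1-120"], cdrfile=None,
#                           wdir="work", rep=lambda m: sys.stderr.write(m + "\n"))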
| 40.64 | 120 | 0.566437 | 12,865 | 0.904457 | 0 | 0 | 0 | 0 | 0 | 0 | 4,509 | 0.316999 |
a8d51fd115f735ca8b9bacde53faaf9aede7e370 | 4,699 | py | Python | gaia_project/communication_layer.py | yogurt-company/gaia_ai | 1885059e99a96f45e6cc082b87c189829c4eaed9 | [
"MIT"
] | null | null | null | gaia_project/communication_layer.py | yogurt-company/gaia_ai | 1885059e99a96f45e6cc082b87c189829c4eaed9 | [
"MIT"
] | null | null | null | gaia_project/communication_layer.py | yogurt-company/gaia_ai | 1885059e99a96f45e6cc082b87c189829c4eaed9 | [
"MIT"
] | null | null | null | from traits.api import (HasPrivateTraits, Instance, List, Dict)
from .player import Player
from .board import GameBoard
from .tech_board import TechBoard
from gaia_project.faction_board.player_panel import PlayerPanel
from .layout import Layout
from .constants import BASIC_4P_SETUP
import pygame
import sys
class CommunicationLayer(HasPrivateTraits):
pass
class LocalCommunicationLayer(CommunicationLayer):
players = List(Instance(Player))
board = Instance(GameBoard)
tech_board = Instance(TechBoard)
player_panels = Dict(Instance(Player), Instance(PlayerPanel))
layout = Instance(Layout)
def __init__(self, players=None, cfg=BASIC_4P_SETUP, *args, **kwargs):
super().__init__(*args, **kwargs)
if players is not None:
self.players = players
else:
self.players = [Player('Hadsch Hallas', 'Freddy'),
Player('Xenos', 'Jebediah'),
Player('Taklons', 'Vivian')]
self.layout = Layout(self.players, cfg)
self.board = self.layout.board
self.tech_board = self.layout.tech_board
pp_w, pp_h = self.layout.player_panel_coords()
self.player_panels = {
player : (
PlayerPanel(pp_w, pp_h, player)
if player is not self.players[0] else
self.layout.player_panel) for player in self.players}
pygame.init()
pygame.event.set_allowed(None)
pygame.event.set_allowed((pygame.QUIT, pygame.MOUSEBUTTONUP,
pygame.VIDEORESIZE))
def make_move(self, player, game_state):
# set the layout to have the current player panel showing
if player.intelligence == 'human':
self.layout.player_panel = self.player_panels[player]
self.layout.player_panel.hide_choice()
self.update_gfx()
move = self.process_events()
return move
    elif player.intelligence == 'automa':
      move = player.automa.make_move(player, game_state)
      return move
    elif player.intelligence == 'ai':
      raise NotImplementedError
    else:
      raise NotImplementedError
def make_choice(self, player, choice, move):
self.layout.player_panel = self.player_panels[player]
self.layout.player_panel.show_choice(choice, move.description)
self.update_gfx()
choice = self.process_events()
    print('got choice')
return choice
def inform_illegal_choice(self, player, explanation):
self.layout.player_panel = self.player_panels[player]
self.layout.player_panel.display_error(explanation)
self.update_gfx()
self.process_events()
def process_events(self):
while True:
#we are now accepting mouse events
pygame.event.set_allowed(pygame.MOUSEBUTTONUP)
for event in pygame.event.get():
#this event does not need to be executed in order
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
#this event does not need to be executed in order
elif event.type == pygame.VIDEORESIZE:
self.layout.resize(event.w, event.h)
elif event.type == pygame.MOUSEBUTTONUP:
#disallow mouse events until this is handled
pygame.event.set_blocked(pygame.MOUSEBUTTONUP)
origin_surf = self.layout.determine_origin(event.pos)
if origin_surf is None:
continue
event = self.layout.pass_event(origin_surf, event.pos)
if event is not None:
return event
def add_building(self, player, coords, building_type, lantid_share=False):
x, y = coords
self.board.add_building(x, y, player.color, building_type,
lantid_share=lantid_share)
def add_orbital(self, player, coords, orbital_type):
x, y = coords
self.board.add_orbital(x, y, player.color, orbital_type)
def techup(self, player, tech_track):
self.tech_board.techup(player.color, tech_track)
def update_available_buildings(self, player):
pass
def update_bonus_tiles(self, tiles):
for player in self.player_panels:
panel = self.player_panels[player]
panel.update_bonus_tiles(tiles)
def update_turn_order(self, next_order):
pass
def update_advanced_tech_tiles(self, tiles):
pass
def update_terraforming_fed(self, fed):
pass
def update_available_feds(self, feds):
pass
def update_available_power_actions(self, power_actions):
self.tech_board.update_power_actions(power_actions)
def update_available_special_actions(self, player, spec_actions):
panel = self.player_panels[player]
panel.update_special_actions( spec_actions[player] )
def update_misc_info(self, score):
pass
def update_gfx(self):
self.layout.paint()
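
# A minimal driving sketch (assumptions: the default Player objects give
# players[0] the 'human' intelligence, and game_state is unused on that path):
#
#   comm = LocalCommunicationLayer()
#   move = comm.make_move(comm.players[0], game_state=None)
#   comm.update_gfx()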
| 27.319767 | 76 | 0.685678 | 4,376 | 0.931262 | 0 | 0 | 0 | 0 | 0 | 0 | 323 | 0.068738 |
a8d55f37a7ad9934a5f3c141465620693ff5ff0b | 2,710 | py | Python | dialogue-engine/test/programytest/security/linking/test_aiml.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 104 | 2020-03-30T09:40:00.000Z | 2022-03-06T22:34:25.000Z | dialogue-engine/test/programytest/security/linking/test_aiml.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 25 | 2020-06-12T01:36:35.000Z | 2022-02-19T07:30:44.000Z | dialogue-engine/test/programytest/security/linking/test_aiml.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 10 | 2020-04-02T23:43:56.000Z | 2021-05-14T13:47:01.000Z | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
import os
from programy.storage.stores.sql.engine import SQLStorageEngine
from programy.storage.stores.sql.config import SQLStorageConfiguration
from programy.security.linking.accountlinker import BasicAccountLinkerService
from programytest.client import TestClient
class AccountLinkerTestClient(TestClient):
def __init__(self):
TestClient.__init__(self)
def load_storage(self):
super(AccountLinkerTestClient, self).load_storage()
self.add_default_stores()
self.add_categories_store([os.path.dirname(__file__)])
class AccountLinkerAIMLTests(unittest.TestCase):
def setUp(self):
config = SQLStorageConfiguration()
storage_engine = SQLStorageEngine(config)
storage_engine.initialise()
client = AccountLinkerTestClient()
self.context = client.create_client_context("TESTUSER")
self.context.brain._security._account_linker = BasicAccountLinkerService(storage_engine)
def test_account_link_happy_path(self):
response = self.context.bot.ask_question(self.context, "LINK PRIMARY ACCOUNT USER1 CONSOLE PASSWORD123")
self.assertIsNotNone(response)
self.assertTrue(response.startswith('Your generated key is'))
words = response.split(" ")
        self.assertEqual(5, len(words))
generated_key = words[4][:-1]
command = "LINK SECONDARY ACCOUNT USER1 USER2 FACEBOOK PASSWORD123 %s" % generated_key
response = self.context.bot.ask_question(self.context, command)
self.assertIsNotNone(response)
self.assertEqual('Your accounts are now linked.', response)
| 44.42623 | 126 | 0.761255 | 1,349 | 0.497786 | 0 | 0 | 0 | 0 | 0 | 0 | 1,245 | 0.45941 |
a8d5da5c788737a8cc6ea3c6e3295911495ac3e9 | 5,179 | py | Python | activity/activity_ApprovePublication.py | gnott/elife-bot | 584c315d15d1289e0d2c27c28aaaae31174812e4 | [
"MIT"
] | null | null | null | activity/activity_ApprovePublication.py | gnott/elife-bot | 584c315d15d1289e0d2c27c28aaaae31174812e4 | [
"MIT"
] | null | null | null | activity/activity_ApprovePublication.py | gnott/elife-bot | 584c315d15d1289e0d2c27c28aaaae31174812e4 | [
"MIT"
] | null | null | null | import base64
import json
import activity
import os
import requests
import boto.sqs
from boto.sqs.message import Message
from provider import eif
"""
activity_ApprovePublication.py activity
"""
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)
class activity_ApprovePublication(activity.activity):
def __init__(self, settings, logger, conn=None, token=None, activity_task=None):
activity.activity.__init__(self, settings, logger, conn, token, activity_task)
self.name = "ApprovePublication"
self.version = "1"
self.default_task_heartbeat_timeout = 30
self.default_task_schedule_to_close_timeout = 60 * 5
self.default_task_schedule_to_start_timeout = 30
self.default_task_start_to_close_timeout = 60 * 5
self.description = "Approve a previously submitted article"
self.rules = []
self.info = None
self.logger = logger
# TODO : better exception handling
def do_activity(self, data=None):
"""
Do the work
"""
if self.logger:
self.logger.info('data: %s' % json.dumps(data, sort_keys=True, indent=4))
article_id = data['article_id']
version = data['version']
run = data['run']
try:
self.emit_monitor_event(self.settings, article_id, version, run,
"Approve Publication", "start",
"Starting approval of article " + article_id)
publication_data = data['publication_data']
article_version_id = str(article_id) + '.' + str(version)
destination = self.settings.drupal_approve_endpoint
destination = destination + article_version_id + '.json'
headers = {'content-type': 'application/json'}
auth = None
if self.settings.drupal_update_user and self.settings.drupal_update_user != '':
auth = requests.auth.HTTPBasicAuth(self.settings.drupal_update_user,
self.settings.drupal_update_pass)
r = requests.put(destination, data="{ \"publish\": \"1\" }", headers=headers, auth=auth)
self.logger.info("PUT response was %s, retrying" % r.status_code)
if r.status_code == 500:
return activity.activity.ACTIVITY_TEMPORARY_FAILURE
if r.status_code == 200:
self.set_monitor_property(self.settings, article_id, 'publication-status',
'published', "text", version=version)
message = base64.decodestring(publication_data)
message = self.modify_update_date(message, r)
sqs_conn = boto.sqs.connect_to_region(
self.settings.sqs_region,
aws_access_key_id=self.settings.aws_access_key_id,
aws_secret_access_key=self.settings.aws_secret_access_key)
out_queue = sqs_conn.get_queue(self.settings.workflow_starter_queue)
m = Message()
m.set_body(message)
out_queue.write(m)
else:
self.emit_monitor_event(self.settings, article_id, version, run,
"Approve Publication", "error",
"Website ingest returned an error code: " +
str(r.status_code))
self.logger.error("Body:" + r.text)
return False
except Exception as e:
self.logger.exception("Exception when submitting article EIF")
self.emit_monitor_event(self.settings, article_id, version, run,
"Approve Publication", "error",
"Error approving article publication for " + article_id +
" message:" + str(e.message))
return False
self.emit_monitor_event(self.settings, article_id, version, run,
"Approve Publication", "end",
"Finished approving article" + article_id +
" status was " + str(r.status_code))
return True
def modify_update_date(self, message, response):
update_date = self.extract_update_date(
self.workflow_data(message),
response.json())
if update_date:
message_json = json.loads(message)
if ("workflow_data" in message_json and
"update_date" in message_json["workflow_data"]):
message_json["workflow_data"]["update_date"] = update_date
message = json.dumps(message_json)
return message
def workflow_data(self, message):
message_json = json.loads(message)
if "workflow_data" in message_json:
return message_json["workflow_data"]
return {}
def extract_update_date(self, passthrough_json, response_json):
return eif.extract_update_date(passthrough_json, response_json)
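
# Shape of the workflow `data` this activity expects, assembled from the reads in
# do_activity above (the field values here are illustrative assumptions only):
#
#   {"article_id": "00353", "version": "1", "run": "some-run-id",
#    "publication_data": "<base64-encoded workflow-starter message>"}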
| 40.147287 | 100 | 0.583897 | 4,892 | 0.944584 | 0 | 0 | 0 | 0 | 0 | 0 | 777 | 0.150029 |
a8d7f2fc85888b5e64602a155d1dd889102580a5 | 3,479 | py | Python | density_estimation.py | agilevaluechain/100-page-ml | 9236ec87b0cf563e0998d723be19e58155003a9d | [
"MIT"
] | 3 | 2019-02-12T16:46:23.000Z | 2020-12-02T15:39:38.000Z | density_estimation.py | a272573094/theMLbook | 9236ec87b0cf563e0998d723be19e58155003a9d | [
"MIT"
] | null | null | null | density_estimation.py | a272573094/theMLbook | 9236ec87b0cf563e0998d723be19e58155003a9d | [
"MIT"
] | 2 | 2019-07-22T15:28:58.000Z | 2019-10-14T16:07:51.000Z | import numpy as np
import scipy as sp
import matplotlib
import matplotlib.pyplot as plt
import math
from sklearn.neighbors import KernelDensity
import scipy.integrate as integrate
from sklearn.kernel_ridge import KernelRidge
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
matplotlib.rcParams.update({'font.size': 18})
mu1, sigma1 = 3.0, 1.0
mu2, sigma2 = 8.0, 1.5
def sample_points():
s1 = np.random.normal(mu1, sigma1, 50)
s2 = np.random.normal(mu2, sigma2, 50)
return list(s1) + list(s2)
# generate points used to plot
x_plot = np.linspace(0, 12, 100)
# generate points and keep a subset of them
x = sample_points()
colors = ['red', 'blue', 'orange', 'green']
lw = 2
def kernel(x1, x2, bi = 2.0):
z = (x1 - x2) / bi
    return (1.0/math.sqrt(2.0 * math.pi)) * math.exp((-1.0/2.0)*(z**2))
def fb(xx, data, bi):
return (1/(len(data)*bi)) * sum([kernel(xx, xi, bi) for xi in data])
def fbi(i, data, bi):
data_minus_i = []
for ii in range(len(data)):
if i != ii:
data_minus_i.append(data[ii])
return (1/(len(data_minus_i)*bi)) * sum([kernel(data[i], xi, bi) for xi in data_minus_i])
def sum_pdf(x):
result = []
for i in range(len(x)):
result.append((sp.stats.norm.pdf(x, mu1, sigma1)[i] + sp.stats.norm.pdf(x, mu2, sigma2)[i])/2.0)
#result.append(sp.stats.norm.pdf(x, mu1, sigma1)[i])
return result
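# The loop below performs least-squares cross-validation of the bandwidth b:
# for each candidate it evaluates
#     l(b) = integral fb(x)^2 dx - (2/N) * sum_i f_{b,-i}(x_i),
# which estimates integral (fb - f)^2 dx up to a constant independent of b,
# so the minimizing b (minb, used for the first KDE plot) is the LSCV choice.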
b = np.linspace(0.01, 3.0, 100)
score = []
for bi in b:
def fb2(xx):
return fb(xx, x, bi)**2
s = integrate.quad(fb2, -np.inf, np.inf)[0] - 2.0*np.mean([fbi(i, x, bi) for i in range(len(x))])
score.append(s)
plt.figure(1)
plt.plot(b,score)
plt.xlabel("$b$")
plt.ylabel("$l$")
plt.tight_layout()
plt.xticks(np.arange(0, 3.5, 0.5))
#plt.show()
fig1 = plt.gcf()
fig1.subplots_adjust(top = 0.98, bottom = 0.1, right = 0.98, left = 0.08, hspace = 0, wspace = 0)
fig1.savefig('../../Illustrations/density-estimation-loss.eps', format='eps', dpi=1000, bbox_inches = 'tight', pad_inches = 0)
fig1.savefig('../../Illustrations/density-estimation-loss.pdf', format='pdf', dpi=1000, bbox_inches = 'tight', pad_inches = 0)
fig1.savefig('../../Illustrations/density-estimation-loss.png', dpi=1000, bbox_inches = 'tight', pad_inches = 0)
minb = [bi for bi, s in zip(b, score) if s == min(score)][0]
print(minb)
for count, degree in enumerate([round(minb, 2)] + [0.2, 2.0]):
plt.figure(count+2)
axes = plt.gca()
axes.set_xlim([0,12])
axes.set_ylim([0,0.3])
plt.xlabel("$x$")
plt.ylabel("pdf")
plt.scatter(x, [0.005] * len(x), color='navy', s=30, marker=2, label="training examples")
plt.plot(x_plot, [fb(xp ,x, degree) for xp in x_plot], color=colors[count], linewidth=lw, label="$\\hat{f}_b$, $b = " + str(degree) + "$")
plt.plot(x_plot,sum_pdf(x_plot), label="true pdf")
plt.legend(loc='upper right')
plt.tight_layout()
fig1 = plt.gcf()
fig1.subplots_adjust(top = 0.98, bottom = 0.1, right = 0.98, left = 0.08, hspace = 0, wspace = 0)
fig1.savefig('../../Illustrations/density-estimation-' + str(count) + '.eps', format='eps', dpi=1000, bbox_inches = 'tight', pad_inches = 0)
fig1.savefig('../../Illustrations/density-estimation-' + str(count) + '.pdf', format='pdf', dpi=1000, bbox_inches = 'tight', pad_inches = 0)
fig1.savefig('../../Illustrations/density-estimation-' + str(count) + '.png', dpi=1000, bbox_inches = 'tight', pad_inches = 0)
plt.show()
| 33.451923 | 144 | 0.635527 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 665 | 0.191147 |
a8d822b3f26b1178674db70984322334b870204a | 1,157 | py | Python | bot/modules/list.py | styloxyash1/mybot | 285efe23d8fa429738ff2198da684d846fe2bf6f | [
"MIT"
] | null | null | null | bot/modules/list.py | styloxyash1/mybot | 285efe23d8fa429738ff2198da684d846fe2bf6f | [
"MIT"
] | null | null | null | bot/modules/list.py | styloxyash1/mybot | 285efe23d8fa429738ff2198da684d846fe2bf6f | [
"MIT"
] | null | null | null | from telegram.ext import CommandHandler, run_async
from bot.helper.mirror_utils.upload_utils.gdriveTools import GoogleDriveHelper
from bot import LOGGER, dispatcher
from bot.helper.telegram_helper.message_utils import sendMessage, sendMarkup, editMessage
from bot.helper.telegram_helper.filters import CustomFilters
from bot.helper.telegram_helper.bot_commands import BotCommands
@run_async
def list_drive(update,context):
try:
search = update.message.text.split(' ',maxsplit=1)[1]
LOGGER.info(f"Searching: {search}")
reply = sendMessage('Searching..... Please wait!', context.bot, update)
gdrive = GoogleDriveHelper(None)
msg, button = gdrive.drive_list(search)
if button:
editMessage(msg, reply, button)
else:
editMessage('No result found', reply, button)
except IndexError:
sendMessage('send a search key along with command', context.bot, update)
list_handler = CommandHandler(BotCommands.ListCommand, list_drive,filters=CustomFilters.authorized_chat | CustomFilters.authorized_user)
dispatcher.add_handler(list_handler)
| 41.321429 | 137 | 0.729473 | 0 | 0 | 0 | 0 | 584 | 0.504754 | 0 | 0 | 109 | 0.094209 |
a8d8c0ac90a58dbfc0b8d43c0093181c142ff0dc | 2,348 | py | Python | relational/scripts/comments.py | snspam/sn_spam | e0bb8e9c843e26e5f4be8a49a960ebf7a0d5bfd5 | [
"MIT"
] | null | null | null | relational/scripts/comments.py | snspam/sn_spam | e0bb8e9c843e26e5f4be8a49a960ebf7a0d5bfd5 | [
"MIT"
] | null | null | null | relational/scripts/comments.py | snspam/sn_spam | e0bb8e9c843e26e5f4be8a49a960ebf7a0d5bfd5 | [
"MIT"
] | null | null | null | """
Spam comments to be classified by the relational model.
"""
import os
import numpy as np
class Comments:
def __init__(self, config_obj, util_obj):
self.config_obj = config_obj
self.util_obj = util_obj
# public
def build(self, df, dset, data_f=None, tuffy=False, iden='0'):
"""Writes predicate info to the designated data folder.
df: comments dataframe.
dset: dataset (e.g. val or test).
data_f: data folder to save predicate files.
tuffy: boolean indicating if tuffy is the engine being used."""
if data_f is None:
data_f = self.define_file_folders()
unique_df = self.drop_duplicate_comments(df)
if tuffy:
self.write_tuffy_predicates(unique_df, dset, data_f)
else:
self.write_psl_predicates(unique_df, dset, data_f, iden=iden)
# private
def define_file_folders(self):
rel_dir = self.config_obj.rel_dir
domain = self.config_obj.domain
data_f = rel_dir + 'data/' + domain + '/'
if not os.path.exists(data_f):
os.makedirs(data_f)
return data_f
def drop_duplicate_comments(self, df):
temp_df = df.filter(['com_id', 'ind_pred', 'label'], axis=1)
unique_df = temp_df.drop_duplicates()
return unique_df
def write_psl_predicates(self, df, dset, data_f, iden='0'):
df.to_csv(data_f + dset + '_no_label_' + iden + '.tsv',
columns=['com_id'], sep='\t', header=None, index=None)
df.to_csv(data_f + dset + '_' + iden + '.tsv',
columns=['com_id', 'label'], sep='\t', header=None,
index=None)
df.to_csv(data_f + dset + '_pred_' + iden + '.tsv',
columns=['com_id', 'ind_pred'], sep='\t', header=None,
index=None)
def write_tuffy_predicates(self, df, dset, data_f):
ev = open(data_f + dset + '_evidence.txt', 'w')
q = open(data_f + dset + '_query.txt', 'w')
for index, row in df.iterrows():
pred = row.ind_pred
com_id = str(int(row.com_id))
wgt = str(np.log(self.util_obj.div0(pred, (1 - pred))))
ev.write('Indpred(' + com_id + ', ' + wgt + ')\n')
q.write('Spam(' + com_id + ')\n')
ev.close()
q.close()
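
# Illustrative content of the Tuffy files written above, for one comment with
# com_id=42 and ind_pred=0.8 (values assumed; the weight is the log-odds):
#   <dset>_evidence.txt : Indpred(42, 1.3862943611198906)
#   <dset>_query.txt    : Spam(42)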
| 34.529412 | 73 | 0.572828 | 2,252 | 0.959114 | 0 | 0 | 0 | 0 | 0 | 0 | 533 | 0.227002 |
a8d8dcbf034882ed1303c2c3e7ff44cf31f5c09e | 2,216 | py | Python | offline_validate.py | akshaykr/oracle_cb | 68f10fce5eca8ebe3f57fd5a56a0ef8d82537ab4 | [
"MIT"
] | 26 | 2017-08-02T19:58:06.000Z | 2021-11-03T06:31:01.000Z | offline_validate.py | akshaykr/oracle_cb | 68f10fce5eca8ebe3f57fd5a56a0ef8d82537ab4 | [
"MIT"
] | 1 | 2020-03-03T06:06:32.000Z | 2020-03-03T06:06:32.000Z | offline_validate.py | akshaykr/oracle_cb | 68f10fce5eca8ebe3f57fd5a56a0ef8d82537ab4 | [
"MIT"
] | 10 | 2017-06-02T19:34:38.000Z | 2022-03-22T10:38:51.000Z | import numpy as np
import sklearn.ensemble
import sklearn.linear_model
import Simulators
import pickle
if __name__=='__main__':
L = 3
B = Simulators.DatasetBandit(dataset='mq2008', L=3, loop=True, metric=None)
Bval = Simulators.DatasetBandit(dataset='mq2008val', L=3, loop=True, metric=None)
forest_sizes = np.arange(10,101, 10)
depths = np.arange(1, 6, 1)
iters = 10
out1 = {}
out2 = {}
out1['linear'] = []
out2['linear'] = []
for i in range(iters):
model = B.get_best_policy(learning_alg=lambda: sklearn.linear_model.LinearRegression(), classification=False)
s1 = B.offline_evaluate(model)
s2 = Bval.offline_evaluate(model)
out1['linear'].append(s1)
out2['linear'].append(s2)
print("Linear: %d s1: %0.3f s2: %0.3f" % (i, s1, s2), flush=True)
for a in forest_sizes:
for b in depths:
out1['forest_%d_%d' % (a,b)] = []
out2['forest_%d_%d' % (a,b)] = []
out1['gradient_%d_%d' % (a,b)] = []
out2['gradient_%d_%d' % (a,b)] = []
for i in range(iters):
model = B.get_best_policy(learning_alg=lambda: sklearn.ensemble.RandomForestRegressor(n_estimators=a, max_depth=b), classification=False)
s1 = B.offline_evaluate(model)
s2 = Bval.offline_evaluate(model)
out1['forest_%d_%d' % (a,b)].append(s1)
out2['forest_%d_%d' % (a,b)].append(s2)
print("Forest: %d %d %d s1: %0.3f s2: %0.3f" % (a,b,i, s1, s2), flush=True)
model = B.get_best_policy(learning_alg=lambda: sklearn.ensemble.GradientBoostingRegressor(n_estimators=a, max_depth=b), classification=False)
s1 = B.offline_evaluate(model)
s2 = Bval.offline_evaluate(model)
out1['gradient_%d_%d' % (a,b)].append(s1)
out2['gradient_%d_%d' % (a,b)].append(s2)
print("Gradient: %d %d %d s1: %0.3f s2: %0.3f" % (a,b,i, s1, s2), flush=True)
pickle.dump(dict(train=out1, val=out2), open("./out/mq2008_offline_L=%d.out" % (L), "wb"))
| 38.206897 | 157 | 0.557762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 326 | 0.147112 |
a8d932ece02949d905ec995a8df044a8d62825fb | 4,755 | py | Python | yang_et_al/run_tasks.py | wagner-group/geoadex | 693856dc4537937fa09ec7a22e175f8243483b44 | [
"MIT"
] | 4 | 2021-11-01T18:18:28.000Z | 2022-02-14T05:58:57.000Z | yang_et_al/run_tasks.py | wagner-group/geoadex | 693856dc4537937fa09ec7a22e175f8243483b44 | [
"MIT"
] | null | null | null | yang_et_al/run_tasks.py | wagner-group/geoadex | 693856dc4537937fa09ec7a22e175f8243483b44 | [
"MIT"
] | null | null | null | import os
import logging
import json
from nnattack.variables import auto_var, get_file_name
from params import (
compare_attacks,
compare_defense,
#compare_nns,
nn_k1_robustness,
nn_k3_robustness,
nn_k1_approx_robustness_figs,
dt_robustness_figs,
rf_robustness_figs,
nn_k1_robustness_figs,
nn_k3_robustness_figs,
dt_robustness,
rf_robustness,
mlp_ap_robustness,
mlp_at_robustness,
lr_ap_robustness,
lr_at_robustness,
nn1_def,
nn3_def,
dt_def,
rf_def,
lr_def,
mlp_def,
)
from main import eps_accuracy
logging.basicConfig(level=logging.DEBUG)
DEBUG = True if os.environ.get('DEBUG', False) else False
def main():
experiments = [
compare_attacks(),
compare_defense(),
#nn_k1_robustness_figs(),
#nn_k3_robustness_figs(),
#rf_robustness_figs(),
#dt_robustness_figs(),
dt_robustness(),
rf_robustness(),
nn_k3_robustness(),
nn_k1_robustness(),
#mlp_ap_robustness(),
#mlp_at_robustness(),
#lr_ap_robustness(),
#lr_at_robustness(),
#nn1_def(),
#nn3_def(),
#dt_def(),
#rf_def(),
#lr_def(),
#mlp_def(),
]
grid_params = []
for exp in experiments:
exp_fn, _, grid_param, run_param = exp()
if isinstance(grid_param, list):
grid_params.extend(grid_param)
else:
grid_params.append(grid_param)
if DEBUG:
run_param['n_jobs'] = 1
run_param['allow_failure'] = False
else:
run_param['n_jobs'] = 4
run_param['allow_failure'] = True
auto_var.run_grid_params(exp_fn, grid_params, **run_param)
#auto_var.run_grid_params(delete_file, grid_params, n_jobs=1,
# with_hook=False, allow_failure=False)
#auto_var.run_grid_params(celery_run, grid_params, n_jobs=1,
# allow_failure=False)
#auto_var.run_grid_params(temp_fix, grid_params, n_jobs=6,
# allow_failure=False, with_hook=False)
def delete_file(auto_var):
os.unlink(get_file_name(auto_var) + '.json')
def celery_run(auto_var):
run_exp.delay(auto_var.var_value)
from main import set_random_seed
import numpy as np
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
def temp_fix(auto_var):
file_name = get_file_name(auto_var)
print(file_name)
if os.path.exists("%s.json" % file_name):
with open("%s.json" % file_name, "r") as f:
ret = json.load(f)
if "tst_score" in ret:
return
else:
return
random_state = set_random_seed(auto_var)
ord = auto_var.get_var("ord")
X, y, eps_list = auto_var.get_var("dataset")
idxs = np.arange(len(X))
random_state.shuffle(idxs)
trnX, tstX, trny, tsty = X[idxs[:-200]], X[idxs[-200:]], y[idxs[:-200]], y[idxs[-200:]]
scaler = MinMaxScaler()
trnX = scaler.fit_transform(trnX)
tstX = scaler.transform(tstX)
lbl_enc = OneHotEncoder(categories=[np.sort(np.unique(y))], sparse=False)
#lbl_enc = OneHotEncoder(sparse=False)
lbl_enc.fit(trny.reshape(-1, 1))
auto_var.set_intermidiate_variable("lbl_enc", lbl_enc)
results = []
auto_var.set_intermidiate_variable("trnX", trnX)
auto_var.set_intermidiate_variable("trny", trny)
model_name = auto_var.get_variable_value("model")
attack_name = auto_var.get_variable_value("attack")
if 'adv_rf' in model_name:
pre_model = auto_var.get_var_with_argument('model', model_name[4:])
pre_model.fit(trnX, trny)
if 'blackbox' in attack_name:
auto_var.set_intermidiate_variable("model", pre_model)
elif 'adv_nn' in model_name and 'blackbox' in attack_name:
pre_model = auto_var.get_var_with_argument('model', model_name[4:])
pre_model.fit(trnX, trny)
auto_var.set_intermidiate_variable("model", pre_model)
model = auto_var.get_var("model")
auto_var.set_intermidiate_variable("model", model)
model.fit(trnX, trny)
pred = model.predict(tstX)
ori_tstX, ori_tsty = tstX, tsty # len = 200
idxs = np.where(pred == tsty)[0]
random_state.shuffle(idxs)
augX = None
if ('adv' in model_name) or ('advPruning' in model_name) or ('robustv2' in model_name):
assert hasattr(model, 'augX')
auto_var.set_intermidiate_variable("trnX", model.augX)
auto_var.set_intermidiate_variable("trny", model.augy)
augX, augy = model.augX, model.augy
ret['tst_score'] = (model.predict(ori_tstX) == ori_tsty).mean()
with open("%s.json" % file_name, "w") as f:
json.dump(ret, f)
if __name__ == "__main__":
main()
| 28.303571 | 91 | 0.647739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 951 | 0.2 |
a8d9cfd29512a9bafa442afe772db196ced778d9 | 849 | py | Python | asyncio/asyncio02_libs/scrapli/ex01_scrapli_async_basic_factory.py | levs72/pyneng-examples | d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9 | [
"MIT"
] | 11 | 2021-04-05T09:30:23.000Z | 2022-03-09T13:27:56.000Z | asyncio/asyncio02_libs/scrapli/ex01_scrapli_async_basic_factory.py | levs72/pyneng-examples | d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9 | [
"MIT"
] | null | null | null | asyncio/asyncio02_libs/scrapli/ex01_scrapli_async_basic_factory.py | levs72/pyneng-examples | d6288292dcf9d1ebc5a9db4a0d620bd11b4a2df9 | [
"MIT"
] | 11 | 2021-04-06T03:44:35.000Z | 2022-03-04T21:20:40.000Z | import asyncio
from scrapli import AsyncScrapli
from scrapli.exceptions import ScrapliException
r1 = {
"host": "192.168.100.1",
"auth_username": "cisco",
"auth_password": "cisco",
"auth_secondary": "cisco",
"auth_strict_key": False,
"timeout_socket": 5, # timeout for establishing socket/initial connection in seconds
"timeout_transport": 10, # timeout for ssh|telnet transport in seconds
"platform": "cisco_iosxe",
"transport": "asyncssh",
}
async def send_show(device, command):
try:
async with AsyncScrapli(**device) as conn:
result = await conn.send_command(command)
return result.result
except ScrapliException as error:
print(error, device["host"])
if __name__ == "__main__":
output = asyncio.run(send_show(r1, "show ip int br"))
print(output)
| 28.3 | 89 | 0.672556 | 0 | 0 | 0 | 0 | 0 | 0 | 259 | 0.305065 | 324 | 0.381625 |
a8da296b2bb797803ccea64d180cb52ee83c797e | 3,534 | py | Python | tests/test_nn/test_activations.py | VIVelev/nujo | 56c3058b14c4e0b7ae86d0f22dbe4c4dc81e8e71 | [
"MIT"
] | 5 | 2020-03-02T22:14:38.000Z | 2022-03-09T11:13:13.000Z | tests/test_nn/test_activations.py | VIVelev/nujo | 56c3058b14c4e0b7ae86d0f22dbe4c4dc81e8e71 | [
"MIT"
] | 30 | 2020-03-09T10:43:54.000Z | 2020-06-09T20:05:45.000Z | tests/test_nn/test_activations.py | VIVelev/nujo | 56c3058b14c4e0b7ae86d0f22dbe4c4dc81e8e71 | [
"MIT"
] | 3 | 2020-03-20T13:54:23.000Z | 2020-10-17T01:03:17.000Z | import pytest
from numpy import allclose, exp, maximum, sum
import nujo.nn.activations as activ
from nujo.autodiff.tensor import Tensor
# ====================================================================================================
# Test BinaryStep activation function
def test_binary_step(inputs):
# Test Forward pass
output = activ.BinaryStep()(inputs)
assert (output == [[0, 0, 0], [1, 0, 1]]).all()
# Test Backward pass
output.backward()
assert (inputs.grad == 0).all()
# ====================================================================================================
# Test Sigmoid activation function
def test_sigmoid(inputs):
# Test Forward pass
output = activ.Sigmoid()(inputs)
x = inputs.value
assert (output == 1 / (1 + exp(-x))).all()
# Test Backward pass
output.backward()
assert (inputs.grad == output.value * (1 - output.value)).all()
# ====================================================================================================
# Test TanH activation function
def test_tanh(inputs):
# Test Forward pass
output = activ.TanH()(inputs)
x = inputs.value
assert allclose(output.value, (exp(x) - exp(-x)) / (exp(x) + exp(-x)))
# Test Backward pass
output.backward()
assert (inputs.grad == 1 - output.value**2).all()
# ====================================================================================================
# Test ReLU activation function
def test_relu(inputs):
# Test Forward pass
output = activ.ReLU()(inputs)
x = inputs.value
assert (output == maximum(0, x)).all()
# Test Backward pass
output.backward()
assert (inputs.grad[inputs.grad > 0] == 1).all()
assert (inputs.grad[inputs.grad <= 0] == 0).all()
# ====================================================================================================
# Test LeakyReLU activation function
def test_leaky_relu(inputs):
# Test Forward pass
eps = 0.1
output = activ.LeakyReLU(eps=eps)(inputs)
x = inputs.value
assert (output == maximum(eps * x, x)).all()
# Test Backward pass
output.backward()
assert (inputs.grad[inputs.grad > 0] == 1).all()
assert (inputs.grad[inputs.grad <= 0] == eps).all()
# ====================================================================================================
# Test Swish activation function
def test_swish(inputs):
# Test Forward pass
beta = 1
output = activ.Swish(beta=beta)(inputs)
x = inputs.value
sigma = activ.Sigmoid()(beta * x).value
assert (output == x * sigma).all()
# Test Backward pass
output.backward()
assert (inputs.grad == output.value + sigma * (1 - output.value)).all()
# ====================================================================================================
# Test Softmax activation function
def test_softmax(inputs):
# Test Forward pass
output = activ.Softmax()(inputs)
exps = exp(inputs.value)
sums = sum(exps, axis=0, keepdims=True)
assert allclose(output.value, exps / sums)
# Test Backward pass
# TODO: Test Backward pass appropriately.
output.backward()
assert True
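    # A fuller check could use the softmax Jacobian ds_i/dx_j = s_i*(d_ij - s_j)
    # (a sketch, not asserted here): summing it over outputs i gives zero for
    # every input j, so the gradient of sum(softmax) vanishes identically.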
# ====================================================================================================
# Unit Test fixtures
@pytest.fixture
def inputs():
return Tensor([[0.42, 0.32, 0.34], [0.6, 0.1, 1.1]], diff=True)
# ====================================================================================================
| 26.373134 | 102 | 0.47142 | 0 | 0 | 0 | 0 | 97 | 0.027448 | 0 | 0 | 1,487 | 0.42077 |
763395a4150fc73e36ef226e3bec1f8ba494db9b | 3,611 | py | Python | data/data_utils.py | csalt-research/OpenASR-py | 9aea6753689d87d321260d7eb0ea0544e1b3403a | [
"MIT"
] | 2 | 2019-11-29T15:46:14.000Z | 2021-05-28T06:54:41.000Z | data/data_utils.py | csalt-research/OpenASR-py | 9aea6753689d87d321260d7eb0ea0544e1b3403a | [
"MIT"
] | null | null | null | data/data_utils.py | csalt-research/OpenASR-py | 9aea6753689d87d321260d7eb0ea0544e1b3403a | [
"MIT"
] | null | null | null | import torch
import os
from tqdm import tqdm
import numpy as np
from multiprocessing.pool import Pool
from itertools import islice, cycle
from utils.logging import logger
from utils.misc import ensure_dir
class Vocab(object):
def __init__(self):
self.tok2idx = {}
self.idx2tok = []
self.add('<pad>') # PAD index is 0
self.add('<unk>') # UNK index is 1
self.add('<bos>') # BOS index is 2
self.add('<eos>') # EOS index is 3
def __len__(self):
return len(self.idx2tok)
def add(self, token):
if token not in self.tok2idx:
self.tok2idx[token] = len(self.idx2tok)
self.idx2tok.append(token)
def encode(self, token):
return self.tok2idx.get(token, self.tok2idx['<unk>'])
def decode(self, token_id):
assert token_id < len(self.idx2tok), \
'token id must be less than %d, got %d' % (len(self.idx2tok), token_id)
return self.idx2tok[token_id]
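
# Round-trip sketch (indices follow insertion order after the four specials
# occupying 0-3):
#   v = Vocab(); v.add("hello")
#   v.encode("hello") -> 4 ; v.decode(4) -> "hello" ; v.encode("missing") -> 1  # <unk>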
def split_corpus(path, shard_size):
with open(path, "r") as f:
if shard_size <= 0:
yield f.readlines()
else:
while True:
shard = list(islice(f, shard_size))
if not shard:
break
yield shard
def build_vocab(src_file, max_vocab_size=0):
with open(src_file, 'r') as f:
tokens = f.read().split()
freq_dict = {}
for t in tokens:
freq_dict[t] = freq_dict.get(t, 0) + 1
tokens = sorted(
list(freq_dict.items()),
key=lambda x: x[1],
reverse=True
)
vsize = max_vocab_size if max_vocab_size > 0 else len(tokens)
vocab = [t[0] for t in tokens[:vsize]]
ret = Vocab()
for t in vocab:
ret.add(t)
return ret
def _worker(args):
src, tgt, feat_ext, vocab = args
if tgt == '':
return None
try:
return feat_ext(src), tgt, [vocab.encode(x) for x in ('<bos> '+tgt+' <eos>').split()]
except Exception as e:
return None
def build_shards(src_dir, save_dir, src_file, tgt_file, vocab,
shard_size, feat_ext, mode='train', feats=None
):
src_shards = split_corpus(src_file, shard_size)
tgt_shards = split_corpus(tgt_file, shard_size)
ensure_dir(save_dir)
shard_index = 0
for src_shard, tgt_shard in zip(src_shards, tgt_shards):
logger.info('Building %s shard %d' % (mode, shard_index))
audio_paths = [os.path.join(src_dir, p.strip()) for p in src_shard]
assert all([os.path.exists(p) for p in audio_paths]), \
"following audio files not found: %s" % \
' '.join([p.strip() for p in audio_paths if not os.path.exists(p)])
targets = [t.strip() for t in tgt_shard]
src_tgt_pairs = list(zip(audio_paths, targets, cycle([feat_ext]), cycle([vocab])))
with Pool(50) as p:
result = list(tqdm(p.imap(_worker, src_tgt_pairs), total=len(src_tgt_pairs)))
result = [r for r in result if r is not None]
audio_feats, transcriptions, indices = zip(*result)
shard = {
'src': np.asarray(audio_feats),
'tgt': np.asarray(transcriptions),
'indices': np.asarray([np.asarray(x).reshape(-1,1) for x in indices]),
'feats': feats
}
shard_path = os.path.join(save_dir, '%s.%05d.pt' % (mode, shard_index))
logger.info('Saving shard %d to %s' % (shard_index, shard_path))
torch.save(shard, shard_path)
        shard_index += 1
| 33.12844 | 93 | 0.577402 | 776 | 0.214899 | 300 | 0.083079 | 0 | 0 | 0 | 0 | 292 | 0.080864 |
7633bcef8f558a190b0edf5806df25a3b35e668d | 1,188 | py | Python | config/wsgi.py | poblouin/budgetme-rest-api | 74d9237bc7b0a118255a659029637c5ed1a8b7a1 | [
"MIT"
] | 2 | 2018-03-07T09:43:07.000Z | 2018-03-11T04:50:41.000Z | config/wsgi.py | poblouin/budgetme-rest-api | 74d9237bc7b0a118255a659029637c5ed1a8b7a1 | [
"MIT"
] | 13 | 2017-12-28T02:44:09.000Z | 2020-06-05T21:13:13.000Z | config/wsgi.py | poblouin/budgetme-rest-api | 74d9237bc7b0a118255a659029637c5ed1a8b7a1 | [
"MIT"
] | null | null | null | """
WSGI config for budgetme project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 38.322581 | 78 | 0.795455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 942 | 0.792929 |
76341c212af4ec1ae958c0839064e13798eee03d | 3,983 | py | Python | src/compas_ui/objects/networkobject.py | BlockResearchGroup/compas_ui | 8b5a6121eee837d306bf20c44c91f94a5c185f90 | [
"MIT"
] | null | null | null | src/compas_ui/objects/networkobject.py | BlockResearchGroup/compas_ui | 8b5a6121eee837d306bf20c44c91f94a5c185f90 | [
"MIT"
] | 3 | 2022-02-24T17:56:30.000Z | 2022-03-31T09:48:40.000Z | src/compas_ui/objects/networkobject.py | BlockResearchGroup/compas_ui | 8b5a6121eee837d306bf20c44c91f94a5c185f90 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import reduce
from operator import mul
from compas.geometry import Point
from compas.geometry import Scale
from compas.geometry import Translation
from compas.geometry import Rotation
from compas.geometry import transform_points
from .object import Object
class NetworkObject(Object):
"""Class for representing COMPAS networkes in Rhino.
Attributes
----------
anchor : int
The node of the network that is anchored to the location of the object.
location : :class:`compas.geometry.Point`
The location of the object.
Default is the origin of the world coordinate system.
scale : float
A uniform scaling factor for the object in the scene.
The scale is applied relative to the location of the object in the scene.
rotation : list[float]
The rotation angles around the 3 axis of the coordinate system
with the origin placed at the location of the object in the scene.
node_xyz : dict[int, list[float]]
The view coordinates of the network object.
"""
SETTINGS = {
'color.nodes': (255, 255, 255),
'color.edges': (0, 0, 0),
'show.nodes': True,
'show.edges': True,
'show.nodelabels': False,
'show.edgelabels': False,
}
def __init__(self, *args, **kwargs):
super(NetworkObject, self).__init__(*args, **kwargs)
self._anchor = None
self._location = None
self._scale = None
self._rotation = None
@property
def network(self):
return self.item
@network.setter
def network(self, network):
self.item = network
@property
def anchor(self):
return self._anchor
@anchor.setter
def anchor(self, node):
if self.network.has_node(node):
self._anchor = node
@property
def location(self):
if not self._location:
self._location = Point(0, 0, 0)
return self._location
@location.setter
def location(self, location):
self._location = Point(*location)
@property
def scale(self):
if not self._scale:
self._scale = 1.0
return self._scale
@scale.setter
def scale(self, scale):
self._scale = scale
@property
def rotation(self):
if not self._rotation:
self._rotation = [0, 0, 0]
return self._rotation
@rotation.setter
def rotation(self, rotation):
self._rotation = rotation
@property
def node_xyz(self):
origin = Point(0, 0, 0)
nodes = list(self.network.nodes())
xyz = self.network.nodes_attributes(['x', 'y', 'z'], keys=nodes)
stack = []
if self.scale != 1.0:
S = Scale.from_factors([self.scale] * 3)
stack.append(S)
if self.rotation != [0, 0, 0]:
R = Rotation.from_euler_angles(self.rotation)
stack.append(R)
if self.location != origin:
if self.anchor is not None:
xyz = self.network.node_attributes(self.anchor, 'xyz')
point = Point(* xyz)
T1 = Translation.from_vector(origin - point)
stack.insert(0, T1)
T2 = Translation.from_vector(self.location)
stack.append(T2)
if stack:
X = reduce(mul, stack[::-1])
xyz = transform_points(xyz, X)
return dict(zip(nodes, xyz))
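    # Note on ordering: reduce(mul, stack[::-1]) composes X = T2 * R * S * T1,
    # so T1 (move the anchor node to the origin) acts first, then the scale and
    # rotation about that origin, and finally T2 places the result at `location`.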
def select_nodes(self):
raise NotImplementedError
def select_edges(self):
raise NotImplementedError
def modify_nodes(self, nodes, names=None):
raise NotImplementedError
def modify_edges(self, edges, names=None):
raise NotImplementedError
def move_node(self, node):
raise NotImplementedError
def move_edge(self, edge):
raise NotImplementedError
| 27.659722 | 81 | 0.615114 | 3,597 | 0.903088 | 0 | 0 | 1,878 | 0.471504 | 0 | 0 | 856 | 0.214913 |
7635bfe6eba52bcc67cdc83102f247a92f82a569 | 1,276 | py | Python | sa/profiles/NAG/SNR/profile.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | sa/profiles/NAG/SNR/profile.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | sa/profiles/NAG/SNR/profile.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Vendor: NAG
# OS: SNR
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.profile.base import BaseProfile
class Profile(BaseProfile):
name = "NAG.SNR"
pattern_more = [
(r"^ --More-- ", "\n"),
(r"^Confirm to overwrite current startup-config configuration \[Y/N\]:", "y\n"),
]
username_submit = "\r"
password_submit = "\r"
command_submit = "\r"
command_disable_pager = "terminal length 200"
command_exit = "exit"
config_tokenizer = "indent"
config_tokenizer_settings = {"line_comment": "!"}
INTERFACE_TYPES = {
"Ethe": "physical", # Ethernet
"Vlan": "SVI", # Vlan
"Port": "aggregated", # Port-Channel
"Vsf-": "aggregated", # Vsf-Port
"vpls": "unknown", # vpls_dev
"l2ov": "tunnel", # l2overgre
}
@classmethod
def get_interface_type(cls, name):
if name == "Ethernet0":
return "management"
return cls.INTERFACE_TYPES.get(name[:4])
| 30.380952 | 88 | 0.487461 | 876 | 0.68652 | 0 | 0 | 164 | 0.128527 | 0 | 0 | 683 | 0.535266 |
7636eeaae62c9c102a883b907b773aecc04889f1 | 754 | py | Python | src/common/views.py | danpercic86/e-notary | ba15a9a80e2091593fb088feacfaf9574c816d6e | [
"Apache-2.0"
] | 6 | 2020-12-25T20:46:51.000Z | 2022-01-27T20:52:13.000Z | src/common/views.py | danpercic86/e-notary | ba15a9a80e2091593fb088feacfaf9574c816d6e | [
"Apache-2.0"
] | 3 | 2021-01-26T12:20:14.000Z | 2022-01-27T20:45:28.000Z | src/common/views.py | danpercic86/e-notary | ba15a9a80e2091593fb088feacfaf9574c816d6e | [
"Apache-2.0"
] | null | null | null | from django.db.models import QuerySet
from drf_spectacular.utils import extend_schema
from rest_framework import status
from rest_framework.viewsets import ModelViewSet
from common.models import Example
from common.serializers import ExampleSerializer
from common.utils import create_swagger_info
@create_swagger_info(
extend_schema(
operation_id="Operation id",
description="Example description",
summary="Example operation summary",
auth=[],
request=ExampleSerializer(),
responses={status.HTTP_201_CREATED: ExampleSerializer()},
tags=["Common"],
)
)
class ExampleViewSet(ModelViewSet):
serializer_class = ExampleSerializer
queryset: QuerySet[Example] = Example.published.all()
| 30.16 | 65 | 0.753316 | 134 | 0.177719 | 0 | 0 | 453 | 0.600796 | 0 | 0 | 70 | 0.092838 |
7637c23ca9d4d52673449e7efa95f66c10e2908c | 35,728 | py | Python | modpypes/pdu.py | JoelBender/modpypes | f6e33c48fdc70f873bc2823b1ac4111cafe2a700 | [
"MIT"
] | 8 | 2015-06-21T18:06:22.000Z | 2022-03-03T13:12:35.000Z | modpypes/pdu.py | JoelBender/modpypes | f6e33c48fdc70f873bc2823b1ac4111cafe2a700 | [
"MIT"
] | null | null | null | modpypes/pdu.py | JoelBender/modpypes | f6e33c48fdc70f873bc2823b1ac4111cafe2a700 | [
"MIT"
] | 3 | 2016-01-29T19:23:40.000Z | 2017-04-25T13:12:50.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Protocol Data Units
===================
"""
import struct
from bacpypes.debugging import bacpypes_debugging, DebugContents, ModuleLogger
from bacpypes.comm import PDUData, PCI
from bacpypes.errors import DecodingError
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# a dictionary of functions and classes
request_types = {}
response_types = {}
def register_request_type(klass):
request_types[klass.functionCode] = klass
def register_response_type(klass):
response_types[klass.functionCode] = klass
#
# Packing and Unpacking Functions
#
def _packBitsToString(bits):
barry = []
i = packed = 0
for bit in bits:
if bit:
packed += 128
i += 1
if i == 8:
barry.append(packed)
i = packed = 0
else:
packed >>= 1
if i > 0 and i < 8:
packed >>= 7 - i
barry.append(packed)
return struct.pack("B" * len(barry), *barry)
def _unpackBitsFromString(string):
barry = struct.unpack("B" * len(string), string)
bits = []
for byte in barry:
for bit in range(8):
bits.append((byte & 1) == 1)
byte >>= 1
return bits
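# Round-trip sketch: bits are packed LSB-first within each octet, so
#   _packBitsToString([True, False, True]) -> '\x05'
#   _unpackBitsFromString('\x05')          -> True, False, True, then five False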
#
# _Struct
#
class _Struct:
"""
This is an abstract class for functions that pack and unpack the
variably encoded portion of a PDU. Each of the derived classes
    produces or consumes a number of 16-bit registers.
"""
registerLength = None
def pack(self, value):
raise NotImplementedError("pack is not implemented in %s" % (self.__class__.__name__,))
def unpack(self, registers):
raise NotImplementedError("unpack is not implemented in %s" % (self.__class__.__name__,))
@bacpypes_debugging
class Byte(_Struct):
"""
This class packs and unpacks a register as an unsigned octet.
"""
registerLength = 1
def pack(self, value):
if _debug: Byte._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
Byte._error("coercion error: %r not an int", value)
value = 0
return [value & 0xFF]
def unpack(self, registers):
if _debug: Byte._debug("unpack %r", registers)
return registers[0]
@bacpypes_debugging
class Int(_Struct):
"""
This class packs and unpacks a register as a 16-bit signed integer.
"""
registerLength = 1
def pack(self, value):
if _debug: Int._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
Int._error("coercion error: %r not an int", value)
value = 0
return [value & 0xFFFF]
def unpack(self, registers):
if _debug: Int._debug("unpack %r", registers)
value = registers[0]
if (value & 0x8000):
value = (-1 << 16) | value
return value
@bacpypes_debugging
class UnsignedInt(_Struct):
"""
This class packs and unpacks a register as a 16-bit unsigned integer.
"""
registerLength = 1
def pack(self, value):
if _debug: UnsignedInt._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
UnsignedInt._error("coercion error: %r not an int", value)
value = 0
return [value & 0xFFFF]
def unpack(self, registers):
if _debug: UnsignedInt._debug("unpack %r", registers)
return registers[0]
@bacpypes_debugging
class DoubleInt(_Struct):
"""
This class packs and unpacks a pair of registers as a 32-bit signed integer.
"""
registerLength = 2
def pack(self, value):
if _debug: DoubleInt._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
DoubleInt._error("coercion error: %r not an int", value)
value = 0
return [(value >> 16) & 0xFFFF, value & 0xFFFF]
def unpack(self, registers):
if _debug: DoubleInt._debug("unpack %r", registers)
value = (registers[0] << 16) | registers[1]
if (value & 0x80000000):
value = (-1 << 32) | value
return value
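# Sign-extension sketch: DoubleInt().unpack([0xFFFF, 0xFFFF]) -> -1, since the
# high bit is set and (-1 << 32) | 0xFFFFFFFF evaluates to -1 in Python.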
@bacpypes_debugging
class UnsignedDoubleInt(_Struct):
"""
This class packs and unpacks a pair of registers as a 32-bit unsigned integer.
"""
registerLength = 2
def pack(self, value):
if _debug: UnsignedDoubleInt._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
UnsignedDoubleInt._error("coercion error: %r not an int", value)
value = 0
return [(value >> 16) & 0xFFFF, value & 0xFFFF]
def unpack(self, registers):
if _debug: UnsignedDoubleInt._debug("unpack %r", registers)
return (registers[0] << 16) | registers[1]
@bacpypes_debugging
class Real(_Struct):
registerLength = 2
def pack(self, value):
if _debug: Real._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, float):
try:
value = float(value)
except TypeError:
                Real._error("coercion error: %r not a float", value)
value = 0.0
registers = struct.unpack(">HH", struct.pack(">f", value))
return [registers[1], registers[0]]
def unpack(self, registers):
if _debug: Real._debug("unpack %r", registers)
value, = struct.unpack(">f", struct.pack(">HH", registers[1], registers[0]))
return value
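# Word-order note: Real returns the low-order word first, e.g.
# Real().pack(1.0) -> [0x0000, 0x3F80], whereas BigEndianReal (below) keeps the
# IEEE-754 high word first: BigEndianReal().pack(1.0) -> [0x3F80, 0x0000].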
@bacpypes_debugging
class ROCReal(_Struct):
registerLength = 1
def pack(self, value):
if _debug: ROCReal._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, float):
try:
value = float(value)
except TypeError:
ROCReal._error("coercion error: %r not a float", value)
value = 0.0
raise NotImplementedError("packing ROCReal is not supported")
def unpack(self, registers):
if _debug: ROCReal._debug("unpack %r", registers)
# byte-swap the registers
r0, r1 = registers
r0 = ((r0 & 0xFF00) >> 8) | ((r0 & 0x00FF) << 8)
r1 = ((r1 & 0xFF00) >> 8) | ((r1 & 0x00FF) << 8)
value, = struct.unpack(">f", struct.pack(">HH", r1, r0))
return value
@bacpypes_debugging
class BigEndianDoubleInt(_Struct):
"""
    This class packs and unpacks a pair of registers as a big-endian 32-bit signed integer.
"""
registerLength = 2
def pack(self, value):
if _debug: BigEndianDoubleInt._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
BigEndianDoubleInt._error("coercion error: %r not an int", value)
value = 0
return [value & 0xFFFF, (value >> 16) & 0xFFFF]
def unpack(self, registers):
if _debug: BigEndianDoubleInt._debug("unpack %r", registers)
value = (registers[1] << 16) | registers[0]
if (value & 0x80000000):
value = (-1 << 32) | value
return value
@bacpypes_debugging
class BigEndianUnsignedDoubleInt(_Struct):
"""
    This class packs and unpacks a pair of registers as a big-endian 32-bit unsigned integer.
"""
registerLength = 2
def pack(self, value):
if _debug: BigEndianUnsignedDoubleInt._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
BigEndianUnsignedDoubleInt._error("coercion error: %r not an int", value)
value = 0
return [value & 0xFFFF, (value >> 16) & 0xFFFF]
def unpack(self, registers):
if _debug: BigEndianUnsignedDoubleInt._debug("unpack %r", registers)
return (registers[1] << 16) | registers[0]
@bacpypes_debugging
class BigEndianReal(_Struct):
registerLength = 2
def pack(self, value):
if _debug: BigEndianReal._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, float):
try:
value = float(value)
except TypeError:
BigEndianReal._error("coercion error: %r not a float", value)
value = 0.0
registers = struct.unpack(">HH", struct.pack(">f", value))
return [registers[0], registers[1]]
def unpack(self, registers):
if _debug: BigEndianReal._debug("unpack %r", registers)
value, = struct.unpack(">f", struct.pack(">HH", registers[0], registers[1]))
return value
@bacpypes_debugging
class String(_Struct):
"""
This class packs and unpacks a list of registers as a null terminated string.
"""
def __init__(self, registerLength=6):
if _debug: String._debug("__init__ %r", registerLength)
# save the length
self.registerLength = registerLength
def pack(self, value):
if _debug: String._debug("pack %r", value)
raise NotImplementedError("packing strings is not implemeted")
def unpack(self, registers):
if _debug: String._debug("unpack %r", registers)
octets = []
for reg in registers:
octets.append(reg >> 8)
octets.append(reg & 0xFF)
value = ''.join(chr(c) for c in octets)
        # truncate at the null terminator, if one is present
        nul = value.find('\x00')
        if nul != -1:
            value = value[:nul]
return value
@bacpypes_debugging
class BigEndianString(_Struct):
"""
This class packs and unpacks a list of registers as a null terminated string.
"""
def __init__(self, registerLength=6):
if _debug: String._debug("__init__ %r", registerLength)
# save the length
self.registerLength = registerLength
def pack(self, value):
if _debug: String._debug("pack %r", value)
raise NotImplementedError("packing strings is not implemeted")
def unpack(self, registers):
if _debug: String._debug("unpack %r", registers)
octets = []
for reg in registers:
octets.append(reg & 0xFF)
octets.append(reg >> 8)
value = ''.join(chr(c) for c in octets)
        # truncate at the null terminator, if one is present
        nul = value.find('\x00')
        if nul != -1:
            value = value[:nul]
return value
#
# ModbusStruct
#
ModbusStruct = {
'byte': Byte(),
'int': Int(),
'uint': UnsignedInt(),
'dint': DoubleInt(),
'udint': UnsignedDoubleInt(),
'real': Real(),
'roc-real': ROCReal(),
'be-dint': BigEndianDoubleInt(),
'be-udint': BigEndianUnsignedDoubleInt(),
'be-real': BigEndianReal(),
'str': String(),
'be-str': BigEndianString(),
}
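# Illustrative usage (editor's sketch, not part of the original module): the
# codecs above convert between Python values and lists of 16-bit registers.
# Expected round trips, assuming the classes exactly as defined here:
#
#     >>> ModbusStruct['dint'].pack(70000)          # 70000 == 0x00011170
#     [1, 4464]                                     # [0x0001, 0x1170], high word first
#     >>> ModbusStruct['dint'].unpack([1, 4464])
#     70000
#     >>> ModbusStruct['real'].pack(1.0)            # IEEE-754 1.0 == 0x3F800000
#     [0, 16256]                                    # [0x0000, 0x3F80], low word first
#     >>> ModbusStruct['be-real'].unpack([0x3F80, 0x0000])
#     1.0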
#
# MPCI
#
@bacpypes_debugging
class MPCI(PCI, DebugContents):
"""
This class contains the MODBUS protocol control information which
is the 8 octet header at the front of all MODBUS PDUs.
"""
_debug_contents = (
'mpduTransactionID',
'mpduProtocolID',
'mpduLength',
'mpduUnitID',
'mpduFunctionCode',
)
readCoils = 1
readDiscreteInputs = 2
readMultipleRegisters = 3
readInputRegisters = 4
writeSingleCoil = 5
writeSingleRegister = 6
writeMultipleCoils = 15
writeMultipleRegisters = 16
readWriteMultipleRegisters = 23
announceMaster = 100
registerSlave = 105
def __init__(self, *args, **kwargs):
if _debug: MPCI._debug("__init__ %r %r", args, kwargs)
PCI.__init__(self, *args, **kwargs)
self.mpduTransactionID = 0
self.mpduProtocolID = 0
self.mpduLength = None
self.mpduUnitID = 0
self.mpduFunctionCode = None
def update(self, mpci):
if _debug: MPCI._debug("update %r", mpci)
PCI.update(self, mpci)
self.mpduTransactionID = mpci.mpduTransactionID
self.mpduProtocolID = mpci.mpduProtocolID
self.mpduLength = mpci.mpduLength
self.mpduUnitID = mpci.mpduUnitID
self.mpduFunctionCode = mpci.mpduFunctionCode
def encode(self, pdu):
"""Encode the contents into the PDU."""
if _debug: MPCI._debug("encode %r", pdu)
PCI.update(pdu, self)
pdu.put_short(self.mpduTransactionID)
pdu.put_short(self.mpduProtocolID)
pdu.put_short(self.mpduLength)
pdu.put(self.mpduUnitID)
pdu.put(self.mpduFunctionCode)
def decode(self, pdu):
"""Decode the contents of the PDU."""
if _debug: MPCI._debug("decode %r", pdu)
PCI.update(self, pdu)
self.mpduTransactionID = pdu.get_short()
self.mpduProtocolID = pdu.get_short()
self.mpduLength = pdu.get_short()
self.mpduUnitID = pdu.get()
self.mpduFunctionCode = pdu.get()
# check the length
if self.mpduLength != len(pdu.pduData) + 2:
raise DecodingError("invalid length")
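    # Note (editor's comment): the MODBUS/TCP (MBAP) length field counts every
    # octet that follows it: unit id (1) + function code (1) + payload, which
    # is why the check above compares mpduLength against len(pdu.pduData) + 2.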
#
# MPDU
#
@bacpypes_debugging
class MPDU(MPCI, PDUData):
"""
This class is a generic MODBUS PDU. It inherits the :class:`MPCI`
layer and the more generic PDU data functions.
"""
def __init__(self, *args, **kwargs):
if _debug: MPDU._debug("__init__ %r %r", args, kwargs)
MPCI.__init__(self, **kwargs)
PDUData.__init__(self, *args)
def encode(self, pdu):
if _debug: MPDU._debug("encode %r", pdu)
MPCI.encode(self, pdu)
pdu.put_data(self.pduData)
def decode(self, pdu):
if _debug: MPDU._debug("decode %r", pdu)
MPCI.decode(self, pdu)
self.pduData = pdu.get_data(len(pdu.pduData))
#------------------------------
@bacpypes_debugging
class ReadBitsRequestBase(MPCI, DebugContents):
"""
Base class for messages requesting bit values. This is inherited by
both :class:`ReadCoilsRequest` and :class:`ReadDiscreteInputsRequest`.
"""
_debug_contents = ('address', 'count')
def __init__(self, address, count, **kwargs):
if _debug: ReadBitsRequestBase._debug("__init__ %r %r %r", address, count, kwargs)
MPCI.__init__(self, **kwargs)
self.address = address
self.count = count
def encode(self, pdu):
if _debug: ReadBitsRequestBase._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put_short(self.address)
pdu.put_short(self.count)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: ReadBitsRequestBase._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.address = pdu.get_short()
self.count = pdu.get_short()
@bacpypes_debugging
class ReadBitsResponseBase(MPCI, DebugContents):
"""
Base class for messages that are responses to reading bit values.
This is inherited by both :class:`ReadCoilsResponse` and
:class:`ReadDiscreteInputsResponse`.
"""
_debug_contents = ('bits',)
def __init__(self, values=None, **kwargs):
if _debug: ReadBitsResponseBase._debug("__init__ %r %r", values, kwargs)
MPCI.__init__(self, **kwargs)
if values is not None:
self.bits = values
else:
self.bits = []
def encode(self, pdu):
if _debug: ReadBitsResponseBase._debug("encode %r", pdu)
MPCI.update(pdu, self)
stringbits = _packBitsToString(self.bits)
if _debug: ReadBitsResponseBase._debug(" - stringbits: %r", stringbits)
pdu.put(len(stringbits))
pdu.put_data(stringbits)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: ReadBitsResponseBase._debug("decode %r", pdu)
MPCI.update(self, pdu)
datalen = pdu.get()
self.bits = _unpackBitsFromString(pdu.get_data(datalen))
@bacpypes_debugging
class ReadRegistersRequestBase(MPCI, DebugContents):
"""
Base class for messages requesting register values.
This is inherited by both :class:`ReadMultipleRegistersRequest` and
:class:`ReadInputRegistersRequest`.
"""
_debug_contents = ('address', 'count')
def __init__(self, address=None, count=None, **kwargs):
if _debug: ReadRegistersRequestBase._debug("__init__ %r %r %r", address, count, kwargs)
MPCI.__init__(self, **kwargs)
self.address = address
self.count = count
def encode(self, pdu):
if _debug: ReadRegistersRequestBase._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put_short(self.address)
pdu.put_short(self.count)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: ReadRegistersRequestBase._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.address = pdu.get_short()
self.count = pdu.get_short()
@bacpypes_debugging
class ReadRegistersResponseBase(MPCI, DebugContents):
"""
    Base class for messages that are responses to reading register values.
This is inherited by both :class:`ReadMultipleRegistersResponse` and
:class:`ReadInputRegistersResponse`.
"""
_debug_contents = ('registers',)
def __init__(self, values=None, **kwargs):
if _debug: ReadRegistersResponseBase._debug("__init__ %r %r", values, kwargs)
MPCI.__init__(self, **kwargs)
if values is not None:
self.registers = values
else:
self.registers = []
def encode(self, pdu):
if _debug: ReadRegistersResponseBase._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put(len(self.registers) * 2)
for reg in self.registers:
pdu.put_short(reg)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: ReadRegistersResponseBase._debug("decode %r", pdu)
MPCI.update(self, pdu)
datalen = pdu.get()
self.registers = []
for i in range(datalen // 2):
self.registers.append(pdu.get_short())
@bacpypes_debugging
class ReadWriteValueBase(MPCI, DebugContents):
"""
Base class for messages reading and writing values. This class is
    inherited by :class:`WriteSingleCoilRequest`, :class:`WriteSingleCoilResponse`,
:class:`WriteSingleRegisterRequest`, and :class:`WriteSingleRegisterResponse`.
"""
_debug_contents = ('address', 'value')
def __init__(self, address=None, value=None, **kwargs):
if _debug: ReadWriteValueBase._debug("__init__ %r %r %r", address, value, kwargs)
MPCI.__init__(self, **kwargs)
self.address = address
self.value = value
def encode(self, pdu):
if _debug: ReadWriteValueBase._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put_short(self.address)
pdu.put_short(self.value)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: ReadWriteValueBase._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.address = pdu.get_short()
self.value = pdu.get_short()
#------------------------------
#
# ReadCoils
#
@bacpypes_debugging
class ReadCoilsRequest(ReadBitsRequestBase):
"""
Read Coils Request
"""
functionCode = MPCI.readCoils
def __init__(self, address=None, count=None, **kwargs):
if _debug: ReadCoilsRequest._debug("__init__ %r %r %r", address, count, kwargs)
ReadBitsRequestBase.__init__(self, address, count, **kwargs)
self.mpduFunctionCode = ReadCoilsRequest.functionCode
register_request_type(ReadCoilsRequest)
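# Example (editor's sketch): building and encoding a request for 8 coils
# starting at address 0. `PDU` stands in for any PDUData-compatible buffer
# from this package's pdu module (an assumption, not shown in this file):
#
#     >>> req = ReadCoilsRequest(address=0, count=8)
#     >>> req.mpduFunctionCode == MPCI.readCoils
#     True
#     >>> pdu = PDU()
#     >>> req.encode(pdu)        # writes address + count; sets pdu.mpduLength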
@bacpypes_debugging
class ReadCoilsResponse(ReadBitsResponseBase):
"""
Read Coils Response
"""
functionCode = MPCI.readCoils
def __init__(self, values=None, **kwargs):
if _debug: ReadCoilsResponse._debug("__init__ %r %r", values, kwargs)
ReadBitsResponseBase.__init__(self, values, **kwargs)
self.mpduFunctionCode = ReadCoilsResponse.functionCode
register_response_type(ReadCoilsResponse)
#
# ReadDiscreteInputs
#
@bacpypes_debugging
class ReadDiscreteInputsRequest(ReadBitsRequestBase):
"""
Read Discrete Inputs Request
"""
functionCode = MPCI.readDiscreteInputs
def __init__(self, address=None, count=None, **kwargs):
if _debug: ReadDiscreteInputsRequest._debug("__init__ %r %r %r", address, count, kwargs)
ReadBitsRequestBase.__init__(self, address, count, **kwargs)
self.mpduFunctionCode = ReadDiscreteInputsRequest.functionCode
register_request_type(ReadDiscreteInputsRequest)
@bacpypes_debugging
class ReadDiscreteInputsResponse(ReadBitsResponseBase):
"""
Read Discrete Inputs Response
"""
functionCode = MPCI.readDiscreteInputs
def __init__(self, values=None, **kwargs):
if _debug: ReadDiscreteInputsResponse._debug("__init__ %r %r", values, kwargs)
ReadBitsResponseBase.__init__(self, values, **kwargs)
self.mpduFunctionCode = ReadDiscreteInputsResponse.functionCode
register_response_type(ReadDiscreteInputsResponse)
#
# ReadMultipleRegisters
#
@bacpypes_debugging
class ReadMultipleRegistersRequest(ReadRegistersRequestBase):
"""
Read Multiple Registers Request
"""
functionCode = MPCI.readMultipleRegisters
def __init__(self, address=None, count=None, **kwargs):
if _debug: ReadMultipleRegistersRequest._debug("__init__ %r %r %r", address, count, kwargs)
ReadRegistersRequestBase.__init__(self, address, count, **kwargs)
self.mpduFunctionCode = ReadMultipleRegistersRequest.functionCode
register_request_type(ReadMultipleRegistersRequest)
@bacpypes_debugging
class ReadMultipleRegistersResponse(ReadRegistersResponseBase):
"""
Read Multiple Registers Response
"""
functionCode = MPCI.readMultipleRegisters
def __init__(self, values=None, **kwargs):
if _debug: ReadMultipleRegistersResponse._debug("__init__ %r %r", values, kwargs)
ReadRegistersResponseBase.__init__(self, values, **kwargs)
self.mpduFunctionCode = ReadMultipleRegistersResponse.functionCode
register_response_type(ReadMultipleRegistersResponse)
#
# ReadInputRegisters
#
@bacpypes_debugging
class ReadInputRegistersRequest(ReadRegistersRequestBase):
"""
Read Input Registers Request
"""
functionCode = MPCI.readInputRegisters
def __init__(self, address=None, count=None, **kwargs):
if _debug: ReadInputRegistersRequest._debug("__init__ %r %r %r", address, count, kwargs)
ReadRegistersRequestBase.__init__(self, address, count, **kwargs)
self.mpduFunctionCode = ReadInputRegistersRequest.functionCode
register_request_type(ReadInputRegistersRequest)
@bacpypes_debugging
class ReadInputRegistersResponse(ReadRegistersResponseBase):
"""
Read Input Registers Response
"""
functionCode = MPCI.readInputRegisters
def __init__(self, values=None, **kwargs):
if _debug: ReadInputRegistersResponse._debug("__init__ %r %r", values, kwargs)
ReadRegistersResponseBase.__init__(self, values, **kwargs)
self.mpduFunctionCode = ReadInputRegistersResponse.functionCode
register_response_type(ReadInputRegistersResponse)
#
# WriteSingleCoil
#
@bacpypes_debugging
class WriteSingleCoilRequest(ReadWriteValueBase):
"""
Write Single Coil Request
"""
functionCode = MPCI.writeSingleCoil
def __init__(self, address=None, value=None, **kwargs):
if _debug: WriteSingleCoilRequest._debug("__init__ %r %r %r", address, value, kwargs)
ReadWriteValueBase.__init__(self, address, value, **kwargs)
self.mpduFunctionCode = WriteSingleCoilRequest.functionCode
register_request_type(WriteSingleCoilRequest)
@bacpypes_debugging
class WriteSingleCoilResponse(ReadWriteValueBase):
"""
Write Single Coil Response
"""
functionCode = MPCI.writeSingleCoil
def __init__(self, address=None, value=None, **kwargs):
if _debug: WriteSingleCoilResponse._debug("__init__ %r %r %r", address, value, kwargs)
ReadWriteValueBase.__init__(self, address, value, **kwargs)
self.mpduFunctionCode = WriteSingleCoilResponse.functionCode
register_response_type(WriteSingleCoilResponse)
#
# WriteSingleRegister
#
@bacpypes_debugging
class WriteSingleRegisterRequest(ReadWriteValueBase):
"""
Write Single Register Request
"""
functionCode = MPCI.writeSingleRegister
def __init__(self, address=None, value=None, **kwargs):
if _debug: WriteSingleRegisterRequest._debug("__init__ %r %r %r", address, value, kwargs)
ReadWriteValueBase.__init__(self, address, value, **kwargs)
self.mpduFunctionCode = WriteSingleRegisterRequest.functionCode
register_request_type(WriteSingleRegisterRequest)
@bacpypes_debugging
class WriteSingleRegisterResponse(ReadWriteValueBase):
"""
Write Single Register Response
"""
functionCode = MPCI.writeSingleRegister
def __init__(self, address=None, value=None, **kwargs):
if _debug: WriteSingleRegisterResponse._debug("__init__ %r %r %r", address, value, kwargs)
ReadWriteValueBase.__init__(self, address, value, **kwargs)
self.mpduFunctionCode = WriteSingleRegisterResponse.functionCode
register_response_type(WriteSingleRegisterResponse)
#
# WriteMultipleCoils
#
@bacpypes_debugging
class WriteMultipleCoilsRequest(MPCI, DebugContents):
"""
Write Multiple Coils Request
"""
_debug_contents = ('address', 'count', 'coils')
functionCode = MPCI.writeMultipleCoils
def __init__(self, address=None, count=None, coils=None, **kwargs):
if _debug: WriteMultipleCoilsRequest._debug("__init__ %r %r %r %r", address, count, coils, kwargs)
MPCI.__init__(self, **kwargs)
self.mpduFunctionCode = WriteMultipleCoilsRequest.functionCode
self.address = address
self.count = count
if coils is not None:
self.coils = coils
else:
self.coils = [False] * count
def encode(self, pdu):
if _debug: WriteMultipleCoilsRequest._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put_short(self.address)
pdu.put_short(self.count)
stringbits = _packBitsToString(self.coils)
pdu.put(len(stringbits))
pdu.put_data(stringbits)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: WriteMultipleCoilsRequest._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.address = pdu.get_short()
self.count = pdu.get_short()
datalen = pdu.get()
coils = _unpackBitsFromString(pdu.get_data(datalen))
self.coils = coils[:self.count]
register_request_type(WriteMultipleCoilsRequest)
@bacpypes_debugging
class WriteMultipleCoilsResponse(MPCI, DebugContents):
"""
Write Multiple Coils Response
"""
_debug_contents = ('address', 'count')
functionCode = MPCI.writeMultipleCoils
def __init__(self, address=None, count=None, **kwargs):
if _debug: WriteMultipleCoilsResponse._debug("__init__ %r %r %r", address, count, kwargs)
MPCI.__init__(self, **kwargs)
self.mpduFunctionCode = WriteMultipleCoilsResponse.functionCode
self.address = address
self.count = count
def encode(self, pdu):
if _debug: WriteMultipleCoilsResponse._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put_short(self.address)
pdu.put_short(self.count)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: WriteMultipleCoilsResponse._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.address = pdu.get_short()
self.count = pdu.get_short()
register_response_type(WriteMultipleCoilsResponse)
#
# WriteMultipleRegisters
#
@bacpypes_debugging
class WriteMultipleRegistersRequest(MPCI, DebugContents):
"""
Write Multiple Registers Request
"""
_debug_contents = ('address', 'count', 'registers')
functionCode = MPCI.writeMultipleRegisters
def __init__(self, address=None, count=None, registers=None, **kwargs):
if _debug: WriteMultipleRegistersRequest._debug("__init__ %r %r %r %r", address, count, registers, kwargs)
MPCI.__init__(self, **kwargs)
self.mpduFunctionCode = WriteMultipleRegistersRequest.functionCode
self.address = address
self.count = count
if registers is not None:
self.registers = registers
elif count is not None:
self.registers = [0] * self.count
else:
self.registers = None
def encode(self, pdu):
if _debug: WriteMultipleRegistersRequest._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put_short(self.address)
pdu.put_short(self.count)
pdu.put(len(self.registers) * 2)
for reg in self.registers:
pdu.put_short(reg)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: WriteMultipleRegistersRequest._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.address = pdu.get_short()
self.count = pdu.get_short()
datalen = pdu.get()
self.registers = []
for i in range(datalen // 2):
self.registers.append(pdu.get_short())
register_request_type(WriteMultipleRegistersRequest)
@bacpypes_debugging
class WriteMultipleRegistersResponse(MPCI, DebugContents):
"""
Write Multiple Registers Response
"""
_debug_contents = ('address', 'count')
functionCode = MPCI.writeMultipleRegisters
def __init__(self, address=None, count=None, **kwargs):
if _debug: WriteMultipleRegistersResponse._debug("__init__ %r %r %r", address, count, kwargs)
MPCI.__init__(self, **kwargs)
self.mpduFunctionCode = WriteMultipleRegistersResponse.functionCode
self.address = address
self.count = count
def encode(self, pdu):
if _debug: WriteMultipleRegistersResponse._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put_short(self.address)
pdu.put_short(self.count)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: WriteMultipleRegistersResponse._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.address = pdu.get_short()
self.count = pdu.get_short()
register_response_type(WriteMultipleRegistersResponse)
#
# ReadWriteMultipleRegistersRequest
#
@bacpypes_debugging
class ReadWriteMultipleRegistersRequest(MPCI, DebugContents):
"""
Read Write Multiple Registers Request
"""
_debug_contents = ('raddress', 'rcount', 'waddress', 'wcount', 'registers')
functionCode = MPCI.readWriteMultipleRegisters
def __init__(self, raddress=None, rcount=None, waddress=None, wcount=None, registers=None, **kwargs):
if _debug: ReadWriteMultipleRegistersRequest._debug("__init__ %r %r %r %r %r %r", raddress, rcount, waddress, wcount, registers, kwargs)
MPCI.__init__(self, **kwargs)
self.mpduFunctionCode = ReadWriteMultipleRegistersRequest.functionCode
self.raddress = raddress
self.rcount = rcount
self.waddress = waddress
self.wcount = wcount
if registers is not None:
self.registers = registers
else:
self.registers = [0] * wcount
def encode(self, pdu):
if _debug: ReadWriteMultipleRegistersRequest._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put_short(self.raddress)
pdu.put_short(self.rcount)
pdu.put_short(self.waddress)
pdu.put_short(self.wcount)
pdu.put(len(self.registers) * 2)
for reg in self.registers:
pdu.put_short(reg)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: ReadWriteMultipleRegistersRequest._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.raddress = pdu.get_short()
self.rcount = pdu.get_short()
self.waddress = pdu.get_short()
self.wcount = pdu.get_short()
datalen = pdu.get()
self.registers = []
for i in range(datalen // 2):
self.registers.append(pdu.get_short())
register_request_type(ReadWriteMultipleRegistersRequest)
@bacpypes_debugging
class ReadWriteMultipleRegistersResponse(MPCI, DebugContents):
"""
Read Write Multiple Registers Response
"""
_debug_contents = ('registers',)
functionCode = MPCI.readWriteMultipleRegisters
def __init__(self, registers=None, **kwargs):
if _debug: ReadWriteMultipleRegistersResponse._debug("__init__ %r %r", registers, kwargs)
MPCI.__init__(self, **kwargs)
self.mpduFunctionCode = ReadWriteMultipleRegistersResponse.functionCode
if registers is not None:
self.registers = registers
else:
self.registers = []
def encode(self, pdu):
if _debug: ReadWriteMultipleRegistersResponse._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put(len(self.registers) * 2)
for reg in self.registers:
pdu.put_short(reg)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: ReadWriteMultipleRegistersResponse._debug("decode %r", pdu)
MPCI.update(self, pdu)
datalen = pdu.get()
self.registers = []
for i in range(datalen // 2):
self.registers.append(pdu.get_short())
register_response_type(ReadWriteMultipleRegistersResponse)
#
# ExceptionResponse
#
@bacpypes_debugging
class ExceptionResponse(MPCI, DebugContents):
"""
Exception Response
"""
_debug_contents = ('exceptionCode',)
ILLEGAL_FUNCTION = 0x01
ILLEGAL_DATA_ADDRESS = 0x02
ILLEGAL_DATA_VALUE = 0x03
ILLEGAL_RESPONSE_LENGTH = 0x04
ACKNOWLEDGE = 0x05
SLAVE_DEVICE_BUSY = 0x06
NEGATIVE_ACKNOWLEDGE = 0x07
MEMORY_PARITY_ERROR = 0x08
GATEWAY_PATH_UNAVAILABLE = 0x0A
GATEWAY_TARGET_DEVICE_FAILED_TO_RESPOND = 0x0B
def __init__(self, function=None, exceptionCode=None, **kwargs):
if _debug: ExceptionResponse._debug("__init__ %r %r %r", function, exceptionCode, kwargs)
MPCI.__init__(self, **kwargs)
if function is not None:
self.mpduFunctionCode = function + 128
else:
self.mpduFunctionCode = None
self.exceptionCode = exceptionCode
def encode(self, pdu):
if _debug: ExceptionResponse._debug("encode %r", pdu)
MPCI.update(pdu, self)
pdu.put(self.exceptionCode)
pdu.mpduLength = len(pdu.pduData) + 2
def decode(self, pdu):
if _debug: ExceptionResponse._debug("decode %r", pdu)
MPCI.update(self, pdu)
self.exceptionCode = pdu.get()
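# Note (editor's comment): per the MODBUS spec an exception response echoes the
# request function code with the high bit set, which is why __init__ above adds
# 128 (0x80) to the original function code.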
| 27.847233 | 144 | 0.637231 | 31,938 | 0.893921 | 0 | 0 | 32,196 | 0.901142 | 0 | 0 | 6,146 | 0.172022 |
763d00edb1aabcadb474534e123fc73d5db212cc | 2,650 | py | Python | rastervision/core/rv_pipeline/object_detection_config.py | jeromemaleski/raster-vision | 1b0c7c789afbe1db27da4bc2dd0f58a02fca9f01 | [
"Apache-2.0"
] | null | null | null | rastervision/core/rv_pipeline/object_detection_config.py | jeromemaleski/raster-vision | 1b0c7c789afbe1db27da4bc2dd0f58a02fca9f01 | [
"Apache-2.0"
] | null | null | null | rastervision/core/rv_pipeline/object_detection_config.py | jeromemaleski/raster-vision | 1b0c7c789afbe1db27da4bc2dd0f58a02fca9f01 | [
"Apache-2.0"
] | null | null | null | from enum import Enum
from rastervision.pipeline.config import register_config, Config, Field
from rastervision.core.rv_pipeline import RVPipelineConfig
from rastervision.core.data.label_store import ObjectDetectionGeoJSONStoreConfig
from rastervision.core.evaluation import ObjectDetectionEvaluatorConfig
class ObjectDetectionWindowMethod(Enum):
"""Enum for window methods
Attributes:
chip: the default method
"""
chip = 'chip'
@register_config('object_detection_chip_options')
class ObjectDetectionChipOptions(Config):
neg_ratio: float = Field(
1.0,
description=
('The ratio of negative chips (those containing no bounding '
'boxes) to positive chips. This can be useful if the statistics '
        'of the background are different in positive chips. For example, '
'in car detection, the positive chips will always contain roads, '
'but no examples of rooftops since cars tend to not be near rooftops.'
))
ioa_thresh: float = Field(
0.8,
description=
('When a box is partially outside of a training chip, it is not clear if (a '
'clipped version) of the box should be included in the chip. If the IOA '
'(intersection over area) of the box with the chip is greater than ioa_thresh, '
'it is included in the chip.'))
window_method: ObjectDetectionWindowMethod = ObjectDetectionWindowMethod.chip
@register_config('object_detection_predict_options')
class ObjectDetectionPredictOptions(Config):
merge_thresh: float = Field(
0.5,
description=
('If predicted boxes have an IOA (intersection over area) greater than '
'merge_thresh, then they are merged into a single box during postprocessing. '
'This is needed since the sliding window approach results in some false '
'duplicates.'))
score_thresh: float = Field(
0.5,
description=
('Predicted boxes are only output if their score is above score_thresh.'
))
@register_config('object_detection')
class ObjectDetectionConfig(RVPipelineConfig):
chip_options: ObjectDetectionChipOptions = ObjectDetectionChipOptions()
predict_options: ObjectDetectionPredictOptions = ObjectDetectionPredictOptions(
)
def build(self, tmp_dir):
from rastervision.core.rv_pipeline.object_detection import ObjectDetection
return ObjectDetection(self, tmp_dir)
def get_default_label_store(self, scene):
return ObjectDetectionGeoJSONStoreConfig()
def get_default_evaluator(self):
return ObjectDetectionEvaluatorConfig()
| 37.857143 | 89 | 0.718491 | 2,191 | 0.826792 | 0 | 0 | 2,184 | 0.824151 | 0 | 0 | 1,063 | 0.401132 |
763e6130c882fc4e05156ff0a6f93880ac497dc1 | 2,652 | py | Python | applications/terms_of_service/views.py | awwong1/apollo | 5571b5f222265bec3eed45b21e862636ccdc9a97 | [
"MIT"
] | null | null | null | applications/terms_of_service/views.py | awwong1/apollo | 5571b5f222265bec3eed45b21e862636ccdc9a97 | [
"MIT"
] | null | null | null | applications/terms_of_service/views.py | awwong1/apollo | 5571b5f222265bec3eed45b21e862636ccdc9a97 | [
"MIT"
] | null | null | null | from apollo.viewmixins import LoginRequiredMixin, StaffRequiredMixin, ActivitySendMixin
from applications.terms_of_service.models import TermsOfService
from django.contrib.messages.views import SuccessMessageMixin
from django.core.urlresolvers import reverse_lazy
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
class TermsOfServiceViewList(LoginRequiredMixin, ListView):
context_object_name = "termsofservices"
model = TermsOfService
template_name = "terms_of_service/termsofservice_list.html"
class TermsOfServiceViewDetail(LoginRequiredMixin, DetailView):
context_object_name = 'termsofservice'
model = TermsOfService
template_name = "terms_of_service/termsofservice_detail.html"
class TermsOfServiceViewCreate(LoginRequiredMixin, StaffRequiredMixin, SuccessMessageMixin, ActivitySendMixin, CreateView):
context_object_name = 'termsofservice'
model = TermsOfService
success_message = "%(title)s was created successfully!"
template_name = "terms_of_service/termsofservice_form.html"
activity_verb = 'created terms of service'
def get_success_url(self):
return reverse_lazy('termsofservice_detail', kwargs={'pk': self.object.pk})
def get_context_data(self, **kwargs):
context = super(TermsOfServiceViewCreate, self).get_context_data(**kwargs)
context['action'] = "Create New"
return context
class TermsOfServiceViewUpdate(LoginRequiredMixin, StaffRequiredMixin, SuccessMessageMixin, ActivitySendMixin, UpdateView):
context_object_name = 'termsofservice'
model = TermsOfService
success_message = "%(title)s was updated successfully!"
template_name = "terms_of_service/termsofservice_form.html"
activity_verb = 'updated terms of service'
def get_success_url(self):
return reverse_lazy('termsofservice_detail', kwargs={'pk': self.object.pk})
def get_context_data(self, **kwargs):
context = super(TermsOfServiceViewUpdate, self).get_context_data(**kwargs)
context['action'] = "Update"
return context
class TermsOfServiceViewDelete(LoginRequiredMixin, StaffRequiredMixin, ActivitySendMixin, DeleteView):
context_object_name = 'termsofservice'
model = TermsOfService
success_url = reverse_lazy('termsofservice_list')
template_name = "terms_of_service/termsofservice_form.html"
target_object_valid = False
activity_verb = 'deleted terms of service'
def get_context_data(self, **kwargs):
context = super(TermsOfServiceViewDelete, self).get_context_data(**kwargs)
context['action'] = "Delete"
return context | 42.095238 | 123 | 0.77187 | 2,284 | 0.861237 | 0 | 0 | 0 | 0 | 0 | 0 | 577 | 0.217572 |
763f4b908722ffb161ebab1922f9c5497d86b5d9 | 748 | py | Python | gateway/helper/event_log.py | ehoefig/stout | 3c4fc75891665dc35f67ad84b6b86a02aeed4688 | [
"MIT"
] | null | null | null | gateway/helper/event_log.py | ehoefig/stout | 3c4fc75891665dc35f67ad84b6b86a02aeed4688 | [
"MIT"
] | null | null | null | gateway/helper/event_log.py | ehoefig/stout | 3c4fc75891665dc35f67ad84b6b86a02aeed4688 | [
"MIT"
] | null | null | null | import re
import logging
from pydispatch import dispatcher
__author__ = 'edzard'
logger = logging.getLogger(__name__)
_filters = {}
def _handler(sender, **kwargs):
global _filters
for parameter_name in kwargs:
if parameter_name in _filters:
data = kwargs[parameter_name]
if _filters[parameter_name].match(data) is None:
return
logger.info("<{}> event from {} -> {}".format(kwargs['signal'], sender, kwargs))
dispatcher.connect(_handler, signal=dispatcher.Any, sender=dispatcher.Any)
def set_filter(**kwargs):
global _filters
for parameter_name in kwargs:
regex = kwargs[parameter_name]
pattern = re.compile(regex)
_filters[parameter_name] = pattern | 25.793103 | 84 | 0.677807 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.05615 |
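# Example (editor's sketch): only log events whose 'state' payload matches a
# regular expression; any other field name sent via pydispatch works the same.
#
#     >>> set_filter(state='on|off')
#     >>> dispatcher.send(signal='switch', state='on')    # logged
#     >>> dispatcher.send(signal='switch', state='dim')   # filtered out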
763ffc0241362b9ed7c4968aba6ab05141e6413b | 106 | py | Python | olufekoscrumy/apps.py | olufekosamuel/scrumy | 8d93794bb04a3b3ec76ae41ae685f4e2c7b55730 | [
"MIT"
] | null | null | null | olufekoscrumy/apps.py | olufekosamuel/scrumy | 8d93794bb04a3b3ec76ae41ae685f4e2c7b55730 | [
"MIT"
] | null | null | null | olufekoscrumy/apps.py | olufekosamuel/scrumy | 8d93794bb04a3b3ec76ae41ae685f4e2c7b55730 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class OlufekoscrumyConfig(AppConfig):
name = 'olufekoscrumy'
| 17.666667 | 38 | 0.745283 | 65 | 0.613208 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.141509 |
7640076addca84aec71e6807681f274ca64228cc | 9,395 | py | Python | admin/tests/test_layers_vectortiles.py | rbovard/c2cgeoportal | 61b7a4fc98f686f9b7d4c5fda7bb4c5cc09f8de8 | [
"BSD-2-Clause-FreeBSD"
] | 43 | 2015-02-16T06:56:25.000Z | 2021-09-12T17:49:16.000Z | admin/tests/test_layers_vectortiles.py | rbovard/c2cgeoportal | 61b7a4fc98f686f9b7d4c5fda7bb4c5cc09f8de8 | [
"BSD-2-Clause-FreeBSD"
] | 3,227 | 2015-01-05T10:30:59.000Z | 2022-03-31T03:25:39.000Z | admin/tests/test_layers_vectortiles.py | rbovard/c2cgeoportal | 61b7a4fc98f686f9b7d4c5fda7bb4c5cc09f8de8 | [
"BSD-2-Clause-FreeBSD"
] | 57 | 2015-01-29T08:32:12.000Z | 2022-03-16T07:07:33.000Z | # pylint: disable=no-self-use
import re
import pytest
from . import AbstractViewsTests, factory_build_layers, get_test_default_layers
@pytest.fixture(scope="function")
@pytest.mark.usefixtures("dbsession", "transact")
def layer_vectortiles_test_data(dbsession, transact):
del transact
from c2cgeoportal_commons.models.main import LayerVectorTiles, OGCServer
servers = [OGCServer(name=f"server_{i}") for i in range(0, 4)]
for i, server in enumerate(servers):
server.url = f"http://wms.geo.admin.ch_{i}"
server.image_type = "image/jpeg" if i % 2 == 0 else "image/png"
def layer_builder(i):
name = f"layer_vectortiles_{i}"
layer = LayerVectorTiles(name=name)
layer.layer = name
layer.public = 1 == i % 2
layer.style = "https://vectortiles-staging.geoportail.lu/styles/roadmap/style.json"
layer.xyz = "https://vectortiles-staging.geoportail.lu/styles/roadmap/{z}/{x}/{y}.png"
return layer
data = factory_build_layers(layer_builder, dbsession)
data["default"] = get_test_default_layers(dbsession, server)
dbsession.flush()
yield data
@pytest.mark.usefixtures("layer_vectortiles_test_data", "test_app")
class TestLayerVectortiles(AbstractViewsTests):
_prefix = "/admin/layers_vectortiles"
def test_index_rendering(self, test_app):
resp = self.get(test_app)
self.check_left_menu(resp, "Vector Tiles Layers")
expected = [
("actions", "", "false"),
("id", "id", "true"),
("name", "Name"),
("description", "Description"),
("public", "Public"),
("geo_table", "Geo table"),
("exclude_properties", "Exclude properties"),
("style", "Style"),
("xyz", "Raster URL"),
("dimensions", "Dimensions", "false"),
("interfaces", "Interfaces"),
("restrictionareas", "Restriction areas", "false"),
("parents_relation", "Parents", "false"),
("metadatas", "Metadatas", "false"),
]
self.check_grid_headers(resp, expected)
def test_grid_complex_column_val(self, test_app, layer_vectortiles_test_data):
json = self.check_search(test_app, sort="name")
row = json["rows"][0]
layer = layer_vectortiles_test_data["layers"][0]
assert layer.id == int(row["_id_"])
assert layer.name == row["name"]
def test_new(self, test_app, layer_vectortiles_test_data, dbsession):
default_vectortiles = layer_vectortiles_test_data["default"]["vectortiles"]
default_vectortiles.name = "so can I not be found"
dbsession.flush()
form = self.get_item(test_app, "new").form
assert "" == self.get_first_field_named(form, "id").value
assert "" == self.get_first_field_named(form, "name").value
assert "" == self.get_first_field_named(form, "style").value
assert "" == self.get_first_field_named(form, "xyz").value
def test_grid_search(self, test_app):
self.check_search(test_app, "layer_vectortiles_10", total=1)
def test_base_edit(self, test_app, layer_vectortiles_test_data):
layer = layer_vectortiles_test_data["layers"][10]
form = self.get_item(test_app, layer.id).form
assert "layer_vectortiles_10" == self.get_first_field_named(form, "name").value
assert "" == self.get_first_field_named(form, "description").value
def test_public_checkbox_edit(self, test_app, layer_vectortiles_test_data):
layer = layer_vectortiles_test_data["layers"][10]
form = self.get_item(test_app, layer.id).form
assert not form["public"].checked
layer = layer_vectortiles_test_data["layers"][11]
form = self.get_item(test_app, layer.id).form
assert form["public"].checked
def test_edit(self, test_app, layer_vectortiles_test_data, dbsession):
layer = layer_vectortiles_test_data["layers"][0]
form = self.get_item(test_app, layer.id).form
assert str(layer.id) == self.get_first_field_named(form, "id").value
assert "hidden" == self.get_first_field_named(form, "id").attrs["type"]
assert layer.name == self.get_first_field_named(form, "name").value
assert str(layer.description or "") == self.get_first_field_named(form, "description").value
assert layer.public is False
assert layer.public == form["public"].checked
assert str(layer.geo_table or "") == form["geo_table"].value
assert str(layer.exclude_properties or "") == form["exclude_properties"].value
assert str(layer.style or "") == form["style"].value
assert str(layer.xyz or "") == form["xyz"].value
interfaces = layer_vectortiles_test_data["interfaces"]
assert {interfaces[0].id, interfaces[2].id} == {i.id for i in layer.interfaces}
self._check_interfaces(form, interfaces, layer)
ras = layer_vectortiles_test_data["restrictionareas"]
assert {ras[0].id, ras[2].id} == {i.id for i in layer.restrictionareas}
self._check_restrictionsareas(form, ras, layer)
new_values = {
"name": "new_name",
"description": "new description",
"public": True,
"geo_table": "new_geo_table",
"exclude_properties": "property1,property2",
"style": "https://new_style.json",
"xyz": "https://new_style/{x}/{y}/{z}.png",
}
for key, value in new_values.items():
self.set_first_field_named(form, key, value)
form["interfaces"] = [interfaces[1].id, interfaces[3].id]
form["restrictionareas"] = [ras[1].id, ras[3].id]
resp = form.submit("submit")
assert str(layer.id) == re.match(
fr"http://localhost{self._prefix}/(.*)\?msg_col=submit_ok", resp.location
).group(1)
dbsession.expire(layer)
for key, value in new_values.items():
if isinstance(value, bool):
assert value == getattr(layer, key)
else:
assert str(value or "") == str(getattr(layer, key) or "")
assert {interfaces[1].id, interfaces[3].id} == {interface.id for interface in layer.interfaces}
assert {ras[1].id, ras[3].id} == {ra.id for ra in layer.restrictionareas}
def test_submit_new(self, dbsession, test_app, layer_vectortiles_test_data):
from c2cgeoportal_commons.models.main import LayerVectorTiles
resp = test_app.post(
"/admin/layers_vectortiles/new",
{
"name": "new_name",
"description": "new description",
"public": True,
"style": "https://new_style/styles/layer/style.json",
"xyz": "https://new_style/styles/layer/{z}/{x}/{y}.png",
},
status=302,
)
layer = dbsession.query(LayerVectorTiles).filter(LayerVectorTiles.name == "new_name").one()
assert str(layer.id) == re.match(
r"http://localhost/admin/layers_vectortiles/(.*)\?msg_col=submit_ok", resp.location
).group(1)
def test_duplicate(self, layer_vectortiles_test_data, test_app, dbsession):
from c2cgeoportal_commons.models.main import LayerVectorTiles
layer = layer_vectortiles_test_data["layers"][3]
resp = test_app.get(f"/admin/layers_vectortiles/{layer.id}/duplicate", status=200)
form = resp.form
assert "" == self.get_first_field_named(form, "id").value
assert layer.name == self.get_first_field_named(form, "name").value
assert str(layer.description or "") == self.get_first_field_named(form, "description").value
assert layer.public is True
assert layer.public == form["public"].checked
assert str(layer.geo_table or "") == form["geo_table"].value
assert str(layer.exclude_properties or "") == form["exclude_properties"].value
assert str(layer.style or "") == form["style"].value
assert str(layer.xyz or "") == form["xyz"].value
interfaces = layer_vectortiles_test_data["interfaces"]
assert {interfaces[3].id, interfaces[1].id} == {i.id for i in layer.interfaces}
self._check_interfaces(form, interfaces, layer)
self.set_first_field_named(form, "name", "clone")
resp = form.submit("submit")
layer = dbsession.query(LayerVectorTiles).filter(LayerVectorTiles.name == "clone").one()
assert str(layer.id) == re.match(
r"http://localhost/admin/layers_vectortiles/(.*)\?msg_col=submit_ok", resp.location
).group(1)
assert layer.id == layer.metadatas[0].item_id
assert layer_vectortiles_test_data["layers"][3].metadatas[0].name == layer.metadatas[0].name
assert layer_vectortiles_test_data["layers"][3].metadatas[1].name == layer.metadatas[1].name
def test_delete(self, test_app, dbsession):
from c2cgeoportal_commons.models.main import Layer, LayerVectorTiles, TreeItem
layer_id = dbsession.query(LayerVectorTiles.id).first().id
test_app.delete(f"/admin/layers_vectortiles/{layer_id}", status=200)
assert dbsession.query(LayerVectorTiles).get(layer_id) is None
assert dbsession.query(Layer).get(layer_id) is None
assert dbsession.query(TreeItem).get(layer_id) is None
| 41.941964 | 103 | 0.640021 | 8,176 | 0.87025 | 924 | 0.09835 | 9,252 | 0.984779 | 0 | 0 | 1,951 | 0.207664 |
7641a3c782ad92dcb116db2b6333f07537b9c204 | 7,608 | py | Python | SSRF/dns.py | cxosmo/curated-wordlists | 0703f0aeb3d68ec1244e408e14823bf65a0fe270 | [
"BSD-3-Clause"
] | 2 | 2021-12-30T10:36:15.000Z | 2022-02-23T07:26:52.000Z | SSRF/dns.py | Funhity/curated-wordlists | e50900faeb18bc3071ff02b741408a90442c6e30 | [
"BSD-3-Clause"
] | null | null | null | SSRF/dns.py | Funhity/curated-wordlists | e50900faeb18bc3071ff02b741408a90442c6e30 | [
"BSD-3-Clause"
] | 2 | 2021-12-13T03:18:28.000Z | 2021-12-28T08:32:12.000Z | #encoding: utf-8
from __future__ import print_function
from builtins import str
import ipaddress
import datetime
import os
import sys
from twisted.names import client, dns, server, hosts as hosts_module, root, cache, resolve
from twisted.internet import reactor
from twisted.python.runtime import platform
TTL = 0
dict = {}
dont_print_ip_ranges = ['172.253.0.0/16','172.217.0.0/16']
dont_rebind_nameservers = ["ns1.", "ns2."]
FILENAME = "dns-log-" + str(datetime.datetime.now().strftime("%H-%M-%S.%f-%d-%m-%Y"))+'.log'
WHITELISTEDIP = ''
INTERNALIP = ''
SERVERIP = ''
PORT = 53
DOMAIN = ''
def OpenLogFile():
global f
major = sys.version_info[0]
if major == 3:
f = open(FILENAME, 'a')
else:
f = open(FILENAME, 'ab')
def CloseLogFile():
f.close()
def search_file_for_all(hosts_file, name):
results = []
if name.decode().lower() not in dont_rebind_nameservers:
if name not in dict or dict[name] < 1:
ip = WHITELISTEDIP
else:
ip = INTERNALIP
if name not in dict:
dict[name] = 0
dict[name] += 1
else:
ip = SERVERIP
print('================================================================================================')
print("ServerTime - A record: ",datetime.datetime.now().strftime("%H:%M:%S.%f %d-%m-%Y"), sep='')
print('Response with A record: ', name.decode('utf-8'), ' -> ', ip, sep='')
print('================================================================================================')
OpenLogFile()
print('================================================================================================', file=f)
print("ServerTime - A record: ",datetime.datetime.now().strftime("%H:%M:%S.%f %d-%m-%Y"), sep='', file=f)
print('Response with A record: ', name.decode('utf-8'), ' -> ', ip, sep='', file=f)
print('================================================================================================', file=f)
CloseLogFile()
results.append(hosts_module.nativeString(ip))
return results
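# Editor's note: the per-name counter in `dict` (which shadows the built-in)
# implements the DNS-rebinding flow: the first lookup of a name is answered
# with WHITELISTEDIP so the target's validation passes, every later lookup is
# answered with INTERNALIP (e.g. 169.254.169.254), and the ns1./ns2. names
# always resolve to SERVERIP.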
class Resolver(hosts_module.Resolver):
def _aRecords(self, name):
return tuple([
dns.RRHeader(name, dns.A, dns.IN, TTL, dns.Record_A(addr, TTL))
for addr in search_file_for_all(hosts_module.FilePath(self.file), name)
if hosts_module.isIPAddress(addr)
])
class PrintClientAddressDNSServerFactory(server.DNSServerFactory):
def check_network(self, network):
for dont_print_ip_range in dont_print_ip_ranges:
if ipaddress.ip_address(u"%s" % network) in ipaddress.ip_network(u"%s" % dont_print_ip_range):
return True
return False
def buildProtocol(self, addr):
if not self.check_network(addr.host):
print('------------------------------------------------------------------------------------------------')
print("ServerTime - DNSServerFactory: ",datetime.datetime.now().strftime("%H:%M:%S.%f %d-%m-%Y"), sep='')
print("Request: Connection to DNSServerFactory from: ", addr.host," on port: ",addr.port," using ",addr.type,sep='')
print('------------------------------------------------------------------------------------------------')
OpenLogFile()
print('------------------------------------------------------------------------------------------------', file=f)
print("ServerTime: - DNSServerFactory: ",datetime.datetime.now().strftime("%H:%M:%S.%f %d-%m-%Y"), file=f, sep='')
print("Request: Connection to DNSServerFactory from: ", addr.host," on port: ",addr.port," using ",addr.type, file=f, sep='')
print('------------------------------------------------------------------------------------------------', file=f)
CloseLogFile()
return server.DNSServerFactory.buildProtocol(self, addr)
class PrintClientAddressDNSDatagramProtocol(dns.DNSDatagramProtocol):
def check_network(self, network):
for dont_print_ip_range in dont_print_ip_ranges:
if ipaddress.ip_address(u"%s" % network) in ipaddress.ip_network(u"%s" % dont_print_ip_range):
return True
return False
def datagramReceived(self, datagram, addr):
if not self.check_network(addr[0]):
print('------------------------------------------------------------------------------------------------')
print("ServerTime - DNSDatagramProtocol: ",datetime.datetime.now().strftime("%H:%M:%S.%f %d-%m-%Y"), sep='')
print("Request: Datagram to DNSDatagramProtocol from: ", addr[0], " on port: ", addr[1], sep='')
print('------------------------------------------------------------------------------------------------')
OpenLogFile()
print('------------------------------------------------------------------------------------------------', file=f)
print("ServerTime - DNSDatagramProtocol: ",datetime.datetime.now().strftime("%H:%M:%S.%f %d-%m-%Y"), file=f, sep='')
print("Request: Datagram to DNSDatagramProtocol from: ", addr[0], " on port: ", addr[1], file=f, sep='')
print('------------------------------------------------------------------------------------------------', file=f)
CloseLogFile()
return dns.DNSDatagramProtocol.datagramReceived(self, datagram, addr)
def create_resolver(servers=None, resolvconf=None, hosts=None):
if platform.getType() == 'posix':
if resolvconf is None:
resolvconf = b'/etc/resolv.conf'
if hosts is None:
hosts = b'/etc/hosts'
the_resolver = client.Resolver(resolvconf, servers)
host_resolver = Resolver(hosts)
else:
if hosts is None:
hosts = r'c:\windows\hosts'
from twisted.internet import reactor
bootstrap = client._ThreadedResolverImpl(reactor)
host_resolver = Resolver(hosts)
the_resolver = root.bootstrap(bootstrap, resolverFactory=client.Resolver)
return resolve.ResolverChain([host_resolver, cache.CacheResolver(), the_resolver])
def main(port):
factory = PrintClientAddressDNSServerFactory(
clients=[create_resolver(servers=[('8.8.8.8', 53)], hosts='hosts')],
)
protocol = PrintClientAddressDNSDatagramProtocol(controller=factory)
reactor.listenUDP(PORT, protocol)
reactor.listenTCP(PORT, factory)
print('-------------------------------------------------------------------------------------------------------------')
print("DNS Server started...\nListening on 0.0.0.0:" + str(PORT))
print("Log file name: " + FILENAME)
print("Not showing/logging requests from IP range: " + ', '.join(dont_print_ip_ranges))
print("Not rebinding requests for A records: " + ', '.join(dont_rebind_nameservers) + " -> " + SERVERIP)
print('-------------------------------------------------------------------------------------------------------------\n\n')
reactor.run()
if __name__ == '__main__':
if len(sys.argv) != 6:
print("Usage: python "+sys.argv[0]+" WhitelistedIP InternalIP ServerIP Port Domain")
print ("Example: python "+sys.argv[0]+" 216.58.214.206 169.254.169.254 78.47.24.216 53 localdomains.pw")
exit(1)
else:
WHITELISTEDIP = sys.argv[1]
INTERNALIP = sys.argv[2]
SERVERIP = sys.argv[3]
PORT = int(sys.argv[4])
DOMAIN = sys.argv[5]
dont_rebind_nameservers = [dont_rebind_nameservers[0] + DOMAIN, dont_rebind_nameservers[1] + DOMAIN]
main(PORT)
| 46.109091 | 137 | 0.511435 | 3,336 | 0.438486 | 0 | 0 | 0 | 0 | 0 | 0 | 2,612 | 0.343323 |
764337fd61b07695cb56126ab069e65c8c4a854d | 31 | py | Python | relevanceai/_api/endpoints/admin/__init__.py | RelevanceAI/RelevanceAI | a0542f35153d9c842f3d2cd0955d6b07f6dfc07b | [
"Apache-2.0"
] | 21 | 2021-11-23T13:01:36.000Z | 2022-03-23T03:45:30.000Z | relevanceai/_api/endpoints/admin/__init__.py | RelevanceAI/RelevanceAI | a0542f35153d9c842f3d2cd0955d6b07f6dfc07b | [
"Apache-2.0"
] | 217 | 2021-11-23T00:11:01.000Z | 2022-03-30T08:11:49.000Z | relevanceai/_api/endpoints/admin/__init__.py | RelevanceAI/RelevanceAI | a0542f35153d9c842f3d2cd0955d6b07f6dfc07b | [
"Apache-2.0"
] | 4 | 2022-01-04T01:48:30.000Z | 2022-02-11T03:19:32.000Z | from .admin import AdminClient
| 15.5 | 30 | 0.83871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
76435f733479de6951b0abd9a686d53e6f3120f0 | 2,684 | py | Python | kotdbot.py | tvezet/KoTDBot | 8dff34420463be1bc3567bf20380d2f20cb35846 | [
"Apache-2.0"
] | null | null | null | kotdbot.py | tvezet/KoTDBot | 8dff34420463be1bc3567bf20380d2f20cb35846 | [
"Apache-2.0"
] | null | null | null | kotdbot.py | tvezet/KoTDBot | 8dff34420463be1bc3567bf20380d2f20cb35846 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#from difflib import SequenceMatcher
import discord
import numpy as np
from joblib import dump, load
import sys
import os
import math
helpText = "Tell me the sizes of two of your gems and i will calculate the optimal size of the third gem based on the current ritual bonus. If running a ritual with a bigger third gem will still yield a maximum sized gem (2,999,997). You can also change the bonus to use or get the current bonus used for calculations.\n"
helpText = helpText + "Type \"!max SIZE1,SIZE2\" where SIZE1 and SIZE2 are numeric values, the sizes of your first and second gem respectively. Type !bonus [set=VAL] to get or set the current ritual bonus.\n"
def calculate(text):
text = text.split(",")
if len(text) != 2:
return helpText
val = [-1,-1]
for i in range(2):
try:
val[i] = int(text[i].replace(" ",""))
except ValueError:
return "Could not parse value \"" + text[i] + "\". Must be an integer value, check your input and try again!"
bonus = load('bonus.joblib')
target = math.ceil(2999997/bonus) - val[0] - val[1]
if 0 < target <= 999999:
return "Optimal 3rd gem size is " + str(target) + ". Absolute bonus will be " + str(2999997 - val[0] - val[1] - target) + "."
elif not 0 < val[0] <= 999999 or not 0 < val[1] <= 999999:
return "Whoops, something went wrong with your input values... valid gems have a positive value below 1,000,000. Your input values where \"" + text[0] + "\" and \"" + text[1] + "\". Check your input and try again!"
else:
return "Optimal 3rd gem size would be " + str(target) + ". Try bigger 1st and 2nd gem for a maximum gem ritual."
def bonus(text):
text = text.split("=")
if len(text) != 2 or text[0].lower().replace(" ","") != "set":
return "Current ritual bonus is " + str(load('bonus.joblib'))
try:
val = float(text[1].replace(" ",""))
except ValueError:
return "Could not parse value \"" + text[i] + "\". Must be a floating point value, check your input and try again!"
dump(val,'bonus.joblib')
return "Bonus set to " + str(val) +"."
if len(sys.argv) > 1:
TOKEN = sys.argv[1]
else:
TOKEN = os.environ["ACCESS_TOKEN"]
client = discord.Client()
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.content.startswith('!max'):
msg = calculate(message.content[4:])
msg = msg.format(message)
await message.channel.send(msg)
elif message.content.startswith('!bonus'):
msg = bonus(message.content[6:])
msg = msg.format(message)
await message.channel.send(msg)
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
client.run(TOKEN)
| 35.315789 | 321 | 0.679955 | 0 | 0 | 0 | 0 | 493 | 0.183681 | 465 | 0.173249 | 1,224 | 0.456036 |
764382ce910f9b78f0afeeafad12a7ae05b25d0c | 2,879 | py | Python | lib/ezdxf/sections/abstract.py | tapnair/DXFer | 8ec957d80c2f251bb78440147d1478106f99b3eb | [
"MIT"
] | 4 | 2019-03-31T00:41:13.000Z | 2021-07-31T05:09:07.000Z | lib/ezdxf/sections/abstract.py | tapnair/DXFer | 8ec957d80c2f251bb78440147d1478106f99b3eb | [
"MIT"
] | null | null | null | lib/ezdxf/sections/abstract.py | tapnair/DXFer | 8ec957d80c2f251bb78440147d1478106f99b3eb | [
"MIT"
] | 5 | 2018-03-29T06:28:07.000Z | 2021-07-31T05:09:08.000Z | # Purpose: entity section
# Created: 13.03.2011
# Copyright (C) 2011, Manfred Moitzi
# License: MIT License
from __future__ import unicode_literals
__author__ = "mozman <mozman@gmx.at>"
from itertools import islice
from ..lldxf.tags import TagGroups, DXFStructureError
from ..lldxf.classifiedtags import ClassifiedTags, get_tags_linker
from ..query import EntityQuery
class AbstractSection(object):
name = 'abstract'
def __init__(self, entity_space, tags, drawing):
self._entity_space = entity_space
self.drawing = drawing
if tags is not None:
self._build(tags)
@property
def dxffactory(self):
return self.drawing.dxffactory
@property
def entitydb(self):
return self.drawing.entitydb
def get_entity_space(self):
return self._entity_space
def _build(self, tags):
if tags[0] != (0, 'SECTION') or tags[1] != (2, self.name.upper()) or tags[-1] != (0, 'ENDSEC'):
raise DXFStructureError("Critical structure error in {} section.".format(self.name.upper()))
if len(tags) == 3: # empty entities section
return
linked_tags = get_tags_linker()
store_tags = self._entity_space.store_tags
entitydb = self.entitydb
fix_tags = self.dxffactory.modify_tags
for group in TagGroups(islice(tags, 2, len(tags)-1)):
tags = ClassifiedTags(group)
fix_tags(tags) # post read tags fixer for VERTEX!
handle = entitydb.add_tags(tags)
            if not linked_tags(tags, handle):  # also creates the link structure as a side effect
store_tags(tags) # add to entity space
def write(self, stream):
stream.write(" 0\nSECTION\n 2\n%s\n" % self.name.upper())
self._entity_space.write(stream)
stream.write(" 0\nENDSEC\n")
def create_new_dxf_entity(self, _type, dxfattribs):
""" Create new DXF entity add it to th entity database and add it to the entity space.
"""
dxf_entity = self.dxffactory.create_db_entry(_type, dxfattribs)
self._entity_space.add_handle(dxf_entity.dxf.handle)
return dxf_entity
def add_handle(self, handle):
self._entity_space.add_handle(handle)
def remove_handle(self, handle):
self._entity_space.remove(handle)
def delete_entity(self, entity):
self.remove_handle(entity.dxf.handle)
self.entitydb.delete_entity(entity)
# start of public interface
def __len__(self):
return len(self._entity_space)
def __contains__(self, handle):
return handle in self._entity_space
def query(self, query='*'):
return EntityQuery(iter(self), query)
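    # Example (editor's sketch): EntityQuery accepts the usual ezdxf query
    # strings, e.g. section.query('LINE CIRCLE') for all lines and circles,
    # or section.query('*[layer=="0"]') for every entity on layer "0".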
def delete_all_entities(self):
""" Delete all entities. """
self._entity_space.delete_all_entities()
# end of public interface
| 31.293478 | 104 | 0.658909 | 2,506 | 0.870441 | 0 | 0 | 144 | 0.050017 | 0 | 0 | 544 | 0.188954 |
7644bc4bec765da9c73ffce65a56e216576e72d0 | 812 | py | Python | spacy-textdescriptives/subsetters.py | HLasse/spacy-textdescriptives | c079c6617ef266b54f28c51e619d2429a5dafb83 | [
"Apache-2.0"
] | null | null | null | spacy-textdescriptives/subsetters.py | HLasse/spacy-textdescriptives | c079c6617ef266b54f28c51e619d2429a5dafb83 | [
"Apache-2.0"
] | null | null | null | spacy-textdescriptives/subsetters.py | HLasse/spacy-textdescriptives | c079c6617ef266b54f28c51e619d2429a5dafb83 | [
"Apache-2.0"
] | null | null | null | """Helpers to subset an extracted dataframe"""
readability_cols = [
"flesch_reading_ease",
"flesch_kincaid_grade",
"smog",
"gunning_fog",
"automated_readability_index",
"coleman_liau_index",
"lix",
"rix",
]
dependency_cols = [
"dependency_distance_mean",
"dependency_distance_std",
"prop_adjacent_dependency_relation_mean",
"prop_adjacent_dependency_relation_std",
]
descriptive_stats_cols = [
"token_length_mean",
"token_length_median",
"token_length_std",
"sentence_length_mean",
"sentence_length_median",
"sentence_length_std",
"syllables_per_token_mean",
"syllables_per_token_median",
"syllables_per_token_std",
"n_tokens",
"n_unique_tokens",
"percent_unique_tokens",
"n_sentences",
"n_characters",
]
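# Example (editor's sketch, assuming `df` is a pandas dataframe produced by a
# spacy-textdescriptives extraction): select one family of metrics at a time.
#
#     >>> df[readability_cols]        # readability scores only
#     >>> df[dependency_cols + descriptive_stats_cols]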
| 21.945946 | 46 | 0.699507 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 578 | 0.711823 |
7646049ee9e15471718b70b947f345973762d6d3 | 2,168 | py | Python | aoc/day22.py | martinhenstridge/adventofcode2020 | ffc34bbb6b8d8682a3bc9b16b7082b63adf3fc77 | [
"MIT"
] | null | null | null | aoc/day22.py | martinhenstridge/adventofcode2020 | ffc34bbb6b8d8682a3bc9b16b7082b63adf3fc77 | [
"MIT"
] | null | null | null | aoc/day22.py | martinhenstridge/adventofcode2020 | ffc34bbb6b8d8682a3bc9b16b7082b63adf3fc77 | [
"MIT"
] | null | null | null | from . import util
def get_decks(lines):
player = 0
cards = []
for line in lines:
if line.startswith("Player"):
player = int(line[-2])
elif line:
cards.append(int(line))
else:
yield player, cards
player = 0
cards = []
def play_combat(_deck1, _deck2):
deck1 = _deck1.copy()
deck2 = _deck2.copy()
while True:
card1 = deck1.pop(0)
card2 = deck2.pop(0)
if card1 > card2:
deck1.append(card1)
deck1.append(card2)
if not deck2:
return 1, deck1
else:
deck2.append(card2)
deck2.append(card1)
if not deck1:
return 2, deck2
def play_recursive_combat(_deck1, _deck2):
deck1 = _deck1.copy()
deck2 = _deck2.copy()
rounds = set()
while True:
# Check for identical previous round.
signature = tuple(deck1), tuple(deck2)
if signature in rounds:
return 1, deck1
rounds.add(signature)
# Draw cards and play.
card1 = deck1.pop(0)
card2 = deck2.pop(0)
if len(deck1) >= card1 and len(deck2) >= card2:
winner, _ = play_recursive_combat(deck1[:card1], deck2[:card2])
elif card1 > card2:
winner = 1
else:
winner = 2
# Assign winnings and check for termination conditions.
if winner == 1:
deck1.append(card1)
deck1.append(card2)
if not deck2:
return 1, deck1
else:
deck2.append(card2)
deck2.append(card1)
if not deck1:
return 2, deck2
def calculate_score(deck):
return sum(card * (len(deck) - idx) for idx, card in enumerate(deck))
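# Worked example (from the AoC 2020 day 22 statement): the winning deck
# [3, 2, 10, 6, 8, 5, 9, 4, 7, 1] scores 3*10 + 2*9 + 10*8 + 6*7 + 8*6 + 5*5
# + 9*4 + 4*3 + 7*2 + 1*1 == 306.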
def run():
inputlines = util.get_input_lines("22.txt")
decks = {p: cs for p, cs in get_decks(inputlines)}
_, winning = play_combat(decks[1], decks[2])
score1 = calculate_score(winning)
_, winning = play_recursive_combat(decks[1], decks[2])
score2 = calculate_score(winning)
return score1, score2
| 24.636364 | 75 | 0.541052 | 0 | 0 | 294 | 0.135609 | 0 | 0 | 0 | 0 | 130 | 0.059963 |
764616758280c2d006fdbc4717715cf4984760d6 | 3,434 | py | Python | src/test_trained_webcam.py | sumit-chavan/Face-Recognition-Model | 3d5c55d587fe1e8e66e6f47b3b4145e5ca8f13d6 | [
"MIT"
] | null | null | null | src/test_trained_webcam.py | sumit-chavan/Face-Recognition-Model | 3d5c55d587fe1e8e66e6f47b3b4145e5ca8f13d6 | [
"MIT"
] | null | null | null | src/test_trained_webcam.py | sumit-chavan/Face-Recognition-Model | 3d5c55d587fe1e8e66e6f47b3b4145e5ca8f13d6 | [
"MIT"
] | null | null | null | import math
from sklearn import neighbors
import os
import os.path
import pickle
from PIL import Image, ImageDraw
import face_recognition
from face_recognition.face_recognition_cli import image_files_in_folder
import cv2
#from webapp import db
#from DB import Actor
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
def predict(X_img, knn_clf=None, model_path=None, distance_threshold=0.6):
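    """Recognise faces in one frame with a trained KNN classifier.
    Returns a list of (name, (top, right, bottom, left)) tuples; faces whose
    nearest neighbour is farther than distance_threshold are labelled "unknown".
    """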
# if not os.path.isfile(X_img_path) or os.path.splitext(X_img_path)[1][1:] not in ALLOWED_EXTENSIONS:
# raise Exception("Invalid image path: {}".format(X_img_path))
if knn_clf is None and model_path is None:
raise Exception("Must supply knn classifier either thourgh knn_clf or model_path")
# Load a trained KNN model (if one was passed in)
if knn_clf is None:
with open(model_path, 'rb') as f:
knn_clf = pickle.load(f)
# Load image file and find face locations
# X_img = face_recognition.load_image_file(X_img_path)
X_face_locations = face_recognition.face_locations(X_img)
# If no faces are found in the image, return an empty result.
if len(X_face_locations) == 0:
return []
    # Find encodings for faces in the test image
faces_encodings = face_recognition.face_encodings(X_img, known_face_locations=X_face_locations)
# Use the KNN model to find the best matches for the test face
closest_distances = knn_clf.kneighbors(faces_encodings, n_neighbors=1)
are_matches = [closest_distances[0][i][0] <= distance_threshold for i in range(len(X_face_locations))]
# Predict classes and remove classifications that aren't within the threshold
return [(pred, loc) if rec else ("unknown", loc) for pred, loc, rec in zip(knn_clf.predict(faces_encodings), X_face_locations, are_matches)]
def faceRec(vid_path):
video_capture = cv2.VideoCapture(vid_path)
length = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
frame_no=0
print(vid_path)
print("Number of total frames are: {}".format(length))
while True:
ret, frame = video_capture.read()
if not ret:
break
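        # OpenCV yields BGR frames; face_recognition expects RGB, so reverse the channel axis.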
rgb_frame = frame[:, :, ::-1]
predictions=predict(rgb_frame,model_path="models/trained_knn_model.clf")
for name, (top, right, bottom, left) in predictions:
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
frame_no+=1
print("Writing frame {} / {}".format(frame_no, length))
# if predictions:
# actor=Actor(actorname=predictions[0],frameno=frame_no,image_file="default.jpg",x=predictions[1][0],y=predictions[1][1],z=predictions[1][2],w=predictions[1][3])
# db.session.add(actor)
# db.session.commit()
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
# destination="/home/yash/Desktop/face_recognition-master/examples/short_hamilton_clip.mp4"
destination="/home/yash/Desktop/Final/Face reco KNN/src/short_hamilton_clip.mp4"
faceRec(destination)
| 39.022727 | 173 | 0.684916 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,285 | 0.374199 |
764678c9c09ffe698c8ee2858f7d5376f8311d3c | 1,401 | py | Python | history/views.py | light-white/meeting | 464c1047d3cd9e620f9a4095f453186ef81cc854 | [
"Apache-2.0"
] | null | null | null | history/views.py | light-white/meeting | 464c1047d3cd9e620f9a4095f453186ef81cc854 | [
"Apache-2.0"
] | null | null | null | history/views.py | light-white/meeting | 464c1047d3cd9e620f9a4095f453186ef81cc854 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from history.models import *
# Create your views here.
def history(request):
historylist = History.objects.all()
    paginator = Paginator(historylist, 10)  # Show 10 history entries per page
page = request.GET.get('page')
try:
contacts = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
contacts = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
contacts = paginator.page(paginator.num_pages)
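    # Build a sliding window of up to 10 page numbers around the current page,
    # clamped to the paginator's first and last pages.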
if contacts.paginator.num_pages < 10:
pagelist = list(range(1,contacts.paginator.num_pages+1))
elif contacts.number < 5:
pagelist = list(range(1,11))
elif contacts.paginator.num_pages-contacts.number < 5:
pagelist = list(range(contacts.paginator.num_pages-9,contacts.paginator.num_pages+1))
else:
pagelist = list(range(contacts.number-4,contacts.number+6))
return render(request, 'history/history.html', {'contacts': contacts, 'pagelist':pagelist})
def historyitem(request, hid):
history = History.objects.filter(id = hid).first()
return render(request, 'history/historyitem.html', {'history':history})
| 41.205882 | 95 | 0.711635 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 251 | 0.179158 |
76472899523719baf923d9a996ebdd4699f27a6a | 1,817 | py | Python | predictor.py | GlebovVL/Energo | 648154ff63799f553c453e8039f7c99c153bb846 | [
"MIT"
] | null | null | null | predictor.py | GlebovVL/Energo | 648154ff63799f553c453e8039f7c99c153bb846 | [
"MIT"
] | null | null | null | predictor.py | GlebovVL/Energo | 648154ff63799f553c453e8039f7c99c153bb846 | [
"MIT"
] | null | null | null | from datetime import timedelta
import random
import pandas as pd
class Predictor:
def __init__(self,avarii, coords):
self.avarii = avarii
        self.coords = coords
def got_prediction(self,date,length,where,probtype):
avarii_count = ((self.avarii.oblast==where)&\
(self.avarii[probtype]>0) &\
(self.avarii.date_dt>date )&\
(self.avarii.date_dt<date+timedelta(days=length))).sum()
avarii_count_2 = ((self.avarii.oblast==where)&\
(self.avarii[probtype]>0) &\
(self.avarii.date_dt>date )&\
(self.avarii.date_dt<date+timedelta(days=length*2))).sum()
avarii_count_3 = ((self.avarii.oblast==where)&\
(self.avarii[probtype]>0) &\
(self.avarii.date_dt>date )&\
(self.avarii.date_dt<date+timedelta(days=length*3))).sum()
# print(avarii_count,avarii_count_2)
res = random.random()/100
#print(res)
res += avarii_count/(avarii_count+1)
res += max((avarii_count_2-avarii_count)/((avarii_count_2-avarii_count)*2+1),0)
#print(res)
res += max((avarii_count_3-avarii_count-avarii_count_2)/((avarii_count_3-avarii_count_2-avarii_count)*5+1),0)
#print(res)
res = min(0.9+ (res*1000%10)/150,res)
return res
def got_dataset(self,date,length,where,probtypes=[0,1,2,3,4,5,6,7]):
res=pd.DataFrame({"oblast":self.coords.index}, index=self.coords.index)
res["lat"] = self.coords.lat
res["lon"] = self.coords.lon
res["risk8"]=0
for i in probtypes:
res["risk"+str(i)]=res["oblast"].apply(lambda x:self.got_prediction(date,length,x,i))
res["risk8"] += (1-res["risk8"])* res["risk"+str(i)]
return res
| 42.255814 | 117 | 0.593836 | 1,751 | 0.963676 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.070446 |
764756cae57e53f40b2dff05b58adf03c8eceb89 | 9,210 | py | Python | predictit/market.py | harryposner/pypredictit | bce6f74440b271de7083f9c526a48508cfbd7bd7 | [
"MIT"
] | 4 | 2020-10-14T15:21:37.000Z | 2020-12-17T04:06:09.000Z | predictit/market.py | harryposner/pypredictit | bce6f74440b271de7083f9c526a48508cfbd7bd7 | [
"MIT"
] | 1 | 2020-12-24T04:28:09.000Z | 2020-12-24T04:28:09.000Z | predictit/market.py | harryposner/pypredictit | bce6f74440b271de7083f9c526a48508cfbd7bd7 | [
"MIT"
] | 2 | 2020-12-17T04:43:26.000Z | 2020-12-23T22:54:19.000Z | import datetime as dt
from decimal import Decimal
import pytz
from .utils import concurrent_get
class Market(object):
"""Interact with PredictIt markets
This class handles market-level transactions with PredictIt,
including getting market risk, payouts, and contract IDs.
Initializing this object will call the `update_all` method,
contacting PredictIt twice.
Parameters
----------
account : predictit.Account
market_id : int
"""
def __init__(self, account, market_id):
if not isinstance(market_id, int):
raise TypeError("Market ID must be int, not {}".format(type(market_id)))
else:
self._market_id = market_id
self.account = account
self.update_all()
def __repr__(self):
repr_fmt = "Market(market_id={}, market_name={})"
return repr_fmt.format(self.market_id, self.market_name)
@property
def market_id(self):
"""int"""
return self._market_id
def update_info(self):
"""Update market properties other than contract IDs
        Requests https://www.predictit.org/api/Market/{self.market_id} to
update properties:
`market_name`
`market_type`
`end_date`
`is_active`
`rule`
`have_ownership`
`have_trade_history`
`investment`
`max_payout`
`info`
`date_opened`
`is_market_watched`
`status`
`is_open`
`is_open_status_message`
`is_trading_suspended`
`is_trading_suspended_message`
`is_engine_busy`
`is_engine_busy_message`
Returns
-------
requests.Response
The response from requesting market info from PredictIt
"""
url_fmt = "https://www.predictit.org/api/Market/{s.market_id}"
resp = self.account.session.get(url_fmt.format(s=self), timeout=5)
self._update_info(resp)
return resp
def _update_info(self, resp):
resp.raise_for_status()
mkt_info = resp.json(parse_float=Decimal)
self._market_name = mkt_info["marketName"]
self._market_type = mkt_info["marketType"]
try:
end_date = dt.datetime.strptime(
mkt_info["dateEndString"][:19], "%m/%d/%Y %I:%M %p"
)
except ValueError:
# It used to read "N/A" if there was no end date; now (Dec
# 2020) it reads "NA". EAFP should be more robust to any
# future changes in format than checking against a list of
# known null values.
self._end_date = None
else:
tz_string = mkt_info["dateEndString"][19:].strip(" ()")
if tz_string == "ET":
# Declare naive datetime as US East, then convert to UTC
self._end_date = (
end_date.astimezone(pytz.timezone("US/Eastern"))
.astimezone(pytz.timezone("UTC"))
.replace(tzinfo=None)
)
else:
# Why, oh why, doesn't PredictIt use a standard timezone
# format?
raise NotImplementedError(
'PredictIt returned a timezone other than "ET" (should '
"not happen)"
)
self._is_active = mkt_info["isActive"]
self._rule = mkt_info["rule"]
self._have_ownership = mkt_info["userHasOwnership"]
self._have_trade_history = mkt_info["userHasTradeHistory"]
self._investment = mkt_info["userInvestment"]
self._max_payout = mkt_info["userMaxPayout"]
self._info = mkt_info["info"]
self._date_opened = dt.datetime.strptime(
mkt_info["dateOpened"], "%Y-%m-%dT%H:%M:%S.%f"
)
self._is_market_watched = mkt_info["isMarketWatched"]
self._status = mkt_info["status"]
self._is_open = mkt_info["isOpen"]
self._is_open_status_message = mkt_info["isOpenStatusMessage"]
self._is_trading_suspended = mkt_info["isTradingSuspended"]
self._is_trading_suspended_message = mkt_info["isTradingSuspendedMessage"]
self._is_engine_busy = mkt_info["isEngineBusy"]
self._is_engine_busy_message = mkt_info["isEngineBusyMessage"]
return resp
@property
def market_name(self):
"""str : Human-readable name for this market"""
return self._market_name
@property
def market_type(self):
"""int : I'm not sure what this is. These are my best guesses:
0 : Single-contract market
3 : Multiple-contract linked market
"""
return self._market_type
@property
def end_date(self):
"""NoneType or datetime.datetime : Date by which market closes
If the market has a definite end date, then this returns the UTC
naive datetime by which the market should close. The market may
close before the end date if the conditions for resolving the
market are met (e.g. "Will X run?" if X announces candidacy).
If there is no definite end date, this returns None.
"""
return self._end_date
@property
def is_active(self):
"""bool : True if the market has not yet been settled, else False."""
return self._is_active
@property
def rule(self):
"""str : Human-readable rule for resolving this market"""
return self._rule
@property
def have_ownership(self):
"""bool : True if user owns any shares in this market"""
return self._have_ownership
@property
def have_trade_history(self):
"""bool : True if user has traded any shares in this market"""
return self._have_trade_history
@property
def investment(self):
"""decimal.Decimal : Dollars spent to buy currently owned shares"""
return self._investment
@property
def max_payout(self):
"""decimal.Decimal : Maximum dollar payout in market net of fees"""
return self._max_payout
@property
def info(self):
return self._info
@property
def date_opened(self):
"""datetime.datetime : UTC naive datetime when market opened"""
return self._date_opened
@property
def is_market_watched(self):
"""bool : True if user is watching market on profile"""
return self._is_market_watched
@property
def status(self):
"""str : 'Open' or 'Expired', but can't rule out other values"""
return self._status
@property
def is_open(self):
"""bool"""
return self._is_open
@property
def is_open_status_message(self):
return self._is_open_status_message
@property
def is_trading_suspended(self):
"""bool"""
return self._is_trading_suspended
@property
def is_trading_suspended_message(self):
return self._is_trading_suspended_message
@property
def is_engine_busy(self):
"""bool"""
return self._is_engine_busy
@property
def is_engine_busy_message(self):
return self._is_engine_busy_message
def update_contracts(self):
"""Get the contract IDs for this market
Requests https://www.predictit.org/api/Market/{self.market_id}/Contracts
to update the `contract_ids` property.
Returns
-------
requests.Response
The response from requesting the contracts for this market
from PredictIt
"""
url_fmt = "https://www.predictit.org/api/Market/{s.market_id}/Contracts"
resp = self.account.session.get(url_fmt.format(s=self), timeout=5)
self._update_contracts(resp)
return resp
def _update_contracts(self, resp):
resp.raise_for_status()
contracts = resp.json(parse_float=Decimal)
# We don't want to make a list of actual Contract objects since
# that would make three requests for every contract in the
# market, and we want to be explicit whenever we make a request.
self._contract_ids = [c["contractId"] for c in contracts]
return resp
@property
def contract_ids(self):
"""list of int : The IDs for each contract in this market"""
# We don't want to return the original, since lists are mutable
return self._contract_ids.copy()
def update_all(self):
"""Concurrently run `update_info` and `update_contracts`
Returns
-------
list of requests.Response
In order, responses from requesting market info and from
requesting contracts for this market
"""
url_fmts = [
"https://www.predictit.org/api/Market/{s.market_id}",
"https://www.predictit.org/api/Market/{s.market_id}/Contracts",
]
urls = [fmt.format(s=self) for fmt in url_fmts]
responses = concurrent_get(urls, session=self.account.session)
self._update_info(responses[0])
self._update_contracts(responses[1])
return responses
| 32.090592 | 84 | 0.610858 | 9,109 | 0.989034 | 0 | 0 | 3,045 | 0.330619 | 0 | 0 | 4,354 | 0.472747 |
76479217ff8176e027291b888d7a0e67882521eb | 8,614 | py | Python | prep_data/sketchycococrops_to_tfrecord.py | leosampaio/scene-designer | 8a7276067acfde1997d386942aabc44d92436a1a | [
"MIT"
] | 9 | 2021-08-18T17:49:42.000Z | 2022-02-22T02:15:07.000Z | prep_data/sketchycococrops_to_tfrecord.py | leosampaio/scene-designer | 8a7276067acfde1997d386942aabc44d92436a1a | [
"MIT"
] | null | null | null | prep_data/sketchycococrops_to_tfrecord.py | leosampaio/scene-designer | 8a7276067acfde1997d386942aabc44d92436a1a | [
"MIT"
] | 1 | 2021-10-02T19:53:03.000Z | 2021-10-02T19:53:03.000Z | import argparse
import json
import os
import re
import glob
import imageio
import skimage.transform as sk_transform
import numpy as np
import tensorflow as tf
import utils
def default_hparams():
hps = utils.hparams.HParams(
image_size=256,
min_object_size=0.05,
excluded_meta_file='prep_data/coco/mats_objs_sketchables_v2.json',
crop_size=96,
)
return hps
def load_all_data_and_save_in_chunks(set_name, image_ids, image_dir, id_to_filename, id_to_size,
id_to_objects, target_dir, obj_ids_in_sketchycoco, ids_to_sketch_file, hps, meta, masked=False):
# load all images, saving one big chunk at a time
image_counter = 0
crops_counter = 0
tf_record_shard_path = os.path.join(target_dir, "{}-{:03}.records".format(set_name, 0))
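    # Note: despite the "chunks" naming, every record is written to a single
    # shard (index 0); the shard index is never advanced.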
options = tf.io.TFRecordOptions(compression_type="GZIP")
with tf.io.TFRecordWriter(tf_record_shard_path, options) as writer:
for img_id in image_ids:
img = imageio.imread(os.path.join(image_dir, id_to_filename[img_id]))
size = id_to_size[img_id]
objs = id_to_objects[img_id]
c_crops, c_objs_y, obj_ids = utils.coco.preprocess_for_boxes(img, size, objs, hps, meta,
filter_obj_ids=obj_ids_in_sketchycoco, return_ids=True,
masked=masked)
if c_crops is not None:
sketches = load_sketches(obj_ids, ids_to_sketch_file)
for crop, y, sketch in zip(c_crops, c_objs_y, sketches):
tf_example = utils.tfrecord.sketchycoco_crop_example(crop.numpy(), y, sketch)
writer.write(tf_example.SerializeToString())
crops_counter += 1
image_counter += 1
if image_counter % 100 == 0 and image_counter != 0:
print("Saved {} crops from {} images from {} set".format(crops_counter, image_counter, set_name))
return crops_counter
def load_all_sketchycoco_object_ids_and_sketches(objects_folder, split):
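    # Filenames are assumed to start with the numeric COCO object id (e.g. "123456.png").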
all_the_files = [f for f in glob.glob(os.path.join(objects_folder, "GT", "{}/*/*".format(split))) if f.endswith('png')]
object_ids = [int(re.findall(r'\d+', os.path.basename(f))[0]) for f in all_the_files]
sketch_files = [f for f in glob.glob(os.path.join(objects_folder, "Sketch", "{}/*/*".format(split))) if f.endswith('png')]
ids_to_sketch_file = {int(re.findall(r'\d+', os.path.basename(f))[0]): f for f in sketch_files}
return object_ids, ids_to_sketch_file
def load_sketches(ids, ids_to_sketch_file):
sketches = np.zeros((len(ids), 96, 96, 1))
for i, idd in enumerate(ids):
skt = imageio.imread(ids_to_sketch_file[idd])
sketches[i] = preprocess_sketch(skt, 96)
return sketches
def preprocess_sketch(image, image_size):
scaled = sk_transform.resize(image, (image_size, image_size))
if scaled.shape[-1] == 3:
scaled = scaled[..., 0]
if len(scaled.shape) == 2:
scaled = np.reshape(scaled,
(image_size, image_size, 1))
return scaled
def main():
# Parsing arguments
parser = argparse.ArgumentParser(
description='Prepare large dataset for chunked loading')
parser.add_argument('--dataset-dir', default='/store/shared/datasets/coco-stuff')
parser.add_argument('--target-dir', default='/store/lribeiro/datasets/sketchycococrops-tfrecord')
parser.add_argument('--sketchycoco-dir', default='/store/shared/datasets/SketchyCOCO')
parser.add_argument("--masked", action="store_true")
parser.add_argument("-g", "--gpu", default=[0], type=int, nargs='+', help="GPU ID to run on", )
parser.add_argument('--hparams', type=str)
args = parser.parse_args()
utils.gpu.setup_gpu(args.gpu)
hps = default_hparams()
if args.hparams is not None:
hps = hps.parse(args.hparams)
hps = dict(hps.values())
# get all the full paths
train_image_dir = os.path.join(args.dataset_dir, 'images/train2017')
val_image_dir = os.path.join(args.dataset_dir, 'images/val2017')
train_instances_json = os.path.join(args.dataset_dir, 'annotations/instances_train2017.json')
train_stuff_json = os.path.join(args.dataset_dir, 'annotations/stuff_train2017.json')
val_instances_json = os.path.join(args.dataset_dir, 'annotations/instances_val2017.json')
val_stuff_json = os.path.join(args.dataset_dir, 'annotations/stuff_val2017.json')
sketchycoco_objs_dir = os.path.join(args.sketchycoco_dir, "Object")
meta_filename = os.path.join(args.target_dir, "meta.json")
if not os.path.isdir(args.target_dir):
os.mkdir(args.target_dir)
# load up all train metadata
print("Loading train metadata...")
(object_idx_to_name, object_name_to_idx, objects_list, total_objs,
train_image_ids,
train_image_id_to_filename,
train_image_id_to_size,
train_image_id_to_objects) = utils.coco.prepare_and_load_metadata(train_instances_json, train_stuff_json)
train_n_images = len(train_image_ids)
# load up all valid metadata
print("Loading validation metadata...")
(_, _, _, _,
valid_image_ids,
valid_image_id_to_filename,
valid_image_id_to_size,
valid_image_id_to_objects) = utils.coco.prepare_and_load_metadata(val_instances_json, val_stuff_json)
valid_n_images = len(valid_image_ids)
with open(hps['excluded_meta_file']) as emf:
materials_metadata = json.load(emf)
concrete_objs = materials_metadata["objects"]
allowed_materials = materials_metadata["materials"]
fully_excluded_objs = materials_metadata["fully_excluded"]
object_id_to_idx = {ident: i for i, ident in enumerate(objects_list)}
# include info about the extra __image__ object
object_id_to_idx[0] = len(objects_list)
objects_list = np.append(objects_list, 0)
print("Saving metadata...")
PREDICATES_VALUES = ['left of', 'right of', 'above', 'below', 'inside', 'surrounding']
pred_idx_to_name = ['__in_image__'] + PREDICATES_VALUES
pred_name_to_idx = {name: idx for idx, name in enumerate(pred_idx_to_name)}
meta = {
'obj_name_to_ID': object_name_to_idx,
'obj_ID_to_name': object_idx_to_name,
'obj_idx_to_ID': objects_list.tolist(),
'obj_ID_to_idx': object_id_to_idx,
'obj_idx_to_name': [object_idx_to_name[objects_list[i]] for i in range(len(objects_list))],
'train_total_objs': total_objs,
'n_train_samples': train_n_images,
'n_valid_samples': valid_n_images,
'concrete_objs': concrete_objs,
'allowed_materials': allowed_materials,
'fully_excluded_objs': fully_excluded_objs,
'pred_idx_to_name': pred_idx_to_name,
'pred_name_to_idx': pred_name_to_idx
}
with open(meta_filename, 'w') as outfile:
json.dump(meta, outfile)
obj_ids_valid, ids_to_sketch_file_valid = load_all_sketchycoco_object_ids_and_sketches(sketchycoco_objs_dir, 'val')
obj_ids_train, ids_to_sketch_file_train = load_all_sketchycoco_object_ids_and_sketches(sketchycoco_objs_dir, 'train')
# validation
meta['n_valid_samples'] = load_all_data_and_save_in_chunks(
'valid',
image_ids=valid_image_ids,
image_dir=val_image_dir,
id_to_filename=valid_image_id_to_filename,
id_to_size=valid_image_id_to_size,
id_to_objects=valid_image_id_to_objects,
target_dir=args.target_dir,
obj_ids_in_sketchycoco=obj_ids_valid,
ids_to_sketch_file=ids_to_sketch_file_valid,
meta=meta,
hps=hps,
masked=args.masked)
print("Saved {} crops for valid set".format(meta['n_valid_samples']))
with open(meta_filename, 'w') as outfile:
json.dump(meta, outfile)
# finally, the train set
meta['n_train_samples'] = load_all_data_and_save_in_chunks(
'train',
image_ids=train_image_ids,
image_dir=train_image_dir,
id_to_filename=train_image_id_to_filename,
id_to_size=train_image_id_to_size,
id_to_objects=train_image_id_to_objects,
target_dir=args.target_dir,
obj_ids_in_sketchycoco=obj_ids_train,
ids_to_sketch_file=ids_to_sketch_file_train,
meta=meta,
hps=hps,
masked=args.masked)
print("Saved {} crops for train set".format(meta['n_train_samples']))
with open(meta_filename, 'w') as outfile:
json.dump(meta, outfile)
if __name__ == '__main__':
main()
| 41.413462 | 133 | 0.683306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,464 | 0.169956 |
764898f3a18040594993230d8331b7556afdb198 | 372 | py | Python | Solutions/String/L409_LongestPalindrome.py | ufolux/PythonLeetCodeSolution | df905526f1982ef4aa417f6fe060a66d44c18d07 | [
"MIT"
] | null | null | null | Solutions/String/L409_LongestPalindrome.py | ufolux/PythonLeetCodeSolution | df905526f1982ef4aa417f6fe060a66d44c18d07 | [
"MIT"
] | null | null | null | Solutions/String/L409_LongestPalindrome.py | ufolux/PythonLeetCodeSolution | df905526f1982ef4aa417f6fe060a66d44c18d07 | [
"MIT"
] | null | null | null | class Solution:
def longestPalindrome(self, s: str) -> int:
d = {}
for c in s:
            d[c] = d.get(c, 0) + 1
res = 0
for _, n in d.items():
res += n - (n & 1)
return res + 1 if res < len(s) else res
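# Worked example: "abccccdd" counts to {a: 1, b: 1, c: 4, d: 2}; the even part
# contributes 4 + 2 = 6, and one odd-count character can sit in the centre,
# giving 7.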
s = Solution()
s.longestPalindrome("ccd")
| 20.666667 | 47 | 0.408602 | 327 | 0.879032 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0.013441 |
764937dbe3389486664886c5dde62290994457f4 | 551 | py | Python | lessweb/__init__.py | qorzj/lessweb | d21473d724216c3afa1101729137573ff21b7015 | [
"MIT"
] | 17 | 2017-11-02T02:07:35.000Z | 2020-07-23T06:33:36.000Z | lessweb/__init__.py | qorzj/lessweb | d21473d724216c3afa1101729137573ff21b7015 | [
"MIT"
] | 12 | 2018-02-06T05:52:04.000Z | 2021-02-07T14:43:35.000Z | lessweb/__init__.py | qorzj/lessweb | d21473d724216c3afa1101729137573ff21b7015 | [
"MIT"
] | 2 | 2018-06-06T09:30:44.000Z | 2018-09-19T02:03:16.000Z | """lessweb: 用最python3的方法创建web apps"""
__version__ = '0.3.3'
__author__ = [
'qorzj <inull@qq.com>',
]
__license__ = "MIT"
# from . import application, context, model, storage, webapi
from .application import interceptor, Application
from .context import Context, Request, Response
from .storage import Storage
from .bridge import uint, ParamStr, MultipartFile, Jsonizable
from .webapi import BadParamError, NotFoundError, Cookie, HttpStatus, ResponseStatus
from .utils import _nil, eafp
from .client import Client
from .service import Service
| 26.238095 | 84 | 0.771325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.256637 |
764a1991418da83a2826386345ee15787a0bf609 | 759 | py | Python | notification/admin.py | horacexd/clist | 9759dfea97b86514bec9825d2430abc36decacf0 | [
"Apache-2.0"
] | 1 | 2020-04-28T09:10:53.000Z | 2020-04-28T09:10:53.000Z | notification/admin.py | horacexd/clist | 9759dfea97b86514bec9825d2430abc36decacf0 | [
"Apache-2.0"
] | 25 | 2021-03-28T15:20:02.000Z | 2021-03-28T16:02:48.000Z | notification/admin.py | VadVergasov/clist | 4afcdfe88250d224043b28efa511749347cec71c | [
"Apache-2.0"
] | null | null | null | from pyclist.admin import BaseModelAdmin, admin_register
from notification.models import Notification, Task
@admin_register(Notification)
class NotificationAdmin(BaseModelAdmin):
list_display = ['coder', 'method', 'before', 'period', 'last_time', 'modified']
list_filter = ['method']
search_fields = ['coder__user__username', 'method', 'period']
def get_readonly_fields(self, request, obj=None):
return super().get_readonly_fields(request, obj)
@admin_register(Task)
class TaskAdmin(BaseModelAdmin):
list_display = ['notification', 'created', 'modified', 'is_sent']
list_filter = ['notification__method', 'is_sent']
search_fields = ['notification__coder__user__username', 'notification__method', 'subject', 'message']
| 37.95 | 105 | 0.741765 | 593 | 0.781291 | 0 | 0 | 645 | 0.849802 | 0 | 0 | 249 | 0.328063 |
764a345d21e1234d5461a595f90e26c2e556c374 | 802 | py | Python | google_cloud_storage_cls/utils/gcp_constant.py | mayurdhamecha-crest/ta_cloud_exchange_plugins | 8d64c92909f28bcb2067587ec3361499de5d5723 | [
"BSD-3-Clause"
] | null | null | null | google_cloud_storage_cls/utils/gcp_constant.py | mayurdhamecha-crest/ta_cloud_exchange_plugins | 8d64c92909f28bcb2067587ec3361499de5d5723 | [
"BSD-3-Clause"
] | null | null | null | google_cloud_storage_cls/utils/gcp_constant.py | mayurdhamecha-crest/ta_cloud_exchange_plugins | 8d64c92909f28bcb2067587ec3361499de5d5723 | [
"BSD-3-Clause"
] | null | null | null | """GCP Storage Constant."""
locations_list = [
"US",
"EU",
"ASIA",
"ASIA1",
"EUR4",
"NAM4",
"NORTHAMERICA-NORTHEAST1",
"NORTHAMERICA-NORTHEAST2",
"US-CENTRAL1",
"US-EAST1",
"US-EAST4",
"US-WEST1",
"US-WEST2",
"US-WEST3",
"US-WEST4",
"SOUTHAMERICA-EAST1",
"EUROPE-CENTRAL2",
"EUROPE-NORTH1",
"EUROPE-WEST1",
"EUROPE-WEST2",
"EUROPE-WEST3",
"EUROPE-WEST4",
"EUROPE-WEST6",
"ASIA-EAST1",
"ASIA-EAST2",
"ASIA-NORTHEAST1",
"ASIA-NORTHEAST2",
"ASIA-NORTHEAST3",
"ASIA-SOUTH1",
"ASIA-SOUTH2",
"ASIA-SOUTHEAST1",
"ASIA-SOUTHEAST2",
"AUSTRALIA-SOUTHEAST1",
"AUSTRALIA-SOUTHEAST2",
]
storage_classes_list = [
"STANDARD",
"NEARLINE",
"COLDLINE",
"ARCHIVE",
]
| 17.434783 | 30 | 0.553616 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 523 | 0.65212 |
764dae1c165deea13176d04ddf5d9c3e1390137e | 643 | py | Python | lazytorch/layers/__init__.py | mgmalek/lazytorch | 9950b8841488471e147d5dbb86a446663b6c15dc | [
"MIT"
] | null | null | null | lazytorch/layers/__init__.py | mgmalek/lazytorch | 9950b8841488471e147d5dbb86a446663b6c15dc | [
"MIT"
] | null | null | null | lazytorch/layers/__init__.py | mgmalek/lazytorch | 9950b8841488471e147d5dbb86a446663b6c15dc | [
"MIT"
] | null | null | null | from .conv import LazyExpansionConv2d, LazyReductionConv2d
from .conv_norm_activ import ConvNormActivation, LazyConvNormActivation
from .depth_sep_conv import (
DepthwiseConv2d,
PointwiseConv2d,
DepthSepConv2d,
LazyDepthwiseConv2d,
LazyPointwiseConv2d,
LazyExpansionPointwiseConv2d,
LazyReductionPointwiseConv2d,
LazyDepthSepConv2d,
LazyExpansionDepthSepConv2d,
LazyReductionDepthSepConv2d,
)
from .squeeze_excitation import SqueezeExcitation, LazySqueezeExcitation
from .bottleneck import BottleneckBlock, LazyBottleneckBlock
from .inverted_bottleneck import InvertedBottleneck, LazyInvertedBottleneck
| 35.722222 | 75 | 0.839813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
764ff5611043a9510a54937a0ad6a65d0156e951 | 845 | py | Python | functions_legacy/PathsCauchy.py | dpopadic/arpmRes | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | [
"MIT"
] | 6 | 2021-04-10T13:24:30.000Z | 2022-03-26T08:20:42.000Z | functions_legacy/PathsCauchy.py | dpopadic/arpmRes | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | [
"MIT"
] | null | null | null | functions_legacy/PathsCauchy.py | dpopadic/arpmRes | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | [
"MIT"
] | 6 | 2019-08-13T22:02:17.000Z | 2022-02-09T17:49:12.000Z | from numpy import ones, cumsum, diff, tile, r_
from numpy.random import rand
from scipy.stats import t
def PathsCauchy(x0,mu,sigma,tau,j_):
# This function generates paths for the process x such that the increments
# dx are iid Cauchy distributed.
# INPUTS
# x0 :[scalar] initial value of process x at time zero
# mu :[scalar] location parameter of Cauchy distribution
    #  sigma  :[scalar] dispersion parameter of Cauchy distribution
# tau :[row vector] vector of times for simulations
# j_ :[scalar] total number of paths
# OPS
# x :[matrix](j_ x tau_) array with paths on the rows
## Code
t_ = len(tau)
r = rand(j_,t_-1)
dx = t.ppf(r,1,tile(mu*diff(tau,1),(j_,1)),tile(sigma*diff(tau,1),(j_,1)))
x = r_['-1',x0*ones((j_,1)), x0+cumsum(dx,axis=1)]
return x
| 33.8 | 78 | 0.64142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 463 | 0.547929 |
76501c0e0a4d032fa02c49003cd7b75a3d3f4eaf | 879 | py | Python | main.py | Elvira521feng/InfoNews | 5db22738446b1026db6479b91dc42c3aeba17060 | [
"MIT"
] | null | null | null | main.py | Elvira521feng/InfoNews | 5db22738446b1026db6479b91dc42c3aeba17060 | [
"MIT"
] | null | null | null | main.py | Elvira521feng/InfoNews | 5db22738446b1026db6479b91dc42c3aeba17060 | [
"MIT"
] | null | null | null | from flask import current_app
from flask_script import Manager
from flask_migrate import MigrateCommand
from info import create_app
# Create the application
app = create_app("dev")
# Create the manager
mgr = Manager(app)
# Add the migration command
mgr.add_command("mc", MigrateCommand)
# Command to create a superuser
@mgr.option("-u", dest="username")
@mgr.option("-p", dest="password")
def create_superuser(username, password):
if not all([username, password]):
print("账号/密码不完整")
return
from info.models import User
from info import db
user = User()
user.mobile = username
user.password = password
user.nick_name = username
user.is_admin = True
try:
db.session.add(user)
db.session.commit()
except BaseException as e:
current_app.logger.error(e)
db.session.rollback()
print("生成失败")
print("生成管理员成功")
if __name__ == '__main__':
mgr.run() | 21.439024 | 41 | 0.664391 | 0 | 0 | 0 | 0 | 620 | 0.643821 | 0 | 0 | 188 | 0.195223 |
76522c2bfc85d2b19b789e29bdbb4e4770d87e91 | 1,864 | py | Python | hwtHls/ssa/basicBlock.py | Nic30/hwtHls | 1fac6ed128318e698d51e15e9871249ddf243e1c | [
"MIT"
] | 8 | 2018-09-25T03:28:11.000Z | 2021-12-15T07:44:38.000Z | hwtHls/ssa/basicBlock.py | Nic30/hwtHls | 1fac6ed128318e698d51e15e9871249ddf243e1c | [
"MIT"
] | 1 | 2020-12-21T10:56:44.000Z | 2020-12-21T10:56:44.000Z | hwtHls/ssa/basicBlock.py | Nic30/hwtHls | 1fac6ed128318e698d51e15e9871249ddf243e1c | [
"MIT"
] | 2 | 2018-09-25T03:28:18.000Z | 2021-12-15T10:28:35.000Z | from typing import List
from hwtHls.ssa.context import SsaContext
from hwtHls.ssa.instr import SsaInstrBranch, SsaInstr
from hwtHls.ssa.phi import SsaPhi
class SsaBasicBlock():
"""
Basic Block from Static Single Assignment (SSA) normal form of code.
:ivar label: label for debug purposes
:ivar predecessors: list of block from where the control flow can go to this block
:ivar phis: list of phi functions which are selecting a value for a variable based on predecessor block
:ivar body: statements of this block
:ivar successors: an object to keep track of jumps from this block
:ivar origns: list of objects which contributed to existence of this object
"""
def __init__(self, ctx: SsaContext, label:str):
self.ctx = ctx
self.label = label
self.predecessors: List[SsaBasicBlock] = []
self.phis: List[SsaPhi] = []
self.body: List[SsaInstr] = []
self.successors = SsaInstrBranch(self)
self.origins = []
def appendPhi(self, phi: SsaPhi):
assert phi.block is None, (phi, phi.block, self)
# assert not self.body, ("Adding phi if already have instructions", self, phi)
phi.block = self
self.phis.append(phi)
def insertPhi(self, index: int, phi: SsaPhi):
assert phi.block is None, (phi, phi.block, self)
phi.block = self
self.phis.insert(index, phi)
def appendInstruction(self, instr: SsaInstr):
assert instr.block is None, (instr, instr.block, self)
instr.block = self
self.body.append(instr)
def insertInstruction(self, index: int, instr: SsaInstr):
assert instr.block is None, (instr, instr.block, self)
instr.block = self
self.body.insert(index, instr)
def __repr__(self):
return f"<{self.__class__.__name__:s} {self.label:s}>"
| 35.846154 | 107 | 0.6647 | 1,706 | 0.915236 | 0 | 0 | 0 | 0 | 0 | 0 | 639 | 0.342811 |
765266b4bc6c376f8d781ac5b9e414a2c40c4c70 | 6,546 | py | Python | parser/team12/src/EXPRESION/EXPRESION_RELACIONAL/Expresion_Relacional.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | parser/team12/src/EXPRESION/EXPRESION_RELACIONAL/Expresion_Relacional.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | parser/team12/src/EXPRESION/EXPRESION_RELACIONAL/Expresion_Relacional.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | import sys, os.path
import datetime
nodo_dir = (os.path.abspath(os.path.join(os.path.dirname(__file__), '..','..')) + '\\ENTORNO\\')
sys.path.append(nodo_dir)
from Tipo import Data_Type
# **************************************************************************************************************
def diferente(exp1, exp2, expRes, enviroment):
val1 = exp1.execute(enviroment)
val2 = exp2.execute(enviroment)
if exp1.tipo.data_type == Data_Type.numeric and exp2.tipo.data_type == Data_Type.numeric :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 != val2
elif exp1.tipo.data_type == Data_Type.character and exp2.tipo.data_type == Data_Type.character :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 != val2
elif exp1.tipo.data_type == Data_Type.boolean and exp2.tipo.data_type == Data_Type.boolean :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 != val2
else:
expRes.tipo.data_type = Data_Type.error
expRes.valorExpresion = None
return expRes
# **************************************************************************************************************
# **************************************************************************************************************
def igualdad(exp1, exp2, expRes, enviroment):
val1 = exp1.execute(enviroment)
val2 = exp2.execute(enviroment)
if exp1.tipo.data_type == Data_Type.numeric and exp2.tipo.data_type == Data_Type.numeric :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 == val2
elif exp1.tipo.data_type == Data_Type.character and exp2.tipo.data_type == Data_Type.character :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 == val2
elif exp1.tipo.data_type == Data_Type.boolean and exp2.tipo.data_type == Data_Type.boolean :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 == val2
else:
expRes.tipo.data_type = Data_Type.error
expRes.valorExpresion = None
return expRes
# **************************************************************************************************************
# **************************************************************************************************************
def mayor(exp1, exp2, expRes, enviroment):
val1 = exp1.execute(enviroment)
val2 = exp2.execute(enviroment)
if exp1.tipo.data_type == Data_Type.numeric and exp2.tipo.data_type == Data_Type.numeric :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 > val2
elif exp1.tipo.data_type == Data_Type.character and exp2.tipo.data_type == Data_Type.character :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 > val2
elif exp1.tipo.data_type == Data_Type.boolean and exp2.tipo.data_type == Data_Type.boolean :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 > val2
else:
expRes.tipo.data_type = Data_Type.error
expRes.valorExpresion = None
return expRes
# **************************************************************************************************************
# **************************************************************************************************************
def mayorigual(exp1, exp2, expRes, enviroment):
val1 = exp1.execute(enviroment)
val2 = exp2.execute(enviroment)
if exp1.tipo.data_type == Data_Type.numeric and exp2.tipo.data_type == Data_Type.numeric :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 >= val2
elif exp1.tipo.data_type == Data_Type.character and exp2.tipo.data_type == Data_Type.character :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 >= val2
elif exp1.tipo.data_type == Data_Type.boolean and exp2.tipo.data_type == Data_Type.boolean :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 >= val2
else:
expRes.tipo.data_type = Data_Type.error
expRes.valorExpresion = None
return expRes
# **************************************************************************************************************
# **************************************************************************************************************
def menor(exp1, exp2, expRes, enviroment):
val1 = exp1.execute(enviroment)
val2 = exp2.execute(enviroment)
if exp1.tipo.data_type == Data_Type.numeric and exp2.tipo.data_type == Data_Type.numeric :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 < val2
elif exp1.tipo.data_type == Data_Type.character and exp2.tipo.data_type == Data_Type.character :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 < val2
elif exp1.tipo.data_type == Data_Type.boolean and exp2.tipo.data_type == Data_Type.boolean :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 < val2
else:
expRes.tipo.data_type = Data_Type.error
expRes.valorExpresion = None
return expRes
# **************************************************************************************************************
# **************************************************************************************************************
def menorigual(exp1, exp2, expRes, enviroment):
val1 = exp1.execute(enviroment)
val2 = exp2.execute(enviroment)
if exp1.tipo.data_type == Data_Type.numeric and exp2.tipo.data_type == Data_Type.numeric :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 <= val2
elif exp1.tipo.data_type == Data_Type.character and exp2.tipo.data_type == Data_Type.character :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 <= val2
elif exp1.tipo.data_type == Data_Type.boolean and exp2.tipo.data_type == Data_Type.boolean :
expRes.tipo.data_type = Data_Type.boolean
expRes.valorExpresion = val1 <= val2
else:
expRes.tipo.data_type = Data_Type.error
expRes.valorExpresion = None
return expRes
# ************************************************************************************************************** | 38.505882 | 112 | 0.540636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,365 | 0.208524 |
7652996e6f5ffb51a7d0df902af01221997efaae | 728 | py | Python | app/models.py | evantoh/news-article | 54976a3b1b5f6a141bbe13e8ab3a49dfd46d8500 | [
"Unlicense"
] | null | null | null | app/models.py | evantoh/news-article | 54976a3b1b5f6a141bbe13e8ab3a49dfd46d8500 | [
"Unlicense"
] | null | null | null | app/models.py | evantoh/news-article | 54976a3b1b5f6a141bbe13e8ab3a49dfd46d8500 | [
"Unlicense"
] | null | null | null |
class Articles:
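    """
    Articles class to define a news article object
    """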
def __init__(self,id,name,author, title, description, url, urlToImage,publishedAt):
self.id = id
self.name = name
self.author = author
self.title = title
self.description = description
self.url = url
self.urlToImage = urlToImage
self.publishedAt = publishedAt
class Source:
"""
Source class to define news source object
"""
def __init__(self, id, name, author, title, url, urlToImage, publishedAt):
self.id = id
self.name = name
self.author = author
self.title = title
self.url = url
self.urlToImage = urlToImage
self.publishedAt = publishedAt
| 21.411765 | 87 | 0.587912 | 708 | 0.972527 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.078297 |
7653939722f793ed976839ee4ae8d6af71343c7a | 810 | py | Python | fppv/cli.py | kdheepak/fppv | 5adfb5a6bc62ffcd56faa888ddfe173ae185656c | [
"BSD-3-Clause"
] | null | null | null | fppv/cli.py | kdheepak/fppv | 5adfb5a6bc62ffcd56faa888ddfe173ae185656c | [
"BSD-3-Clause"
] | null | null | null | fppv/cli.py | kdheepak/fppv | 5adfb5a6bc62ffcd56faa888ddfe173ae185656c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""cli module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import click
import traceback
import importlib
from . import version
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument('modulename')
@click.option('--debug', default=False, help='')
@click.version_option(version.__version__, '--version')
def cli(**kwargs):
modulename = kwargs['modulename']
module = importlib.import_module(modulename)
    try:
        mod_version = module.__version__
    except AttributeError:
        # Avoid shadowing the imported `version` module; fall back to an empty string.
        mod_version = ''
    click.secho("{} == ".format(modulename), nl=False)
    click.secho("{}".format(mod_version), fg='green', bold=True)
if __name__ == '__main__':
cli()
| 23.142857 | 60 | 0.706173 | 0 | 0 | 0 | 0 | 486 | 0.6 | 0 | 0 | 127 | 0.15679 |
76544489acae36e149f993c55ba99cd6dec43dd9 | 17,134 | py | Python | main.py | kozistr/naver-movie-rate-prediction | 2c0a55c690e319fd4bbe49f23f80e405ed4881b3 | [
"MIT"
] | 4 | 2018-08-17T08:11:51.000Z | 2018-08-22T19:08:57.000Z | main.py | kozistr/naver-movie-rate-prediction | 2c0a55c690e319fd4bbe49f23f80e405ed4881b3 | [
"MIT"
] | 5 | 2018-08-25T18:51:55.000Z | 2018-09-09T08:55:29.000Z | main.py | kozistr/naver-movie-rate-prediction | 2c0a55c690e319fd4bbe49f23f80e405ed4881b3 | [
"MIT"
] | 1 | 2019-01-12T16:13:58.000Z | 2019-01-12T16:13:58.000Z | import os
import time
import argparse
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from config import get_config, export_config
from model.textcnn import TextCNN
from model.textrnn import TextRNN
from sklearn.model_selection import train_test_split
from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, Char2VecEmbeddings, DataLoader, DataIterator
parser = argparse.ArgumentParser(description='train/test movie review classification model')
parser.add_argument('--checkpoint', type=str, help='pre-trained model', default=None)
parser.add_argument('--refine_data', type=bool, help='solving data imbalance problem', default=False)
args = parser.parse_args()
# parsed args
checkpoint = args.checkpoint
refine_data = args.refine_data
# Configuration
config, _ = get_config()
np.random.seed(config.seed)
tf.set_random_seed(config.seed)
def data_distribution(y_, size=10, img='dist.png'):
"""
movie rate data distribution via plot chart
:param y_: rate data, numpy array
:param size: classes, int
:param img: save to, str
:return: numpy array
"""
from matplotlib import pyplot as plt
# showing data distribution
y_dist = np.zeros((10,), dtype=np.int32)
for y in tqdm(y_):
if size == 1:
y_dist[y - 1] += 1
else:
y_dist[np.argmax(y, axis=-1)] += 1
plt.figure(figsize=(10, 8))
plt.xlabel('rate')
plt.ylabel('frequency')
plt.grid(True)
    plt.bar(range(size), y_dist, width=.35, align='center', alpha=.5, label='frequency')
plt.xticks(range(10), list(range(1, 11)))
plt.savefig(img)
plt.show()
return y_dist
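# Note: data_distribution is a standalone diagnostic helper; it is not invoked
# in the training/test flow below.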
def data_confusion_matrix(y_pred, y_true, labels, normalize=True):
import itertools
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
"""
    0-2: bad
    3-6: normal
    7-10: good
"""
def labeling(y):
if 0 <= y < 3:
return 0
elif 3 <= y < 7:
return 1
else:
return 2
y_pred = np.array([labeling(y) for y in y_pred])
y_true = np.array([labeling(y[0]) for y in y_true])[:-20]
assert y_pred.shape[0] == y_true.shape[0]
cnf_mat = confusion_matrix(y_pred, y_true)
np.set_printoptions(precision=2)
if normalize:
cnf_mat = cnf_mat.astype('float') / cnf_mat.sum(axis=1)[:, np.newaxis]
plt.figure()
plt.imshow(cnf_mat, interpolation='nearest', cmap=plt.cm.Blues)
plt.title("Confusion Matrix")
plt.colorbar()
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, labels, rotation=45)
plt.yticks(tick_marks, labels)
thresh = cnf_mat.max() / 2.
for i, j in itertools.product(range(cnf_mat.shape[0]), range(cnf_mat.shape[1])):
plt.text(j, i, format(cnf_mat[i, j], '.2f'),
horizontalalignment="center",
color="white" if cnf_mat[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.savefig("./confusion_matrix.png")
plt.show()
def load_trained_embeds(embed_mode='char'):
"""
:param embed_mode: embedding mode, str
:return: embedding vector, numpy array
"""
if embed_mode == 'd2v':
vec = Doc2VecEmbeddings(config.d2v_model, config.embed_size) # Doc2Vec Loader
if config.verbose:
print("[+] Doc2Vec loaded! Total %d pre-trained sentences, %d dims" % (len(vec), config.embed_size))
elif embed_mode == 'w2v':
        vec = Word2VecEmbeddings(config.w2v_model, config.embed_size)  # Word2Vec Loader
if config.verbose:
print("[+] Word2Vec loaded! Total %d pre-trained words, %d dims" % (len(vec), config.embed_size))
else:
vec = Char2VecEmbeddings()
if config.verbose:
print("[+] Using Char2Vec, %d dims" % config.embed_size)
return vec
if __name__ == '__main__':
embed_type = config.use_pre_trained_embeds
# Stage 1 : loading trained embeddings
vectors = load_trained_embeds(embed_type)
# Stage 2 : loading tokenize data
if config.use_pre_trained_embeds == 'c2v': # Char2Vec
if os.path.isfile(config.processed_dataset):
ds = DataLoader(file=config.processed_dataset,
fn_to_save=None,
load_from='db',
n_classes=config.n_classes,
analyzer='char',
is_analyzed=True,
use_save=False,
config=config) # DataSet Loader
else:
ds = DataLoader(file=None,
fn_to_save=config.processed_dataset,
load_from='db',
n_classes=config.n_classes,
analyzer='char',
is_analyzed=False,
use_save=True,
config=config) # DataSet Loader
ds_len = len(ds)
x_data = np.zeros((ds_len, config.sequence_length), dtype=np.uint8)
sen_len = list()
min_length, max_length, avg_length = config.sequence_length, 0, 0
for i in tqdm(range(ds_len)):
sentence = ' '.join(ds.sentences[i]).strip('\n')
sentence_length = len(sentence)
if sentence_length < min_length:
min_length = sentence_length
if sentence_length > max_length:
max_length = sentence_length
sen_len.append(sentence_length)
sent = vectors.decompose_str_as_one_hot(sentence,
warning=False)[:config.sequence_length]
x_data[i] = np.pad(sent, (0, config.sequence_length - len(sent)), 'constant', constant_values=0)
if config.verbose:
print("[*] Total %d samples (training)" % x_data.shape[0])
print(" [*] min length of reviews : %d" % min_length)
print(" [*] max length of reviews : %d" % max_length)
avg_length = sum(sen_len) / x_data.shape[0]
print(" [*] avg length of reviews : %d" % avg_length)
else: # Word2Vec / Doc2Vec
ds = DataLoader(file=config.processed_dataset,
n_classes=config.n_classes,
analyzer=None,
is_analyzed=True,
use_save=False,
config=config) # DataSet Loader
ds_len = len(ds)
x_data = np.zeros((ds_len, config.sequence_length), dtype=np.int32)
for i in tqdm(range(ds_len)):
sent = ds.sentences[i][:config.sequence_length]
x_data[i] = np.pad(vectors.words_to_index(sent),
(0, config.sequence_length - len(sent)), 'constant', constant_values=config.vocab_size)
y_data = np.array(ds.labels).reshape(-1, config.n_classes)
ds = None
if config.verbose:
print("[*] sentence to %s index conversion finish!" % config.use_pre_trained_embeds)
if refine_data:
# resizing the amount of rate-10 data
# 2.5M to 500K # downsize to 20%
if not config.n_classes == 1:
rate_10_idx = [idx for idx, y in tqdm(enumerate(y_data)) if np.argmax(y, axis=-1) == 9]
else:
rate_10_idx = [idx for idx, y in tqdm(enumerate(y_data)) if y == 10]
rand_idx = np.random.choice(rate_10_idx, 4 * len(rate_10_idx) // 5)
x_data = np.delete(x_data, rand_idx, axis=0).reshape(-1, config.sequence_length)
y_data = np.delete(y_data, rand_idx, axis=0).reshape(-1, config.n_classes)
if config.verbose:
print("[*] refined comment : ", x_data.shape)
print("[*] refined rate : ", y_data.shape)
# shuffle/split data
x_train, x_valid, y_train, y_valid = train_test_split(x_data, y_data, random_state=config.seed,
test_size=config.test_size, shuffle=True)
if config.verbose:
print("[*] train/test %d/%d(%.1f/%.1f) split!" % (len(y_train), len(y_valid),
1. - config.test_size, config.test_size))
del x_data, y_data
data_size = x_train.shape[0]
# DataSet Iterator
di = DataIterator(x=x_train, y=y_train, batch_size=config.batch_size)
if config.device == 'gpu':
dev_config = tf.ConfigProto()
dev_config.gpu_options.allow_growth = True
else:
dev_config = None
with tf.Session(config=dev_config) as s:
if config.model == 'charcnn':
# Model Loaded
model = TextCNN(s=s,
mode=config.mode,
w2v_embeds=vectors.embeds if not embed_type == 'c2v' else None,
n_classes=config.n_classes,
optimizer=config.optimizer,
kernel_sizes=config.kernel_size,
n_filters=config.filter_size,
n_dims=config.embed_size,
vocab_size=config.character_size if embed_type == 'c2v' else config.vocab_size + 1,
sequence_length=config.sequence_length,
lr=config.lr,
lr_decay=config.lr_decay,
lr_lower_boundary=config.lr_lower_boundary,
fc_unit=config.fc_unit,
th=config.act_threshold,
grad_clip=config.grad_clip,
summary=config.pretrained,
score_function=config.score_function,
use_se_module=config.use_se_module,
se_radio=config.se_ratio,
se_type=config.se_type,
use_multi_channel=config.use_multi_channel)
elif config.model == 'charrnn':
model = TextRNN(s=s,
mode=config.mode,
w2v_embeds=vectors.embeds if not embed_type == 'c2v' else None,
n_classes=config.n_classes,
optimizer=config.optimizer,
n_gru_cells=config.n_gru_cells,
n_gru_layers=config.n_gru_layers,
n_attention_size=config.n_attention_size,
n_dims=config.embed_size,
vocab_size=config.character_size if embed_type == 'c2v' else config.vocab_size + 1,
sequence_length=config.sequence_length,
lr=config.lr,
lr_decay=config.lr_decay,
lr_lower_boundary=config.lr_lower_boundary,
fc_unit=config.fc_unit,
grad_clip=config.grad_clip,
summary=config.pretrained)
else:
raise NotImplementedError("[-] Not Implemented Yet")
if config.verbose:
print("[+] %s model loaded" % config.model)
# Initializing
s.run(tf.global_variables_initializer())
# exporting config
export_config()
# loading checkpoint
global_step = 0
if checkpoint:
print("[*] Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(config.pretrained)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
model.saver.restore(s, ckpt.model_checkpoint_path)
global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
print("[+] global step : %d" % global_step, " successfully loaded")
else:
print('[-] No checkpoint file found')
start_time = time.time()
if config.is_train:
best_loss = 1e1 # initial value
batch_size = config.batch_size
model.global_step.assign(tf.constant(global_step))
restored_epochs = global_step // (data_size // batch_size)
for epoch in range(restored_epochs, config.epochs):
for x_tr, y_tr in di.iterate():
# training
_, loss, acc = s.run([model.train_op, model.loss, model.accuracy],
feed_dict={
model.x: x_tr,
model.y: y_tr,
model.do_rate: config.drop_out,
})
if global_step and global_step % config.logging_step == 0:
# validation
rand_idx = np.random.choice(np.arange(len(y_valid)), len(y_valid) // 20) # 5% of valid data
x_va, y_va = x_valid[rand_idx], y_valid[rand_idx]
valid_loss, valid_acc = 0., 0.
valid_iter = len(y_va) // batch_size
for i in tqdm(range(0, valid_iter)):
v_loss, v_acc = s.run([model.loss, model.accuracy],
feed_dict={
model.x: x_va[batch_size * i:batch_size * (i + 1)],
model.y: y_va[batch_size * i:batch_size * (i + 1)],
model.do_rate: .0,
})
valid_acc += v_acc
valid_loss += v_loss
valid_loss /= valid_iter
valid_acc /= valid_iter
print("[*] epoch %03d global step %07d" % (epoch, global_step),
" train_loss : {:.8f} train_acc : {:.4f}".format(loss, acc),
" valid_loss : {:.8f} valid_acc : {:.4f}".format(valid_loss, valid_acc))
# summary
summary = s.run(model.merged,
feed_dict={
model.x: x_tr,
model.y: y_tr,
model.do_rate: .0,
})
# Summary saver
model.writer.add_summary(summary, global_step)
# Model save
model.saver.save(s, config.pretrained + '%s.ckpt' % config.model,
global_step=global_step)
if valid_loss < best_loss:
print("[+] model improved {:.7f} to {:.7f}".format(best_loss, valid_loss))
best_loss = valid_loss
model.best_saver.save(s, config.pretrained + '%s-best_loss.ckpt' % config.model,
global_step=global_step)
print()
model.global_step.assign_add(tf.constant(1))
global_step += 1
end_time = time.time()
print("[+] Training Done! Elapsed {:.8f}s".format(end_time - start_time))
else: # test
x_train, y_train = None, None
x_va, y_va = x_valid, y_valid
valid_loss, valid_acc = 0., 0.
batch_size = config.batch_size
valid_iter = len(y_va) // config.batch_size
v_rates = []
for i in tqdm(range(0, valid_iter)):
v_loss, v_acc, v_rate = s.run([model.loss, model.accuracy, model.rates],
feed_dict={
model.x: x_va[batch_size * i:batch_size * (i + 1)],
model.y: y_va[batch_size * i:batch_size * (i + 1)],
model.do_rate: .0,
})
valid_acc += v_acc
valid_loss += v_loss
for j in v_rate:
v_rates.append(j)
valid_loss /= valid_iter
valid_acc /= valid_iter
print("[+] Validation Result (%s model %d global steps), total %d samples" %
(config.model, global_step, x_valid.shape[0]))
print(" => valid_loss (MSE) : {:.8f} valid_acc (th=1.0) : {:.4f}".format(valid_loss, valid_acc))
"""
with open('pred.txt', 'w') as f:
f.writelines([str("{:.4f}\n".format(rate[0])) for rate in v_rates])
"""
# confusion matrix
data_confusion_matrix(v_rates, y_va, ["bad", "normal", "good"])
| 39.662037 | 118 | 0.516867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,373 | 0.138497 |
76560a250f0facbd8764ad7981ecd577c8321aaa | 2,278 | py | Python | tests/test_config.py | sidorov-as/flake8-adjustable-complexity | db46211b5ebeed7b73971c8e82132c8fd02f768b | ["MIT"] | 13 | 2019-04-25T12:51:45.000Z | 2021-09-03T09:58:30.000Z | tests/test_config.py | sidorov-as/flake8-adjustable-complexity | db46211b5ebeed7b73971c8e82132c8fd02f768b | ["MIT"] | 6 | 2019-04-21T08:11:24.000Z | 2020-05-04T10:57:29.000Z | tests/test_config.py | sidorov-as/flake8-adjustable-complexity | db46211b5ebeed7b73971c8e82132c8fd02f768b | ["MIT"] | 4 | 2019-09-06T23:09:10.000Z | 2021-01-27T14:53:43.000Z | import pytest
from flake8.exceptions import ExecutionError
from flake8_adjustable_complexity.config import DEFAULT_CONFIG
@pytest.mark.parametrize(
('args', 'max_mccabe_complexity'),
[
(['--max-mccabe-complexity=5'], 5),
(['--max-adjustable-complexity=10'], 10),
([], DEFAULT_CONFIG.max_mccabe_complexity),
],
)
def test_parse_max_mccabe_complexity(parse_options, args, max_mccabe_complexity):
config = parse_options(args)
assert config.max_mccabe_complexity == max_mccabe_complexity
@pytest.mark.parametrize(
('args', 'max_complexity_per_path'),
[
(
[
'--per-path-max-adjustable-complexity',
'foo.py:10,bar.py:20',
],
{
'foo.py': 10,
'bar.py': 20,
},
),
([], DEFAULT_CONFIG.max_complexity_per_path),
],
)
def test_parse_max_complexity_per_path(parse_options, args, max_complexity_per_path):
config = parse_options(args)
assert config.max_complexity_per_path == max_complexity_per_path
def test_parse_max_complexity_per_path_error(parse_options):
args = [
'--per-path-max-adjustable-complexity',
'foo.py:invalid-complexity',
]
with pytest.raises(ExecutionError) as excinfo:
parse_options(args)
assert "Couldn\'t parse --per-path-adjustable-max-complexity" in str(excinfo.value)
@pytest.mark.parametrize(
('args', 'var_names_blacklist'),
[
(
['--var-names-extra-blacklist=my_obj,my_var'],
DEFAULT_CONFIG.var_names_blacklist | {'my_obj', 'my_var'},
),
(
['--var-names-whitelist=var,result'],
DEFAULT_CONFIG.var_names_blacklist - {'var', 'result'},
),
(
[
'--var-names-extra-blacklist=my_obj,my_var',
'--var-names-whitelist=var,result',
],
(DEFAULT_CONFIG.var_names_blacklist | {'my_obj', 'my_var'}) - {'var', 'result'},
),
([], DEFAULT_CONFIG.var_names_blacklist),
],
)
def test_parse_var_names_blacklist(parse_options, args, var_names_blacklist):
config = parse_options(args)
assert config.var_names_blacklist == var_names_blacklist
| 28.475 | 92 | 0.615891 | 0 | 0 | 0 | 0 | 1,810 | 0.794557 | 0 | 0 | 552 | 0.242318 |
76563bc9936580a8d4669bd7cefb4b1d15996d7b | 36 | py | Python | Allswap_djangoREST/backend/allswap/accounts/views.py | yds05238/AllSwap_Backend | 95429fe6c709feef6b9e4b2349921e1cc4dd4c18 | ["MIT"] | 2 | 2020-02-19T05:06:49.000Z | 2020-02-20T17:34:41.000Z | Allswap_djangoREST/backend/allswap/accounts/views.py | yds05238/AllSwap_Backend | 95429fe6c709feef6b9e4b2349921e1cc4dd4c18 | ["MIT"] | 28 | 2020-06-05T20:52:59.000Z | 2022-03-12T00:15:17.000Z | Allswap_djangoREST/backend/allswap/accounts/views.py | yds05238/AllSwap | 95429fe6c709feef6b9e4b2349921e1cc4dd4c18 | ["MIT"] | null | null | null | from rest_framework import generics
| 18 | 35 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |